# Define here the models for your spider middleware
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/spider-middleware.html
from __future__ import annotations
import json
from typing import TYPE_CHECKING
import redis
from scrapy import signals, Spider
from scrapy.exceptions import CloseSpider
# useful for handling different item types with a single interface
from itemadapter import ItemAdapter
if TYPE_CHECKING:
    from scrapy.crawler import Crawler
    from scrapy import Request


class ScienceArticleWosSpiderMiddleware:
    # Not all methods need to be defined. If a method is not defined,
    # scrapy acts as if the spider middleware does not modify the
    # passed objects.

    @classmethod
    def from_crawler(cls, crawler):
        # This method is used by Scrapy to create your spiders.
        s = cls()
        crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
        return s

    def process_spider_input(self, response, spider):
        # Called for each response that goes through the spider
        # middleware and into the spider.
        # Should return None or raise an exception.
        return None

    def process_spider_output(self, response, result, spider):
        # Called with the results returned from the Spider, after
        # it has processed the response.
        # Must return an iterable of Request, or item objects.
        for i in result:
            yield i

    def process_spider_exception(self, response, exception, spider):
        # Called when a spider or process_spider_input() method
        # (from other spider middleware) raises an exception.
        # Should return either None or an iterable of Request or item objects.
        pass

    async def process_start(self, start):
        # Called with an async iterator over the spider start() method or the
        # matching method of an earlier spider middleware.
        async for item_or_request in start:
            yield item_or_request

    def spider_opened(self, spider):
        spider.logger.info("Spider opened: %s" % spider.name)


class ScienceArticleWosDownloaderMiddleware:
    # Not all methods need to be defined. If a method is not defined,
    # scrapy acts as if the downloader middleware does not modify the
    # passed objects.

    @classmethod
    def from_crawler(cls, crawler):
        # This method is used by Scrapy to create your spiders.
        s = cls()
        crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
        return s

    def process_request(self, request, spider):
        # Called for each request that goes through the downloader
        # middleware.
        # Must either:
        # - return None: continue processing this request
        # - or return a Response object
        # - or return a Request object
        # - or raise IgnoreRequest: process_exception() methods of
        #   installed downloader middleware will be called
        return None

    def process_response(self, request, response, spider):
        # Called with the response returned from the downloader.
        # Must either:
        # - return a Response object
        # - return a Request object
        # - or raise IgnoreRequest
        return response

    def process_exception(self, request, exception, spider):
        # Called when a download handler or a process_request()
        # (from other downloader middleware) raises an exception.
        # Must either:
        # - return None: continue processing this exception
        # - return a Response object: stops process_exception() chain
        # - return a Request object: stops process_exception() chain
        pass

    def spider_opened(self, spider):
        spider.logger.info("Spider opened: %s" % spider.name)


class WosStarterApiXkeyDownloaderMiddleware:
    """
    Injects the X-ApiKey header required by the WoS Starter API.

    https://api.clarivate.com/swagger-ui/?apikey=none&url=https%3A%2F%2Fdeveloper.clarivate.com%2Fapis%2Fwos-starter%2Fswagger
    """

    def process_request(self, request, spider):
        # NOTE: hard-coded API key; consider moving it to settings.
        key_param = {
            'X-ApiKey': '53b8164e7543ccebe489988287e8b871bc2c0880'
        }
        request.headers.update(key_param)
        # Return None so the mutated request continues through the chain.
        return None
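
# A minimal sketch of how these middlewares might be enabled in settings.py.
# The module path and the priority numbers are assumptions for illustration,
# not values taken from this project's configuration:
#
#     DOWNLOADER_MIDDLEWARES = {
#         "science_article_wos.middlewares.WosStarterApiXkeyDownloaderMiddleware": 543,
#         "science_article_wos.middlewares.WosCookieMiddleware": 544,
#     }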


class WosCookieMiddleware:
    """Rotates WoS session IDs (SIDs) kept in a Redis-backed cookie pool."""

    def __init__(self, redis_uri: str):
        self.redis_cli = redis.from_url(redis_uri, decode_responses=True)
        # One hash per SID: cookie_pool:wos_sid:<SID> -> {status, used_times}
        self.redis_key_prefix = 'cookie_pool:wos_sid'
        # Rotation queue (a Redis list) holding the SIDs currently in use.
        self.cookiepool_cache_key = 'cookie_pool:wos:sid_q'

    @classmethod
    def from_crawler(cls, crawler: Crawler, *args, **kwargs):
        settings = crawler.settings
        middle = cls(
            redis_uri=settings.get("REDIS_URL")
        )
        crawler.signals.connect(middle.open_spider, signal=signals.spider_opened)
        crawler.signals.connect(middle.close_spider, signal=signals.spider_closed)
        return middle

    def open_spider(self, spider: Spider):
        self.loading_sid_from_redis()

    def close_spider(self, spider: Spider, reason: str = None):
        self.del_sid_from_redis()

    def process_request(self, request: Request, spider):
        req_wos_sid = request.meta.get('wos_sid')
        if not req_wos_sid:
            sid = self.get_sid_from_redis()
            if not sid:
                raise CloseSpider("no SID available from the pool; shutting down")
            # Bind the acquired wos_sid to the request so the parse
            # callback can read it from request.meta.
            request.meta['wos_sid'] = sid
        else:
            sid = req_wos_sid
        cookie_1 = {'dotmatics.elementalKey': 'SLsLWlMhrHnTjDerSrlG'}
        headers = {
            'authority': 'webofscience.clarivate.cn',
            'accept-language': 'zh-CN,zh;q=0.9',
            'cache-control': 'no-cache',
            'origin': 'https://webofscience.clarivate.cn',
            'pragma': 'no-cache',
            # 'referer': 'https://webofscience.clarivate.cn/wos/woscc/advanced-search',
        }
        request.cookies = cookie_1
        if request.url.endswith('runQuerySearch'):
            # Search requests must carry the SID as a query parameter.
            # Request._set_url is private API, used here to mutate the URL
            # in place rather than reschedule a replaced request.
            request._set_url(request.url + "?SID=%s" % sid)
            headers.update(
                {'accept': 'application/x-ndjson', 'content-type': 'text/plain;charset=UTF-8'})
        else:
            headers.update(
                {'accept': 'application/json, text/plain, */*', 'content-type': 'application/json',
                 'x-1p-wos-sid': sid})
        for hk, hv in headers.items():
            request.headers[hk] = hv
        return None

    def process_response(self, request, response, spider):
        if response.status != 200:
            # A non-200 response usually means the SID has gone stale; flag it.
            self.mark_sid_status(request.meta.get('wos_sid'))
        return response
    def get_sid_from_redis(self):
        # Pop from the tail and push back onto the same list, so SIDs are
        # handed out round-robin without ever leaving the pool.
        val = self.redis_cli.rpoplpush(self.cookiepool_cache_key, self.cookiepool_cache_key)
        if val:
            self.inc_used_times(val)
            return val
        return None

    def inc_used_times(self, val: str = None):
        self.redis_cli.hincrby(f'{self.redis_key_prefix}:{val}', 'used_times', 1)

    def mark_sid_status(self, sid: str, status: str = 'validate'):
        """
        :param sid: the session ID to flag
        :param status: 'validate' (flag for re-validation) or 'expired'
        :return:
        """
        if status == "expired":
            # Expired: delete the SID hash and drop it from the rotation
            # queue so it is never handed out again.
            self.redis_cli.delete(f'{self.redis_key_prefix}:{sid}')
            self.redis_cli.lrem(self.cookiepool_cache_key, 0, sid)
        else:
            self.redis_cli.hset(f'{self.redis_key_prefix}:{sid}', 'status', status)

    def loading_sid_from_redis(self) -> list:
        """
        Load every SID whose status is "normal" into the Redis list that
        serves as the rotation queue.
        :return: the list of valid SIDs that were queued
        """
        valid_sid = []
        # scan_iter avoids blocking Redis the way KEYS can on large pools.
        for key in self.redis_cli.scan_iter(f'{self.redis_key_prefix}:*'):
            # Fetch the full hash stored for this SID.
            key_obj: dict = self.redis_cli.hgetall(key)
            if key_obj.get("status") == "normal":
                real_sid = key.rsplit(':', maxsplit=1)[-1]
                valid_sid.append(real_sid)
                self.redis_cli.lpush(self.cookiepool_cache_key, real_sid)
        return valid_sid

    def del_sid_from_redis(self):
        self.redis_cli.delete(self.cookiepool_cache_key)
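

# A minimal sketch of how the Redis pool consumed by WosCookieMiddleware
# might be seeded. The helper name and arguments are hypothetical; only the
# key layout (a cookie_pool:wos_sid:<SID> hash per session, queued into the
# cookie_pool:wos:sid_q rotation list at spider_opened) comes from the
# middleware above.
def seed_wos_sid_example(redis_uri: str, sid: str) -> None:
    cli = redis.from_url(redis_uri, decode_responses=True)
    # loading_sid_from_redis() only queues SIDs whose status is "normal".
    cli.hset(f'cookie_pool:wos_sid:{sid}',
             mapping={'status': 'normal', 'used_times': 0})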


class WosSessionMiddleware:
    def process_request(self, request: Request, spider: Spider):
        # Placeholder: session handling is not implemented yet.
        pass


class A:
    # Scratch helper for inspecting the SID pool; kept from development.
    def __init__(self, redis_cli):
        self.redis_cli = redis_cli

    def load_keys(self, name):
        # NOTE: `name` is ignored; the key pattern is hard-coded.
        return self.redis_cli.keys(r'cookie_pool:wos_sid:*')

    def get_one_sid(self, name):
        # rpoplpush needs both a source and a destination; rotating a list
        # onto itself passes the same name twice.
        return self.redis_cli.rpoplpush(name, name)
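

# A minimal sketch of how a spider callback might consume the wos_sid that
# WosCookieMiddleware stores in request.meta. The spider name and logging
# are assumptions for illustration only:
#
#     class WosDemoSpider(Spider):
#         name = "wos_demo"
#
#         def parse(self, response):
#             sid = response.meta.get("wos_sid")  # set by WosCookieMiddleware
#             self.logger.info("fetched %s with SID %s", response.url, sid)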