# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html
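#
# For reference, a hypothetical ITEM_PIPELINES entry in settings.py for the
# classes in this module might look like the following (the module path and
# priorities are assumptions, not taken from the project's actual settings):
#
#   ITEM_PIPELINES = {
#       "org_news.pipelines.NewsTitleClassifyPipeline": 100,
#       "org_news.pipelines.NewsStandardPipeline": 200,
#       "org_news.pipelines.MongoPipeline": 300,
#   }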

import re

import pymongo

# useful for handling different item types with a single interface
from itemadapter import ItemAdapter


class OrgNewsPipeline:
    # Default pass-through pipeline; items are returned unchanged.
    def process_item(self, item, spider):
        return item


class NewsTitleClassifyPipeline:
    # Keyword lists for tagging news items by topic. Only the Database
    # keywords are compiled into a pattern below; the HumanAffairs keywords
    # are defined but not yet used.
    KEYWORDS = dict(
        # "activated", "trial", "subscription stopped", "newly added", "duration"
        Database=['开通', '试用', '停订', '新增', '时长'],
        # "current director", "director change"
        HumanAffairs=['现在馆长', '馆长更换'],
    )
    keyword_db_pattern = re.compile('|'.join(KEYWORDS['Database']))

    def process_item(self, item, spider):
        adapter = ItemAdapter(item)
        # Fall back to an empty string so findall() never receives None.
        news_title = adapter.get("title") or ""
        item['tags'] = self.keyword_db_pattern.findall(news_title)
        return item
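
# Example: with the pattern above, an illustrative title such as
# "图书馆新增开通某数据库试用" (not project data) would yield
# tags == ['新增', '开通', '试用'], in order of appearance in the title.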


class NewsStandardPipeline:
    # \s already matches \r and \n, so a single whitespace class is enough
    # to strip all whitespace from the body text.
    content_standard_pattern = re.compile(r'\s')

    def process_item(self, item, spider):
        adapter = ItemAdapter(item)
        # Fall back to an empty string so sub() never receives None.
        news_content = adapter.get("news_content") or ""
        item['news_content'] = self.content_standard_pattern.sub('', news_content)
        return item


class MongoPipeline:
    collection_name = "data_org_news"

    def __init__(self, mongo_uri, mongo_db):
        self.mongo_uri = mongo_uri
        self.mongo_db = mongo_db

    @classmethod
    def from_crawler(cls, crawler):
        # Read connection settings from the crawler's settings;
        # MONGO_DATABASE falls back to "items" when unset.
        return cls(
            mongo_uri=crawler.settings.get("MONGO_URI"),
            mongo_db=crawler.settings.get("MONGO_DATABASE", "items"),
        )

    def open_spider(self, spider):
        # One client per spider run; MongoDB creates the collection lazily.
        self.client = pymongo.MongoClient(self.mongo_uri)
        self.db = self.client[self.mongo_db]

    def close_spider(self, spider):
        self.client.close()

    def process_item(self, item, spider):
        # Store every item as a plain dict in the data_org_news collection.
        self.db[self.collection_name].insert_one(ItemAdapter(item).asdict())
        return item
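

# A minimal sketch of the settings.py values this pipeline reads (the URI and
# database name below are placeholders, not values taken from this project):
#
#   MONGO_URI = "mongodb://localhost:27017"
#   MONGO_DATABASE = "org_news"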