init repo

cooper 2024-05-17 13:49:44 +08:00
commit da1501e67e
18 changed files with 600 additions and 0 deletions

.secret (Normal file, +1 line)

@@ -0,0 +1 @@
o36r0lgw71mdzm9rkwrv3wi1wn|3600|1715909100.492765|oadugxyl9fhoqsamqopc

decspider/__init__.py (Normal file, empty)

Binary files not shown (6 files).

decspider/items.py (Normal file, +21 lines)

@@ -0,0 +1,21 @@
# Define here the models for your scraped items
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/items.html
import scrapy
import scrapy.resolver
class NewsItem(scrapy.Item):
    # define the fields for your item here like:
    # name = scrapy.Field()
    title = scrapy.Field()
    date = scrapy.Field()
    source = scrapy.Field()
    content = scrapy.Field()
    image_urls = scrapy.Field()
    classify = scrapy.Field()
    collection = scrapy.Field()
    url = scrapy.Field()
    source_url = scrapy.Field()

decspider/middlewares.py (Normal file, +168 lines)

@@ -0,0 +1,168 @@
# Define here the models for your spider middleware
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/spider-middleware.html
from scrapy import signals
# useful for handling different item types with a single interface
from itemadapter import is_item, ItemAdapter
from .myutils import ProxyPool
from .settings import USERNAME, PASSWORD
from faker import Faker
class DecspiderSpiderMiddleware:
    # Not all methods need to be defined. If a method is not defined,
    # scrapy acts as if the spider middleware does not modify the
    # passed objects.

    @classmethod
    def from_crawler(cls, crawler):
        # This method is used by Scrapy to create your spiders.
        s = cls()
        crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
        return s

    def process_spider_input(self, response, spider):
        # Called for each response that goes through the spider
        # middleware and into the spider.
        # Should return None or raise an exception.
        return None

    def process_spider_output(self, response, result, spider):
        # Called with the results returned from the Spider, after
        # it has processed the response.
        # Must return an iterable of Request, or item objects.
        for i in result:
            yield i

    def process_spider_exception(self, response, exception, spider):
        # Called when a spider or process_spider_input() method
        # (from other spider middleware) raises an exception.
        # Should return either None or an iterable of Request or item objects.
        pass

    def process_start_requests(self, start_requests, spider):
        # Called with the start requests of the spider, and works
        # similarly to the process_spider_output() method, except
        # that it doesn't have a response associated.
        # Must return only requests (not items).
        for r in start_requests:
            yield r

    def spider_opened(self, spider):
        spider.logger.info("Spider opened: %s" % spider.name)


class DecspiderDownloaderMiddleware:
    # Not all methods need to be defined. If a method is not defined,
    # scrapy acts as if the downloader middleware does not modify the
    # passed objects.

    @classmethod
    def from_crawler(cls, crawler):
        # This method is used by Scrapy to create your spiders.
        s = cls()
        crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
        return s

    def process_request(self, request, spider):
        # Called for each request that goes through the downloader
        # middleware.
        # Must either:
        # - return None: continue processing this request
        # - or return a Response object
        # - or return a Request object
        # - or raise IgnoreRequest: process_exception() methods of
        #   installed downloader middleware will be called
        return None

    def process_response(self, request, response, spider):
        # Called with the response returned from the downloader.
        # Must either:
        # - return a Response object
        # - return a Request object
        # - or raise IgnoreRequest
        return response

    def process_exception(self, request, exception, spider):
        # Called when a download handler or a process_request()
        # (from other downloader middleware) raises an exception.
        # Must either:
        # - return None: continue processing this exception
        # - return a Response object: stops process_exception() chain
        # - return a Request object: stops process_exception() chain
        pass

    def spider_opened(self, spider):
        spider.logger.info("Spider opened: %s" % spider.name)


class ProxyMiddleware:
    def __init__(self):
        # Initialise the proxy pool and a failure counter for each proxy.
        self.proxy_pool = ProxyPool()
        self.proxy_failures = {proxy: 0 for proxy in self.proxy_pool.proxy_list}
        self.fake = Faker()

    def process_request(self, request, spider):
        # Pick a random proxy for every request.
        proxy = self.proxy_pool.get_one()
        if proxy not in self.proxy_failures:
            self.proxy_failures[proxy] = 0
        request.meta['proxy'] = "http://%(user)s:%(pwd)s@%(proxy)s/" % {"user": USERNAME, "pwd": PASSWORD, "proxy": proxy}
        ua = self.fake.user_agent()
        request.headers['User-Agent'] = ua
        spider.logger.info(f'Using proxy: {proxy}\nUsing UA: {ua}')

    def process_response(self, request, response, spider):
        # Pass normal responses straight through.
        if response.status in [200, 301, 302]:
            return response
        # On an abnormal status, record the failure against the proxy
        # and reschedule the request.
        else:
            self._handle_proxy_failure(request.meta['proxy'], spider)
            return request

    def process_exception(self, request, exception, spider):
        # The request raised an exception: record the failure against its
        # proxy and reschedule the request.
        self._handle_proxy_failure(request.meta['proxy'], spider)
        return request

    def _handle_proxy_failure(self, http_proxy, spider):
        # Increment the failure count of the given proxy.
        proxy = http_proxy.split('@')[-1][:-1]
        self.proxy_failures[proxy] += 1
        spider.logger.error(f'Proxy {proxy} failed, failure count: {self.proxy_failures[proxy]}')
        # Remove a proxy from the pool once it has failed twice.
        if self.proxy_failures[proxy] >= 2:
            self.proxy_pool.remove(proxy)
            del self.proxy_failures[proxy]
            spider.logger.error(f'Removed proxy {proxy} after consecutive failures.')

    @classmethod
    def from_crawler(cls, crawler):
        # This method is used by Scrapy to create your spiders.
        s = cls()
        crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
        return s

    def spider_opened(self, spider):
        spider.logger.info('Spider opened: %s' % spider.name)

decspider/myutils.py (Normal file, +110 lines)

@@ -0,0 +1,110 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import time, random, os, sys, json
import requests, datetime
from .settings import SECRETID, SECRETKEY, PROXYPOOL_UPDATENUM, PROXYPOOL_MIN_NUM, PROXYPOOL_MIN_DURATION
SECRET_PATH = './.secret'
def _get_secret_token():
    r = requests.post(url='https://auth.kdlapi.com/api/get_secret_token', data={'secret_id': SECRETID, 'secret_key': SECRETKEY})
    if r.status_code != 200:
        raise KdlException(r.status_code, r.content.decode('utf8'))
    res = json.loads(r.content.decode('utf8'))
    code, msg = res['code'], res['msg']
    if code != 0:
        raise KdlException(code, msg)
    secret_token = res['data']['secret_token']
    expire = str(res['data']['expire'])
    _time = '%.6f' % time.time()
    return secret_token, expire, _time


def _read_secret_token():
    with open(SECRET_PATH, 'r') as f:
        token_info = f.read()
    secret_token, expire, _time, last_secret_id = token_info.split('|')
    if float(_time) + float(expire) - 3 * 60 < time.time() or SECRETID != last_secret_id:  # refresh when the token expires within 3 minutes or the SecretId has changed
        secret_token, expire, _time = _get_secret_token()
        with open(SECRET_PATH, 'w') as f:
            f.write(secret_token + '|' + expire + '|' + _time + '|' + SECRETID)
    return secret_token


def get_secret_token():
    if os.path.exists(SECRET_PATH):
        secret_token = _read_secret_token()
    else:
        secret_token, expire, _time = _get_secret_token()
        with open(SECRET_PATH, 'w') as f:
            f.write(secret_token + '|' + expire + '|' + _time + '|' + SECRETID)
    return secret_token


class KdlException(Exception):
    """Exception class for errors returned by the kdlapi service."""

    def __init__(self, code=None, message=None):
        self.code = code
        if sys.version_info[0] < 3 and isinstance(message, unicode):
            message = message.encode("utf8")
        self.message = message
        self._hint_message = "[KdlException] code: {} message: {}".format(self.code, self.message)

    @property
    def hint_message(self):
        return self._hint_message

    @hint_message.setter
    def hint_message(self, value):
        self._hint_message = value

    def __str__(self):
        if sys.version_info[0] < 3 and isinstance(self.hint_message, unicode):
            self.hint_message = self.hint_message.encode("utf8")
        return self.hint_message
class ProxyPool:
    def __init__(self):
        self.update_num = PROXYPOOL_UPDATENUM
        self.min_num = PROXYPOOL_MIN_NUM
        self.min_duration = PROXYPOOL_MIN_DURATION
        self.signature = get_secret_token()
        self.api_url = f'https://dps.kdlapi.com/api/getdps/?secret_id={SECRETID}&signature={self.signature}&num={self.update_num}&pt=1&format=json&sep=1'
        self.proxy_list = []

    def get_one(self):
        self.ensure_min_num()
        _proxy_list = []
        while not _proxy_list:
            last_got = datetime.datetime.now() - datetime.timedelta(seconds=self.min_duration)
            _proxy_list = [p for p in self.proxy_list if p['last_got_time'] < last_got]
        _proxy = random.choice(_proxy_list)
        _proxy['last_got_time'] = datetime.datetime.now()
        return _proxy['proxy']

    def remove(self, proxy: str):
        self.proxy_list = [p for p in self.proxy_list if p['proxy'] != proxy]
        self.ensure_min_num()

    def ensure_min_num(self):
        while len(self.proxy_list) < self.min_num:
            new_proxy_list = requests.get(self.api_url).json().get('data').get('proxy_list')
            _proxy_list = [{'proxy': p, 'last_got_time': datetime.datetime(2020, 10, 1, 12, 30, 30, 100000)} for p in new_proxy_list]
            self.proxy_list.extend(_proxy_list)


if __name__ == '__main__':
    proxypool = ProxyPool()
    print(proxypool.get_one())

decspider/pipelines.py (Normal file, +75 lines)

@@ -0,0 +1,75 @@
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html
# useful for handling different item types with a single interface
import mysql.connector
from scrapy.exceptions import DropItem
from .items import NewsItem
from .settings import MYSQL_USERNAME, MYSQL_PASSWORD, MYSQL_HOST, MYSQL_PORT, MYSQL_DATABASE
class DecspiderPipeline:
    def open_spider(self, spider):
        # Connect to the database.
        self.conn = mysql.connector.connect(user=MYSQL_USERNAME, password=MYSQL_PASSWORD, host=MYSQL_HOST, database=MYSQL_DATABASE, port=MYSQL_PORT)
        self.cursor = self.conn.cursor()
        # Build the table name dynamically from the bot name and spider name.
        self.table_name = f'{spider.settings.get("BOT_NAME")}_{spider.name}'
        spider.log(f'Dataset name: {self.table_name}')
        # Create the table if it does not exist yet.
        self.cursor.execute(f"""
            CREATE TABLE IF NOT EXISTS `{self.table_name}` (
                id INT AUTO_INCREMENT PRIMARY KEY
            )
        """)
        # Fetch the current columns of the table.
        self.cursor.execute(f"SHOW COLUMNS FROM `{self.table_name}`")
        existing_columns = {row[0] for row in self.cursor.fetchall()}
        # Fetch the fields declared on NewsItem.
        item_columns = set(NewsItem.fields.keys())
        # Add any NewsItem field that is missing from the table.
        for column in item_columns:
            if column not in existing_columns:
                self.cursor.execute(f"ALTER TABLE `{self.table_name}` ADD COLUMN `{column}` TEXT")
                spider.log(f'Added column `{column}` to `{self.table_name}` table')
        # Drop columns that no longer exist on NewsItem (except the primary key).
        for column in existing_columns:
            if column not in item_columns and column != 'id':
                self.cursor.execute(f"ALTER TABLE `{self.table_name}` DROP COLUMN `{column}`")
                spider.log(f'Dropped column `{column}` from `{self.table_name}` table')
        self.conn.commit()

    def close_spider(self, spider):
        self.conn.close()

    def process_item(self, item, spider):
        if isinstance(item, NewsItem):
            # Insert the item as a new row.
            columns = ', '.join(item.keys())
            placeholders = ', '.join(['%s'] * len(item))
            sql = f"INSERT INTO `{self.table_name}` ({columns}) VALUES ({placeholders})"
            try:
                self.cursor.execute(sql, list(item.values()))
                self.conn.commit()
            except mysql.connector.Error as e:
                spider.log(f"Error when inserting item: {e}")
                self.conn.rollback()
                raise DropItem(f"Error when inserting item: {e}")
        return item

decspider/settings.py (Normal file, +114 lines)

@@ -0,0 +1,114 @@
# Scrapy settings for decspider project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# https://docs.scrapy.org/en/latest/topics/settings.html
# https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
# https://docs.scrapy.org/en/latest/topics/spider-middleware.html
BOT_NAME = "decspider"
SPIDER_MODULES = ["decspider.spiders"]
NEWSPIDER_MODULE = "decspider.spiders"
# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = "decspider (+http://www.yourdomain.com)"
# Obey robots.txt rules
ROBOTSTXT_OBEY = False
# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32
# Configure a delay for requests for the same website (default: 0)
# See https://docs.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16
# Disable cookies (enabled by default)
#COOKIES_ENABLED = False
# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False
# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
# "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8",
# "Accept-Language": "en",
#}
# Enable or disable spider middlewares
# See https://docs.scrapy.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
# "decspider.middlewares.DecspiderSpiderMiddleware": 543,
#}
DOWNLOADER_MIDDLEWARES = {
"decspider.middlewares.ProxyMiddleware": 543,
}
# Enable or disable extensions
# See https://docs.scrapy.org/en/latest/topics/extensions.html
#EXTENSIONS = {
# "scrapy.extensions.telnet.TelnetConsole": None,
#}
# Configure item pipelines
# See https://docs.scrapy.org/en/latest/topics/item-pipeline.html
ITEM_PIPELINES = {
'decspider.pipelines.DecspiderPipeline': 300,
'crawlab.CrawlabPipeline': 888,
}
# Enable and configure the AutoThrottle extension (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False
# Enable and configure HTTP caching (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = "httpcache"
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = "scrapy.extensions.httpcache.FilesystemCacheStorage"
# Set settings whose default value is deprecated to a future-proof value
REQUEST_FINGERPRINTER_IMPLEMENTATION = "2.7"
TWISTED_REACTOR = "twisted.internet.asyncioreactor.AsyncioSelectorReactor"
FEED_EXPORT_ENCODING = "utf-8"
# Proxy setting
SECRETID = "oadugxyl9fhoqsamqopc"
SECRETKEY = "j2gugufp2batb8y2olw9la1cptxfapko"
USERNAME = "d2667352953"
PASSWORD = "m93ih6hh"
PROXYPOOL_UPDATENUM = 1
PROXYPOOL_MIN_NUM = 3
PROXYPOOL_MIN_DURATION = 1
# MySQL Configuration
MYSQL_USERNAME = "root"
MYSQL_PASSWORD = "yGWptA_tX4bZ2q"
MYSQL_HOST = "10.18.30.148"
MYSQL_PORT = 3307
MYSQL_DATABASE = "crawler_data"
# For testing: stop the crawl after 5 pages
CLOSESPIDER_PAGECOUNT = 5

decspider/spiders/__init__.py (Normal file, +4 lines)

@@ -0,0 +1,4 @@
# This package will contain the spiders of your Scrapy project
#
# Please refer to the documentation for information on how to create and manage
# your spiders.

Binary files not shown (2 files).

decspider/spiders/corpnews.py (Normal file, +95 lines)

@@ -0,0 +1,95 @@
import scrapy
import mysql.connector
from mysql.connector import errorcode
from urllib.parse import urljoin
from ..items import NewsItem
from ..settings import MYSQL_USERNAME, MYSQL_PASSWORD, MYSQL_HOST, MYSQL_PORT, MYSQL_DATABASE
class CorpnewsSpider(scrapy.Spider):
    name = "corpnews"
    allowed_domains = ["dongfang.com"]

    def __init__(self, *args, **kwargs):
        super(CorpnewsSpider, self).__init__(*args, **kwargs)
        self.crawled_urls = set()

    def start_requests(self):
        # Connect to the database.
        self.conn = mysql.connector.connect(user=MYSQL_USERNAME, password=MYSQL_PASSWORD, host=MYSQL_HOST, database=MYSQL_DATABASE, port=MYSQL_PORT)
        self.cursor = self.conn.cursor()
        # Build the table name dynamically from the bot name and spider name.
        dataset_name = f'{self.settings.get("BOT_NAME")}_{self.name}'
        # Load the URLs that have already been crawled.
        try:
            self.cursor.execute(f"SELECT url FROM `{dataset_name}`")
            self.crawled_urls = {row[0] for row in self.cursor.fetchall()}
        except mysql.connector.Error as err:
            if err.errno == errorcode.ER_NO_SUCH_TABLE:
                self.log(f"Table `{dataset_name}` does not exist. Initializing crawled URLs as an empty set.")
                self.crawled_urls = set()
            else:
                self.log(f"Error fetching URLs from `{dataset_name}`: {err}")
                self.crawled_urls = set()
        # Close the database connection.
        self.conn.close()
        # Start the crawl from the news listing pages.
        start_urls = ["https://www.dongfang.com/xwzx/jtyw1/qb.htm", "https://www.dongfang.com/xwzx/jcdt.htm", 'https://www.dongfang.com/xwzx/mtzs.htm']
        for url in start_urls:
            yield scrapy.Request(url, self.parse)

    def parse(self, response):
        first_news = response.xpath('//div[@class="news_top"]/div[@class="news_img"]/a')
        self.log(f'crawled_urls: {self.crawled_urls}')
        if first_news:
            first_news_url = first_news.attrib['href']
            full_url = urljoin(response.url, first_news_url)
            if full_url not in self.crawled_urls:
                yield scrapy.Request(full_url, self.news_parse)
        news_list = response.xpath('//div[contains(@class,"swiper-slide")]/dl/dd/a')
        for news in news_list:
            news_url = news.attrib['href']
            full_url = urljoin(response.url, news_url)
            if full_url not in self.crawled_urls:
                self.log(f'full_url: {full_url}')
                yield scrapy.Request(full_url, self.news_parse)
        next_page = response.xpath('//span[contains(@class, "p_next")]/a')
        if next_page:
            next_page_url = next_page.attrib['href']
            yield response.follow(next_page_url, self.parse)

    def news_parse(self, response):
        news_item = NewsItem()
        news_item['title'] = response.xpath('//div[@class="xq_nr_hd"]/h5/text()').get()
        news_item['collection'] = response.xpath('//div[@class="nysubsc"]/ul/li[@class="on"]/a/text()').get()
        news_item['url'] = response.url
        news_info = response.xpath('//div[@class="xq_nr_hd"]/span/text()')
        news_item['date'] = news_info.re(r'时间:(\d{4}-\d{2}-\d{2}) ')[0]
        source_label = news_info.re(r'来源: (.*) ')
        if source_label:
            news_item['source'] = source_label[0]
            news_item['source_url'] = ''
        else:
            news_item['source'] = response.xpath('//div[@class="xq_nr_hd"]/span/a/text()').get()
            news_item['source_url'] = response.xpath('//div[@class="xq_nr_hd"]/span/a').attrib['href']
        news_text_list = response.xpath('//div[@class="v_news_content"]/p/text()')
        news_item['content'] = '\n'.join([t.get() for t in news_text_list])
        news_image_urls = response.xpath('//div[@class="v_news_content"]/p/img')
        news_item['image_urls'] = ';\n'.join([i.attrib['src'] for i in news_image_urls])
        yield news_item

scrapy.cfg (Normal file, +12 lines)

@@ -0,0 +1,12 @@
# Automatically created by: scrapy startproject
#
# For more information about the [deploy] section see:
# https://scrapyd.readthedocs.io/en/latest/deploy.html
[settings]
default = decspider.settings
shell = ipython
[deploy]
#url = http://localhost:6800/
project = decspider
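
For reference, a minimal sketch (not part of this commit) of how the corpnews spider could be run programmatically instead of through the scrapy CLI; it assumes the script is launched from the project root so that scrapy.cfg and decspider.settings are picked up, and the file name run_corpnews.py is hypothetical:

# run_corpnews.py — hypothetical helper, assumes cwd is the project root
from scrapy.crawler import CrawlerProcess
from scrapy.utils.project import get_project_settings

if __name__ == '__main__':
    # Load the project settings (decspider/settings.py) via scrapy.cfg.
    process = CrawlerProcess(get_project_settings())
    # "corpnews" is the spider name defined in decspider/spiders/corpnews.py.
    process.crawl("corpnews")
    # Blocks until the crawl finishes.
    process.start()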