# -*- coding: utf-8 -*-
"""
Created on 2024-04-09
---------
@summary: utility helpers for the theme crawlers
---------
@author: Lzz
"""
import calendar
import datetime
import functools
import hashlib
import random
import re
import time
from collections import namedtuple

import bson
import execjs
import redis
import requests
from pymongo import MongoClient

from utils.clean_html import cleaner
from utils.log import logger

try:
    from pymongo.errors import DuplicateKeyError
    from hashlib import md5
except ImportError as e:
    raise e


SearchText = namedtuple('SearchText', ['total'])


def nsssjss():
    """Build the encrypted `nsssjss` request token (RSA, executed in Node through execjs)."""
    ex_js = '''
    const jsdom = require("jsdom");
    const {JSDOM} = jsdom;
    const dom = new JSDOM(`<!DOCTYPE html><p>Hello world</p>`);
    window = dom.window;
    document = window.document;

    JSEncrypt = require('jsencrypt')

    function encryptByRSA(value) {
        var encrypt = new JSEncrypt;
        var RSAPublicKey = "MIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQCS2TZDs5+orLYCL5SsJ54+bPCVs1ZQQwP2RoPkFQF2jcT0HnNNT8ZoQgJTrGwNi5QNTBDoHC4oJesAVYe6DoxXS9Nls8WbGE8ZNgOC5tVv1WVjyBw7k2x72C/qjPoyo/kO7TYl6Qnu4jqW/ImLoup/nsJppUznF0YgbyU/dFFNBQIDAQAB";
        encrypt.setPublicKey('-----BEGIN PUBLIC KEY-----' + RSAPublicKey + '-----END PUBLIC KEY-----')
        return encrypt.encrypt(value)
    }

    function get_njs(){
        nsssjss = encryptByRSA('/freecms' + '/rest/v1/notice/selectInfoMoreChannel.do' + '$$' + new Date().getTime())
        return nsssjss
    }
    '''
    ctx = execjs.compile(ex_js)
    njs = ctx.call('get_njs')
    return njs
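
# Illustrative sketch (assumption, not part of the original module): the token
# returned by nsssjss() is presumably sent along with the
# selectInfoMoreChannel.do request; the parameter name and query values below
# are hypothetical.
#
#   params = {"nsssjss": nsssjss(), "channelIds": "...", "currPage": 1}
#   requests.get(
#       "https://gdgpo.czt.gd.gov.cn/freecms/rest/v1/notice/selectInfoMoreChannel.do",
#       params=params, timeout=30,
#   )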


def get_pay_proxy():
    proxy = "http://6278CF0D:41D9C796172D@tun-vdpzuj.qg.net:15254"
    return {"http": proxy, "https": proxy}


get_QGIP = get_pay_proxy


def get_proxy(scheme=None, default=None, socks5h=False):
    """Fetch a socks5 proxy from the internal proxy service."""
    url = "http://cc.spdata.jianyu360.com/crawl/proxy/socks5/fetch"
    headers = {"Authorization": "Basic amlhbnl1MDAxOjEyM3F3ZSFB"}
    while True:
        try:
            response = requests.get(url, headers=headers, timeout=5)
            response.raise_for_status()
            resp_json = response.json()
        except Exception as why:
            logger.error(f"failed to fetch proxy | {type(why).__name__} | {why}")
            raise why

        proxies = resp_json.get("data")
        if proxies:
            break
        else:
            logger.warning("no proxy available yet ...")
            time.sleep(3)

    if socks5h:
        proxies = {
            "http": proxies.get("http").replace("socks5", "socks5h"),
            "https": proxies.get("http").replace("socks5", "socks5h")
        }

    logger.info(f"fetched proxy | {proxies}")
    if not scheme:
        return proxies
    else:
        return proxies.get(scheme, default)
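
# Illustrative usage: pass the fetched mapping straight to requests; use
# socks5h=True when DNS resolution should happen on the proxy side.
#
#   proxies = get_proxy(socks5h=True)
#   requests.get("https://example.com", proxies=proxies, timeout=30)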


def Mongo_client(env=None):
    kwargs = dict(host="172.20.47.168", port=27080)
    if env == "test":
        kwargs = dict(host="172.20.45.130", port=27017)
    return MongoClient(**kwargs)


def Redis_client(env=None):
    connection_kwargs = dict(host='172.17.162.28', password='k5ZJR5KV4q7DRZ92DQ', port=7361, db=1)
    if env == "test":
        connection_kwargs = dict(host='172.20.45.129', password='jianyu@python', port=3379, db=1)
    # decode_responses must be set on the ConnectionPool; passing it to
    # redis.Redis() together with an explicit pool is silently ignored.
    r = redis.Redis(
        connection_pool=redis.ConnectionPool(decode_responses=True, **connection_kwargs)
    )
    return r


def int2long(param: int):
    """Convert a Python int to a BSON Int64."""
    return bson.int64.Int64(param)


def get_current_date(date_format="%Y-%m-%d %H:%M:%S"):
    return datetime.datetime.now().strftime(date_format)


def date_to_timestamp(date, time_format="%Y-%m-%d %H:%M:%S"):
    """
    @summary: convert a date string such as "2011-09-28 10:00:00" to a unix timestamp
    ---------
    @param date: date string
    @param time_format: format of the date string
    ---------
    @result: unix timestamp (int)
    """
    if ":" in date:
        timestamp = time.mktime(time.strptime(date, time_format))
    else:
        timestamp = time.mktime(time.strptime(date, "%Y-%m-%d"))
    return int(timestamp)


def timestamp_to_date(timestamp, time_format="%Y-%m-%d %H:%M:%S"):
    """
    @summary: convert a unix timestamp to a date string
    ---------
    @param timestamp: unix timestamp
    @param time_format: output date format
    ---------
    @result: date string
    """
    if timestamp is None:
        raise ValueError("timestamp is null")
    date = time.localtime(timestamp)
    return time.strftime(time_format, date)
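
# Illustrative round trip (values depend on the local timezone; shown here for
# TZ=Asia/Shanghai):
#   date_to_timestamp("2011-09-28 10:00:00")   -> 1317175200
#   timestamp_to_date(1317175200)              -> "2011-09-28 10:00:00"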


def get_sha1(*args):
    """
    @summary: build a unique 40-character hex digest, used as a unique id
    ---------
    @param *args: the values combined for de-duplication
    ---------
    @result: ba4868b3f277c8e387b55d9e3d0be7c045cdd89e
    """
    sha1 = hashlib.sha1()
    for arg in args:
        sha1.update(str(arg).encode())
    return sha1.hexdigest()  # 40 hex characters
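
# Illustrative usage (hypothetical values): combine the fields that identify a
# record to build a stable de-duplication key.
#
#   dedupe_key = get_sha1(title, href, publishtime)
#   # -> 40-character hex string, stable across runs for the same inputs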


def get_sha256(*args):
    """
    @summary: build a unique 64-character hex digest, used as a unique id
    ---------
    @param *args: the values combined for de-duplication
    ---------
    @result: 5580c91ea29bf5bd963f4c08dfcacd983566e44ecea1735102bc380576fd6f30
    """
    sha256 = hashlib.sha256()
    for arg in args:
        sha256.update(str(arg).encode())
    return sha256.hexdigest()  # 64 hex characters


def md5value(val):
    md5 = hashlib.md5()
    if isinstance(val, bytes):
        # hash the raw bytes, not the repr of the bytes object
        md5.update(val)
    elif isinstance(val, str):
        md5.update(val.encode("utf-8"))
    return md5.hexdigest()


def ensure_int64(n):
    """
    >>> ensure_int64(None)
    0
    >>> ensure_int64(False)
    0
    >>> ensure_int64(12)
    12
    >>> ensure_int64("72")
    72
    """
    if not n:
        return bson.int64.Int64(0)
    return bson.int64.Int64(n)


def get_today_of_day(day_offset=0):
    return str(datetime.date.today() + datetime.timedelta(days=day_offset))


def get_current_timestamp():
    return int(time.time())


def add_zero(n):
    return "%02d" % n


def sup_zero(indate):
    """Zero-pad the date part of a date string, e.g. '2024-4-9 10:00' -> '2024-04-09 10:00'."""
    deal = indate.split(' ')
    head = deal[0].split('-')
    tail = ""
    if len(deal) == 2:
        tail = " " + deal[1]
    year = int(head[0])
    month = int(head[1])
    day = int(head[2])
    fdate = datetime.datetime(year=year, month=month, day=day)
    formatted_date = fdate.strftime("%Y-%m-%d") + tail
    return formatted_date


def get_days_of_month(year, month):
    """Return the number of days in the given month."""
    return calendar.monthrange(year, month)[1]


def get_year_month_and_days(month_offset=0):
    """
    @summary:
    ---------
    @param month_offset: month offset relative to the current month
    ---------
    @result: ('2019', '04', '30')
    """
    today = datetime.datetime.now()
    year, month = today.year, today.month
    this_year = int(year)
    this_month = int(month)
    total_month = this_month + month_offset
    if month_offset >= 0:
        if total_month <= 12:
            days = str(get_days_of_month(this_year, total_month))
            total_month = add_zero(total_month)
            return (str(year), total_month, days)
        else:
            i = total_month // 12
            j = total_month % 12
            if j == 0:
                i -= 1
                j = 12
            this_year += i
            days = str(get_days_of_month(this_year, j))
            j = add_zero(j)
            return (str(this_year), str(j), days)
    else:
        if (total_month > 0) and (total_month < 12):
            days = str(get_days_of_month(this_year, total_month))
            total_month = add_zero(total_month)
            return (str(year), total_month, days)
        else:
            i = total_month // 12
            j = total_month % 12
            if j == 0:
                i -= 1
                j = 12
            this_year += i
            days = str(get_days_of_month(this_year, j))
            j = add_zero(j)
            return (str(this_year), str(j), days)


def get_month(month_offset=0):
    """
    Return the date N months away from today, formatted as "YYYY-MM-DD".
    month_offset > 0 moves forward N months; month_offset < 0 moves back N months.
    """
    today = datetime.datetime.now()
    day = add_zero(today.day)
    (y, m, d) = get_year_month_and_days(month_offset)
    arr = (y, m, d)
    if int(day) < int(d):
        arr = (y, m, day)
    return "-".join("%s" % i for i in arr)
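
# Illustrative behaviour (results depend on the current date; assuming today is
# 2024-04-09):
#   get_month(0)    -> "2024-04-09"
#   get_month(-1)   -> "2024-03-09"
#   get_month(8)    -> "2024-12-09"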


def extract_file_type(file_name="附件名", file_url="附件地址", file_type_list=None):
    """
    Extract the attachment type.
    Args:
        file_name: attachment name
        file_url: attachment url
        file_type_list: extra attachment suffixes to accept
    Returns: attachment type, or None
    """
    if file_type_list is None:
        file_type_list = []

    if file_name and file_url:
        file_name = file_name.strip()
        file_types = ['zip', 'docx', 'ftp', 'pdf', 'doc', 'rar', 'gzzb', 'hzzbs',
                      'jpg', 'png', 'zbid', 'xls', 'xlsx', 'swp', 'dwg']
        if file_type_list:
            ftp_list = list(map(lambda x: x.lower(), file_type_list))
            file_types.extend(ftp_list)

        file_type = file_url.split('?')[0].split('.')[-1].lower()
        if file_type not in file_types:
            file_type = file_url.split('?')[-1].split('.')[-1].lower()

        if file_type in file_types:
            return file_type
        else:
            # fall back to scanning the file name / url for a known suffix
            for ftp in file_types:
                match = re.search(ftp, file_name) or re.search(r"\." + ftp, file_url)
                if match:
                    return match.group(0).replace('.', '')
    return None
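
# Illustrative usage (hypothetical values):
#   extract_file_type("招标文件.pdf", "http://example.com/files/notice.pdf")    -> "pdf"
#   extract_file_type("附件", "http://example.com/download?id=1", ["txt"])      -> None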


def remove_htmldata(remove_info_list: list, html: str, response):
    """
    Strip unwanted fragments from a detail page.
    Args:
        remove_info_list: rules describing what to delete -> list of [xpath | re | str],
                          e.g. ['<re>data:image/(.*?)"']
        html: the html being cleaned
        response: the original response object (used to evaluate xpath rules)
    Returns: the cleaned html
    """
    if html and remove_info_list:
        for extra_item in remove_info_list:
            if re.search('^//.*', extra_item):
                # xpath rule: remove every node the expression matches
                extra_html_list = response.xpath(extra_item).extract()
                for extra_html in extra_html_list:
                    if extra_html:
                        html = html.replace(extra_html, '')
            elif re.search('^<re>.*', extra_item):
                # regex rule: strip the '<re>' prefix, then remove every match
                extra_item = extra_item.replace('<re>', '')
                extra_html_list = re.findall(extra_item, html, re.S | re.I | re.M)
                if extra_html_list:
                    for exhtml in extra_html_list:
                        html = html.replace(exhtml, '')
            else:
                # plain string rule
                extra_html = extra_item
                if extra_html:
                    html = html.replace(extra_html, '')
    return html
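
# Illustrative usage (hypothetical rules) mixing the three rule flavours:
#   html = remove_htmldata(
#       remove_info_list=[
#           '//div[@class="share"]',        # xpath rule, evaluated against `response`
#           '<re>data:image/(.*?)"',        # regex rule
#           'Print this page',              # plain string rule
#       ],
#       html=html,
#       response=response,
#   )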


def text_search(content: str) -> SearchText:
    """
    Count the Chinese characters in a text.
    :param content: text
    :return: SearchText(total=<number of Chinese characters>)
    """
    if not content:
        return SearchText(0)

    results = re.findall('[\u4e00-\u9fa5]', content, re.S)
    # the length of the list is the number of Chinese characters
    return SearchText(len(results))


def clean_title(title):
    """Clean a title."""
    if title:
        rule_list = [
            r'\(\d{1,20}\)',
            r'\[[\u4e00-\u9fa5]{1,9}\]',
            r'【[\u4e00-\u9fa5]{1,9}】',
        ]
        for rule in rule_list:
            title = re.sub(rule, '', title)
    return title
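
# Illustrative behaviour (hypothetical inputs):
#   text_search("公开招标公告2024").total     -> 6   (only Chinese characters are counted)
#   clean_title("[转发]某某项目公告(12345)")  -> "某某项目公告"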


def substitute(html_str, special=None, completely=False):
    """HTML cleanup via utils.clean_html.cleaner."""
    html_str = cleaner(html=html_str, special=special, completely=completely)
    return html_str


def handle_publish_time(publishtime):
    """Normalize the publish time and return (date string, Int64 timestamp)."""
    try:
        time_str = get_current_date().split(' ')[-1]
        if ':' not in publishtime:
            publishtime = publishtime + ' ' + time_str
        else:
            if '00:00:00' in publishtime:
                publishtime = publishtime.split(' ')[0] + ' ' + time_str
        l_np_publishtime = int2long(date_to_timestamp(publishtime))
        publishtime, l_np_publishtime = handle_publish_time_overdue(publishtime, l_np_publishtime)
        return publishtime, l_np_publishtime
    except Exception:
        raise EOFError("invalid publishtime format!")


def handle_publish_time_overdue(publishtime, l_np_publishtime):
    """Handle publish times that lie in the future."""
    if l_np_publishtime and l_np_publishtime > get_current_timestamp():
        logger.warning("publish time is later than now; using the current time instead!")
        publishtime = get_current_date()
        l_np_publishtime = ensure_int64(date_to_timestamp(publishtime))
    return publishtime, l_np_publishtime
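
# Illustrative behaviour (assuming the current time is 2024-04-09 15:30:00):
#   handle_publish_time("2024-04-01")            -> ("2024-04-01 15:30:00", Int64(<timestamp>))
#   handle_publish_time("2025-01-01 09:00:00")   -> current date/time (future dates are clamped)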


def handle_page_html(item):
    """Validate the detail page content."""
    title = item.get('title')
    publishtime = item.get('publishtime')
    href = item.get('href')
    if href == "#":
        href = item.get('competehref')
    contenthtml = item.get('contenthtml')
    detail = item.get('detail')
    if not contenthtml:
        logger.warning(f"page html must not be empty!\n url: {href}\n publish time: {publishtime}\n title: {title}")
        raise ValueError("invalid detail page!")
    else:
        if text_search(detail).total == 0:
            logger.warning("no Chinese content found; the item will not be pushed to the save service!")
            item['sendflag'] = "true"


def check_data_validity(item):
    """Check that the basic fields are present."""
    title = item.get('title')
    publishtime = item.get('publishtime')
    href = item.get('href')
    if href == "#":
        href = item.get('competehref')
    if not title or not publishtime or not href:
        logger.error(f"basic fields must not be empty!\n url: {href}\n publish time: {publishtime}\n title: {title}")
        raise ValueError("invalid basic fields")


_fields = {
    'title', 'publishtime', 'spidercode', 'infoformat', 'site',
    'channel', 'area', 'city', 'jsondata', 'district', 'href',
    'is_mixed', 'comeintime', 's_title', 'l_np_publishtime',
    'contenthtml', 'competehref', 'detail', 'iscompete', 'sendflag',
    '_d', 'publishdept', 'type', 'T', 'projectinfo', 'is_theme'
}


def clean_fields(item, special_fields=None):
    special_fields = special_fields or _fields
    rm_fields = []
    for key, val in item.items():  # drop fields that are not whitelisted
        if key not in special_fields:
            rm_fields.append(key)
    for field in rm_fields:
        del item[field]


def join_fields(item, special_fields=None, **kwargs):
    special_fields = special_fields or _fields
    for k, v in kwargs.items():
        if k in special_fields:
            item[k] = v
        else:
            logger.error(f"{k} is not a defined storage field!")


def format_fields(item, callback=handle_publish_time, **kwargs):
    """Format the storage fields (bidding)."""
    clean_fields(item)
    if callable(callback):
        time_str, timestamp = callback(item.get('publishtime'))
        item['publishtime'] = time_str
        item['l_np_publishtime'] = timestamp
    item['detail'] = substitute(item.get('contenthtml'))
    item['s_title'] = item.get('s_title') or item.get('title')
    item['infoformat'] = 1
    item['iscompete'] = True
    item['sendflag'] = 'false'
    item['_d'] = 'comeintime'
    item['publishdept'] = ''
    item['type'] = ''
    item['T'] = 'bidding'
    join_fields(item, **kwargs)
    handle_page_html(item)
    check_data_validity(item)
    item['comeintime'] = int2long(int(time.time()))
    return item
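
# Illustrative usage sketch (field values are hypothetical): a crawler builds a
# raw item dict and calls format_fields() right before saving it.
#
#   item = {
#       "site": "example-site", "channel": "notices", "spidercode": "a_example_001",
#       "title": "某某项目招标公告", "publishtime": "2024-04-09",
#       "href": "https://example.com/detail/1",
#       "contenthtml": "<div>正文内容</div>",
#       "area": "广东", "city": "广州", "district": "",
#   }
#   item = format_fields(item, is_mixed=False)   # extra kwargs must be whitelisted in _fields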


def format_fields_njpc(item, callback=handle_publish_time, **kwargs):
    """Format the storage fields (proposed-construction crawlers)."""
    req_fields = {
        'site', 'approvenumber', 'method', 'project_scale', 'area', 'is_mixed',
        'competehref', 'air_conditioner', 'funds', 'scale', 'construction_area',
        'channel', 'contenthtml', 'elevator', 'building_floors', 'ownertel',
        'parking', 'building', 'spidercode', 'title', 'detail', 'projectinfo',
        'exterior', 'constructionunit', 'owner_info', 'approvetime',
        'project_startdate', 'investment', 'heating', 'district',
        'constructionunitperson', 'designunitperson', 'publishtime', 'system',
        'pace', 'total', 'project_scale_info', 'passive', 'phone',
        'construction', 'parking_pace', 'floors', 'freshair_system',
        'other_project_scale', 'conditioner', 'wall', 'designunit', 'owneraddr',
        'prefabricated_building', 'materials', 'constructionunitaddr',
        'constructionunit_info', 'project_person', 'approvecontent',
        'constructionunittel', 'floor', 'person', 'city', 'floor_area',
        'project', 'approvestatus', 'project_completedate', 'completedate',
        'ownerperson', 'sendflag', 'comeintime', 'steel_structure',
        'projectaddr', 'freshair', 'T', 'startdate', 'house', 'projectname',
        'exterior_wall_materials', 'other', 'passive_house', 'jsondata', 'air',
        'prefabricated', 'designunit_info', 'approvedept', 'total_investment',
        'infoformat', 'project_phone', 'owner', 'designunittel', 'projecttype',
        'approvecode', 'steel', 'is_theme', 'designunitaddr', 'heating_method',
        'href', 'projectperiod', 'structure'
    }
    clean_fields(item, special_fields=req_fields)
    if callable(callback):
        _, timestamp = callback(item.get('publishtime'))
        item['publishtime'] = timestamp
    item['detail'] = substitute(item.get('contenthtml'))
    item['title'] = item.get('title') or item.get('projectname')
    item['infoformat'] = 2
    item['sendflag'] = "false"
    item['T'] = "bidding"
    join_fields(item, special_fields=req_fields, **kwargs)
    handle_page_html(item)
    check_data_validity(item)
    item['comeintime'] = int2long(int(time.time()))
    return item


def search(pattern, string):
    result = re.search(pattern, string)
    if result:
        return result.groups()[0]


def sleep_time(start_time: int, end_time=0, step=-1):
    """Count down from start_time to end_time, sleeping one second per step."""
    time.sleep(random.random())
    for i in range(start_time, end_time, step):
        print(f"\r *** sleeping... {i} s *** ", end='')
        time.sleep(1)
    print("\r <* sleep finished *> ", end='')


# decorators
class Singleton(object):

    def __init__(self, cls):
        self._cls = cls
        self._instance = {}

    def __call__(self, *args, **kwargs):
        if self._cls not in self._instance:
            self._instance[self._cls] = self._cls(*args, **kwargs)
        return self._instance[self._cls]
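
# Illustrative usage (hypothetical class): decorating a class makes every call
# return the same instance.
#
#   @Singleton
#   class MongoPool:
#       def __init__(self):
#           self.client = Mongo_client()
#
#   assert MongoPool() is MongoPool()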


def down_load_image(proxy=None):
    """Download the verification-code image and have the captcha service recognize it."""
    img_url = 'https://gdgpo.czt.gd.gov.cn/freecms/verify/verifyCode.do?createTypeFlag=n'
    header = {
        "Accept": "image/avif,image/webp,image/apng,image/svg+xml,image/*,*/*;q=0.8",
        "Accept-Language": "zh-CN,zh;q=0.9",
        "Connection": "keep-alive",
        "Referer": "https://gdgpo.czt.gd.gov.cn/cms-gd/site/guangdong/qwjsy/index.html?",
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/121.0.0.0 Safari/537.36",
    }
    res = requests.get(img_url, headers=header, proxies=proxy, timeout=30, verify=False)

    upload_address = "http://pycaptcha.spdata.jianyu360.com/v1/images/verify"
    content = {'file': res.content}
    # with open('image.jpg', 'wb+') as f:
    #     f.write(res.content)
    headers = {'accept': 'application/json'}
    json_resp = requests.post(upload_address, headers=headers, files=content, stream=True).json()
    if "msg" in json_resp and "success" == json_resp["msg"]:
        code = json_resp["r"]["code"]
        if len(code) == 4:
            return code
    return None


def _pack_file(file):
    """Wrap the captcha payload in the format the captcha service expects."""
    if isinstance(file, str) and file.startswith("data:image"):
        img_file = {"file": file}
    elif isinstance(file, bytes):
        img_file = {"file": file}
    else:
        with open(file, "rb") as f:
            img_bytes = f.read()
        img_file = {"file": img_bytes}
    return img_file


def simple_captcha(file):
    """
    Plain captcha recognition.
    @param file: the captcha - raw image bytes, a file path, or a base64 data url
    @return: the recognized code, or None
    """
    url = "http://pycaptcha.spdata.jianyu360.com/v1/images/verify"
    files = _pack_file(file)
    r = requests.post(url, headers={"accept": "application/json"}, files=files, stream=True, timeout=10)
    rp_json = r.json()
    if "msg" in rp_json and "success" == rp_json["msg"]:
        return str(rp_json["r"]["code"])
    return None
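
# Illustrative usage (hypothetical captcha url): simple_captcha() accepts raw
# bytes, a local file path, or a base64 data url.
#
#   img_bytes = requests.get("https://example.com/verifyCode.do", timeout=30).content
#   code = simple_captcha(img_bytes)
#   if code:
#       print("recognized captcha:", code)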


def retry_on_exception(retries=1, timeout=1):
    def decorate(func):
        @functools.wraps(func)
        def warp(*args, **kwargs):
            for _ in range(retries):
                try:
                    return func(*args, **kwargs)
                except Exception as e:
                    print(f"call to [{func.__name__}] failed, args:{args}, kwargs:{kwargs}, error:{e}")
                    time.sleep(timeout)
            raise RuntimeError(f"[{func.__name__}] reached the maximum number of retries")

        return warp

    return decorate
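
# Illustrative usage (hypothetical function): retry a flaky request up to three
# times, waiting two seconds between attempts.
#
#   @retry_on_exception(retries=3, timeout=2)
#   def fetch_list_page(url):
#       resp = requests.get(url, proxies=get_proxy(), timeout=30)
#       resp.raise_for_status()
#       return resp.text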


class PySpiderError(Exception):

    def __init__(self, *args, **kwargs):
        if 'code' not in kwargs and 'reason' not in kwargs:
            kwargs['code'] = 10000
            kwargs['reason'] = 'unknown spider error, please handle it manually'
        for key, val in kwargs.items():
            setattr(self, key, val)
        super(PySpiderError, self).__init__(*args, kwargs)


class AttachmentNullError(PySpiderError):

    def __init__(self, code: int = 10004, reason: str = 'attachment download error'):
        super(AttachmentNullError, self).__init__(code=code, reason=reason)


class CustomError(Exception):

    def __init__(self, ErrorInfo):
        self.ErrorInfo = ErrorInfo

    def __str__(self):
        return self.ErrorInfo


# aliases preserving the misspelled names (presumably kept for backwards compatibility)
format_fileds = format_fields
format_fileds_njpc = format_fields_njpc