# detail_spider.py

import time

import requests
from lxml.html import fromstring, HtmlElement, tostring
from lxml.html.clean import Cleaner
from pymongo.errors import DuplicateKeyError

from crawler.check_utils import CheckText, CheckTask
from crawler.clean_html import cleaner
from crawler.crawl_scheduler import Scheduler
from crawler.login import login, load_login_cookies, login_check
from utils.databases import mongo_table, int2long
from utils.execptions import YbwCrawlError
from utils.log import logger
from utils.socks5 import Proxy


def iter_node(element: HtmlElement):
    """Depth-first traversal: yield the element itself, then every HtmlElement descendant."""
    yield element
    for sub_element in element:
        if isinstance(sub_element, HtmlElement):
            yield from iter_node(sub_element)
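

# A minimal sketch of the traversal order (not part of the original module and never
# called here); the HTML fragment and the function name are purely illustrative.
def _demo_iter_node():
    fragment = fromstring('<div id="a"><p id="b"><span id="c"></span></p></div>')
    # Yields the nodes depth-first: div#a, p#b, span#c.
    return [node.get('id') for node in iter_node(fragment)]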


def pre_parse(element: HtmlElement):
    """Drop known boilerplate nodes before extraction.

    Note: this pre-processing may break the original HTML structure, so XPath
    expressions written against the raw HTML may no longer match afterwards.
    """
    pre_remove = {
        'log_col2', 'log_col1', 'cz', 'iconfont closei', 'p2 p1', 'cnxh_b',
        'msg_error', 'r_gg TB-focus', 'april', 'cont2', 'to_login', 'regtxt',
        'shouchang an_n sc', 'april_title red', 'cn_lt', 'dayin an_n',
        'dl_zc vip_t free_member', 'rmbq', 'login-form cl', 'dian_g fr',
        'di_n', 'd_fx', 'd_tub', 'd_dy', 'anniu1', 'cnxh_list', 'btns cl',
        'active', 'close', 'd_an fr', 'avatar', 'toolbar', 'deng_l',
        'cen_right fr', 'log_col5', 'agreement', 'log_col3',
        'shouchang_af an_n sc_after', 'fast_box', 'di_nr fl', 'xgfj', 'dianh',
        'cnxh_list tab_b2 city_list', 'contract cl', 'zb_cen_r fr', 'd_zsms',
        'sc_after active', 'dl_k', 'ewm_b', 'fl', 'wypj', 'rukou', 'p1',
        'dl_zc', 'success', 'daoh h_30', 'bd', 'april_content', 'print',
        'foot', 'cnxh zbgg', 'april_first', 'fastlog', 'tx_mc user_name',
        'tab_h2', 'fanding an_n', 'toux', 'log_col4 cl', 'hangy rem_1', 'red',
        'regshadow', 'bottom', 'dl_zc vip_t fee_member', 'xszn fl', 'no-print',
        'cnxh_b zbgg_b', 'rem rem_1', 'logshadowz', 'd_pj fl', 'tjgjc',
        'spdujaiwlohh', 'di_ewm fr', 'dian_h fl',
        'tab_h2 zbgg_b_gray', 'fanshou an_n fs', 'login-btn', 'fl gjc',
        'agreeshadow', 'guang_db', 'footer_1', 'log_p', 'cnxh_list tab_b2',
        'd_sw', 'april_close', 'd_sc', 'erweima no-print', 'qgzx', 'p2', 'sc',
        'hd', 'log_col6', 'dh_b', 'dian_guang', 'zhu_c', 'ck cai_k', 'april_box',
        'display:none'
    }
    # Drop any node whose id, class or style attribute value appears in pre_remove.
    for node in iter_node(element):
        id_attr = node.attrib.get('id')
        class_attr = node.attrib.get('class')
        style_attr = node.attrib.get('style')
        if any([id_attr in pre_remove,
                class_attr in pre_remove,
                style_attr in pre_remove]):
            node.drop_tree()
    return element


def page_source(element: HtmlElement):
    """Serialize the element and strip <style> blocks (forms are kept)."""
    clear = Cleaner(
        forms=False,
        style=True
    )
    return clear.clean_html(tostring(element, encoding="utf-8").decode())
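

# A minimal usage sketch of the pre-processing pipeline (not part of the original module
# and never called here); the snippet HTML and the function name are illustrative.
def _demo_clean_fragment():
    raw = '<div id="infoDescription"><div class="toolbar">share</div><p>正文</p></div>'
    node = fromstring(raw)
    node = pre_parse(node)    # drops the class="toolbar" node
    return page_source(node)  # serialized HTML with <style> blocks removed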


class DetailSpider:

    def __init__(self, db: str, crawl_tab: str, save_tab: str):
        self.crawl_tab = mongo_table(db, crawl_tab)
        self.save_tab = mongo_table(db, save_tab)
        self.user = None

    def _update_crawl_task(self, tid, **kwargs):
        self.crawl_tab.update_one({'_id': tid}, {'$set': kwargs})

    def _lock_task(self, task: dict):
        self._update_crawl_task(task['_id'], crawl=True)

    def _release_task(self, task: dict):
        self._update_crawl_task(task['_id'], crawl=False)

    def crawl_request(self, item: dict):
        url = item['competehref']
        headers = {
            'Host': 'www.chinabidding.cn',
            'sec-ch-ua': '" Not;A Brand";v="99", "Google Chrome";v="97", "Chromium";v="97"',
            'sec-ch-ua-mobile': '?0',
            'sec-ch-ua-platform': '"Windows"',
            'Upgrade-Insecure-Requests': '1',
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/97.0.4692.71 Safari/537.36',
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',
            'Sec-Fetch-Site': 'none',
            'Sec-Fetch-Mode': 'navigate',
            'Sec-Fetch-User': '?1',
            'Sec-Fetch-Dest': 'document',
            'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8',
        }
        request_params = {'headers': headers, 'timeout': 60}

        retries = 0
        retries_502, max_retries_502 = 0, 15
        proxy, proxies = None, None
        while retries < 3:
            if retries_502 > max_retries_502:
                # The site has removed this record.
                self._update_crawl_task(item['_id'], crawl_status='remove')
                break

            login_cookies = load_login_cookies(self.user.phone)
            if login_cookies is None:
                # No saved cookies: log in again and retry the loop.
                login(*self.user)
                continue
            request_params['cookies'] = login_cookies

            try:
                r = requests.get(url, **request_params)
                # Check whether the account is still logged in.
                retry_login = login_check(self.user.phone, url, False)
                if retry_login:
                    logger.info(f"[重新登录]{self.user.phone}")
                    _, code = login(*self.user, proxies=proxies)
                    if code == 200:
                        retries += 1
                    else:
                        # Login failed: set up or rotate the proxy before retrying.
                        if proxy is None:
                            proxy = Proxy(True)
                        else:
                            proxy.switch()
                        proxies = proxy.proxies
                        retries += 1
                    continue

                element = fromstring(r.text)
                nodes = element.xpath('//*[@id="main_dom"]/div[1]')
                if len(nodes) != 1:
                    # The detail container is missing; count it and retry.
                    retries_502 += 1
                    logger.debug(f'"main_dom"属性匹配个数:{len(nodes)}, {r.status_code} - {url}')
                    continue
                node = nodes[0]
                logger.info(f'[采集正文] id={node.attrib.get("id")}')
                return r
            except requests.RequestException:
                retries += 1
                continue

        return None

    def crawl_response(self, response, item):
        element: HtmlElement = fromstring(response.text)
        node = element.xpath('//*[@id="infoDescription"]')[0]
        node = pre_parse(node)
        # Candidate containers for the announcement body, tried in order;
        # fall back to the whole "infoDescription" node when none matches.
        features = (
            './div[@class="ckgys_cont"]',
            './/div[@class="detail-title ng-scope"]',
            './/table[@class="detail_Table"]',
        )
        for feature in features:
            extract_node = node.xpath(feature)
            if len(extract_node) > 0:
                valid_node = extract_node[0]
                break
        else:
            valid_node = node

        html = page_source(valid_node)
        # Check the raw page content.
        CheckText(html)
        item["contenthtml"] = html
        special = {
            '若附件无法下载,你可以尝试使用360极速浏览器进行下载!': '',
            'DD000E;|EE000F;|FF000E;': '',
        }
        item["detail"] = cleaner(html, special)
        item["comeintime"] = int2long(int(time.time()))
        # Check the cleaned detail text.
        CheckText(item["detail"])

        # Save everything except the crawl bookkeeping fields.
        insert = {key: val for key, val in item.items()
                  if key not in ('crawl_status', 'crawl', 'count', '_id')}
        self.save_tab.insert_one(insert)
        logger.info('[采集成功]{}-{}'.format(item['title'], item['publishtime']))

    def crawl_spider(self, sc: Scheduler):
        while True:
            next_task_interval = None
            if sc.count >= sc.total:
                return True
            item = sc.crawl_task
            if len(item) == 0:
                return False

            self._lock_task(item)
            # Record the spider code and source URL, used when reporting crawl errors.
            sc.spider_code = item['spidercode']
            sc.crawl_url = item['competehref']
            try:
                # Validate the crawl task before requesting it.
                CheckTask(item)
                response = self.crawl_request(item)
                if response is not None:
                    self.crawl_response(response, item)
                    self._update_crawl_task(item["_id"], crawl_status='finished')
                sc.crawl_counter(1)
            except YbwCrawlError as e:
                if e.code == 10105:
                    # Duplicate data: refresh the count from the ES query statistics.
                    self._update_crawl_task(item["_id"], count=item['count'])
                    logger.info('[重复数据]{}-{}'.format(item['title'], item['publishtime']))
                else:
                    sc.err_record(e)
                    self._update_crawl_task(item["_id"], crawl_status='error')
                    logger.info('[问题数据]{}-{}'.format(item['title'], item['publishtime']))
                sc.crawl_counter(0)
                next_task_interval = 0.1
            finally:
                self._release_task(item)
                sc.wait_for_next_task(next_task_interval)

    def start(self):
        while True:
            with Scheduler(site='元博网', crawl_type='detail') as scheduler:
                if scheduler.crawl_start:
                    self.user = scheduler.user
                    finished = self.crawl_spider(scheduler)
                    if finished:
                        # All tasks for this round have been collected.
                        scheduler.finished()
                    else:
                        # No crawl tasks available right now.
                        scheduler.wait_for_next_task()
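

# Illustrative only (not used by the spider): the shape of a crawl-task document in the
# crawl collection, reduced to the fields this module reads; every value below is a
# made-up placeholder, not real data.
def _example_task_document():
    return {
        '_id': '<ObjectId assigned by MongoDB>',
        'competehref': 'https://www.chinabidding.cn/example-detail.html',  # hypothetical URL
        'spidercode': 'a_ybw_example',   # hypothetical spider identifier
        'title': '示例公告',              # announcement title
        'publishtime': '2022-01-01',     # publish date
        'count': 0,                      # ES duplicate-check statistic
        'crawl': False,                  # task lock flag
        'crawl_status': None,            # set to 'finished' / 'error' / 'remove'
    }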


if __name__ == '__main__':
    DetailSpider(
        db='py_spider',
        crawl_tab='ybw_list',
        save_tab='data_bak',
    ).start()