取消公示-列表页.py

# -*- coding: utf-8 -*-
"""
Created on 2025-04-17
---------
@summary: 八戒公采
---------
@author: lzz
"""
import json
from collections import namedtuple

import feapder
from feapder.utils.tools import get_today_of_day, timestamp_to_date

from items.spider_item import MgpListItem


class Feapder(feapder.BiddingListSpider):

    def start_callback(self):
        Menu = namedtuple('Menu', ['channel', 'code', 'crawl_page'])

        self.site = "八戒公采"

        self.menus = [
            Menu('取消公示', 'a_bjgc_qxgs', 3),
        ]

        self.headers = {
            "authority": "bridgezhyc.zbj.com",
            "accept": "application/json, text/plain, */*",
            "accept-language": "zh-CN,zh;q=0.9",
            "accesstoken": "undefined",
            "cache-control": "no-cache",
            "content-type": "application/json",
            "logintoken": "undefined",
            "origin": "https://cg.zbj.com",
            "pragma": "no-cache",
            "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/118.0.0.0 Safari/537.36",
            "x-auth-token": "undefined",
            "x-requested-with": "XMLHttpRequest"
        }
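        # Note: the token headers are literally the string "undefined", mirroring the
        # site's anonymous browser session, so the list API appears to accept
        # unauthenticated requests; crawl_page=3 in the menu is assumed to be the
        # pagination cap consumed by infinite_pages() in parse() below.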

    def start_requests(self):
        for menu in self.menus:
            start_url = "https://bridgezhyc.zbj.com/api/notice/queryNoticeList"
            yield feapder.Request(url=start_url, item=menu._asdict(), page=1)
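    # feapder.Request keeps unrecognized keyword arguments (item and page here) as
    # attributes on the request object, so download_midware() and parse() read them
    # back via request.item / request.page.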

    def download_midware(self, request):
        page = request.page
        data = {
            "data": {
                "businessId": "",
                "biddingType": "0",
                "purchasingInformation": "",
                "transactionSupplierName": "",
                "regionVal": [],
                "requestId": "1531362372728",
                "requirementName": "",
                "type": "1",
                "page": page,
                "pageSize": 10,
                "province": "",
                "city": "",
                "region": "",
                "startTime": f"{get_today_of_day(-3)}",
                "endTime": f"{get_today_of_day()}"
            }
        }
        data = json.dumps(data, separators=(',', ':'))
        request.data = data
        request.headers = self.headers
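        # The list endpoint is POSTed a compact (no-whitespace) JSON body, presumably
        # matching what the site's own frontend sends; the query window spans the last
        # three days (get_today_of_day(-3) through today) at 10 records per page.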

    def parse(self, request, response):
        menu = request.item
        info_list = response.json.get('data').get('data')
        for info in info_list:
            hid = info.get('id')
            htp = info.get('type')
            appId = info.get('appId')
            href = f"https://cg.zbj.com/publicityDetails?id={hid}&type={htp}"
            if appId == "HLJGCY":
                title = info.get('purchasingInformation', '').strip() + info.get('name').strip() + "更正公告"
            else:
                title = info.get('name').strip()
            create_time = timestamp_to_date(int(str(info.get('publishDate'))[:10]))
            area = "全国"
            city = ""

            list_item = MgpListItem()  # item pushed into the storage pipeline
            list_item.href = href  # announcement link
            list_item.unique_key = ('href', create_time)
            list_item.channel = menu.get("channel")  # crawl channel defined in the menus above (assigned by the editor)
            list_item.spidercode = menu.get("code")  # spider code defined in the menus above (assigned by the editor)
            list_item.title = title  # title
            list_item.site = self.site
            list_item.publishtime = create_time
            list_item.area = area or "全国"  # area, defaults to 全国 (nationwide)
            list_item.city = city  # city, empty by default
            list_item.parse = "self.detail_get"  # detail page callback method
            list_item.deal_detail = []  # xpath list for extracting the detail body
            list_item.proxies = False

            ddata = {
                "data": {
                    "id": f"{hid}"
                }
            }
            ddata = json.dumps(ddata, separators=(',', ':'))
            list_item.request_params = {"data": ddata}
            list_item.parse_url = "https://bridgezhyc.zbj.com/api/buyer/queryPurchasingDemandHistoryById"

            yield list_item

        # infinite pagination
        request = self.infinite_pages(request, response)
        yield request
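    # infinite_pages() and the string callback "self.detail_get" are assumed to come
    # from the shared BiddingListSpider / detail-spider framework rather than core
    # feapder: infinite_pages() presumably bumps request.page and re-yields the list
    # request until the menu's crawl_page limit (3 pages here) is reached, while
    # detail_get later POSTs request_params to parse_url to fetch each detail page.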


if __name__ == "__main__":
    Feapder(redis_key="lzz:bjgc_qxgs").start()
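# Running this module directly starts the crawl; feapder's distributed spiders keep
# their task and dedup queues in Redis under the given redis_key ("lzz:bjgc_qxgs"),
# so a Redis instance configured in feapder's settings is assumed to be reachable.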