# 下水煤采购信息-列表页.py
  1. # -*- coding: utf-8 -*-
  2. """
  3. Created on 2025-05-26
  4. ---------
  5. @summary: 华能燃料交易网站
  6. ---------
  7. @author: lzz
  8. """
  9. import feapder
  10. from items.spider_item import MgpListItem
  11. from collections import namedtuple
  12. import requests
  13. def get_cookeis(proxies=False):
  14. headers = {
  15. "accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7",
  16. "accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
  17. "priority": "u=0, i",
  18. "referer": "https://fec.hpi.com.cn/Home/Login",
  19. "upgrade-insecure-requests": "1",
  20. "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/133.0.0.0 Safari/537.36"
  21. }
  22. url = "https://fec.hpi.com.cn/Home/Login"
  23. res = requests.get(url, headers=headers, timeout=30, proxies=proxies)
  24. cookies = res.cookies.get_dict()
  25. url1 = "https://fec.hpi.com.cn/Topic/Index?currmenuId=100&topiccode=BULLETIN"
  26. url2 = "https://fec.hpi.com.cn/Topic/Search?topiccode=BULLETIN"
  27. requests.get(url1, headers=headers, cookies=cookies, timeout=30, proxies=proxies)
  28. requests.get(url2, headers=headers, cookies=cookies, timeout=30, proxies=proxies)
  29. return cookies
  30. class Hnrljywz(feapder.BiddingListSpider):
  31. def start_callback(self):
  32. Menu = namedtuple('Menu', ['channel', 'code', 'typeone', 'order', 'crawl_page'])
  33. self.site = "华能燃料交易网站"
  34. self.menus = [
  35. Menu('下水煤采购信息', 'a_hnrljywz_xsmcgxx', 'JTOrder/search', '3', 1),
  36. ]
  37. self.count = 0
  38. self.headers = {
  39. "accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7",
  40. "accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
  41. "priority": "u=0, i",
  42. "upgrade-insecure-requests": "1",
  43. "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/133.0.0.0 Safari/537.36"
  44. }
  45. self.cookies= get_cookeis()
  46. def start_requests(self):
  47. for menu in self.menus:
  48. yield feapder.Request(item=menu._asdict(), headers=self.headers, proxies=False, page=1)
  49. def download_midware(self, request):
  50. menu = request.item
  51. if menu["code"] == 'a_hnrljywz_zdzbjg':
  52. url = "https://fec.hpi.com.cn/Evaluate/Index?currmenuId=100"
  53. else:
  54. url = f"https://fec.hpi.com.cn/{menu['typeone']}?page={request.page}"
  55. request.url = url
  56. request.headers = self.headers
  57. request.cookies = self.cookies
  58. def parse(self, request, response):
  59. if self.count > 5:
  60. return
  61. if response.status_code == 500:
  62. self.count += 1
  63. self.cookies = get_cookeis()
  64. yield request
  65. else:
  66. self.count = 0
  67. menu = request.item
  68. info_list = response.xpath('//table[@class="contain_table"]/tr')
  69. order = menu.get('order')
  70. for info in info_list[-20:]:
  71. title = info.xpath('./td[1]/a/text()').extract_first().strip()
  72. href = info.xpath('./td[1]/a/@href').extract_first()
  73. zbgs = "".join(info.xpath(f'./td[{order}]/text()').extract()).strip()
  74. create_time_org = "".join(info.xpath('./td[last()]/text()').extract())
  75. if '/' in create_time_org:
  76. create_time = create_time_org.replace('/', '-').strip()
  77. if len(create_time) == 16:
  78. create_time = create_time + ":00"
  79. elif '-' in create_time_org:
  80. create_time = create_time_org.strip()
  81. if len(create_time) == 16:
  82. create_time = create_time + ":00"
  83. else:
  84. create_time = ""
  85. area = "全国" # 省份
  86. city = "" # 城市
  87. list_item = MgpListItem() # 存储数据的管道
  88. list_item.href = href # 标书链接
  89. list_item.channel = menu.get("channel") # 最上方定义的抓取栏目 (编辑器定的)
  90. list_item.spidercode = menu.get("code") # 最上方定义的爬虫code(编辑器定的)
  91. list_item.title = title # 标题
  92. list_item.publishtime = create_time # 标书发布时间
  93. list_item.site = self.site
  94. list_item.area = area # 城市默认:全国
  95. list_item.city = city # 城市 默认为空
  96. list_item.unique_key = ("href", zbgs)
  97. list_item.parse = "self.detail_get"
  98. list_item.proxies = False
  99. list_item.parse_url = href
  100. yield list_item
  101. # 无限翻页设置
  102. request = self.infinite_pages(request, response)
  103. yield request
  104. if __name__ == "__main__":
  105. Hnrljywz(redis_key="lzz:hnrljywz_xsgg").start()