# 陕西采购与招标网-列表页.py (3.6 KB) — file-viewer metadata kept as a comment
  1. # -*- coding: utf-8 -*-
  2. """
  3. Created on 2025-01-03
  4. ---------
  5. @summary: 陕西采购与招标网
  6. ---------
  7. @author: lzz
  8. """
  9. import random
  10. import re
  11. import time
  12. from collections import namedtuple
  13. import feapder
  14. from items.spider_item import BidingListItem
  15. class Feapder(feapder.BiddingListSpider):
  16. def start_callback(self):
  17. Menu = namedtuple('Menu', ['channel', 'code', 'tid', 'cid', 'crawl_page'])
  18. self.site = "陕西采购与招标网"
  19. self.menus = [
  20. Menu('中标结果公示', 'sn_sxcgyzbw_zhbjggs2', 'result', '90', 5),
  21. ]
  22. self.headers = {
  23. "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7",
  24. "Accept-Language": "zh-CN,zh;q=0.9",
  25. "Cache-Control": "no-cache",
  26. "Connection": "keep-alive",
  27. "Pragma": "no-cache",
  28. "Upgrade-Insecure-Requests": "1",
  29. "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/111.0.0.0 Safari/537.36"
  30. }
  31. def start_requests(self):
  32. for menu in self.menus:
  33. start_url = f"http://bulletin.sntba.com/xxfbcmses/search/{menu.tid}.html"
  34. yield feapder.Request(url=start_url, item=menu._asdict(), page=1)
  35. def download_midware(self, request):
  36. page = request.page
  37. menu = request.item
  38. params = {
  39. "searchDate": "1998-03-25",
  40. "dates": "300",
  41. "word": "",
  42. "categoryId": menu.get('cid'),
  43. "industryName": "",
  44. "area": "",
  45. "status": "",
  46. "publishMedia": "",
  47. "sourceInfo": "",
  48. "showStatus": "",
  49. "page": f"{page}"
  50. }
  51. request.params = params
  52. request.headers = self.headers
  53. def parse(self, request, response):
  54. menu = request.item
  55. info_list = response.xpath('//table[@class="table_text"]/tr')
  56. for info in info_list[1:]:
  57. href_org = info.xpath('./td[1]/a/@href').extract_first().strip()
  58. href = "".join(re.findall("javascript:urlOpen\('(.*?)'",href_org))
  59. title = info.xpath('./td[1]/a/@title').extract_first().strip()
  60. create_time = info.xpath('./td[last()-1]/text()').extract_first().strip()
  61. if menu.get('code') == "sn_sxcgyzbw_zhbjggs2":
  62. create_time = info.xpath('./td[last()]/text()').extract_first().strip()
  63. area = "陕西" # 省份
  64. city = "" # 城市
  65. district = "" # 区/县
  66. list_item = BidingListItem() # 存储数据的管道
  67. list_item.href = href # 标书链接
  68. list_item.unique_key = ('href',)
  69. list_item.channel = menu.get("channel") # 最上方定义的抓取栏目 (编辑器定的)
  70. list_item.spidercode = menu.get("code") # 最上方定义的爬虫code(编辑器定的)
  71. list_item.title = title # 标题
  72. list_item.publishtime = create_time # 标书发布时间
  73. list_item.site = self.site
  74. list_item.area = area # 城市默认:全国
  75. list_item.city = city # 城市 默认为空
  76. list_item.district = district # 区/县
  77. list_item.parse = "self.detail_get"
  78. list_item.deal_detail = ['//div[@class="mian_list"]']
  79. list_item.parse_url = href
  80. yield list_item
  81. time.sleep(random.randint(3, 5))
  82. request = self.infinite_pages(request, response)
  83. yield request
  84. if __name__ == "__main__":
  85. Feapder(redis_key="lzz:sxcgyzbw_zgysgg2").start()