# -*- coding: utf-8 -*-
"""
Created on 2024-09-19
---------
@summary: 国铁采购平台
---------
@author: lzz
"""
from collections import namedtuple

import feapder
from feapder.utils.tools import joint_url

from items.spider_item import BidingListItem
from untils.tools import get_proxy
from fingerprint import get_fingerprint
class Spider(feapder.BiddingListSpider):
    """List-page spider for the China Railway procurement platform (cg.95306.cn).

    Crawls the procurement-notice list API, emits one BidingListItem per
    notice, and pages forward via feapder's infinite_pages mechanism.
    """

    def start_callback(self):
        """One-time setup: crawl menus, shared headers, proxy and fingerprint."""
        # Menu fields: display channel name, spider code, notice-type filter
        # value, API endpoint tail (tid), and number of list pages to crawl.
        Menu = namedtuple('Menu', ['channel', 'code', 'noticeType', 'tid', 'crawl_page'])
        self.site = "国铁采购平台"
        self.menus = [
            Menu('采购公告', 'a_gtcgpt_cggg', '000', 'queryProcurementNoticeList', 20),
        ]
        self.headers = {
            "Accept": "application/json, text/javascript, */*; q=0.01",
            "Accept-Language": "zh-CN,zh;q=0.9,en;q=0.8",
            "Cache-Control": "no-cache",
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/128.0.0.0 Safari/537.36",
            "X-Requested-With": "XMLHttpRequest",
        }
        # Cookies are built lazily on the first request (see download_midware)
        # and reset on failure (see exception_request).
        self.cookies = None
        self.proxy = get_proxy()
        # Browser-fingerprint id; the site expects it both as the mhId cookie
        # and as the mhId form/query field.
        self.fp = get_fingerprint()

    def start_requests(self):
        """Yield one seed request per menu entry."""
        for menu in self.menus:
            referer = "https://cg.95306.cn/baseinfor/notice/procurementNotice"
            params = {
                "bidType": "",
                "noticeType": f"{menu.noticeType}",
                "transactionType": "01",
                "wzType": "",
                "title": "",
                "bidding": "",
                "navigation": ""
            }
            # The API checks that the Referer carries the list page's query
            # string, so it is rebuilt here from the menu's filter values.
            self.headers["Referer"] = joint_url(referer, params)
            url = f"https://cg.95306.cn/proxy/portal/elasticSearch/{menu.tid}"
            # proxies=False disables feapder's own proxying; the real proxy is
            # attached per-request in download_midware instead.
            yield feapder.Request(url, item=menu._asdict(), page=1, proxies=False)

    def download_midware(self, request):
        """Attach form data, headers, cookies and proxy to every request."""
        if self.cookies is None:
            self.cookies = {
                'AlteonPcgmh': '0a03b7f3bb36ad3f1f41',
                'mhId': self.fp,
            }
        # NOTE(review): noticeType is hardcoded to '000' instead of taken from
        # request.item — fine for the single current menu, but would silently
        # ignore the filter of any menu added later; confirm before extending.
        # NOTE(review): the payload carries no page-number field even though
        # parse() uses infinite_pages — verify against the live API that
        # pagination is actually applied (e.g. via a separate parameter).
        data = {
            'mhId': self.fp,
            'projBidType': '01',
            'bidType': '',
            'noticeType': '000',
            'wzType': '',
            'title': '',
        }
        request.data = data
        request.headers = self.headers
        request.cookies = self.cookies
        request.proxies = self.proxy

    def validate(self, request, response):
        """Reject (and retry) responses whose JSON lacks a 'data' payload."""
        data = response.json.get('data')
        if not data:
            # Raising triggers feapder's retry path -> exception_request().
            raise ValueError('数据不能为空!')
        return True

    def parse(self, request, response):
        """Turn each list entry into a BidingListItem and schedule next page."""
        menu = request.item  # dict form of the Menu namedtuple (see start_requests)
        info_list = response.json.get('data')
        for info in info_list.get('resultData').get('result'):
            href_id = info.get('id')
            # Human-viewable detail page; stored as the canonical link.
            href = f"https://cg.95306.cn/baseinfor/notice/informationShow?id={href_id}"
            title = info.get('notTitle').strip()
            create_time = info.get('checkTime')
            area = "全国"  # province; this site is nationwide
            city = ""  # city; unknown at list level
            list_item = BidingListItem()  # pipeline item that stores the record
            list_item.href = href  # notice link
            list_item.channel = menu.get("channel")  # crawl channel defined in the menus above
            list_item.spidercode = menu.get("code")  # spider code defined in the menus above
            list_item.title = title  # notice title
            list_item.publishtime = create_time  # notice publish time
            list_item.site = self.site
            list_item.area = area  # defaults to nationwide
            list_item.city = city  # empty by default
            list_item.unique_key = ("href",)  # dedupe on the detail link
            list_item.parse = "self.detail_get"  # detail-stage handler name
            list_item.proxies = False
            # The detail stage fetches a JSON endpoint, not the href above.
            params_d = {
                "noticeId": f"{href_id}",
                "mhId": self.fp,
            }
            list_item.request_params = {"params": params_d}
            list_item.parse_url = "https://cg.95306.cn/proxy/portal/elasticSearch/indexView"
            yield list_item

        # Infinite paging: re-schedule this request for the next page until
        # menu.crawl_page pages have been fetched.
        request = self.infinite_pages(request, response)
        yield request

    def exception_request(self, request, response):
        """On failure, rotate session identity (cookies/proxy/fingerprint) and retry."""
        self.cookies = None
        self.proxy = get_proxy()
        self.fp = get_fingerprint()
        yield request
if __name__ == "__main__":
    # redis_key namespaces this spider's task/dedupe queues in Redis.
    Spider(redis_key="lzz:Gtcgpt").start()