# -*- coding: utf-8 -*-
"""
Created on 2023-7-22
---------
@summary: 云采通高校采购联盟
---------
@author: lzz
"""
from collections import namedtuple

import execjs
import feapder
from feapder.utils.tools import timestamp_to_date
from items.spider_item import BidingListItem, DataBakItem
from untils.attachment import AttachmentDownloader
class Spider(feapder.BiddingListSpider):
    """List spider for 云采通高校采购联盟 (yuncaitong.cn).

    Pages through the site's Solr search API. Announcements whose
    ``contentType`` is ``PDF`` are fetched as attachments and emitted
    directly as :class:`DataBakItem`; everything else is forwarded to the
    detail-page pipeline as :class:`BidingListItem`.
    """

    def start_callback(self):
        """Register the site name and the menus (channels) to crawl."""
        Menu = namedtuple('Menu', ['channel', 'code', 'crawl_page'])
        self.site = "云采通高校采购联盟"
        self.menus = [
            Menu('采购需求', 'a_yctgxcglm_cgxq', 2),
        ]

    def start_requests(self):
        """Seed one paginated request per menu against the search API."""
        url = "https://www.yuncaitong.cn/api/publish/solr/infoSearch"
        for menu in self.menus:
            yield feapder.Request(url, item=menu._asdict(), page=1)

    def _sign_context(self):
        """Return the compiled anti-crawl signing script, compiling it once.

        The original re-read and re-compiled ``yctgxcglm_pm.js`` for every
        request; the compiled execjs context is invariant, so cache it on
        the instance. The file is opened as UTF-8 explicitly — relying on
        the platform default codec breaks on non-UTF-8 locales.
        """
        ctx = getattr(self, '_sign_ctx', None)
        if ctx is None:
            with open('./yctgxcglm_pm.js', 'r', encoding='utf-8') as f:
                ctx = execjs.compile(f.read())
            self._sign_ctx = ctx
        return ctx

    def download_midware(self, request):
        """Attach query params and the X-Requested-* signature headers.

        The At/Nonce/Token triple is produced by the site's own JS signing
        routine (``get_pm``), evaluated through execjs.
        """
        page = request.page
        params = {
            "page": f"{page - 1}",  # the API is 0-based; request.page is 1-based
            "rows": "12",
            "timeBegin": "1682385621358",
            "infoType": "purchaseAnnouncement"
        }
        pm = self._sign_context().call('get_pm')
        request.headers = {
            "Accept": "application/json, text/plain, */*",
            "Accept-Language": "zh-CN,zh;q=0.9",
            "Authorization": "null",
            "Cache-Control": "no-cache",
            "Connection": "keep-alive",
            "Pragma": "no-cache",
            "Referer": "https://www.yuncaitong.cn/publish/demand.shtml",
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36",
            "X-Requested-At": pm.get('At'),
            "X-Requested-Nonce": pm.get('Nonce'),
            "X-Requested-Token": pm.get('Token'),
        }
        request.params = params

    def parse(self, request, response):
        """Turn one API page into data/list items, then schedule the next page."""
        menu = request.item
        info_list = response.json
        for info in info_list:
            id_ = info.get('id')
            # Guard: a missing 'subject' used to raise AttributeError on .strip().
            title = (info.get('subject') or '').strip()
            # createTime is a millisecond epoch; keep only the seconds part.
            create_time = timestamp_to_date(int(str(info.get('createTime'))[:10]),
                                            time_format="%Y-%m-%d")
            # Detail URLs embed the publish date as path segments.
            href = f"https://www.yuncaitong.cn/publish/{create_time.replace('-', '/')}/{id_}.shtml"
            area = "全国"  # province (site-wide default)
            city = ""     # city (unknown, left empty)
            if info.get('contentType') == "PDF":
                # PDF announcements: download the file, emit data directly.
                data_item = DataBakItem()
                data_item.href = href
                data_item.unique_key = ('title', 'href')
                data_item.channel = menu.get("channel")
                data_item.spidercode = menu.get("code")
                data_item.title = title
                data_item.site = self.site
                data_item.publishtime = create_time
                data_item.area = area
                data_item.city = city
                file_url = href.replace('.shtml', '/content.pdf')
                attachments = {}
                attachment = AttachmentDownloader().fetch_attachment(
                    file_name=title,
                    file_type='pdf',
                    download_url=file_url,
                    proxies=request.get_proxies()
                )
                if attachment.get('size'):
                    data_item.contenthtml = '详情请访问原网页!'
                    attachments[str(len(attachments) + 1)] = attachment
                    data_item.projectinfo = {"attachments": attachments}
                # NOTE(review): source indentation was lost; the item is
                # emitted even when the download produced no size — confirm
                # against the original spider's behavior.
                yield data_item
            else:
                # HTML announcements: hand off to the detail-page pipeline.
                list_item = BidingListItem()
                list_item.unique_key = ('href', 'title')
                list_item.parse = "self.detail_get"  # detail-page callback
                list_item.deal_detail = ['//div[@class="project-details positionrl"]',
                                         '//div[@class="content"]']  # body xpaths
                list_item.proxies = False
                list_item.parse_url = href  # detail-page request URL
                list_item.href = href
                list_item.channel = menu.get("channel")
                list_item.spidercode = menu.get("code")
                list_item.title = title
                list_item.site = self.site
                list_item.publishtime = create_time
                list_item.area = area
                list_item.city = city
                yield list_item

        # Infinite paging: framework stops at the menu's crawl_page limit.
        request = self.infinite_pages(request, response)
        yield request
if __name__ == "__main__":
    # Run the spider standalone; redis_key namespaces its task/dedup queues.
    Spider(redis_key="lzz:yctgxcglm_bggg").start()