from concurrent.futures import ThreadPoolExecutor

from lxml.html import fromstring, HtmlElement

from crawler.defaults import fetch_page_by_post, crawl_request, crawl_params
from crawler.fields import (
    SaveCompanyInformation,
    BulletinBasicFields
)


class SXSpider:
    def __init__(self):
        self.sign = 'sx'
        self.enable_proxy = None

    def extract_text_and_save(self, element: HtmlElement, **request_params):
        # Pull every bulletin entry out of the listing page and persist it.
        nodes = element.xpath('//ul[@class="listLeft-item"]/li')
        for node in nodes:
            name = "".join(node.xpath('./a/text()')).strip()
            item = BulletinBasicFields(
                company=name,
                province='陕西省',
                url=request_params.get('url'),
                request_data=request_params.get('request_data'),
                page=request_params.get('page')
            )
            SaveCompanyInformation(item, self.sign)

    def crawl_spider(self, task: tuple):
        # A task is (url, form data, page number): fetch the page by POST,
        # parse it, and hand the document tree to the extractor.
        url, data, page = task
        response = crawl_request(fetch_page_by_post, url, self.enable_proxy, data=data)
        element = fromstring(response.text)
        self.extract_text_and_save(element, url=url, page=page, request_data=data)

    def generate_request_tasks(self):
        # Expand every configured start URL into one POST task per page.
        results = []
        for spider in crawl_params(self.sign):
            url = "".join(spider.keys())
            data: dict = spider.get(url)
            total_page = int(data.get('pageTotal'))
            for page in range(1, total_page + 1):
                item = {
                    'code': '',
                    'year': data.get('year'),
                    'contentUrlPage.pageSize': data.get('pageSize'),
                    'contentUrlPage.currentPage': str(page)
                }
                results.append((url, item, page))
        yield from results

    def run(self, enable_proxy=None, max_workers: int = 1):
        self.enable_proxy = enable_proxy or False
        with ThreadPoolExecutor(max_workers=max_workers) as executor:
            # map() drains the task generator and dispatches each task to the pool.
            executor.map(self.crawl_spider, self.generate_request_tasks())
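

# A minimal usage sketch, assuming the crawler.defaults and crawler.fields
# modules above are importable in this environment; the proxy flag and
# worker count here are illustrative values, not taken from the original.
if __name__ == '__main__':
    spider = SXSpider()
    spider.run(enable_proxy=False, max_workers=4)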