from concurrent.futures import ThreadPoolExecutor

from lxml.html import fromstring, HtmlElement

from crawler.defaults import fetch_page_by_post, crawl_request, crawl_params
from crawler.fields import BulletinBasicFields, SaveCompanyInformation


class GDSpider:
    """Spider for the Guangdong ('gd') company bulletin listings."""

    def __init__(self):
        self.sign = 'gd'          # source tag used to look up crawl params and label saved rows
        self.enable_proxy = None  # set by run(); whether requests go through a proxy

    def extract_text_and_save(self, element: HtmlElement, **request_params):
        # Every result row after the header row of the second table inside <form name="frm">.
        nodes = element.xpath('//*[@name="frm"]/div/table[2]//tr[position()>1]')
        for node in nodes:
            social_id = "".join(node.xpath('./td[2]/text()')).strip()
            item = BulletinBasicFields(
                social_id=social_id,
                company="".join(node.xpath('./td[3]//text()')).strip(),
                # Characters 3-8 of the unified social credit code encode the
                # administrative division (district) code.
                district_code=social_id[2:8],
                province='广东省',
                url=request_params.get('url'),
                request_data=request_params.get('request_data'),
                page=request_params.get('page')
            )
            SaveCompanyInformation(item, self.sign)

    def crawl_spider(self, task: tuple):
        url, data, page = task
        headers = {
            'Content-Type': 'application/x-www-form-urlencoded',
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/95.0.4638.69 Safari/537.36',
        }
        # POST the form data for this page, parse the HTML, then extract and persist each row.
        response = crawl_request(fetch_page_by_post, url, self.enable_proxy, headers=headers, data=data)
        element = fromstring(response.text)
        self.extract_text_and_save(element, url=url, page=page, request_data=data)

    def generate_request_tasks(self):
        results = []
        for spider in crawl_params(self.sign):
            # Each entry maps a single URL to the base POST parameters for that listing.
            url = "".join(spider.keys())
            params: dict = spider.get(url)
            total_page = int(params.get('pageInfo.pageTotal'))
            # Build one (url, form data, page number) task per results page.
            for page in range(1, total_page + 1):
                item = {**params}
                item.update({
                    'pageInfo.switchingPage': 'true',
                    'pageInfo.pageIndex': str(page)
                })
                results.append((url, item, page))
        yield from results

    def run(self, enable_proxy=None, max_workers: int = 1):
        self.enable_proxy = enable_proxy or False
        with ThreadPoolExecutor(max_workers=max_workers) as executor:
            # Consume the map iterator so any exception raised in a worker
            # surfaces here instead of being silently discarded.
            list(executor.map(self.crawl_spider, self.generate_request_tasks()))
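

# A minimal usage sketch: this entry point is an assumption, not part of the
# original module. It runs the spider without a proxy on a small thread pool.
if __name__ == '__main__':
    spider = GDSpider()
    spider.run(enable_proxy=False, max_workers=4)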