```python
import threading
from concurrent.futures import ThreadPoolExecutor, wait

from crawler.spiders import SearchEngine, VisitDomain, SyncData
from crawler.utils import err_details


class BreadthCrawler(SearchEngine, VisitDomain):
    def __init__(self, workers=1, **kwargs):
        # Set up shared crawl state, then initialise both parent
        # classes explicitly.
        SyncData(**kwargs)
        SearchEngine.__init__(self, **kwargs)
        VisitDomain.__init__(self, **kwargs)
        self._workers = workers

    def start(self):
        # Producer: load search-engine results on a dedicated thread.
        threading.Thread(
            target=self.load_engines,
            name='MainSearchEngine'
        ).start()
        # Consumers: submit one search_domains task per worker.
        with ThreadPoolExecutor(max_workers=self._workers) as executor:
            futures = []
            for _ in range(self._workers):
                future = executor.submit(self.search_domains)
                # Report any worker exception as soon as it finishes.
                future.add_done_callback(err_details)
                futures.append(future)
            wait(futures)
        print('Domain sourcing finished')
```
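For context, a minimal sketch of driving this class is shown below. The keyword arguments are hypothetical: the accepted options are whatever `SearchEngine` and `VisitDomain` consume via `**kwargs`, which this listing does not show, and the import path is assumed.

```python
# Minimal usage sketch. The import path and the keyword arguments are
# assumptions; the real options depend on SearchEngine/VisitDomain.
from crawler import BreadthCrawler  # hypothetical import path

crawler = BreadthCrawler(
    workers=4,             # size of the domain-visiting thread pool
    keywords=['python'],   # assumed option forwarded via **kwargs
)
crawler.start()  # blocks until every worker future completes
```

Note that `start()` returns only after the `with ThreadPoolExecutor(...)` block exits, i.e. once all submitted `search_domains` futures have finished; the producer thread started for `load_engines` is not joined explicitly.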