import threading
import time

from common.log import logger
from crawler.bloom_filter.RedisBloomFilter import RedisFilter
from settings import (
    MGO_REMOVAL_DUPLICATE,
    REQUIREMENT_PHRASE,
    SENSITIVE_WORDS
)


def _requirement_phrase(title: str) -> bool:
    """Return True if the title contains at least one required keyword."""
    return any(word in title for word in REQUIREMENT_PHRASE)


def _sensitive_word(title: str) -> bool:
    """Return True if the title contains a sensitive word."""
    return any(word in title for word in SENSITIVE_WORDS)


class Validator:

    def __init__(self):
        # Redis-backed bloom filter used for deduplication, sized for
        # up to 1e9 entries with a 0.001% false-positive rate.
        self._rbf = RedisFilter(redis_key='RemovalDuplicate_')
        self._rbf.start(1000000000, 0.00001)
        self._sensitive_word = _sensitive_word
        self._requirement_phrase = _requirement_phrase
        self._loop_interval = 7200  # refresh from MongoDB every 2 hours

    def _sync_data_rubbish(self):
        """Periodically mirror deduplication records from MongoDB into the bloom filter."""
        while True:
            count = 0
            cursor = MGO_REMOVAL_DUPLICATE.find(projection={'domain': 1})
            try:
                for item in cursor.sort([('_id', -1)]):
                    domain = item.get('domain')
                    if not isinstance(domain, str):
                        # Drop malformed records instead of feeding them to the filter.
                        MGO_REMOVAL_DUPLICATE.delete_one({'_id': item['_id']})
                        continue
                    if not self._rbf.is_exists(domain):
                        self._rbf.add(domain)
                        count += 1
            finally:
                # Log progress even if the sync pass is interrupted.
                logger.info(f'[filter] loaded: {len(self._rbf)} records, new: {count}')
            time.sleep(self._loop_interval)

    def load_filter(self):
        logger.info('[filter] initial load')
        threading.Thread(
            target=self._sync_data_rubbish,
            name='RemovalDuplicate_',
            daemon=True  # background sync must not block interpreter shutdown
        ).start()

    def add_url(self, url: str):
        """Record a URL as seen."""
        self._rbf.add(url)

    def words(self, title: str, task: dict) -> bool:
        """Validate a title; flag the task and reject it when it fails a check."""
        if self._sensitive_word(title):
            task['sensitive'] = True
            return False
        elif not self._requirement_phrase(title):
            task['requirement'] = True
            return False
        return True

    def url(self, url: str) -> bool:
        """Return True if the URL has already been seen."""
        return self._rbf.is_exists(url)
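

if __name__ == '__main__':
    # Minimal usage sketch (illustrative; assumes a reachable Redis instance
    # and the settings imported above; the sample title and URL are
    # placeholders, not values from the original project).
    validator = Validator()
    validator.load_filter()  # start the background MongoDB -> bloom-filter sync
    task = {}
    url = 'https://example.com/article/1'
    if validator.words('example article title', task) and not validator.url(url):
        validator.add_url(url)  # mark as seen so later crawls skip it
    else:
        logger.info(f'skip task: {task}')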