utils.py

import re
from html import unescape
from urllib.parse import urlencode, urljoin

from bs4 import BeautifulSoup
from lxml.etree import ParseError
from lxml.html import etree, HtmlElement, fromstring, tostring
# Note: the top-level get_host export exists in urllib3 1.x; urllib3 2.x removed it.
from urllib3 import get_host

from common.log import logger
from crawler.defaults import (
    USELESS_TAG,
    USELESS_ATTR,
    TAGS_CAN_BE_REMOVE_IF_EMPTY,
    VALID_WORDS,
    VOID_WORDS
)


def err_details(worker):
    worker_exception = worker.exception()
    if worker_exception:
        logger.exception("Worker returned exception: {}".format(worker_exception))
    return worker
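
# Usage sketch (assumption: `worker` is a concurrent.futures.Future, so err_details
# fits the done-callback signature; crawl_task/url are hypothetical names):
#   from concurrent.futures import ThreadPoolExecutor
#   with ThreadPoolExecutor() as pool:
#       future = pool.submit(crawl_task, url)
#       future.add_done_callback(err_details)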


def extract_host(url):
    """
    >>> extract_host('http://192.168.3.207:8080/')
    'http://192.168.3.207:8080/'
    """
    _s, _h, _p = get_host(url)
    return f"{_s}://{_h}/" if _p is None else f"{_s}://{_h}:{_p}/"


def split_domain(val: str):
    # Values that start with a digit are treated as IP addresses and kept whole;
    # anything else is split on dots and colons.
    if re.match(r'\d+', val) is None:
        return re.split(r'[.:]', val)
    return [val]
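
# Illustrative behaviour (not from the original source):
#   split_domain('www.example.com:8080') -> ['www', 'example', 'com', '8080']
#   split_domain('192.168.3.207')        -> ['192.168.3.207']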


def extract_domain(url):
    """
    >>> extract_domain('http://192.168.3.207:8080/')
    '192.168.3.207:8080'
    """
    _, host, port = get_host(url)
    return f"{host}" if port is None else f"{host}:{port}"


def extract_page_title(source):
    nodes = []
    try:
        element = html2element(source)
        nodes = element.xpath('/html/head/title/text()|//title/text()')
    except ParseError:
        pass
    if len(nodes) > 1:
        return "".join(";".join(nodes).split())
    return "".join("".join(nodes).split())
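
# Illustrative behaviour (assumption, traced through the logic above): all
# whitespace inside the title is stripped, and multiple titles are joined by ';'.
#   extract_page_title('<html><head><title> News-Center </title></head></html>')
#   -> 'News-Center'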


def is_url(url):
    """Check whether a URL is well-formed."""
    _regex = re.compile(
        r'^(?:http|ftp)s?://'  # http:// or https://
        r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|'  # domain...
        r'localhost|'  # localhost...
        r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})'  # ...or ip
        r'(?::\d+)?'  # optional port
        r'(?:/?|[/?]\S+)$', re.IGNORECASE)
    return re.match(_regex, url) is not None
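
# Illustrative behaviour: the scheme is mandatory.
#   is_url('https://example.com/path')  -> True
#   is_url('example.com')               -> False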


def is_domain(domain):
    _regex = re.compile(
        r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|'  # domain...
        r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})'  # ...or ip
        r'(?::\d+)?', re.IGNORECASE)
    return re.match(_regex, domain) is not None


def label_split(val):
    # '~`!#$%^&*()_+-=|\';"":/.,?><~·!@#¥%……&*()——+-=“:’;、。,?》{《}】【\n\]\[ '
    result = re.split(r'[- _,,\\.|-「」【】??!!/、] *', val)
    result = [v for v in result if len(v) > 0]
    return result
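
# Illustrative behaviour:
#   label_split('foo_bar,baz hello') -> ['foo', 'bar', 'baz', 'hello']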


def get_url(url: str, parameters: dict):
    """
    Join a URL with its query parameters.
    :param url: the link
    :param parameters: the query parameters
    :return: the joined url
    """
    _data = '?' + urlencode(parameters)
    return urljoin(url, _data)
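
# Illustrative behaviour:
#   get_url('http://example.com/list', {'page': 2}) -> 'http://example.com/list?page=2'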


def clean_html(source: str):
    html_str = re.sub(r'<!--[\s\S]*?-->', '', source)
    html_str = re.sub(r'<html>|<html [^>]*>|</html>', '', html_str)
    html_str = re.sub(r'<head>[\s\S]*?</head>', '', html_str)
    html_str = re.sub(r'<script[^<>]*>[\s\S]*?</script>|</script>', '', html_str)
    html_str = re.sub(r'<style[^<>]*>[\s\S]*?</style>', '', html_str)
    html_str = re.sub(r'<link[^<>]*>', '', html_str)
    html_str = re.sub(r'<img[^>]*>', '', html_str)
    return html_str
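
# Illustrative behaviour: comments, head, scripts, styles, links and images are
# stripped, while body markup is kept.
#   clean_html('<head><title>t</title></head><p>hi<img src="x.png"></p>')
#   -> '<p>hi</p>'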


def extract_text(source: str):
    soup = BeautifulSoup(source, "lxml")
    return soup.get_text()


def verify_text(val: str, length=50):
    """Count the digits, letters, and CJK characters to decide whether text is substantive."""
    if val is None:
        return False
    sub_pattern = [r'<[^>]+>', r'[^0-9a-zA-Z\u4e00-\u9fa5]+']
    for pattern in sub_pattern:
        val = re.sub(pattern, '', val)
    # If the remaining text is shorter than `length`, the page carries no real detail content.
    if len(val) < length:
        # invalid text
        return False
    # valid text
    return True
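
# Illustrative behaviour: tags and punctuation are discarded before counting.
#   verify_text('<p>abc</p>', length=3) -> True
#   verify_text('<p>ab</p>', length=3)  -> False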


def element2html(element: HtmlElement) -> str:
    return unescape(tostring(element, encoding="utf-8").decode())


def html2element(source: str, base_url=None) -> HtmlElement:
    html_str = re.sub('\ufeff|\xa0|\u3000|\x00', '', source)
    html_str = re.sub(r'<!--[\s\S]*?-->', '', html_str)  # strip comments
    html_str = re.sub(r'<style[^<>]*>[\s\S]*?</style>', '', html_str)  # strip styles
    html_str = re.sub(r'<script[^<>]*>[\s\S]*?</script>', '', html_str)  # strip scripts
    html_str = re.sub(r'</?br.*?>', '', html_str)
    html_str = re.sub(r'<\?xml.*?>', '', html_str)
    html_str = re.sub(r'<!DOCTYPE.*?>', '', html_str)
    return fromstring(html_str, base_url=base_url)
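
# Round-trip sketch (assumption: lxml may wrap bare fragments in a container element):
#   element = html2element('<div><p>hi</p></div>')
#   element2html(element)  # -> an HTML string rendered from the parsed tree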


def iter_node(element: HtmlElement, depth=1):
    """Depth-first traversal yielding (node, depth) pairs, starting with the root at depth 1."""
    yield element, depth
    depth += 1
    for sub_element in element:
        if isinstance(sub_element, HtmlElement):
            yield from iter_node(sub_element, depth)
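
# Usage sketch:
#   for node, depth in iter_node(element):
#       print(depth, node.tag)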


def remove_node(node: HtmlElement):
    """
    This is an in-place operation; nothing needs to be returned.
    :param node:
    :return:
    """
    parent = node.getparent()
    if parent is not None:
        # drop_tree() (unlike parent.remove(node)) keeps the node's tail text in the document.
        node.drop_tree()


def drop_tag(node: HtmlElement):
    """
    Delete only the tag itself, merging its text and children into the parent.
    :param node:
    :return:
    """
    parent = node.getparent()
    if parent is not None:
        node.drop_tag()


def is_empty_element(node: HtmlElement):
    # len(node) counts element children; getchildren() is deprecated in lxml.
    return not len(node) and not node.text


def normalize_node(element: HtmlElement):
    etree.strip_elements(element, *USELESS_TAG, with_tail=False)
    # Node pre-processing. When a node is removed and another node is updated within
    # the same loop, the update may not take effect; the cause is unclear.
    # Merge empty nodes and drop noise nodes.
    for node, _ in iter_node(element):
        if node.tag.lower() in TAGS_CAN_BE_REMOVE_IF_EMPTY and is_empty_element(node):
            remove_node(node)
        if node.tag.lower() == 'p':
            etree.strip_tags(node, 'span', 'strong')
        # If a div or span tag does not contain any sub node, it can be converted to a p node.
        if node.tag.lower() == 'div' and len(node) == 0:
            node.tag = 'p'
        if node.tag.lower() == 'span' and len(node) == 0:
            node.tag = 'p'
        # Remove empty p tags.
        if node.tag.lower() == 'p' and not node.xpath('.//img'):
            if not (node.text and node.text.strip()):
                drop_tag(node)
        # Delete inline styles.
        style = node.get('style')
        if style:
            del node.attrib['style']
        # Obsolete scrolling element.
        if node.tag.lower() == 'marquee':
            remove_node(node)
    # Remove nodes whose id/class exactly matches a noisy attribute value.
    for node, _ in iter_node(element):
        attr = (node.get('id') or node.get('class'))
        if attr:
            if attr.lower() in USELESS_ATTR:
                remove_node(node)
                # Stop after the first removal: the tree was mutated mid-iteration.
                break


def pre_parse(element):
    normalize_node(element)
    return element
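
# Pipeline sketch (assumption: this mirrors how the crawler chains these helpers):
#   element = html2element(page_source)
#   element = pre_parse(element)
#   cleaned = element2html(element)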


def check_text_by_words(val: str):
    """Reject text that hits any VOID_WORDS pattern; accept it on the first VALID_WORDS hit."""
    for word in VOID_WORDS:
        search = re.search(word, val)
        if search is not None:
            return False
    for keyword in VALID_WORDS:
        search = re.search(keyword, val)
        if search is not None:
            return True
    return False
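
# Note: VOID_WORDS and VALID_WORDS (imported from crawler.defaults) are assumed to be
# iterables of regex pattern strings; re.search() compiles each one on the fly.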