|
@@ -14,25 +14,11 @@ class DomAnalysis(FilterUrl):
|
|
|
Comment
|
|
|
"""
|
|
|
|
|
|
- def __init__(self, isogeny: bool, dom: str, host=None, request_url=None):
|
|
|
- self.soup = BeautifulSoup(dom, "lxml")
|
|
|
+ def __init__(self, isogeny: bool, dom: str, addr: str):
|
|
|
self.pattern = re.compile("href=([a-zA-Z0-9'\"+?=.%/_]*)")
|
|
|
self.isogeny = isogeny
|
|
|
- if self.isogeny:
|
|
|
- if host is None:
|
|
|
- raise TypeError(
|
|
|
- '{} missing 1 required positional argument: {}'.format(
|
|
|
- self.__class__.__name__, 'host')
|
|
|
- )
|
|
|
- self.host = host # 网址主机地址
|
|
|
- if not self.isogeny:
|
|
|
- if request_url is None:
|
|
|
- raise TypeError(
|
|
|
- '{} missing 1 required positional argument: {}'.format(
|
|
|
- self.__class__.__name__, 'request_url'
|
|
|
- )
|
|
|
- )
|
|
|
- self.request_url = request_url # 当前请求网址
|
|
|
+ self.soup = BeautifulSoup(dom, "lxml")
|
|
|
+ self.addr = addr # request address
|
|
|
|
|
|
def show_html(self):
|
|
|
#https://www.crummy.com/software/BeautifulSoup/bs3/documentation.zh.html 发现prettify是u字符
|
|
@@ -46,14 +32,18 @@ class DomAnalysis(FilterUrl):
|
|
|
# 静态页面链接解析 和 javascript动态解析
|
|
|
for tag in self.soup.find_all('a'):
|
|
|
if self.judge(tag.get('href')):
|
|
|
- urls.append(self.filter(tag.get('href')))
|
|
|
+ href = self.urljoin(tag.get('href'))
|
|
|
+ if self.filter(href) and href not in urls:
|
|
|
+ urls.append(href)
|
|
|
|
|
|
# 自动交互. 这里采用静态解析的思路提取交互式生成的链接
|
|
|
for tag in self.soup.find_all():
|
|
|
if self._is_input_with_onclick(tag):
|
|
|
for item in re.findall(self.pattern, tag.get('onclick')):
|
|
|
if self.judge(self.onclick_filter(item)):
|
|
|
- urls.append(self.filter(self.onclick_filter(item)))
|
|
|
+ href = self.urljoin(self.onclick_filter(item))
|
|
|
+ if self.filter(href) and href not in urls:
|
|
|
+ urls.append(href)
|
|
|
return urls
|
|
|
|
|
|
def get_items(self):
|
|
@@ -67,11 +57,11 @@ class DomAnalysis(FilterUrl):
|
|
|
if tag.get('href') is None:
|
|
|
return
|
|
|
try:
|
|
|
- href = self.filter(tag.get('href'))
|
|
|
+ href = self.urljoin(tag.get('href'))
|
|
|
except ValueError:
|
|
|
return
|
|
|
data = {'title': name, 'href': href}
|
|
|
- if data not in items:
|
|
|
+ if self.filter(href) and data not in items:
|
|
|
items.append(data)
|
|
|
|
|
|
for tag in self.soup.find_all('a'):
|