from hytest import *
from playwright.sync_api import sync_playwright
from PIL import Image, ImageChops
from bs4 import BeautifulSoup
import requests
import os


class APILink:
    headers = {
        "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36 Edg/114.0.1823.67;jy-test"
    }
    # Store the request headers in the hytest global store
    GSTORE['headers'] = headers
    # Store a shared requests.Session in the global store
    s = requests.Session()
    GSTORE['s'] = s
    # Open a URL and return the page title
    def obtain_url_title(self, url):
        headers = GSTORE['headers']
        s = GSTORE['s']
        response = s.get(url, headers=headers)
        response.encoding = 'utf-8'  # decode the response as UTF-8
        soup = BeautifulSoup(response.text, 'html.parser')
        title = soup.title.string
        return title

    # Open a URL and return True if the response status code is 200
    def open_url_status(self, url):
        headers = GSTORE['headers']
        s = GSTORE['s']
        response = s.get(url, headers=headers)
        return response.status_code == 200

    # Open a URL and return the text of an element at the bottom of the page
    def obtain_url_bottom(self, url, loc):
        response = requests.get(url)
        response.encoding = 'utf-8'  # decode the response as UTF-8
        soup = BeautifulSoup(response.text, 'html.parser')
        # Find the element by its class name and read its text
        element = soup.find('div', attrs={'class': loc})
        # element = soup.find(loc)
        element_value = element.text
        return element_value
    def setup(self):
        # Initialize Playwright and launch a headless Chromium browser
        self.playwright = sync_playwright().start()
        self.browser = self.playwright.chromium.launch(headless=True)
        self.page = self.browser.new_page()

    def teardown(self):
        # Close the browser and stop Playwright
        self.browser.close()
        self.playwright.stop()

    def obtain_element_text(self, url, element):
        self.page.goto(url)
        # Wait for the element to appear
        self.page.wait_for_selector(element)
        # Locate the element
        element_handle = self.page.locator(element)
        # Get the element's text content
        element_text = element_handle.text_content()
        # element_attribute = element_handle.get_attribute('attribute name')
        # print(f"Element attribute value: {element_attribute}")
        # Pause briefly to let the page settle (wait_for_timeout is a fixed sleep)
        self.page.wait_for_timeout(3000)
        return element_text
    # Masked screenshot: use when the page contains dynamic elements that
    # must be covered before comparing snapshots
    def save_screenshot_mask(self, url, output_path, elements, clip=None):
        self.page.goto(url)
        locs = []
        for element in elements:
            loc = self.page.locator(element)
            locs.append(loc)
        self.page.screenshot(path=output_path, mask=locs, clip=clip)

    # Plain screenshot: use when the page content is static and snapshots
    # can be compared directly
    def save_screenshot(self, url, output_path, clip=None):
        self.page.goto(url)
        self.page.screenshot(path=output_path, clip=clip)

    def compare_images(self, image1_path, image2_path):
        image1 = Image.open(image1_path)
        image2 = Image.open(image2_path)
        diff = ImageChops.difference(image1, image2)
        if diff.getbbox() is None:
            INFO("The two images are identical")
            return True
        else:
            INFO("The two images differ")
            return False
    # Compare the baseline snapshot with the current snapshot
    def contrast_snapshot(self, url, expected_screenshot, current_screenshot, clip):
        # If no baseline snapshot exists yet, create one
        if not os.path.exists(expected_screenshot):
            self.save_screenshot(url, expected_screenshot, clip)
            INFO(f"Baseline snapshot saved: {expected_screenshot}")
        # Take the current snapshot
        self.save_screenshot(url, current_screenshot, clip)
        INFO(f"Current snapshot saved: {current_screenshot}")
        # Return the comparison result
        result = self.compare_images(current_screenshot, expected_screenshot)
        return result

    # Compare the baseline snapshot with the current snapshot, masking dynamic elements
    def contrast_snapshot_mask(self, url, expected_screenshot, current_screenshot, element, clip):
        # If no baseline snapshot exists yet, create one
        if not os.path.exists(expected_screenshot):
            self.save_screenshot_mask(url, expected_screenshot, element, clip)
            INFO(f"Baseline snapshot saved: {expected_screenshot}")
        # Take the current snapshot
        self.save_screenshot_mask(url, current_screenshot, element, clip)
        INFO(f"Current snapshot saved: {current_screenshot}")
        # Return the comparison result
        result = self.compare_images(current_screenshot, expected_screenshot)
        return result

apilink = APILink()
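
# --- Usage sketch (added for illustration, not part of the original module) ---
# A minimal hytest test case built on apilink; the URL, selector list, and
# screenshot file names below are hypothetical placeholders, and CHECK_POINT
# is assumed to be available via `from hytest import *`.
class HomepageSnapshot:

    def setup(self):
        apilink.setup()

    def teardown(self):
        apilink.teardown()

    def teststeps(self):
        url = 'https://example.com'  # placeholder URL
        # The page should respond with HTTP 200
        CHECK_POINT('page returns status 200', apilink.open_url_status(url))
        # Mask a (hypothetical) dynamic banner before comparing snapshots
        result = apilink.contrast_snapshot_mask(
            url,
            'expected_home.png',   # baseline snapshot (created on first run)
            'current_home.png',    # snapshot taken by this run
            ['.banner'],           # selectors of elements to mask
            None,                  # no clip region
        )
        CHECK_POINT('snapshot matches the baseline', result)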