# linkapi.py
import os

# Star import kept first among third-party imports to preserve the original
# name-resolution order from hytest.
from hytest import *
from bs4 import BeautifulSoup
from PIL import Image, ImageChops
from playwright.sync_api import sync_playwright
import requests
  6. class APILink():
  7. #打开链接,返回title
  8. def obtain_url_title(self,url):
  9. response = requests.get(url)
  10. response.encoding = 'utf-8' # 设置编码为gbk
  11. soup = BeautifulSoup(response.text, 'html.parser')
  12. title = soup.title.string
  13. return title
  14. #打开链接,status=200,返回true
  15. def open_url_status(self,url):
  16. response = requests.get(url,headers = {
  17. 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36'})
  18. status_code = response.status_code
  19. if status_code==200:
  20. return True
  21. else:
  22. return False
  23. #打开链接,返回页面底部某个值
  24. def obtain_url_bottom(self,url,loc):
  25. response = requests.get(url)
  26. response.encoding = 'utf-8' # 设置编码为gbk
  27. soup = BeautifulSoup(response.text, 'html.parser')
  28. # 查找元素并获取值
  29. element = soup.find('div', attrs={'class': loc})
  30. # element = soup.find(loc)
  31. element_value = element.text
  32. return element_value
  33. def setup(self):
  34. # 初始化 Playwright
  35. self.playwright = sync_playwright().start()
  36. self.browser = self.playwright.chromium.launch(headless=True)
  37. self.page = self.browser.new_page()
  38. def teardown(self):
  39. # 关闭浏览器
  40. self.browser.close()
  41. self.playwright.stop()
  42. def obtain_element_text(self,url,element):
  43. self.page.goto(url)
  44. # 等待元素出现
  45. self.page.wait_for_selector(element)
  46. # 查找元素
  47. element_handle = self.page.locator(element)
  48. # 获取元素的文本或属性值
  49. element_text = element_handle.text_content()
  50. # element_attribute = element_handle.get_attribute('属性名')
  51. # 设置超时时间
  52. self.page.wait_for_timeout(3000)
  53. return element_text
  54. # print(f'元素的属性值: {element_attribute}')
  55. #网页截图模糊遮罩方法,适用于网页有动态元素,进行遮罩比较
  56. def save_screenshot_mask(self,url, output_path, elements, clip=None):
  57. locs = []
  58. self.page.goto(url)
  59. for element in elements:
  60. loc = self.page.locator(element)
  61. locs.append(loc)
  62. self.page.screenshot(path=output_path,mask=locs, clip=clip)
  63. #网页直接截图方法,适用于网页元素不变,可直接截图比较
  64. def save_screenshot(self,url, output_path, clip=None):
  65. self.page.goto(url)
  66. self.page.screenshot(path=output_path, clip=clip)
  67. def compare_images(self, image1_path, image2_path):
  68. image1 = Image.open(image1_path)
  69. image2 = Image.open(image2_path)
  70. diff = ImageChops.difference(image1, image2)
  71. if diff.getbbox() is None:
  72. INFO("两张图片完全相同")
  73. return True
  74. else:
  75. INFO("两张图片存在差异")
  76. return False
  77. #对比样本快照与当前快照
  78. def contrast_snapshot(self,url,expected_screenshot,current_screenshot,clip):
  79. # 如果不存在样本照片,生成样本照片
  80. if not os.path.exists(expected_screenshot):
  81. self.save_screenshot(url, expected_screenshot,clip)
  82. INFO(f"样本快照已保存:{expected_screenshot}")
  83. #生成对比照片
  84. apilink.save_screenshot(url, current_screenshot,clip)
  85. INFO(f"对比快照已保存:{expected_screenshot}")
  86. #返回对比结果
  87. result = apilink.compare_images(current_screenshot, expected_screenshot)
  88. return result
  89. #对比样本快照与当前快照2
  90. def contrast_snapshot_mask(self,url,expected_screenshot,current_screenshot,element,clip):
  91. # 如果不存在样本照片,生成样本照片
  92. if not os.path.exists(expected_screenshot):
  93. self.save_screenshot_mask(url, expected_screenshot,element,clip)
  94. INFO(f"样本快照已保存:{expected_screenshot}")
  95. #生成对比照片
  96. apilink.save_screenshot_mask(url, current_screenshot,element,clip)
  97. INFO(f"对比快照已保存:{expected_screenshot}")
  98. #返回对比结果
  99. result = apilink.compare_images(current_screenshot, expected_screenshot)
  100. return result
# Module-level singleton shared by importing test modules.
apilink = APILink()