client_mysql.py

# coding:utf-8
import time
from a2s.tools import json_serialize, json_deserialize
from a2s.a2s_client import a2s_execute
from docs.config import ReluMongodb
from util.mogodb_helper import MongoDBInterface
from pymongo import MongoClient
from util.mysql_tool import MysqlUtil
import json
from datetime import datetime, timedelta
from elasticsearch import Elasticsearch

ReluClient = MongoDBInterface(ReluMongodb)

# Evaluation service configuration
a2s_ip = "172.20.100.235:9090"
topic = "quality_bid"
# Topic used for local testing
# topic = "test_quality_bid"
timeout = 180

# # ES connection config (local test)
# es_host = "http://127.0.0.1:19800"
# es_username = "jianyuGr"
# es_password = "we3g8glKfe#"
# Production ES
es_host = "http://172.17.4.184:19908"
es_username = "qyfw_es_2"
es_password = "Khfdals33#"

# Initialize the Elasticsearch client (basic auth)
es_client = Elasticsearch(es_host, http_auth=(es_username, es_password), retry_on_timeout=True)
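
# Note: `http_auth` matches elasticsearch-py 7.x; on the 8.x client the keyword
# was renamed, so the equivalent call would be (a sketch, assuming an 8.x client):
# es_client = Elasticsearch(es_host, basic_auth=(es_username, es_password), retry_on_timeout=True)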


# Run the evaluation
def start_quality(data: dict, rules_id: int, a2s_ip, topic, timeout, retry=3):
    # SSL is not used here, so the channel is insecure
    row = {"data": data, "rules_id": rules_id}
    bytes_data = json_serialize(row)
    for t in range(retry):
        print("topic", topic)
        try:
            resp_data = a2s_execute(a2s_ip, topic, timeout, bytes_data)
            if resp_data is None:
                continue
            result = json_deserialize(resp_data)
            return result
        except Exception as e:
            print(e)
    # All retries failed or returned nothing: fall back to an empty result
    return {}
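
# Example call (a sketch; the payload mirrors the `row` structure above and the
# response shape matches how batch_load_data() consumes it below):
#   result = start_quality({"title": "..."}, rules_id, a2s_ip, topic, timeout)
#   # expected: {"code": 200, "data": {...}} on success, or {} after all retries fail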


# Look up the rule ID
def get_rule(company, version):
    rule_id = ReluClient.find_rule_by_company(ReluMongodb["col"], company, version)
    return rule_id


def find_error_id(conn, cleaned_key, sub_value):
    """
    Look up the id of an entry in error_dict
    """
    query = """SELECT id FROM error_dict WHERE fields = %s AND error = %s"""
    params = (cleaned_key, sub_value)
    result = MysqlUtil.query_data(conn, query, params)
    # Result shape, e.g.: [(10,)]
    # Make sure the query actually matched something
    if not result:
        print(f"Error: No matching record found for fields={cleaned_key}, error={sub_value}")
        return None  # or return a default value, depending on requirements
    record = result[0][0]
    return record


def insert_batch_data(conn, params):
    """
    Execute the batch insert (duplicates on the key are ignored)
    """
    query = """INSERT IGNORE INTO bid_analysis (mongoid, toptype, subtype, site, spidercode, channel, comeintime, area, city, district, score, error_type, create_time)
               VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)"""
    MysqlUtil.insert_data(conn, query, params)


def insert_dynamic_error_field(conn, cleaned_key, error_ids, mongoid):
    """
    Dynamically write error_ids into the matching {cleaned_key}_error column
    """
    # Build the UPDATE statement for the dynamically chosen {cleaned_key}_error column
    query = f"""
        UPDATE bid_analysis
        SET {cleaned_key}_error = %s
        WHERE mongoid = %s
    """
    # Join the error_ids into one delimited string
    error_ids_str = ','.join(map(str, error_ids))
    params = (error_ids_str, mongoid)
    MysqlUtil.update_data(conn, query, params)
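
# These two helpers are not invoked in this file; a sketch of how they might be
# combined downstream (`sub_errors` and the field name "title" are assumed here):
#   error_ids = [find_error_id(conn, "title", e) for e in sub_errors]
#   insert_dynamic_error_field(conn, "title", [i for i in error_ids if i is not None], mongoid)
# Caution: `cleaned_key` is interpolated into the SQL as a column name, so it must
# come from a trusted whitelist of field names, never from raw input.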


def get_last_processed_id():
    """
    Read the last processed max ID (e.g. from a database or file)
    """
    # Here the checkpoint ID is read from a file; it could equally come from a
    # database, Redis, or similar storage
    try:
        with open('docs/last_processed_id_mysql.txt', 'r') as f:
            last_id = f.read().strip()
            if last_id:
                return last_id
            else:
                return None
    except FileNotFoundError:
        return None


def save_last_processed_id(last_id):
    """
    Save the max ID processed so far, for recovery
    """
    with open('docs/last_processed_id_mysql.txt', 'w') as f:
        f.write(str(last_id))


def clear_last_processed_id():
    """
    Truncate the last_processed_id_mysql.txt checkpoint file
    """
    open('docs/last_processed_id_mysql.txt', 'w').close()
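
# Checkpoint lifecycle (see batch_load_data below): the max _id of each batch is
# saved once the batch completes, and the file is cleared when a run drains all
# hits, so a restart resumes from the last finished batch.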


def batch_load_data():
    """
    Batch data-quality check
    """
    # Today's date as a string
    today_date = datetime.now().strftime("%Y-%m-%d")
    # Timestamp for today at 08:00:00
    start_date = int(datetime.strptime(f"{today_date} 08:00:00", "%Y-%m-%d %H:%M:%S").timestamp())
    # Timestamp for today at 12:00:00
    end_date = int(datetime.strptime(f"{today_date} 12:00:00", "%Y-%m-%d %H:%M:%S").timestamp())
    # Rule lookup, keyed on the required fields: company name (user ID) and version
    rules_id = get_rule("北京剑鱼信息技术有限公司", "v1.2")
    print(rules_id)
    # Initialize the MySQL connection
    conn = MysqlUtil.connect_to_mysql(host='172.20.45.129', port='4000', user='root',
                                      password='=PDT49#80Z!RVv52_z', database='quality')
    # Resume from the last processed ID, or start from scratch if there is none
    last_processed_id = get_last_processed_id()
    print(f"Last processed ID: {last_processed_id}")
    # Build the ES query
    es_query = {
        "query": {
            "bool": {
                "filter": [
                    {
                        "range": {
                            "comeintime": {
                                "gte": start_date,
                                "lt": end_date
                            }
                        }
                    }
                ]
            }
        },
        "sort": [
            {"_id": {"order": "asc"}}  # stable order for search_after pagination
        ],
        "size": 100  # number of documents per batch
    }
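    # `search_after` values must line up one-to-one with the fields in "sort";
    # since the query sorts on _id alone, the cursor is a single-element list.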
    # If there is a checkpoint ID, resume pagination with `search_after`
    if last_processed_id:
        es_query["search_after"] = [last_processed_id]  # must be the string form of the _id
    try:
        # Fetch and process the data in batches
        response = es_client.search(index="bidding", body=es_query)
        hits = response['hits']['hits']
        while hits:
            print("---- batch start ----")
            max_id = None
            for hit in hits:
                item = hit["_source"]
                print("------ document start --------")
                max_id = hit["_id"]
                print(f"Processing document: {max_id}")
                item["_id"] = str(hit["_id"])
                # Run the quality check
                result = start_quality(item, rules_id, a2s_ip, topic, timeout)
                print(result)
                code = result.get("code")
                if code != 200:
                    # The evaluation failed for this document, skip it
                    continue
                data = result.get("data", {})
                # Insert the result into MySQL
                toptype = item.get("toptype", "")
                subtype = item.get("subtype", "")
                site = item.get("site", "")
                spidercode = item.get("spidercode", "")
                channel = item.get("channel", "")
                comeintime = item.get("comeintime", "")
                comeintime = datetime.fromtimestamp(comeintime) if comeintime else None  # guard against a missing timestamp
                area = item.get("area", "")
                city = item.get("city", "")
                district = item.get("district", "")
                score = item.get("score", "")
                error_type_data = json.dumps(data)
                create_time = today_date
                params = (item["_id"], toptype, subtype, site, spidercode, channel, comeintime,
                          area, city, district, score, error_type_data, create_time)
                insert_batch_data(conn, params)
                print("------ document end ------")
            # Save the max ID processed in this batch
            if max_id:
                save_last_processed_id(max_id)
                print(f"Saved max processed ID: {max_id}")
            print("---- batch done ----")
            # Fetch the next batch: the last _id of this batch becomes the cursor
            search_after = hits[-1]["_id"]
            es_query["search_after"] = [search_after]  # keep the _id type consistent
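            # Caveat: search_after without a point-in-time (PIT) snapshot can skip or
            # duplicate documents if the index changes mid-scan; for a fixed morning
            # window like this one that risk is usually acceptable.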
            response = es_client.search(index="bidding", body=es_query)
            hits = response['hits']['hits']
            # When there is no more data, clear the checkpoint and stop
            if not hits:
                print("No more data; batch processing finished")
                clear_last_processed_id()
                break
        print("Processing complete")
    except Exception as e:
        print(f"Error: {e}")
        time.sleep(10)
    finally:
        # `is_connected()` follows mysql-connector-python's connection API
        if conn.is_connected():
            conn.close()  # make sure the connection is closed
            print("MySQL connection closed")


if __name__ == '__main__':
    batch_load_data()