maguopeng 3 years ago
Parent
Commit
fff625a8a0
35 changed files with 2988 additions and 50 deletions
  1. 8 0
      .idea/.gitignore
  2. 170 0
      Details/detail_cookie.py
  3. 117 0
      Details/detail_firefox.py
  4. 164 0
      Details/details.py
  5. 3 18
      FworkSpider/items/spider_item.py
  6. 2 3
      FworkSpider/mongo_pipeline.py
  7. 14 25
      FworkSpider/setting.py
  8. 0 0
      FworkSpider/untils/clean_html/__init__.py
  9. 131 0
      FworkSpider/untils/clean_html/defaults.py
  10. 2 3
      FworkSpider/untils/cookie_pool.py
  11. 8 1
      FworkSpider/untils/create_menus.py
  12. 0 0
      spiders/__init__.py
  13. 0 0
      spiders/李宗泽/__init__.py
  14. 0 0
      spiders/马国鹏/__init__.py
  15. 88 0
      spiders/马国鹏/中国南方航空采购招标网.py
  16. 75 0
      spiders/马国鹏/中国石化物质采购电子商务平台.py
  17. 98 0
      spiders/马国鹏/中泰集团招标投标网.py
  18. 133 0
      spiders/马国鹏/中铁鲁班商务网.py
  19. 105 0
      spiders/马国鹏/亿企优采.py
  20. 76 0
      spiders/马国鹏/华润置地华东大区网站.py
  21. 120 0
      spiders/马国鹏/南通市如皋市政府采购网上商城.py
  22. 101 0
      spiders/马国鹏/天津市政府采购网.py
  23. 137 0
      spiders/马国鹏/广东省政府采购网.py
  24. 75 0
      spiders/马国鹏/广发证券采购平台.py
  25. 110 0
      spiders/马国鹏/杭州市公共资源交易.py
  26. 99 0
      spiders/马国鹏/武汉市公共资源交易平台.py
  27. 132 0
      spiders/马国鹏/湖北省政府采购网.py
  28. 113 0
      spiders/马国鹏/滁州市人民政府网.py
  29. 92 0
      spiders/马国鹏/玖隆在线_交易公告.py
  30. 102 0
      spiders/马国鹏/玖隆在线_招标计划公示.py
  31. 198 0
      spiders/马国鹏/甘肃政府采购网.py
  32. 106 0
      spiders/马国鹏/福建省政府采购网.py
  33. 75 0
      spiders/马国鹏/苏州弘创招投标代理有限公司.py
  34. 240 0
      spiders/马国鹏/贵阳市公共资源交易监管网.py
  35. 94 0
      spiders/马国鹏/黔云招采电子招标采购交易平台.py

+ 8 - 0
.idea/.gitignore

@@ -0,0 +1,8 @@
+# Default ignored files
+/shelf/
+/workspace.xml
+# Datasource local storage ignored files
+/../../../../../../../:\Users\topnet\Desktop\fuwuq\crawlab_feader\.idea/dataSources/
+/dataSources.local.xml
+# Editor-based HTTP Client requests
+/httpRequests/

+ 170 - 0
Details/detail_cookie.py

@@ -0,0 +1,170 @@
+# -*- coding: utf-8 -*-
+"""
+Created on 2021-12-13 13:25:15
+---------
+@summary: Detail-page handling scheme that generates cookies with a limited validity period and reuses them; no IP restriction by default
+---------
+@author: 马国鹏
+"""
+import sys
+sys.path.append('/app/spiders/sword_feapder/FworkSpider')
+import feapder
+from feapder.utils.tools import wechat_warning
+import execjs
+from items.spider_item import DataBakItem, MgpListItem
+from feapder.db.mongodb import MongoDB
+
+from untils.cookie_pool import PageCookiePool
+import copy
+
+class Details(feapder.Spider):
+    _to_db = None
+    db_name = 'mgp_list'
+    send_list = []
+    # 定义mongo链接
+    @property
+    def to_db(self):
+        if not self._to_db:
+            self._to_db = MongoDB()
+        return self._to_db
+
+    def start_requests(self):
+        while True:
+            data_lsit = self.to_db.find(self.db_name,{"parser_name":"details_cookie"},sort={"date":-1})
+            for item in data_lsit:
+                request_params = item.get("request_params")
+                down_mid = copy.copy(item.get("down_mid"))
+                key = down_mid.get("key")
+                page_url = down_mid.get("page_url")
+                cookie_pool = PageCookiePool(redis_key=key, page_url=page_url, selenium=False)
+                down_mid["cookie_pool"] = cookie_pool
+
+                if item.get("ex_python"):
+                    exec(item.get("ex_python"))
+
+                yield feapder.Request(url=item.get("parse_url"),item=item.get("item"),
+                                      deal_detail=item.get("deal_detail"),**request_params,
+                                      callback=eval(item.get("parse")),base_info=item,down_mid=item.get("down_mid"))
+                self.to_db.delete(self.db_name,item)
+            break
+
+
+
+    def detail_get(self,request,response):
+        '''Handle detail pages returned as HTML'''
+        if request.down_mid.get("text") and request.down_mid.get("text") in response.text:
+            '''Failure handling: when "text" is set and found in response.text, delete the current cookie and generate a new one'''
+            down_mid = copy.copy(request.down_mid)
+            key = down_mid.get("key")
+            page_url = down_mid.get("page_url")
+            cookie_pool = PageCookiePool(redis_key=key, page_url=page_url, selenium=False)
+            cookie_pool.del_cookie(request.cookies)
+            yield request
+        if response.code in (request.down_mid.get("code")):
+            '''Failure handling: when response.code is not an accepted status code, delete the current cookie and generate a new one'''
+            down_mid = copy.copy(request.down_mid)
+            key = down_mid.get("key")
+            page_url = down_mid.get("page_url")
+            cookie_pool = PageCookiePool(redis_key=key, page_url=page_url, selenium=False)
+            cookie_pool.del_cookie(request.cookies)
+            yield request
+        items = request.item
+        list_item = DataBakItem()
+        for key in items:
+            list_item.__setitem__(key,items[key])
+        html = ''
+        for xpath in request.deal_detail:
+            html = response.xpath(xpath).extract_first()  # 标书详细内容
+            if html is not None:
+                break
+
+        list_item.contenthtml = html
+        yield list_item
+
+    def detail_json(self,request,response):
+        '''Handle detail pages returned as JSON or other formats'''
+        if request.down_mid.get("text") and request.down_mid.get("text") in response.text:
+            '''Failure handling: when "text" is set and found in response.text, delete the current cookie and generate a new one'''
+            down_mid = copy.copy(request.down_mid)
+            key = down_mid.get("key")
+            page_url = down_mid.get("page_url")
+            cookie_pool = PageCookiePool(redis_key=key, page_url=page_url, selenium=False)
+            cookie_pool.del_cookie(request.cookies)
+            yield request
+        if response.code in (request.down_mid.get("code")):
+            '''Failure handling: when response.code is not an accepted status code, delete the current cookie and generate a new one'''
+            down_mid = copy.copy(request.down_mid)
+            key = down_mid.get("key")
+            page_url = down_mid.get("page_url")
+            cookie_pool = PageCookiePool(redis_key=key, page_url=page_url, selenium=False)
+            cookie_pool.del_cookie(request.cookies)
+            yield request
+        items = request.item
+        list_item = DataBakItem()
+        for key in items:
+            list_item.__setitem__(key,items[key])
+        html = ''
+        exec(request.deal_detail)
+
+        list_item.contenthtml = html
+        yield list_item
+
+    def failed_request(self, request, response):
+        '''After the request/parse retry limit is exceeded, write the original task back to mongo and update its failed counter'''
+        if response is None:
+            code = 0
+        else:
+            code = response.status_code
+        err_dic = {"200": "analysis", "400": "download", "500": "servers", "300": "download"}
+        if 200 <= code < 300:
+            err = 'analysis'
+        elif 300 <= code < 400:
+            err = 'download'
+        elif 400 <= code < 500:
+            err = 'download'
+        elif 500 <= code:
+            err = "servers"
+        else:
+            err = "timeout"
+        mgp = MgpListItem()
+        mgp.code = code
+        mgp.error = err
+        items = request.base_info
+        for key in items:
+            mgp.__setitem__(key, items[key])
+        mgp.failed += 1
+        if mgp.pri is None:
+            mgp.pri = 0
+
+        if mgp.pri > 5:
+            if mgp.failed in (10, 30, 50, 100, 200) or mgp.failed > 200:
+                if self.send_list.count(mgp.item.get("site")) == mgp.pri - 5:
+                    '''
+                    Send an alert according to the spider's priority'''
+                    info = f'''`
+        您的爬虫出现超<font color="#FF0000">{mgp.failed}</font>次请求、解析失败的任务。
+        > **爬虫名称:** {mgp.item.get("site")}
+        > **栏目名称:** {mgp.item.get("channel")}
+        > **爬虫代码:** {mgp.item.get("spidercode")}
+        > **爬虫等级:** {mgp.pri}
+        > **所属管理人员:** {mgp.author}
+        请登录剑鱼爬虫管理平台查看详情。
+        `'''
+                    wechat_warning(info)
+                    self.send_list.append(mgp.item.get("site"))
+        yield mgp
+
+
+    def end_callback(self):
+        print("爬虫结束")
+    def download_midware(self, request):
+        down_mid = request.down_mid
+        key = down_mid.get("key")
+        page_url = down_mid.get("page_url")
+        cookie_pool = PageCookiePool(redis_key=key, page_url=page_url, selenium=False)
+        request.cookies = cookie_pool.get_cookie()
+        return request
+
+
+if __name__ == "__main__":
+    Details(redis_key="magp:details:cookie").start()
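
For orientation, here is a minimal sketch of the task document a list spider would have to insert into the mgp_list collection for this cookie-based handler to pick up. The field names follow the code above; every concrete value below is an illustrative assumption, not part of the commit:

    task = {
        "parser_name": "details_cookie",                  # routes the task to this spider
        "parse": "self.detail_get",                       # callback name, resolved via eval()
        "parse_url": "https://example.com/detail/1.html", # hypothetical detail-page URL
        "deal_detail": ['//div[@class="content"]'],       # xpaths tried in order for the body
        "item": {"title": "sample notice", "href": "https://example.com/detail/1.html"},
        "request_params": {},                             # extra kwargs passed to feapder.Request
        "down_mid": {
            "key": "cookiepool:example",                  # redis key of the PageCookiePool
            "page_url": "https://example.com/list",       # page used to generate fresh cookies
            "text": "access denied",                      # marker text that means the cookie failed
            "code": [403, 521],                           # status codes that trigger cookie renewal
        },
        "failed": 0,
        "date": 1639400000,
    }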

+ 117 - 0
Details/detail_firefox.py

@@ -0,0 +1,117 @@
+# -*- coding: utf-8 -*-
+"""
+Created on 2021-12-13 13:25:15
+---------
+@summary:
+---------
+@author: 马国鹏
+"""
+import sys
+sys.path.append('/app/spiders/sword_feapder/FworkSpider')
+import feapder
+from feapder.utils.tools import wechat_warning
+import execjs
+from items.spider_item import DataBakItem, MgpListItem
+from feapder.db.mongodb import MongoDB
+
+
+
+class FirefoxDetails(feapder.Spider):
+    _to_db = None
+    db_name = 'mgp_list'
+    send_list = []
+    # 定义mongo链接
+    @property
+    def to_db(self):
+        if not self._to_db:
+            self._to_db = MongoDB()
+        return self._to_db
+
+    def start_requests(self):
+        while True:
+            data_lsit = self.to_db.find(self.db_name,{"parser_name":"details_firefox"},sort={"date":-1})
+            print(data_lsit)
+            for item in data_lsit:
+                print(item)
+                request_params = item.get("request_params")
+                if item.get("ex_python"):
+                    exec(item.get("ex_python"))
+
+                yield feapder.Request(url=item.get("parse_url"),item=item.get("item"),
+                                      deal_detail=item.get("deal_detail"),**request_params,
+                                      callback=eval(item.get("parse")),base_info=item,render=True,
+                                      render_time=item.get("render_time"))
+                self.to_db.delete(self.db_name,item)
+            break
+
+    def detail_get(self,request,response):
+        print(response.text)
+        items = request.item
+        # print(items)
+        list_item = DataBakItem()
+        for key in items:
+            list_item.__setitem__(key,items[key])
+        html = ''
+        for xpath in request.deal_detail:
+            html = response.xpath(xpath).extract_first()  # 标书详细内容
+            if html is not None:
+                break
+        list_item.contenthtml = html
+        yield list_item
+
+    def failed_request(self, request, response):
+        '''After the request/parse retry limit is exceeded, write the original task back to mongo and update its failed counter'''
+        if response is None:
+            code = 0
+        else:
+            code = response.status_code
+        err_dic = {"200": "analysis", "400": "download", "500": "servers", "300": "download"}
+        if 200 <= code < 300:
+            err = 'analysis'
+        elif 300 <= code < 400:
+            err = 'download'
+        elif 400 <= code < 500:
+            err = 'download'
+        elif 500 <= code:
+            err = "servers"
+        else:
+            err = "timeout"
+        mgp = MgpListItem()
+        mgp.code = code
+        mgp.error = err
+        items = request.base_info
+        for key in items:
+            mgp.__setitem__(key, items[key])
+        mgp.failed += 1
+        if mgp.pri is None:
+            mgp.pri = 0
+
+        if mgp.pri > 5:
+            if mgp.failed in (10, 30, 50, 100, 200) or mgp.failed > 200:
+                if self.send_list.count(mgp.item.get("site")) == mgp.pri - 5:
+                    '''
+                    Send an alert according to the spider's priority'''
+                    info = f'''`
+        您的爬虫出现超<font color="#FF0000">{mgp.failed}</font>次请求、解析失败的任务。
+        > **爬虫名称:** {mgp.item.get("site")}
+        > **栏目名称:** {mgp.item.get("channel")}
+        > **爬虫代码:** {mgp.item.get("spidercode")}
+        > **爬虫等级:** {mgp.pri}
+        > **所属管理人员:** {mgp.author}
+        请登录剑鱼爬虫管理平台查看详情。
+        `'''
+                    wechat_warning(info)
+                    self.send_list.append(mgp.item.get("site"))
+        yield mgp
+
+
+    def end_callback(self):
+        print("爬虫结束")
+        # wechat_warning(f"爬虫名称 爬虫结束\n共抓取{self.count}次详情页数据")
+    # def download_midware(self, request):
+    #     request.proxies = self.prox_pool.get()
+    #     return request
+
+
+if __name__ == "__main__":
+    FirefoxDetails(redis_key="magp:details:firefox").start()

+ 164 - 0
Details/details.py

@@ -0,0 +1,164 @@
+# -*- coding: utf-8 -*-
+"""
+Created on 2021-12-13 13:25:15
+---------
+@summary:
+---------
+@author: 马国鹏
+"""
+import sys
+sys.path.append('/app/spiders/sword_feapder/FworkSpider')
+import time
+from urllib.parse import urljoin
+
+import feapder
+from feapder.utils.tools import wechat_warning
+import execjs
+from items.spider_item import DataBakItem, MgpListItem
+from feapder.db.mongodb import MongoDB
+from untils.attachment import AttachmentDownloader
+
+
+class Details(feapder.Spider):
+    _to_db = None
+    db_name = 'mgp_list'
+    send_list = []
+    # 定义mongo链接
+    @property
+    def to_db(self):
+        if not self._to_db:
+            self._to_db = MongoDB()
+        return self._to_db
+
+    def start_requests(self):
+        while True:
+            data_lsit = self.to_db.find(self.db_name,{"parser_name":"details","failed":0},sort={"failed":1},limit=100)
+            for item in data_lsit:
+                print(item.get("item"))
+                request_params = item.get("request_params")
+                if item.get("js"):
+                    eval(item.get("js"))
+                if item.get("ex_python"):
+                    exec(item.get("ex_python"))
+                if item.get("proxies"):
+
+                    yield feapder.Request(url=item.get("parse_url"),item=item.get("item"),files_info=item.get("files"),
+                                          deal_detail=item.get("deal_detail"),
+                                          callback=eval(item.get("parse")),base_info=item,**request_params)
+                else:
+                    yield feapder.Request(url=item.get("parse_url"), item=item.get("item"), files_info=item.get("files"),
+                                          deal_detail=item.get("deal_detail"),
+                                          callback=eval(item.get("parse")), base_info=item,proxies=False,**request_params)
+
+                self.to_db.delete(self.db_name,item)
+            break
+
+    def detail_get(self,request,response):
+        items = request.item
+        list_item = DataBakItem()
+        for key in items:
+            list_item.__setitem__(key,items[key])
+        html = ''
+        for xpath in request.deal_detail:
+            html = response.xpath(xpath).extract_first()  # 标书详细内容
+            if html is not None:
+                break
+
+        list_item.contenthtml = html
+        if request.files_info:
+            files_info = request.files_info
+            files =  response.xpath(files_info.get("list_xpath"))
+            if len(files)>0:
+                attachments = {}
+                for index,info in enumerate(files):
+                    file_url = info.xpath(files_info.get("url_xpath")).extract_first()
+                    file_name = info.xpath(files_info.get("name_xpath")).extract_first()
+                    if files_info.get("host"):
+                        file_url = urljoin(files_info.get("host"), file_url)
+                    if not files_info.get("file_type"):
+                        file_type = file_url.split("?")[0].split(".")[-1].lower()
+                    elif files_info.get("file_type")=='file_name':
+                        file_type = file_name.split("?")[0].split(".")[-1].lower()
+                    else:
+                        file_type = files_info.get("file_type")
+                    if file_type in files_info.get("files_type") and files_info.get("url_key") in file_url:
+                        attachment = AttachmentDownloader().fetch_attachment(
+                            file_name=file_name,file_type=file_type,download_url=file_url,
+                            enable_proxy=False)
+                        attachments[str(len(attachments)+1)] = attachment
+                if len(attachments)==0:
+                    pass
+                else:
+                    list_item.projectinfo={"attachments":attachments}
+
+        yield list_item
+
+    def detail_json(self,request,response):
+        items = request.item
+        list_item = DataBakItem()
+        for key in items:
+            list_item.__setitem__(key,items[key])
+        exec(request.deal_detail)
+
+        yield list_item
+    def detail_post(self,request,response):
+        items = request.item
+        list_item = DataBakItem()
+        for key in items:
+            list_item.__setitem__(key,items[key])
+        exec(request.deal_detail)
+
+        yield list_item
+
+    def failed_request(self, request, response):
+        '''After the request/parse retry limit is exceeded, write the original task back to mongo and update its failed counter'''
+        if response is None:
+            code = 0
+        else:
+            code = response.status_code
+        err_dic = {"200":"analysis","400":"download","500":"servers","300":"download"}
+        if 200<=code<300:
+            err = 'analysis'
+        elif 300<=code<400:
+            err = 'download'
+        elif 400<=code<500:
+            err = 'download'
+        elif 500<=code:
+            err = "servers"
+        else:
+            err = "timeout"
+        mgp = MgpListItem()
+        mgp.code=code
+        mgp.error=err
+        items = request.base_info
+        for key in items:
+            mgp.__setitem__(key,items[key])
+        mgp.failed +=1
+        if mgp.pri is None:
+            mgp.pri = 0
+
+        if mgp.pri > 5:
+            if mgp.failed in(10,30,50,100,200)or mgp.failed>200:
+                if self.send_list.count(mgp.item.get("site")) == mgp.pri - 5:
+                    '''
+                    Send an alert according to the spider's priority'''
+                    info= f'''`
+        您的爬虫出现超<font color="#FF0000">{mgp.failed}</font>次请求、解析失败的任务。
+        > **爬虫名称:** {mgp.item.get("site")}
+        > **栏目名称:** {mgp.item.get("channel")}
+        > **爬虫代码:** {mgp.item.get("spidercode")}
+        > **爬虫等级:** {mgp.pri}
+        > **所属管理人员:** {mgp.author}
+        请登录剑鱼爬虫管理平台查看详情。
+        `'''
+                    wechat_warning(info)
+                    self.send_list.append(mgp.item.get("site"))
+        yield mgp
+
+    def end_callback(self):
+        print("爬虫结束")
+
+
+
+if __name__ == "__main__":
+    Details(redis_key="magp:details1").start()
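
The attachment-download branch in detail_get above is driven by a "files" descriptor stored on the task. A minimal sketch of its shape, with key names taken from the code and placeholder values that are assumptions rather than part of the commit:

    files_info = {
        "list_xpath": '//div[@class="attachments"]/a',   # nodes, one per attachment
        "url_xpath": './@href',                          # xpath (relative to each node) for the file URL
        "name_xpath": './text()',                        # xpath (relative to each node) for the file name
        "files_type": ('pdf', 'doc', 'zip'),             # extensions worth downloading
        "file_type": 'file_name',                        # take the extension from the name instead of the URL
        "url_key": 'http',                               # substring a valid download URL must contain
        "host": 'https://example.com',                   # optional base joined onto relative URLs
    }

Each matching file is fetched through AttachmentDownloader().fetch_attachment(...) and the results are stored under projectinfo["attachments"] on the saved item.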

+ 3 - 18
FworkSpider/items/spider_item.py

@@ -7,6 +7,7 @@ from crawlab import save_item
 from datetime import datetime
 import os
 from feapder import setting
+
 class DataBakItem(Item):
 
     def __init__(self):
@@ -91,15 +92,7 @@ class MgpListItem(Item):
         # self.error_info =
     def pre_to_db(self):
         # 生成入库时间戳(秒级), 定义为long型
-        self.author = setting.author.get(os.path.basename(os.getcwd()))
-        save_item({"site": self.item.get("site"),"error":True,"author":self.author,
-                   "spidercode":self.item.get("spidercode"),"channel":self.item.get("channel"),"state_code":"code",
-                   "href":self.item.get("href"),"error_info":self.error})
-        '''
-        "site": "站点名", "error_type": "错误类型(detail/list/content/)", "author": "负责人",
-         "spidercode": "", "channel": "", error_count:"错误数量"
-         '''
-
+        self.author = os.path.basename(os.getcwd())
 class ListItem(Item):
     def __init__(self):
         self.spidercode = ""  # 爬虫代码(编辑器爬虫平台定义)
@@ -110,16 +103,8 @@ class ListItem(Item):
         self.rel_count = 0
 
     def pre_to_db(self):
+        time.sleep(0.1)
         self.author = setting.author.get(os.path.basename(os.getcwd()))
         if self.author is None:
             self.author = os.path.basename(os.getcwd())
         self.runtime = get_current_date(date_format="%Y-%m-%d")
-
-
-
-class ErrorInfoItem(Item):
-    def __init__(self):
-        self.parmars = ""  # 需要调用的方法名称
-        self.item = "111"  # 传过来的参数
-        self.parser_name = "111"  # 处理详情页的爬虫名
-        self.date = time.time()

+ 2 - 3
FworkSpider/mongo_pipeline.py

@@ -40,6 +40,7 @@ class MongoPipeline(BasePipeline):
                  若False,不会将本批数据入到去重库,以便再次入库
         """
         try:
+            print(table)
             add_count = self.to_db.add_batch(coll_name=table, datas=items)
             for item in items:
                 dedup = Dedup(Dedup.BloomFilter)
@@ -50,10 +51,8 @@ class MongoPipeline(BasePipeline):
                 "共导出 %s 条数据到 %s,  新增 %s条, 重复 %s 条"
                 % (datas_size, table, add_count, datas_size - add_count)
             )
-            # wechat_warning(f"{site}  数据导报\n共插入 {datas_size} 条数据到 {table}")
-            # for i in range(add_count):
             if table == "mgp_list":
-                save_item({"site": "失败回填", "title": add_count})
+                save_item({"site": "新增/回填", "title": add_count})
 
             return True
         except Exception as e:

+ 14 - 25
FworkSpider/setting.py

@@ -14,16 +14,9 @@ import sys
 #
 # MONGODB
 # MONGO_IP = "192.168.20.51"  # 本地 docker 环境
-MONGO_IP = "127.0.0.1"  # 本地环境
-# MONGO_PORT = 27017
-MONGO_PORT = 27001
-
-#
-
-# MONGO_IP = "192.168.3.71"  # 本地环境
-# MONGO_PORT = 27027
-
-
+MONGO_IP = "172.17.4.87"  # 线上环境
+MONGO_PORT = 27080
+# MONGO_PORT = 27001
 MONGO_DB = "py_spider"
 # MONGO_USER_NAME = ""
 # MONGO_USER_PASS = ""
@@ -31,8 +24,7 @@ MONGO_DB = "py_spider"
 # # REDIS
 # # ip:port 多个可写为列表或者逗号隔开 如 ip1:port1,ip2:port2 或 ["ip1:port1", "ip2:port2"]
 # REDISDB_IP_PORTS = "192.168.20.51:6379"  # 本地 docker 环境
-REDISDB_IP_PORTS = "127.0.0.1:6379"  # 本地环境
-# REDISDB_IP_PORTS = "192.168.3.71:6379"  # 本地环境
+REDISDB_IP_PORTS = "172.19.0.1:6379"  # 本地环境
 # REDISDB_USER_PASS = ""
 REDISDB_DB = 10
 # # 适用于redis哨兵模式
@@ -54,10 +46,10 @@ EXPORT_DATA_MAX_RETRY_TIMES = 5 # 导出数据时最大的重试次数,包括
 #
 REDIS_KEY = "fwork"
 # # SPIDER
-# SPIDER_THREAD_COUNT = 10  # 爬虫并发数
+# SPIDER_THREAD_COUNT = 4  # 爬虫并发数
 # SPIDER_SLEEP_TIME = [2, 5] # 下载时间间隔 单位秒。 支持随机 如 SPIDER_SLEEP_TIME = [2, 5] 则间隔为 2~5秒之间的随机数,包含2和5
 # SPIDER_TASK_COUNT = 1  # 每个parser从内存队列中获取任务的数量
-SPIDER_MAX_RETRY_TIMES = 2  # 每个请求最大重试次数
+SPIDER_MAX_RETRY_TIMES = 5  # 每个请求最大重试次数
 # KEEP_ALIVE = False  # 爬虫是否常驻
 #
 # # 浏览器渲染
@@ -66,11 +58,11 @@ WEBDRIVER  = dict(
     load_images=False,  # 是否加载图片
     # user_agent=None,  # 字符串 或 无参函数,返回值为user_agent
     proxy=None,  # xxx.xxx.xx.xxx:xxxx 或 无参函数,返回值为代理地址
-    headless=False,  # 是否为无头浏览器
+    headless=True,  # 是否为无头浏览器
     driver_type="FIREFOX",  # CHROME、PHANTOMJS、FIREFOX
     timeout=30,  # 请求超时时间
     window_size=(1280, 800),  # 窗口大小
-    executable_path="D:\\geckodriver.exe",  # 浏览器路径,默认为默认路径
+    # executable_path="D:\\geckodriver.exe",  # 浏览器路径,默认为默认路径
     render_time=0,  # 渲染时长,即打开网页等待指定时间后再获取源码
     custom_argument=["--ignore-certificate-errors"],  # 自定义浏览器渲染参数
 )
@@ -134,16 +126,15 @@ WARNING_INTERVAL = 360  # 相同报警的报警时间间隔,防止刷屏; 0表
 WARNING_LEVEL = "DEBUG"  # 报警级别, DEBUG / ERROR
 WARNING_FAILED_COUNT = 2  # 任务失败数 超过WARNING_FAILED_COUNT则报警
 #
-LOG_NAME = os.path.basename(os.getcwd())
+#LOG_NAME = os.path.basename(os.getcwd())
 
 DTIME = time.strftime("%Y-%m-%d", time.localtime(time.time()))
-# LOG_NAME = os.path.split(sys.argv[0])[-1].split('.')[0]
-# LOG_PATH = "log/%s/%s.log" %(DTIME,LOG_NAME)  # log存储路径
-LOG_PATH = LOG_NAME  # log存储路径
-LOG_LEVEL = "DEBUG"
+LOG_NAME = os.path.split(sys.argv[0])[-1].split('.')[0]
+LOG_PATH = "log/%s/%s.log" %(DTIME,LOG_NAME)  # log存储路径
+LOG_LEVEL = "INFO"
 LOG_COLOR = True  # 是否带有颜色
 LOG_IS_WRITE_TO_CONSOLE = True # 是否打印到控制台
-LOG_IS_WRITE_TO_FILE = True  # 是否写文件
+LOG_IS_WRITE_TO_FILE = False  # 是否写文件
 LOG_MODE = "w"  # 写文件的模式
 LOG_MAX_BYTES = 10 * 1024 * 1024  # 每个日志文件的最大字节数
 LOG_BACKUP_COUNT = 20  # 日志文件保留数量
@@ -157,7 +148,5 @@ OTHERS_LOG_LEVAL = "ERROR"  # 第三方库的log等级
 # print('当前工作路径为 ' + os.getcwd())
 jy_proxy = {'socks5': {'url': 'http://socks.spdata.jianyu360.com/socks/getips?limit=100', 'decrypt': 'ABNOPqrceQRSTklmUDEFGXYZabnopfghHVWdijstuvwCIJKLMxyz0123456789+/'}}
 headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/93.0.4577.82 Safari/537.36', 'Accept': '*/*'}
-oss_={'key_id': 'LTAI4G5x9aoZx8dDamQ7vfZi', 'key_secret': 'Bk98FsbPYXcJe72n1bG3Ssf73acuNh', 'endpoint': 'oss-cn-beijing.aliyuncs.com', 'bucket_name': 'jy-datafile'}
-# oss_={'key_id': 'LTAI4G5x9aoZx8dDamQ7vfZi', 'key_secret': 'Bk98FsbPYXcJe72n1bG3Ssf73acuNh', 'endpoint': 'oss-cn-beijing-internal.aliyuncs.com', 'bucket_name': 'jy-editor'}
-
+oss_={'key_id': 'LTAI4G5x9aoZx8dDamQ7vfZi', 'key_secret': 'Bk98FsbPYXcJe72n1bG3Ssf73acuNh', 'endpoint': 'oss-cn-beijing-internal.aliyuncs.com', 'bucket_name': 'jy-datafile'}
 author = {"dzr":"董钊瑞",'mgp':"马国鹏","lzz":"李宗泽"}

+ 0 - 0
FworkSpider/untils/clean_html/__init__.py


+ 131 - 0
FworkSpider/untils/clean_html/defaults.py

@@ -0,0 +1,131 @@
+import re
+
+__all__ = ['cleaner']
+
+# 独立元素
+INDEPENDENT_TAGS = {
+    '<head>[\s\S]*?</head>': '',
+    '<html>|<html [^>]*>|</html>': '',
+    '<body>|<body [^>]*>|</body>': '',
+    '<meta[^<>]*>|<meta [^<>]*>|<meta[^<>]*>[\s\S]*?</meta>|</meta>': '',  # 元数据
+    '&(nbsp|e[mn]sp|thinsp|zwn?j|#13);': '',  # 空格
+    '\\xa0|\\u3000': '',  # 空格
+    '<!--[\s\S]*?-->': '',  # 注释
+    '<style[^<>]*>[\s\S]*?</style>': '',  # 样式
+    '<script[^<>]*>[\s\S]*?</script>': '',  # JavaScript
+    '<input>': '',  # 输入框
+    '<img[^>]*>': '<br>',  # 图片
+}
+# 行内元素
+INLINE_TAGS = {
+    '<a>|<a [^>]*>|</a>': '',  # 超链接
+    '<span>|<span [^>]*>|</span>': '',  # span
+    '<label>|<label [^>]*>|</label>': '<br>',  # label
+    '<font>|<font [^>]*>|</font>': '',  # font
+}
+# 块级元素
+BLOCK_TAGS = {
+    '<h[1-6][^>]*>|</h[1-6]>': '',  # 标题
+    '<p>|<p [^>]*>|</p>': '<br>',  # 段落
+    '<div>|<div [^>]*>|</div>': '<br>',  # 分割 division
+    '<o:p>|<o:p [^>]*>|</o:p>': ''  # OFFICE微软WORD段落
+}
+# 其他
+OTHER = {
+    '<?xml[^>]*>|<?xml [^>]*>|<?xml:.*?>': '',
+    '<epointform>': '',
+    '<!doctype html>|<!doctype html [^>]*>': '',
+    '【关闭】|关闭': '',
+    '【打印】|打印本页': '',
+    '【字体:[\s\S]*】': '',
+    '文章来源:[\u4e00-\u9fa5]+': '',
+    '浏览次数:.*[<]+': '',
+    '(责任编辑:.*?)': '',
+    '分享到[:]': '',
+}
+# 样式
+CSS_STYLE = {
+    'style="[\s\S]*?"|style ="[\s\S]*?"': '',
+    'bgcolor="[\s\S]*?"|bgcolor ="[\s\S]*?"': '',
+    'bordercolor="[\s\S]*?"|bordercolor ="[\s\S]*?"': '',
+    'class="[\s\S]*?"|class ="[\s\S]*?"': '',
+    'align="[\s\S]*?"|align ="[\s\S]*?"': '',
+    'cellpadding="(\d+)"|cellspacing="(\d+)"': '',
+}
+# 空白符
+BLANKS = {
+    '\n\s*\n': '\n',
+    '\s*\n\s*': '\n',
+    '[^\S\n]': ' ',
+    '\s+': ' ',
+}
+# css标签集合
+TAGS = {'table', 'tr', 'td', 'div', 'span', 'p'}
+# css属性集合
+ATTRS = {'id', 'class', 'style', 'width'}
+
+
+def _repair_tag():
+    """异常的标签组合,用来替换非标准页面的标签"""
+    _repairs = {}
+    for tag in TAGS:
+        for attr in ATTRS:
+            key = '{}{}'.format(tag, attr)
+            val = '{} {}'.format(tag, attr)
+            _repairs[key] = val
+    return _repairs
+
+
+def _escape_character(html):
+    """转义字符"""
+    html = html.replace('&lt;', '<')
+    html = html.replace('&gt;', '>')
+    html = html.replace('&quot;', '"')
+    html = html.replace('&amp;', '&')
+    return html
+
+
+def _lowercase_tag(html):
+    """标签归一化处理(全部小写)"""
+    tags = re.findall("<[^>]+>", html)
+    for tag in tags:
+        html = html.replace(tag, str(tag).lower())
+
+    repair_tags = _repair_tag()
+    for err, right in repair_tags.items():
+        html = html.replace(err, right)
+
+    return html
+
+
+def cleaner(html, special=None, completely=False):
+    """
+    数据清洗
+
+    :param html: 清洗的页面
+    :param special: 额外指定页面清洗规则
+    :param completely: 是否完全清洗页面
+    :return: 清洗后的页面源码
+    """
+    if special is None:
+        special = {}
+    OTHER.update(special)
+    remove_tags = {
+        **INDEPENDENT_TAGS,
+        **INLINE_TAGS,
+        **BLOCK_TAGS,
+        **OTHER,
+        **CSS_STYLE,
+        **BLANKS,
+    }
+    html = _lowercase_tag(html)
+    for tag, repl in remove_tags.items():
+        html = re.sub(tag, repl, html)
+
+    if completely:
+        html = re.sub(r'<canvas[^<>]*>[\s\S]*?</canvas>', '', html)  # 画布
+        html = re.sub(r'<iframe[^<>]*>[\s\S]*?</iframe>', '', html)  # 内框架
+        html = re.sub('<([^<>\u4e00-\u9fa5]|微软雅黑|宋体|仿宋)+>', '', html)
+
+    html = _escape_character(html)
+    return html
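
A short usage sketch of the new cleaner; the sample markup and the extra rule are made up for illustration:

    from untils.clean_html.defaults import cleaner

    raw = '<div style="color:red"><p>Notice body</p><script>alert(1)</script></div>'
    print(cleaner(raw))                                   # scripts and styles removed, block tags become <br>
    print(cleaner(raw, special={'Notice body': 'BODY'}, completely=True))  # extra rule plus aggressive mode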

+ 2 - 3
FworkSpider/untils/cookie_pool.py

@@ -16,13 +16,13 @@ import warnings
 from collections import Iterable
 from enum import Enum, unique
 import requests
-from feapder.db.mongodb import MongoDB
 
 import feapder.utils.tools as tools
 from feapder import setting
 from feapder.network import user_agent
 
 from feapder.db.mysqldb import MysqlDB
+from feapder.db.mongodb import MongoDB
 from feapder.db.redisdb import RedisDB
 from feapder.utils import metrics
 from feapder.utils.log import log
@@ -105,8 +105,7 @@ class PageCookiePool(CookiePoolInterface):
         可能会重写
         @return:
         """
-        print('ssssssssssssssss',self._kwargs)
-        url = 'https://www.whzbtb.com/V2PRTS/PrequalificationPublicityInfoListInit.do'
+        url = self._page_url
         header = {
             "Upgrade-Insecure-Requests": "1",
             "User-Agent": user_agent.get()

+ 8 - 1
FworkSpider/untils/create_menus.py

@@ -1,3 +1,10 @@
+# -*- coding: utf-8 -*-
+"""
+Created on 2021-12-13 10:04:03
+---------
+@summary: Quickly create a menus configuration
+
+"""
 from feapder.db.mongodb import MongoDB
 
 
@@ -19,7 +26,7 @@ class Details:
         return self._to_db_xs
     def main(self,page):
         menus_list = []
-        data = self.to_db_xs.find("luaconfig",{"modifyuser":"maguopeng","param_common":{"$elemMatch": {"$regex": "广东省政府采购网", "$options": "$i"}}})
+        data = self.to_db_xs.find("luaconfig",{"modifyuser":"maguopeng","param_common":{"$elemMatch": {"$regex": "中国南方航空采购招标网", "$options": "$i"}}})
         # print(data)
         for item in data:
             # print(item)

+ 0 - 0
spiders/__init__.py


+ 0 - 0
spiders/李宗泽/__init__.py


+ 0 - 0
spiders/马国鹏/__init__.py


+ 88 - 0
spiders/马国鹏/中国南方航空采购招标网.py

@@ -0,0 +1,88 @@
+# -*- coding: utf-8 -*-
+"""
+Created on 2021-12-21 16:19:50
+---------
+@summary:中国南方航空采购招标网.py
+---------
+@author: 马国鹏
+"""
+import sys
+sys.path.append('/app/spiders/sword_feapder/FworkSpider')
+import feapder
+from items.spider_item import DataBakItem,MgpListItem
+from untils.proxy_pool import ProxyPool
+from feapder.dedup import Dedup
+from collections import namedtuple
+
+
+class Zgnfhkcgzbw(feapder.Spider):
+
+    def start_callback(self):
+         self.count = 0
+         Menu = namedtuple('Menu', ['channel', 'code', 'url', 'crawl_page'])
+
+         self.menus = [
+             Menu('其它公告', 'a_zgnfhkcgzbw_cggg',
+                  'https://csbidding.csair.com/cms/channel/qtgg/index.htm', 1),
+             Menu('非招标采购-采购结果', 'a_zgnfhkcgzbw_cgjg',
+                  'https://csbidding.csair.com/cms/channel/cgjg/index.htm', 2),
+             Menu('招标公告', 'a_zgnfhkcgzbw_zbgg',
+                  'https://csbidding.csair.com/cms/channel/zbgg/index.htm', 1),
+             Menu('中标公告', 'a_zgnfhkcgzbw_zhbgg',
+                  'https://csbidding.csair.com/cms/channel/bidzbgg/index.htm', 1),
+             Menu('评标公示', 'a_zgnfhkcgzbw_pbgs',
+                  'https://csbidding.csair.com/cms/channel/pbgs/index.htm', 1),
+             Menu('非招标采购-采购公告', 'a_zgnfhkcgzbw_fzbcg_cggg',
+                  'https://csbidding.csair.com/cms/channel/cggg/index.htm', 2),
+             Menu('非招标采购-其它公告', 'a_zgnfhkcgzbw_qtgg',
+                  'https://csbidding.csair.com/cms/channel/fzbqtgg/index.htm', 1),
+         ]
+    def start_requests(self):
+         for menu in self.menus:
+            for page in range(1,menu.crawl_page+1):
+                start_url = menu.url + f'?pageNo={page}'
+                yield feapder.Request(url=start_url, item=menu._asdict())
+
+    def parse(self, request, response):
+        menu = request.item
+        self.count += 1   # 一个计数器
+        dedup = Dedup(Dedup.BloomFilter)
+        href_list = []
+        info_list = response.xpath("//ul[@id='list1']/li")
+        for info in info_list:
+            href = info.xpath('./a/@href').extract_first()
+            title = info.xpath('./a/@title').extract_first()
+            # import pdb
+            # pdb.set_trace()
+            # print(info.xpath('./a/text()'))
+            create_time = info.xpath('./a/em/text()').extract_first()
+
+            data_item = DataBakItem()  # 存储数据的管道
+            data_item.href = href  # 标书链接
+            data_item.channel = menu.get("channel")  # 最上方定义的抓取栏目 (编辑器定的)
+            data_item.spidercode = menu.get("code")  # 最上方定义的爬虫code(编辑器定的)
+            data_item.title = title  # 标题
+            data_item.publishtime = create_time.strip()  # 标书发布时间
+            data_item.site = "中国南方航空采购招标网"
+            data_item.area = "全国"  # 城市默认:全国
+            data_item.city = ""  # 城市 默认为空
+            ss = dedup.filter_exist_data([href])
+            if ss == []:
+                continue
+            list_item =  MgpListItem()
+            list_item.__table_name__= 'mgp_list'
+            list_item.parse = "self.detail_get"
+            list_item.parser_name = "details"
+            list_item.item = data_item.to_dict
+            list_item.deal_detail = ['//div[@class="main-text"]']
+            # list_item.create_time = '//div[@class="article-author"]/text()[-1]'
+            list_item.parse_url = href
+            href_list.append(href)
+            yield list_item
+        dedup.add(href_list)
+
+    def end_callback(self):
+        print("爬虫结束")
+
+if __name__ == "__main__":
+    Zgnfhkcgzbw(redis_key="fwork:Zgnfhkcgzbw").start()
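
The list spiders in this commit all share the same incremental-dedup pattern. The stripped-down sketch below only restates how the calls above are used (the example URL is made up); it is not an independent reference for the feapder Dedup API:

    from feapder.dedup import Dedup

    dedup = Dedup(Dedup.BloomFilter)
    href = "https://example.com/notice/1.html"       # hypothetical detail link from a list page
    if dedup.filter_exist_data([href]) == []:        # empty result -> href was already crawled
        pass                                         # skip it, as the spiders do with `continue`
    else:
        pass                                         # build DataBakItem / MgpListItem and yield it
    dedup.add([href])                                # afterwards, register the new links as crawled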

+ 75 - 0
spiders/马国鹏/中国石化物质采购电子商务平台.py

@@ -0,0 +1,75 @@
+# -*- coding: utf-8 -*-
+"""
+Created on 2021-12-16 15:53:39
+---------
+@summary:中国石化物质采购电子商务平台.py
+---------
+@author: 马国鹏
+"""
+import sys
+sys.path.append('/app/spiders/sword_feapder/FworkSpider')
+import feapder
+from items.spider_item import DataBakItem,MgpListItem
+from untils.proxy_pool import ProxyPool
+from feapder.dedup import Dedup
+from collections import namedtuple
+from feapder.utils.tools import timestamp_to_date
+
+
+class Zshcg(feapder.Spider):
+    # 自定义数据库,若项目中有setting.py文件,此自定义可删除
+    def start_callback(self):
+         self.count = 0
+         Menu = namedtuple('Menu', ['channel', 'code', 'types', 'crawl_page'])
+
+         self.menus = [
+             Menu('	独家采购公示', 'a_zgshwzcgdzswpt_djcggs', "Notice", 2),
+         ]
+    def start_requests(self):
+         for menu in self.menus:
+             for page in range(1,menu.crawl_page+1):
+                start_url = f'https://ec.sinopec.com/f/supp/bid/queryOnlyBill.do?pageNo={page}&_=1639640334801'
+                yield feapder.Request(url=start_url, item=menu._asdict())
+
+    def parse(self, request, response):
+        menu = request.item
+        self.count += 1   # 一个计数器
+        dedup = Dedup(Dedup.BloomFilter)
+        href_list = []
+        # print(response.json)
+        info_list = response.json.get("result").get("result")
+        # return
+        for info in info_list:
+            href = f'https://ec.sinopec.com/f/supp/notice/viewArticle.do?id={info.get("id")}'
+            title =info.get("title")
+            create_time = timestamp_to_date(info.get("createdate").get("time")/1000)
+
+            list_item = DataBakItem()  # 存储数据的管道
+            list_item.href = href  # 标书链接
+            list_item.channel = menu.get("channel")  # 最上方定义的抓取栏目 (编辑器定的)
+            list_item.spidercode = menu.get("code")  # 最上方定义的爬虫code(编辑器定的)
+            list_item.title = title  # 标题
+            list_item.publishtime = create_time  # 标书发布时间
+            list_item.site = "中国石化物资采购电子商务平台"
+            list_item.area = "全国"  # 城市默认:全国
+            list_item.city = ""  # 城市 默认为空
+            ss = dedup.filter_exist_data([href])
+            if ss == []:
+                continue
+            mgp =  MgpListItem()
+            mgp.parse = "self.detail_get"
+            mgp.parser_name = "details"
+            mgp.item = list_item.to_dict
+            mgp.deal_detail = ['//div[@class="wrap"]','//div[@id="middle"]']
+            mgp.parse_url = href
+            href_list.append(href)
+        #     yield mgp
+        # dedup.add(href_list)
+
+    def end_callback(self):
+        print("爬虫结束")
+        # wechat_warning(f"爬虫名称 爬虫结束\n共抓取{self.count}次详情页数据")
+
+if __name__ == "__main__":
+    Zshcg(redis_key="fwork:{spider_name}").start()
+    # print(timestamp_to_date(1639635843,time_format="%Y-%m-%d %H:%M:%S"))

+ 98 - 0
spiders/马国鹏/中泰集团招标投标网.py

@@ -0,0 +1,98 @@
+# -*- coding: utf-8 -*-
+"""
+Created on 2022-02-17 09:39:39
+---------
+@summary: 中泰集团招标投标网
+---------
+@author: maguopemng
+"""
+import sys
+
+from requests_toolbelt import MultipartEncoder
+
+
+sys.path.append('C:/Users/topnet/Desktop/crawlab_feader/FworkSpider')
+sys.path.append('/app/spiders/sword_feapder/FworkSpider')
+import feapder
+from items.spider_item import DataBakItem,MgpListItem,ListItem
+from feapder.dedup import Dedup
+from collections import namedtuple
+from untils.clean_html.defaults import cleaner
+
+
+
+class AZtjtzbtbwXxggQb(feapder.Spider):
+
+    def start_callback(self):
+         Menu = namedtuple('Menu', ['channel', 'code', 'types', 'crawl_page'])
+         self.site= "中泰集团招标投标网"
+
+         self.menus = [
+             Menu('信息公告-全部', 'a_ztjtzbtbw_xxgg_qb', "自定义参数", 1),
+         ]
+    def start_requests(self):
+         for menu in self.menus:
+             for page in range(1,menu.crawl_page+1):
+                 start_url = f'http://scm.zthx.com/srm-pb-web/portalBulletinNoAuth/listByPageNoAuth'
+                 multipart_data = MultipartEncoder(
+                     fields={
+                     "Q_EQ_bidTypeValue": "",
+                     "Q_EQ_noticeTypeValue": "",
+                     "Quick_value": "",
+                     "S_releaseDate": "desc",
+                     "page": "2",
+                     "rows": "15"
+                 })
+                 headers = {
+                     "Content-Type": multipart_data.content_type
+                 }
+
+                 yield feapder.Request(url=start_url, item=menu._asdict(),proxies=False,
+                                       data=multipart_data,headers=headers)
+
+    def parse(self, request, response):
+        print(response.text)
+        menu = request.item
+        dedup = Dedup(Dedup.BloomFilter)
+        href_list = []
+        info_list = response.json.get("rows")
+        for info in info_list:
+            href = f'http://scm.zthx.com/?id={info.get("id")}'
+            title = info.get("title")
+            create_time = info.get("releaseDate")
+            html = info.get("mainBody")
+            result = cleaner(html)
+            area = "全国"  # 省份
+            city = ""  # 城市
+
+            data_item = DataBakItem()  # 存储数据的管道
+            data_item.href = href  # 标书链接
+            data_item.channel = menu.get("channel")  # 最上方定义的抓取栏目 (编辑器定的)
+            data_item.spidercode = menu.get("code")  # 最上方定义的爬虫code(编辑器定的)
+            data_item.title = title  # 标题
+            data_item.publishtime = create_time  # 标书发布时间
+            data_item.site = self.site
+            data_item.area = area  # 城市默认:全国
+            data_item.city = city  # 城市 默认为空
+            data_item.contenthtml = html  # 城市 默认为空
+            data_item.detail = result  # 城市 默认为空
+            ss = dedup.filter_exist_data([href])
+            if ss == []:
+                continue
+
+            yield data_item
+        list = ListItem()
+        list.site = self.site
+        list.channel = menu.get("channel")
+        list.spidercode = menu.get("code")
+        list.url = request.url
+        list.count = len(info_list)
+        list.rel_count = len(href_list)
+        yield list
+        dedup.add(href_list)
+
+    def end_callback(self):
+        print("爬虫结束")
+
+if __name__ == "__main__":
+    AZtjtzbtbwXxggQb(redis_key="maguopemng:AZtjtzbtbwXxggQb").start()

+ 133 - 0
spiders/马国鹏/中铁鲁班商务网.py

@@ -0,0 +1,133 @@
+# -*- coding: utf-8 -*-
+"""
+Created on 2022-01-20 13:49:04
+---------
+@summary: Zglbsww
+---------
+@author: dist
+"""
+import json
+import sys
+
+sys.path.append('/app/spiders/sword_feapder/FworkSpider')
+import feapder
+from items.spider_item import DataBakItem,MgpListItem,ListItem
+from feapder.dedup import Dedup
+from collections import namedtuple
+
+
+class Zglbsww(feapder.Spider):
+
+    def start_callback(self):
+         Menu = namedtuple('Menu', ['channel', 'code', 'purchaseType',"orders", 'crawl_page'])
+         self.site= "中铁鲁班商务网"
+
+         self.menus = [
+             Menu('公告补遗-招标采购', 'a_ztlbsww_zhbgg', "CRFQ","publish_time", 1),
+             Menu('公告补遗-询价采购', 'a_ztlbsww_ggby_xjcg', "XJFQ","publish_time", 1),
+             Menu('公告补遗-竞争性谈判', 'a_ztlbsww_cqby', "TPFQ","publish_time", 1),
+             Menu('公告补遗-竞价采购', 'a_ztlbsww_ggby_jjcg', "JJFQ","publish_time", 1),
+
+             Menu('采购公告-招标采购', 'a_ztlbsww_zbgg', "CRFQ","pub_time", 1),
+             Menu('采购公告-询价采购', 'a_ztlbsww_lsxjcg', "XJFQ","pub_time", 1),
+             Menu('采购公告-竞争性谈判', 'a_ztlbsww_jzxtp', "TPFQ","pub_time", 1),
+             Menu('采购公告-竞价采购', 'a_ztlbsww_jjcg', "JJFQ","pub_time", 1),
+         ]
+    def start_requests(self):
+         for menu in self.menus:
+             for page in range(1,menu.crawl_page+1):
+                 '''
+                 https://eproport.crecgec.com/epu-portal/portal/project/listWithPage
+                 https://eproport.crecgec.com/epu-portal/portal/project/listWithPage
+                 '''
+                 start_url = f'https://eproport.crecgec.com/epu-portal/portal/project/listWithPage'
+                 data = {
+                     "timeType": "month",
+                     "areaCode": "-1",
+                     "mainType": "-1",
+                     "purchaser": None,
+                     "information": None,
+                     "sTime": "",
+                     "eTime": "",
+                     "classify": "-1",
+                     "region": "-1",
+                     "level": "",
+                     "selectedState": "",
+                     "purchaseType": menu.purchaseType,
+                     "noticeType": 1,
+                     "orders": menu.orders,
+                     "dirs": "desc",
+                     "current": page,
+                     "size": 10,
+                     "page": {}
+                 }
+                 data = json.dumps(data)
+
+                 yield feapder.Request(url=start_url, item=menu._asdict(),proxies=False,method="POST",data=data)
+    def parse(self, request, response):
+        print(response.text)
+        menu = request.item
+        dedup = Dedup(Dedup.BloomFilter)
+        href_list = []
+        info_list = response.json.get("data").get("records")
+        for info in info_list:
+            projectid = info.get("projectId")
+            tenantid = info.get("tenantId")
+            href = f'https://eproport.crecgec.com/#/notice/noticexj-detail?projectId={projectid}&tenantId={tenantid}'
+            title = info.get("projectName")
+            create_time = info.get("publishTime") + ":00"
+            area = "全国"  # 省份
+            city = ""  # 城市
+
+            data_item = DataBakItem()  # 存储数据的管道
+            data_item.href = href  # 标书链接
+            data_item.channel = menu.get("channel")  # 最上方定义的抓取栏目 (编辑器定的)
+            data_item.spidercode = menu.get("code")  # 最上方定义的爬虫code(编辑器定的)
+            data_item.title = title  # 标题
+            data_item.publishtime = create_time  # 标书发布时间
+            data_item.site = self.site
+            data_item.area = area  # 城市默认:全国
+            data_item.city = city  # 城市 默认为空
+            ss = dedup.filter_exist_data([href])
+            if ss == []:
+                continue
+            list_item =  MgpListItem()
+            list_item.parse = "self.detail_get"
+            list_item.parser_name = "details_ztlbw"
+            list_item.item = data_item.to_dict
+            list_item.deal_detail = ['//*']
+            list_item.proxies = False
+            list_item.render_time = 3
+            list_item.parse_url = href
+            list_item.pri = 1
+            list_item.files={
+                "list_xpath":'//div[@class="****"]/a',
+                "url_xpath":'./@href',
+                "name_xpath":'./text()',
+                "files_type":('zip','doxc','ftp'), # 需要下载的附件类型
+                # "file_type":'zip', # 默认的附件类型,用于url中未带附件类型的
+                "url_key":'http', # 用于区别连接是否为正常附件连接的url关键词 必须携带,如无可填http
+                # "host":'http://www.ceshi.com',  # 需要拼接url的host
+            }
+            href_list.append(href)
+            yield list_item
+        list = ListItem()
+        list.site = self.site
+        list.channel = menu.get("channel")
+        list.spidercode = menu.get("code")
+        list.url = request.url
+        list.count = len(info_list)
+        list.rel_count = len(href_list)
+        yield list
+        dedup.add(href_list)
+
+    def end_callback(self):
+        print("爬虫结束")
+
+    def download_midware(self, request):
+        request.headers = {
+
+            "Content-Type": "application/json"
+        }
+if __name__ == "__main__":
+    Zglbsww(redis_key="dist:Zglbsww").start()

+ 105 - 0
spiders/马国鹏/亿企优采.py

@@ -0,0 +1,105 @@
+# -*- coding: utf-8 -*-
+"""
+Created on 2022-02-16 09:23:14
+---------
+@summary: Yqyc
+---------
+@author: maguopemng
+"""
+import sys
+
+sys.path.append('C:/Users/topnet/Desktop/crawlab_feader/FworkSpider')
+sys.path.append('/app/spiders/sword_feapder/FworkSpider')
+import feapder
+from items.spider_item import DataBakItem, MgpListItem, ListItem
+from feapder.dedup import Dedup
+from collections import namedtuple
+from requests_toolbelt import MultipartEncoder
+import json
+
+
+class Yqyc(feapder.Spider):
+
+    def start_callback(self):
+        Menu = namedtuple('Menu', ['channel', 'code', 'types', 'crawl_page'])
+        self.site = "亿企优采"
+
+        self.menus = [
+            Menu('竞价结果列表', 'a_yqyc_jjjglb', "bidResultList", 1),
+            Menu('待竞价列表', 'a_yqyc_djjlb', "biddingList", 1),
+        ]
+
+    def start_requests(self):
+        for menu in self.menus:
+            for page in range(1, menu.crawl_page + 1):
+                start_url = f'http://www.vins.com.cn/business/login/{menu.types}'
+                multipart_data = MultipartEncoder(
+                    fields={"page": json.dumps(
+                        {"numPerPage": 10, "pageNum": page, "condition": "LIKE", "keyword": "f01", "searchValue": "",
+                         "orderField": "", "orderDirection": "", "filterParams": {}})})
+                headers = {
+                    "Content-Type": multipart_data.content_type
+                }
+                yield feapder.Request(url=start_url, item=menu._asdict(), proxies=False,
+                                      data=multipart_data, method="POST", headers=headers)
+
+    def parse(self, request, response):
+        print(response.text)
+        menu = request.item
+        dedup = Dedup(Dedup.BloomFilter)
+        href_list = []
+        info_list = response.json.get("data").get("records")
+        for info in info_list:
+            if menu.get("types")=="biddingList":
+                href = f'http://www.vins.com.cn/business/bidingDetail?fid={info.get("f14")}&school={info.get("f04")}'
+                title = f'待竞价详细({info.get("f01")})---- {info.get("f05")}'
+                create_time = info.get("f07")
+            else:
+                href = f'http://www.vins.com.cn/business/bidResultDetail?fid={info.get("f14")}&school={info.get("f04")}'
+                title = f'竞价结果详细({info.get("f01")})---- {info.get("f05")}'
+                create_time = info.get("f25")
+
+
+            area = "全国"  # 省份
+            city = ""  # 城市
+
+            data_item = DataBakItem()  # 存储数据的管道
+            data_item.href = href  # 标书链接
+            data_item.channel = menu.get("channel")  # 最上方定义的抓取栏目 (编辑器定的)
+            data_item.spidercode = menu.get("code")  # 最上方定义的爬虫code(编辑器定的)
+            data_item.title = title  # 标题
+            data_item.publishtime = create_time  # 标书发布时间
+            data_item.site = self.site
+            data_item.area = area  # 城市默认:全国
+            data_item.city = city  # 城市 默认为空
+            ss = dedup.filter_exist_data([href])
+            if ss == []:
+                continue
+            list_item = MgpListItem()
+            list_item.parse = "self.detail_get"
+            list_item.parser_name = "details_firefox"
+            list_item.item = data_item.to_dict
+            list_item.deal_detail = ['//div[@class="contentWrapper"]']
+            list_item.proxies = False
+            list_item.render_time = 3
+            list_item.parse_url = href
+            list_item.pri = 1
+
+            href_list.append(href)
+            yield list_item
+        list = ListItem()
+        list.site = self.site
+        list.channel = menu.get("channel")
+        list.spidercode = menu.get("code")
+        list.url = request.url
+        list.count = len(info_list)
+        list.rel_count = len(href_list)
+        yield list
+        dedup.add(href_list)
+
+    def end_callback(self):
+        print("爬虫结束")
+
+
+if __name__ == "__main__":
+    Yqyc(redis_key="maguopemng:Yqyc").start()

+ 76 - 0
spiders/马国鹏/华润置地华东大区网站.py

@@ -0,0 +1,76 @@
+# -*- coding: utf-8 -*-
+"""
+Created on 2022-01-04 13:45:21
+---------
+@summary:华润置地华东大区网站
+---------
+@author: topnet
+"""
+import sys
+sys.path.append('/app/spiders/sword_feapder/FworkSpider')
+import feapder
+from items.spider_item import DataBakItem,MgpListItem
+from untils.proxy_pool import ProxyPool
+from feapder.dedup import Dedup
+from collections import namedtuple
+
+
+class Hrzdhddqwz(feapder.Spider):
+
+    def start_callback(self):
+         self.count = 0
+         Menu = namedtuple('Menu', ['channel', 'code', 'types', 'crawl_page'])
+
+         self.menus = [
+             Menu('Hrzdhddqwz', 'Hrzdhddqwz', "Notice", 1),
+             # Menu('Hrzdhddqwz', 'Hrzdhddqwz', "Notice", 1),
+         ]
+    def start_requests(self):
+         for menu in self.menus:
+            start_url = f'https://sh.crland.com.cn/shanghai1/index.html'
+            yield feapder.Request(url=start_url, item=menu._asdict())
+
+    def parse(self, request, response):
+        # print(response.text)
+        menu = request.item
+        self.count += 1   # 一个计数器
+        dedup = Dedup(Dedup.BloomFilter)
+        href_list = []
+        info_list = response.xpath("//div[@class='east-tender']//tr[position()>1]")
+        for info in info_list:
+            href = info.xpath('./td[2]/a/@href').extract_first()
+            title = info.xpath('./td[2]/a/text()').extract_first()
+            create_time = info.xpath('./td[4]/text()').extract_first()
+
+            data_item = DataBakItem()  # 存储数据的管道
+            data_item.href = href  # 标书链接
+            data_item.channel = menu.get("channel")  # 最上方定义的抓取栏目 (编辑器定的)
+            data_item.spidercode = menu.get("code")  # 最上方定义的爬虫code(编辑器定的)
+            data_item.title = title  # 标题
+            data_item.publishtime = create_time  # 标书发布时间
+            data_item.site = "华润置地华东大区网站"
+            data_item.area = "上海市"  # 城市默认:全国
+            data_item.city = "上海市"  # 城市 默认为空
+            ss = dedup.filter_exist_data([href])
+            if ss == []:
+                continue
+            list_item =  MgpListItem()
+            list_item.parse = "self.detail_json"  # 虽然用的json方法,但处理的不是json型数据,因为title需要重查
+            list_item.parser_name = "details"
+            list_item.item = data_item.to_dict
+            list_item.deal_detail = '''
+title = response.xpath('//div[@class="east-news-detail-title"]/text()').extract_first()
+html = response.xpath('//div[@class="east-news-detail-bottom"]').extract_first()
+list_item.title = title
+list_item.contenthtml = html
+            '''
+            list_item.parse_url = href
+            href_list.append(href)
+            yield list_item
+        dedup.add(href_list)
+
+    def end_callback(self):
+        print("爬虫结束")
+
+if __name__ == "__main__":
+    Hrzdhddqwz(redis_key="fwork:Hrzdhddqwz").start()

+ 120 - 0
spiders/马国鹏/南通市如皋市政府采购网上商城.py

@@ -0,0 +1,120 @@
+# -*- coding: utf-8 -*-
+"""
+Created on 2022-02-18 13:18:40
+---------
+@summary: 	南通市如皋市政府采购网上商城
+---------
+@author: maguopemng
+"""
+import sys
+sys.path.append('C:/Users/topnet/Desktop/crawlab_feader/FworkSpider')
+sys.path.append('/app/spiders/sword_feapder/FworkSpider')
+import feapder
+from items.spider_item import DataBakItem,MgpListItem,ListItem
+from feapder.dedup import Dedup
+from collections import namedtuple
+
+
+class JsNtsrgszfcgwssc(feapder.Spider):
+
+    def start_callback(self):
+         Menu = namedtuple('Menu', ['channel', 'code', 'types', 'crawl_page'])
+         self.site= "南通市如皋市政府采购网上商城"
+
+         self.menus = [
+             Menu('分散公告', 'js_ntsrgszfcgwssc_fsgg', "自定义参数", 1),
+             # Menu('JsNtsrgszfcgwssc抓取栏目', 'JsNtsrgszfcgwssc爬虫code', "Notice", 1),
+         ]
+    def start_requests(self):
+         for menu in self.menus:
+             for page in range(1,menu.crawl_page+1):
+                 start_url = f'http://rugao.ntzfcg.cn/cgr_articles.html?category_id=5&page={page}'
+                 yield feapder.Request(url=start_url, item=menu._asdict(),proxies=False)
+
+    def parse(self, request, response):
+        menu = request.item
+        dedup = Dedup(Dedup.BloomFilter)
+        href_list = []
+        info_list = response.xpath('//ul[@class="list_main"]/li')
+        for info in info_list:
+            href = info.xpath('./a/@href').extract_first()
+            title = info.xpath('./a/text()').extract()[-1].strip()
+            create_time = "20" + info.xpath('./a/span/text()').extract_first().strip()
+            area = "江苏"  # 省份
+            city = "南通市"  # 城市
+
+            data_item = DataBakItem()  # 存储数据的管道
+            data_item.href = href  # 标书链接
+            data_item.channel = menu.get("channel")  # 最上方定义的抓取栏目 (编辑器定的)
+            data_item.spidercode = menu.get("code")  # 最上方定义的爬虫code(编辑器定的)
+            data_item.title = title  # 标题
+            data_item.publishtime = create_time  # 标书发布时间
+            data_item.site = self.site
+            data_item.area = area  # 城市默认:全国
+            data_item.city = city  # 城市 默认为空
+            ss = dedup.filter_exist_data([href])
+            if ss == []:
+                continue
+            list_item =  MgpListItem()
+            list_item.parse = "self.detail_get"
+            list_item.parser_name = "details"
+            list_item.item = data_item.to_dict
+            list_item.deal_detail = ['//div[@class="nes_details"]']
+            list_item.proxies = False
+            list_item.ex_python = '''
+js_str="""function randomString(e) {
+    e = e || 32;
+    var t = "ABCDEFGHJKMNPQRSTWXYZabcdefhijkmnprstwxyz2345678"
+      , n = t.length
+      , o = "";
+    for (i = 0; i < e; i++)
+        o += t.charAt(Math.floor(Math.random() * n));
+    return o
+}
+function undefind_function(nowtimes) {
+    for (var e = nowtimes, t = (new Date).getSeconds(), i = 100 * Number(e) + t, n = parseInt(Number(i) / 1e3), o = new Array(4), r = 3; 0 <= r; r--)
+        3 == r ? o[3] = Number(i) % 1e3 : (o[r] = n % 1e3,
+        n = parseInt(n / 1e3));
+    var s = o.map(function(e) {
+        var t, i = [1, 3, 5, 7, 9], n = [0, 2, 4, 6, 8];
+        return e < 480 ? (e = 1e3 - e,
+        t = i[Math.floor(Math.random() * i.length)]) : t = n[Math.floor(Math.random() * n.length)],
+        (randomString(2) + e.toString(16) + t).toUpperCase()
+    }).join("-")
+      , a = parseInt(t / 10)
+      , l = t % 10
+      , c = a * l * 100 + 10 * (a + 1) + (9 - l);
+    return "_new_rugao_session=" + s + "-" + randomString(4) + c
+}"""
+ctx = execjs.compile(js_str)
+cookie = ctx.call("undefind_function",str(int(time.time())))  
+request_params["headers"]={"Cookie":cookie}         
+
+'''
+            list_item.parse_url = href
+            list_item.pri = 1
+            list_item.files={
+                "list_xpath":'//span[@class="font16 cgr_ar_content  mb29"]/a',
+                "url_xpath":'./@href',
+                "name_xpath":'./text()',
+                "file_type":'file_name',
+                "files_type":('zip','doxc','ftp','rar','pdf','xlsx','doc','jpg'), # 需要下载的附件类型
+                "url_key":'attachments', # 用于区别连接是否为正常附件连接的url关键词 必须携带,如无可填http
+                "host":'http://rugao.ntzfcg.cn',  # 需要拼接url的host
+            }
+            href_list.append(href)
+            yield list_item
+        list = ListItem()
+        list.site = self.site
+        list.channel = menu.get("channel")
+        list.spidercode = menu.get("code")
+        list.url = request.url
+        list.count = len(info_list)
+        list.rel_count = len(href_list)
+        yield list
+        dedup.add(href_list)
+
+    def end_callback(self):
+        print("爬虫结束")
+
+if __name__ == "__main__":
+    JsNtsrgszfcgwssc(redis_key="maguopemng:JsNtsrgszfcgwssc").start()

+ 101 - 0
spiders/马国鹏/天津市政府采购网.py

@@ -0,0 +1,101 @@
+# -*- coding: utf-8 -*-
+"""
+Created on 2021-12-13 10:04:03
+---------
+@summary:天津市政府采购网.py
+---------
+@author: 马国鹏
+"""
+import sys
+sys.path.append('/app/spiders/sword_feapder/FworkSpider')
+import feapder
+from items.spider_item import DataBakItem,MgpListItem
+from untils.proxy_pool import ProxyPool
+from feapder.dedup import Dedup
+from collections import namedtuple
+from feapder.utils.tools import format_date
+import time
+
+class Tjszf(feapder.Spider):
+    # 自定义数据库,若项目中有setting.py文件,此自定义可删除
+    def start_callback(self):
+         self.count = 0
+         self.prox_pool = ProxyPool()
+         Menu = namedtuple('Menu', ['channel', 'code', 'id','st', 'crawl_page'])
+
+         self.menus = [
+             Menu('采购公告市级',        'tj_tjszfcgw_cggg_sj', "1665", 1,   1),
+             Menu('采购公告区县',        'tj_tjszfcgw_cggg_qj', "1664",None, 1),
+             Menu('采购结果公告市级',     'tj_tjszfcgw_cgjggg_sj', "2014", 1,   1),
+             Menu('采购结果公告区县',     'tj_tjszfcgw_cgjggg_qx', "2013",None, 1),
+             Menu('采购需求征求意见市级',  'tj_tjszfcgw_cgxqzqyj_sj', "1662",1, 1),
+             Menu('采购需求征求意见区县', 'tj_tjszfcgw_cgxqzqyj_qj', "1994", None, 1),
+             Menu('单一来源公示-市级',    'tj_tjszfcgw_cgxqzqyj_sj', "2033", 1,   1),
+             Menu('单一来源公示-区级',    'tj_tjszfcgw_dylygs_qx', "2034", None, 1),
+             Menu('更正公告市级',        'tj_tjszfcgw_gzgg_sj', "1663", 1, 1),
+             Menu('更正公告区县',        'tj_tjszfcgw_gzgg_qx', "1666", None, 1),
+             Menu('合同验收公告市级',     'tj_tjszfcgw_htysgg_sj', "2015", 1, 1),
+             Menu('合同验收公告区县',     'tj_tjszfcgw_htysgg_qx', "2016", None, 1),
+             Menu('监督检查处理决定公告-市级','tj_tjszfcgw_jdjccjjdgg_sj', "5776730", 1, 1),
+             Menu('监督检查处理决定公告-区级','tj_tjszfcgw_jdjccjjdgg_qj', "5903537", None, 1),
+             Menu('投诉处理决定-市级',     'tj_tjszfcgw_tscljd', "5776729", None, 1),
+             Menu('投诉处理决定公告-区级',  'tj_tjszfcgw_tscljd_qj', "5903425", None, 1),
+             Menu('采购意向公开-市级',  'tj_tjszfcgw_cgyxgk_sj', "2021", 1, 1),
+             Menu('采购意向公开-区级',  'tj_tjszfcgw_cgyxgk_qj', "2022", None, 1),
+         ]
+    def start_requests(self):
+         for menu in self.menus:
+            stmp = int(time.time()*1000)
+            start_url = f'http://www.ccgp-tianjin.gov.cn/portal/topicView.do?method=view&view=Infor&id={menu.id}&ver=2{"&st"+str(menu.st) if menu.st else ""}&stmp={stmp}'
+            yield feapder.Request(url=start_url, item=menu._asdict())
+
+    def parse(self, request, response):
+        # print(response.text)
+        info_list = response.xpath('//ul[@class="dataList"]/li')
+        menu = request.item
+        self.count += 1   # 一个计数器
+        href_list = []
+        dedup = Dedup(Dedup.BloomFilter, absolute_name="boolm:list")
+        for info in info_list:
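+            # the span holds a Java-style timestamp, e.g. "Mon Dec 13 10:04:03 CST 2021" (illustrative value);
+            # format_date() with "%a %b %d %H:%M:%S CST %Y" is assumed to normalize it to "2021-12-13 10:04:03"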
+            create_time = info.xpath("./span/text()").extract_first()
+            create_time = format_date(create_time, "%a %b %d %H:%M:%S CST %Y")
+            href = info.xpath("./a/@href").extract_first()
+            title = info.xpath("./a/@title").extract_first()
+            list_item = DataBakItem()  # 存储数据的管道
+            list_item.href = href  # 标书链接
+            list_item.channel = menu.get("channel")  # 最上方定义的抓取栏目 (编辑器定的)
+            list_item.spidercode = menu.get("code")  # 最上方定义的爬虫code(编辑器定的)
+            list_item.title = title  # 标题
+            list_item.publishtime = create_time  # 标书发布时间
+            list_item.site = "天津市政府采购网"
+            list_item.area = "天津市"  # 城市默认:全国
+            list_item.city = "天津市"  # 城市 默认为空
+            ss = dedup.filter_exist_data([href])
+            if ss == []:
+                continue
+            mgp =  MgpListItem()
+            mgp.parse = "self.detail_get"
+            mgp.parser_name = "details"
+            mgp.item = list_item.to_dict
+            # mgp.author = '马国鹏'
+            mgp.deal_detail = ['//table',"//div[@class='pageInner']"]
+            mgp.parse_url = href
+            href_list.append(href)
+            yield mgp
+        dedup.add(href_list)
+
+
+    def end_callback(self):
+        print("爬虫结束")
+        # wechat_warning(f"爬虫名称 爬虫结束\n共抓取{self.count}次详情页数据")
+
+    def download_midware(self, request):
+        request.proxies = self.prox_pool.get()
+        return request
+
+if __name__ == "__main__":
+    Tjszf(redis_key="magp:tjszfcg").start()
+'''
+imageString=67&method=downEnIdFile1&id=301079006&fileId=LwQVvvUfo5A*
+
+'''

+ 137 - 0
spiders/马国鹏/广东省政府采购网.py

@@ -0,0 +1,137 @@
+# -*- coding: utf-8 -*-
+"""
+Created on 2022-01-18 09:41:49
+---------
+@summary: Gdszfcgw
+---------
+@author: dist
+"""
+import sys
+from urllib.parse import urljoin
+
+import requests
+
+sys.path.append('/app/spiders/sword_feapder/FworkSpider')
+import feapder,time
+from items.spider_item import DataBakItem,MgpListItem,ListItem
+from feapder.dedup import Dedup
+from collections import namedtuple
+from untils.get_imgcode import get_code
+#
+# # custom_settings = { 'DOWNLOAD_DELAY': 10, 'CONCURRENT_REQUESTS_PER_IP': 4, 'DOWNLOADER_MIDDLEWARES': {}, }
+# settings = { 'LOG_LEVEL': "INFO" }
+class Gdszfcgw(feapder.Spider):
+    def start_callback(self):
+         Menu = namedtuple('Menu', ['channel', 'code', 'noticetype','notchannel', 'crawl_page'])
+         self.site= "广东省政府采购网"
+         self.host = 'https://gdgpo.czt.gd.gov.cn'
+
+         self.menus = [
+             Menu('采购意向公开', 'gd_gdszfcgwxwz_cgyxgk','59','fca71be5-fc0c-45db-96af-f513e9abda9d', 1),
+             Menu('单一来源公示', 'gd_gdszfcgwxwz_cggg_pccgyxgk','001051','fca71be5-fc0c-45db-96af-f513e9abda9d', 1),
+             Menu('采购计划', 'gd_gdszfcgwxwz_cgjh', '001101','95ff31f3-a1af-4bc4-b1a2-54c894476193', 1),   #1
+             Menu('采购需求', 'gd_gdszfcgwxwz_cgxq', '001059','fca71be5-fc0c-45db-96af-f513e9abda9d', 1),
+             Menu('资格预审公告', 'gd_gdszfcgwxwz_zgysgg', '001052,001053','fca71be5-fc0c-45db-96af-f513e9abda9d', 1), #2
+             Menu('采购公告', 'gd_gdszfcgwxwz_cggg', '00101','fca71be5-fc0c-45db-96af-f513e9abda9d', 1),
+             Menu('中标成交公告', 'gd_gdszfcgwxwz_zbcjgg', '00102','fca71be5-fc0c-45db-96af-f513e9abda9d', 1),
+             Menu('更正公告', 'gd_gdszfcgwxwz_gzgg', '00103','fca71be5-fc0c-45db-96af-f513e9abda9d', 1),
+             Menu('终止公告', 'gd_gdszfcgwxwz_zzgg', '001004,001006','fca71be5-fc0c-45db-96af-f513e9abda9d', 1), #3
+             Menu('合同公告', 'gd_gdszfcgwxwz_htgg', '001054','fca71be5-fc0c-45db-96af-f513e9abda9d', 1),
+             Menu('验收公告', 'gd_gdszfcgwxwz_ysgg', '001009,00105A','fca71be5-fc0c-45db-96af-f513e9abda9d', 1),  #4
+             Menu('电子卖场信息', 'gd_gdszfcgwxwz_ysgg', '201022,201023,201111,00107D','3b49b9ba-48b6-4220-9e8b-eb89f41e9d66', 1),  #4
+             Menu('电子卖场信息', 'gd_gdszfcgwxwz_ysgg', '202022,202023,202111,00107E,001076','3b49b9ba-48b6-4220-9e8b-eb89f41e9d66', 1),  #4
+             Menu('电子卖场信息', 'gd_gdszfcgwxwz_ysgg', '001071','3b49b9ba-48b6-4220-9e8b-eb89f41e9d66', 1),  #4
+             Menu('电子卖场信息', 'gd_gdszfcgwxwz_ysgg', '204022,204023,204111,204112','3b49b9ba-48b6-4220-9e8b-eb89f41e9d66', 1),  #4
+             Menu('电子卖场信息', 'gd_gdszfcgwxwz_ysgg', '001054', '3b49b9ba-48b6-4220-9e8b-eb89f41e9d66', 1),  # 4
+             Menu('电子卖场信息', 'gd_gdszfcgwxwz_ysgg', '001009,00105A', '3b49b9ba-48b6-4220-9e8b-eb89f41e9d66', 1),  # 4
+
+             # Menu('批量采购', 'gd_gdszfcgwxwz_plcg',
+             #      'https://gdgpo.czt.gd.gov.cn/freecms/site/guangdong/dzmcgg/index.html', 1),
+             # Menu('进口产品清单', 'gd_gdszfcgwxwz_jkcpqd',
+             #      'https://gdgpo.czt.gd.gov.cn/freecms/site/guangdong/jkcpqd/index.html','','d7284b7e-29e9-4fe4-bad3-b187ec8edbf9' 1),
+         ]
+    def start_requests(self):
+        code = self.get_code()
+        for menu in self.menus:
+             for page in range(1,menu.crawl_page+1):
+                 start_url = f'https://gdgpo.czt.gd.gov.cn/freecms/rest/v1/notice/selectInfoMoreChannel.do?&siteId=cd64e06a-21a7-4620-aebc-0576bab7e07a&channel={menu.notchannel}&currPage={page}&pageSize=10&noticeType={menu.noticetype}&regionCode=440001&verifyCode={code}&subChannel=false&purchaseManner=&title=&openTenderCode=&purchaser=&agency=&purchaseNature=&operationStartTime=&operationEndTime=&selectTimeName=noticeTime'
+                 yield feapder.Request(url=start_url, item=menu._asdict(),proxies=False)
+    def get_code(self):
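+        # the list API rejects requests without a valid verifyCode, so this helper downloads the captcha
+        # image and sends it to the in-house OCR service (untils.get_imgcode.get_code); it returns the
+        # recognized text or None when recognition fails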
+        img_url = 'https://gdgpo.czt.gd.gov.cn/freecms/verify/verifyCode.do?createTypeFlag=n'
+        header = {"Host": "www.ccgp-tianjin.gov.cn",
+                  "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:76.0) Gecko/20100101 Firefox/76.0",
+                  "Origin": "http://www.ccgp-tianjin.gov.cn",
+
+                  }
+        res = requests.get(img_url, headers=header)
+        with open('image/guangdong.jpg', 'wb+') as f:
+            f.write(res.content)
+        res = get_code('image/guangdong.jpg')
+        if res.get("msg")=="success":
+            img_code = res.get("r").get("code")
+        else:
+            img_code = None
+        return img_code
+
+
+    def parse(self, request, response):
+        time.sleep(0.3)
+        menu = request.item
+        dedup = Dedup(Dedup.BloomFilter)
+        href_list = []
+        info_list = response.json.get("data")
+        for info in info_list:
+            href = info.get("pageurl")
+            title = info.get("shorttitle")
+            create_time = info.get("addtimeStr")
+            href = urljoin(self.host, href)
+
+            area = "广东"  # 省份
+            city = ""  # 城市
+
+            data_item = DataBakItem()  # 存储数据的管道
+            data_item.href = href  # 标书链接
+            data_item.channel = menu.get("channel")  # 最上方定义的抓取栏目 (编辑器定的)
+            data_item.spidercode = menu.get("code")  # 最上方定义的爬虫code(编辑器定的)
+            data_item.title = title  # 标题
+            data_item.publishtime = create_time  # 标书发布时间
+            data_item.site = self.site
+            data_item.area = area  # 城市默认:全国
+            data_item.city = city  # 城市 默认为空
+            ss = dedup.filter_exist_data([href])
+            if ss == []:
+                continue
+            list_item =  MgpListItem()
+            list_item.parse = "self.detail_get"
+            list_item.parser_name = "details"
+            list_item.item = data_item.to_dict
+            list_item.deal_detail = ['//div[@class="info-article in active"]']
+            list_item.proxies = False
+            list_item.parse_url = href
+            list_item.pri = 1
+            list_item.files={
+                "list_xpath":'//div[@class="info-article in active"]//div/a',
+                "url_xpath":'./@href',
+                "name_xpath":'./text()',
+                "files_type":('zip','doxc','ftp','pdf'), # 需要下载的附件类型
+                # "file_type":'zip', # 默认的附件类型,用于url中未带附件类型的
+                "url_key":'http', # 用于区别连接是否为正常附件连接的url关键词 必须携带,如无可填http
+                # "host":'http://www.ceshi.com',  # 需要拼接url的host
+            }
+            href_list.append(href)
+            yield list_item
+        list = ListItem()
+        list.site = self.site
+        list.channel = menu.get("channel")
+        list.spidercode = menu.get("code")
+        list.url = request.url
+        list.count = len(info_list)
+        list.rel_count = len(href_list)
+        yield list
+        dedup.add(href_list)
+
+    def end_callback(self):
+        print("爬虫结束")
+
+if __name__ == "__main__":
+    Gdszfcgw(redis_key="dist:Gdszfcgw").start()

+ 75 - 0
spiders/马国鹏/广发证券采购平台.py

@@ -0,0 +1,75 @@
+# -*- coding: utf-8 -*-
+"""
+Created on 2022-01-04 13:12:42
+---------
+@summary: 广发证券采购平台
+---------
+@author: topnet
+"""
+import sys
+sys.path.append('/app/spiders/sword_feapder/FworkSpider')
+from urllib.parse import urljoin
+
+import feapder
+from items.spider_item import DataBakItem,MgpListItem
+from untils.proxy_pool import ProxyPool
+from feapder.dedup import Dedup
+from collections import namedtuple
+
+
+class Gfzqcgpt(feapder.Spider):
+
+    def start_callback(self):
+         self.count = 0
+         self.host = 'https://gfjc.gf.com.cn'
+         Menu = namedtuple('Menu', ['channel', 'code', 'types', 'crawl_page'])
+
+         self.menus = [
+             Menu('公告公示信息', 'a_gfzqcgpt_gggsxx', "gonggao", 1),
+             # Menu('Gfzqcgpt', 'Gfzqcgpt', "Notice", 1),
+         ]
+    def start_requests(self):
+         for menu in self.menus:
+             for page in range(1,menu.crawl_page+1):
+                start_url = f'https://gfjc.gf.com.cn/gonggao/index_{page}.jhtml'
+                yield feapder.Request(url=start_url, item=menu._asdict())
+
+    def parse(self, request, response):
+        menu = request.item
+        self.count += 1   # 一个计数器
+        dedup = Dedup(Dedup.BloomFilter)
+        href_list = []
+        info_list = response.xpath('//div[@class="list-news-mode"]')
+        for info in info_list:
+            href = urljoin(self.host, info.xpath('./div/a/@href').extract_first())
+            title = info.xpath('./div/a/text()').extract_first()
+            create_time = info.xpath('./div/div/span[3]/text()').extract_first()
+            create_time = create_time.split(":")[-1]
+
+            data_item = DataBakItem()  # 存储数据的管道
+            data_item.href = href  # 标书链接
+            data_item.channel = menu.get("channel")  # 最上方定义的抓取栏目 (编辑器定的)
+            data_item.spidercode = menu.get("code")  # 最上方定义的爬虫code(编辑器定的)
+            data_item.title = title  # 标题
+            data_item.publishtime = create_time  # 标书发布时间
+            data_item.site = "广发证券采购平台"
+            data_item.area = "全国"  # 城市默认:全国
+            data_item.city = ""  # 城市 默认为空
+            ss = dedup.filter_exist_data([href])
+            if ss == []:
+                continue
+            list_item =  MgpListItem()
+            list_item.parse = "self.detail_get"
+            list_item.parser_name = "details"
+            list_item.item = data_item.to_dict
+            list_item.deal_detail = ['//div[@class="list-news-box"]']
+            list_item.parse_url = href
+            href_list.append(href)
+            yield list_item
+        dedup.add(href_list)
+
+    def end_callback(self):
+        print("爬虫结束")
+
+if __name__ == "__main__":
+    Gfzqcgpt(redis_key="fwork:Gfzqcgpt").start()

+ 110 - 0
spiders/马国鹏/杭州市公共资源交易.py

@@ -0,0 +1,110 @@
+# -*- coding: utf-8 -*-
+"""
+Created on 2021-11-26 16:28:18
+---------
+@summary: 杭州市公共资源交易
+---------
+@author: 马国鹏
+"""
+import sys
+sys.path.append('/app/spiders/sword_feapder/FworkSpider')
+from collections import namedtuple
+import feapder
+import time
+from feapder.dedup import Dedup
+from items.spider_item import DataBakItem, MgpListItem
+
+
+class Hzsggzy(feapder.Spider):
+    # 自定义数据库,若项目中有setting.py文件,此自定义可删除
+    def start_callback(self):
+        self.start_url = 'https://ggzy.hzctc.hangzhou.gov.cn/SecondPage/GetNotice'
+        self.count = 0
+
+    def start_requests(self):
+        Menu = namedtuple('Menu', ['channel', 'code', 'afficheType', 'crawl_page'])
+        menus = [
+            # Menu('工程建设-项目合同', 'zj_hzsggzyjyw_gcjs_xmht', "506", 1, ),
+            # Menu('工程建设-招标文件预公示', 'zj_hzsggzyjyw_gcjs_zbwjygs', "505", 2, ),
+            # Menu('工程建设-核准信息公告', 'zj_hzsggzyjyw_gcjs_hzxxgg', "518", 1, ),
+            # Menu('政府采购-更正答疑', 'zj_hzsggzyjy_zfcg_gzdy2', "27", 1, ),
+            Menu('政府采购-采购公告', 'zj_hzsggzyjy_zfcg_cggg2', "29", 2, ),
+            Menu('综合其他-中标结果公告', 'zj_hzsggzyjyw_zhqt_zbjggg', "507", 1, ),
+            Menu('综合其他-中标前公示', 'zj_hzsggzyjyw_zhqt_zbqgs', "37", 1, ),
+            Menu('综合其他-答疑文件', 'zj_hzsggzyjyw_zhqt_dywj', "499",1, ),
+            Menu('综合其他-答疑公告', 'zj_hzsggzyjyw_zhqt_dygg', "469", 1, ),
+            Menu('综合其他-招标公告', 'zj_hzsggzyjyw_zhqt_zbgg', "34", 1, ),
+
+            Menu('工程建设-招标公告', 'zj_hzsggzyjy_gcjs_zbgg', "22", 1, ),
+            Menu('工程建设-答疑文件', 'zj_hzsggzyjy_gcjs_dywj', "23", 1, ),
+            Menu('工程建设-答疑公告', 'zj_hzsggzyjy_gcjs_dygg', "465", 1, ),
+            Menu('工程建设-开标结果公示', 'zj_hzsggzyjy_gcjs_kbjggs', "486", 1, ),
+            Menu('工程建设-中标前公示', 'zj_hzsggzyjy_gcjs_zhbqgs', "25", 1, ),
+            Menu('工程建设-中标公告', 'zj_hzsggzyjy_gcjs_zbgs', "28", 1, ),
+
+            Menu('政府采购-意见征询', 'zj_hzsggzyjy_zfcg_yjzx', "26", 1, ),
+            Menu('政府采购-答疑公告', 'zj_hzsggzyjy_zfcg_dygg', "466", 1, ),
+            Menu('政府采购-结果公告', 'zj_hzsggzyjy_zfcg_jggg', "32", 1, ),
+
+        ]
+        for menu in menus:
+            for page in range(1,menu.crawl_page+1):
+
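+                # jqGrid-style form query (assumed from the field names): afficheType picks the column,
+                # nd is a millisecond cache-buster, and sidx/sord sort by PublishStartTime descending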
+                data = {
+                    "area":"",
+                    "afficheType":menu.afficheType,
+                    "IsToday":"",
+                    "title":"",
+                    "proID":"",
+                    "number":"",
+                    "_search":"false",
+                    "nd":int(time.time()*1000),
+                    "rows":"10",
+                    "page":page,
+                    "sidx":"PublishStartTime",
+                    "sord":"desc"
+                }
+
+                yield feapder.Request(url=self.start_url,data=data,method="POST",item=menu._asdict(),verify=False,proxies=False)
+
+    def parse(self, request, response):
+        menu = request.item
+        href_list = []
+        dedup = Dedup(Dedup.BloomFilter)
+        info_list =response.json.get("rows")
+        for info in info_list:
+            info_id = info.get("ID")
+            tenderno = info.get("TenderNo")
+            title = info.get("TenderName")
+            create_time = info.get("PublishStartTime")
+            inner = info.get("IsInner")
+            href = f'https://ggzy.hzctc.hangzhou.gov.cn/AfficheShow/Home?AfficheID={info_id}&IsInner={inner}&ModuleID={menu.get("afficheType")}'
+            data_item = DataBakItem()
+            data_item.href = href
+            data_item.title = title
+            data_item.publishtime = create_time
+            data_item.channel = menu.get("channel")
+            data_item.spidercode = menu.get("code")
+            data_item.site = "杭州市公共资源交易"
+            data_item.area = "浙江"
+            data_item.city = "杭州市"
+            ss = dedup.filter_exist_data([href])
+            if ss == []:
+                continue
+            list_item = MgpListItem()
+            # list_item.__table_name__ = 'mgp_list'
+            list_item.parse = "self.detail_get"
+            list_item.parser_name = "details"
+            list_item.item = data_item.to_dict
+            list_item.deal_detail = ["//div[@class='content']"]
+            # list_item.create_time = '//div[@class="article-author"]/text()[-1]'
+            list_item.parse_url = href
+            href_list.append(href)
+            yield list_item
+        dedup.add(href_list)
+
+
+
+
+if __name__ == "__main__":
+    Hzsggzy(redis_key="mgp:hzsggzy",debug=True).start()

+ 99 - 0
spiders/马国鹏/武汉市公共资源交易平台.py

@@ -0,0 +1,99 @@
+# -*- coding: utf-8 -*-
+"""
+Created on 2021-12-29 10:06:02
+---------
+@summary:  武汉市公共资源交易平台
+---------
+@author: topnet
+"""
+import sys
+sys.path.append('/app/spiders/sword_feapder/FworkSpider')
+import feapder
+from items.spider_item import DataBakItem,MgpListItem
+from untils.cookie_pool import PageCookiePool
+from feapder.dedup import Dedup
+from collections import namedtuple
+
+
+class Whsggzyjypt(feapder.Spider):
+
+    cookie_pool = PageCookiePool(redis_key='fwork:Whsggzyjypt',page_url='https://www.whzbtb.com/V2PRTS/PrequalificationPublicityInfoListInit.do')
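+    # PageCookiePool (shared util) is assumed to keep a pool of cookies minted from page_url under the
+    # given redis_key; download_midware() attaches one per request and parse() drops it again whenever
+    # the "当前操作存在安全风险" anti-bot page is returned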
+
+    def start_callback(self):
+         self.count = 0
+         Menu = namedtuple('Menu', ['channel', 'code', 'types', 'crawl_page'])
+
+         self.menus = [
+             Menu('资格预审公示', 'hb_whsggzyjypt_zgysgs', "Notice", 3), # 300页历史数据
+         ]
+    def start_requests(self):
+         for menu in self.menus:
+             start_url = f'https://www.whzbtb.com/V2PRTS/PrequalificationPublicityInfoList.do'
+             for page in range(1,menu.crawl_page+1):
+                 data = {
+                     "page": page,
+                     "rows": "10"
+                 }
+                 yield feapder.Request(url=start_url, data=data, method="POST", item=menu._asdict())
+
+    def parse(self, request, response):
+        if '当前操作存在安全风险' in response.text:
+            self.cookie_pool.del_cookie(request.cookies)
+            yield request
+            return
+        menu = request.item
+        self.count += 1   # 一个计数器
+        dedup = Dedup(Dedup.BloomFilter)
+        href_list = []
+        info_list = response.json.get("rows")
+        for info in info_list:
+            href = f'https://www.whzbtb.com/V2PRTS/PrequalificationPublicityInfoDetail.do?id={info.get("id")}'
+            title = info.get("prjName")
+            create_time = info.get("insertDate")
+
+            data_item = DataBakItem()  # 存储数据的管道
+            data_item.href = href  # 标书链接
+            data_item.channel = menu.get("channel")  # 最上方定义的抓取栏目 (编辑器定的)
+            data_item.spidercode = menu.get("code")  # 最上方定义的爬虫code(编辑器定的)
+            data_item.title = title  # 标题
+            data_item.publishtime = create_time  # 标书发布时间
+            data_item.site = "武汉市公共资源交易平台"
+            data_item.area = "湖北省"  # 城市默认:全国
+            data_item.city = "武汉市"  # 城市 默认为空
+            ss = dedup.filter_exist_data([href])
+            # if ss == []:
+            #     continue
+            list_item =  MgpListItem()
+            list_item.parse = "self.detail_get"
+            list_item.parser_name = "details_cookie"
+            list_item.item = data_item.to_dict
+            list_item.deal_detail = ['//div[@class="pageRight_box"]']
+            list_item.parse_url = href
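+            # down_mid travels with the task to the details_cookie worker; judging by the field names,
+            # key/page_url rebuild the same cookie pool there, while text and the status codes mark
+            # responses that should trigger a cookie swap and retry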
+            list_item.down_mid = {"key":'fwork:Whsggzyjypt',"text":"当前操作存在安全风险","code":(404,500),
+                                  "page_url":'https://www.whzbtb.com/V2PRTS/PrequalificationPublicityInfoListInit.do'}
+            href_list.append(href)
+            yield list_item
+        dedup.add(href_list)
+    def download_midware(self, request):
+        request.headers = {
+            "Sec-Fetch-Mode": "cors",
+            "Sec-Fetch-Site": "same-origin",
+            "Origin": "https://www.whzbtb.com",
+            "Accept-Encoding": "gzip, deflate, br",
+            "Accept-Language": "zh-CN,zh;q=0.9",
+            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.120 Safari/537.36 Core/1.77.81.400 QQBrowser/10.9.4608.400",
+            "Content-Type": "application/x-www-form-urlencoded; charset=UTF-8",
+            "Accept": "application/json, text/javascript, */*; q=0.01",
+            "Referer": "https://www.whzbtb.com/V2PRTS/PrequalificationPublicityInfoListInit.do",
+            "X-Requested-With": "XMLHttpRequest",
+            "Connection": "keep-alive"
+        }
+
+        request.cookies = self.cookie_pool.get_cookie()
+        return request
+
+
+    def end_callback(self):
+        print("爬虫结束")
+
+if __name__ == "__main__":
+    Whsggzyjypt(redis_key="fwork:Whsggzyjypt").start()

+ 132 - 0
spiders/马国鹏/湖北省政府采购网.py

@@ -0,0 +1,132 @@
+# -*- coding: utf-8 -*-
+"""
+Created on 2022-02-16 11:31:01
+---------
+@summary: HbHbszfcgwCgyxgg
+---------
+@author: maguopemng
+"""
+import sys
+sys.path.append('C:/Users/topnet/Desktop/crawlab_feader/FworkSpider')
+sys.path.append('/app/spiders/sword_feapder/FworkSpider')
+import feapder
+from items.spider_item import DataBakItem,MgpListItem,ListItem
+from feapder.dedup import Dedup
+from collections import namedtuple
+from untils.cookie_pool import PageCookiePool
+
+
+class HbHbszfcgwCgyxgg(feapder.Spider):
+    cookiepool = PageCookiePool(redis_key='fwork:gszfcg',
+                            page_url='http://www.ccgp-hubei.gov.cn:9040/quSer/initSearch')
+
+    def start_callback(self):
+         Menu = namedtuple('Menu', ['channel', 'code', 'types', 'crawl_page'])
+         self.site= "湖北省政府采购网"
+         self.menus = [
+             Menu('采购意向公告', 'hb_hbszfcgw_cgyxgg', "自定义参数", 1),
+         ]
+
+    def start_requests(self):
+         for menu in self.menus:
+             for page in range(1,menu.crawl_page+1):
+                 start_url = f'http://www.ccgp-hubei.gov.cn:9040/quSer/search'
+                 data = {
+                     "queryInfo.type": "cgyx",
+                     "queryInfo.key": "",
+                     "queryInfo.xmmc": "",
+                     "queryInfo.cgdw": "",
+                     "queryInfo.city": "湖北省",
+                     "queryInfo.qybm": "42????",
+                     "queryInfo.district": "全省",
+                     "queryInfo.je1": "",
+                     "queryInfo.begin": "",
+                     "queryInfo.end": "",
+                     "queryInfo.pageNo": "3",
+                     "queryInfo.pageSize": "15",
+                     "queryInfo.pageTotle": "2950"
+                 }
+                 headers = {
+                     "Content-Type": "application/x-www-form-urlencoded",
+                 }
+                 yield feapder.Request(url=start_url, item=menu._asdict(),proxies=False,data=data,method="POST",headers=headers)
+
+    def parse(self, request, response):
+        if '查询失败,请重新查询' in response.text:
+            self.cookiepool.del_cookie(request.cookies)
+            yield request
+            return
+        print(response.text)
+        menu = request.item
+        dedup = Dedup(Dedup.BloomFilter)
+        href_list = []
+        info_list = response.xpath('//tbody/tr')
+        for info in info_list:
+            href = info.xpath('./td[last()]/a/@href').extract_first()
+            title = info.xpath('./td[2]/text()').extract_first()
+            create_time = info.xpath('./td[5]/text()').extract_first()
+            area = "湖北"  # 省份
+            city = ""  # 城市
+            print(title,create_time,href)
+
+            data_item = DataBakItem()  # 存储数据的管道
+            data_item.href = href  # 标书链接
+            data_item.channel = menu.get("channel")  # 最上方定义的抓取栏目 (编辑器定的)
+            data_item.spidercode = menu.get("code")  # 最上方定义的爬虫code(编辑器定的)
+            data_item.title = title  # 标题
+            data_item.publishtime = create_time  # 标书发布时间
+            data_item.site = self.site
+            data_item.area = area  # 城市默认:全国
+            data_item.city = city  # 城市 默认为空
+            ss = dedup.filter_exist_data([href])
+            if ss == []:
+                continue
+            list_item =  MgpListItem()
+            list_item.parse = "self.detail_json"
+            list_item.parser_name = "details"
+            list_item.item = data_item.to_dict
+            list_item.proxies = False
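+            # deal_detail here is not an xpath list but a Python snippet apparently exec'd by the detail
+            # worker: it extracts the body, rebuilds each attachment URL as
+            # .../gpmispub/download?id=<base64(file_id)> from the javascript:downloadFile(...) href,
+            # and fetches it with AttachmentDownloader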
+            list_item.deal_detail = '''
+html = response.xpath('//div[@style="margin: 0 22px;"]').extract_first()   
+list_item.contenthtml=html
+files =  response.xpath('//ul[@class="list-unstyled details-ul"]/li')
+if len(files) > 0:
+    attachments = {}
+    for index, info in enumerate(files):
+        file_id = info.xpath('./a/@href').extract_first().strip("javascript:downloadFile();Base64").strip("'")
+        file_name = info.xpath('./a/@download').extract_first()
+        import base64
+        file_url = 'http://www.ccgp-hubei.gov.cn:8090/gpmispub/download?id=' + base64.b64encode(file_id.encode('utf-8')).decode()
+        file_type = file_name.split(".")[-1].lower()
+        file_name = file_name.split(".")[0]
+        print(file_type)
+        print(file_url)
+        attachment = AttachmentDownloader().fetch_attachment(
+            file_name=file_name, file_type=file_type, download_url=file_url,
+            enable_proxy=False)
+        attachments[str(len(attachments) + 1)] = attachment
+        print(attachment)
+        if len(attachments) == 0:
+            pass
+        else:
+            list_item.projectinfo = {"attachments": attachments}         
+            '''
+            list_item.parse_url = href
+            list_item.pri = 1
+            href_list.append(href)
+            yield list_item
+        list = ListItem()
+        list.site = self.site
+        list.channel = menu.get("channel")
+        list.spidercode = menu.get("code")
+        list.url = request.url
+        list.count = len(info_list)
+        list.rel_count = len(href_list)
+        yield list
+        dedup.add(href_list)
+    def download_midware(self, request):
+        request.cookies = self.cookiepool.get_cookie()
+        return request
+
+    def end_callback(self):
+        print("爬虫结束")
+
+if __name__ == "__main__":
+    HbHbszfcgwCgyxgg(redis_key="maguopemng:HbHbszfcgwCgyxgg").start()

+ 113 - 0
spiders/马国鹏/滁州市人民政府网.py

@@ -0,0 +1,113 @@
+# -*- coding: utf-8 -*-
+"""
+Created on 2022-01-14 20:02:21
+---------
+@summary: 滁州市人民政府网
+---------
+@author: mgp
+"""
+import sys
+sys.path.append('/app/spiders/sword_feapder/FworkSpider')
+import feapder
+from items.spider_item import DataBakItem,MgpListItem,ListItem
+from feapder.dedup import Dedup
+from collections import namedtuple
+
+
+class Czsrmzf(feapder.Spider):
+
+    def start_callback(self):
+         Menu = namedtuple('Menu', ['channel', 'code', 'types', 'crawl_page'])
+         self.site= "滁州市人民政府网"
+
+         self.menus = [
+             Menu('政府信息公开目录-公立医疗机构药品医用设备采购', 'ah_czsrmzfw_gcztb_zbgg', "自定义参数", 1),
+             Menu('重大建设项目-招标投标信息', 'ah_czsrmzfw_zfcg_cggg', "自定义参数", 1),
+             Menu('政府采购', 'ah_czsrmzfw_gcztb_zbgs', "Notice", 1),
+             Menu('工程建设招投标', 'ah_czsrmzfw_zfcg_zbcjgg', "Notice", 1),
+         ]
+    def start_requests(self):
+         for menu in self.menus:
+             for page in range(1,menu.crawl_page+1):
+                 start_url = f'https://www.chuzhou.gov.cn/chuzhou/site/label/8888'
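+                 # site/label/8888 appears to be the portal's generic list-fragment endpoint: labelName/siteId/catId
+                 # select the column and, with dataType=html, it returns ready-made <ul> rows for parse() to xpath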
+                 params = {
+                        "IsAjax": "1",
+                        "dataType": "html",
+                        "_": "0.5840033326645138",
+                        "labelName": "publicInfoList",
+                        "siteId": "2653861",
+                        "pageSize": "20",
+                        "pageIndex": "3",
+                        "action": "list",
+                        "isDate": "true",
+                        "dateFormat": "yyyy-MM-dd",
+                        "length": "50",
+                        "organId": "2681509",
+                        "type": "4",
+                        "catId": "161735369",
+                        "cId": "",
+                        "result": "暂无相关信息",
+                        "title": "",
+                        "fileNum": "",
+                        "keyWords": "",
+                        "file": "/c1/chuzhou/publicInfoList_newest"
+                    }
+                 yield feapder.Request(url=start_url,params=params, item=menu._asdict(),proxies=False)
+
+    def parse(self, request, response):
+        menu = request.item
+        dedup = Dedup(Dedup.BloomFilter)
+        href_list = []
+        info_list = response.xpath("//ul")
+        for info in info_list:
+            href = info.xpath("./li/a/@href").extract_first().strip()
+            title = info.xpath("./li/a/@title").extract_first().strip()
+            create_time = info.xpath("./li/span/text()").extract_first().strip()
+            area = "安徽"  # 省份
+            city = "滁州市"  # 城市
+
+            data_item = DataBakItem()  # 存储数据的管道
+            data_item.href = href  # 标书链接
+            data_item.channel = menu.get("channel")  # 最上方定义的抓取栏目 (编辑器定的)
+            data_item.spidercode = menu.get("code")  # 最上方定义的爬虫code(编辑器定的)
+            data_item.title = title  # 标题
+            data_item.publishtime = create_time  # 标书发布时间
+            data_item.site = self.site
+            data_item.area = area  # 城市默认:全国
+            data_item.city = city  # 城市 默认为空
+            ss = dedup.filter_exist_data([href])
+            if ss == []:
+                continue
+            list_item =  MgpListItem()
+            list_item.parse = "self.detail_get"
+            list_item.parser_name = "details"
+            list_item.item = data_item.to_dict
+            list_item.deal_detail = ['//div[@class="contentbox minh500"]']
+            list_item.proxies = False
+            list_item.parse_url = href
+            list_item.pri = 1
+            list_item.files={
+                "list_xpath":'//a[contains(@data-file-ext,"D")]',
+                "url_xpath":'./@href',
+                "name_xpath":'./text()',
+                "files_type":('zip','docx','ftp'), # 需要下载的附件类型
+                "url_key": 'http',  # 用于区别连接是否为正常附件连接的url关键词 必须携带,如无可填http
+                "host": 'https://www.chuzhou.gov.cn'
+            }
+            href_list.append(href)
+            yield list_item
+        list = ListItem()
+        list.site = self.site
+        list.channel = menu.get("channel")
+        list.spidercode = menu.get("code")
+        list.url = request.url
+        list.count = len(info_list)
+        list.rel_count = len(href_list)
+        yield list
+        dedup.add(href_list)
+
+    def end_callback(self):
+        print("爬虫结束")
+
+if __name__ == "__main__":
+    Czsrmzf(redis_key="magp:Czsrmzf").start()

+ 92 - 0
spiders/马国鹏/玖隆在线_交易公告.py

@@ -0,0 +1,92 @@
+# -*- coding: utf-8 -*-
+"""
+Created on 2022-02-15 14:01:43
+---------
+@summary: Jlzx
+---------
+@author: maguopemng
+"""
+import sys
+
+
+sys.path.append('/app/spiders/sword_feapder/FworkSpider')
+sys.path.append('C:/Users/topnet/Desktop/crawlab_feader/FworkSpider')
+import feapder
+from items.spider_item import DataBakItem,MgpListItem,ListItem
+from untils.clean_html.defaults import cleaner
+from feapder.dedup import Dedup
+from collections import namedtuple
+
+class AJlzxJygg(feapder.Spider):
+
+    def start_callback(self):
+         Menu = namedtuple('Menu', ['channel', 'code', 'types', 'crawl_page'])
+         self.site= "玖隆在线"
+
+         self.menus = [
+             Menu('交易公告', 'a_jlzx_jygg', "自定义参数", 1),
+         ]
+    def start_requests(self):
+         for menu in self.menus:
+             for page in range(1,menu.crawl_page+1):
+                 start_url = "http://www.e9656.com/portaletm-2.0.0//dataViewAjax!show.htm"
+                 params = {
+                     "callback": "",
+                     "ajaxParam.esbService": "afficheService.queryAfficheAll",
+                     "ajaxParam.esbParam": "%5B%7B%22cmemberCode%22%3A%22S000016%22%2C%22queryOrderStr1%22%3A%22afficheDate%20desc%22%7D%5D",
+                     "paging.limit": "12",
+                     "paging.start": "0",
+                     "ajaxParam.retClass": "com.soft.bc.oamsg.affiche.vo.QueryAffiche",
+                     "ajaxParam.esbParamClass": "[\"com.soft.bc.oamsg.affiche.vo.QueryBean\"]",
+                     "ajaxParam.esbParamName": "[\"queryBean\"]",
+                     "ajaxParam.resultParamName": "data",
+                     "ajaxParam.callbackParam": "{\"maskPlace\":\"$(\\\"div[name='doclist'][id='jygg'],span[name='doclist'][id='jygg']\\\")\"}"
+                 }
+                 yield feapder.Request(url=start_url, item=menu._asdict(),proxies=False,params=params)
+
+    def parse(self, request, response):
+        print(response.json)
+        menu = request.item
+        dedup = Dedup(Dedup.BloomFilter)
+        href_list = []
+        info_list = response.json.get("data").get("data").get("list")
+        for info in info_list:
+            href = f'https://www.e9656.com/trade//auctionHallAction!getOaAffiche.htm?glistTempbatch={info.get("afficheExbillno")}'
+            title = info.get("afficheTitle")
+            create_time = info.get("afficheEdate")
+            html = info.get("afficheContent")
+            result = cleaner(html)
+
+            area = "江苏"  # 省份
+            city = ""  # 城市
+
+            data_item = DataBakItem()  # 存储数据的管道
+            data_item.href = href  # 标书链接
+            data_item.channel = menu.get("channel")  # 最上方定义的抓取栏目 (编辑器定的)
+            data_item.spidercode = menu.get("code")  # 最上方定义的爬虫code(编辑器定的)
+            data_item.title = title  # 标题
+            data_item.publishtime = create_time  # 标书发布时间
+            data_item.site = self.site
+            data_item.area = area  # 城市默认:全国
+            data_item.city = city  # 城市 默认为空
+            data_item.contenthtml = html  # raw announcement html returned by the list API
+            data_item.detail = result  # cleaned body text produced by cleaner()
+            ss = dedup.filter_exist_data([href])
+            if ss == []:
+                continue
+            href_list.append(href)
+            yield data_item
+        list = ListItem()
+        list.site = self.site
+        list.channel = menu.get("channel")
+        list.spidercode = menu.get("code")
+        list.url = request.url
+        list.count = len(info_list)
+        list.rel_count = len(href_list)
+        yield list
+        dedup.add(href_list)
+
+    def end_callback(self):
+        print("爬虫结束")
+
+if __name__ == "__main__":
+    AJlzxJygg(redis_key="maguopemng:AJlzxJygg").start()

+ 102 - 0
spiders/马国鹏/玖隆在线_招标计划公示.py

@@ -0,0 +1,102 @@
+# -*- coding: utf-8 -*-
+"""
+Created on 2022-02-15 14:42:21
+---------
+@summary: JlzxZbjhgs
+---------
+@author: maguopemng
+"""
+import sys
+sys.path.append('C:/Users/topnet/Desktop/crawlab_feader/FworkSpider')
+sys.path.append('/app/spiders/sword_feapder/FworkSpider')
+import feapder
+from items.spider_item import DataBakItem,MgpListItem,ListItem
+from feapder.dedup import Dedup
+from collections import namedtuple
+
+
+class JlzxZbjhgs(feapder.Spider):
+
+    def start_callback(self):
+         Menu = namedtuple('Menu', ['channel', 'code', 'types', 'crawl_page'])
+         self.site= "玖隆在线"
+
+         self.menus = [
+             Menu('招标计划公示', 'a_jlzx_zbjhgs', "Notice", 1),
+         ]
+    def start_requests(self):
+         for menu in self.menus:
+             for page in range(1,menu.crawl_page+1):
+                 start_url = "http://www.e9656.com/portaletm-2.0.0//dataViewAjax!show.htm"
+                 params = {
+                     "callback": "",
+                     "ajaxParam.esbService": "portalDoclistService.queryPortalDoclists",
+                     "ajaxParam.esbParam": "%5B%7B%22cmemberCode%22%3A%22S000025%22%2C%22menuComcode%22%3A%22jghq%22%2C%22dataOpbillstate%22%3A%221%22%2C%22queryOrderStr1%22%3A%22doclistDate%20desc%22%7D%5D",
+                     "paging.limit": "15",
+                     "paging.start": "0",
+                     "ajaxParam.retClass": "com.soft.bc.portal.info.vo.QueryPortalDoclistVo",
+                     "ajaxParam.resultParamName": "data",
+                     "ajaxParam.esbParamClass": "[\"com.soft.bc.portal.info.vo.PortalDoclistQueryVo\"]",
+                     "ajaxParam.esbParamName": "[\"portalDoclistQueryVo\"]",
+                     "ajaxParam.callbackParam": "{\"maskPlace\":\"$(\\\"div[name='doclist'][id='jghq'],span[name='doclist'][id='jghq']\\\")\"}"
+                 }
+                 yield feapder.Request(url=start_url, item=menu._asdict(),proxies=False,params=params)
+
+    def parse(self, request, response):
+        menu = request.item
+        dedup = Dedup(Dedup.BloomFilter)
+        href_list = []
+        info_list = response.json.get("data").get("data").get("list")
+        for info in info_list:
+            create_time = info.get("doclistDate")
+            href = f'http://www.e9656.com/{info.get("doclistUrl")}'
+            title = info.get("doclistTitle")
+            area = "江苏"  # 省份
+            city = ""  # 城市
+
+            data_item = DataBakItem()  # 存储数据的管道
+            data_item.href = href  # 标书链接
+            data_item.channel = menu.get("channel")  # 最上方定义的抓取栏目 (编辑器定的)
+            data_item.spidercode = menu.get("code")  # 最上方定义的爬虫code(编辑器定的)
+            data_item.title = title  # 标题
+            data_item.publishtime = create_time  # 标书发布时间
+            data_item.site = self.site
+            data_item.area = area  # 城市默认:全国
+            data_item.city = city  # 城市 默认为空
+            ss = dedup.filter_exist_data([href])
+            if ss == []:
+                continue
+            list_item =  MgpListItem()
+            list_item.parse = "self.detail_get"
+            list_item.parser_name = "details"
+            list_item.item = data_item.to_dict
+            list_item.deal_detail = ['//div[@class="news_zwen_zwen"]']
+            list_item.proxies = False
+            list_item.parse_url = href
+            list_item.pri = 1
+            list_item.files={
+                "list_xpath":'//div[@class="news_zwen_zwen"]/p/span//a',
+                "url_xpath":'./@href',
+                "name_xpath":'./text()',
+                "files_type":('xls','doxc','ftp'), # 需要下载的附件类型
+                "file_type":'xls', # 默认的附件类型,用于url中未带附件类型的
+                "url_key":'http', # 用于区别连接是否为正常附件连接的url关键词 必须携带,如无可填http
+                # "host":'http://www.ceshi.com',  # 需要拼接url的host
+            }
+            href_list.append(href)
+            yield list_item
+        list = ListItem()
+        list.site = self.site
+        list.channel = menu.get("channel")
+        list.spidercode = menu.get("code")
+        list.url = request.url
+        list.count = len(info_list)
+        list.rel_count = len(href_list)
+        yield list
+        dedup.add(href_list)
+
+    def end_callback(self):
+        print("爬虫结束")
+
+if __name__ == "__main__":
+    JlzxZbjhgs(redis_key="maguopemng:JlzxZbjhgs").start()

+ 198 - 0
spiders/马国鹏/甘肃政府采购网.py

@@ -0,0 +1,198 @@
+# -*- coding: utf-8 -*-
+"""
+Created on 2021-12-01 16:37:53
+---------
+@summary:甘肃政府采购网.py
+---------
+@author: 马国鹏
+"""
+import sys
+sys.path.append('/app/spiders/sword_feapder/FworkSpider')
+import feapder
+from items.spider_item import DataBakItem, MgpListItem
+from untils.proxy_pool import ProxyPool
+from feapder.dedup import Dedup
+from collections import namedtuple
+import time
+from lxml import etree
+import re
+
+
+class Gszfcg(feapder.Spider):
+    # 自定义数据库,若项目中有setting.py文件,此自定义可删除
+    def start_callback(self):
+        self.count = 0
+        self.prox_pool = ProxyPool()
+        self.cookie = None
+        self.host = 'http://www.ccgp-gansu.gov.cn/'
+        Menu = namedtuple('Menu', ['channel', 'code', "parse", 'render_time', 'url', 'crawl_page'])
+
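+        # each Menu carries the name of its own parse callback (resolved via eval() in start_requests)
+        # plus a render_time used for the selenium-rendered (render=True) list requests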
+        self.menus = [
+            Menu('定点采购', 'a_gszfcgw_ddcg', "self.parse_num1", 2,
+                 "http://www.ccgp-gansu.gov.cn/web/article/142/{crawl_page}/index.htm", 1),
+            Menu('协议供货-公告栏', 'a_gszfcgw_xygh_ggl', "self.parse_num3",2,
+                 "http://www.ccgp-gansu.gov.cn/web/article/13001/{crawl_page}/index.htm", 1),
+            # Menu('协议供货定点采购合同', 'a_gszfcgw_xyghddcght',  "self.parse_num1",2, "Notice", 1),
+            Menu('招标项目合同', 'a_gszfcgw_zbxmht', "self.parse_num1", 2,
+                 "http://www.ccgp-gansu.gov.cn/web/contract/{crawl_page}/index.htm?contractsInfo.id=d0", 1),
+            Menu('最新标讯', 'a_gszfcgw_zxbx', "self.parse_num2", 2,
+                 "http://www.ccgp-gansu.gov.cn/web/articlenews/1/{crawl_page}/index.htm?articleSearchInfo.days=21&articleSearchInfo.division=d0",
+                 1),
+            Menu('综合查询-全部', 'gs_gszfcgw_zhcx_qb', "self.parse",2,
+                 "http://www.ccgp-gansu.gov.cn/web/doSearchmxarticlelssj.action", 1),
+        ]
+
+    def start_requests(self):
+        for menu in self.menus:
+            for page in range(menu.crawl_page):
+                url = menu.url.format(crawl_page=page*10)
+                print(url)
+                yield feapder.Request(url=url, item=menu._asdict(), render=True, callback=eval(menu.parse),render_time=2)
+
+
+    def parse(self, request, response):
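+        # the 综合查询 page only fills its result list after the search button is clicked, so this menu
+        # is fetched with render=True, the button is clicked via the webdriver and the resulting
+        # page_source is re-parsed with lxml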
+        browser = response.browser
+        browser.find_element_by_name("button").click()
+        # self.cookie = response.cookies
+        smenu = request.item
+        response = etree.HTML(browser.page_source)
+        dedup = Dedup(Dedup.BloomFilter)
+        href_list = []
+        for info in response.xpath("//ul[@class='Expand_SearchSLisi']/li"):
+            title = info.xpath('./a/text()')[0]
+            href = self.host + info.xpath('./a/@href')[0]
+            create_time = re.findall(r'\| 发布时间:(.*?) \|', etree.tounicode(info))[0]
+
+            item_data = DataBakItem()  # 存储数据的管道
+            item_data.href = href  # 标书链接
+            item_data.channel = smenu.get("channel")  # 最上方定义的抓取栏目 (编辑器定的)
+            item_data.spidercode = smenu.get("code")  # 最上方定义的爬虫code(编辑器定的)
+            item_data.title = title  # 标题
+            item_data.publishtime = create_time  # 标书发布时间
+            item_data.site = "甘肃政府采购网"
+            item_data.area = "甘肃省"  # 城市默认:全国
+            item_data.city = ""  # 城市 默认为空
+            ss = dedup.filter_exist_data([href])
+            if ss == []:
+                continue
+            list_item = MgpListItem()
+            list_item.parse = "self.detail_get"
+            list_item.parser_name = "details_firefox"
+            list_item.item = item_data.to_dict
+            list_item.deal_detail = ["//div[@class='articleCon']"]
+            list_item.parse_url = href
+            href_list.append(href)
+            yield list_item
+        dedup.add(href_list)
+
+    def parse_num1(self, request, response):
+        menu = request.item
+        dedup = Dedup(Dedup.BloomFilter)
+        href_list = []
+        for info in response.xpath("//ul[@class='newsList']/li"):
+            title = info.xpath('./span[2]/a/text()').extract_first()
+            if title is None:
+                continue
+            href = info.xpath('./span[2]/a/@href').extract_first()
+            create_time = info.xpath('./span[1]/text()').extract_first().strip() + ' 00:00:00'
+
+            item_data = DataBakItem()  # 存储数据的管道
+            item_data.href = href  # 标书链接
+            item_data.channel = menu.get("channel")  # 最上方定义的抓取栏目 (编辑器定的)
+            item_data.spidercode = menu.get("code")  # 最上方定义的爬虫code(编辑器定的)
+            item_data.title = title  # 标题
+            item_data.publishtime = create_time  # 标书发布时间
+            item_data.site = "甘肃政府采购网"
+            item_data.area = "甘肃省"  # 城市默认:全国
+            item_data.city = ""  # 城市 默认为空
+            ss = dedup.filter_exist_data([href])
+            if ss == []:
+                continue
+            list_item = MgpListItem()
+            list_item.parse = "self.detail_get"
+            list_item.parser_name = "details_firefox"
+            list_item.item = item_data.to_dict
+            list_item.deal_detail = ["//div[@class='mBd']"]
+            list_item.parse_url = href
+            href_list.append(href)
+            yield list_item
+        dedup.add(href_list)
+    def parse_num2(self, request, response):
+        menu = request.item
+        cookie = response.cookies
+        info_list = response.xpath("//*[@class='mBd']/ul/li")
+        if not info_list and menu.get("render_time")<5:
+            yield feapder.Request(url=request.url, item=menu,callback=self.parse_num2,cookies=response.cookies)
+        dedup = Dedup(Dedup.BloomFilter)
+        href_list = []
+        for info in info_list:
+            title = info.xpath('./a/text()').extract_first()
+            if title is None:
+                continue
+            href = info.xpath('./a/@href').extract_first()
+            create_time = info.xpath('./p/span/text()').extract_first().strip()
+            create_time = re.findall('审核时间:(.*?) \|',create_time)[0]
+
+            item_data = DataBakItem()  # 存储数据的管道
+            item_data.href = href  # 标书链接
+            item_data.channel = menu.get("channel")  # 最上方定义的抓取栏目 (编辑器定的)
+            item_data.spidercode = menu.get("code")  # 最上方定义的爬虫code(编辑器定的)
+            item_data.title = title  # 标题
+            item_data.publishtime = create_time  # 标书发布时间
+            item_data.site = "甘肃政府采购网"
+            item_data.area = "甘肃省"  # 城市默认:全国
+            item_data.city = ""  # 城市 默认为空
+            ss = dedup.filter_exist_data([href])
+            if ss == []:
+                continue
+            list_item = MgpListItem()
+            list_item.parse = "self.detail_get"
+            list_item.parser_name = "details_firefox"
+            list_item.item = item_data.to_dict
+            list_item.deal_detail = ["//div[@class='mBd']"]
+            list_item.parse_url = href
+            href_list.append(href)
+            yield list_item
+        dedup.add(href_list)
+    def parse_num3(self, request, response):
+        menu = request.item
+        info_list = response.xpath("//*[@class='mBd']/ul/li")
+        if not info_list and menu.get("render_time")<5:
+            yield feapder.Request(url=request.url, item=menu,callback=self.parse_num3,cookies=response.cookies)
+
+        dedup = Dedup(Dedup.BloomFilter)
+        href_list = []
+        for info in info_list:
+            title = info.xpath('./span[2]/a/text()').extract_first()
+            if title is None:
+                continue
+            href = info.xpath('./span[2]/a/@href').extract_first()
+            create_time = info.xpath('./span[1]/text()').extract_first().strip() + ' 00:00:00'
+
+            item_data = DataBakItem()  # 存储数据的管道
+            item_data.href = href  # 标书链接
+            item_data.channel = menu.get("channel")  # 最上方定义的抓取栏目 (编辑器定的)
+            item_data.spidercode = menu.get("code")  # 最上方定义的爬虫code(编辑器定的)
+            item_data.title = title  # 标题
+            item_data.publishtime = create_time  # 标书发布时间
+            item_data.site = "甘肃政府采购网"
+            item_data.area = "甘肃省"  # 城市默认:全国
+            item_data.city = ""  # 城市 默认为空
+            ss = dedup.filter_exist_data([href])
+            if ss == []:
+                continue
+            list_item = MgpListItem()
+            list_item.parse = "self.detail_get"
+            list_item.parser_name = "details_firefox"
+            list_item.item = item_data.to_dict
+            list_item.deal_detail = ["//div[@class='mBd']"]
+            list_item.parse_url = href
+            href_list.append(href)
+            yield list_item
+        dedup.add(href_list)
+
+
+
+
+if __name__ == "__main__":
+    Gszfcg(redis_key="magp:gszfcg").start()

+ 106 - 0
spiders/马国鹏/福建省政府采购网.py

@@ -0,0 +1,106 @@
+# -*- coding: utf-8 -*-
+"""
+Created on 2022-01-06 16:37:37
+---------
+@summary: 福建省政府采购网.py
+---------
+@author: FworkSpider
+"""
+import sys
+sys.path.append('/app/spiders/sword_feapder/FworkSpider')
+import feapder
+from items.spider_item import DataBakItem,MgpListItem
+from feapder.dedup import Dedup
+from collections import namedtuple
+import random
+import requests
+from untils.chaojiying import Chaojiying_Client
+
+class Fjszfcgw(feapder.Spider):
+    str = '天仙丛付印五仔六五乐四甩瓜九七一失令斤册禾十仗丘非田白付乐仪八代匆乎二们句生四用'
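+    # the list URL expects a 4-character verifycode; the first request fills it with 4 random characters
+    # from this pool, and when the server answers with an empty table parse() falls back to solving the
+    # real captcha through Chaojiying (behaviour inferred from the code below)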
+    def start_callback(self):
+         Menu = namedtuple('Menu', ['channel', 'code', 'types', 'crawl_page'])
+
+         self.menus = [
+             Menu('项目公告-全部', 'fj_fjszfcgw_xmgg_qb', "自定义参数", 10),
+         ]
+    def start_requests(self):
+         for menu in self.menus:
+             # for page in range(1,menu.crawl_page+1):
+                 start_url = f'http://www.ccgp-fujian.gov.cn/3500/noticelist/e8d2cd51915e4c338dc1c6ee2f02b127/?page={1}&verifycode={"".join(random.sample(self.str,4))}'
+                 yield feapder.Request(url=start_url,use_session=True, item=menu._asdict(),proxies=False,random_user_agent=False,page=1)
+
+    def parse(self, request, response):
+        # print(response.text)
+        menu = request.item
+        dedup = Dedup(Dedup.BloomFilter)
+        href_list = []
+        info_list = response.xpath("//tbody/tr")
+        if info_list == []:
+            img_url = 'http://www.ccgp-fujian.gov.cn/noticeverifycode/?1'
+            print('出现验证码')
+            img_res = requests.get(img_url)
+            with open('a.jpg','wb+') as f:
+                f.write(img_res.content)
+            chaojiying = Chaojiying_Client('ddddjy', 'ddddjy2021', '超级鹰')  # 用户中心>>软件ID 生成一个替换 96001
+            im = open('a.jpg', 'rb').read()  # 本地图片文件路径 来替换 a.jpg 有时WIN系统须要//
+            # print(chaojiying.PostPic(im, 1902))
+            res = chaojiying.PostPic(im, 2004)
+            print(res)
+            if res.get("err_no") != 0:
+                chaojiying.ReportError(res.get("pic_id"))
+            code = res.get("pic_str")
+            url = request.url[:-4]+code
+            yield feapder.Request(url=url, item=menu,proxies=False,random_user_agent=False,page=request.page)
+            return
+        for info in info_list:
+            href = info.xpath('./td/a/@href').extract_first()
+            title = info.xpath('./td/a/text()').extract_first()
+            create_time = info.xpath('./td[5]/text()').extract_first()
+
+            data_item = DataBakItem()  # 存储数据的管道
+            data_item.href = href  # 标书链接
+            data_item.channel = menu.get("channel")  # 最上方定义的抓取栏目 (编辑器定的)
+            data_item.spidercode = menu.get("code")  # 最上方定义的爬虫code(编辑器定的)
+            data_item.title = title  # 标题
+            data_item.publishtime = create_time  # 标书发布时间
+            data_item.site = "福建省政府采购网"
+            data_item.area = "福建"  # 城市默认:全国
+            data_item.city = ""  # 城市 默认为空
+            ss = dedup.filter_exist_data([href])
+            if ss == []:
+                continue
+            list_item =  MgpListItem()
+            list_item.parse = "self.detail_get"
+            list_item.parser_name = "details"
+            list_item.item = data_item.to_dict
+            list_item.deal_detail = ['//div[@class="notice-con"]']
+            list_item.proxies = False
+            list_item.parse_url = href
+            list_item.pri = 1
+            list_item.files={
+                "list_xpath":'//div[@class="notice-foot"]/a',
+                "url_xpath":'./@href',
+                "name_xpath":'./text()',
+                "files_type":('zip','doxc','ftp'),
+                "file_type":'zip',
+                "url_key":'attach',
+            }
+            href_list.append(href)
+            yield list_item
+        dedup.add(href_list)
+        page_url =  f'http://www.ccgp-fujian.gov.cn/3500/noticelist/e8d2cd51915e4c338dc1c6ee2f02b127/?page={request.page+1}&verifycode={"".join(random.sample(self.str,4))}'
+        if request.page < menu.get("crawl_page"):
+            yield feapder.Request(url=page_url, use_session=True, item=menu, proxies=False,
+                                  random_user_agent=False, page=request.page+1)
+
+    def end_callback(self):
+        print("爬虫结束")
+
+    def download_midware(self, request):
+        request.headers={
+            "user-agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/96.0.4664.45 Safari/537.36"
+        }
+        return request
+
+if __name__ == "__main__":
+    Fjszfcgw(redis_key="FworkSpider:Fjszfcgw").start()

+ 75 - 0
spiders/马国鹏/苏州弘创招投标代理有限公司.py

@@ -0,0 +1,75 @@
+# -*- coding: utf-8 -*-
+"""
+Created on 2021-12-27 11:24:31
+---------
+@summary: 苏州弘创招投标代理有限公司
+---------
+@author: topnet
+"""
+import sys
+sys.path.append('/app/spiders/sword_feapder/FworkSpider')
+import feapder
+from items.spider_item import DataBakItem,MgpListItem
+from untils.proxy_pool import ProxyPool
+from feapder.dedup import Dedup
+from collections import namedtuple
+
+
+class Szhcztbdl(feapder.Spider):
+
+    def start_callback(self):
+         self.count = 0
+         Menu = namedtuple('Menu', ['channel', 'code', 'types', 'crawl_page'])
+
+         self.menus = [
+             # Menu('招标信息', 'a_szhcztbdlyxgs_zbxx', "Notice", 27),
+             Menu('招标信息', 'a_szhcztbdlyxgs_zbxx', "Notice", 1),
+         ]
+    def start_requests(self):
+        for menu in self.menus:
+            for page in range(1,menu.crawl_page+1):
+                start_url = f'http://www.szhczbdl.com/index.php?c=category&id=2&page={page}'
+                print(start_url)
+                yield feapder.Request(url=start_url, item=menu._asdict())
+
+    def parse(self, request, response):
+        # print(response.text)
+        menu = request.item
+        self.count += 1   # 一个计数器
+        dedup = Dedup(Dedup.BloomFilter)
+        href_list = []
+        info_list = response.xpath('//div[@class="padding-big"]/div')
+        for info in info_list:
+            href = info.xpath('./a/@href').extract_first()
+            title = info.xpath('./a/@title').extract_first()
+            create_time = info.xpath('./span/text()').extract_first()
+            if href is None or create_time is None:
+                continue
+            create_time = create_time + ":00"
+            data_item = DataBakItem()  # item holding the notice record for the pipeline
+            data_item.href = href  # notice detail link
+            data_item.channel = menu.get("channel")  # crawl channel defined in the menus above
+            data_item.spidercode = menu.get("code")  # spider code defined in the menus above
+            data_item.title = title  # notice title
+            data_item.publishtime = create_time  # notice publish time
+            data_item.site = "苏州弘创招投标代理有限公司"
+            data_item.area = "江苏省"  # province/area; defaults to 全国
+            data_item.city = "苏州市"  # city; defaults to empty
+            ss = dedup.filter_exist_data([href])
+            if ss == []:
+                continue
+            list_item =  MgpListItem()
+            list_item.parse = "self.detail_get"
+            list_item.parser_name = "details"
+            list_item.item = data_item.to_dict
+            list_item.deal_detail = ['//div[@class="padding-big"]']
+            list_item.parse_url = href
+            href_list.append(href)
+            yield list_item
+        dedup.add(href_list)
+
+    def end_callback(self):
+        print("爬虫结束")
+
+if __name__ == "__main__":
+    Szhcztbdl(redis_key="fwork:Szhcztbdl").start()

+ 240 - 0
spiders/马国鹏/贵阳市公共资源交易监管网.py

@@ -0,0 +1,240 @@
+# -*- coding: utf-8 -*-
+"""
+Created on 2021-12-21 14:34:16
+---------
+@summary: 贵阳市公共资源交易监管网
+---------
+@author: 马国鹏
+"""
+import sys
+sys.path.append('/app/spiders/sword_feapder/FworkSpider')
+import feapder
+from items.spider_item import DataBakItem,MgpListItem
+from untils.proxy_pool import ProxyPool
+from feapder.dedup import Dedup
+from collections import namedtuple
+
+
+class Gysggzy(feapder.Spider):
+
+    def start_callback(self):
+         self.count = 0
+         Menu = namedtuple('Menu', ['channel', 'code', 'url', 'crawl_page'])
+
+         self.menus = [
+             Menu('政府采购投诉处理及处罚', 'gz_gysggzyjyjgw_zfcg_tscljcf',
+                  'http://ggzy.guiyang.gov.cn/zfcg/tscljcf/index.html', 1),
+             Menu('工程建设直接发包公告', 'gz_gysggzyjyjgw_gcjs_zjfbgg',
+                  'http://ggzy.guiyang.gov.cn/gcjs/zjfb/', 1),
+             # Menu('工程建设无效投标情况公示', 'gz_gysggzyjyjgw_gcjs_wxtbqkgs',
+             #      'http://ggzy.guiyang.gov.cn/gcjs/wxtbqkgs/', 1),
+             Menu('工程建设中标结果公告', 'gz_gysggzyjyjgw_gcjs_zbjggg',
+                  'http://ggzy.guiyang.gov.cn/gcjs/zbjggg_5864877/', 1),
+             # Menu('工程建设合同签订公示', 'gz_gysggzyjyjgw_gcjs_htqdgs',
+             #      'http://ggzy.guiyang.gov.cn/gcjs/htqdgs/', 1),
+             Menu('工程建设直接发包', 'gz_gysggzyjyjgw_gcjs_zjfb',
+                  'http://ggzy.guiyang.gov.cn/gcjs/zjfb/', 1),
+             Menu('国有企业采购合同', 'gz_gysggzyjyjgw_gyqycg_ht',
+                  'http://ggzy.guiyang.gov.cn/qt/gyqycg/ht/', 1),
+
+
+             Menu('政府采购更正公告', 'gz_gysggzyjyjgw_zfcg_gzgg',
+                  'http://ggzy.guiyang.gov.cn/zfcg/bgcqgg/hw_5372423/index.html', 1),
+             Menu('政府采购更正公告', 'gz_gysggzyjyjgw_zfcg_gzgg',
+                  'http://ggzy.guiyang.gov.cn/zfcg/bgcqgg/fw_5372424/index.html', 1),
+             Menu('政府采购更正公告', 'gz_gysggzyjyjgw_zfcg_gzgg',
+                  'http://ggzy.guiyang.gov.cn/zfcg/bgcqgg/gc_5372425/index.html', 1),
+
+             Menu('政府采购中标公示', 'gz_gysggzyjyjgw_zfcg_zbgs',
+                  'http://ggzy.guiyang.gov.cn/zfcg/zbcjjggg/hw_5372435/index.html', 1),
+             Menu('政府采购中标公示', 'gz_gysggzyjyjgw_zfcg_zbgs',
+                  'http://ggzy.guiyang.gov.cn/zfcg/zbcjjggg/fw_5372436/index.html', 1),
+             Menu('政府采购中标公示', 'gz_gysggzyjyjgw_zfcg_zbgs',
+                  'http://ggzy.guiyang.gov.cn/zfcg/zbcjjggg/gc_5372437/index.html', 1),
+
+             Menu('政府采购招标公告', 'gz_gysggzyjyjgw_zfcg_zbgg',
+                  'http://ggzy.guiyang.gov.cn/zfcg/zbgg_5372418/hw/index.html', 1),
+             Menu('政府采购招标公告', 'gz_gysggzyjyjgw_zfcg_zbgg',
+                  'http://ggzy.guiyang.gov.cn/zfcg/zbgg_5372418/fw/index.html', 1),
+             Menu('政府采购招标公告', 'gz_gysggzyjyjgw_zfcg_zbgg',
+                  'http://ggzy.guiyang.gov.cn/zfcg/zbgg_5372418/gc/index.html', 1),
+
+
+             Menu('政府采购-废标公告', 'gz_gysggzyjyjgw_zfcg_fbgg',
+                  'http://ggzy.guiyang.gov.cn/zfcg/fbgg/hw_5372439/index.html', 1),
+             Menu('政府采购-废标公告', 'gz_gysggzyjyjgw_zfcg_fbgg',
+                  'http://ggzy.guiyang.gov.cn/zfcg/fbgg/fw_5372440/index.html', 1),
+             Menu('政府采购-废标公告', 'gz_gysggzyjyjgw_zfcg_fbgg',
+                  'http://ggzy.guiyang.gov.cn/zfcg/fbgg/gc_5372441/index.html', 1),
+
+             Menu('政府采购评标结果', 'gz_gysggzyjyjgw_zfcg_pbjg',
+                  'http://ggzy.guiyang.gov.cn/zfcg/psjg/hw_5372431/index.html', 1),
+             Menu('政府采购评标结果', 'gz_gysggzyjyjgw_zfcg_pbjg',
+                  'http://ggzy.guiyang.gov.cn/zfcg/psjg/fw_5372432/index.html', 1),
+             Menu('政府采购评标结果', 'gz_gysggzyjyjgw_zfcg_pbjg',
+                  'http://ggzy.guiyang.gov.cn/zfcg/psjg/gc_5372433/index.html', 1),
+
+             Menu('政府采购单一来源', 'gz_gysggzyjyjgw_zfcg_dyly',
+                  'http://ggzy.guiyang.gov.cn/zfcg/dylygs/hw_5372447/index.html', 1),
+             Menu('政府采购单一来源', 'gz_gysggzyjyjgw_zfcg_dyly',
+                  'http://ggzy.guiyang.gov.cn/zfcg/dylygs/fw_5372448/index.html', 1),
+             Menu('政府采购单一来源', 'gz_gysggzyjyjgw_zfcg_dyly',
+                  'http://ggzy.guiyang.gov.cn/zfcg/dylygs/gc_5372449/index.html', 1),
+
+             Menu('政府采购终止公告', 'gz_gysggzyjyjgw_zfcg_zzgg',
+                  'http://ggzy.guiyang.gov.cn/zfcg/zzgg/hw_5372427/index.html', 1),
+             Menu('政府采购终止公告', 'gz_gysggzyjyjgw_zfcg_zzgg',
+                  'http://ggzy.guiyang.gov.cn/zfcg/zzgg/fw_5372428/index.html', 1),
+             Menu('政府采购终止公告', 'gz_gysggzyjyjgw_zfcg_zzgg',
+                  'http://ggzy.guiyang.gov.cn/zfcg/zzgg/gc_5372429/index.html', 1),
+
+             Menu('政府采购合同公告及管理', 'gz_gysggzyjyjgw_zfcg_htggjgl',
+                  'http://ggzy.guiyang.gov.cn/zfcg/zfcghtgg/hw_5372443/index.html', 1),
+             Menu('政府采购合同公告及管理', 'gz_gysggzyjyjgw_zfcg_htggjgl',
+                  'http://ggzy.guiyang.gov.cn/zfcg/zfcghtgg/fw_5372444/index.html', 1),
+             Menu('政府采购合同公告及管理', 'gz_gysggzyjyjgw_zfcg_htggjgl',
+                  'http://ggzy.guiyang.gov.cn/zfcg/zfcghtgg/gc_5372445/index.html', 1),
+
+
+             Menu('工程建设招标公告', 'gz_gysggzyjyjgw_gcjs_zbgg',
+                  'http://ggzy.guiyang.gov.cn/gcjs/zbgg_5372453/jl/index.html', 1),
+             Menu('工程建设招标公告', 'gz_gysggzyjyjgw_gcjs_zbgg',
+                  'http://ggzy.guiyang.gov.cn/gcjs/zbgg_5372453/kc/index.html', 1),
+             Menu('工程建设招标公告', 'gz_gysggzyjyjgw_gcjs_zbgg',
+                  'http://ggzy.guiyang.gov.cn/gcjs/zbgg_5372453/sg/index.html', 1),
+             Menu('工程建设招标公告', 'gz_gysggzyjyjgw_gcjs_zbgg',
+                  'http://ggzy.guiyang.gov.cn/gcjs/zbgg_5372453/sj/index.html', 1),
+             Menu('工程建设招标公告', 'gz_gysggzyjyjgw_gcjs_zbgg',
+                  'http://ggzy.guiyang.gov.cn/gcjs/zbgg_5372453/sbcg/index.html', 1),
+             Menu('工程建设招标公告', 'gz_gysggzyjyjgw_gcjs_zbgg',
+                  'http://ggzy.guiyang.gov.cn/gcjs/zbgg_5372453/zcb/index.html', 1),
+
+             Menu('工程建设流标公示', 'gz_gysggzyjyjgw_gcjs_lbgs',
+                  'http://ggzy.guiyang.gov.cn/gcjs/lbgs/jl_5372475/index.html', 1),
+             Menu('工程建设流标公示', 'gz_gysggzyjyjgw_gcjs_lbgs',
+                  'http://ggzy.guiyang.gov.cn/gcjs/lbgs/kc_5372476/index.html', 1),
+             Menu('工程建设流标公示', 'gz_gysggzyjyjgw_gcjs_lbgs',
+                  'http://ggzy.guiyang.gov.cn/gcjs/lbgs/sg_5372477/index.html', 1),
+             Menu('工程建设流标公示', 'gz_gysggzyjyjgw_gcjs_lbgs',
+                  'http://ggzy.guiyang.gov.cn/gcjs/lbgs/sj_5372478/index.html', 1),
+             Menu('工程建设流标公示', 'gz_gysggzyjyjgw_gcjs_lbgs',
+                  'http://ggzy.guiyang.gov.cn/gcjs/lbgs/sbcg_5372479/index.html', 1),
+             Menu('工程建设流标公示', 'gz_gysggzyjyjgw_gcjs_lbgs',
+                  'http://ggzy.guiyang.gov.cn/gcjs/lbgs/zcb_5372480/index.html', 1),
+
+             Menu('工程建设拦标价公示', 'gz_gysggzyjyjgw_gcjs_lbjkgs',
+                  'http://ggzy.guiyang.gov.cn/gcjs/lbjgs/jl_5372482/index.html', 1),
+             Menu('工程建设拦标价公示', 'gz_gysggzyjyjgw_gcjs_lbjkgs',
+                  'http://ggzy.guiyang.gov.cn/gcjs/lbjgs/kc_5372483/index.html', 1),
+             Menu('工程建设拦标价公示', 'gz_gysggzyjyjgw_gcjs_lbjkgs',
+                  'http://ggzy.guiyang.gov.cn/gcjs/lbjgs/sg_5372484/index.html', 15),
+             Menu('工程建设拦标价公示', 'gz_gysggzyjyjgw_gcjs_lbjkgs',
+                  'http://ggzy.guiyang.gov.cn/gcjs/lbjgs/sj_5372485/index.html', 1),
+             Menu('工程建设拦标价公示', 'gz_gysggzyjyjgw_gcjs_lbjkgs',
+                  'http://ggzy.guiyang.gov.cn/gcjs/lbjgs/sbcg_5372486/index.html', 1),
+             Menu('工程建设拦标价公示', 'gz_gysggzyjyjgw_gcjs_lbjkgs',
+                  'http://ggzy.guiyang.gov.cn/gcjs/lbjgs/zcb_5372487/index.html', 1),
+
+             Menu('工程建设终止暂停公告', 'gz_gysggzyjyjgw_gcjs_zzztgg',
+                  'http://ggzy.guiyang.gov.cn/gcjs/zzgg_5372488/jl_5372489/index.html', 1),
+             Menu('工程建设终止暂停公告', 'gz_gysggzyjyjgw_gcjs_zzztgg',
+                  'http://ggzy.guiyang.gov.cn/gcjs/zzgg_5372488/kc_5372490/index.html', 1),
+             Menu('工程建设终止暂停公告', 'gz_gysggzyjyjgw_gcjs_zzztgg',
+                  'http://ggzy.guiyang.gov.cn/gcjs/zzgg_5372488/sg_5372491/index.html', 1),
+             Menu('工程建设终止暂停公告', 'gz_gysggzyjyjgw_gcjs_zzztgg',
+                  'http://ggzy.guiyang.gov.cn/gcjs/zzgg_5372488/sj_5372492/index.html', 1),
+             Menu('工程建设终止暂停公告', 'gz_gysggzyjyjgw_gcjs_zzztgg',
+                  'http://ggzy.guiyang.gov.cn/gcjs/zzgg_5372488/sbcg_5372493/index.html', 1),
+             Menu('工程建设终止暂停公告', 'gz_gysggzyjyjgw_gcjs_zzztgg',
+                  'http://ggzy.guiyang.gov.cn/gcjs/zzgg_5372488/zcb_5372494/index.html', 1),
+
+             Menu('国有企业采购招标公告', 'gz_gysggzyjyjgw_gyqycg_zbgg',
+                  'http://ggzy.guiyang.gov.cn/qt/gyqycg/zbgg_5372695/hw_5372696/index.html', 1),
+             Menu('国有企业采购招标公告', 'gz_gysggzyjyjgw_gyqycg_zbgg',
+                  'http://ggzy.guiyang.gov.cn/qt/gyqycg/zbgg_5372695/fw_5372697/index.html', 1),
+             Menu('国有企业采购招标公告', 'gz_gysggzyjyjgw_gyqycg_zbgg',
+                  'http://ggzy.guiyang.gov.cn/qt/gyqycg/zbgg_5372695/gc_5372698/index.html', 1),
+
+             Menu('国有企业采购变更澄清公告', 'gz_gysggzyjyjgw_gyqycg_bgcqgg',
+                  'http://ggzy.guiyang.gov.cn/qt/gyqycg/bgcqgg/hw_5372700/index.html', 1),
+             Menu('国有企业采购变更澄清公告', 'gz_gysggzyjyjgw_gyqycg_bgcqgg',
+                  'http://ggzy.guiyang.gov.cn/qt/gyqycg/bgcqgg/fw_5372701/index.html', 1),
+             Menu('国有企业采购变更澄清公告', 'gz_gysggzyjyjgw_gyqycg_bgcqgg',
+                  'http://ggzy.guiyang.gov.cn/qt/gyqycg/bgcqgg/gc_5372702/index.html', 1),
+
+             Menu('国有企业采购评审结果', 'gz_gysggzyjyjgw_gyqycg_psjg',
+                  'http://ggzy.guiyang.gov.cn/qt/gyqycg/psjg_5372707/hw_5372708/index.html', 1),
+             Menu('国有企业采购评审结果', 'gz_gysggzyjyjgw_gyqycg_psjg',
+                  'http://ggzy.guiyang.gov.cn/qt/gyqycg/psjg_5372707/fw_5372709/index.html', 1),
+             Menu('国有企业采购评审结果', 'gz_gysggzyjyjgw_gyqycg_psjg',
+                  'http://ggzy.guiyang.gov.cn/qt/gyqycg/psjg_5372707/gc_5372710/index.html', 1),
+
+             Menu('国有企业采购中标成交结果公告', 'gz_gysggzyjyjgw_gyqycg_zbcjjggg',
+                  'http://ggzy.guiyang.gov.cn/qt/gyqycg/zbcjjggg/hw_5372712/index.html', 1),
+             Menu('国有企业采购中标成交结果公告', 'gz_gysggzyjyjgw_gyqycg_zbcjjggg',
+                  'http://ggzy.guiyang.gov.cn/qt/gyqycg/zbcjjggg/fw_5372713/index.html', 1),
+             Menu('国有企业采购中标成交结果公告', 'gz_gysggzyjyjgw_gyqycg_zbcjjggg',
+                  'http://ggzy.guiyang.gov.cn/qt/gyqycg/zbcjjggg/gc_5372714/index.html', 1),
+
+
+             Menu('国有企业采购废标公告', 'gz_gysggzyjyjgw_gyqycg_fbgg',
+                  'http://ggzy.guiyang.gov.cn/qt/gyqycg/fbgg_5372715/hw_5372716/index.html', 1),
+             Menu('国有企业采购废标公告', 'gz_gysggzyjyjgw_gyqycg_fbgg',
+                  'http://ggzy.guiyang.gov.cn/qt/gyqycg/fbgg_5372715/fw_5372717/index.html', 1),
+             Menu('国有企业采购废标公告', 'gz_gysggzyjyjgw_gyqycg_fbgg',
+                  'http://ggzy.guiyang.gov.cn/qt/gyqycg/fbgg_5372715/gc_5372718/index.html', 1),
+
+
+         ]
+    def start_requests(self):
+         for menu in self.menus:
+            for page in range(menu.crawl_page):
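+                # page 0 is the column's index page; later pages follow the index_{n}.html naming used by this site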
+                if page == 0:
+                    start_url = menu.url
+                else:
+                    start_url = menu.url.replace("index.html",f"index_{page}.html")
+                print(start_url)
+                yield feapder.Request(url=start_url, item=menu._asdict())
+
+    def parse(self, request, response):
+        menu = request.item
+        self.count += 1   # page counter
+        dedup = Dedup(Dedup.BloomFilter)
+        href_list = []
+        info_list = response.xpath("//ul[@class='newsList']/div/li")
+        for info in info_list:
+            href = info.xpath("./a/@href").extract_first()
+
+            title = info.xpath("./a/@title").extract_first()
+            create_time = info.xpath("./a/span/text()").extract_first()
+
+            data_item = DataBakItem()  # item holding the notice record for the pipeline
+            data_item.href = href  # notice detail link
+            data_item.channel = menu.get("channel")  # crawl channel defined in the menus above
+            data_item.spidercode = menu.get("code")  # spider code defined in the menus above
+            data_item.title = title  # notice title
+            data_item.publishtime = create_time  # notice publish time
+            data_item.site = "贵阳市公共资源交易监管网"
+            data_item.area = "贵州省"  # province/area; defaults to 全国
+            data_item.city = "贵阳市"  # city; defaults to empty
+            ss = dedup.filter_exist_data([href])
+            if ss == []:
+                continue
+            list_item =  MgpListItem()
+            list_item.parse = "self.detail_get"
+            list_item.parser_name = "details"
+            list_item.item = data_item.to_dict
+            list_item.deal_detail = ['//font[@id="Zoom"]']
+            list_item.parse_url = href
+            href_list.append(href)
+            yield list_item
+        dedup.add(href_list)
+
+    def end_callback(self):
+        print("爬虫结束")
+
+if __name__ == "__main__":
+    Gysggzy(redis_key="fwork:gysggzy").start()

+ 94 - 0
spiders/马国鹏/黔云招采电子招标采购交易平台.py

@@ -0,0 +1,94 @@
+# -*- coding: utf-8 -*-
+"""
+Created on 2022-01-10 09:47:56
+---------
+@summary:	黔云招采电子招标采购交易平台
+---------
+@author: topnet
+"""
+import sys
+sys.path.append('/app/spiders/sword_feapder/FworkSpider')
+import feapder
+from items.spider_item import DataBakItem,MgpListItem,ListItem
+from feapder.dedup import Dedup
+from collections import namedtuple
+
+
+class Qyzcdzzbcgjypt(feapder.Spider):
+
+    def start_callback(self):
+         self.count = 0
+         self.site='黔云招采电子招标采购交易平台'
+         Menu = namedtuple('Menu', ['channel', 'code', 'types', 'crawl_page'])
+
+         self.menus = [
+             Menu('询价采购-采购公告', 'gz_qyzcdzzbcgjypt_xjcg_cggg', "Notice", 1),
+         ]
+    def start_requests(self):
+         for menu in self.menus:
+             for page in range(1,menu.crawl_page+1):
+                 start_url = 'https://www.e-qyzc.com/gg/toXinXiList?gongGaoType=5'
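+                 # the list endpoint pages via POST form data; the remaining filter fields are left empty (no filtering)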
+                 data = {
+                    "currentPage": str(page),
+                    "xmBH": "",
+                    "ggName": "",
+                    "hangYeType": "",
+                    "zbrName": "",
+                    "zbdlName": ""
+                 }
+                 yield feapder.Request(url=start_url, item=menu._asdict(), proxies=False, data=data,method="POST")
+
+    def parse(self, request, response):
+        menu = request.item
+        self.count += 1   # page counter
+        dedup = Dedup(Dedup.BloomFilter)
+        href_list = []
+        info_list = response.xpath('//table[@id="p1"]/tr[position()>1]')
+        for info in info_list:
+            href = info.xpath('./td/a/@href').extract_first().strip()
+            title = info.xpath('./td/a/text()').extract_first().strip()
+            create_time = info.xpath('./td[5]/text()').extract_first().strip()
+
+            data_item = DataBakItem()  # item holding the notice record for the pipeline
+            data_item.href = href  # notice detail link
+            data_item.channel = menu.get("channel")  # crawl channel defined in the menus above
+            data_item.spidercode = menu.get("code")  # spider code defined in the menus above
+            data_item.title = title  # notice title
+            data_item.publishtime = create_time  # notice publish time
+            data_item.site = self.site
+            data_item.area = "贵州省"  # province/area; defaults to 全国
+            data_item.city = ""  # city; defaults to empty
+            ss = dedup.filter_exist_data([href])
+            if ss == []:
+                continue
+            list_item =  MgpListItem()
+            list_item.parse = "self.detail_get"
+            list_item.parser_name = "details_firefox"
+            list_item.item = data_item.to_dict
+            list_item.deal_detail = ['//div[@class="page_contect bai_bg"]']
+            if "guid" not in href:
+                continue
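+            # the list link carries a guid query param; rebuild the static detail URL from it for the firefox detail parser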
+            uid = href.split("guid=")[-1].split("&")[0]
+            list_item.parse_url = f"https://www.e-qyzc.com/waiburukou/xjcgGongGao/view/{uid}.html"
+            href_list.append(href)
+            yield list_item
+        dedup.add(href_list)
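+        # ListItem records list-page stats: count = notices on this page, rel_count = new notices actually yielded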
+        list_info = ListItem()
+        list_info.site = self.site
+        list_info.channel = menu.get("channel")
+        list_info.spidercode = menu.get("code")
+        list_info.url = request.url
+        list_info.count = len(info_list)
+        list_info.rel_count = len(href_list)
+        yield list_info
+
+    # def exception_request(self, request, response):
+
+    def end_callback(self):
+        print("爬虫结束")
+
+
+if __name__ == "__main__":
+    Qyzcdzzbcgjypt(redis_key="fwork:Qyzcdzzbcgjypt2").start()