# -*- coding:utf-8 -*-

import requests
from lxml import etree
from fake_useragent import UserAgent
from http import cookiejar
import json
from datetime import datetime
from pymongo import MongoClient
import time
from utils.HttpUtils import HttpUtils

requests.packages.urllib3.disable_warnings()  # 忽略HTTPS安全警告


"""
福建省政府采购网
http://www.ccgp-fujian.gov.cn/index.html
"""

class Test_Get():
    """Scraper for Fujian provincial government procurement notices
    (http://www.ccgp-fujian.gov.cn/index.html), targeting the district
    sites under Ningde (zfcg.czj.ningde.gov.cn).

    Listing pages are scraped row-by-row; each notice's detail page is
    fetched and the contract-party table is stored into MongoDB via the
    module-level global ``db`` (set up in the ``__main__`` block).
    """

    def __init__(self):
        # CookieJar instance intended to hold session cookies.
        # NOTE(review): self.cookie is never attached to any request in this
        # class — presumably leftover scaffolding; confirm before removing.
        self.cookie = cookiejar.CookieJar()
        ua = UserAgent(use_cache_server=False)  # disable the remote UA cache server
        self.headers = {
            # Random User-Agent per instance to reduce the chance of blocking.
            "User-Agent": ua.random
        }
        # Area prefix prepended to every stored record's "area" field.
        self.area = "宁德市-"

    def get_contents(self, pagenum, id):
        """Fetch one listing page for district ``id`` and process every notice row.

        :param pagenum: 1-based page number of the notice list.
        :param id: district code embedded in the site URL (e.g. ``'350922'``).
        """
        postData = {
            "page": pagenum,
            "notice_type": "1d5eac5cd0b14515aacaf2e9aee5f928",
        }

        # Fix: removed a dead assignment of a leftover Zhangzhou URL that was
        # immediately overwritten by the Ningde URL below (copy-paste residue).
        url = f"http://zfcg.czj.ningde.gov.cn/{id}/noticelist/d03180adb4de41acbb063875889f9af1/"
        html = HttpUtils.do_request("GET", url, self.headers, postData)
        selector = etree.HTML(html.text)
        xpath = selector.xpath('//tbody/tr')
        for i in xpath:
            dict_data = dict()
            dict_data['area'] = "".join(i.xpath('./td[1]/text()'))  # region
            dict_data['type'] = "".join(i.xpath('./td[2]/text()'))  # procurement method
            dict_data['company'] = "".join(i.xpath('./td[3]/text()'))  # purchasing unit
            dict_data['title'] = "".join(i.xpath('./td[4]/a/text()'))  # notice title
            dict_data['push_date'] = "".join(i.xpath('./td[5]/text()'))  # publish date
            # Absolute URL of the notice detail page.
            url_child = "http://zfcg.czj.ningde.gov.cn/" + i.xpath('./td[4]/a/@href')[0]
            print(f"=============提取：【{dict_data['title']}】=================")
            time.sleep(0.5)  # throttle requests to be polite to the server
            self.get_detail(dict_data['area'], dict_data['title'], url_child)

    def get_detail(self, area, title, url):
        """Fetch a notice detail page and store the contract-party table.

        Only tables whose first row carries class ``firstRow`` are parsed;
        the extracted record is inserted into MongoDB.

        :param area: region string scraped from the listing row.
        :param title: notice title scraped from the listing row.
        :param url: absolute URL of the detail page.
        """
        ua = UserAgent(use_cache_server=False)  # disable the remote UA cache server
        headers = {
            # Fresh random User-Agent per detail request.
            "User-Agent": ua.random
        }

        html = HttpUtils.do_request("GET", url, headers, "")
        selector = etree.HTML(html.text)
        root = selector.xpath('//table[@border="1"]/tbody')
        for s in root:
            tmp = "".join(s.xpath('./tr/@class'))
            if tmp == "firstRow":
                dict_data = dict()
                dict_data['area'] = self.area + area
                dict_data['title'] = title
                dict_data['jf'] = "".join(s.xpath('./tr[1]/td[2]/span//text()'))\
                    .replace("\n", "").replace("\t", "")  # Party A (buyer)
                dict_data['yf'] = "".join(s.xpath('./tr[1]/td[4]/span//text()'))\
                    .replace("\n", "").replace("\t", "")  # Party B (supplier)
                dict_data['fz_j'] = "".join(s.xpath('./tr[3]/td[2]/span//text()'))\
                    .replace("\n", "").replace("\t", "")  # Party A person in charge
                dict_data['fz_y'] = "".join(s.xpath('./tr[3]/td[4]/span//text()'))\
                    .replace("\n", "").replace("\t", "")  # Party B person in charge
                dict_data['wtr_j'] = "".join(s.xpath('./tr[4]/td[2]/span//text()'))\
                    .replace("\n", "").replace("\t", "")  # Party A authorized agent
                dict_data['wtr_y'] = "".join(s.xpath('./tr[4]/td[4]/span//text()'))\
                    .replace("\n", "").replace("\t", "")  # Party B authorized agent
                dict_data['tel_j'] = "".join(s.xpath('./tr[5]/td[2]/span//text()'))\
                    .replace("\n", "").replace("\t", "")  # Party A contact info
                dict_data['tel_y'] = "".join(s.xpath('./tr[5]/td[4]/span//text()'))\
                    .replace("\n", "").replace("\t", "")  # Party B contact info
                self.insertItem("福建省政府采购", dict_data)
                print("--------------插入数据------------------")
                print(str(dict_data))

    def insertItem(self, tableName, data):
        """Insert one record into the named MongoDB collection.

        NOTE(review): relies on the module-level global ``db`` created in
        the ``__main__`` block — this class is not usable on its own.

        :param tableName: MongoDB collection name.
        :param data: document (dict) to insert.
        """
        my_set = db[tableName]
        my_set.insert_one(data)


if __name__ == '__main__':
    # Module-level globals: Test_Get.insertItem reads the global `db`,
    # so the connection must be established before scraping starts.
    conn = MongoClient('127.0.0.1', 27017)
    db = conn["Test"]
    test_get = Test_Get()
    # District codes under Ningde, paired positionally with the number of
    # listing pages observed for each district.
    id_list = ['350922', '350923', '350924', '350925', '350926', '350981', '350982']
    page_list = [22, 20, 16, 23, 10, 26, 46]
    # Iterate the pairs directly instead of materializing zip into a list,
    # and avoid shadowing the builtin `id`.
    for district_id, page_count in zip(id_list, page_list):
        # NOTE(review): range(1, page_count) stops at page_count - 1; if
        # page_count is the true total page count the last page is skipped.
        # Original behavior preserved pending confirmation.
        for page in range(1, page_count):
            print(f"======{district_id}====第【{page}】页=============")
            test_get.get_contents(page, district_id)