# -*- coding:utf-8 -*-

import requests
from lxml import etree
from fake_useragent import UserAgent
from http import cookiejar
import json
from datetime import datetime
import time
import re
import csv
from pymongo import MongoClient
import pandas as pd
import os

from utils.HttpUtils import HttpUtils

requests.packages.urllib3.disable_warnings()  # 忽略HTTPS安全警告

"""
(.*?): (.*)
"$1":"$2",

https://www.gbw-china.com/list_goods/3/1.html
"""


class Test_Get():
    """Scraper for reference-material (标准物质) listings on www.gbw-china.com.

    Walks the paginated goods list, follows each matching product's detail
    page, and appends the extracted rows to CSV via ``HttpUtils.dict_to_csv``.
    """

    def __init__(self):
        # CookieJar instance to hold session cookies.
        # requests.utils.dict_from_cookiejar(html.cookies)  # cookies -> dict
        # res.cookies.get_dict()  # get cookies as a dict
        self.cookie = cookiejar.CookieJar()
        # self.ua = UserAgent()  # would disable the server-side UA cache
        # ua = UserAgent(verify_ssl=False)
        # Browser-captured request headers; the hard-coded Cookie/SERVERID
        # values are session-bound and will expire — refresh them when the
        # site starts rejecting requests.
        self.headers = {
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',
            'Accept-Encoding': 'gzip, deflate, br',
            'Accept-Language': 'zh-CN,zh;q=0.9',
            'Connection': 'keep-alive',
            'Cookie': "mediav=%7B%22eid%22%3A%22848516%22%2C%22ep%22%3A%22%22%2C%22vid%22%3A%22Ah2nJN%3ApN%2B8n%24JC!*rTr%22%2C%22ctn%22%3A%22%22%2C%22vvid%22%3A%22Ah2nJN%3ApN%2B8n%24JC!*rTr%22%2C%22_mvnf%22%3A1%2C%22_mvctn%22%3A0%2C%22_mvck%22%3A0%2C%22_refnf%22%3A0%7D; __jsluid_s=b48d7504e88cb2222e27670b3789c3e6; __51cke__=; __51vcke__JFGurNctYEYspFoB=aab1343f-daf4-5193-accb-65858c15a896; __51vuft__JFGurNctYEYspFoB=1629197868550; Hm_lvt_37cd71fe1177d4648ae269884f3f4a6b=1629197869; Hm_lvt_c2ef344332b6463256443fbe8e7e229a=1629197869; Qs_lvt_336110=1629197868; _ga=GA1.1.1014573555.1629197869; nb-referrer-hostname=www.gbw-china.com; nb-start-page-url=https%3A%2F%2Fwww.gbw-china.com%2Flist_goods%2F0%2F1.html; __51uvsct__JFGurNctYEYspFoB=3; __tins__21011607=%7B%22sid%22%3A%201629208362819%2C%20%22vd%22%3A%201%2C%20%22expires%22%3A%201629210162819%7D; __51laig__=23; SERVERID=be61eae6e2f68e96c98dee359b38fd56|1629208351|1629201612; __vtins__JFGurNctYEYspFoB=%7B%22sid%22%3A%20%2268416dd7-72d3-59d8-888c-aa60620bcaa4%22%2C%20%22vd%22%3A%2015%2C%20%22stt%22%3A%204566503%2C%20%22dr%22%3A%20363085%2C%20%22expires%22%3A%201629210162995%2C%20%22ct%22%3A%201629208362995%7D; Qs_pv_336110=2700460806361584000%2C4227908331108755500%2C2215857095368991500%2C2697383410755825700%2C4183020174072732000; Hm_lpvt_c2ef344332b6463256443fbe8e7e229a=1629208363; Hm_lpvt_37cd71fe1177d4648ae269884f3f4a6b=1629208363; _ga_9YEVJZM58S=GS1.1.1629203795.3.1.1629208975.0; __jsl_clearance_s=1629208966.047|0|rvW9TMRmKSPCO4cK%2FJakoCHKfKY%3D",
            'DNT': '1',
            'Host': 'www.gbw-china.com',
            'Referer': 'https://www.gbw-china.com/list_goods/3/2.html',
            'sec-ch-ua': '"Chromium";v="92", " Not A;Brand";v="99", "Google Chrome";v="92"',
            'sec-ch-ua-mobile': '?0',
            'Sec-Fetch-Dest': 'document',
            'Sec-Fetch-Mode': 'navigate',
            'Sec-Fetch-Site': 'same-origin',
            'Sec-Fetch-User': '?1',
            'Upgrade-Insecure-Requests': '1',
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/92.0.4515.131 Safari/537.36'

        }
        self.count = 0

    def get_contents(self, cursor):
        """Scrape the goods-list pages starting at page *cursor*.

        Iterates page by page (the previous implementation recursed once per
        page, risking ``RecursionError`` on large listings), extracts one row
        dict per table row, and delegates matching rows to ``get_detail``.
        Stops early — without paginating further — as soon as a row with a
        non-matching brand is seen, preserving the original early-exit
        behavior.

        :param cursor: 1-based page number to start from.
        :return: None
        """
        # self.headers['User-Agent'] = self.ua.random
        page = cursor
        while True:
            # Category 0 = the full goods listing.  (A category-83 URL used
            # to be built here but was immediately overwritten — dead code,
            # removed.)
            url = f"https://www.gbw-china.com/list_goods/0/{page}.html"

            res = HttpUtils.do_request("GET", url, self.headers, "")

            root = etree.HTML(res.text)

            for r in root.xpath('//table/tbody/tr'):
                dict_data = dict()

                dict_data['产品名称'] = r.xpath('./td[1]/div/a/text()')[0]
                dict_data['产品编号'] = r.xpath('./td[2]/div/a/text()')[0]
                dict_data['有效期'] = "".join(r.xpath('./td[3]/div/text()')).strip()
                dict_data['标准值'] = "".join(r.xpath('./td[4]/div/text()')).strip()
                # Trailing tab keeps spreadsheet apps from mangling CAS numbers.
                dict_data['CAS号'] = "".join(r.xpath('./td[5]/div/text()')) + "\t"
                dict_data['规格'] = "".join(r.xpath('./td[6]/div/text()'))
                dict_data['标准价'] = "".join(r.xpath('./td[7]/div/text()')).replace('\n', '')
                dict_data['库存'] = "".join(r.xpath('./td[8]/div/text()')).replace('\n', '')
                dict_data['货期'] = "".join(r.xpath('./td[9]/div//text()')).replace('\n', '')
                dict_data['品牌'] = "".join(r.xpath('./td[10]/div/text()'))
                url_child = r.xpath('./td[1]/div/a/@href')[0]

                # Only rows from these brands are of interest; anything else
                # aborts the whole crawl (listing appears to be brand-sorted —
                # TODO confirm that assumption).
                if dict_data['品牌'].find("TMstandard") == 0 or dict_data['品牌'].find("TMstandard-1") == 0 \
                        or dict_data['品牌'].find("TMstandard-2") == 0 or dict_data['品牌'].find("坛墨质检") == 0:
                    # Extract detail-page info and component list, then save.
                    self.get_detail(url_child, dict_data)
                else:
                    return
            # Pagination state is embedded in the page's inline JS:
            # "pno:<current>," and "total:<pages>,".
            pno = int(re.findall(r"pno:(.*?),", res.text)[0])
            total = int(re.findall(r"total:(.*?),", res.text)[0])
            if pno >= total:
                self.count = 0
                return
            print(f"=====共计【{total}】页=======第【{pno + 1}】页=============")
            page = pno + 1

    def get_detail(self, url_child, item):
        """Fetch a product detail page and save its rows to CSV.

        Adds the breadcrumb categories to *item*, then either writes one CSV
        row per component (组分信息) merged with *item*, or — when the page
        has no component tab — writes *item* alone.

        :param url_child: detail-page URL.
        :param item: row dict collected from the listing page; mutated in
            place (分类_N keys are added).
        :return: None
        """
        html_child = HttpUtils.do_request("GET", url_child, self.headers, "")
        root_child = etree.HTML(html_child.text)

        # Breadcrumb categories, skipping the "首页" (home) crumb.
        num = 0
        for t in root_child.xpath('//a[@class="el-breadcrumb__item"]/span'):
            title = t.xpath('.//text()')[0]
            if title.find("首页") < 0:
                item['分类_' + str(num)] = title
                num += 1

        # Look for the "组分信息" (component information) tab.
        flag = False
        for c in root_child.xpath('//el-tabs[@type="border-card"]'):
            if c.xpath('./el-tab-pane/@label')[0] == "组分信息":
                flag = True
                dict_data_d = dict()
                for t in c.xpath('.//table/tbody/tr'):
                    # Trailing tab protects the CAS number in spreadsheets.
                    dict_data_d['CAS号_1'] = "".join(t.xpath('./td[1]/text()')) + "\t"
                    dict_data_d['名称_1'] = "".join(t.xpath('./td[2]/text()'))
                    dict_data_d['标准值_1'] = "".join(t.xpath('./td[3]/text()'))
                    dict_data_d['单位_1'] = "".join(t.xpath('./td[4]/text()'))
                    # Merge listing-row data with this component row (3.9+ |).
                    dict_data_all = item | dict_data_d
                    # Save one CSV row per component.
                    HttpUtils.dict_to_csv("坛墨_混标", dict_data_all)
                break
        if not flag:
            # No component tab: save the listing row by itself.
            HttpUtils.dict_to_csv("坛墨", item)

    # Extract the keywords still waiting to be searched.
    def check_data(self, source_path, target_path):
        """Return the keywords not yet searched.

        Compares the keyword column of the Excel file at *source_path* with
        the first column of the already-scraped CSV at *target_path* and
        returns only the keywords missing from the latter.

        :param source_path: Excel file with keyword / concentration columns.
        :param target_path: CSV of already-searched keywords (may not exist).
        :return: (search_list, search_list1, line_num, target_path) —
            pending keywords, their concentrations, 0 (kept for interface
            compatibility), and *target_path* passed through.
        """
        source_list = []   # keyword column
        source_list1 = []  # concentration column
        line_num = 0       # always 0; retained so callers keep working

        df_s = pd.read_excel(source_path, sheet_name=0)
        for index, row in df_s.iterrows():
            # Skip rows whose keyword or concentration is too short to be a
            # real entry (header junk, blanks rendered as "nan", etc.).
            if len(str(row[0])) > 4 and len(str(row[1])) > 4:
                source_list.append(str(row[0]).strip())
                # Concentration: strip embedded spaces too.
                source_list1.append(str(row[1]).replace(" ", "").strip())

        # Keywords already searched; a set makes the membership test O(1).
        done = set()
        if os.path.exists(target_path):
            df_t = pd.read_csv(target_path, header=None)
            for index, row in df_t.iterrows():
                done.add(str(row[0]).strip())

        # Keep only the keywords (and their concentrations) not yet searched.
        search_list = []
        search_list1 = []
        for keyword, concentration in zip(source_list, source_list1):
            if keyword not in done:
                search_list.append(keyword)
                search_list1.append(concentration)

        return search_list, search_list1, line_num, target_path


if __name__ == '__main__':
    # Kick off the crawl from page 324 of the full goods listing.
    scraper = Test_Get()
    scraper.get_contents(324)

