# -*- coding:utf-8 -*-

import requests
from lxml import etree
from fake_useragent import UserAgent
from http import cookiejar
import json
from datetime import datetime
import time
import re
import csv
import os
from pymongo import MongoClient
from utils.HttpUtils import HttpUtils


requests.packages.urllib3.disable_warnings()  # 忽略HTTPS安全警告

"""
(.*?): (.*)
"$1":"$2",

http://ch.cato-chem.com/Product.aspx?pid=4
"""


class TestRequest():
    """Scraper for product data on ch.cato-chem.com.

    Walks category listing pages, follows every product's detail page and
    persists the merged records through ``HttpUtils.dict_to_csv``.
    """

    def __init__(self):
        # CookieJar instance available to hold session cookies.
        # requests.utils.dict_from_cookiejar(html.cookies)  # cookies -> dict
        # res.cookies.get_dict()  # cookies as dict
        self.cookie = cookiejar.CookieJar()
        # self.ua = UserAgent(use_cache_server=False)  # disable the cache server
        # self.ua = UserAgent(verify_ssl=False)
        # Static headers copied from a real browser session.  NOTE(review):
        # the hard-coded Cookie / ASP.NET session id will expire — refresh it
        # if the site starts returning empty pages.
        self.headers = {
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',
            'Accept-Encoding': 'gzip, deflate',
            'Accept-Language': 'zh-CN,zh;q=0.9',
            'Connection': 'keep-alive',
            'Cookie': 'ASP.NET_SessionId=s21k34pffb4ka4trwi0thjmt; UM_distinctid=17b34443187834-057d2d666563e2-4343363-144000-17b34443188b19; Hm_lvt_53c457eaf4bbd7139b35b3bafea0f9ef=1628669490; nb-referrer-hostname=ch.cato-chem.com; CNZZDATA1277897889=1060751102-1628669489-null%7C1628669489; Hm_lpvt_53c457eaf4bbd7139b35b3bafea0f9ef=1628669779; nb-start-page-url=http%3A%2F%2Fch.cato-chem.com%2FProduct.aspx%3Fpid%3D4',
            'DNT': '1',
            'Host': 'ch.cato-chem.com',
            'Referer': 'http://ch.cato-chem.com/Product.aspx?pid=4',
            'Upgrade-Insecure-Requests': '1',
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/92.0.4515.131 Safari/537.36'
        }

    def get_contents(self, pid, cursor):
        """Scrape category ``pid`` from listing page ``cursor`` to the end.

        Iterates pages with a loop (the previous implementation recursed once
        per page, risking ``RecursionError`` on long categories) and hands
        each product over to :meth:`get_detail`, which also saves it.
        """
        page = cursor
        while True:
            url = f"http://ch.cato-chem.com/Product.aspx?pid={pid}&page={page}"
            print(url)
            html = HttpUtils.do_request("GET", url, self.headers, "")

            root = etree.HTML(html.text)
            for r in root.xpath('//ul[@class="product_list"]/li'):
                if len(r.xpath('./div')) > 0:
                    # Fresh dict per product: the previous single shared dict
                    # let per-product keys (e.g. '分类_N' categories added in
                    # get_detail) leak from one product into the next.
                    dict_data = dict()
                    dict_data['中文名称'] = "".join(r.xpath('./div[1]/div[@class="c_name"]/text()'))
                    dict_data['英文名称'] = "".join(r.xpath('./div[1]/div[@class="e_name"]/text()'))
                    # Trailing tab keeps spreadsheet apps from mangling CAS numbers.
                    dict_data['CAS号'] = "".join(r.xpath('.//div[@class="info"]/dl[1]/dd/text()')) + "\t"
                    dict_data['货号'] = "".join(r.xpath('.//div[@class="info"]/dl[2]/dd/text()'))
                    dict_data['分子式'] = "".join(r.xpath('.//div[@class="info"]/dl[3]/dd/text()'))
                    url_child = "http://ch.cato-chem.com" + r.xpath('./div/div[@class="img_box"]/a/@href')[0]

                    # Fetch detail-page fields and persist the record(s).
                    self.get_detail(url_child, dict_data)
            # Pagination: the nav bar text looks like "共 X/Y 页" and is
            # absent on single-page categories.
            nav = root.xpath('//div[@class="pl"]/text()')
            if not nav:
                break
            cursor_num, page_total = (
                int(part)
                for part in nav[0].strip().replace("共", "").replace("页", "").split('/')
            )
            if cursor_num >= page_total:
                break
            print(f"=====共【{page_total}】页=====第【{cursor_num + 1}】页=====")
            page = cursor_num + 1

    def get_detail(self, url_child, item):
        """Fill ``item`` with detail-page fields for one product and save it.

        Writes one CSV row per packaging/spec table row when the detail page
        has such a table, otherwise writes the base record once.
        """
        html_child = HttpUtils.do_request("GET", url_child, self.headers, "")
        root_child = etree.HTML(html_child.text)
        # Detail-page values override the (possibly abbreviated) listing values.
        item['中文名称'] = "".join(root_child.xpath('//div[@class="tet_box"]/h6/text()'))
        item['中文别名'] = "".join(root_child.xpath('//div[@class="dl_box"]/dl[1]/dd/text()'))
        item['英文名称'] = "".join(root_child.xpath('//div[@class="dl_box"]/dl[3]/dd/text()'))
        item['英文别名'] = "".join(root_child.xpath('//div[@class="dl_box"]/dl[5]/dd/text()'))
        item['货号'] = "".join(root_child.xpath('//div[@class="dl_box"]/dl[2]/dd/text()'))
        item['分子式'] = "".join(root_child.xpath('//div[@class="dl_box"]/dl[4]//text()')).replace("\r\n", "")\
            .replace("分子式：", "").strip()
        item['分子量'] = "".join(root_child.xpath('//div[@class="dl_box"]/dl[6]/dd/text()'))
        # Trailing tab keeps spreadsheet apps from mangling CAS numbers.
        item['CAS号'] = "".join(root_child.xpath('//div[@class="dl_box"]/dl[7]/dd/text()')) + "\t"

        # Breadcrumb categories, skipping the generic "产品中心" root entry.
        num = 0
        for t in root_child.xpath('//div[@class="product_nav"]/a'):
            title = "".join(t.xpath('.//text()'))
            if title.find("产品中心") < 0:
                item['分类_' + str(num)] = title
                num += 1
        # Save one row per packaging/spec table row; fall back to the bare item.
        saved_any = False
        for d in root_child.xpath('//div[@class="item_03"]//tbody/tr'):
            saved_any = True
            dict_data_d = dict()
            dict_data_d['货号/包装'] = d.xpath('./th[1]/text()')[0]
            dict_data_d['产品规格'] = d.xpath('./th[2]/text()')[0]
            # Merge base product data with this packaging row (PEP 584, 3.9+).
            data_all_dict = item | dict_data_d
            HttpUtils.dict_to_csv("佳途", data_all_dict)
        if saved_any is False:
            HttpUtils.dict_to_csv("佳途", item)

    def get_yw(self, cursor):
        """Scrape page ``cursor`` of the medicine index (pid=76) and crawl
        every category linked from it."""
        url_yw = f"http://ch.cato-chem.com/ProMedicine.aspx?pid=76&page={cursor}"
        res = HttpUtils.do_request("GET", url_yw, self.headers, "")
        root_yw = etree.HTML(res.text)

        list_url = root_yw.xpath('//tbody/td/a/@href')
        # De-duplicate while preserving first-seen order.
        target_list = sorted(set(list_url), key=list_url.index)
        for t in target_list:
            # Each href ends in "...pid=<id>"; crawl that category from page 1.
            self.get_contents(t.split('=')[-1], 1)


if __name__ == '__main__':
    crawler = TestRequest()
    # Alternative entry point: crawl one specific category directly, e.g.
    #     crawler.get_contents(75, 1)

    # Medicines: walk index pages 1-64 and scrape every product found.
    for p in range(1, 65):
        print(f"============第【{p}】页=============")
        crawler.get_yw(p)
