# -*- coding:utf-8 -*-

import requests
from lxml import etree
from fake_useragent import UserAgent
from http import cookiejar
import json
from datetime import datetime
import time
import re
import csv
import os
from pymongo import MongoClient
from utils.HttpUtils import HttpUtils


requests.packages.urllib3.disable_warnings()  # suppress HTTPS/InsecureRequestWarning noise

"""
(.*?): (.*)
"$1":"$2",

http://www.altascientific.com/product/c-81
"""


class TestRequest():
    """Scraper for product listings on www.altascientific.com.

    Walks the paginated category listing pages, extracts the category
    breadcrumb plus the per-row product fields, and hands each row to
    ``HttpUtils.dict_to_csv`` for persistence.
    """

    def __init__(self):
        # CookieJar instance to hold cookies across requests.
        # (requests.utils.dict_from_cookiejar(resp.cookies) or
        #  resp.cookies.get_dict() can convert cookies to a plain dict.)
        self.cookie = cookiejar.CookieJar()
        # Static headers, including a pre-captured PHPSESSID session cookie.
        # NOTE(review): the hard-coded session id will expire — confirm it is
        # refreshed before each crawl.
        self.headers = {
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',
            'Accept-Encoding': 'gzip, deflate',
            'Accept-Language': 'zh-CN,zh;q=0.9',
            'Connection': 'keep-alive',
            'Cookie': 'PHPSESSID=00tu816nsfo3ubsqpj3243ibd1',
            'DNT': '1',
            'Host': 'www.altascientific.com',
            'Referer': 'http://www.altascientific.com/product/c-172',
            'Upgrade-Insecure-Requests': '1',
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/92.0.4515.131 Safari/537.36'
        }

    def get_contents(self, id, cursor):
        """Scrape one category's listing pages, starting at page ``cursor``.

        :param id:     category id used in the ``/product/c-{id}-p-{page}`` URL
        :param cursor: 1-based page number to start from

        Iterates (rather than recursing, as the original did) until the page
        count derived from the pager widget is exhausted; each data row is
        saved via ``HttpUtils.dict_to_csv``.
        """
        while True:
            url = f"http://www.altascientific.com/product/c-{id}-p-{cursor}"
            html = HttpUtils.do_request("GET", url, self.headers, "")

            root = etree.HTML(html.text)
            # Total pages = pager <li class="pages"> entries + 1 (the current
            # page has no such entry) — re-read on every page.
            page_total = len(root.xpath('//div[@id="pages"]//li[@class="pages"]')) + 1

            # Category breadcrumb, e.g. "A》B》C"; pre-seed four levels so the
            # CSV columns are stable even for shallow categories.
            tmp = "".join(root.xpath('//table[@class="r_s_1"]//text()')).replace("\r\n", "").strip()
            dict_type = {
                '分类_0': "",
                '分类_1': "",
                '分类_2': "",
                '分类_3': "",
            }
            for num, c in enumerate(tmp.split("》")):
                dict_type['分类_' + str(num)] = c

            for line_num, r in enumerate(root.xpath('//div[@id="newsquery"]/table/tr')):
                if line_num == 0:
                    continue  # first <tr> is the header row
                dict_data = dict(dict_type)  # copy breadcrumb into the row
                dict_data['产品编号'] = "".join(r.xpath('.//td[1]//text()'))
                dict_data['产品名称'] = "".join(r.xpath('.//td[2]//text()'))
                dict_data['Product Name'] = "".join(r.xpath('.//td[3]//text()'))
                # Trailing tab keeps spreadsheet apps from mangling the CAS number.
                dict_data['CAS'] = "".join(r.xpath('.//td[4]//text()')) + "\t"

                # Persist the row.
                HttpUtils.dict_to_csv("阿尔塔", dict_data)

            # Advance to the next page, or stop when the last page is done.
            if cursor >= page_total:
                break
            print(f"=====共计【{page_total}】页=======第【{cursor + 1}】页=============")
            cursor += 1


if __name__ == '__main__':
    # Crawl every target category, each starting from page 1.
    category_ids = [259, 262, 173, 51, 248, 39, 46, 251, 284, 232, 231, 261, 237, 57, 147, 73, 72]
    crawler = TestRequest()
    for category_id in category_ids:
        crawler.get_contents(category_id, 1)
