# -*- coding:utf-8 -*-

import requests
from lxml import etree
from fake_useragent import UserAgent
from http import cookiejar
import json
from datetime import datetime
import time
import re
import csv
import os
from pymongo import MongoClient
from utils.HttpUtils import HttpUtils


requests.packages.urllib3.disable_warnings()  # 忽略HTTPS安全警告

"""
(.*?): (.*)
"$1":"$2",

http://www.altascientific.com/product/c-81
"""

class GetDetail:
    """Helpers for checking product numbers against altascientific.com search."""

    @staticmethod
    def get_contents(id):
        """
        Search the site for a product number and report whether it exists.

        :param id: product number fed into the search form's ``key_1`` field
        :return: dict with keys '产品号' (the queried id) and
                 '结果' ("有" if the search returned rows, "无" otherwise)
        """
        headers = {
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',
            'Accept-Encoding': 'gzip, deflate',
            'Accept-Language': 'zh-CN,zh;q=0.9',
            'Connection': 'keep-alive',
            'Cookie': 'PHPSESSID=00tu816nsfo3ubsqpj3243ibd1',
            'DNT': '1',
            'Host': 'www.altascientific.com',
            'Referer': 'http://www.altascientific.com/product/c-172',
            'Upgrade-Insecure-Requests': '1',
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/92.0.4515.131 Safari/537.36'
        }
        url = f"http://www.altascientific.com/product/search.php?key_1={id}&Submit2.x=14&Submit2.y=14"
        html = HttpUtils.do_request("GET", url, headers, "")

        root = etree.HTML(html.text)
        dict_data = {'产品号': id}
        # The results table always carries a header <tr>; more than one row
        # means the search actually matched something.
        if len(root.xpath('//div[@id="newsquery"]/table/tr')) > 1:
            dict_data['结果'] = "有"
        else:
            dict_data['结果'] = "无"

        return dict_data

    @staticmethod
    def get_urls():
        """
        Load the product numbers to query from the local CSV file.

        Skips the header line, then takes the first column of every row.

        :return: list of product-number strings
        """
        list_urls = []
        with open("阿尔塔缺失数据0820.csv", "r", encoding="utf-8-sig", newline="") as f:
            next(f)  # skip header row
            reader = csv.reader(f)
            for line in reader:
                list_urls.append(line[0])

        return list_urls

# Collect the first column of every data row in result.csv (header skipped);
# list_t is kept for the deduplication routine commented out below.
list_s = []
list_t = []
with open("result.csv", "r", encoding="utf-8-sig", newline="") as f:
    next(f)
    for row in csv.reader(f):
        list_s.append(row[0])


# with open("阿塔尔.csv", "r", encoding="utf-8-sig", newline="") as f:
#     next(f)
#     reader = csv.reader(f)
#     for line in reader:
#         list_t.append(line[4])
#
# for s in list_s:
#     flag = False
#     for t in list_t:
#         if s.strip() == t.strip():
#             flag = True
#             break
#     if flag is False:
#         with open("result.csv", "a+", encoding="utf-8-sig", newline="") as f:
#             csv_writer = csv.writer(f)
#             csv_writer.writerow([s])