# -*- coding:utf-8 -*-

import requests
from lxml import etree
from fake_useragent import UserAgent
from http import cookiejar
import json
from datetime import datetime
import time
import re
import csv
from pymongo import MongoClient
# from requests_toolbelt import MultipartEncoder
import pandas as pd
import os


requests.packages.urllib3.disable_warnings()  # 忽略HTTPS安全警告


class Test_Get():
    """Scraper for the INPI trademark database (bases-marques.inpi.fr).

    Workflow: fetch session cookies from the home page, POST a trademark
    search per keyword, and append [keyword, flag] rows to a result CSV so
    an interrupted run can resume where it left off.
    """

    def __init__(self):
        # CookieJar instance reserved for session-cookie storage.
        self.cookie = cookiejar.CookieJar()
        ua = UserAgent()  # random User-Agent to defeat server-side caching
        self.headers = {
            'User-Agent': ua.random,
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',
            'Accept-Encoding': 'gzip, deflate, br',
            'Accept-Language': 'zh-CN,zh;q=0.9',
            'Cache-Control': 'max-age=0',
            'Connection': 'keep-alive',
            'Content-Type': 'application/x-www-form-urlencoded',
            'Host': 'bases-marques.inpi.fr',
            'Origin': 'https://bases-marques.inpi.fr',
            'Referer': 'https://bases-marques.inpi.fr/Typo3_INPI_Marques/retourRecherche',
            'Sec-Fetch-Dest': 'document',
            'Sec-Fetch-Mode': 'navigate',
            'Sec-Fetch-Site': 'same-origin',
            'Sec-Fetch-User': '?1',
            'Upgrade-Insecure-Requests': '1',
        }

    def get_home_cookies(self, url):
        """Request the home page until a JSESSIONID cookie is issued, then
        store all received cookies in self.headers['Cookie'].

        :param url: ignored; the home-page URL is fixed (parameter kept for
                    signature compatibility with existing callers)
        """
        url = "https://bases-marques.inpi.fr/"
        while True:
            html = requests.get(url, headers=self.headers)
            cookies_dict = html.cookies.get_dict()
            # Serialize cookies into a single header string "k=v;k=v;...".
            self.headers['Cookie'] = "".join(f"{key}={value};" for key, value in cookies_dict.items())
            print(self.headers['Cookie'])
            if cookies_dict.get('JSESSIONID') is not None:
                return
            time.sleep(1)  # back off briefly instead of hammering the server

    def get_contents(self, search_word, save_path):
        """Search one keyword and append [keyword, flag] to save_path.

        flag is "有" when at least one result is listed, "无" when the site
        reports no match, and "" when the page is unrecognized (e.g. captcha).

        :param search_word: trademark keyword to query
        :param save_path: CSV file the result row is appended to
        """
        # Refresh the session cookies before searching.
        self.get_home_cookies("https://bases-marques.inpi.fr/")

        postData = {
            'marque': search_word,
            'classification': '',
            'baseFr': 'on',
            'baseCommu': 'on',
            'baseInter': 'on',
            'rechercher': 'Rechercher',
            'recherche': 'recherche'
        }
        # Submit the search form.
        res_post = requests.post("https://bases-marques.inpi.fr/Typo3_INPI_Marques/marques_resultats_liste.html", headers=self.headers, data=postData, allow_redirects=False)

        flag = ""
        root = etree.HTML(res_post.text)
        # Captcha-page detection (logged only; no solver available here).
        if root.xpath('//img[@id="ipocaptcha_CaptchaImage"]'):
            print("====验证码=====")
        # "No result" page detection.
        error_element = root.xpath('//ul[@class="error-summary-list"]')
        if error_element:
            error_str = root.xpath('//ul[@class="error-summary-list"]/li/text()')[0]
            print(f"====未搜索到结果:{error_str}=====")
            flag = "无"
        else:
            result_element = root.xpath('//div[@class="bas"]')
            if result_element:
                # Only the first hit matters: we record presence, not details.
                num = "".join(result_element[0].xpath('.//div[@class="detail"][1]/span[2]/text()'))
                print(f"======{num}=========")
                flag = "有"
        # BUG FIX: persist the outcome on every branch. Previously the write
        # only happened in the else-branch, so "无" keywords were never saved
        # and check_data() re-queued them on every run.
        with open(save_path, 'a+', encoding="utf-8-sig", newline='') as f:
            csv.writer(f).writerow([search_word, flag])

    def get_request_proxy(self, method, url, headers_s, data, retry=1):
        """
        Send a request through the Mogu tunnelling proxy, retrying on
        exceptions and unexpected status codes.

        :param method: 'POST' or 'GET'
        :param url: request URL
        :param headers_s: caller-supplied headers (merged with proxy auth)
        :param data: POST body / GET query parameters
        :param retry: current attempt number (incremented on recursion)
        :return: requests.Response, or None for an unknown method
        """
        # NOTE(security): hard-coded proxy credential — move to config/env.
        appKey = "cW9BVzdTZkc0eU1iWjJIbzo3RExyQ3QzQkVNRm9YUTNh"

        # Mogu tunnel proxy endpoint.
        ip_port = 'secondtransfer.moguproxy.com:9001'
        headers_p = {
            "User-Agent": 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.116 Safari/537.36',
            "Proxy-Authorization": 'Basic ' + appKey,
        }
        # Merge headers; proxy headers win on key conflicts.
        headers = dict(headers_s, **headers_p)

        proxies = {
            "http": "http://" + ip_port,
            "https": "https://" + ip_port,
        }
        res = None
        try:
            if method == "POST":
                if url.startswith("https"):
                    res = requests.post(url, headers=headers, data=data, proxies=proxies, verify=False, timeout=5, allow_redirects=False)
                else:
                    res = requests.post(url, headers=headers, data=data, proxies=proxies, timeout=5)
            elif method == "GET":
                if url.startswith("https"):
                    res = requests.get(url, headers=headers, params=data, proxies=proxies, verify=False, timeout=5)
                else:
                    res = requests.get(url, headers=headers, params=data, proxies=proxies, timeout=5)
            else:
                return None
        except Exception as ex:
            print(f"-------------【错误】,重试第【{retry}】次-------------")
            print(ex)
            # BUG FIX: the original recursed into the undefined
            # HttpUtils.do_request_proxy (NameError). Retry through self,
            # passing the ORIGINAL headers so proxy headers are not
            # re-merged on every attempt.
            return self.get_request_proxy(method, url, headers_s, data, retry + 1)
        # 404 and 303 are accepted terminal answers; any other non-200 retries.
        if res.status_code not in (200, 303, 404):
            print(f"-------------返回状态码:{res.status_code},重试第【{retry}】次-------------")
            return self.get_request_proxy(method, url, headers_s, data, retry + 1)
        return res

    # Convert a cookie header string ("k=v;k=v;...") into a dict.
    def get_cookie_dict(self, str):
        """
        :param str: raw cookie string (parameter name shadows the builtin;
                    kept unchanged for backward compatibility)
        :return: dict mapping cookie name (spaces stripped) -> value
        """
        itemDict = {}
        for item in str.split(';'):
            # BUG FIX: skip empty segments — the trailing ';' produced by
            # get_home_cookies previously raised IndexError on arr[1].
            if not item.strip():
                continue
            # BUG FIX: split on the FIRST '=' only, so values containing
            # '=' (e.g. base64 padding) are preserved intact.
            key, _, value = item.partition('=')
            itemDict[key.replace(' ', '')] = value
        return itemDict

    # Extract the keywords still to be searched: those present in
    # source_path but absent from target_path.
    def check_data(self, source_path, target_path):
        """
        :param source_path: CSV with one keyword per row (first column)
        :param target_path: result CSV of [keyword, flag] rows; may not exist
        :return: (search_list, line_num, target_path) — keywords not yet
                 searched, a counter starting at 0, and the result path
        """
        # BUG FIX: the original overwrote both parameters with hard-coded
        # "./DE/BG*.csv" paths, making the arguments dead.
        line_num = 0

        # NOTE(review): 'ANSI' is a Windows-only codec alias — confirm if
        # this ever runs elsewhere.
        df_s = pd.read_csv(source_path, header=None, encoding='ANSI')
        source_list = [str(row[0]).strip() for _, row in df_s.iterrows()]

        # Keywords already searched (empty when no result file exists yet).
        searched = set()
        if os.path.exists(target_path):
            df_t = pd.read_csv(target_path, header=None)
            searched = {str(row[0]).strip() for _, row in df_t.iterrows()}

        # Set membership: O(1) per lookup instead of the original O(n) scan.
        search_list = [word for word in source_list if word not in searched]

        return search_list, line_num, target_path


if __name__ == '__main__':
    scraper = Test_Get()
    # Build the list of keywords that still need searching; results are
    # appended to target_path so the run can be resumed after a crash.
    search_list, line_num, target_path = scraper.check_data("./DE/BG.csv", "./DE/BG_result.csv")
    total = len(search_list)
    for keyword in search_list:
        line_num += 1
        print(f"====count:{total - line_num}=======line:{line_num}=====search:【{keyword}】=============")
        scraper.get_contents(keyword, target_path)
