# -*- coding:utf-8 -*-

import requests
from lxml import etree
from fake_useragent import UserAgent
from http import cookiejar
import json
from datetime import datetime
import time
import re
import csv
from pymongo import MongoClient
from requests_toolbelt import MultipartEncoder
import pandas as pd


requests.packages.urllib3.disable_warnings()  # suppress HTTPS certificate warnings (requests are made with verify=False)


class Test_Get():
    """Scraper for the UK IPO trademark text-search service.

    Workflow: GET the search form to obtain session cookies and a CSRF
    token, POST the multipart search form, then parse the HTML response
    for a captcha page, a "no results" error summary, or result entries.
    """

    def __init__(self):
        # CookieJar instance to hold session cookies.
        # requests.utils.dict_from_cookiejar(html.cookies) converts cookies to a dict.
        self.cookie = cookiejar.CookieJar()
        # ua = UserAgent(use_cache_server=False)  # disable the UA server cache
        self.headers = {
            # 'User-Agent': ua.random,
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',
            'Accept-Encoding': 'gzip, deflate, br',
            'Accept-Language': 'zh-CN,zh;q=0.9',
            'Cache-Control': 'max-age=0',
            'Connection': 'keep-alive',
            'Content-Type': 'multipart/form-data; boundary=----WebKitFormBoundarySXlFGutTRSzv4YtI',
            'DNT': '1',
            'Host': 'trademarks.ipo.gov.uk',
            'Origin': 'https://trademarks.ipo.gov.uk',
            'Referer': 'https://trademarks.ipo.gov.uk/ipo-tmtext?reset',
            'Sec-Fetch-Dest': 'document',
            'Sec-Fetch-Mode': 'navigate',
            'Sec-Fetch-Site': 'same-origin',
            'Sec-Fetch-User': '?1',
            'Upgrade-Insecure-Requests': '1',
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.116 Safari/537.36',
            # 'Cookie':'UXFORMS_EXEC=3bcd70deb26708126b868fa535acc1bdd4fa88db-=&ipo-tmtextId=51660916&csrfToken=8ef6ee9b19d411491f5dc2da78ff9edcb90c01f2-1594819820907-4e6107ea4e9bc41bfdeb7c6c; Path=/; Secure; HTTPOnly'
        }

    def get_contents(self, search_word, save_path):
        """Search the IPO trademark register for *search_word*.

        :param search_word: word/phrase to search for.
        :param save_path: destination path for results — currently unused,
            kept for interface compatibility with batch callers.
        :return: "是" when the first matching result title starts with "EU",
            "否" when results exist but none match, ``None`` when a captcha
            or "no results" page came back.
        """
        url = "https://trademarks.ipo.gov.uk/ipo-tmtext"

        # session = requests.Session()
        # html = session.get(url, headers=self.headers)
        # Step 1: GET the form page to obtain session cookies and the CSRF token.
        html = requests.get(url, headers=self.headers)

        cookies_dict = html.cookies.get_dict()
        tmp_cookies = cookies_dict['UXFORMS_EXEC']
        # The CSRF token is embedded at the tail of the UXFORMS_EXEC cookie value.
        csrfToken = re.findall(r"csrfToken=(.*?)$", tmp_cookies)[0]
        print(f"====={csrfToken}=======")
        m = MultipartEncoder(
            fields={
                'csrfToken': csrfToken,
                'sectionIndex': '0',
                'searchType': 'WORD',
                'wordSearchType': 'EXACT',
                # BUG FIX: was hard-coded to 'zara'; use the actual parameter.
                'wordSearchPhrase': search_word,
                'wordSearchMatchType': 'ALLWORDS',
                'ViennaClassesCategoriesDropDownOne': '',
                'ViennaClassesDivisionsDropDownOne': '',
                'ViennaClassesSectionsDropDownOne': '',
                'firstOperator': 'NO',
                'ViennaClassesCategoriesDropDownTwo': '',
                'ViennaClassesDivisionsDropDownTwo': '',
                'ViennaClassesSectionsDropDownTwo': '',
                'secondOperator': 'NO',
                'ViennaClassesCategoriesDropDownThree': '',
                'ViennaClassesDivisionsDropDownThree': '',
                'ViennaClassesSectionsDropDownThree': '',
                'filedFrom.day': '1',
                'filedFrom.month': '1',
                'filedFrom': '1876',
                'filedTo.day': '24',
                'filedTo.month': '7',
                'filedTo': '2020',
                'legalStatus': 'LIVELEGALSTATUS',
                'pageSize': '10'
            })
        self.headers['Content-Type'] = m.content_type
        # BUG FIX: the old loop kept only the LAST cookie pair and raised
        # NameError when no cookies were returned; send them all.
        self.headers['Cookie'] = "; ".join(f"{k}={v}" for k, v in cookies_dict.items())
        # Step 2: POST the search form.
        html_result = requests.post(url, headers=self.headers, data=m)
        # html_result = session.post(url, headers=self.headers, data=m)

        root = etree.HTML(html_result.text)
        # Captcha page returned — nothing further to parse.
        img_element = root.xpath('//img[@id="ipocaptcha_CaptchaImage"]')
        if len(img_element) > 0:
            print("====验证码=====")
            return None
        # "No results" error summary.
        error_element = root.xpath('//ul[@class="error-summary-list"]')
        if len(error_element) > 0:
            error_str = root.xpath('//ul[@class="error-summary-list"]/li/text()')[0]
            print(f"====未搜索到结果:{error_str}=====")
            return None
        # BUG FIX: flag was only assigned on a match and never returned;
        # initialise and return it so callers can use the outcome.
        flag = "否"
        result_element = root.xpath('//div[@class="search-results"]/div[@class="grid-row"]/div[@class="column-two-thirds"]')
        for r in result_element:
            title = "".join(r.xpath('./p[@class="bold-medium"]/a//text()'))
            print(f"======{title}=========")
            if title.startswith("EU"):
                flag = "是"
                break
        return flag

    def get_request_proxy(self, method, url, headers_s, data, retry=1):
        """
        Send a request through the Mogu tunnel proxy, retrying on failure.

        :param method: 'POST' or 'GET'
        :param url: request URL
        :param headers_s: caller-supplied headers (merged with proxy auth)
        :param data: POST body / GET query params
        :param retry: current attempt number (starts at 1)
        :return: requests.Response, or None for an unsupported method or
                 after MAX_RETRY failed attempts
        """
        # Cap retries — the old code recursed without bound on persistent failures.
        MAX_RETRY = 5
        # Mogu proxy tunnel order key.
        appKey = "cW9BVzdTZkc0eU1iWjJIbzo3RExyQ3QzQkVNRm9YUTNh"

        # Mogu tunnel proxy server address.
        ip_port = 'secondtransfer.moguproxy.com:9001'
        headers_p = {
            "User-Agent": 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.116 Safari/537.36',
            "Proxy-Authorization": 'Basic ' + appKey,
        }
        # Merge headers; proxy auth entries win on key conflicts.
        headers = dict(headers_s, **headers_p)

        proxies = {
            "http": "http://" + ip_port,
            "https": "https://" + ip_port,
        }
        res = None
        try:
            if method == "POST":
                if url.startswith("https"):
                    res = requests.post(url, headers=headers, data=data, proxies=proxies, verify=False, timeout=5, allow_redirects=False)
                else:
                    res = requests.post(url, headers=headers, data=data, proxies=proxies, timeout=5)
            elif method == "GET":
                if url.startswith("https"):
                    res = requests.get(url, headers=headers, params=data, proxies=proxies, verify=False, timeout=5)
                else:
                    res = requests.get(url, headers=headers, params=data, proxies=proxies, timeout=5)
            else:
                # Unsupported HTTP method — nothing to retry.
                return None
        except Exception as ex:
            print(f"-------------【错误】,重试第【{retry}】次-------------")
            print(ex)
            if retry >= MAX_RETRY:
                return None
            # BUG FIX: was HttpUtils.do_request_proxy (undefined name) — recurse on self.
            return self.get_request_proxy(method, url, headers_s, data, retry + 1)
        else:
            # 404 (not found) and 303 (see-other redirect) are accepted as-is;
            # any other non-200 status triggers a retry.
            if res.status_code == 404:
                pass
            elif res.status_code == 303:
                pass
            elif res.status_code != 200:
                print(f"-------------返回状态码:{res.status_code},重试第【{retry}】次-------------")
                if retry >= MAX_RETRY:
                    return res
                return self.get_request_proxy(method, url, headers_s, data, retry + 1)
        return res

    # Convert a cookie string ("k1=v1; k2=v2") into a dict.
    def get_cookie_dict(self, str):
        # NOTE(review): parameter name shadows the builtin ``str``; kept
        # unchanged for backward compatibility with keyword callers.
        itemDict = {}
        items = str.split(';')
        for item in items:
            # Skip empty segments (e.g. a trailing ';') — previously IndexError.
            if '=' not in item:
                continue
            # BUG FIX: split only on the FIRST '=' so values containing '='
            # (csrfToken payloads, base64) are not truncated.
            key, value = item.split('=', 1)
            itemDict[key.replace(' ', '')] = value
        return itemDict


if __name__ == '__main__':

    # Batch mode (disabled): load search terms from an Excel sheet and run
    # get_contents once per term, writing results to a CSV file.
    # source_path = "./X/HY.xlsx"
    # save_path = "./X/HY_result.csv"
    # search_list = []
    # line_num = 1
    # df = pd.read_excel(source_path)
    # for index, row in df.iterrows():
    #     search_list.append(row[0].strip())
    # for s in search_list:
    #     line_num += 1
    #     print(f"====count：{len(search_list) - line_num}=======line：{line_num}=====search：{s}=============")
    #     crawler.get_contents(s, save_path)

    # Single smoke-test invocation with placeholder arguments.
    crawler = Test_Get()
    crawler.get_contents("s", "save_path")
