# -*- coding:utf-8 -*-

import requests
from lxml import etree
from fake_useragent import UserAgent
from http import cookiejar
import json
from datetime import datetime
import time
import re
import csv
from pymongo import MongoClient
# from requests_toolbelt import MultipartEncoder
import pandas as pd


requests.packages.urllib3.disable_warnings()  # 忽略HTTPS安全警告


class Test_Get():
    """Scraper for the DPMA trademark register (https://register.dpma.de).

    Fetches the basic search form, submits a trademark query, and appends a
    ``[search_word, flag]`` row to a CSV results file.
    """

    def __init__(self):
        # CookieJar instance to hold cookies across requests.
        # (requests.utils.dict_from_cookiejar(resp.cookies) converts one to a dict.)
        self.cookie = cookiejar.CookieJar()
        ua = UserAgent(use_cache_server=False)  # disable fake_useragent's cache server
        # Browser-like request headers for register.dpma.de.
        # FIX: the dict previously contained a second 'User-Agent' key further
        # down that silently overwrote ua.random with a hard-coded Chrome UA,
        # making the UserAgent() call dead code.
        self.headers = {
            'User-Agent': ua.random,
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',
            'Accept-Encoding': 'gzip, deflate, br',
            'Accept-Language': 'zh-CN,zh;q=0.9',
            'Cache-Control': 'max-age=0',
            'Connection': 'keep-alive',
            'Content-Type': 'application/x-www-form-urlencoded',
            'DNT': '1',
            'Host': 'register.dpma.de',
            'Origin': 'https://register.dpma.de',
            'Referer': 'https://register.dpma.de/DPMAregister/marke/basis',
            'sec-ch-ua': '"Chromium";v="86", "\"Not\\A;Brand";v="99", "Google Chrome";v="86"',
            'sec-ch-ua-mobile': '?0',
            'Sec-Fetch-Dest': 'document',
            'Sec-Fetch-Mode': 'navigate',
            'Sec-Fetch-Site': 'same-origin',
            'Sec-Fetch-User': '?1',
            'Upgrade-Insecure-Requests': '1',
        }

    def get_contents(self, search_word, save_path):
        """Search the DPMA register for *search_word* and record the outcome.

        :param search_word: trademark text to search for
        :param save_path: CSV file that ``[search_word, flag]`` is appended to
                          (only on the redirect/result branch — see note below)
        """
        url = "https://register.dpma.de/DPMAregister/marke/basis"

        session = requests.Session()
        # 1. Collect the session cookies. 2. Extract the Tapestry 't:formdata'
        # token that the search POST requires.
        html = session.get(url, headers=self.headers)
        root_html = etree.HTML(html.text)

        cookies_dict = html.cookies.get_dict()
        formdata = root_html.xpath('//input[@name="t:formdata"]/@value')[0]

        # Replay the session cookies explicitly in the Cookie header.
        self.headers['Cookie'] = "".join(f"{key}={value};" for key, value in cookies_dict.items())

        postData = {
            't:formdata': formdata,
            'checkbox': 'on',
            'checkbox_0': 'on',
            'checkbox_1': 'on',
            'marke': search_word,  # FIX: was hard-coded to 'zara', ignoring the parameter
            'rn': '',
            'bwt': '',
            'mf': '',
            'inh': '',
            'kla1': '',
            'kla2': '',
            'kla3': '',
            'wbk': '',
            'wdv': '',
            'tConfigEingeklappt': 'false',
            'checkbox_5': 'on',
            'checkbox_8': 'on',
            'sortierSpalte': 'MarkenIdentifikation',
            'select_0': 'aufsteigend',
            'trefferProSeite': '100',
            'maxTreffer': '1000',
            'cookieDummy': '',
            'rechercheStarten': 'Recherche starten'
        }
        # Submit the query. A 302 redirect signals a successful search.
        # FIX: requests follows redirects by default, so status_code could
        # never equal 302 — disable auto-redirect to observe it.
        res_post = session.post("https://register.dpma.de/DPMAregister/marke/basis.kopf.form",
                                headers=self.headers, data=postData,
                                allow_redirects=False)
        flag = ""
        if res_post.status_code != 302:
            root_post = etree.HTML(res_post.text)
            # Look for the "no results" error box on the returned form page.
            error_element = root_post.xpath('//div[@class="error"]')
            if len(error_element) > 0:
                error_str = "".join(root_post.xpath('//div[@class="error"]//text()'))
                print(f"====未搜索到结果:{error_str}=====")
                flag = "否"
            # NOTE(review): nothing is written to save_path on this branch —
            # confirm whether no-result queries should be recorded as well.
        else:
            # The redirect always targets the fixed result-list URL, so the
            # Location header is not consulted.
            url_result = "https://register.dpma.de/DPMAregister/marke/trefferliste"
            html_result = session.get(url_result, headers=self.headers)

            root = etree.HTML(html_result.text)
            # A captcha image means the scraper is being throttled.
            img_element = root.xpath('//img[@id="ipocaptcha_CaptchaImage"]')
            if len(img_element) > 0:
                print("====验证码=====")
            else:
                result_element = root.xpath('//table[@id="trefferliste"]/tr')
                for r in result_element:
                    # Only the first result row is inspected.
                    flag = "否"
                    country = "".join(r.xpath('./td[3]//text()'))
                    num = "".join(r.xpath('./td[4]//text()')).replace("\n", "")
                    print(f"======{num}=========")
                    break
                # Append the outcome for this search word.
                with open(save_path, 'a+', encoding="utf-8-sig", newline='') as f:
                    csv.writer(f).writerow([search_word, flag])

    def get_request_proxy(self, method, url, headers_s, data, retry=1, max_retry=5):
        """
        Send a request through the Mogu proxy tunnel, retrying on failure.

        :param method: 'POST' or 'GET'
        :param url: request URL
        :param headers_s: caller headers (merged with the proxy auth headers)
        :param data: POST body / GET query params
        :param retry: current attempt number (internal, used by the recursion)
        :param max_retry: give up once *retry* exceeds this (new, defaulted —
                          the original recursed without bound)
        :return: the requests response, or None for an unsupported method or
                 exhausted retries
        """
        if retry > max_retry:
            print(f"-------------重试次数超过【{max_retry}】次,放弃-------------")
            return None

        # Mogu proxy tunnel order key (HTTP Basic credential).
        appKey = "cW9BVzdTZkc0eU1iWjJIbzo3RExyQ3QzQkVNRm9YUTNh"
        # Mogu tunnel proxy endpoint.
        ip_port = 'secondtransfer.moguproxy.com:9001'
        headers_p = {
            "User-Agent": 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.116 Safari/537.36',
            "Proxy-Authorization": 'Basic ' + appKey,
        }
        # Merge caller headers with the proxy headers (proxy entries win).
        headers = dict(headers_s, **headers_p)

        proxies = {
            "http": "http://" + ip_port,
            "https": "https://" + ip_port,
        }
        res = None
        try:
            if method == "POST":
                if url.find("https") == 0:
                    res = requests.post(url, headers=headers, data=data, proxies=proxies, verify=False, timeout=5, allow_redirects=False)
                else:
                    res = requests.post(url, headers=headers, data=data, proxies=proxies, timeout=5)
            elif method == "GET":
                if url.find("https") == 0:
                    res = requests.get(url, headers=headers, params=data, proxies=proxies, verify=False, timeout=5)
                else:
                    res = requests.get(url, headers=headers, params=data, proxies=proxies, timeout=5)
            else:
                res = None
        except Exception as ex:
            print(f"-------------【错误】,重试第【{retry}】次-------------")
            print(ex)
            # FIX: the retry previously called undefined HttpUtils.do_request_proxy
            # (NameError) and passed the already-merged headers back in.
            return self.get_request_proxy(method, url, headers_s, data, retry + 1, max_retry)
        else:
            # 404 and 303 are accepted as-is; anything else non-200 is retried.
            # FIX: guard against res being None (unsupported method) before
            # touching status_code.
            if res is not None and res.status_code not in (200, 404, 303):
                print(f"-------------返回状态码:{res.status_code},重试第【{retry}】次-------------")
                return self.get_request_proxy(method, url, headers_s, data, retry + 1, max_retry)
        return res

    # Convert a cookie string ("k=v; k2=v2;") to a dict.
    def get_cookie_dict(self, cookie_str):
        """Parse a ``key=value; ...`` cookie string into a dict.

        FIX vs original: no longer shadows builtin ``str``; skips empty or
        '='-less segments (a trailing ';' used to raise IndexError); keeps
        everything after the first '=' as the value instead of truncating it.
        """
        item_dict = {}
        for item in cookie_str.split(';'):
            if '=' not in item:
                continue  # empty / malformed segment, e.g. from a trailing ';'
            key, _, value = item.partition('=')
            item_dict[key.strip()] = value
        return item_dict


if __name__ == '__main__':

    source_path = "./DE/BG.csv"
    save_path = "./DE/BG_result.csv"

    # Load the search terms: one per row, first column.
    # NOTE(review): 'ANSI' is a Windows-only encoding alias — confirm, or
    # switch to an explicit codec such as 'cp1252' for portability.
    df = pd.read_csv(source_path, header=None, encoding='ANSI')
    search_list = [row[0].strip() for _, row in df.iterrows()]

    test_get = Test_Get()
    # FIX: the counter was pre-incremented from 1, so the first term printed
    # as line 2 and the remaining-count was short by one.
    for line_num, s in enumerate(search_list, start=1):
        print(f"====count：{len(search_list) - line_num}=======line：{line_num}=====search：{s}=============")
        test_get.get_contents(s, save_path)

    # test_get.get_contents("s", "save_path")
