# -*- encoding:utf-8 -*-
# 开发团队：大数据组
# 开发者：albert·bing
# 开发时间：2020/7/5 20:13
# 文件名称：yellow_calendar.py
# 开发工具：PyCharm


#  start your code

# import sys
# sys.path.append('/home/hadoop/programs/spider/WTP66_BigdataCrawler')
from selenium import webdriver
# 导入键盘操作的keys包
from selenium.webdriver.common.keys import Keys
# 导入chrome选项
from selenium.webdriver.chrome.options import Options

# from configs import config

import json

from urllib.parse import quote
import TreadeMark.MySqlUtil as MySqlUtil

import requests
from bs4 import BeautifulSoup
import urllib3
import time

# 忽略https的安全警告
urllib3.disable_warnings()


def getHeaders():
    """Return the default HTTP request headers for quandashi.com.

    Includes a desktop-Chrome User-Agent, a generic Referer, and a
    pre-captured session cookie string.
    """
    user_agent = ('Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) '
                  'Chrome/89.0.4389.114 Safari/537.36')
    # NOTE(review): hard-coded session cookie captured from a browser; it will
    # expire and need refreshing.
    cookie = 'NTKF_T2D_CLIENTID=guest9D076C7A-90BD-2142-451F-E699CC225146; sensorsdata2015jssdkcross=%7B%22distinct_id%22%3A%2217ae699d69fc62-002100befdeca-c3f3568-2073600-17ae699d6a0597%22%2C%22%24device_id%22%3A%2217ae699d69fc62-002100befdeca-c3f3568-2073600-17ae699d6a0597%22%2C%22props%22%3A%7B%7D%7D; INGRESSCOOKIE=1627387884.356.6100.309192; Hm_lvt_df2da21ec003ed3f44bbde6cbef22d1c=1627366477,1627369668,1627387884; _csrf=f56928aaad6710641d94eb6e911ba26a4647d0a6127fb15f1614491eb37b42a7a%3A2%3A%7Bi%3A0%3Bs%3A5%3A%22_csrf%22%3Bi%3A1%3Bs%3A32%3A%22w-_YUtV9x1RPsCGompFNOW_QktWF11Yo%22%3B%7D; PHPSESSID=303825b872529c6cb12746bf8812fa93; QDS_COOKIE=user%3Ainfo%3AF7C9E9A9-A680-F1CD-66EC-7ED6A2188BB2; QDS_LOGIN_INFO=%7B%22userName%22%3A%22%5Cu672a%5Cu547d%5Cu540d%22%2C%22avtar%22%3A%22%22%7D; Hm_lpvt_df2da21ec003ed3f44bbde6cbef22d1c=1628043790; uniqueCode=17D2-5752-A29F-8E17; nTalk_CACHE_DATA={uid:kf_9479_ISME9754_guest9D076C7A-90BD-21,tid:1628065591340509'
    return {
        'User-Agent': user_agent,
        'Referer': 'www.quandashi.com',
        'cookie': cookie,
    }

def getHeaders2(referer):
    """Return request headers with the given URL as Referer.

    Same User-Agent and session cookie as getHeaders(); only the Referer
    differs, so detail-page requests look like in-site navigation.
    """
    user_agent = ('Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) '
                  'Chrome/89.0.4389.114 Safari/537.36')
    # NOTE(review): hard-coded session cookie captured from a browser.
    cookie = 'NTKF_T2D_CLIENTID=guest9D076C7A-90BD-2142-451F-E699CC225146; sensorsdata2015jssdkcross=%7B%22distinct_id%22%3A%2217ae699d69fc62-002100befdeca-c3f3568-2073600-17ae699d6a0597%22%2C%22%24device_id%22%3A%2217ae699d69fc62-002100befdeca-c3f3568-2073600-17ae699d6a0597%22%2C%22props%22%3A%7B%7D%7D; INGRESSCOOKIE=1627387884.356.6100.309192; Hm_lvt_df2da21ec003ed3f44bbde6cbef22d1c=1627366477,1627369668,1627387884; _csrf=f56928aaad6710641d94eb6e911ba26a4647d0a6127fb15f1614491eb37b42a7a%3A2%3A%7Bi%3A0%3Bs%3A5%3A%22_csrf%22%3Bi%3A1%3Bs%3A32%3A%22w-_YUtV9x1RPsCGompFNOW_QktWF11Yo%22%3B%7D; PHPSESSID=303825b872529c6cb12746bf8812fa93; QDS_COOKIE=user%3Ainfo%3AF7C9E9A9-A680-F1CD-66EC-7ED6A2188BB2; QDS_LOGIN_INFO=%7B%22userName%22%3A%22%5Cu672a%5Cu547d%5Cu540d%22%2C%22avtar%22%3A%22%22%7D; Hm_lpvt_df2da21ec003ed3f44bbde6cbef22d1c=1628043790; uniqueCode=17D2-5752-A29F-8E17; nTalk_CACHE_DATA={uid:kf_9479_ISME9754_guest9D076C7A-90BD-21,tid:1628065591340509'
    return {
        'User-Agent': user_agent,
        'Referer': referer,
        'cookie': cookie,
    }

def createDriver():
    """Create and return a headless Chrome WebDriver.

    NOTE(review): the chromedriver path is hard-coded for a Windows dev box;
    the commented-out config line suggests it was once configurable.
    """
    opts = Options()
    opts.add_argument('--headless')
    # driver = webdriver.Chrome(executable_path=config._CHROME_DRIVER_WIN, options=chrome_options)
    return webdriver.Chrome(executable_path="D:/softInstall/chromedriver.exe", options=opts)


def saveDetailIds(headers):
    """Crawl pages 500-549 of the reject-clue listing and persist the rows.

    Each page response embeds a JSON payload; the `"list"` array is sliced
    out by string markers, parsed, flattened into per-record field rows, and
    bulk-inserted via MySqlUtil.insert_data_dim_trade.

    :param headers: HTTP headers dict (from getHeaders()).
    """
    # Field order must match the columns expected by insert_data_dim_trade.
    field_names = ['id', 'statusName', 'appDate', 'tmName', 'regNo',
                   'applicantCn', 'agent', 'detailId', 'contactPhone',
                   'contactEmail', 'province', 'city', 'country',
                   'createTime', 'updateTime']

    sum_list = []
    for i in range(500, 550):
        url2 = ("https://www.quandashi.com/customer-clue/query-reject-clue-list"
                "?intCls=&province=&city=&regYear=&statusName=&searchKey="
                "&pageNo=" + str(i) + "&pageSize=20")
        result_source_text = requests.get(url2, headers=headers)
        time.sleep(5)  # throttle so the site does not block us
        # The body is not pure JSON; slice the "list" array out by markers.
        res_01 = result_source_text.text.split('"list":')
        res_02 = res_01[1].split(',"pages"')
        sum_list.append(json.loads(res_02[0]))
        print("爬取了第【" + str(i + 1) + "】条")

    total_list = []
    for page in sum_list:
        for record in page:
            # Fix: no longer shadows the builtin `list`; fields are pulled
            # in one comprehension instead of 15 manual appends.
            row = [record[key] for key in field_names]
            print(row)
            total_list.append(row)

    MySqlUtil.insert_data_dim_trade(total_list)


def getDetailIds(num):
    """Load previously saved trade rows (incl. detailIds) from MySQL.

    :param num: offset/limit value passed through to MySqlUtil.select_trades.
    :return: whatever MySqlUtil.select_trades returns (rows of trades).
    """
    rows = MySqlUtil.select_trades(num)
    return rows


def getDetail(total_list, num):
    """Fetch and parse the detail page for each detailId, batch-insert to MySQL.

    For every row in total_list (index 1 must be the detailId), downloads the
    quandashi detail page, extracts the brand-detail table cells, and inserts
    rows into MySQL in batches of 39. Stops early when a row has an empty
    law-status cell (treated as end-of-valid-data).

    :param total_list: rows from getDetailIds(); row[1] is the detailId.
    :param num: running counter used only for progress logging.
    """
    parsed_count = 0
    data = []
    for i in range(len(total_list)):
        if i % 39 == 0 and i != 0:
            MySqlUtil.insert_detail_info(data)
            # Fix: clear the batch after inserting — previously `data` kept
            # growing, so every flush re-inserted all earlier rows.
            data = []
            time.sleep(1)
        url = "https://so.quandashi.com/hhr/searchdetail/" + total_list[i][1] + ".html"
        headers2 = getHeaders2(url)
        source_text = requests.get(url=url, headers=headers2, verify=False)
        soup = BeautifulSoup(source_text.text, "html.parser")
        # Fix: attrs must be a dict; the original passed a set literal
        # ({'class', '...'}) which bs4 cannot interpret as attribute filters.
        div_text = soup.find(name='table', attrs={'class': 'brand-detail-table'})
        contents = div_text.find_all(name='td', attrs={'class': 'td-content'})

        one_list = [
            total_list[i][1],                    # 索引id  detailId
            contents[0].text.replace('\n', ''),  # 商标名称 trade_name
            contents[1].text.replace('\n', ''),  # 法律状态 law_status
            contents[2].text.replace('\n', ''),  # 申请号 app_num
            contents[3].text.replace('\n', ''),  # 商品类别 goods_category
            contents[4].text.replace('\n', ''),  # 申请日期 app_date
            contents[5].text.replace('\n', ''),  # 申请人名称 app_person_name
            contents[6].text.replace('\n', ''),  # 申请人地址 app_person_address
            contents[12].text.replace('\n', ''), # 初审公告期号 preliminary_review_num
            contents[13].text.replace('\n', ''), # 初审公告日期 preliminary_review_date
            contents[14].text.replace('\n', ''), # 注册公告期号 registration_num
            contents[15].text.replace('\n', ''), # 注册公告日期 registration_date
            contents[16].text.replace('\n', ''), # 专用权期限 exclusive_right_period
        ]
        # Empty law_status means we ran past the valid records — stop.
        if one_list[2] == '':
            # Fix: counter is now actually incremented (was always 0).
            print("爬取了【" + str(parsed_count) + "】条数据")
            break
        data.append(one_list)
        parsed_count += 1
        print(one_list)
        num = num + 1
        print("解析完成第【" + str(num) + "】条")
    # Fix: flush the final partial batch — previously any rows after the last
    # multiple of 39 were silently dropped.
    if data:
        MySqlUtil.insert_detail_info(data)


if __name__ == '__main__':
    # Resume counter: how many detail rows were already parsed in prior runs.
    num = 4807

    # Build the shared request headers.
    headers = getHeaders()

    # One-off step: harvest detailIds into MySQL (uncomment to re-run).
    # saveDetailIds(headers)

    # Load the detailIds that still need their detail pages scraped.
    total_list = getDetailIds(num)
    # print(total_list)

    # Scrape each detail page and persist the parsed rows.
    getDetail(total_list, num)