import base64
import functools
import json
import os
import random
import sqlite3
import sys
import time

import requests
from bs4 import BeautifulSoup
from cryptography.hazmat.primitives.ciphers.aead import AESGCM
from lxml import etree
from win32crypt import CryptUnprotectData
# https://cloud.tencent.com/developer/article/1665352
# http://www.guikeyun.com/cms/news/296593.html
# https://zhuanlan.zhihu.com/p/62030238
# https://blog.csdn.net/weixin_45145550/article/details/112396820
# https://blog.csdn.net/weixin_42788769/article/details/112341879
# https://www.cnblogs.com/lfri/p/10542797.html

def de_title(func):
    """Decorator: call *func*, print its name and the <title> text of the
    HTML response it returns, then pass the response through to the caller.

    The wrapped function must return a ``requests`` response object.
    Fixes in this version: the response is returned instead of being
    discarded, ``functools.wraps`` preserves the wrapped function's
    metadata, and a missing <title> no longer raises AttributeError.
    """
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        req = func(*args, **kwargs)
        soup = BeautifulSoup(req.content, "html.parser")
        title = soup.find("title")
        # Anti-bot / error pages may lack a <title>; don't crash on them.
        print(func.__name__, title.text if title is not None else "<no title>")
        return req
    return wrapper

def get_string(local_state):
    """Read Chrome's "Local State" JSON file and return the base64-encoded
    master key stored under os_crypt/encrypted_key."""
    with open(local_state, 'r', encoding='utf-8') as fh:
        state = json.load(fh)
    return state['os_crypt']['encrypted_key']


def pull_the_key(base64_encrypted_key):
    """Recover Chrome's AES cookie key.

    Base64-decodes the value from "Local State", strips the 5-byte
    b'DPAPI' header, and decrypts the remainder with the Windows DPAPI
    for the current user. Returns the raw key bytes.
    """
    raw = base64.b64decode(base64_encrypted_key)
    dpapi_blob = raw[5:]  # drop the leading b'DPAPI' tag
    return CryptUnprotectData(dpapi_blob, None, None, None, 0)[1]


def decrypt_string(key, data):
    """AES-GCM-decrypt one Chrome 'v10' cookie value and return it as text.

    Layout of *data*: 3-byte b'v10' version tag, 12-byte nonce, then the
    ciphertext (GCM auth tag appended by the cipher).
    """
    nonce = data[3:15]
    ciphertext = data[15:]
    plainbytes = AESGCM(key).decrypt(nonce, ciphertext, None)
    return plainbytes.decode('utf-8')

# Fetch and decrypt Chrome cookies for one host.
def get_cookie_from_chrome(host):
    """Return a {name: value} dict of decrypted Chrome cookies for *host*.

    Reads the AES master key from Chrome's "Local State" file and the
    encrypted cookie rows from the "Cookies" SQLite database, then decrypts
    each value: AES-GCM for 'v10'-prefixed values (Chrome 80+), plain DPAPI
    otherwise. Windows-only: relies on %LOCALAPPDATA% and CryptUnprotectData.
    """
    local_state = os.environ['LOCALAPPDATA'] + r'\Google\Chrome\User Data\Local State'
    cookie_path = os.environ['LOCALAPPDATA'] + r"\Google\Chrome\User Data\Default\Cookies"

    # Parameterized query: the original interpolated *host* directly into
    # the SQL string, which breaks on quotes and allows SQL injection.
    sql = "select host_key,name,encrypted_value from cookies where host_key=?"

    with sqlite3.connect(cookie_path) as conn:
        cu = conn.cursor()
        res = cu.execute(sql, (host,)).fetchall()
        cu.close()

        cookies = {}
        key = pull_the_key(get_string(local_state))
        for host_key, name, encrypted_value in res:
            if encrypted_value[0:3] == b'v10':
                # Chrome 80+ format: AES-GCM with the Local State key.
                cookies[name] = decrypt_string(key, encrypted_value)
            else:
                # Pre-Chrome-80 format: the value is a raw DPAPI blob.
                cookies[name] = CryptUnprotectData(encrypted_value)[1].decode()

        return cookies

@de_title
def test4():
    """Smoke test: fetch a Boss Zhipin search page using cookies lifted
    from the local Chrome profile and a randomly chosen User-Agent.
    Returns the response (the decorator prints the page title)."""
    ua_pool = [
        "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.106 Safari/537.36",
        "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36",
        "Mozilla/5.0 (Windows NT 10.0; …) Gecko/20100101 Firefox/61.0",
        "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/64.0.3282.186 Safari/537.36",
        "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3202.62 Safari/537.36",
        "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/45.0.2454.101 Safari/537.36",
        "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0)",
        "Mozilla/5.0 (Macintosh; U; PPC Mac OS X 10.5; en-US; rv:1.9.2.15) Gecko/20110303 Firefox/3.6.15"
    ]

    # Convert the plain dict of Chrome cookies into a CookieJar and attach
    # it (plus a random UA) to a fresh session.
    jar = requests.utils.cookiejar_from_dict(
        get_cookie_from_chrome('.zhipin.com'), cookiejar=None, overwrite=True)
    session = requests.Session()
    session.cookies = jar
    session.headers = {"user-agent": random.choice(ua_pool)}

    req = session.get('https://www.zhipin.com/job_detail/?query=python&city=101010100&industry=&position=100109')
    print(req.text)
    return req


# Fetch the HTML of one page.
def open_page(url):
    """GET *url* with cookies taken from the local Chrome profile.

    Builds a raw Cookie header from the decrypted cookie dict and sends the
    request with a fixed desktop User-Agent. Returns the response body
    prettified by BeautifulSoup (so downstream lxml parsing sees
    well-formed markup).
    """
    cookie_dict = get_cookie_from_chrome('.zhipin.com')
    # Serialize the dict into a single Cookie header value.
    # str.join replaces the original quadratic string-concatenation loop.
    cookies = '; '.join("%s=%s" % (key, value) for key, value in cookie_dict.items())

    header = {
        'Cookie': cookies,
        'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/85.0.4183.121 Safari/537.36',
    }

    res = requests.get(url, headers=header)
    html = res.text
    print(html)
    soup = BeautifulSoup(html, 'lxml')
    return soup.prettify()


# Parse a listing page's HTML and collect the data into a dict.
def extract_text(html):
    """Extract every job title and its detail-page href from a listing
    page. Returns {'titles': [...], 'href': [...]} (parallel lists)."""
    tree = etree.HTML(html)
    return {
        'titles': tree.xpath('//*[@class="job-name"]/a/@title'),
        'href': tree.xpath('//*[@class="job-name"]/a/@href'),
    }

# Parse a detail page's HTML and return the description as a string.
def extract_html(html):
    """Pull the job-description text out of a detail page.

    Exits the program (printing the raw page) when the expected node is
    missing — typically a sign the cookies have expired.
    """
    nodes = etree.HTML(html).xpath('//*[@class="text"]')
    if not nodes:
        sys.exit(html)
    return nodes[0].xpath('string(.)')


# Split list data into even-index and odd-index halves.
def list_split(list):
    """Return (even-index items, odd-index items), dropping empty strings."""
    evens = [item for item in list[::2] if item != '']
    odds = [item for item in list[1::2] if item != '']
    return evens, odds


# Strip newlines, <br/> tags and surrounding whitespace from list items.
def data_clear(list):
    """Clean every string in *list* in place and return the list.

    Removes embedded newlines and literal "<br/>" fragments, then strips
    leading/trailing whitespace. Fixes an off-by-one in the original,
    which iterated ``range(1, len(list))`` and so never cleaned the first
    element (the job title).
    """
    for i in range(len(list)):
        list[i] = list[i].replace("\n", "").replace("<br/>", "").strip()
    return list

if __name__ == '__main__':

    # NOTE(review): the full scraping pipeline below (SQLite table setup,
    # paging through the job listings, fetching each detail page, and
    # inserting title/description rows) is commented out; only the smoke
    # test test4() currently runs.

    # conn = sqlite3.connect('boss.db')
    # # create the cursor
    # cursor = conn.cursor()
    
    # cursor.execute('''CREATE TABLE jobs_info
    #     (ID INTEGER PRIMARY KEY   AUTOINCREMENT,
    #     titles           TEXT    NOT NULL,
    #     description           TEXT    NOT NULL);''')

    # for i in range(1, 5):
    #     url = 'https://www.zhipin.com/c101010100/?query=%E7%88%AC%E8%99%AB%E5%B7%A5%E7%A8%8B%E5%B8%88&page=' + str(
    #         i) + '&ka=page-' + str(i)
    #     print(url)
    #     html = open_page(url)
    #     # print(html)
    #     info_dict = extract_text(html)
    #     # print(len(info_dict['titles']))
    #     if(len(info_dict['titles']))==0:
    #         sys.exit('请重新获取cookies') 
    #     for j in range(0, len(info_dict['titles'])):
    #         if i==4 and j==10:  # 30 rows per page, so take only the first 10 from the last page -- 100 rows in total
    #             break
    #         # insert the row
    #         temp=[]
    #         # print(j)
    #         for key,value in info_dict.items():
    #             # print(key,value)
    #             if(key=='titles'):
    #                 # print(value[j])
    #                 temp.append(value[j])
    #             elif(key=='href'):                    
    #                 dhtml=open_page('https://www.zhipin.com'+value[j])
    #                 description=extract_html(dhtml)
    #                 temp.append(description)

    #         temp=data_clear(temp)
    #         # print(temp)
    #         sql = '''INSERT INTO jobs_info(titles, description) VALUES("%s","%s")'''%(temp[0],temp[1])
    #         print(sql)
    #         cursor.execute(sql)
    #     # commit the transaction
    #     conn.commit()
    #     print("已获取到第" + str(i) + "页")

    # # close the cursor
    # cursor.close()
    # # close the connection
    # conn.close()
    test4()
