import json
import random
import sys
import time
from collections import defaultdict
import datetime
import logging
import traceback

import pymongo
import requests

# --- Interactive configuration ------------------------------------------------
# All settings are collected from the operator at startup (prompts are in
# Chinese): login cookies, the raw URL-encoded search payload copied from the
# browser, the keyword lists, the minimum keyword-match count, and the base
# name for the output files.
cookies = input(f"请输入最新cookies：")
home_args = input(f"请输入查询参数：")
key_word_str = input(f"请输入简历详情关键字，多个用空格隔开：")
key_word_str_must = input(f"请输入必须包含的关键字， 多个用空格隔开,非必填（回车跳过）：")
key_word_pipei_num = int(input(f"请输入关键字匹配数："))
file_name = input(f"请输保存结果的文件名称：")
# Split the space-separated keyword string and drop empty/whitespace entries.
key_word_list_ = key_word_str.split(" ")
key_word_list = [i.strip() for i in key_word_list_ if i]

# Optional second list: a resume must also contain at least one of these.
key_word_must_list = []
if key_word_str_must:
    key_word_must = key_word_str_must.split(" ")
    key_word_must_list = [i.strip() for i in key_word_must if i]

print(f"获得关键字列表：{key_word_list}")
print(f"获得必须包含的某个关键字列表：{key_word_must_list}")
print("开始匹配简历，静候佳音，窗口不要关闭，\n================================================================================")
print("如果有匹配的简历，会打印出来简历详情页链接，可复制到浏览器直接打开查看，\n================================================================================")
print(f"亦可最终统一查看，全部匹配合适的简历最终会存储于当前目录下:{file_name}.txt，\n================================================================================")

# Two output files: matches in discovery order, and a second copy sorted by
# descending keyword-match count (handler_two is swapped in at the end of
# LiePin.get_lasted_people).
handler_one = logging.FileHandler(filename=f"{file_name}.txt", encoding="utf-8")
handler_two = logging.FileHandler(filename=f"{file_name}关键字倒序.txt", encoding="utf-8")
logging.basicConfig(format="", level=logging.INFO, handlers=[handler_one])

# Re-encode to latin-1 so the non-ASCII bytes survive being placed into HTTP
# headers / request bodies (HTTP header values must be latin-1 encodable).
cookies = cookies.encode("utf-8").decode("latin1")
home_args = home_args.encode("utf-8").decode("latin1")

# Browser-captured headers for the Liepin HR API; the Cookie value is the
# operator-supplied session.
home_header = {'Accept': 'application/json, text/plain, */*', 'Accept-Encoding': 'gzip, deflate, br',
               'Accept-Language': 'zh-CN,zh;q=0.9', 'Connection': 'keep-alive', 'Content-Length': '69',
               'Content-Type': 'application/x-www-form-urlencoded',
               'Cookie': cookies,
               'Host': 'api-h.liepin.com', 'Origin': 'https://h.liepin.com', 'Referer': 'https://h.liepin.com/',
               'sec-ch-ua': '"Google Chrome";v="107", "Chromium";v="107", "Not=A?Brand";v="24"',
               'sec-ch-ua-mobile': '?0', 'sec-ch-ua-platform': '"Windows"', 'Sec-Fetch-Dest': 'empty',
               'Sec-Fetch-Mode': 'cors', 'Sec-Fetch-Site': 'same-site',
               'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/107.0.0.0 Safari/537.36',
               'X-Client-Type': 'web', 'X-Fscp-Std-Info': '{"client_id": "40095"}',
               'X-Fscp-Trace-Id': '2bfb10cd-1a19-4ffc-9eac-f79a4bfa2106', 'X-Fscp-Version': '1.1',
               'X-Requested-With': 'XMLHttpRequest', 'X-XSRF-TOKEN': 'hjuYF85ESuifplUrXOx0fQ'
               }


# API endpoints: one for the resume work-experience detail, one for search.
content_url = "https://api-h.liepin.com/api/com.liepin.rresume.userh.pc.old.get-work-exps"
home_url = "https://api-h.liepin.com/api/com.liepin.searchfront4r.h.search-resumes"
time_start = datetime.datetime.now()


class Mongo(object):
    """Thin wrapper around the local MongoDB collection that stores proxy IPs."""

    @classmethod
    def get_mongo_client(cls):
        """Return the ``ip_proxy.datas`` collection on the local MongoDB."""
        myclient = pymongo.MongoClient("mongodb://localhost:27017/")
        mydb = myclient["ip_proxy"]  # database holding the proxy pool
        mycol = mydb["datas"]  # collection of proxy documents
        return mycol

    @classmethod
    def get_all_datas(cls):
        """Return every stored proxy document as a list of dicts."""
        client = cls.get_mongo_client()
        # list() materialises the cursor directly; the original pass-through
        # comprehension ([x for x in datas]) did the same work less idiomatically.
        return list(client.find({}))

    @classmethod
    def delete_data(cls, data: dict):
        """Delete every stored document whose ``ip`` field matches ``data``'s."""
        client = cls.get_mongo_client()
        client.delete_many({"ip": data.get("ip")})


class Utils(object):
    @classmethod
    def test_ip(cls):
        """Pick a random proxy from Mongo and verify it actually proxies traffic.

        Returns the usable proxies mapping (the document's ``ip`` value) on
        success, or ``None`` when the pool is exhausted.  Proxies that fail
        the check are deleted from Mongo and another candidate is tried.
        """
        ip_list = Mongo.get_all_datas()
        # Guard: random.choice raises IndexError on an empty pool, and the
        # original would recurse forever deleting nothing.
        if not ip_list:
            return None
        ip = random.choice(ip_list)
        url_test = "http://icanhazip.com/"
        try:
            response = requests.get(url=url_test, proxies=ip.get("ip"), timeout=3)
            # icanhazip echoes the caller's public IP; compare it with the
            # host part of the proxy we routed through to confirm it works.
            proxyIP = response.text.replace("\n", "")
            ip_ = list(ip.get("ip").values())[0].split(":")[0]
            if proxyIP != ip_:
                Mongo.delete_data(ip)
                print(f"删除无效的代理ip：{ip}, 并重新寻找可用代理ip")
                # BUG FIX: the original dropped the recursive result, so a
                # successful retry still returned None to the caller.
                return cls.test_ip()
            else:
                print(f"代理ip：{ip}可正常使用，开始上报产品")
                return ip.get("ip")
        except Exception:
            # Narrowed from a bare except; any network failure means the
            # proxy is unusable — drop it and retry with another.
            # print(f"代理ip不能使用，异常信息{traceback.format_exc()}")
            Mongo.delete_data(ip)
            print(f"删除无效的代理ip：{ip}, 并重新寻找可用代理ip")
            # BUG FIX: propagate the retry's result (was discarded).
            return cls.test_ip()


class LiePin(object):
    @classmethod
    def get_home_data(cls):
        """Page through the search API and return the accumulated resume list.

        Stops early when a page comes back with an empty ``resList``.
        On a request failure, prompts the operator for a fresh cookie and
        moves on to the next page.
        """
        all_data = []
        # The payload carries the page number URL-encoded as curPage%22%3A<n>;
        # split on page 0 so we can splice in each page number.
        home_args_one, home_args_two = home_args.split("curPage%22%3A0")
        for page in range(1000):
            if page > 100:
                print(f"大于100页了：{page}")
            home_args_res = f"{home_args_one}curPage%22%3A{page}{home_args_two}"
            if page != 0:
                # BUG FIX: str.replace returns a NEW string; the original
                # called home_args.replace(...) and discarded the result,
                # so the LOGIN -> UP switch never took effect.
                home_args_res = home_args_res.replace("LOGIN", "UP")
            sys.stdout.flush()
            random_time = random.uniform(1, 5)
            now_start = datetime.datetime.now()
            # Randomised pacing so the traffic looks less machine-generated.
            # (The original busy-waited on time.monotonic(), pinning a core.)
            time.sleep(random_time)
            now_end = datetime.datetime.now()
            print(f"此页耗时：{(now_end-now_start).total_seconds()}")
            try:
                response = requests.post(url=home_url, data=home_args_res, headers=home_header, timeout=5)
                response_text = json.loads(response.text)
                data = response_text.get("data").get("resList", [])
                if not data:
                    # Empty page marks the end of the result set.
                    return all_data
                all_data.extend(data)
            except Exception:
                print(traceback.format_exc())
                print("请求异常，检查账号是否受限！\n检查步骤：\n刷新页面查看是否需要验证码")
                cookie_ = input(f"请输入当前最新cookie：")
                # latin-1 round-trip keeps the header value HTTP-encodable.
                home_header.update({"Cookie": cookie_.encode("utf-8").decode("latin1")})
                print("=======================开始继续匹配=========================")
        return all_data

    @classmethod
    def get_lasted_people(cls):
        """Fetch every resume's detail text, match keywords, and log the hits.

        Writes matches to ``{file_name}.txt`` as they are found, then writes
        a second report sorted by descending keyword-match count to
        ``{file_name}关键字倒序.txt``.
        """
        all_data = cls.get_home_data()
        print(f"全部简历获取成功，总简历数：{len(all_data)}，开始简历详情匹配关键字，\n================================================================================")
        hg_sum = 0  # number of resumes that passed the keyword filters
        logging_data = defaultdict(list)  # match-count -> report lines, for the sorted file
        for index, resume_ in enumerate(all_data):
            sys.stdout.flush()
            random_time = random.uniform(1, 5)
            now_start = datetime.datetime.now()
            # Randomised pacing between detail requests (was a CPU busy-wait).
            time.sleep(random_time)
            now_end = datetime.datetime.now()
            print(f"此页耗时：{(now_end-now_start).total_seconds()}")
            url = resume_.get('detailUrl')
            full_url = f"https://h.liepin.com{url}"
            print(f"开始匹配第{index+1}份简历")
            # Parse the query-string parameters out of the detail URL.
            _, arg_str = url.split("/showresumedetail/?")
            arg_dict = defaultdict()
            for arg_ in arg_str.split("&"):
                key_, value_ = arg_.split("=")
                arg_dict[key_] = value_
            # BUG FIX: on a request failure the original fell through and used
            # `response`, which was unbound (first iteration) or stale (later
            # iterations). Retry after refreshing the cookie instead.
            response = None
            while response is None:
                try:
                    # NOTE(review): a GET with a data= body is unusual but is
                    # what this endpoint was captured using — left unchanged.
                    response = requests.get(url=content_url, data={"resIdEncode": arg_dict.get("res_id_encode")}, headers=home_header, timeout=5)
                except Exception:
                    print(traceback.format_exc())
                    print("请求异常，检查账号是否受限！\n检查步骤：\n刷新页面查看是否需要验证码")
                    cookie_ = input(f"请输入当前最新cookie：")
                    home_header.update({"Cookie": cookie_.encode("utf-8").decode("latin1")})
                    print("=======================开始继续匹配=========================")
            content_text = response.text
            # Collect which keywords appear in the detail text and how often.
            pipei_key_word = set()
            pipei_key_word_num = defaultdict(int)
            for key_word in key_word_list:
                key_word = key_word.strip()
                if key_word in content_text:
                    pipei_key_word.add(key_word)
                    pipei_key_word_num[key_word] = content_text.count(key_word)
            if len(pipei_key_word) >= key_word_pipei_num:
                if not key_word_must_list:
                    # No mandatory-keyword list: the threshold alone qualifies.
                    logging_data[len(pipei_key_word)].append(f"查询详细信息成功, 匹配关键字：{pipei_key_word}，关键字匹配数：{dict(pipei_key_word_num)}， 简历地址：\n{full_url}")
                    logging.info(f"查询详细信息成功, 匹配关键字：{pipei_key_word}， 简历地址：\n{full_url}")
                    print(f"查询详细信息成功, 匹配关键字：{pipei_key_word}， 简历地址：\n{full_url}，\n================================================================================")
                    hg_sum += 1
                    continue
                # Otherwise the resume must also contain at least one
                # mandatory keyword to count as a hit.
                key_must_now = []
                key_must_now_num = defaultdict(int)
                for key_must in key_word_must_list:
                    if key_must in content_text:
                        key_must_now.append(key_must)
                        key_must_now_num[key_must] = content_text.count(key_must)
                if key_must_now:
                    logging_data[len(pipei_key_word)].append(f"查询详细信息成功, 匹配关键字：{pipei_key_word}， 关键字匹配数：{dict(pipei_key_word_num)}，存在必须包含的关键字：{key_must_now}, 出现次数：{dict(key_must_now_num)}，简历地址：\n{full_url}")
                    logging.info(f"查询详细信息成功, 匹配关键字：{pipei_key_word}， 存在必须包含的关键字：{key_must_now} ，简历地址：\n{full_url}")
                    print(
                        f"查询详细信息成功, 匹配关键字：{pipei_key_word}， 存在必须包含的关键字：{key_must_now} ， 简历地址：\n{full_url}，\n================================================================================")
                    hg_sum += 1
        # Highest match-count first for the sorted report.
        res_data = sorted(logging_data.items(), key=lambda item: item[0], reverse=True)

        time_end = datetime.datetime.now()
        total_hour = (time_end - time_start).total_seconds() / 3600
        print(f"筛选后数量：{hg_sum}，总耗时：{total_hour}\n================================================================================")
        print(f"简历筛选完成，全部简历存储于当前目录下:{file_name}.txt，\n================================================================================")
        logging.info(f"总简历数：{len(all_data)}, 筛选后简历数：{hg_sum}， 总耗时：{total_hour}小时")
        # Swap the root logger over to the second file; basicConfig only
        # reconfigures because the old handlers were cleared first.
        logging.getLogger().handlers.clear()
        logging.basicConfig(format="", level=logging.INFO, handlers=[handler_two])
        for data in res_data:
            for data_ in data[1]:
                logging.info(f"**************************************************************************************************************")
                logging.info(data_)
        logging.info(f"总简历数：{len(all_data)}, 筛选后简历数：{hg_sum}， 总耗时：{total_hour}小时")


if __name__ == '__main__':
    # Run the full pipeline; on any failure, show the traceback and also
    # persist it next to the results so the operator can report it.
    try:
        LiePin.get_lasted_people()
    except Exception:
        tb = traceback.format_exc()
        print(tb)
        with open(file=f"{file_name}_error.txt", mode="w", encoding="utf-8") as f:
            f.write(tb)