import json
import requests
import os
import time
import random
import traceback
import load_local_file_to_svc


def get_cache_path(cate_label):
    """Return the local cache-file path for *cate_label*.

    Creates the ``cache/`` directory on first use.

    Args:
        cate_label: label used as the cache file's base name.

    Returns:
        str: path ``cache/<cate_label>.txt``.
    """
    directory = "cache/"
    # exist_ok avoids the check-then-create race of os.path.exists + makedirs
    os.makedirs(directory, exist_ok=True)
    return os.path.join(directory, f"{cate_label}.txt")


def save_anchor_detail_to_file(detail_data, path):
    """Serialize *detail_data* to *path* as a single UTF-8 JSON document.

    ensure_ascii=False keeps non-ASCII text (nicknames, Chinese category
    names) human-readable in the cache file instead of \\uXXXX escapes;
    json.dump streams straight to the file without an intermediate string.

    Args:
        detail_data: any JSON-serializable object (here: list of dicts).
        path: destination file path; overwritten if it exists.
    """
    with open(path, "w", encoding="utf-8") as file:
        json.dump(detail_data, file, ensure_ascii=False)


def save_anchor_detail_to_database(detail_datas):
    """Persist anchor detail rows to the database.

    Not implemented yet; the only call site (in hanleData) is commented
    out, so this is currently a no-op placeholder.

    Args:
        detail_datas: list of normalized anchor dicts (see hanleData).
    """
    pass


def get_anchor_authorContact(uid):
    """Fetch contact details for anchor *uid* from fastmoss.

    Args:
        uid: fastmoss author id.

    Returns:
        tuple: ``(email, social_str)`` where *social_str* is a
        comma-joined list of ``"name:id"`` entries (may be ``""``);
        ``(None, None)`` on request failure, bad JSON, or a non-200
        API code.
    """
    url = f"https://www.fastmoss.com/api/author/detail/authorContact?uid={uid}"
    try:
        response = requests.get(
            url,
            headers=headers,
            # proxies=proxies,
        )
        # .json() is inside the try: a non-JSON body raises ValueError.
        result = response.json()
    except (requests.RequestException, ValueError):
        return None, None

    if result.get("code") != 200:
        return None, None

    email = None
    social = []
    # `or []` guards against "data": null in the payload.
    for entry in result.get("data") or []:
        if not entry["has"]:
            continue
        name = entry["name"]
        contact_id = entry["id"]
        if name == "email":
            email = contact_id
        else:
            social.append(f"{name}:{contact_id}")
    return email, ",".join(social)


def hanleData(anchor_list, file_name):
    """Normalize raw anchor records and cache them to a local file.

    Also bumps the module-level ``all_anchor_count`` running total.

    Args:
        anchor_list: list of raw anchor dicts from the search API.
        file_name: base name (without extension) for the cache file.
    """
    global all_anchor_count

    persistent_data = []
    all_anchor_count += len(anchor_list)

    for anchor in anchor_list:
        anchor_id = anchor.get("uid")

        # Category: prefer the joined market-category names; otherwise the
        # first people-category; otherwise the literal "全部" ("all").
        people_category_list = anchor.get("category", [])
        category = people_category_list[0] if people_category_list else "全部"
        qingxiang_category_list = anchor.get("market_category_l1_name", [])
        if qingxiang_category_list:
            category = "/".join(qingxiang_category_list)

        # Contact code 3 apparently marks anchors with retrievable contact
        # info. NOTE(review): the fetched email/social values are currently
        # unused because the corresponding fields below are commented out —
        # this costs one extra API request per matching anchor.
        for contact in anchor.get("contact", []):
            if contact.get("code") == 3:
                email, social = get_anchor_authorContact(anchor_id)
                break

        user_data = {
            "uid": str(anchor_id),
            "cate": category,
            "nickname": anchor.get("nickname"),
            "handle": anchor.get("unique_id"),
            # "email": email,
            "followers": anchor.get("follower_count"),
            # 'revenue': anchor.get('live_sale_amount'),
            "views": anchor.get("avg_play_count"),
            # "social": social,
            "detail": json.dumps(anchor),
            "platform": "fastmoss",
            # 'query_args' :label,
            "country": anchor.get("region"),
        }
        persistent_data.append(user_data)

    # Persist: cache locally (database persistence is currently disabled).
    if persistent_data:
        cache_path = get_cache_path(file_name)
        try:
            save_anchor_detail_to_file(persistent_data, cache_path)
            # save_anchor_detail_to_database(persistent_data)
        except Exception:
            # Log the real cause instead of silently swallowing it.
            traceback.print_exc()
            print(f"❌{cache_path} 入库失败")


def new_fetch():
    """Crawl the current follower range page by page, caching each page.

    Reads module globals: ``pagesize``, ``fans_range_from``,
    ``fans_range_to``, ``search_condition``, ``headers``; resets and
    updates ``all_anchor_count``. Pages already cached on disk are
    skipped, so an interrupted run can resume.

    Returns:
        The ``followers`` value of the last anchor on the last-named
        cache page (used by the caller as the next range's upper bound),
        or ``None`` if that cache file cannot be read.
    """
    global all_anchor_count

    all_anchor_count = 0
    current_page_num = 1
    total_cnt = None  # total result count; unknown until the first response

    # (page - 1) * pagesize < total_cnt also fetches the final partial page;
    # the previous `page * pagesize < total_cnt` skipped it when total_cnt
    # was not a multiple of pagesize.
    while total_cnt is None or (current_page_num - 1) * pagesize < total_cnt:

        file_name = f"第{current_page_num}页_{fans_range_from}-{fans_range_to}"
        # Resume support: skip pages already cached by a previous run.
        if os.path.exists(get_cache_path(file_name)):
            current_page_num += 1
            continue

        if fans_range_from is None or fans_range_to is None:
            url = f"https://www.fastmoss.com/api/author/search?page={current_page_num}&pagesize={pagesize}&{search_condition}"
        else:
            url = f"https://www.fastmoss.com/api/author/search?page={current_page_num}&pagesize={pagesize}&follower={fans_range_from},{fans_range_to}&{search_condition}"

        # total_cnt is None before the first response; show "?" instead of
        # crashing on None / pagesize (previously hidden by try/except-pass).
        total_pages = "?" if total_cnt is None else total_cnt / pagesize
        print(f"开始获取{total_pages}/{current_page_num}页,url=> {url}")

        try:
            response = requests.get(
                url,
                headers=headers,
                # proxies=proxies,
            )
        except requests.RequestException:
            # Narrow except: a Ctrl-C is no longer swallowed here.
            print("❌网络请求错误，重试！！！")
            continue

        if response.status_code != 200:
            input("❌❌❌❌❌❌")
            continue

        result = response.json()
        code = result.get("code")
        data = result.get("data", [])
        # Keep the latest raw response on disk for debugging.
        with open("output.json", "w") as file:
            file.write(json.dumps(result))

        if code == 200:
            anchor_list = data.get("author_list", [])
            total_cnt = data.get("total_cnt")
            if anchor_list:
                hanleData(anchor_list, file_name)
                current_page_num += 1
            else:
                # Empty page despite code 200: pause for manual inspection.
                input("请处理")
        elif code == "CAPTCHA_0001":
            # Human must solve the captcha in the browser, then press Enter.
            input("🤖🤖🤖🤖处理人机验证")
            continue
        else:
            print(f"other error code = {code}")

        time.sleep(random.randint(0, 1))

    # Read the last cached page; its final anchor's follower count becomes
    # the next range's upper bound.
    try:
        print(f"read:{file_name}")
        with open(f"cache/{file_name}.txt", "r", encoding="utf-8") as f:
            json_arr = json.loads(f.read())
            print(json_arr)
            new_to_followers = json_arr[-1]["followers"]
            print(f"😁new_to_followers: {new_to_followers}")
            return new_to_followers
    except Exception:
        traceback.print_exc()
        return None


# NOTE: replace with the Cookie string copied from your browser after logging in.
_Cookie = "fd_id=K53BzwAH0sD4RuoETgWmUbMi1XFNxGPc; vis_fid=662b5450e51db6236791714115664.9389; fp_visid=a3d5f54191676c6cd8ef1325c2e8a0ab; _ga=GA1.1.1218988917.1714115760; _ss_s_uid=c2c64a0947055150aee88d0c6f22821c; Hm_lvt_6ada669245fc6950ae4a2c0a86931766=1714115693,1714467692; Hm_lpvt_6ada669245fc6950ae4a2c0a86931766=1714467698; _uetsid=3ce8663006d011ef81a111ea1701629e|aqw80w|2|fld|0|1581; _uetvid=aeb3d400039c11ef9077e37cd6fb8a5f|kp7e1i|1714467753555|5|1|bat.bing.com/p/insights/c/v; _ga_GD8ST04HB5=GS1.1.1714467703.3.1.1714467761.2.0.0; _gcl_au=1.1.1388141456.1714115760.1504265234.1714467715.1714467761; fd_tk_exp=1715763761; fd_tk=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJleHAiOjE3MTU3NjM3NjEsInN1YiI6IjNkYTlmOGIyNjI0NzRjNDA3ZGFkNzYwNmI0NzAxY2M2IiwibmJmIjoxNzE0NDY3NzYxLCJhdWQiOnsidWlkIjozNDQ0NTU5LCJ1bmlvbmlkIjoiIiwibWNfb3BlbmlkIjoiIiwibmlja25hbWUiOiJGYXN0TW9zc-eUqOaItyIsImNyZWF0ZWRfYXQiOjE3MTM4MzM4OTksImNyZWF0ZWRfZGF0ZSI6IjIwMjQtMDQtMjMiLCJsb2dpbl9zb3VyY2UiOiJwYyIsInZpc2l0b3JfaWQiOiJhM2Q1ZjU0MTkxNjc2YzZjZDhlZjEzMjVjMmU4YTBhYiIsImRvbWFpbiI6Ind3dy5mYXN0bW9zcy5jb20iLCJmcF92aXNpZCI6IjExZmJiMzEzY2Q3YzY4NWZkZmIxNzZmZmQ3YWY5Y2U2In0sImlhdCI6MTcxNDQ2Nzc2MSwianRpIjoiM2RhOWY4YjI2MjQ3NGM0MDdkYWQ3NjA2YjQ3MDFjYzYiLCJpc3MiOiJ3d3cuZmFzdG1vc3MuY29tIiwic3RhdHVzIjoxLCJkYXRhIjpudWxsfQ.HQABHYestnjB7cPUQAxvGVP5PUeOxUJrq7a9d0Jtwns"


_UA = "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/124.0.0.0 Safari/537.36"


# Headers sent with every API request (see requests.get calls above).
headers = {
    "User-Agent": _UA,
    "Cookie": _Cookie,
    "Sec-Ch-Ua": '"Not_A Brand";v="8", "Chromium";v="120", "Google Chrome";v="120"',
    "Lang": "ZH_CN",
    #  'Eagleeye-Pappname': Pappname,
    #  'Eagleeye-Sessionid': Sessionid,
    #  'Eagleeye-Traceid':  Traceid
}

# Local SOCKS5 proxy; currently unused (the `proxies=` kwarg is commented
# out at every call site).
proxies = {
    "http": "socks5://127.0.0.1:1080",
    "https": "socks5://127.0.0.1:1080",
}

# Running total of anchors fetched in the current range (reset by new_fetch).
all_anchor_count = 0
# Results per API page.
pagesize = 10


# NOTE: set the follower-count range to crawl.
fans_range_from = 1000000
fans_range_to = 10000000

# NOTE: set the search filter query string appended to every request.
search_condition = "shop_window=1&region=US&order=1,2"

if __name__ == "__main__":
    while fans_range_to > fans_range_from:
        new_fans_range_to = new_fetch()
        if new_fans_range_to is None:
            break
        else:
            fans_range_to = new_fans_range_to

    load_local_file_to_svc.process()
    print("over!!")
