import json
import os
import random
import time
from asyncio import exceptions

import selenium
from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException, TimeoutException, WebDriverException
from selenium.webdriver.edge.options import Options
from selenium.webdriver.edge.service import Service
from selenium.webdriver.common.by import By
import pandas as pd
import requests
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
import asyncio
import aiohttp

# Bilibili article ("read") partitions to crawl.
#   id — the partition cid passed to the recommends API.
#   pn — how many pages to fetch for that partition.
# Keys are the Chinese partition names used as output file names under ./data/.
Partition={
    "游戏":{
        "id":1,
        "pn":50,
    },
    "动画":{
        "id":2,
        "pn":50,
    },
    "生活":{
        "id":3,
        "pn":50,
    },
    "轻小说":{
        "id":16,
        "pn":50,
    },
    "科技":{
        "id":17,
        "pn":55,
    },
    "影视":{
        "id":28,
        "pn":50,
    },
    "兴趣":{
        "id":29,
        "pn":50,
    },
}

# Login cookies read from the local `cookies` file (raw "k1=v1; k2=v2" header string).
Cookies_string=''  # NOTE(review): unused — lowercase `cookies_string` below is the one referenced; kept in case other code relies on it
with open('cookies', 'r', encoding='utf-8') as file:
    # strip() removes the trailing newline that file.read() would otherwise
    # leave inside the last cookie's value.
    cookies_string = file.read().strip()
# Convert the header string into Selenium-style cookie dicts.
# Bug fix: split on the FIRST '=' only — cookie values (e.g. base64 SESSDATA)
# frequently contain '=' themselves and were being truncated by split('=')[1].
Cookies_list = [
    {'name': name, 'value': value}
    for name, _, value in (cookie.partition('=') for cookie in cookies_string.split('; '))
    if name
]

# Pool of User-Agent strings covering desktop, mobile, and console browsers.
# NOTE(review): only index 1 is actually used below (in `headers`); the pool is
# presumably intended for rotation — confirm whether random selection was planned.
user_agents = [
    "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.0.0 Safari/537.36 Edg/126.0.0.0",
    "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:86.0) Gecko/20100101 Firefox/86.0",
    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/88.0.4324.192 Safari/537.36",
    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/14.0.3 Safari/605.1.15",
    "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:86.0) Gecko/20100101 Firefox/86.0",
    "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/88.0.4324.192 Safari/537.36",
    "Mozilla/5.0 (iPhone; CPU iPhone OS 14_4 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/14.0 Mobile/15E148 Safari/604.1",
    "Mozilla/5.0 (iPad; CPU OS 14_4 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/14.0 Mobile/15E148 Safari/604.1",
    "Mozilla/5.0 (Android 11; Mobile; rv:86.0) Gecko/86.0 Firefox/86.0",
    "Mozilla/5.0 (Linux; Android 10; SM-G973F) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/88.0.4324.152 Mobile Safari/537.36",
    "Mozilla/5.0 (Linux; Android 9; SM-J730G) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.101 Mobile Safari/537.36",
    "Mozilla/5.0 (Windows NT 10.0; WOW64; rv:52.0) Gecko/20100101 Firefox/52.0",
    "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.87 Safari/537.36",
    "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/64.0.3282.140 Safari/537.36 Edge/17.17134",
    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.132 Safari/537.36",
    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.14; rv:62.0) Gecko/20100101 Firefox/62.0",
    "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.81 Safari/537.36",
    "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:45.0) Gecko/20100101 Firefox/45.0",
    "Mozilla/5.0 (Linux; Android 9; Mi A2 Lite) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.90 Mobile Safari/537.36",
    "Mozilla/5.0 (iPhone; CPU iPhone OS 11_0 like Mac OS X) AppleWebKit/604.1.38 (KHTML, like Gecko) Version/11.0 Mobile/15A372 Safari/604.1",
    "Mozilla/5.0 (Windows Phone 10.0; Android 6.0.1; Microsoft; Lumia 950) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/52.0.2743.116 Mobile Safari/537.36 Edge/15.14977",
    "Mozilla/5.0 (PlayStation 4 3.11) AppleWebKit/537.73 (KHTML, like Gecko)",
    "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/90.0.4430.85 Safari/537.36",
    "Mozilla/5.0 (Macintosh; Intel Mac OS X 11_2_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.114 Safari/537.36",
    "Mozilla/5.0 (Linux; Android 11; SM-G991B) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/88.0.4324.152 Mobile Safari/537.36",
    "Mozilla/5.0 (iPad; CPU OS 13_4 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/13.1.2 Mobile/15E148 Safari/604.1"
]

# Shared request headers for every HTTP call (requests and aiohttp).
# NOTE(review): a single fixed UA (index 1) is used, not a random pick from
# the pool above — confirm whether rotation was intended.
headers = {
        "User-Agent":user_agents[1],
        "Cookie":cookies_string
    }
print(headers["User-Agent"])

# def print_dict(data, indent=0):
#     for key, value in data.items():
#         if isinstance(value, dict):
#             print(' ' * indent + str(key) + ":")
#             print_dict(value, indent + 4)
#         else:
#             print(' ' * indent + str(key) + ": " + str(value))

def scroll_to_bottom(browser, pause=2):
    """Scroll the page to the bottom repeatedly until its height stops growing.

    Used to trigger infinite-scroll / lazy loading before reading the DOM.

    Args:
        browser: a Selenium WebDriver (anything exposing ``execute_script``).
        pause: seconds to wait after each scroll for lazy content to load.
               Default 2 keeps the original hard-coded behavior.
    """
    last_height = browser.execute_script("return document.body.scrollHeight")
    while True:
        # Jump to the current bottom of the document.
        browser.execute_script("window.scrollTo(0, document.body.scrollHeight);")
        time.sleep(pause)  # give lazily-loaded content time to arrive
        new_height = browser.execute_script("return document.body.scrollHeight")
        if new_height == last_height:
            # Height is stable -> nothing more loaded; we reached the true bottom.
            break
        last_height = new_height

def Selenium_get_read_partition_recommend(partition_id):
    """Crawl the Bilibili read-home page of one partition with Selenium (Edge).

    Loads https://www.bilibili.com/read/home#rid=<partition_id>, applies the
    saved login cookies, scrolls to the bottom to trigger lazy loading, then
    collects each article's ``data-id`` and link.

    Side effects: writes ``posts_dataId_href.csv`` in the working directory.

    Args:
        partition_id: numeric partition id appended to the URL fragment.
    """
    results = []
    driver_path = './edgedriver_win64/msedgedriver.exe'  # path to your EdgeDriver binary

    service = Service(driver_path)
    browser = webdriver.Edge(service=service)
    browser.set_page_load_timeout(10)  # page-load timeout: 10 seconds
    try:
        url = f'https://www.bilibili.com/read/home#rid={partition_id}'
        browser.get(url)

        # Apply the saved login cookies, then reload so they take effect.
        for cookie in Cookies_list:
            browser.add_cookie(cookie)
        browser.refresh()

        # Trigger lazy loading of the full article list.
        scroll_to_bottom(browser)

        # Article list container, then the individual article cards inside it.
        content = browser.find_element(By.CSS_SELECTOR, 'body > div.tab-content > div.page-content > div.left-side > div.article-list > div.article-list-holder')
        items = content.find_elements(By.CSS_SELECTOR, 'div.article-item')

        count = 0
        for item in items:
            data_id = item.get_attribute('data-id')
            try:
                href_element = item.find_element(By.CSS_SELECTOR, 'div > div > div.article-left-block > a')
                href = href_element.get_attribute('href')
            except NoSuchElementException:
                # Bug fix: was a bare `except:` that also swallowed unrelated errors.
                href = None
            results.append({'data_id': data_id, 'href': href})
            print(f'data-id: {data_id}, href: {href}')
            count += 1

        print(f'总计 {count} 项内容')
    finally:
        # Bug fix: quit in `finally` so the driver process is not leaked
        # when any step above raises.
        browser.quit()
    df = pd.DataFrame(results)
    df.to_csv('posts_dataId_href.csv', index=False)


async def fetch(session, url):
    """Issue a GET for `url` on `session` and return the decoded JSON body.

    Returns None (after logging) when the request fails at the client level.
    """
    try:
        async with session.get(url) as resp:
            # Raise on non-2xx so HTTP failures funnel into the handler below.
            resp.raise_for_status()
            return await resp.json()
    except aiohttp.ClientError as e:
        print(f"Error fetching data from {url}: {e}")
        return None

async def async_api_get_read_partition_recommend(partition_id, pn, ps=1000):
    """Concurrently fetch `pn` pages of article recommendations for one partition.

    Bug fix: in the original, the filtering loop was dedented OUTSIDE the
    ``for data in responses`` loop, so only the last response was ever
    processed and the result was discarded.  Every page is now filtered and
    the aggregated list is returned.

    Args:
        partition_id: partition cid for the recommends endpoint.
        pn: number of pages to request concurrently.
        ps: page size (default 1000).

    Returns:
        List of extracted article dicts across all successfully fetched pages.
    """
    async with aiohttp.ClientSession(headers=headers) as session:
        tasks = []
        for i in range(pn):
            print(partition_id, i)
            url = f"https://api.bilibili.com/x/article/recommends?cid={partition_id}&pn={i}&ps={ps}"
            tasks.append(fetch(session, url))

        responses = await asyncio.gather(*tasks)
        filtered_data = []
        for data in responses:
            if data is None:
                continue  # fetch() already logged the failure
            try:
                for item in data['data']:
                    if item['stats']['view'] >= 0:
                        extracted = {
                            'id': item['id'],
                            'category': {
                                'id': item['category']['id'],
                                'name': item['category']['name']
                            },
                            'title': item['title'],
                            'summary': item['summary'],
                            'banner_url': item['banner_url'],
                            'author': {
                                'mid': item['author']['mid'],
                                'name': item['author']['name'],
                                'face': item['author']['face']
                            },
                            'publish_time': item['publish_time'],
                            'stats': {
                                'view': item['stats']['view'],
                                'favorite': item['stats']['favorite'],
                                'like': item['stats']['like'],
                                'reply': item['stats']['reply']
                            },
                            'view_url': item['view_url']
                        }
                        print(extracted)
                        filtered_data.append(extracted)
            except KeyError as e:
                print(f"Key error: {e} in data: {data}")
            except TypeError as e:
                # data['data'] can be None on empty pages -> iterating raises TypeError
                print(f"Type error: {e} with data: {data}")
        return filtered_data

def api_get_read_partition_recommend(partition_id, pn, ps=1200):
    """Sequentially fetch `pn` pages of article recommendations for one partition.

    Args:
        partition_id: partition cid for the recommends endpoint.
        pn: number of pages to request.
        ps: page size (default 1200).

    Returns:
        List of extracted article dicts, keeping only articles with >= 20 views.
    """
    data_sum = []
    for i in range(pn):
        print(partition_id, i)
        url = f"https://api.bilibili.com/x/article/recommends?cid={partition_id}&pn={i}&ps={ps}"
        try:
            # Authenticated GET (headers carry the cookie string).
            response = requests.get(url, headers=headers)
            response.raise_for_status()
            data = response.json()
            count = 0
            for item in data['data']:
                if item['stats']['view'] >= 20:  # drop near-zero-view articles
                    extracted = {
                        'id': item['id'],
                        'category': {
                            'id': item['category']['id'],
                            'name': item['category']['name']
                        },
                        'title': item['title'],
                        'summary': item['summary'],
                        'banner_url': item['banner_url'],
                        'author': {
                            # Bug fix: key was 'mid ' (trailing space), inconsistent
                            # with the async variant and downstream consumers.
                            'mid': item['author']['mid'],
                            'name': item['author']['name'],
                            'face': item['author']['face']
                        },
                        'publish_time': item['publish_time'],
                        'stats': {
                            'view': item['stats']['view'],
                            'favorite': item['stats']['favorite'],
                            'like': item['stats']['like'],
                            'reply': item['stats']['reply']
                        },
                        'view_url': item['view_url']
                    }
                    print(extracted)
                    data_sum.append(extracted)
                count += 1
            print(count)  # total items seen on this page (including filtered-out ones)
        except (AttributeError, TypeError):
            # data['data'] can be None on empty pages; iterating it raises TypeError
            # (AttributeError kept from the original handler).
            print("data 变量为 None 或者不包含 items 方法，继续执行程序")
        except requests.exceptions.RequestException as e:
            print(f"Error fetching data for partition_id {partition_id}: {e}")
    return data_sum

def fetch_article_info(data_id):
    """Fetch view-info for one article and return its key stats and metadata.

    Returns a dict with a single "main_info" entry, or None (after logging)
    when the HTTP request fails.
    """
    url = f"https://api.bilibili.com/x/article/viewinfo?id={data_id}"
    try:
        # Authenticated GET (headers carry the cookie string).
        response = requests.get(url, headers=headers)
        response.raise_for_status()

        payload = response.json()["data"]
        stats = payload["stats"]
        return {
            "main_info": {
                "view": stats["view"],
                "favorite": stats["favorite"],
                "like": stats["like"],
                "reply": stats["reply"],
                "title": payload["title"],
                "banner_url": payload["banner_url"],
                "mid": payload["mid"],
                "author_name": payload["author_name"],
                "image_urls": payload["image_urls"]
            },
        }
    except requests.exceptions.RequestException as e:
        print(f"Error fetching data for data-id {data_id}: {e}")
        return None


def get_num():
    """Print the entry count of each ./data/<category>.json file and the grand total.

    Assumes each file contains a JSON array (length = number of entries).
    """
    total = 0  # renamed from `sum`, which shadowed the builtin
    for category, details in Partition.items():
        print(category, details)
        with open(f"./data/{category}.json", "r", encoding="utf-8") as f:
            data = json.load(f)
            num_entries = len(data)
            total += num_entries
            print(f"{category} 文件中有 {num_entries} 条数据")

    print(f"总共有 {total} 条数据")

def ensure_json_format(input_file, output_file):
    """Re-serialize a JSON file with ensure_ascii=False and 4-space indentation.

    Reads `input_file`, then writes the parsed data back out to `output_file`
    (the two paths may be the same file).
    """
    with open(input_file, "r", encoding="utf-8") as src:
        payload = json.load(src)
    with open(output_file, "w", encoding="utf-8") as dst:
        json.dump(payload, dst, ensure_ascii=False, indent=4)

def api_get_content(id):
    """Fetch the first page of comments for article `id`.

    Returns [page_count, extracted_replies] on success, or None (after
    logging) when the request fails or the payload is missing expected keys.
    """
    def extract_data(reply):
        # Recursively project one reply (and its nested replies) onto the
        # subset of fields we keep.
        extracted = {
            "rpid": reply["rpid"],
            "oid": reply["oid"],
            "mid": reply["mid"],
            "member": {
                "mid": reply["member"]["mid"],
                "uname": reply["member"]["uname"],
                "avatar": reply["member"]["avatar"]
            },
            "content": {
                "message": reply["content"]["message"],
                "members": [
                    {
                        "mid": member["mid"],
                        "uname": member["uname"],
                        "avatar": member["avatar"]
                    } for member in reply["content"].get("members", [])
                ]
            },
            # `replies` can be absent or explicitly null in the payload.
            "replies": [extract_data(r) for r in (reply.get("replies") or [])],
            "reply_control": {
                # Robustness fix: reply_control itself can be absent/None;
                # mirror the `or []` guard used for nested replies above.
                "location": (reply.get("reply_control") or {}).get("location", "")
            }
        }
        return extracted

    url = f"https://api.bilibili.com/x/v2/reply?type=12&oid={id}"
    try:
        # Authenticated GET (headers carry the cookie string).
        response = requests.get(url, headers=headers)
        response.raise_for_status()

        data = response.json()
        page_count = data["data"]["page"]["count"]
        replies = data["data"]["replies"]
        extracted_replies = [extract_data(reply) for reply in replies]

        data_sum = [page_count, extracted_replies]
        print(data_sum)
        return data_sum

    except requests.exceptions.RequestException as e:
        print(f"Error fetching data for id {id}: {e}")
    except KeyError as e:
        print(f"KeyError: {e}")

def get_user_info(uid):
    """Fetch one user's card info from the web-interface/card endpoint.

    Args:
        uid: numeric user mid.

    Returns:
        Dict of selected profile fields (missing keys default to "").

    Raises:
        requests.exceptions.RequestException: re-raised after logging, so the
        caller (main_4) can stop and persist its progress counter.
    """
    def extract_data(card):
        # `data` is the full API payload captured from the enclosing scope.
        return {
            "mid": card.get("mid", ""),
            "name": card.get("name", ""),
            "sex": card.get("sex", ""),
            "face": card.get("face", ""),
            "fans": card.get("fans", ""),
            "attentions": card.get("attention", ""),
            "sign": card.get("sign", ""),
            "article_count": data.get("data", {}).get("article_count", ""),
            "like_num": data.get("data", {}).get("like_num", "")
        }

    url = f"https://api.bilibili.com/x/web-interface/card?mid={uid}"
    try:
        # Authenticated GET (headers carry the cookie string).
        response = requests.get(url, headers=headers)
        response.raise_for_status()

        data = response.json()
        card = data.get("data", {}).get("card", {})
        info = extract_data(card)  # computed once (was extracted twice)
        print(info)
        return info

    except requests.exceptions.RequestException as e:
        # Bug fix: message previously interpolated the builtin `id` function
        # instead of the `uid` argument.
        print(f"Error fetching data for uid {uid}: {e}")
        raise

def get_post_url():
    """Collect (id, view_url) pairs from every partition's post file.

    Reads ./data/posts/<category>.json for each partition and writes the
    combined list to ./data/posts/post_urls.json.
    """
    url_list = []
    for category, details in Partition.items():
        with open(f"./data/posts/{category}.json", "r", encoding="utf-8") as src:
            for item in json.load(src):
                url_list.append({
                    "id": item["id"],
                    "url": item["view_url"]
                })

    # Bug fix: open the output only after all reads succeed, so a failing
    # read can no longer leave behind an empty/truncated post_urls.json.
    with open("./data/posts/post_urls.json", "w", encoding="utf-8") as dst:
        json.dump(url_list, dst, ensure_ascii=False, indent=4)


def get_post_detail():
    """Visit every post URL with Selenium and save each article's content HTML.

    Reads ./data/posts/post_urls.json; writes ./data/posts/posts_detail.json
    as a list of {"id", "detail"} records.

    Bug fixes vs. original:
    - ``except exceptions`` caught the ``asyncio.exceptions`` MODULE (which
      raises TypeError at catch time); removed in favor of letting real
      errors propagate while the per-URL handler stays targeted.
    - the browser is now quit in ``finally`` instead of being leaked.
    - the output key was ``"detail "`` with a trailing space.
    """
    with open("./data/posts/post_urls.json", "r", encoding="utf-8") as f1:
        url_list = json.load(f1)
    driver_path = './edgedriver_win64/msedgedriver.exe'  # path to your EdgeDriver binary

    service = Service(driver_path)
    browser = webdriver.Edge(service=service)
    browser.set_page_load_timeout(10)  # page-load timeout: 10 seconds
    post_detail_list = []
    count = 0
    try:
        for url in url_list:
            try:
                browser.get(url['url'])
                # Apply login cookies and reload so they take effect.
                for cookie in Cookies_list:
                    browser.add_cookie(cookie)
                browser.refresh()
                # Scroll the share box into view so the article body renders.
                target_element = browser.find_element(By.CLASS_NAME, "share-box")
                browser.execute_script("arguments[0].scrollIntoView();", target_element)
                detail = browser.find_element(By.CSS_SELECTOR,
                                              '#app > div > div.article-container > div.article-container__content')
                detail_html = detail.get_attribute('outerHTML')
                post_detail = {
                    "id": url['id'],
                    "detail": detail_html,
                }
                count += 1
                if count % 10 == 0:
                    # brief random pause every 10 pages to avoid rate limiting
                    time.sleep(random.randint(1, 3))
                print(post_detail)
                post_detail_list.append(post_detail)
            except (NoSuchElementException, TimeoutException, WebDriverException) as e:
                print(f"Error processing URL {url['url']}: {e}")
                continue
        with open("./data/posts/posts_detail.json", "w", encoding="utf-8") as f2:
            json.dump(post_detail_list, f2, ensure_ascii=False, indent=4)
    finally:
        browser.quit()


async def main_1():
    """Kick off concurrent recommendation crawls for every configured partition."""
    tasks = [
        async_api_get_read_partition_recommend(part['id'], part['pn'], 200)
        for part in Partition.values()
    ]
    await asyncio.gather(*tasks)

def main_2():
    """Crawl each partition's recommendations sequentially and dump to ./data/<category>.json."""
    os.makedirs('./data/posts', exist_ok=True)

    for category, details in Partition.items():
        print(category, details)
        data = api_get_read_partition_recommend(details['id'], details['pn'])
        with open(f"./data/{category}.json", "w", encoding="utf-8") as f:
            json.dump(data, f, ensure_ascii=False, indent=4)

def main_3():
    """Fetch comments for every post listed in ./data/posts/<category>.json.

    All results are accumulated and written to ./data/contents/contents.json.
    """
    content_sum = []
    for category, details in Partition.items():
        print(category, details)
        with open(f"./data/posts/{category}.json", "r", encoding="utf-8") as f:
            posts = json.load(f)
        for index, item in enumerate(posts, start=1):
            print(item['id'])
            content_sum.append(api_get_content(item['id']))
            if index % 20 == 0:
                # brief random pause every 20 requests to avoid rate limiting
                time.sleep(random.randint(1, 3))
    print(content_sum)

    os.makedirs('./data/contents', exist_ok=True)
    with open("./data/contents/contents.json", "w", encoding="utf-8") as f:
        json.dump(content_sum, f, ensure_ascii=False, indent=4)

def main_4():
    """Sequentially fetch user info for every id in users_id.json, resumably.

    Progress — the index of the NEXT user to fetch — is persisted in
    ./data/users/count.json so an interrupted run resumes where it left off.
    Results are appended one JSON object per line (JSON Lines) to
    ./data/users/users.json; see ensure_json_format for post-cleanup.

    Bug fix: the original saved ``count = i`` after each success, so resuming
    at ``range(count, ...)`` re-fetched AND re-wrote the last saved user.
    We now persist ``i + 1``.
    """
    def save_count(count):
        # Persist the index of the next user still to fetch.
        with open("./data/users/count.json", "w", encoding="utf-8") as f:
            json.dump(count, f)

    def load_count():
        # Resume point; 0 when no previous run has saved progress.
        if os.path.exists("./data/users/count.json"):
            with open("./data/users/count.json", "r", encoding="utf-8") as f:
                return json.load(f)
        return 0

    try:
        with open("./data/users/users_id.json", "r", encoding="utf-8") as f1:
            user_id_list = json.load(f1)
        count = load_count()
        try:
            with open("./data/users/users.json", "a+", encoding="utf-8") as f2:
                for i in range(count, len(user_id_list)):
                    try:
                        user_info = get_user_info(user_id_list[i])
                        f2.write(json.dumps(user_info, ensure_ascii=False) + '\n')
                        count = i + 1  # next index to fetch on resume
                        print(count)
                        save_count(count)
                    except requests.exceptions.RequestException:
                        # get_user_info already logged the error; stop here and
                        # keep the saved progress for the next run.
                        break
        finally:
            print(f"{count} users data saved to file.")
    except FileNotFoundError as e:
        print(f"File not found: {e}")
    except json.JSONDecodeError as e:
        print(f"Error decoding JSON: {e}")




if __name__ == '__main__':
    # # Async API crawl of partition posts
    # asyncio.run(main_1())

    # # Sequential API crawl of partition posts
    # main_2()

    # # Selenium dynamic crawl of partition posts
    # Selenium_get_read_partition_recommend(1)

    # # Count the number of entries in the JSON data files
    # get_num()

    # Sequential API crawl of comments
    # main_3()

    # # Sequential API crawl of user info
    # main_4()

    # # Clean the collected user-info data
    # input_file = "./data/users/users.json"
    # output_file = "./data/users/users.json"
    # ensure_json_format(input_file, output_file)

    # get_post_url()
    get_post_detail()