import asyncio
import os
import random
import json
from asyncio import Task
from typing import Any, Dict, List, Optional, Tuple

from playwright.async_api import (BrowserContext, BrowserType, Page,
                                  async_playwright)

import config
from base.base_crawler import AbstractCrawler
from proxy.proxy_ip_pool import IpInfoModel, create_ip_pool
from store import douyin as douyin_store
from tools import utils
from var import crawler_type_var

from .client import DOUYINClient
from .exception import DataFetchError
from .field import PublishTimeType
from .login import DouYinLogin


class DouYinCrawler(AbstractCrawler):
    """Playwright-driven crawler for douyin.com.

    Supports four modes dispatched from start() via config.CRAWLER_TYPE:
    keyword search, specified-post detail, creator profiles, and the
    follow/direct-message ("notify") workflow.
    """

    # Populated inside start(); shared by all crawl modes.
    context_page: Page
    dy_client: DOUYINClient
    browser_context: BrowserContext

    def __init__(self) -> None:
        # Fixed desktop Chrome UA, used both for the browser context and the
        # HTTP client headers so the two stay consistent.
        self.user_agent = "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/109.0.0.0 Safari/537.36"  # fixed
        self.index_url = "https://www.douyin.com"

    async def start(self) -> None:
        """Entry point: launch the browser, ensure a logged-in session, then
        run the crawl mode selected by ``config.CRAWLER_TYPE``.
        """
        playwright_proxy_format, httpx_proxy_format = None, None
        if config.ENABLE_IP_PROXY:
            ip_proxy_pool = await create_ip_pool(config.IP_PROXY_POOL_COUNT, enable_validate_ip=True)
            ip_proxy_info: IpInfoModel = await ip_proxy_pool.get_proxy()
            playwright_proxy_format, httpx_proxy_format = self.format_proxy_info(ip_proxy_info)

        async with async_playwright() as playwright:
            # Launch a browser context.
            chromium = playwright.chromium
            # BUG FIX: the proxy dict built above was previously discarded
            # (None was hard-coded here), so Playwright never used the
            # configured IP proxy even with ENABLE_IP_PROXY on.
            self.browser_context = await self.launch_browser(
                chromium,
                playwright_proxy_format,
                self.user_agent,
                headless=config.HEADLESS
            )
            # stealth.min.js is a js script to prevent the website from detecting the crawler.
            await self.browser_context.add_init_script(path="libs/stealth.min.js")
            self.context_page = await self.browser_context.new_page()
            await self.context_page.goto(self.index_url)

            self.dy_client = await self.create_douyin_client(httpx_proxy_format)
            # pong() probes whether the stored cookies are still valid; if not,
            # run the interactive login flow and refresh the client cookies.
            if not await self.dy_client.pong(browser_context=self.browser_context):
                login_obj = DouYinLogin(
                    login_type=config.LOGIN_TYPE,
                    login_phone="",  # your phone number
                    browser_context=self.browser_context,
                    context_page=self.context_page,
                    cookie_str=config.COOKIES
                )
                await login_obj.begin()
                await self.dy_client.update_cookies(browser_context=self.browser_context)
            crawler_type_var.set(config.CRAWLER_TYPE)
            # Deliberate manual gate: wait for the operator before crawling.
            input('输入回车键，开始....')
            if config.CRAWLER_TYPE == "search":
                # Search for notes and retrieve their comment information.
                await self.search()
            elif config.CRAWLER_TYPE == "detail":
                # Get the information and comments of the specified post
                await self.get_specified_awemes()
            elif config.CRAWLER_TYPE == "creator":
                # Get the information and comments of the specified creator
                await self.get_creators_and_videos()
            elif config.CRAWLER_TYPE == "notify":
                # Follow and direct-message collected users.
                await self.post_notify()
            input('输入回车键，结束....')
            utils.logger.info("[DouYinCrawler.start] Douyin Crawler finished ...")

    async def search(self) -> None:
        """Search every configured keyword page by page, persist each aweme
        found, then fetch the comments for everything collected.
        """
        utils.logger.info("[DouYinCrawler.search] Begin search douyin keywords")
        dy_limit_count = 10  # douyin limit page fixed value
        if config.CRAWLER_MAX_NOTES_COUNT < dy_limit_count:
            config.CRAWLER_MAX_NOTES_COUNT = dy_limit_count
        start_page = config.START_PAGE  # pages before this are skipped, not fetched
        for keyword in config.KEYWORDS.split(","):
            utils.logger.info(f"[DouYinCrawler.search] Current keyword: {keyword}")
            aweme_list: List[str] = []
            page = 0
            while (page - start_page + 1) * dy_limit_count <= config.CRAWLER_MAX_NOTES_COUNT:
                if page < start_page:
                    utils.logger.info(f"[DouYinCrawler.search] Skip {page}")
                    page += 1
                    continue
                try:
                    utils.logger.info(f"[DouYinCrawler.search] search douyin keyword: {keyword}, page: {page}")
                    posts_res = await self.dy_client.search_info_by_keyword(keyword=keyword,
                                                                            offset=page * dy_limit_count - dy_limit_count,
                                                                            publish_time=PublishTimeType(config.PUBLISH_TIME_TYPE)
                                                                            )
                except DataFetchError:
                    utils.logger.error(f"[DouYinCrawler.search] search douyin keyword: {keyword} failed")
                    break

                page += 1
                if "data" not in posts_res:
                    utils.logger.error(
                        f"[DouYinCrawler.search] search douyin keyword: {keyword} failed，账号也许被风控了。")
                    break

                # BUG FIX: "data" can be present but null in the response;
                # iterate an empty list instead of crashing on None.
                for post_item in posts_res.get("data") or []:
                    try:
                        # A result row is either a plain video or a "mix";
                        # for a mix, take its first item.
                        aweme_info: Dict = post_item.get("aweme_info") or \
                                           post_item.get("aweme_mix_info", {}).get("mix_items")[0]
                    except TypeError:
                        continue
                    aweme_list.append(aweme_info.get("aweme_id", ""))
                    await douyin_store.update_douyin_aweme(aweme_item=aweme_info)
            utils.logger.info(f"[DouYinCrawler.search] keyword:{keyword}, aweme_list:{aweme_list}")
            await self.batch_get_note_comments(aweme_list)

    async def get_specified_awemes(self):
        """Fetch detail and comments for every id in config.DY_SPECIFIED_ID_LIST."""
        sem = asyncio.Semaphore(config.MAX_CONCURRENCY_NUM)
        detail_coros = (
            self.get_aweme_detail(aweme_id=target_id, semaphore=sem)
            for target_id in config.DY_SPECIFIED_ID_LIST
        )
        # Fetch all details concurrently (bounded by the semaphore), then
        # persist every successful result.
        for detail in await asyncio.gather(*detail_coros):
            if detail is not None:
                await douyin_store.update_douyin_aweme(detail)
        await self.batch_get_note_comments(config.DY_SPECIFIED_ID_LIST)

    async def get_aweme_detail(self, aweme_id: str, semaphore: asyncio.Semaphore) -> Any:
        """Fetch one video's detail; returns None on any known fetch failure."""
        async with semaphore:
            try:
                return await self.dy_client.get_video_by_id(aweme_id)
            except DataFetchError as ex:
                utils.logger.error(f"[DouYinCrawler.get_aweme_detail] Get aweme detail error: {ex}")
            except KeyError as ex:
                utils.logger.error(
                    f"[DouYinCrawler.get_aweme_detail] have not fund note detail aweme_id:{aweme_id}, err: {ex}")
            # Both failure paths fall through to a single None return.
            return None

    async def batch_get_note_comments(self, aweme_list: List[str]) -> None:
        """Spawn one semaphore-bounded task per aweme id to pull its comments."""
        if not config.ENABLE_GET_COMMENTS:
            utils.logger.info(f"[DouYinCrawler.batch_get_note_comments] Crawling comment mode is not enabled")
            return

        semaphore = asyncio.Semaphore(config.MAX_CONCURRENCY_NUM)
        # Name each task after its aweme id to ease debugging.
        task_list: List[Task] = [
            asyncio.create_task(self.get_comments(aweme_id, semaphore), name=aweme_id)
            for aweme_id in aweme_list
        ]
        if task_list:
            # asyncio.wait rejects an empty collection, hence the guard above.
            await asyncio.wait(task_list)

    async def get_comments(self, aweme_id: str, semaphore: asyncio.Semaphore) -> None:
        """Download all comments of one video, persisting them via the store callback."""
        async with semaphore:
            try:
                # The client paginates internally and flushes each batch of
                # comments through the store callback.
                await self.dy_client.get_aweme_all_comments(
                    aweme_id=aweme_id,
                    crawl_interval=random.random(),
                    is_fetch_sub_comments=config.ENABLE_GET_SUB_COMMENTS,
                    callback=douyin_store.batch_update_dy_aweme_comments
                )
            except DataFetchError as e:
                utils.logger.error(f"[DouYinCrawler.get_comments] aweme_id: {aweme_id} get comments failed, error: {e}")
            else:
                utils.logger.info(
                    f"[DouYinCrawler.get_comments] aweme_id: {aweme_id} comments have all been obtained and filtered ...")

    async def get_creators_and_videos(self) -> None:
        """
        Get the information and videos of the specified creator
        """
        utils.logger.info("[DouYinCrawler.get_creators_and_videos] 开始获取抖音用户信息")
        for user_id in config.DY_CREATOR_ID_LIST:
            # Fetch the creator's profile.
            creator_info: Dict = await self.dy_client.get_user_info(user_id)
            if creator_info:
                await douyin_store.save_creator(user_id, creator=creator_info)
                # BUG FIX: this log line used to sit outside the guard and
                # dereference creator_info (and its 'user' key) unconditionally,
                # crashing whenever the profile lookup came back empty.
                nickname = (creator_info.get('user') or {}).get('nickname', '')
                utils.logger.info("[DouYinCrawler.get_creators_and_videos] 开始获取抖音用户" + nickname + "视频信息")
            # Fetch all of the creator's videos; each page is stored as it
            # arrives via fetch_creator_video_detail.
            all_video_list = await self.dy_client.get_all_user_aweme_posts(
                sec_user_id=user_id,
                callback=self.fetch_creator_video_detail
            )
            # Then pull the comments for every collected video id.
            video_ids = [video_item.get("aweme_id") for video_item in all_video_list]
            utils.logger.info("[DouYinCrawler.get_creators_and_videos] 开始获取视频信息")
            utils.logger.info(video_ids)
            await self.batch_get_note_comments(video_ids)

    async def fetch_creator_video_detail(self, video_list: List[Dict]):
        """
        Concurrently obtain the specified post list and save the data
        """
        sem = asyncio.Semaphore(config.MAX_CONCURRENCY_NUM)
        detail_coros = (
            self.get_aweme_detail(item.get("aweme_id"), sem) for item in video_list
        )
        # Gather all details, then persist every non-None result.
        for aweme_item in await asyncio.gather(*detail_coros):
            if aweme_item is not None:
                await douyin_store.update_douyin_aweme(aweme_item)

    @staticmethod
    def format_proxy_info(ip_proxy_info: IpInfoModel) -> Tuple[Optional[Dict], Optional[Dict]]:
        """Build the (playwright, httpx) proxy settings from one pool entry."""
        endpoint = f"{ip_proxy_info.ip}:{ip_proxy_info.port}"
        # Playwright wants server/username/password split out.
        playwright_proxy = {
            "server": f"{ip_proxy_info.protocol}{endpoint}",
            "username": ip_proxy_info.user,
            "password": ip_proxy_info.password,
        }
        # httpx wants a protocol-keyed mapping to a credentialed URL.
        httpx_proxy = {
            f"{ip_proxy_info.protocol}": f"http://{ip_proxy_info.user}:{ip_proxy_info.password}@{endpoint}"
        }
        return playwright_proxy, httpx_proxy

    async def create_douyin_client(self, httpx_proxy: Optional[str]) -> DOUYINClient:
        """Build a DOUYINClient bound to the current browser session's cookies."""
        cookie_str, cookie_dict = utils.convert_cookies(await self.browser_context.cookies())  # type: ignore
        # Mirror the browser's identity in the HTTP headers so API calls look
        # like they come from the same session.
        headers = {
            "User-Agent": self.user_agent,
            "Cookie": cookie_str,
            "Host": "www.douyin.com",
            "Origin": "https://www.douyin.com/",
            "Referer": "https://www.douyin.com/",
            "Content-Type": "application/json;charset=UTF-8",
        }
        return DOUYINClient(
            proxies=httpx_proxy,
            headers=headers,
            playwright_page=self.context_page,
            cookie_dict=cookie_dict,
        )

    async def launch_browser(
            self,
            chromium: BrowserType,
            playwright_proxy: Optional[Dict],
            user_agent: Optional[str],
            headless: bool = True
    ) -> BrowserContext:
        """Launch browser and create browser context.

        NOTE(review): the Chrome executable path is hard-coded for Windows —
        confirm it matches the deployment machine.
        """
        if config.SAVE_LOGIN_STATE:
            # Persistent context keeps cookies/login state on disk per platform.
            user_data_dir = os.path.join(os.getcwd(), "browser_data",
                                         config.USER_DATA_DIR % config.PLATFORM)  # type: ignore
            browser_context = await chromium.launch_persistent_context(
                user_data_dir=user_data_dir,
                accept_downloads=True,
                headless=headless,
                proxy=playwright_proxy,  # type: ignore
                user_agent=user_agent,
                executable_path='C:\\Program Files\\Google\\Chrome\\Application\\chrome.exe'
            )  # type: ignore
            browser_context.set_default_timeout(5000)  # default per-action timeout (ms)
            return browser_context
        else:
            # BUG FIX: executable_path is a launch() option; it was previously
            # passed to new_context(), which does not accept it and raised
            # TypeError, so this branch could never run.
            browser = await chromium.launch(
                headless=headless,
                proxy=playwright_proxy,  # type: ignore
                executable_path='C:\\Program Files\\Google\\Chrome\\Application\\chrome.exe'
            )
            browser_context = await browser.new_context(
                user_agent=user_agent
            )
            return browser_context

    async def close(self) -> None:
        """Shut down the shared Playwright browser context."""
        context = self.browser_context
        await context.close()
        utils.logger.info("[DouYinCrawler.close] Browser context closed ...")


    async def post_notify(self) -> None:
        """Follow and direct-message every user collected by the keyword crawl
        that has not been messaged before.

        Reads today's collected users plus the already-notified list from
        data/douyin/json, dedupes, then runs either a sequential (model 1) or
        concurrent multi-page (model 2) follow/DM workflow.
        """
        con_num = config.NOTIFY_PAGE_CONCURRENCY_NUM  # concurrency of follow/DM tasks
        self.context_pages = []  # one browser page per concurrent worker
        self.saveCount = 0  # number of users newly added to the DM list

        # Step 1: load today's creator_keywords_xxx.json.
        # BUG FIX: this path used backslashes inside a non-raw string
        # ("data\douyin\json\..."), i.e. invalid escape sequences and a
        # non-portable path; use forward slashes like the reads below.
        with open(f"data/douyin/json/creator_keywords_{utils.get_current_date()}.json", 'r', encoding='utf-8') as file:
            load_data = json.load(file)
        # Step 2: load the list of users already notified.
        with open('data/douyin/json/notify_notifyed.json', 'r', encoding='utf-8') as file:
            notified_data = json.load(file)
        # Step 3: open the worker pages.
        for _ in range(con_num):
            self.context_pages.append(await self.browser_context.new_page())
        await self.context_page.goto(self.index_url)

        # Step 4: dedupe (a user with several comments gets one DM) and drop
        # users that were already notified.
        already_notified = {item.get('sec_uid') for item in notified_data}
        notify_set = {item['sec_uid'] for item in load_data if item['sec_uid'] not in already_notified}

        if config.NOTIFY_PROCESS_MODEL == 1:
            # Sequential mode: one profile at a time on the main page.
            for url in notify_set:
                await self.do_post_notify(url)
        elif config.NOTIFY_PROCESS_MODEL == 2:
            # Step 5: round-robin the users across con_num buckets.
            notify_buckets = [set() for _ in range(con_num)]
            for idx, url in enumerate(notify_set):
                notify_buckets[idx % con_num].add(url)
            # Step 6: run one follow task per bucket concurrently.
            tasks = [asyncio.ensure_future(self.do_post_guanzhu(notify_buckets[i], i)) for i in range(con_num)]
            await asyncio.gather(*tasks)

            # Re-read the notified file to count how many users were added.
            with open('data/douyin/json/notify_notifyed.json', 'r', encoding='utf-8') as file:
                new_notified_data = json.load(file)
            self.saveCount = len(new_notified_data) - len(notified_data)
            utils.logger.info("[私信程序] 私信列表一共关注了%d位用户" % (self.saveCount))
            # Step 7: send the DMs.
            self.saveCount = await self.do_post_sixin(self.saveCount)
            utils.logger.info("[私信程序] 私信列表一共发送了%d条私信" % (self.saveCount))

    async def do_post_notify(self, url: str) -> None:
        """Open one user's profile on the main page, follow them, then DM them.

        Best-effort: every UI step logs and degrades instead of raising.
        BUG FIX: all handlers were bare ``except:``, which also swallows
        asyncio.CancelledError and made the task uncancellable; narrowed to
        ``except Exception`` throughout.
        """
        try:
            utils.logger.info("[DouYinCrawler.post_notify] 发送抖音私信"+"https://www.douyin.com/user/"+url)
            await self.context_page.goto("https://www.douyin.com/user/"+ url)
            utils.logger.info("[DouYinCrawler.post_notify] 移动鼠标")
            # Scroll a little so the profile action bar renders.
            await self.context_page.mouse.wheel(0,100)
            await self.context_page.mouse.wheel(0,100)
            await self.context_page.wait_for_timeout(1000)  # pause 1s

            try:
                utils.logger.info("[DouYinCrawler.post_notify] 点击关注")
                await self.context_page.locator("#user-tabbar").get_by_text("关注").click()
                await self.context_page.wait_for_timeout(1000)
            except Exception:
                utils.logger.info("[DouYinCrawler.post_notify] 关注失败")
            await self.context_page.wait_for_timeout(20000)  # pause 20s
            try:
                utils.logger.info("[DouYinCrawler.post_notify] 点击私信")
                await self.context_page.locator("#user-tabbar").get_by_text("私信").click()
                await self.context_page.wait_for_timeout(1500)
            except Exception:
                utils.logger.info("[DouYinCrawler.post_notify] 私信失败")
                utils.logger.info("[DouYinCrawler.post_notify] 用户不存在")
                return

            try:
                utils.logger.info("[DouYinCrawler.post_notify] 填充信息")
                await self.context_page.locator("#douyin-header-menuCt").get_by_role("textbox").fill(config.NOTIFY_INFO)
                await self.context_page.locator(".sCp7KhBv > svg > path:nth-child(2)").click()
                await douyin_store.update_by_notifyed(url)  # persist that this user was messaged
                utils.logger.info("[DouYinCrawler.post_notify] 发送成功")
                await self.context_page.wait_for_timeout(1500)
            except Exception:
                utils.logger.info("[DouYinCrawler.post_notify] 发送失败")
        except Exception:
            utils.logger.info("[DouYinCrawler.post_notify] 发送失败")
                          
    async def do_post_guanzhu(self, urls: set, pageIndex: int) -> None:
        """Sequentially follow every user in *urls* on worker page *pageIndex*."""
        for target in urls:
            await self.do_post_guanzhu_detail(target, pageIndex)

    async def do_post_guanzhu_detail(self, url: str, pageIndex: int) -> None:
        """Follow one user on worker page *pageIndex*, polling the UI at each step."""
        context_page = self.context_pages[pageIndex]
        utils.logger.info("[私信程序%d] 跳转页面" % (pageIndex))
        # Step 1: open the profile page.
        await context_page.goto("https://www.douyin.com/user/" + url)
        # Step 2: poll up to ~3s for a page title; none means the user does not exist.
        utils.logger.info("[私信程序%d] 获取用户" % (pageIndex))
        ok = False
        for _ in range(3):
            if len(await context_page.title()) > 0:
                ok = True
                break
            await context_page.wait_for_timeout(1000)
        if not ok:
            utils.logger.info("[私信程序%d] 用户不存在，退出" % (pageIndex))
            return

        # Step 3: poll up to ~30s for the DM entry in the header menu.
        utils.logger.info("[私信程序%d] 获取私信标签" % (pageIndex))
        ok = False
        for _ in range(15):
            if "私信" in await context_page.locator("#douyin-header-menuCt").inner_text():
                ok = True
                break
            await context_page.wait_for_timeout(2000)
        if not ok:
            utils.logger.info("[私信程序%d] 30秒内没有获取到私信标签，退出" % (pageIndex))
            return

        utils.logger.info("[私信程序%d] 移动鼠标" % (pageIndex))
        # Scroll a little so the profile tab bar renders.
        await context_page.mouse.wheel(0, 200)
        await context_page.wait_for_timeout(1000)

        # Step 4: click 关注 (follow) until the tab bar offers 私信 (DM), then click it.
        utils.logger.info("[私信程序%d] 点击关注/私信" % (pageIndex))
        ok = False
        for _ in range(5):
            if "关注" in await context_page.locator("#user-tabbar").inner_text():
                await context_page.locator("#user-tabbar").get_by_text("关注").click()
                await context_page.wait_for_timeout(1000)
            elif "私信" in await context_page.locator("#user-tabbar").inner_text():
                await context_page.locator("#user-tabbar").get_by_text("私信").click()
                await douyin_store.update_by_notifyed(url)  # persist follow state
                ok = True
                break
            else:
                await context_page.wait_for_timeout(1000)

        if not ok:
            utils.logger.info("[私信程序%d] 3秒内点击关注私信失败，退出" % (pageIndex))
            return
        else:
            utils.logger.info("[私信程序%d] 用户已添加到私信列表中" % (pageIndex))
            return

    async def do_post_sixin(self, MaxCount: int) -> int:
        """Send the configured DM text to up to *MaxCount* conversations.

        Walks the DM side panel: the first conversation, then one entry at a
        time while scrolling, then a final sweep of whatever remains once the
        list stops moving. Returns the number of messages filled in.
        NOTE(review): the actual "send" click is commented out below, so this
        currently only fills the textbox — confirm whether that is intended.
        """
        NotifiedCount = 0
        if MaxCount == 0:
            utils.logger.info("[私信程序] MaxCount=0，没有需要发送私信")
            return NotifiedCount

        utils.logger.info("[私信程序] 发送抖音私信")
        await self.context_page.locator(".xgplayer-play").click()
        utils.logger.info("[私信程序] 点击私信对话框")
        while True:
            try:
                # Poll until the "私信" entry appears in the header menu.
                # BUG FIX: this was a bare `except:` which also caught
                # asyncio.CancelledError, turning task cancellation into an
                # infinite loop; narrowed to Exception.
                await self.context_page.locator("#douyin-header-menuCt").get_by_text("私信").click()
                break
            except Exception:
                continue
        # Open the first conversation in the panel.
        await self.context_page.locator(".EUJzwIMS").first.click()
        firstP = self.context_page.locator(".EUJzwIMS").first
        await firstP.hover()

        # The first conversation is handled separately.
        await self.context_page.locator("div:nth-child(1) > .UOLK2tYZ > .EUJzwIMS").click()
        await self.context_page.locator("#douyin-header-menuCt").get_by_role("textbox").fill(config.NOTIFY_INFO)
        # await self.context_page.locator(".sCp7KhBv > svg > path:nth-child(2)").click()
        NotifiedCount += 1
        if NotifiedCount >= MaxCount:
            return NotifiedCount
        lastP = await self.context_page.locator("div:nth-child(1) > .UOLK2tYZ > .EUJzwIMS").inner_text()
        await self.context_page.mouse.wheel(0, 65)
        await self.context_page.wait_for_timeout(2000)
        # Scroll one conversation at a time until the visible entry stops
        # changing (the list cannot scroll any further).
        while True:
            curtP = await self.context_page.locator("div:nth-child(2) > .UOLK2tYZ > .EUJzwIMS").inner_text()
            if lastP != curtP:
                lastP = curtP
                await self.context_page.locator("div:nth-child(2) > .UOLK2tYZ > .EUJzwIMS").click()
                await self.context_page.locator("#douyin-header-menuCt").get_by_role("textbox").fill(config.NOTIFY_INFO)
                # await self.context_page.locator(".sCp7KhBv > svg > path:nth-child(2)").click()
                NotifiedCount += 1
                if NotifiedCount >= MaxCount:
                    return NotifiedCount
                await self.context_page.mouse.wheel(0, 65)
                await self.context_page.wait_for_timeout(2000)
            else:
                break
        # List is at the bottom; sweep the remaining visible conversations.
        countFlag = await self.context_page.locator(".EUJzwIMS").count()
        utils.logger.info(countFlag)  # was a stray print(); route through the logger
        for i in range(3, countFlag + 1):
            locatorStr = "div:nth-child(%d) > .UOLK2tYZ > .EUJzwIMS" % (i)
            await self.context_page.locator(locatorStr).click()
            await self.context_page.locator("#douyin-header-menuCt").get_by_role("textbox").fill(config.NOTIFY_INFO)
            # await self.context_page.locator(".sCp7KhBv > svg > path:nth-child(2)").click()
            NotifiedCount += 1
            if NotifiedCount >= MaxCount:
                return NotifiedCount
            await self.context_page.wait_for_timeout(2000)
        # BUG FIX: annotated -> int but previously fell off the end (returning
        # None) when the sweep finished below MaxCount.
        return NotifiedCount
   
