# coding=utf-8
import json
import logging
import os
from datetime import datetime

from loguru import logger

from dy_apis.douyin_api import DouyinAPI
from utils.common_util import init
from utils.data_util import handle_work_info, download_work, save_to_xlsx


class Data_Spider():
    """Douyin (抖音) crawler built on top of :class:`DouyinAPI`.

    Provides helpers to crawl a single work, a batch of works, a user's
    whole feed, search results, top-level comments, and live-room info.
    """

    def __init__(self):
        # One shared API client, reused by every spider method.
        self.douyin_apis = DouyinAPI()

    def spider_work(self, auth, work_url: str, proxies=None):
        """
        Crawl the info of one work (video / image post).

        :param auth: user auth info
        :param work_url: URL of the work
        :param proxies: reserved, currently unused
        :return: normalized work-info dict produced by ``handle_work_info``
        """
        res_json = self.douyin_apis.get_work_info(auth, work_url)
        # The payload of interest lives under 'aweme_detail'.
        data = res_json['aweme_detail']
        work_info = handle_work_info(data)
        logger.info(f'爬取作品信息 {work_url}')
        return work_info

    def spider_some_work(self, auth, works: list, base_path: dict, save_choice: str, excel_name: str = '', proxies=None):
        """
        Crawl the info of several works.

        :param auth: user auth info
        :param works: list of work URLs
        :param base_path: dict with 'media' / 'excel' output directories
        :param save_choice: 'all' saves everything; 'media' (or 'media-video' /
            'media-image') downloads media only; 'excel' saves metadata to Excel
        :param excel_name: Excel file name (required for 'all' / 'excel')
        :param proxies: reserved, currently unused
        :raises ValueError: if an Excel save is requested without excel_name
        """
        if save_choice in ('all', 'excel') and excel_name == '':
            raise ValueError('excel_name 不能为空')
        # Phase 1: fetch all metadata first, so a later download failure
        # cannot interrupt the crawling of the remaining works.
        work_list = [self.spider_work(auth, work_url) for work_url in works]
        # Phase 2: download media if requested (condition hoisted out of
        # the loop — it is invariant per call).
        if save_choice == 'all' or 'media' in save_choice:
            for work_info in work_list:
                download_work(work_info, base_path['media'], save_choice)
        if save_choice in ('all', 'excel'):
            file_path = os.path.abspath(os.path.join(base_path['excel'], f'{excel_name}.xlsx'))
            save_to_xlsx(work_list, file_path)

    def spider_user_all_work(self, auth, user_url: str, base_path: dict, save_choice: str, excel_name: str = '', proxies=None):
        """
        Crawl every work of one user.

        :param auth: user auth info
        :param user_url: URL of the user's home page
        :param base_path: dict with 'media' / 'excel' output directories
        :param save_choice: same semantics as :meth:`spider_some_work`
        :param excel_name: Excel file name; when empty it defaults to the id
            parsed from user_url (fix: the original unconditionally
            overwrote a caller-supplied name)
        :param proxies: reserved, currently unused
        """
        user_info = self.douyin_apis.get_user_info(auth, user_url)
        work_list = self.douyin_apis.get_user_all_work_info(auth, user_url)
        work_info_list = []
        logger.info(f'用户 {user_url} 作品数量: {len(work_list)}')
        if save_choice in ('all', 'excel') and excel_name == '':
            # Derive a default workbook name from the user id in the URL.
            excel_name = user_url.split('/')[-1].split('?')[0]

        for raw_info in work_list:
            # Each work entry only carries partial author info; merge in the
            # full user profile before normalizing.
            raw_info['auth'].update(user_info['user'])
            work_info = handle_work_info(raw_info)
            work_info_list.append(work_info)
            logger.info(f'爬取作品信息 {work_info["work_url"]}')
            if save_choice == 'all' or 'media' in save_choice:
                download_work(work_info, base_path['media'], save_choice)
        if save_choice in ('all', 'excel'):
            file_path = os.path.abspath(os.path.join(base_path['excel'], f'{excel_name}.xlsx'))
            save_to_xlsx(work_info_list, file_path)

    def spider_some_search_work(self, auth, query: str, require_num: int, base_path: dict, save_choice: str, sort_type: str, publish_time: str, filter_duration="", search_range="", content_type="", excel_name: str = '', proxies=None):
        """
        Crawl works from a keyword search.

        :param auth: DouyinAuth object
        :param query: search keyword
        :param require_num: number of results to fetch
        :param base_path: dict with 'media' / 'excel' output directories
        :param save_choice: same semantics as :meth:`spider_some_work`
        :param sort_type: 0 comprehensive, 1 most liked, 2 newest
        :param publish_time: 0 any, 1 last day, 7 last week, 180 last half year
        :param filter_duration: '' any, '0-1', '1-5', '5-10000' (minutes)
        :param search_range: 0 any, 1 watched, 2 unwatched, 3 following
        :param content_type: 0 any, 1 video, 2 image post
        :param excel_name: Excel file name; when empty it defaults to the
            query (fix: the original discarded a caller-supplied name)
        :param proxies: reserved, currently unused
        :return: list of normalized work-info dicts
        """
        work_info_list = []
        work_list = self.douyin_apis.search_some_general_work(auth, query, require_num, sort_type, publish_time, filter_duration, search_range, content_type)
        logger.info(f'搜索关键词 {query} 作品数量: {len(work_list)}')
        if save_choice in ('all', 'excel') and excel_name == '':
            excel_name = query
        for raw_info in work_list:
            logger.info(json.dumps(raw_info))
            # type == 6 entries appear to be non-work cards in search
            # results (no 'aweme_info'); skip them — TODO confirm.
            if raw_info["type"] == 6:
                continue
            logger.info(f'爬取作品信息 https://www.douyin.com/video/{raw_info["aweme_info"]["aweme_id"]}')
            work_info = handle_work_info(raw_info['aweme_info'])
            work_info_list.append(work_info)
            if save_choice == 'all' or 'media' in save_choice:
                download_work(work_info, base_path['media'], save_choice)
        if save_choice in ('all', 'excel'):
            file_path = os.path.abspath(os.path.join(base_path['excel'], f'{excel_name}.xlsx'))
            save_to_xlsx(work_info_list, file_path)
        return work_info_list

    def get_work_all_comment(self, auth, work_info_list):
        """
        Fetch the top-level comments of every work in ``work_info_list`` and
        save them to ``datas/comments_data.xlsx``.

        :param auth: user auth info
        :param work_info_list: list of work-info dicts containing 'work_url'
        :return: list of per-comment row dicts (UID / name / text / time / URL)
        """
        import pandas as pd
        all_comments_list = []
        print("===========开始获取用户评论===========")
        # Ensure the output directory exists before any file write.
        os.makedirs('datas', exist_ok=True)
        for work_info in work_info_list:
            logger.info(json.dumps(work_info))
            url = work_info["work_url"]
            comments = self.douyin_apis.get_work_out_comment(auth, url)
            # Keep the raw payload of the most recent request for debugging.
            with open('datas/data.json', 'w', encoding='utf-8') as f:
                json.dump(comments, f)
            # `.get(...) or []` tolerates both a missing key and a None value.
            for comment in comments.get('comments') or []:
                comment_text = comment['text']
                user_uid = comment['user']['short_id']
                user_name = comment['user']['nickname']
                # Convert the epoch-seconds timestamp to a readable string.
                create_time = datetime.fromtimestamp(comment['create_time']).strftime("%Y-%m-%d %H:%M:%S")
                print(f"UID: {user_uid}，用户名称：{user_name}   评论内容：{comment_text}")
                all_comments_list.append({
                    'UID': user_uid,
                    '用户名称': user_name,
                    '评论内容': comment_text,
                    '创建时间': create_time,
                    '作品URL': url,
                })

        # Fix: the original rewrote the whole Excel file once per work inside
        # the loop; write it a single time after all works are processed.
        if all_comments_list:
            excel_filename = 'datas/comments_data.xlsx'
            pd.DataFrame(all_comments_list).to_excel(excel_filename, index=False)
            print(f'评论保存完毕，已保存到 {excel_filename}')
        return all_comments_list

    def get_live_user_info(self, auth, live_id, room_id):
        """Fetch and print info about the host of a live room."""
        res = self.douyin_apis.get_live_user_info(auth, live_id, room_id)
        print(res)
        return res

    def get_live_info(self, auth, live_id):
        """Fetch and print info about a live room."""
        res = self.douyin_apis.get_live_info(auth, live_id)
        print(res)
        return res


if __name__ == '__main__':
    # Demo run: fetch info about a fixed live room and its host.
    auth_info, base_dirs = init()
    spider = Data_Spider()

    live_id = "433548908194"
    room_id = '7557568353062832942'

    live_info = spider.get_live_info(auth_info, live_id)
    host_info = spider.get_live_user_info(auth_info, live_id, room_id)
    print(host_info)