# _*_ encoding: utf-8 _*_
# __author__ = 'lx'
# todo:异步化研究
import asyncio
import datetime
import hashlib
import random
import aiohttp
import pandas

import pafy
import requests

from constant import VIDEO_PATH, COMMENT_RESULTS
from spider import config
from spider.utils import get_time_str
from spider.ytb_comment.comment import comment_etl, add_zh, save_to_csv
from spider.ytb_v3_api.ytb_api import YoutubeAPI


# async def save_to_csv(data: list, filename: str = 'comment', path=COMMENT_RESULTS):
#     df = pandas.DataFrame(data)
#     now = datetime.datetime.now().strftime("%Y%m%d%H%M")
#     filename = path + '/' + filename + now + '.csv'
#     # utf_8_sig防止excel打开出现乱码
#     df.to_csv(filename, index=False, encoding='utf_8_sig')

async def download_video(ytb_url, path=VIDEO_PATH):
    """Download the best-quality stream of a YouTube video into *path*.

    Args:
        ytb_url: YouTube URL (or video id) accepted by ``pafy.new``.
        path: Destination directory; defaults to ``VIDEO_PATH``.

    Note:
        ``pafy.new`` / ``best.download`` are blocking network calls; running
        them directly inside a coroutine would stall the whole event loop,
        so the work is pushed to the default thread-pool executor.
    """
    def _blocking_download():
        video = pafy.new(ytb_url)
        best = video.getbest()
        # File name pattern: YTB_V<timestamp>.<ext> under *path*.
        best.download(filepath=path + "/YTB_V" + get_time_str() + '.' + best.extension)

    loop = asyncio.get_running_loop()
    await loop.run_in_executor(None, _blocking_download)

# async def comment_etl(comments, limit_count=0):
#     validated_data = []
#     for i in range(len(comments['items'])):
#         # 进行点赞数过滤
#         if limit_count > 0:
#             if comments['items'][i]['snippet']['topLevelComment']['snippet']['likeCount'] < limit_count:
#                 continue
#         # 数据清洗
#         # todo:翻译、态度分析
#         validated_data.append({
#             'authorDisplayName': comments['items'][i]['snippet']['topLevelComment']['snippet']['authorDisplayName'],
#             'likeCount': comments['items'][i]['snippet']['topLevelComment']['snippet']['likeCount'],
#             'textOriginal': comments['items'][i]['snippet']['topLevelComment']['snippet']['textOriginal'],
#             # 'textOriginal': utils.improve_format(comments['items'][i]['snippet']['topLevelComment']['snippet']['textOriginal']),
#         })
#     return validated_data
# async def baidufanyi(q: str, from_type='auto', to='zh'):
#     # print(chardet.detect(q))
#     APPID = config.BAIDU_APPID
#     SECRET = config.BAIDU_SECRET
#     SALT = str(random.randint(0, 10000))
#     SIGN = APPID + q + SALT + SECRET
#     md5_sign = hashlib.md5(SIGN.encode()).hexdigest()
#     with aiohttp.ClientSession()as ssession:
#         ssession.get()
#     response = requests.get(config.BAIDU_FY_API,
#                  params={'q': q, 'from': from_type, 'to': to, 'appid': APPID, 'salt': SALT, 'sign': md5_sign})
#     content = json.loads(response.content)
#     # 判断错误
#     if 'error_code' in content.keys():
#         return 'error' + content['error_code']
#     result = ''
#     for i in content['trans_result']:
#         result += i['dst']
#     return result

# async def add_zh(data: list):
#     for i in data:
#         q = i['textOriginal']
#         tran_str = await baidufanyi(q)
#         i['zh'] = tran_str

# 测试2-----0:00:19.158742---0:00:16.696330---0:00:17.444329
# Test 2 ----- 0:00:19.158742---0:00:16.696330---0:00:17.444329
async def test_async_main2():
    """Fetch comments for a fixed test video, run the ETL + translation
    steps and dump the result to CSV, printing the elapsed wall time."""
    started = datetime.datetime.now()
    # await download_video('lRtedsK_6fs')
    client = YoutubeAPI(config.GOOGLE_APIKEY)
    raw = client.get_comment_threads_by_video_id('lRtedsK_6fs', order='relevance')
    cleaned = comment_etl(raw, 5)
    add_zh(cleaned)
    save_to_csv(cleaned)
    finished = datetime.datetime.now()
    print(finished - started)

# 测试3----0:00:15.973265----0:00:14.511176
# Test 3 ---- 0:00:15.973265----0:00:14.511176
async def test_async_main3():
    """Same comment pipeline as test 2, but additionally downloads the
    video synchronously via ``spider.utils``; prints the elapsed time."""
    from spider import utils

    started = datetime.datetime.now()
    utils.download_video('lRtedsK_6fs')
    client = YoutubeAPI(config.GOOGLE_APIKEY)
    raw = client.get_comment_threads_by_video_id('lRtedsK_6fs', order='relevance')
    cleaned = comment_etl(raw, 5)
    add_zh(cleaned)
    save_to_csv(cleaned)
    finished = datetime.datetime.now()
    print(finished - started)

# 测试1
# Test 1
async def fetch(session, url):
    """Download *url* through *session* and write the body to a local file.

    The local file name is the last '_'-separated segment of the URL and is
    written to the current working directory.

    Args:
        session: an ``aiohttp.ClientSession``.
        url: HTTP(S) URL to fetch.
    """
    print("发送请求：", url)
    # ``verify_ssl=`` is deprecated in aiohttp; ``ssl=False`` is the
    # supported way to skip certificate verification.
    async with session.get(url, ssl=False) as response:
        content = await response.content.read()
        file_name = url.rsplit('_')[-1]
        # NOTE(review): blocking file write inside a coroutine — acceptable
        # for small images; consider an executor for large payloads.
        with open(file_name, mode='wb') as file_object:
            file_object.write(content)


async def main():
    """Concurrently download a fixed list of test images.

    Uses ``asyncio.gather`` instead of ``asyncio.wait``: ``wait`` only
    returns (done, pending) sets and leaves task exceptions unretrieved,
    so a failed download would be silently swallowed. ``gather``
    propagates the first exception to the caller.
    """
    async with aiohttp.ClientSession() as session:
        url_list = [
            'https://www3.autoimg.cn/newsdfs/g26/M02/35/A9/120x90_0_autohomecar__ChsEe12AXQ6AOOH_AAFocMs8nzU621.jpg',
            'https://www2.autoimg.cn/newsdfs/g30/M01/3C/E2/120x90_0_autohomecar__ChcCSV2BBICAUntfAADjJFd6800429.jpg',
            'https://www3.autoimg.cn/newsdfs/g26/M0B/3C/65/120x90_0_autohomecar__ChcCP12BFCmAIO83AAGq7vK0sGY193.jpg'
        ]
        await asyncio.gather(*(fetch(session, url) for url in url_list))

def test_async_main():
    """Benchmark the asyncio downloader against a sequential requests loop.

    Observed timings: ~0:00:00.164560 (async) vs ~0:00:00.292218
    (sequential) — conclusion: the async version is indeed faster.
    """
    start = datetime.datetime.now()
    asyncio.run(main())
    mid = datetime.datetime.now()
    print(mid - start)

    urls = [
        'https://www3.autoimg.cn/newsdfs/g26/M02/35/A9/120x90_0_autohomecar__ChsEe12AXQ6AOOH_AAFocMs8nzU621.jpg',
        'https://www2.autoimg.cn/newsdfs/g30/M01/3C/E2/120x90_0_autohomecar__ChcCSV2BBICAUntfAADjJFd6800429.jpg',
        'https://www3.autoimg.cn/newsdfs/g26/M0B/3C/65/120x90_0_autohomecar__ChcCP12BFCmAIO83AAGq7vK0sGY193.jpg'
    ]
    # Sequential baseline: fetch each image one at a time with requests.
    for url in urls:
        body = requests.get(url).content
        target = url.rsplit('_')[-1]
        with open(target, mode='wb') as fh:
            fh.write(body)
    end = datetime.datetime.now()
    print(end - mid)


if __name__ == '__main__':
    # Entry point: run the comment-pipeline timing experiment (test 2).
    asyncio.run(test_async_main2())
