import json
from django.http import HttpResponse
from django.shortcuts import render
from fake_useragent import UserAgent
import re
import requests
from bs4 import BeautifulSoup as BS
import time
import pandas as pd
import os
from django.utils import timezone  # 导入时区处理模块
from .models import Author, Category, Product, Comment
from concurrent.futures import ThreadPoolExecutor


def response_as_json(data):
    """Serialize *data* to JSON and wrap it in a CORS-enabled HttpResponse."""
    payload = json.dumps(data, ensure_ascii=False)
    resp = HttpResponse(payload, content_type="application/json")
    # Allow any origin so a separately-hosted frontend can call these endpoints.
    resp["Access-Control-Allow-Origin"] = "*"
    return resp


def json_response(data, code=200):
    """Wrap *data* in the standard success envelope and emit it as JSON."""
    envelope = {"code": code, "msg": "success", "data": data}
    return response_as_json(envelope)


def json_error(error_string="error", code=500, **kwargs):
    """Build an error envelope; extra kwargs override envelope fields.

    Note: because ``kwargs`` is merged last, a caller may pass e.g.
    ``data=...`` to replace the empty payload (see ``crawl_bilibili``).
    """
    payload = {"code": code, "msg": error_string, "data": {}, **kwargs}
    return response_as_json(payload)


# Convenience aliases used by the views below.
# NOTE(review): ``JsonResponse`` shadows ``django.http.JsonResponse`` should
# that name ever be imported into this module — consider renaming.
JsonResponse = json_response
JsonError = json_error


def get_real_time(v_str):
    """Convert a danmaku offset such as ``"125.5"`` (seconds, possibly with a
    fractional part) into a Chinese "H时M分S秒" position string."""
    total_seconds = int(v_str.split('.')[0])  # drop the fractional part
    minutes, second = divmod(total_seconds, 60)
    hour, minute = divmod(minutes, 60)
    return f"{hour}时{minute}分{second}秒"


def get_bilibili_danmu(v_url):
    """
    Crawl the danmaku (bullet comments) of a Bilibili video and append them
    to ``./upload/comment/<BVID>_弹幕.csv``.

    Fixes vs. original: the docstring was placed after the first statement
    (making it a dead string expression), the BVID was parsed from the URL
    twice, and the HTTP requests had no timeout.

    :param v_url: video page URL, e.g. ``https://www.bilibili.com/video/BVxxxx``
    :return: ``(danmu_list, cid)`` — the list of ``<d>`` tags from the
             danmaku XML and the video's comment-stream cid.
    """
    print(f"正在爬取视频 {v_url} 的弹幕")
    ua = UserAgent(os="Windows")
    headers = {'User-Agent': ua.random}

    bv = v_url.split('/')[-1]  # the BVID is the last path segment
    # Resolve the video's cid (comment-stream id) via the pagelist API.
    r1 = requests.get(
        url='https://api.bilibili.com/x/player/pagelist?bvid=' + bv,
        headers=headers, timeout=10)
    cid = r1.json()['data'][0]['cid']

    danmu_url = f'https://comment.bilibili.com/{cid}.xml'  # raw danmaku XML
    r2 = requests.get(danmu_url, headers=headers, timeout=10)
    soup = BS(r2.content.decode('utf-8'), 'lxml-xml')  # XML needs lxml-xml parser
    danmu_list = soup.find_all('d')

    video_url_list = []
    danmu_url_list = []
    time_list = []      # wall-clock send time
    time_loc_list = []  # position inside the video
    text_list = []      # danmaku text
    for d in danmu_list:
        # The 'p' attribute is a comma-separated metadata tuple; field 0 is
        # the offset in seconds, field 4 the send timestamp (epoch seconds).
        data_split = d['p'].split(',')
        danmu_time = time.strftime(
            "%Y-%m-%d %H:%M:%S", time.localtime(int(data_split[4])))
        video_url_list.append(v_url)
        danmu_url_list.append(danmu_url)
        time_list.append(danmu_time)
        time_loc_list.append(get_real_time(v_str=data_split[0]))
        text_list.append(d.text)

    df = pd.DataFrame({
        '视频地址': video_url_list,
        '弹幕地址': danmu_url_list,
        '弹幕时间': time_list,
        '弹幕出现位置': time_loc_list,
        '弹幕内容': text_list,
    })

    save_dir = './upload/comment'
    os.makedirs(save_dir, exist_ok=True)

    v_result_file = os.path.join(save_dir, f'{bv}_弹幕.csv')
    # Only write the header row when the file is created for the first time;
    # subsequent crawls append rows without repeating the header.
    if os.path.exists(v_result_file):
        header = None
    else:
        header = ['视频地址', '弹幕地址', '弹幕时间', '弹幕出现位置', '弹幕内容']
    df.to_csv(v_result_file, encoding='utf_8_sig', mode='a+',
              index=False, header=header)
    return danmu_list, cid


def process_video(data, tab_name, status):
    """
    Persist one ranking entry: its author, category, product row, cover
    image, and (unless skipped) its danmaku.

    Fixes vs. original: the redundant boolean ``status or (not status and X)``
    is simplified to the equivalent ``status or X``; the log message printed
    while downloading the cover wrongly said it was crawling danmaku; the
    cover download now has a timeout.

    :param data: one item from the Bilibili ranking API response
    :param tab_name: ranking tab the video came from (used as category name)
    :param status: when truthy, skip danmaku and cover crawling entirely
    """
    print(f"正在爬取视频 https://www.bilibili.com/video/{data['bvid']}")
    # Keep only ASCII and CJK characters in the title.
    title = re.sub(r'[^\x00-\x7F\u4e00-\u9fff]', '', data['title'])
    bvid = data['bvid']
    video_url = f'https://www.bilibili.com/video/{bvid}'

    author, _ = Author.objects.get_or_create(
        name=data['owner']['name'], UID=data['owner']['mid'])
    category, _ = Category.objects.get_or_create(category_name=tab_name)

    stat = data['stat']
    # Create the video row if missing; existing rows are left untouched
    # (get_or_create only applies ``defaults`` on creation).
    product, _ = Product.objects.get_or_create(
        BVID=bvid,
        defaults={
            'title': title,
            'category': category,
            'video_url': video_url,
            'score_list': data['score'],
            'play_cnt_list': stat['view'],
            'danmu_cnt_list': stat['danmaku'],
            'coin_cnt_list': stat['coin'],
            'like_cnt_list': stat['like'],
            'dislike_cnt_list': stat['dislike'],
            'share_cnt_list': stat['share'],
            'favorite_cnt_list': stat['favorite'],
            'reply_cnt_list': stat['reply'],
        }
    )
    product.author_list.add(author)

    # Skip the danmaku crawl when forced (status) or when danmaku for this
    # BVID already exist.  ``A or (not A and B)`` simplifies to ``A or B``.
    if status or Comment.objects.filter(BVID__BVID=bvid).exists():
        print(f"视频 {bvid} 的弹幕已存在，跳过弹幕爬取")
        if status or os.path.exists(f'./upload/pics/{bvid}.png'):
            print(f"视频 {bvid} 的封面已存在，跳过封面爬取")
        else:
            os.makedirs('./upload/pics', exist_ok=True)
            pic_url_response = requests.get(data['pic'], timeout=10)
            # Fixed message: this step downloads the cover, not the danmaku.
            print(f"正在爬取视频 {bvid} 的封面")
            time.sleep(1)  # throttle to be polite to the CDN
            with open(f'./upload/pics/{bvid}.png', 'wb') as picfile:
                picfile.write(pic_url_response.content)
        return

    # Crawl the danmaku and persist each entry.
    danmu_list, cid = get_bilibili_danmu(video_url)
    for d in danmu_list:
        data_split = d['p'].split(',')
        danmu_time = time.strftime(
            "%Y-%m-%d %H:%M:%S", time.localtime(int(data_split[4])))
        # Make the naive datetime timezone-aware for Django's DB layer.
        danmu_time = timezone.make_aware(pd.to_datetime(danmu_time))
        Comment.objects.create(
            BVID=product,  # FK to Product
            video_url_list=video_url,
            danmu_url_list=f'https://comment.bilibili.com/{cid}.xml',
            time_list=danmu_time,
            time_loc_list=get_real_time(v_str=data_split[0]),
            text_list=d.text
        )


def crawl_bilibili(request, status=False):
    """
    Django view: crawl the top videos of every Bilibili ranking tab and
    store them via ``process_video``.

    Fixes vs. original: the ranking requests now carry a timeout so a stalled
    endpoint cannot hang the view; the hard-coded credential is flagged.

    :param request: the incoming HttpRequest (only used for routing)
    :param status: forwarded to ``process_video``; truthy skips re-crawling
                   danmaku/covers for videos already present
    :return: JSON success envelope, or a JSON error aborting on the first
             tab that fails
    """
    ua = UserAgent(os="Windows")
    # Ranking API endpoints per tab; ``rid`` selects the category partition.
    url_dict = {
        'all': 'https://api.bilibili.com/x/web-interface/ranking/v2?rid=0&type=all',
        'animation': 'https://api.bilibili.com/x/web-interface/ranking/v2?rid=1&type=all',
        'music': 'https://api.bilibili.com/x/web-interface/ranking/v2?rid=3&type=all',
        'dance': 'https://api.bilibili.com/x/web-interface/ranking/v2?rid=129&type=all',
        'game': 'https://api.bilibili.com/x/web-interface/ranking/v2?rid=4&type=all',
        'knowledge': 'https://api.bilibili.com/x/web-interface/ranking/v2?rid=36&type=all',
        'technology': 'https://api.bilibili.com/x/web-interface/ranking/v2?rid=188&type=all',
        'motion': 'https://api.bilibili.com/x/web-interface/ranking/v2?rid=234&type=all',
        'car': 'https://api.bilibili.com/x/web-interface/ranking/v2?rid=223&type=all',
        'live': 'https://api.bilibili.com/x/web-interface/ranking/v2?rid=160&type=all',
        'food': 'https://api.bilibili.com/x/web-interface/ranking/v2?rid=211&type=all',
        'animal': 'https://api.bilibili.com/x/web-interface/ranking/v2?rid=217&type=all',
        'ghost_animal': 'https://api.bilibili.com/x/web-interface/ranking/v2?rid=119&type=all',
        'vogue': 'https://api.bilibili.com/x/web-interface/ranking/v2?rid=155&type=all',
        'amusement': 'https://api.bilibili.com/x/web-interface/ranking/v2?rid=5&type=all',
        'film': 'https://api.bilibili.com/x/web-interface/ranking/v2?rid=181&type=all',
        'original': 'https://api.bilibili.com/x/web-interface/ranking/v2?rid=0&type=origin',
        'newcomer': 'https://api.bilibili.com/x/web-interface/ranking/v2?rid=0&type=rookie',
    }

    # Browser-like headers to avoid basic anti-crawling checks.
    # SECURITY NOTE(review): the SESSDATA cookie below is a hard-coded session
    # credential committed to source — move it into Django settings or an
    # environment variable and rotate the leaked value.
    headers = {
        'Cookie': 'SESSDATA=f0e5216b%2C1761694484%2C13844%2A52CjB6RXKJuN8f8rpK4nnJCCZepHNMSqa_RlhGP2kiCWZHsNuAn2TF4wY9mBkYMv9AfEcSVm9KN2F4Yl9FaVpEZmNCM0tZamRFTjl4TjFRWjRvU3RqNGJsTHBCY3U0ZHdTYzBuM2I5TTlwb2d3bnpJR0NUUWNFdlF1WHdyQkw5Z05GNEZfdG5iTE5nIIEC',
        'Accept': 'application/json, text/plain, */*',
        'Origin': 'https://www.bilibili.com',
        'Host': 'api.bilibili.com',
        'User-Agent': ua.random,
        'Accept-Language': 'zh-cn',
        'Connection': 'keep-alive',
        'Referer': 'https://www.bilibili.com/v/popular/rank/all'
    }

    all_tasks = []
    for tab_name, url in url_dict.items():
        try:
            r = requests.get(url, headers=headers, timeout=10)
            json_data = r.json()
            # Keep the top 35 entries of each tab.
            list_data = json_data['data']['list'][:35]
            for data in list_data:
                all_tasks.append((data, tab_name, status))
        except Exception as e:
            # Abort the whole crawl on the first failing tab and report it.
            print("爬取失败:{}".format(str(e)))
            return JsonError(data="爬取失败:{}".format(str(e)))

    # Fan the per-video work out over a small thread pool (I/O bound).
    with ThreadPoolExecutor(max_workers=10) as executor:
        for task in all_tasks:
            executor.submit(process_video, *task)

    return JsonResponse(data='爬取成功')

