# import logging
import os
import tempfile

import cachetools
import pandas as pd
import pymysql
import uvicorn
from fastapi import FastAPI, HTTPException, Query
from fastapi.middleware.cors import CORSMiddleware
from fastapi.responses import FileResponse
# 放在地中海的服务器上
app = FastAPI()
origins = ["*"]
methods = ["GET", "POST", "PUT", "DELETE"]  # 允许的 HTTP 方法
allow_credentials = True  # 是否允许发送身份验证信息（cookies）到服务器

app.add_middleware(
    CORSMiddleware,
    allow_origins=origins,
    allow_credentials=allow_credentials,
    allow_methods=methods,
    allow_headers=["*"],
)

# 数据库连接配置
DB_CONFIG = {
    'host': 'rm-2zea30h4sh8g15zd1ho.mysql.rds.aliyuncs.com',
    'port': 3306,
    'user': "root",
    'password': 'Ds2024@()833429',
    'database': "douyinpinglun"
}

# 缓存有效期设为一分钟，单位为秒
CACHE_EXPIRATION = 60

# 创建缓存对象，使用 TTL 缓存策略
cache = cachetools.TTLCache(maxsize=100, ttl=CACHE_EXPIRATION)  # 增加maxsize以支持多个缓存项


# 分页获取视频
def fetch_video_with_pagination(page=1, page_size=10):
    """从数据库获取分页后的视频数据，以及总条目数"""
    mydb = pymysql.connect(**DB_CONFIG)
    cursor = mydb.cursor()

    # 计算 OFFSET
    offset = (page - 1) * page_size

    # 查询当前页的数据
    query_data = f"SELECT * from 抖音视频表 ORDER BY id LIMIT {page_size} OFFSET {offset}"
    cursor.execute(query_data)
    rows = cursor.fetchall()
    # 倒排
    reversed_rows = list(reversed(rows))

    # 查询总条目数
    query_count = "SELECT COUNT(*) from 抖音视频表"
    cursor.execute(query_count)
    total_items = cursor.fetchone()[0]

    cursor.close()
    mydb.close()

    # 计算总页数
    total_pages = (total_items + page_size - 1) // page_size

    return {
        'data': reversed_rows,
        'total_items': total_items,
        'total_pages': total_pages,
    }


def fetch_video():
    """从数据库获取所有视频数据"""
    mydb = pymysql.connect(**DB_CONFIG)
    cursor = mydb.cursor()
    query = "SELECT id,视频名称,视频作者,视频发布时间_2,视频链接,搜索关键词,创建时间,点赞量,评论量,收藏量,转发量 from 抖音视频表 order by id"
    cursor.execute(query)
    rows = cursor.fetchall()
    cursor.close()
    mydb.close()
    return rows


def fetch_comment(video_id):
    """从数据库获取指定视频的评论数据"""
    mydb = pymysql.connect(**DB_CONFIG)
    cursor = mydb.cursor()
    query = "SELECT 评论内容 from 视频评论表 where 关联视频表id=%s limit 50"
    cursor.execute(query, (video_id,))
    rows = cursor.fetchall()
    cursor.close()
    mydb.close()
    return rows


def fetch_all_comment(page=1, page_size=100):
    """从数据库获取指定视频的评论数据, 支持分页"""
    mydb = pymysql.connect(**DB_CONFIG)
    cursor = mydb.cursor()
    query = """
        SELECT c.*, v.视频名称, v.视频链接 
        FROM 视频评论表 c 
        JOIN 抖音视频表 v ON c.关联视频表id = v.ID
        LIMIT %s OFFSET %s
    """
    offset = (page - 1) * page_size
    cursor.execute(query, (page_size, offset))
    rows = cursor.fetchall()
    cursor.close()
    mydb.close()
    return rows


# def get_cached_all_comment():
#     """获取缓存的视频数据，若缓存不存在或已过期，则从数据库获取"""
#     cache_key = 'cached_all_comment'
#     if cache_key in cache:
#         return cache[cache_key]
#     else:
#         data = fetch_all_comment()
#         cache[cache_key] = data
#         return data


def get_cached_video_data():
    """获取缓存的视频数据，若缓存不存在或已过期，则从数据库获取"""
    cache_key = 'cached_video_data'
    if cache_key in cache:
        return cache[cache_key]
    else:
        data = fetch_video()
        cache[cache_key] = data
        return data



@app.get("/video")
async def get_products(page: int = Query(1, description="页码"), page_size: int = Query(15, description="每页数量")):
    print('page:', page)
    data = fetch_video_with_pagination(page, page_size)
    return data


# 后续再继续开发
@app.get("/comment")
async def get_products(videoId: int = Query(...)):
    print('video_id:', videoId)
    rows = fetch_comment(videoId)
    return rows


@app.get("/download_video")
async def download_video():
    # 从缓存中获取数据
    rows = get_cached_video_data()
    # 将数据转换为 DataFrame
    df = pd.DataFrame(rows, columns=[
        '视频id',
        '视频名称',
        '视频作者',
        '视频发布时间',
        '视频链接',
        '搜索关键词',
        '创建时间',
        '点赞量',
        '评论量',
        '收藏量',
        '转发量',
    ])
    excel_path = "抖音视频表.xlsx"  # 确保这个路径是有效的
    df.to_excel(excel_path, index=False)
    # 生成 Excel 文件并返回文件响应
    return FileResponse(excel_path, filename="抖音视频表.xlsx",
                        media_type='application/vnd.openxmlformats-officedocument.spreadsheetml.sheet')

@app.get("/download_all_comment")
async def download_video():
    # 初始化数据容器
    all_rows = []
    page = 1
    page_size = 100

    # 逐页获取数据
    while True:
        rows = fetch_all_comment(page, page_size)
        if not rows:
            break
        all_rows.extend(rows)
        page += 1

    # 将数据转换为 DataFrame
    df = pd.DataFrame(all_rows, columns=[
        '评论id',
        '关联视频表id',
        '评论内容',
        '用户昵称',
        '评论点赞数',
        '评论时间',
        '评论地点',
        '情感分类',
        '问答对编号',
        '创建时间',
        '视频名称',
        '视频链接',
    ])
    excel_path = "抖音视频表.xlsx"  # 确保这个路径是有效的
    df.to_excel(excel_path, index=False)

    # 检查文件是否存在
    if not os.path.exists(excel_path):
        raise HTTPException(status_code=404, detail="Excel file not found")

    # 生成 Excel 文件并返回文件响应
    return FileResponse(excel_path, filename="全部评论表.xlsx",
                        media_type='application/vnd.openxmlformats-officedocument.spreadsheetml.sheet')


if __name__ == "__main__":
    uvicorn.run(app, host="0.0.0.0", port=8003)
