# P13 播放日志生成算法
# 本脚本主要目的是从 videos.csv 中读取视频信息，再生成播放记录并保存到MySQL数据库
# [生成参数] --(播放日志生成算法)-> [观看日志] 
# - 输入数据：videos.csv, size
# - 脚本名称：p13_generate_viewlogs.py
# - 输出：保存到MySQL数据库的viewlogs表

import random
import pandas as pd
import numpy as np
import sys
from datetime import datetime, timedelta
import pymysql
from sqlalchemy import create_engine

# Number of view-log records to generate (default 1000); the first
# command-line argument, if present, overrides it.
output_size = int(sys.argv[1]) if len(sys.argv) > 1 else 1000

# MySQL connection settings.
# NOTE(review): credentials are hardcoded in source — consider moving them
# to environment variables or a config file before sharing/deploying.
db_config = dict(
    host='localhost',
    port=3306,
    user='root',        # replace with your database user
    password='0217',    # replace with your database password
    database='videos_all',
)

try:
    # Build the SQLAlchemy engine (pymysql driver) from the config above.
    connection_string = (
        f"mysql+pymysql://{db_config['user']}:{db_config['password']}"
        f"@{db_config['host']}:{db_config['port']}/{db_config['database']}"
    )
    engine = create_engine(connection_string)

    # Load the full video catalogue from the `videos` table.
    query = "SELECT * FROM videos"
    df_in = pd.read_sql(query, engine)
    print(f'成功从数据库读取数据，共 {len(df_in)} 条记录。')

    # Coerce the numeric columns; unparsable values become 0 (not NaN)
    # so downstream arithmetic never sees missing data.
    for col in ('duration', 'likes', 'favorites', 'reposts'):
        df_in[col] = pd.to_numeric(df_in[col], errors='coerce').fillna(0)

    # Split the ';'-separated label string into one row per label.
    df_in["labels"] = df_in["labels"].str.split(';')
    df = df_in.explode("labels").dropna(subset=["labels"])

    # Drop empty labels left over from stray separators.
    df["labels"] = df["labels"].str.strip()
    df = df[df["labels"] != ""]

except Exception as e:
    print(f"发生错误: {e}")
    import traceback
    traceback.print_exc()
    # BUG FIX: the original fell through here and the script continued
    # with `df` undefined, crashing later with a confusing NameError.
    # Abort explicitly once loading/preprocessing has failed.
    sys.exit(1)

# Seed both RNGs so repeated runs produce identical logs.
# BUG FIX: the original seeded only numpy, but the generation loop also
# draws from the stdlib `random` module, so runs were not reproducible.
random.seed(42)
np.random.seed(42)

# Simulate five distinct users: user_1 .. user_5.
user_ids = [f'user_{i}' for i in range(1, 6)]

# Each user has one fixed interest label used to select candidate videos.
user_interest = {
    'user_1': '美女',
    'user_2': '帅哥',
    'user_3': '三角洲',
    'user_4': '永劫无间',
    'user_5': '音乐',
}

# Generate `output_size` synthetic play records.  Each iteration picks a
# random user, then samples one video matching that user's interest label.
rows = []
for _ in range(output_size):
    user_id = random.choice(user_ids)
    interest = user_interest[user_id]

    # Restrict the pool to videos tagged with this user's interest.
    candidate_videos = df[df['labels'].str.contains(interest, na=False)]
    if candidate_videos.empty:
        continue  # no matching videos for this interest; skip this record

    v = candidate_videos.sample(1).iloc[0]

    # Play time: uniformly random within 7 days after the publish time.
    publish_ts = pd.to_datetime(v['publishtime'])
    play_time = publish_ts + timedelta(seconds=np.random.randint(0, 7 * 24 * 3600))

    # Watch duration: 1 .. full video length.
    # BUG FIX: the original raised ValueError when duration < 1 (durations
    # are fillna(0)-ed upstream); clamp the upper bound to at least 1.
    play_duration = random.randint(1, max(1, int(v['duration'])))

    liked = random.random() < 0.35       # 35% chance of a like
    favourited = random.random() < 0.12  # 12% chance of a favourite
    shared = random.random() < 0.08      # 8% chance of a repost/share

    rows.append({
        'user_id': user_id,
        'vid': v['vid'],
        # BUG FIX: the original stored the literal list ['empty'] here,
        # which cannot be serialised into a MySQL column.  Use the video's
        # title when the column exists, else the plain string 'empty'.
        'title': v.get('title', 'empty'),
        'category': v['category'],
        'author': v['uid'],
        'publish_time': v['publishtime'],
        'duration': v['duration'],
        'labels': v['labels'],
        'play_time': play_time.strftime('%Y-%m-%d %H:%M:%S'),
        'play_duration': play_duration,
        'likes': int(liked),
        'favourites': int(favourited),
        'shares': int(shared),
    })

# Assemble the generated records into a DataFrame for bulk insert.
df_out = pd.DataFrame(rows)

# Persist the generated logs to MySQL; fall back to a local CSV backup if
# the database write fails.
try:
    # Reuse the engine created during the read phase instead of building a
    # second identical one (the original duplicated create_engine here).
    # NOTE: if_exists='replace' drops and recreates `viewlogs` on each run.
    df_out.to_sql('viewlogs', con=engine, if_exists='replace', index=False)

    print("数据已成功保存到MySQL数据库！")
    print("前10条数据预览：")
    print(df_out.head(10))
    print(f'已生成 {len(df_out)} 条记录并保存到数据库。')
    print(f'使用了 {len(user_ids)} 个不同用户: {user_ids}')

except Exception as e:
    print(f"保存到数据库时出错: {e}")
    df_out.to_csv('viewlogs_backup.csv', index=False, encoding='utf-8')
    print("数据已保存到 viewlogs_backup.csv 作为备份。")
