import pandas as pd
import os
from joblib import dump, load
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier
from datetime import datetime
import json
from tqdm import tqdm

def read_behavior_files(TARGET_SOFTWARE: str):
    """Scan data/processed/*.txt behavior logs for one target program.

    Each log file is named '<userid>_<date>...txt'; the first two lines are
    treated as header metadata, and every later line containing the marker
    "P<={TARGET_SOFTWARE}>" counts as one usage event.

    Returns
    -------
    (behavior_df, software_df) :
        behavior_df has columns ['USERID', 'DATE', 'ACTIVE'] (ACTIVE is
        always 1 — the file's existence marks the user active that day);
        software_df has ['USERID', 'DATE', '<program>_USAGE'] daily counts.
    """
    behavior_data = []
    software_usage_data = []
    usage_marker = f"P<={TARGET_SOFTWARE}>"  # hoisted: invariant across all lines/files

    for root, dirs, files in os.walk("data/processed"):
        for file in tqdm(files):
            if not file.endswith(".txt"):
                continue
            parts = file.split('_')
            if len(parts) < 2:
                # Robustness fix: skip files that don't follow the
                # '<userid>_<date>' naming scheme instead of raising IndexError.
                continue
            user_id, date_str = parts[0], parts[1]

            software_usage_count = 0
            file_path = os.path.join(root, file)
            with open(file_path, 'r', encoding='utf-8') as f:
                lines = f.readlines()[2:]  # drop the two header lines
                for line in lines:
                    if usage_marker in line:
                        software_usage_count += 1

            behavior_data.append([user_id, date_str, 1])
            software_usage_data.append([user_id, date_str, software_usage_count])

    behavior_df = pd.DataFrame(behavior_data, columns=['USERID', 'DATE', 'ACTIVE'])
    software_df = pd.DataFrame(software_usage_data, columns=['USERID', 'DATE', f'{TARGET_SOFTWARE}_USAGE'])
    return behavior_df, software_df

def define_churn(df, cutoff=datetime(2012, 8, 6)):
    """Label each user as churned when their last active date precedes *cutoff*.

    Parameters
    ----------
    df : DataFrame
        Must contain 'USERID' and 'DATE' (date strings parseable by
        ``pd.to_datetime``) columns.
    cutoff : datetime, optional
        Users whose last recorded activity is strictly before this date are
        flagged as churned. Defaults to 2012-08-06 (the original hard-coded
        threshold, now parameterized for reuse).

    Returns
    -------
    DataFrame with columns ['USERID', 'CHURN'] where CHURN is a bool.
    """
    last_active = df.groupby('USERID')['DATE'].max().reset_index()
    last_active['LAST_DATE'] = pd.to_datetime(last_active['DATE'])
    last_active['CHURN'] = last_active['LAST_DATE'] < cutoff
    return last_active[['USERID', 'CHURN']]

def generate_model(TARGET_SOFTWARE: str):
    """Train and persist a churn-prediction RandomForest for one program.

    Reads demographics from data/demographic.csv, derives per-user activity
    and usage features from the behavior logs, trains the classifier, and
    saves both the feature-column template (JSON) and the fitted model
    (joblib) in directories next to this module.
    """
    print(f'Generate models for {TARGET_SOFTWARE}')

    df = pd.read_csv('data/demographic.csv')

    behavior_df, software_df = read_behavior_files(TARGET_SOFTWARE)
    churn_df = define_churn(behavior_df)

    # Per-user aggregates of the daily usage counts.
    software_agg = software_df.groupby('USERID')[f'{TARGET_SOFTWARE}_USAGE'].agg(['sum', 'mean', 'max']).reset_index()
    software_agg.columns = ['USERID', f'{TARGET_SOFTWARE}_TOTAL', f'{TARGET_SOFTWARE}_AVG', f'{TARGET_SOFTWARE}_MAX']

    merged_df = pd.merge(df, churn_df, on='USERID')
    # Users with no recorded usage of this program get zeros for the usage features.
    merged_df = pd.merge(merged_df, software_agg, on='USERID', how='left').fillna(0)

    merged_df = pd.get_dummies(merged_df, columns=['GENDER', 'EDU', 'JOB', 'INCOME', 'PROVINCE', 'CITY', 'ISCITY'])
    # Dataset appears to be from 2012, hence the fixed reference year for age.
    merged_df['AGE'] = 2012 - merged_df['BIRTHDAY'].astype(int)
    merged_df = merged_df.drop('BIRTHDAY', axis=1)

    X = merged_df.drop(['USERID', 'CHURN'], axis=1)
    y = merged_df['CHURN']
    # NOTE: the held-out split is currently unused; kept so evaluation can be
    # added without changing the training subset semantics.
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
    feature_columns = X.columns.to_list()

    BASE_DIR = os.path.dirname(os.path.abspath(__file__))

    # BUG FIX: the directories were created under BASE_DIR but the files were
    # written to a cwd-relative 'src/core/churn_prediction/...' path; anchor
    # both the mkdir and the writes to the same BASE_DIR path.
    template_dir = os.path.join(BASE_DIR, 'feature_template')
    os.makedirs(template_dir, exist_ok=True)
    with open(os.path.join(template_dir, f'{TARGET_SOFTWARE}.json'), 'w') as f:
        json.dump(feature_columns, f)

    detective = RandomForestClassifier()
    detective.fit(X_train, y_train)

    models_dir = os.path.join(BASE_DIR, 'models')
    os.makedirs(models_dir, exist_ok=True)
    dump(detective, os.path.join(models_dir, f'{TARGET_SOFTWARE}.joblib'))

    print(f'{TARGET_SOFTWARE} Model Saved')

def prepare_new_data(new_df, TARGET_SOFTWARE: str):
    """Transform raw demographic rows into the feature matrix the saved model expects.

    Loads the feature-column template saved by ``generate_model`` (training a
    model on the fly when the template is missing or unreadable), recomputes
    usage aggregates from the behavior logs, then one-hot encodes and
    reindexes to the exact training columns.

    Parameters
    ----------
    new_df : DataFrame with at least 'USERID' and 'BIRTHDAY' columns.
    TARGET_SOFTWARE : program name used in log matching and file naming.

    Returns
    -------
    DataFrame aligned to the training feature columns, with AGE recomputed.
    """
    template_path = f'src/core/churn_prediction/feature_template/{TARGET_SOFTWARE}.json'
    try:
        with open(template_path) as f:
            template = json.load(f)
    except (FileNotFoundError, json.JSONDecodeError):
        # BUG FIX: was a bare `except:` that swallowed every exception
        # (including KeyboardInterrupt). Only a missing or corrupt template
        # should trigger retraining.
        generate_model(TARGET_SOFTWARE)
        with open(template_path) as f:
            template = json.load(f)

    if 'processed' in os.listdir('data'):
        software_usage_data = []
        print('Loading data...')
        for root, dirs, files in os.walk("data/processed"):
            for file in tqdm(files):
                if file.endswith(".txt"):
                    # Filenames look like '<userid>_<date>...txt'.
                    parts = file.split('_')
                    user_id = parts[0]
                    date_str = parts[1]

                    software_usage_count = 0
                    file_path = os.path.join(root, file)
                    with open(file_path, 'r', encoding='utf-8') as f:
                        # First two lines are header metadata; count the log
                        # lines that mention the target program.
                        lines = f.readlines()[2:]
                        for line in lines:
                            if f"P<={TARGET_SOFTWARE}>" in line:
                                software_usage_count += 1

                    software_usage_data.append([user_id, date_str, software_usage_count])

        if software_usage_data:
            software_df = pd.DataFrame(software_usage_data,
                                       columns=['USERID', 'DATE', f'{TARGET_SOFTWARE}_USAGE'])
            software_agg = software_df.groupby('USERID')[f'{TARGET_SOFTWARE}_USAGE'].agg(['sum', 'mean', 'max']).reset_index()
            software_agg.columns = ['USERID', f'{TARGET_SOFTWARE}_TOTAL', f'{TARGET_SOFTWARE}_AVG', f'{TARGET_SOFTWARE}_MAX']
            new_df = pd.merge(new_df, software_agg, on='USERID', how='left').fillna(0)

    processed = new_df.drop('USERID', axis=1)
    # Align one-hot columns to the exact training layout; categories unseen at
    # training time are dropped, missing ones become all-zero via fill_value.
    processed = pd.get_dummies(processed).reindex(columns=template, fill_value=0)
    processed['AGE'] = 2012 - new_df['BIRTHDAY'].astype(int)
    return processed


def model_analysis(TARGET_SOFTWARE: str):
    """Score every user's churn risk with the saved model and write a CSV report.

    Loads data/demographic.csv, prepares it to match the trained feature
    layout, predicts churn probabilities, and saves the per-user results to
    analyze_results/<program>.csv next to this module.
    """
    data = pd.read_csv('data/demographic.csv')

    prepared_data = prepare_new_data(data, TARGET_SOFTWARE)

    detective_path = f'src/core/churn_prediction/models/{TARGET_SOFTWARE}.joblib'
    loaded_detective = load(detective_path)

    # Probability of the positive (churn) class — a 0-1 risk score.
    risk_scores = loaded_detective.predict_proba(prepared_data)[:, 1]

    result_df = data[['USERID']].copy()
    result_df['流失风险'] = risk_scores  # churn-risk column; name kept for downstream consumers

    # NOTE(review): demographic.csv is unlikely to contain this derived
    # aggregate column (it is computed inside prepare_new_data on a copy),
    # so this branch looks dead — confirm against the actual CSV schema
    # before removing it.
    if f'{TARGET_SOFTWARE}_TOTAL' in data.columns:
        result_df = pd.merge(result_df, data[['USERID', f'{TARGET_SOFTWARE}_TOTAL']], on='USERID')
        result_df['软件使用频率'] = result_df[f'{TARGET_SOFTWARE}_TOTAL']

    BASE_DIR = os.path.dirname(os.path.abspath(__file__))
    RESULTS_DIR = os.path.join(BASE_DIR, 'analyze_results')

    os.makedirs(RESULTS_DIR, exist_ok=True)

    output_path = os.path.join(RESULTS_DIR, f'{TARGET_SOFTWARE}.csv')
    result_df.to_csv(output_path, index=False)

    # BUG FIX: report the path the file was actually written to instead of a
    # hard-coded relative path that can differ from BASE_DIR.
    print(f"分析完成，结果已保存:'{output_path}'")

if __name__ == '__main__':
    # Script entry point: run the full churn analysis for one hard-coded
    # target program (trains a model first if none has been saved yet).
    target_software = 'EXCEL.EXE'
    model_analysis(target_software)