import numpy as np 
import pandas as pd
import re
import string
from sklearn.model_selection import train_test_split
from xgboost import XGBClassifier
from sklearn.metrics import accuracy_score
import joblib
import warnings
from sklearn.feature_extraction.text import TfidfVectorizer

from sklearn.ensemble import GradientBoostingClassifier


# Silence library deprecation/copy warnings for a cleaner training log.
warnings.filterwarnings('ignore')
# One row per user; the code below uses columns 'posts' (sub-posts joined
# with the '|||' separator) and 'type' (4-letter MBTI code, see L42-L45).
df = pd.read_csv('data/mbti_1.csv')

# Split each user's post blob into a list of individual posts.
# (NB: 'seperated' is a typo, kept because L22 references this column name.)
df['seperated_post'] = df['posts'].apply(lambda x: x.strip().split("|||"))
df['num_post'] = df['seperated_post'].apply(lambda x: len(x))
df['id'] = df.index  # stable key used to re-group after exploding below

# Explode to one row per individual post (column 'idposts'), keeping the
# user id, then left-join the per-user columns back on.
df1 = pd.DataFrame(df['seperated_post'].tolist(), index=df['id']).stack().reset_index(level=1, drop=True).reset_index(name='idposts')
df1 = df1.join(df.set_index('id'), on='id', how = 'left')

# Pre-compiled patterns: compiled once at import time instead of on every call.
_URL_RE = re.compile(r'http[^\s]*')
_DIGIT_RE = re.compile(r'[0-9]+')
_MENTION_RE = re.compile(r'@[a-z0-9]+')
# re.escape guards against punctuation characters that are regex
# metacharacters inside a character class ('-', ']', '\', '^'); the
# original '[%s]*' % string.punctuation only worked by accident of the
# characters' ordering. '+' instead of '*' skips zero-length matches.
_PUNCT_RE = re.compile('[%s]+' % re.escape(string.punctuation))


def clean_text(text):
    """Normalize one raw post for TF-IDF vectorization.

    Steps, in order (order matters — mentions must be replaced before
    punctuation stripping removes the '@'):
      1. remove URLs (anything starting with 'http', up to whitespace),
      2. remove digits and lowercase,
      3. replace @mentions with the token 'user',
      4. remove all ASCII punctuation.

    Parameters
    ----------
    text : str
        Raw post text.

    Returns
    -------
    str
        Cleaned, lowercased text. Whitespace is left untouched.
    """
    result = _URL_RE.sub('', text)
    result = _DIGIT_RE.sub('', result).lower()
    result = _MENTION_RE.sub('user', result)
    result = _PUNCT_RE.sub('', result)
    # NOTE: the original called .lower() a second time here — redundant,
    # since step 2 already lowercased and later steps add no uppercase.
    return result
    
# Clean each exploded sub-post, then group back to one row per user and
# join the cleaned sub-posts into a single space-separated document.
# (Removed a bare no-op `df1` expression here — a notebook leftover that
# evaluates the frame and discards the result.)
df1['idposts']= df1['idposts'].apply(clean_text)
cleaned_df = df1.groupby('id')['idposts'].apply(list).reset_index()
# groupby('id') sorts by id, which equals the original row order, so
# positional alignment with df's RangeIndex is preserved.
df['cleaned_post'] = cleaned_df['idposts'].apply(lambda x: ' '.join(x))

# Vectorize the cleaned documents with TF-IDF, dropping English stop words.
tfidf = TfidfVectorizer(stop_words='english')
X = tfidf.fit_transform(df['cleaned_post'])

# One binary target per MBTI axis: 1 when the letter at that position of
# the 4-letter type code matches the first pole (E/S/T/J), else 0.
y_df = pd.DataFrame(df['type'])
for axis, pos, pole in (('EI', 0, 'E'), ('SN', 1, 'S'),
                        ('TF', 2, 'T'), ('JP', 3, 'J')):
    y_df[axis] = y_df['type'].apply(
        lambda t, p=pos, letter=pole: 1 if t[p] == letter else 0)

def makemodel(keyword):
    """Train an XGBoost binary classifier for one MBTI axis.

    Parameters
    ----------
    keyword : str
        Column name in the module-level label frame ``y_df``
        ('EI', 'SN', 'TF' or 'JP').

    Returns
    -------
    XGBClassifier
        Fitted classifier; test accuracy is printed as a side effect.

    Notes
    -----
    Reads the module-level TF-IDF matrix ``X`` and label frame ``y_df``.
    The original implementation used the *test* fold as the early-stopping
    ``eval_set``, leaking test information into model selection; a separate
    validation fold is now carved out of the training data, so the test
    fold stays untouched until the final accuracy report.
    """
    y_f = y_df[keyword].values
    X_f_train, X_f_test, y_f_train, y_f_test = train_test_split(X, y_f,
                                                    stratify=y_f,
                                                    test_size=0.2,
                                                    random_state=123)
    # Hold out 10% of the training fold purely for early stopping.
    X_tr, X_val, y_tr, y_val = train_test_split(X_f_train, y_f_train,
                                                stratify=y_f_train,
                                                test_size=0.1,
                                                random_state=123)
    f_classifier = XGBClassifier()
    # NOTE(review): fit-time early_stopping_rounds is deprecated in
    # xgboost>=1.6 (moved to the constructor) and removed in 2.x — keep
    # in sync with the installed xgboost version.
    f_classifier.fit(X_tr, y_tr,
                     early_stopping_rounds = 10,
                     eval_metric="logloss",
                     eval_set=[(X_val, y_val)], verbose=False)
    print("Accuracy %s" % keyword, accuracy_score(y_f_test, f_classifier.predict(X_f_test)))
    return f_classifier

# Train one binary model per MBTI axis, then persist everything needed
# for inference: the four models plus the fitted TF-IDF vectorizer.
EImodel = makemodel('EI')
SNmodel = makemodel('SN')
TFmodel = makemodel('TF')
JPmodel = makemodel('JP')
for axis, model in (('EI', EImodel), ('SN', SNmodel),
                    ('TF', TFmodel), ('JP', JPmodel)):
    joblib.dump(model, 'modelsave/%smodel.pkl' % axis)
joblib.dump(tfidf, 'modelsave/tfidf.pkl')



