#!/usr/bin/env python
# coding: utf-8


import sys
import os
import ast
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model
from sklearn.metrics import mean_squared_error
from sklearn import metrics
from sklearn import svm
import scipy
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.preprocessing import StandardScaler, MinMaxScaler
import pandas as pd
# --- CLI arguments and data loading ---
# Usage: script.py <training.csv> <validation.csv> (paths relative to CWD).
path = os.getcwd()
training = sys.argv[1]
validation = sys.argv[2]
train_df = pd.read_csv(os.path.join(os.getcwd(),training))
validation_df = pd.read_csv(os.path.join(os.getcwd(),validation))
# Keep only training rows with rating >= 2.
# NOTE(review): this leaves a non-contiguous index on train_df (no
# reset_index), which later index-aligned assignments must cope with.
train_df = train_df[train_df.rating>=2]
# Targets: revenue for Part 1 (regression), rating for Part 2 (classification).
y_train = train_df['revenue']
y_train_rating = train_df['rating']
y_val = validation_df['revenue']
y_val_rating = validation_df['rating']
# Correlation of each numeric column with revenue, ascending.
# NOTE(review): computed but never used below — presumably exploratory.
rating_effe = train_df.corr()['revenue'].sort_values()

def show_limit_columns(train_df, columns):
    """Return a copy of ``train_df`` restricted to the given columns.

    Bug fix: the parameter was misspelled ``cloumns`` while the body read
    ``columns``, so the argument was silently ignored and the module-level
    ``columns`` global was used instead (NameError if that global did not
    exist yet).
    """
    return train_df.copy().filter(items=columns)
def datetime_transform(train_df):
    """Return a copy of ``train_df`` with every object (string) column that
    parses as a date converted to datetime64; other columns are untouched.
    """
    results = train_df.copy()
    # ``np.object`` was removed in NumPy 1.24 (AttributeError on modern
    # numpy); the builtin ``object`` is the correct dtype selector.
    for col in train_df.select_dtypes(include=[object]).columns:
        try:
            # ``infer_datetime_format`` is deprecated (pandas >= 2.0) and is
            # the default behavior now, so it is dropped here.
            results[col] = pd.to_datetime(train_df[col])
        except (ValueError, TypeError, OverflowError):
            # Column is not date-like; keep its original values.
            pass
    return results

def datetime_to_ymd(train_df):
    """Replace each datetime64 column of ``train_df`` (copied) with three
    numeric columns ``<col>_y`` / ``<col>_m`` / ``<col>_d``.

    Bug fix: the original dropped the column first and then extracted
    year/month/day inside a bare ``except: pass`` — any failure silently
    lost the column with no replacement. Extract first, drop last, so a
    failure cannot destroy data, and no blanket except is needed (the
    columns are already datetime64, so ``.dt`` access cannot fail).
    """
    results = train_df.copy()
    for col in results.select_dtypes(include=[np.datetime64]).columns:
        parts = train_df[col].dt
        results['{0}_y'.format(col)] = parts.year
        results['{0}_m'.format(col)] = parts.month
        results['{0}_d'.format(col)] = parts.day
        results = results.drop([col], axis=1)
    return results

def extract_out(info, info_name):
    """Parse one stringified list-of-dicts cell and join its values.

    ``info`` is expected to be a string like ``"[{'name': 'Tom Hanks'}]"``;
    the ``info_name`` value of each dict is extracted, inner spaces become
    underscores, and the results are space-joined. Missing (NaN) cells
    return ``np.nan``.

    Bug fix: the original guard was ``info != np.nan``, which is *always*
    True (NaN compares unequal to everything, including itself), so NaN
    cells fell through to ``ast.literal_eval`` and raised.
    """
    if not isinstance(info, str):
        return np.nan
    records = ast.literal_eval(info)
    return " ".join("{}".format(r[info_name]).replace(" ", "_") for r in records)
def field_out(train_df, columns):
    """Flatten stringified list-of-dicts columns into plain text.

    ``columns`` is an iterable of ``(column, key)`` pairs; each listed
    column of the returned copy is replaced by the space-joined ``key``
    values that ``extract_out`` pulls from its cells.
    """
    out = train_df.copy()
    for column, key in columns:
        out[column] = out[column].map(lambda cell: extract_out(cell, key))
    return out

def encode_text_num(df_columns):
    """Collapse a text column to one number per row.

    Fits a bag-of-words vocabulary on the column, converts counts to
    TF-IDF weights, and returns each row's weight sum as a 1-D array.
    NaN cells are treated as empty strings.
    """
    filled = df_columns.copy().fillna('')
    counts = CountVectorizer().fit_transform(filled)
    weights = TfidfTransformer().fit_transform(counts)
    return weights.toarray().sum(axis=1)

def text_to_num(train_df):
    """Return a copy of ``train_df`` with every object (string) column
    replaced by its per-row TF-IDF weight sum (see ``encode_text_num``).

    Bug fixes:
    - ``np.object`` was removed in NumPy 1.24; use the builtin ``object``.
    - The original wrapped the result in ``pd.Series(...)`` with a fresh
      0..n-1 index; assignment then *index-aligned* against ``results``,
      producing NaNs whenever the frame had a filtered (non-contiguous)
      index. Assigning the raw ndarray is positional and always aligns.
    """
    results = train_df.copy()
    for col in train_df.select_dtypes(include=[object]):
        results[col] = encode_text_num(results[col])
    return results

# (column, key) pairs: each stringified list-of-dicts column and the dict
# key whose values should be extracted from it.
columns = [("cast", "name"),("spoken_languages", "name"),("keywords", "name"),("genres", "name"),("crew", "name"),("production_companies", "name"),("production_countries","name")]
exclude_columns = []
# Feature matrices: everything except the two targets and the id column.
x_train = train_df.drop(labels=["rating","revenue","movie_id"]+exclude_columns,axis=1).copy()
movies_id = validation_df["movie_id"].copy()
x_val = validation_df.drop(labels=["rating","revenue","movie_id"]+exclude_columns,axis=1).copy()
# Same four-stage pipeline on train and validation:
# parse date strings -> split dates into y/m/d -> flatten dict-list columns
# -> collapse every remaining text column to a TF-IDF weight sum.
x_train = datetime_transform(x_train)
x_train = datetime_to_ymd(x_train)
x_train = field_out(x_train, columns)
x_train = text_to_num(x_train)
x_val = datetime_transform(x_val)
x_val = datetime_to_ymd(x_val)
x_val = field_out(x_val, columns)
x_val = text_to_num(x_val)
x_train = x_train.dropna()
# NOTE(review): this assumes dropna() removed exactly the last row, and
# trims the target to match by dropping its tail — fragile; if dropna ever
# removes a different row (or more than one), x_train and y_train silently
# misalign. Safer would be y_train = y_train.loc[x_train.index].
y_train.drop(y_train.tail(1).index,inplace=True)
scalers = StandardScaler()
scalerm = MinMaxScaler()
# Bug fix: the original called ``fit_transform`` on the validation set as
# well, so train and validation were scaled with *different* statistics
# (and validation statistics leaked into preprocessing). Fit on the
# training data only and reuse the fitted parameters for validation.
final_x_train = scalers.fit_transform(x_train)
final_x_val = scalers.transform(x_val)
final_x_train = pd.DataFrame(data=final_x_train, columns=x_train.columns)
final_x_val = pd.DataFrame(data=final_x_val, columns=x_val.columns)
# Standardized (zero-mean/unit-variance) copies: used by the Part 2 SVM.
x_train_s = final_x_train.copy()
x_val_s = final_x_val.copy()
final_x_train = scalerm.fit_transform(x_train)
final_x_val = scalerm.transform(x_val)
final_x_train = pd.DataFrame(data=final_x_train, columns=x_train.columns)
final_x_val = pd.DataFrame(data=final_x_val, columns=x_val.columns)
# Min-max scaled copies: used by the Part 1 linear regression.
x_train_m = final_x_train.copy()
x_val_m = final_x_val.copy()
# --- Part 1: predict revenue with ordinary least squares ---
model = linear_model.LinearRegression()
model.fit(x_train_m, y_train)
predict = model.predict(x_val_m)
# Evaluation: mean squared error and Pearson correlation between the
# predicted and true validation revenues.
mse = mean_squared_error(predict,y_val)
correlation = scipy.stats.pearsonr(predict, y_val)[0]
df = pd.DataFrame({
    'zid': ['z5203592'],
    'MSE': [mse],
    'correlation':[round(correlation, 2)]
})
df.to_csv('z5203592.PART1.summary.csv',index=False)
# Per-movie predictions keyed by the validation movie ids saved earlier.
df1 = pd.DataFrame({
    'movie_id':movies_id,
    'predicted_revenue': predict
})
df1.to_csv('z5203592.PART1.output.csv',index=False)
# --- Part 2: classify rating (2 vs 3) with a linear SVM ---
# Trim the rating target the same way y_train was trimmed so it stays
# aligned with x_train after its dropna().
y_train_rating.drop(y_train_rating.tail(1).index, inplace=True)
# Map the rating values {2, 3} onto binary class labels {0, 1}.
y_train_rating = y_train_rating.map({2: 0, 3: 1})
clf = svm.LinearSVC()
clf.fit(x_train_s, y_train_rating)
y_pred = clf.predict(x_val_s)
y_val_rating = y_val_rating.map({2: 0, 3: 1})
# Macro-averaged precision/recall plus plain accuracy on the validation set.
accuracy = metrics.accuracy_score(y_val_rating, y_pred)
recall = metrics.recall_score(y_val_rating, y_pred, average='macro')
precision = metrics.precision_score(y_val_rating, y_pred, average='macro')
pd2 = pd.DataFrame({
    # Bug fix: the zid carried a trailing space ('z5203592 '), inconsistent
    # with the Part 1 summary file.
    'zid': ['z5203592'],
    'average_precision': [round(precision, 2)],
    'average_recall': [round(recall, 2)],
    'accuracy': [round(accuracy, 2)]
})
pd2.to_csv('z5203592.PART2.summary.csv',index=False)
# Map the binary class labels back to the original rating values {2, 3}
# before writing the per-movie predictions.
y_pred = pd.Series(data=y_pred)
y_pred = y_pred.map({
    0: 2, 1: 3
})
pd3 = pd.DataFrame({
    'movie_id':movies_id,
    'predicted_rating':y_pred
})
pd3.to_csv("z5203592.PART2.output.csv",index=False)

