
import data_processing_engineering.get_data_from_database.database_connection as dbConnection
from baseline.true_order import get_true_order_dict, get_order_dict
from evaluation_index_other.Kendall_tau_distance import kendall_tau_distance
from evaluation_index_other.mrr import mrr
from utils.date_utils.date_function import get_waiting_time, get_close_pr_time
import csv
from evaluation_index_other.ndcg import ndcg

import linecache
import os
import pandas as pd
import xgboost as xgb
from xgboost import DMatrix
from sklearn.datasets import load_svmlight_file
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt

# import seaborn as sns
from utils.path_exist import path_exists_or_create
from utils.print_photo import showBN
from pgmpy.models import BayesianNetwork, BayesianModel
from pgmpy.estimators import BayesianEstimator


from pgmpy.estimators import HillClimbSearch
from pgmpy.estimators import K2Score, BicScore

# Column positions of the fields in each row of `selected_data` — the rows
# produced by get_data_by_repo_name_and_origin_data_path after projecting the
# raw `pr_self` DB rows through `useful_features_index`. Keep these in sync
# with the order of that index list.
pr_number_index = 0
repo_name_index = 1
pr_user_id_index = 2
pr_user_name_index = 3
pr_author_association_index = 4
labels_index = 5
created_at_index = 6
closed_at_index = 7
merged_at_index = 8
updated_at_index = 9
merged_index = 10
mergeable_state_index = 11
assignees_content_index = 12
comments_number_index = 13
comments_content_index = 14
review_comments_number_index = 15
review_comments_content_index = 16
commit_number_index = 17
changed_file_num_index = 18
total_add_line_index = 19
total_delete_line_index = 20
title_index = 21
body_index = 22


def get_pr_number_from_origin_data_path(origin_data_path):
    """Map each pr_number in the CSV at *origin_data_path* to its row position.

    Args:
        origin_data_path: path to a CSV file with a ``pr_number`` column.

    Returns:
        dict mapping pr_number -> 0-based row index (a duplicated pr_number
        keeps the index of its last occurrence, as before).

    Raises:
        KeyError: if the CSV has no ``pr_number`` column (previously the
        missing column surfaced as an opaque AttributeError on ``None``).
    """
    frame = pd.read_csv(origin_data_path)
    file_pr_dict = {}
    # enumerate instead of range(len(...)) — same iteration order and prints.
    for i, pr_number in enumerate(frame["pr_number"]):
        print(str(i) + "   pr_number_series   " + str(pr_number))
        file_pr_dict[pr_number] = i
    return file_pr_dict

def get_data_by_repo_name_and_origin_data_path(origin_data_path, repo_name):
    """Load closed PRs of *repo_name* from the DB and bucket them by creation day.

    Args:
        origin_data_path: CSV path handed to get_pr_number_from_origin_data_path
            to build the pr_number -> file-row mapping.
        repo_name: repository whose rows are selected from the ``pr_self`` table.

    Returns:
        Tuple of:
        - day_data: created-day (date) -> {pr_number -> {'created_time', 'closed_time'}}
        - response_time: list of the values of first_response_time_dict (post
          get_close_pr_time), in key order
        - first_response_time_dict: pr_number -> per-PR feature dict processed
          by get_close_pr_time
        - file_pr_dict: pr_number -> row index in the CSV
        - merge_dict: pr_number -> {'merged': value}
    """
    # SECURITY NOTE(review): repo_name is concatenated straight into the SQL
    # text. repo_name only comes from the hard-coded repo_list in __main__
    # today, but this should move to a parameterized query if the value ever
    # becomes user-controlled.
    data = dbConnection.getDataFromSql(
        "select * from pr_self where repo_name='" + repo_name + "' and closed_at is not null order by pr_number")

    print(len(data))

    file_pr_dict = get_pr_number_from_origin_data_path(origin_data_path)

    # Column positions in the raw `pr_self` rows; the projection below fixes
    # the order that the *_index module constants refer to.
    useful_features_index = [0,  ##pr_number
                             2,  ##repo_name
                             3,  ##pr_user_id
                             4,  ##pr_user_name
                             5,  ##pr_author_association
                             8,  ##labels
                             10,  ##created_at
                             12,  ##closed_at
                             13,  ##merged_at
                             11,  ##updated_at
                             14,  ##merged
                             16,  ##mergeable_state
                             18,  ##assignees_content
                             20,  ##comments_number
                             21,  ##comments_content
                             22,  ##review_comments_number
                             23,  ##review_comments_content
                             24,  ##commit_number
                             26,  ##changed_file_num
                             27,  ##total_add_line
                             28,  ##total_delete_line
                             6,  ##title
                             7,  ##body
                             ]

    selected_data = [[row[i] for i in useful_features_index] for row in data]

    first_response_time = []
    merged_list = []
    pr_number_index_list = []
    day_data = {}
    for item in selected_data:
        pr_number = item[pr_number_index]
        created_day = item[created_at_index].date()
        # setdefault collapses the old if/contains-else duplication — both
        # branches wrote exactly the same per-PR entry.
        day_data.setdefault(created_day, {})[pr_number] = {
            'created_time': item[created_at_index],
            'closed_time': item[closed_at_index],
        }
        first_response_time.append((pr_number, {
            'created_time': item[created_at_index],
            'updated_time': item[updated_at_index],
            'closed_time': item[closed_at_index],
            'comments_number': item[comments_number_index],
            'comments_content': item[comments_content_index],
            'review_comments_number': item[review_comments_number_index],
            'review_comments_content': item[review_comments_content_index],
            'pr_user_name': item[pr_user_name_index],
        }))
        merged_list.append((pr_number, {'merged': item[merged_index]}))
        pr_number_index_list.append(pr_number)
    pr_number_index_list.sort()
    # pr_number -> rank position among all PR numbers (ascending).
    pr_number_index_dict = {pr: rank for rank, pr in enumerate(pr_number_index_list)}
    first_response_time_dict = dict(first_response_time)
    merge_dict = dict(merged_list)
    first_response_time_dict = get_close_pr_time(first_response_time_dict)  # get_waiting_time(first_response_time_dict)

    response_time = list(first_response_time_dict.values())
    return day_data, response_time, first_response_time_dict, file_pr_dict, merge_dict


def prepare_temp_file(temp_data_path, origin_data_path, open_pr_index_list, day):
    """Copy selected lines of *origin_data_path* into *temp_data_path*.

    Each index in *open_pr_index_list* is a 0-based data-row position; +1 maps
    it to the 1-based file line that getline() expects (skipping over nothing —
    the original file's first line is line 1).

    Note: *day* is unused here; kept to preserve the call signature.
    """
    # `with` guarantees the handle is closed even if getline() raises
    # (the original leaked the handle on error).
    with open(temp_data_path, 'w+') as out_file:
        for row_index in open_pr_index_list:
            out_file.write(getline(origin_data_path, row_index + 1))




def getline(the_file_path, line_number):
    """Return the *line_number*-th (1-based) line of the file, or '' if absent.

    Args:
        the_file_path: path of the text file to read.
        line_number: 1-based line index; values < 1 yield ''.

    Returns:
        The line including its trailing newline, or '' when line_number is
        out of range.
    """
    if line_number < 1:
        return ''
    # BUGFIX: mode 'rU' was removed in Python 3.11 (ValueError); plain 'r'
    # already does universal-newline translation in Python 3. The `with`
    # block also closes the handle, which the original leaked.
    with open(the_file_path, 'r') as handle:
        for cur_line_number, line in enumerate(handle, start=1):
            if cur_line_number == line_number:
                return line
    return ''


def model_result(day_data, day, pr_number_index_dict, result_rate_in_dict):
    """Rank the PRs still open on *day* by their predicted score, descending.

    A PR is "open on day" when it was created on or before *day*, appears in
    *pr_number_index_dict*, and its closed_time is not strictly before *day*.

    Args:
        day_data: created-day -> {pr_number -> {'created_time', 'closed_time'}}.
        day: the evaluation day (comparable to day_data keys; closed_time is
            compared via .date()).
        pr_number_index_dict: membership filter for known PR numbers.
        result_rate_in_dict: pr_number -> model score; PRs without a score are
            dropped from the ranking.

    Returns:
        List of pr_numbers sorted by score descending; ties keep encounter
        order (the original O(n^2) value-matching pass produced the same
        order, since Python's sort is stable). Empty list when no PR is open.
    """
    open_pr_list = []
    for created_day, day_prs in day_data.items():
        if created_day > day:
            continue
        for pr_key, times in day_prs.items():
            if pr_key in pr_number_index_dict and times['closed_time'].date() >= day:
                open_pr_list.append(pr_key)
    if not open_pr_list:
        return []
    # Deduplicate while preserving encounter order (the original dict of
    # candidates collapsed duplicates the same way).
    seen = set()
    ranked = [pr for pr in open_pr_list
              if pr in result_rate_in_dict and not (pr in seen or seen.add(pr))]
    # Stable sort replaces the original quadratic "sort values, then scan for
    # matching keys" pass with identical output.
    ranked.sort(key=result_rate_in_dict.get, reverse=True)
    return ranked


def alg_model_result(true_rate_label_dict, day_data, pr_number_index_dict, result_rate_in_dict, result_path, real_label_dict):
    """Evaluate the model's daily PR ranking and write one CSV row per day.

    For every day in *day_data*, the open PRs are ranked via model_result(),
    re-ordered by *real_label_dict*, and scored against the ground truth with
    NDCG (exp form), MRR and Kendall tau distance. Results are written to
    *result_path* as a CSV with columns date/ndcg/mrr/kendall_tau_distance.

    Args:
        true_rate_label_dict: pr_number -> ground-truth relevance label.
        day_data: day -> {pr_number -> {'created_time', 'closed_time'}}.
        pr_number_index_dict: membership filter passed through to model_result.
        result_rate_in_dict: pr_number -> model score.
        result_path: output CSV file path (overwritten).
        real_label_dict: pr_number -> reference label used to order the ranking.

    Returns:
        None; results are persisted to *result_path*.
    """
    ndcg_list = []
    day_list = []
    mrr_list = []
    kendall_list = []
    for day in day_data.keys():
        print("=================================day:", day)

        sort_result = model_result(day_data, day, pr_number_index_dict, result_rate_in_dict)
        if not sort_result:
            # BUGFIX: this used to print the module global `origin_data_path`,
            # which raises NameError whenever the function is imported and
            # called outside the __main__ script; report the day instead.
            print(str(day) + " no pr")
            continue
        rank_sort = []
        true_sort = []

        print("sort_result:", sort_result)
        # Re-order the model's ranking by the reference label, descending.
        rank_dict = {pr: real_label_dict[pr] for pr in sort_result}
        temp_rank_list = sorted(rank_dict.items(), key=lambda s: s[1], reverse=True)
        rank_dict = dict(temp_rank_list)
        print("rank_dict:", temp_rank_list)
        for pr_index in rank_dict.keys():
            rank_sort.append(true_rate_label_dict[pr_index])
        for pr_number_result in sort_result:
            true_sort.append(true_rate_label_dict[pr_number_result])

        true_sort.sort(reverse=True)  # ideal ordering for the NDCG reference
        ndcg_num = ndcg(true_sort, rank_sort, len(rank_sort), form="exp")
        mrr_num = mrr(true_sort, rank_sort)
        kendall_num = kendall_tau_distance(true_sort, rank_sort)
        print("pr_number:", sort_result)
        print("rank_sort:", rank_sort)
        print("true_sort:", true_sort)
        print("ndcg_num:", ndcg_num)
        print("mrr_num:", mrr_num)
        print("kendall_num:", kendall_num)
        day_list.append(day)
        ndcg_list.append(ndcg_num)
        mrr_list.append(mrr_num)
        kendall_list.append(kendall_num)
        # (dead max_day/min_day bookkeeping removed — it was never read)

    headers = ['date',
               'ndcg',
               'mrr',
               'kendall_tau_distance'
               ]
    row_data = [list(row) for row in zip(day_list, ndcg_list, mrr_list, kendall_list)]
    print(row_data)

    with open(result_path, 'w', encoding='utf-8', newline='') as f:
        writer = csv.writer(f, dialect='excel')
        writer.writerow(headers)
        writer.writerows(row_data)
    return None



def train_model_and_result(alg_name, test_data_path, repo_name):
    """Placeholder "model": give every PR in the test CSV a constant score of 1.

    Args:
        alg_name: algorithm label (unused here; kept for the caller's sake).
        test_data_path: CSV file with a ``pr_number`` column.
        repo_name: repository name (unused here; kept for the caller's sake).

    Returns:
        dict mapping each pr_number to the constant score 1.
    """
    pr_numbers = pd.read_csv(test_data_path).get("pr_number")
    module_result = {}
    for position, number in enumerate(pr_numbers):
        print(str(position) + "   pr_number_series   " + str(number))
        print(str(position) + "  y_pred    " + str(1))
        module_result[number] = 1
    return module_result


# Press the green button in the gutter to run the script.
if __name__ == '__main__':
    repo_list = ["tensorflow", "netbeans", "phoenix", "Katello", "kuma", "moby", "opencv", "react", "scikit-learn", "terraform"]#
    for repo_name in repo_list:
        alg_name = "single_label"

        # Input/output locations for this repository (directories created on demand).
        file_path = "../data_processing_engineering/bayesian_data/" + repo_name + "/"
        path_exists_or_create(file_path)
        origin_data_path = file_path + repo_name + "_bayes_rank_format_test_data.csv"
        bayesian_result_path = "./result/single_label/" + repo_name + "/"
        path_exists_or_create(bayesian_result_path)
        result_path = bayesian_result_path + repo_name + "_" + alg_name + "_result.csv"

        test_data_path = file_path + repo_name + "_bayes_rank_format_test_data.csv"

        # Constant-score baseline prediction for every PR in the test file.
        result_rate_in_dict = train_model_and_result(alg_name, test_data_path, repo_name)

        for key in result_rate_in_dict.keys():
            print(str(key) + "....." + str(result_rate_in_dict.get(key)))
        day_data, response_time, first_response_time_dict, pr_number_index_dict, merge_dict = get_data_by_repo_name_and_origin_data_path(
            origin_data_path, repo_name)
        real_label_dict = get_true_order_dict(response_time, first_response_time_dict)
        true_rate_label_dict = get_order_dict(response_time, first_response_time_dict, merge_dict)
        alg_model_result(true_rate_label_dict, day_data, pr_number_index_dict, result_rate_in_dict, result_path, real_label_dict)
    # WARNING(review): schedules a Windows shutdown 600 s after the run
    # finishes — surprising on shared machines; confirm this is intended.
    # (redundant local `import os` removed — os is imported at the top of the file)
    os.system("shutdown -s -t 600")