import logging
import unittest
from itertools import combinations

import pandas as pd
import os
import re
from multiprocessing import Process, Value, Lock, Queue, freeze_support

# Log every message (DEBUG and up) with timestamp, source location and level.
logging.basicConfig(format='%(asctime)s - %(pathname)s[line:%(lineno)d] - %(levelname)s: %(message)s',
                    level=logging.DEBUG)

# Shared lock used by the worker processes to serialize task-queue access.
lock = Lock()

# Pipeline file/folder layout; each stage reads the previous stage's output.
data_csv_path = r"./data/csv_data/"
raw_data_path = data_csv_path + "log.txt"
split_data_folder = data_csv_path + "log_data_split/" # raw log split into fixed-size line batches
cleaned_split_data_csv_folder = data_csv_path + "log_data_split_cleaned/" # per-batch CSV tables after cleaning
decoded_data_csv_folder = data_csv_path + "log_data_split_cleaned_decoded/" # batches after activity-score decoding
agged_data_csv_path = data_csv_path + "/log_data_split_cleaned_decoded_agg.csv" # scores summed per (actor, repo)
heter2homo_data_csv_by_actor_folder = data_csv_path + "log_data_split_cleaned_decoded_agg_hetero2homo/" # heterogeneous -> homogeneous graph

def split_txt(ifp, ofp, start_marker="22983883400", start_idx=2136, batch_limit=10000):
    '''
    Split the raw log file into fixed-size batch files.

    Lines are skipped until a line containing *start_marker* is seen (the
    marker line itself is also skipped); after that, every line matching the
    expected log-record pattern is accumulated and flushed to
    ``<ofp>/<idx>.txt`` in batches of *batch_limit* records.

    :param ifp: path of the raw log file
    :param ofp: output folder for the split batch files
    :param start_marker: substring marking where processing begins (previous default was hard-coded)
    :param start_idx: index of the first output batch file (previous default was hard-coded)
    :param batch_limit: number of matching records per output file
    :return: None
    '''
    # Hoisted out of the loop; raw string avoids invalid-escape warnings.
    record_re = re.compile(r"^(\d{11}),([a-zA-Z]+Event),(.*?),(\d+),.*?,(\d+)")

    idx = start_idx
    text_batch = ""
    batch_size = 0
    flag = False

    with open(ifp, 'r', encoding='utf-8') as f:
        for text in f:
            if not flag:
                # Skip everything up to and including the marker line.
                if text.find(start_marker) >= 0:
                    flag = True
                continue

            # Flush a full batch before accumulating more lines, so each
            # output file holds exactly batch_limit records.
            if batch_size >= batch_limit:
                logging.info('saving idx:{}!'.format(idx))
                with open(os.path.join(ofp, '{}.txt'.format(idx)), 'w', encoding='utf-8') as p:
                    p.write(text_batch)
                text_batch = ""
                idx += 1
                batch_size = 0

            if record_re.findall(text):
                batch_size += 1
                text_batch += text

    # Bug fix: flush the trailing partial batch (the original silently
    # dropped up to batch_limit-1 records at end of file).
    if text_batch:
        logging.info('saving idx:{}!'.format(idx))
        with open(os.path.join(ofp, '{}.txt'.format(idx)), 'w', encoding='utf-8') as p:
            p.write(text_batch)

#Parallel
#Parallel
def clean_split_data(ifp, ofp):
    '''
    Filter one split log file into a CSV table.

    Reads *ifp* line by line, keeps only lines matching the expected
    record pattern, and writes the captured fields to *ofp* as CSV with
    the header ``id,type,action,actor_id,repo_id``.

    Called from the worker processes (see ``slave_clean``).

    :param ifp: input split-log file path
    :param ofp: output CSV file path
    :return: None
    '''
    # Hoisted: the original re-compiled this pattern for every line.
    record_re = re.compile(r"^(\d{11}),([a-zA-Z]+Event),(.*?),(\d+),.*?,(\d+)")

    with open(ofp, 'w', encoding='utf-8') as of:
        of.write("id,type,action,actor_id,repo_id\n")
        with open(ifp, 'r', encoding='utf-8') as f:
            for line in f:
                result = record_re.findall(line)
                if result:
                    of.write("{},{},{},{},{}\n".format(*result[0]))

def slave_clean(i, q, ifp, ofp):
    '''
    Log-filtering worker process.

    Pops file names off the shared queue and runs ``clean_split_data``
    on each.  The queue check and pop are guarded by the global lock;
    the lock is released BEFORE the (slow) filtering step so workers can
    actually run in parallel — the original held the lock for the whole
    task, which serialized all workers.

    :param i: worker index (used only in log messages)
    :param q: shared task queue of file names
    :param ifp: input folder prefix
    :param ofp: output folder prefix
    :return: None
    '''
    global lock
    while True:
        # Hold the lock only for the empty-check + get, so the check/pop
        # pair stays atomic without blocking the other workers' real work.
        lock.acquire()
        try:
            if q.empty():
                print("proc {}, task queue is empty!".format(i))
                break
            task = q.get()
        finally:
            lock.release()
        print("proc {} filters {}...".format(i, task))
        try:
            clean_split_data(ifp + task, ofp + task.replace("txt", "csv"))
        except Exception as e:
            # Best-effort: log and keep consuming the queue.
            print("proc {}, exception!".format(i))
            print(e)

def host_clean(ifp, ofp, psize=7):
    '''
    Filter log files in parallel (master process).

    Fills a task queue with every file name under *ifp* and spawns
    *psize* worker processes running ``slave_clean``.

    :param ifp: folder of split log files
    :param ofp: folder for the cleaned CSV output
    :param psize: number of worker processes (default 7, as before)
    :return: None
    '''
    task_queue = Queue()
    for task in os.listdir(ifp):
        task_queue.put(task)
    procs = [Process(target=slave_clean, args=(i, task_queue, ifp, ofp)) for i in range(psize)]
    for p in procs:
        p.start()
    # Bug fix: wait for the workers.  Without the joins, main() started the
    # next pipeline stage before the cleaning stage had finished.
    for p in procs:
        p.join()


# DECODE SCORE
# DECODE SCORE
def decode_event_score_apply(x):
    '''
    Map one Github event row to an activity score.

    Scores by (event type, action):
      IssueCommentEvent/created -> 1, IssueEvent/open -> 2,
      PullRequestEvent/created -> 4, PullRequestReviewCommentEvent/created -> 4,
      PullRequestEvent/closed -> 5; every other combination -> 0.

    :param x: row object exposing ``type`` and ``action`` attributes
    :return: integer activity score
    '''
    score_table = {
        ("IssueCommentEvent", "created"): 1,
        ("IssueEvent", "open"): 2,
        ("PullRequestEvent", "created"): 4,
        ("PullRequestReviewCommentEvent", "created"): 4,
        ("PullRequestEvent", "closed"): 5,
    }
    return score_table.get((x.type, x.action), 0)

def decode_event_score(ifp, ofp):
    '''
    Decode activity scores for one cleaned CSV shard.

    Applies ``decode_event_score_apply`` to every row, drops the
    ``type``/``action`` columns, filters out zero-score rows, and writes
    the result to *ofp*.

    :param ifp: input cleaned CSV path
    :param ofp: output scored CSV path
    :return: 0
    '''
    frame = pd.read_csv(ifp)
    frame['score'] = frame.apply(decode_event_score_apply, axis=1)
    frame = frame.drop(columns=['type', 'action'])
    frame = frame[frame.score != 0]
    frame.to_csv(ofp)

    return 0

def slave_decode(i, q, ifp, ofp):
    '''
    Score-decoding worker process.

    Pops file names off the shared queue and runs ``decode_event_score``
    on each.  The queue check and pop are guarded by the global lock;
    the lock is released BEFORE the (slow) decoding step so workers can
    actually run in parallel — the original held the lock for the whole
    task, which serialized all workers.

    :param i: worker index (used only in log messages)
    :param q: shared task queue of file names
    :param ifp: input folder prefix
    :param ofp: output folder prefix
    :return: None
    '''
    global lock
    while True:
        # Hold the lock only for the empty-check + get, so the check/pop
        # pair stays atomic without blocking the other workers' real work.
        lock.acquire()
        try:
            if q.empty():
                print("proc {}, task queue is empty!".format(i))
                break
            task = q.get()
        finally:
            lock.release()
        print("proc {} decode {}...".format(i, task))
        try:
            decode_event_score(ifp + task, ofp + task)
        except Exception as e:
            # Best-effort: log and keep consuming the queue.
            print("proc {}, exception!".format(i))
            print(e)

def host_decode(ifp, ofp, psize=7):
    '''
    Decode activity scores in parallel (master process).

    Fills a task queue with every file name under *ifp* and spawns
    *psize* worker processes running ``slave_decode``.

    :param ifp: folder of cleaned CSV shards
    :param ofp: folder for the score-decoded output
    :param psize: number of worker processes (default 7, as before)
    :return: None
    '''
    task_queue = Queue()
    for task in os.listdir(ifp):
        task_queue.put(task)
    procs = [Process(target=slave_decode, args=(i, task_queue, ifp, ofp)) for i in range(psize)]
    for p in procs:
        p.start()
    # Bug fix: wait for the workers.  Without the joins, main() started the
    # next pipeline stage before decoding had finished.
    for p in procs:
        p.join()

# AGG分数加总
# AGG: sum the scores
def score_agg(ifp, ofp):
    '''
    Relation reduction: sum scores per (actor_id, repo_id) pair.

    Reads every CSV shard under *ifp*, concatenates them once, and sums
    the multiple per-event scores between each developer and repository.
    The original concatenated AND re-grouped inside the loop, which is
    quadratic; grouping once after a single concat yields the same sums.

    :param ifp: folder of score-decoded CSV shards
    :param ofp: output aggregated CSV path
    :return: None
    '''
    frames = [pd.read_csv(os.path.join(ifp, csv_file)) for csv_file in os.listdir(ifp)]
    if frames:
        df = pd.concat(frames)
    else:
        # Robustness: an empty input folder previously raised KeyError.
        df = pd.DataFrame(columns=['actor_id', 'repo_id', 'score'])
    df = df.groupby(['actor_id', 'repo_id'], as_index=False).agg({'score': 'sum'})
    df.to_csv(ofp)

# 算边权转二部图
# Serial
# Compute edge weights and build the bipartite projection
# Serial
def heter2homo_serial(ifp, ofp):
    '''
    Vertex reduction: fold developer vertices away, producing a
    homogeneous repo-repo graph.

    For every actor, each pair of repos the actor touched gets two
    directed edges (both orders) weighted score_a*score_b/(score_a+score_b).
    Edges are flushed to numbered CSV chunk files under *ofp*.

    Fixes two defects of the original:
      * the trailing partial chunk (< 50000 edges) was never written,
        silently losing data;
      * rows were appended one-by-one via ``df.loc[len(df)]`` (quadratic)
        and scores read via deprecated ``int(Series)``.

    :param ifp: aggregated actor/repo score CSV
    :param ofp: output folder prefix (must end with a path separator)
    :return: None
    '''
    df = pd.read_csv(ifp)

    actor_list = list(df['actor_id'].unique())
    actor_num = len(actor_list)

    edge_rows = []
    file_idx = 0

    def _flush():
        # Write the accumulated edges as one numbered CSV chunk.
        nonlocal edge_rows, file_idx
        chunk = pd.DataFrame(edge_rows, columns=["nodeA", "nodeB", "score"])
        chunk[['nodeA', 'nodeB']] = chunk[['nodeA', 'nodeB']].astype('int')
        chunk.to_csv(ofp + "{}.csv".format(file_idx))
        file_idx += 1
        edge_rows = []

    for actor_count, actor_id in enumerate(actor_list, 1):
        print("parsing actor {}, the {}/{}, cuurent df_tot size:{}...".format(actor_id, actor_count, actor_num, len(edge_rows)))

        repo_df = df[df["actor_id"] == actor_id]
        # Post-aggregation, each (actor, repo) pair is unique, so a plain
        # dict lookup replaces the per-pair DataFrame filtering.
        scores = dict(zip(repo_df['repo_id'], repo_df['score']))

        for repo_a, repo_b in combinations(list(repo_df['repo_id']), 2):
            score_a = int(scores[repo_a])
            score_b = int(scores[repo_b])
            score = score_a * score_b / (score_a + score_b)
            # Add both edge directions.
            edge_rows.append([repo_a, repo_b, score])
            edge_rows.append([repo_b, repo_a, score])
            if len(edge_rows) > 50000:
                _flush()

    # Bug fix: flush the trailing partial chunk (was silently dropped).
    if edge_rows:
        _flush()

def agg_csv_by_edges(num_of_edges, ifp, ofp):
    '''
    Concatenate edge-CSV chunks until roughly *num_of_edges* edges are read.

    Each chunk file holds ~50000 edges, so ``num_of_edges / 50000`` files
    are read.  The file count is clamped to the number of available chunks
    (the original raised IndexError when asked for more edges than exist).

    :param num_of_edges: desired total number of edges in the output
    :param ifp: folder containing the edge CSV chunks
    :param ofp: output CSV path
    :return: None
    '''
    csv_file_list = os.listdir(ifp)
    # Bug fix: clamp so we never index past the available chunk files.
    num_of_file = min(int(num_of_edges / 50000), len(csv_file_list))

    frames = [pd.read_csv(os.path.join(ifp, csv_file)) for csv_file in csv_file_list[:num_of_file]]
    df = pd.concat(frames) if frames else pd.DataFrame()
    df.to_csv(ofp, index=None)

def vretex_remap(ifp, ofp):
    '''
    Remap vertex ids to compact consecutive integers and merge duplicate edges.

    NOTE(review): the original docstring claimed numbering starts at 0,
    but the code has always numbered from 1 — behavior kept as-is.

    :param ifp: input edge CSV with nodeA/nodeB/score columns
    :param ofp: output CSV with remapped ids and per-edge summed scores
    :return: None
    '''
    df = pd.read_csv(ifp)

    # Consecutive ids (from 1) in first-seen order over both endpoints.
    all_nodes = pd.concat([df['nodeA'], df['nodeB']]).unique()
    vertex_map = {node: new_id for new_id, node in enumerate(all_nodes, start=1)}

    df['nodeA'] = df['nodeA'].map(vertex_map)
    df['nodeB'] = df['nodeB'].map(vertex_map)

    print("orig df len:{}".format(len(df)))
    df = df.groupby(['nodeA', 'nodeB']).agg({"score": "sum"})
    print("grouped df len:{}".format(len(df)))
    df.to_csv(ofp)

def main():
    '''Run the full log-processing pipeline end to end.'''

    # ======== Split the raw log data ========
    split_txt(raw_data_path, split_data_folder)

    # ======== Filter the log records ========
    host_clean(split_data_folder, cleaned_split_data_csv_folder)

    # ======== Decode activity scores ========
    host_decode(cleaned_split_data_csv_folder, decoded_data_csv_folder)

    # ======== Relation reduction ========
    score_agg(decoded_data_csv_folder, agged_data_csv_path)

    # ======== Vertex reduction ========
    heter2homo_serial(agged_data_csv_path, heter2homo_data_csv_by_actor_folder)

    # Build datasets of several sizes to benchmark performance.
    dataset_sizes = [
        ("100k", 100000),
        ("1m", 1000000),
        ("10m", 10000000),
        ("20m", 20000000),
        ("40m", 40000000),
    ]
    for label, edge_count in dataset_sizes:
        edges_csv = data_csv_path + "{}-edges.csv".format(label)
        remapped_csv = data_csv_path + "{}-edges-remaped.csv".format(label)
        agg_csv_by_edges(edge_count, heter2homo_data_csv_by_actor_folder, edges_csv)
        vretex_remap(edges_csv, remapped_csv)

# Script entry point: run the whole data-preparation pipeline.
if __name__ == '__main__':
    main()