import os
from collections import defaultdict
from tqdm import tqdm,trange
from random import randint, random,choice,shuffle
import csv
import re
import json

def build(dir="./data/mydict"):
    """Build a word-translation dictionary from plain-text word-pair files.

    Every file in *dir* is expected to be named like ``en-<lang>.txt`` and
    to contain one ``english<TAB or space>translation`` pair per line.

    Args:
        dir: Directory holding the ``en-<lang>.txt`` dictionary files.
            (Name kept for backward compatibility although it shadows the
            ``dir`` builtin.)

    Returns:
        Tuple ``(mydict, langs)`` where ``mydict[en_word][lang]`` is a list
        of translations and ``langs`` lists one language code per file.
    """
    files = os.listdir(dir)
    # mydict[english_word][language_code] -> list of translations
    mydict = defaultdict(lambda: defaultdict(lambda: []))
    langs = []
    for fn in files:
        # e.g. "en-de.txt" -> "de"
        # NOTE(review): a file not matching this naming scheme raises
        # IndexError here — confirm the directory only holds dict files.
        lang = fn.split(".")[0].split("-")[1]
        langs.append(lang)
        # explicit encoding: dictionary files contain non-ASCII words
        with open(os.path.join(dir, fn), encoding="utf-8") as fp:
            for line in tqdm(fp.readlines(), desc=f"load dict in reading {fn}"):
                line = line.strip('\n')
                # raw string: the original "\t|\s" only worked because "\s"
                # happens to pass through unescaped
                ls = re.split(r"\t|\s", line)
                try:
                    en, tgt = ls[0], ls[1]
                    mydict[en][lang].append(tgt)
                except IndexError:
                    # malformed line (fewer than two fields): report and skip
                    print(f"{fn}-{line}")

    return mydict, langs

def write_file(fn, data):
    """Serialize *data* as JSON and append it to file *fn*.

    NOTE(review): mode ``'a'`` is kept from the original; appending a
    second JSON document to the same file produces a file that
    ``json.load`` cannot parse — confirm whether ``'w'`` was intended.

    Args:
        fn: Output file path.
        data: Any JSON-serializable object.
    """
    # context manager guarantees the handle is closed even if dump raises
    with open(fn, 'a', encoding='utf8', newline='') as f:
        json.dump(data, f)

def read_dict(path="./data/mydict/all_dict.csv"):
    """Read a CSV file and return all of its rows.

    The original returned the ``csv.reader`` object itself, but the file
    was already closed when the ``with`` block exited, so iterating the
    returned reader raised ``ValueError``.  Materializing the rows while
    the file is still open fixes that; a list is iterable, so callers that
    only looped over the result keep working.

    Args:
        path: Path to the CSV file.

    Returns:
        List of rows, each row a list of string fields.
    """
    with open(path, newline='', encoding='utf-8') as f:
        return list(csv.reader(f))


def get_data(in_name):
    """Load a tab-separated NLI file into (premise, hypothesis, label) tuples.

    The first line (header) is skipped.  Commas are replaced by spaces and
    double quotes removed so the fields can later be written to a CSV file
    without quoting issues.

    Args:
        in_name: Path to a TSV file with a header row and three columns.

    Returns:
        List of ``(premise, hypothesis, label)`` string tuples.

    Raises:
        ValueError: If a data line does not have exactly three fields.
    """
    data = []
    # 'with' closes the handle that the original leaked; streaming the file
    # avoids materializing it entirely via readlines()
    with open(in_name, encoding='utf-8') as fin:
        next(fin, None)  # skip header row (None guard: empty file is fine)
        for line in tqdm(fin, desc="get data"):
            premise, hypothesis, label = line.strip().split('\t')
            # strip CSV-hostile characters so downstream output needs no quoting
            premise = premise.replace(',', ' ').replace('"', '')
            hypothesis = hypothesis.replace(',', ' ').replace('"', '')
            data.append((premise, hypothesis, label))
    return data

def replaceToken(all_data, mydict, alpha=0.5):
    """Build code-switched triples ``sent0,sent1,hard_neg`` from NLI data.

    For every 'entailment' example, the premise is code-switched token by
    token (each in-dictionary token is replaced by a random translation
    with probability *alpha*) and paired with the hypothesis of a
    'contradictory' example sharing the same premise (the hard negative).
    When no contradictory example shares the premise, a random one is used
    and counted in the ``error`` total printed at the end.

    Args:
        all_data: List of ``(premise, hypothesis, label)`` tuples.
        mydict: Mapping ``word -> {lang: [translations]}``.
        alpha: Per-token replacement probability.

    Returns:
        List of ``"sent0,sent1,hard_neg"`` comma-joined strings.
    """
    t2cid = {}
    out_data = []

    def replaceString(sent):
        # split on commas/periods/whitespace, keeping the separators so the
        # sentence can be re-joined without losing spacing
        tokens = re.split(r'([,.\s])', sent)
        ret = []
        for tok in tokens:
            # .get avoids the original defaultdict lookup, which silently
            # inserted an empty entry into mydict for every unknown token
            langsDict = mydict.get(tok)
            if langsDict and random() < alpha:
                try:
                    tgt_list = langsDict[choice(list(langsDict.keys()))]
                    ret.append(choice(tgt_list))
                except (KeyError, IndexError):
                    # narrowed from the original bare `except:`; an empty
                    # translation list lands here (token is dropped, as before)
                    print(f"{tok} cause error")
            else:
                ret.append(tok)
        return "".join(ret)

    # index every contradictory example by its premise (hard negatives)
    for i, d in enumerate(all_data):
        if d[2] == 'contradictory':
            t2cid[d[0]] = i
    error = 0
    for i, d in tqdm(enumerate(all_data), desc="process data"):
        if d[0] in t2cid:
            cid = t2cid[d[0]]
        else:
            # no contradictory example shares this premise: fall back to a
            # random hard negative and count the mismatch
            cid = choice(list(t2cid.values()))
            error += 1
        if d[2] == 'entailment':
            pos = replaceString(d[0])
            out_data.append(all_data[i][0] + ',' + pos + ',' + all_data[cid][1])
    print('error:', error)
    return out_data



# --- script entry: load the cached dictionary if present, otherwise build it,
# --- then generate the code-switched NLI training file.
#
# The original checked for .csv cache files and passed *path strings* to
# csv.reader (which requires an open file object, not a filename), so the
# cached branch could never produce a usable dict.  The cache files are
# JSON (see the commented-out write_file calls in build), so load with json.
if os.path.exists("./data/mydict/all_dict.json") and os.path.exists("./data/mydict/all_lang.json"):
    with open("./data/mydict/all_dict.json", encoding='utf8') as f:
        mydict = json.load(f)
    with open("./data/mydict/all_lang.json", encoding='utf8') as f:
        langs = json.load(f)
else:
    mydict, langs = build()


all_data = get_data('data/XNLI-MT-1.0/multinli/multinli.train.en.tsv')
out_data = replaceToken(all_data, mydict)

shuffle(out_data)
# 'with' closes the handle the original leaked — an unclosed, unflushed
# handle can lose the final buffered rows at interpreter exit
with open('data/nli_codeswitch_all2.csv', 'w') as fout:
    fout.write('sent0,sent1,hard_neg\n')
    fout.write('\n'.join(out_data))

def mullangCodeSwith(inputs, mydict, langs, alpha = 0.8, beta=0.5):
    nums_lang = len(langs)
    ret = []
    for i in trange(len(inputs)):

        if random() < alpha:
            pass
        else:
            pass