import fileModel
import random
import re
import os
import corpusAnalysis

def analysis_tag(path):
    """Count how many times each distinct line (tag) occurs in the file at *path*.

    Prints the resulting {tag: count} dict (as before) and also returns it,
    so callers can use the counts programmatically.

    Fixes: the file handle returned by fileModel.open_file was never closed.
    """
    cat = {}
    f = fileModel.open_file(path)
    try:
        for line in f:
            tag = line.strip("\n")
            # manual counting kept (no Counter) to match the file's style
            cat[tag] = cat.get(tag, 0) + 1
    finally:
        f.close()  # original leaked this handle
    print(cat)
    return cat
# root = "G:\\我的本地文件\\资料\\数据\\短文本\\news\\"
# analysis_tag('G:\intellij\TopicModelForShortText\My_LDA\data2\yahooQA\\yahoo_question_tag.txt')

def removebadTag(path, tagpath):
    """Keep only the documents whose tag is in the 10-category whitelist.

    Reads the data file at *path* and the parallel tag file at *tagpath*
    line-by-line, and writes the surviving pairs to <path>.10 / <tagpath>.10.
    """
    keep = {'health', 'consumer electronics', 'family relationships',
            'entertainment music', 'home garden', 'science mathematics',
            'beauty style', 'education reference', 'society culture',
            'business finance'}
    src = fileModel.open_file(path)
    src_tag = fileModel.open_file(tagpath)
    dst = fileModel.open_file(path + ".10", 'w')
    dst_tag = fileModel.open_file(tagpath + ".10", 'w')
    for data_line in src:
        tag_line = src_tag.readline()
        if tag_line.strip() in keep:
            dst.write(data_line)
            dst_tag.write(tag_line)
    src.close()
    src_tag.close()
    dst.close()
    dst_tag.close()
# removebadTag("G:\intellij\TopicModelForShortText\My_LDA\data2\yahooQA\\yahoo_question.txt.pd","G:\intellij\TopicModelForShortText\My_LDA\data2\yahooQA\\yahoo_question_tag.txt")

def read_data_to_svm(path):
    """Convert a whitespace-tokenised corpus into sparse svm-style lines.

    Word ids are assigned in order of first appearance across the whole file.
    Each output line is "<n_unique_words> id:count id:count ...", with the
    id:count pairs in first-occurrence order within the line.
    Returns the list of formatted lines.
    """
    vocab = {}
    formatted = []
    with open(path, 'r', encoding='utf-8') as src:
        for raw in src:
            counts = {}
            for token in raw.split():
                idx = vocab.setdefault(token, len(vocab))
                counts[idx] = counts.get(idx, 0) + 1
            pairs = ' '.join(f"{wid}:{cnt}" for wid, cnt in counts.items())
            formatted.append(str(len(counts)) + ' ' + pairs)
    return formatted


def get_idf(path, prespath):
    """Compute per-word inverse document frequency over the corpus at *path*
    and print the words sorted by idf, descending.

    *prespath* lines are whitespace-split as "<_> <word> <class>"; it supplies
    each word's class label and triggers the idf = docCount / docFreq
    conversion. NOTE(review): words appearing in the corpus but missing from
    *prespath* keep a raw doc-frequency and no "class" key, which would raise
    KeyError below — presumably *prespath* covers the full vocabulary; verify.
    Words with class < 2215 are printed with a "-----------" marker.
    """
    stats = {}
    n_docs = 0
    with open(path, 'r', encoding='utf-8') as corpus:
        for doc in corpus:
            n_docs += 1
            # dict.fromkeys dedupes while preserving first-occurrence order,
            # which matters for stable tie-breaking in the sort below
            for word in dict.fromkeys(doc.split()):
                entry = stats.setdefault(word, {"idf": 0})
                entry["idf"] += 1

    with open(prespath, 'r', encoding='utf-8') as pres:
        for row in pres:
            cols = row.split()
            info = stats[cols[1]]
            info["class"] = int(cols[2])
            info["idf"] = n_docs / info["idf"]

    ranked = sorted(stats.items(), key=lambda kv: kv[1]["idf"], reverse=True)
    for word, info in ranked:
        if info["class"] < 2215:
            print(word, info["idf"], info["class"], "-----------")
        else:
            print(word, info["idf"], info["class"])

def format_tf_libsvm(datapath, tagpath):
    """Write a dense, term-frequency-normalised matrix for *datapath* to
    <datapath>.libsvm: one row per document, one column per vocabulary word,
    values rounded to 4 decimals ("0" for absent words).

    *tagpath* is accepted for call-site compatibility but is not used here.
    NOTE(review): relies on read_wordmap() for the word -> column-index map;
    it is not defined or imported in this file — confirm where it comes from.

    Fixes: the output handle f_write was never closed; both handles are now
    closed even if a line fails to format.
    """
    wordmap = read_wordmap(datapath)

    f = fileModel.open_file(datapath)
    f_write = fileModel.open_file(datapath + ".libsvm", 'w')
    try:
        for index, line in enumerate(f):
            tokens = line.split()
            counts = [0] * len(wordmap)
            for token in tokens:
                counts[wordmap[token]] += 1
            row = ["0" if c == 0 else str(round(c / len(tokens), 4))
                   for c in counts]
            print(index)  # progress indicator, kept from the original
            f_write.write(' '.join(row) + "\n")
    finally:
        f.close()
        f_write.close()  # original leaked this handle

# rootpath = "D:\\javaEE\\MLDA\\data\\QA_Q\\"
# format_tf_libsvm(rootpath+"yahoo_question.txt.pd.filter.final.10",rootpath+"yahoo_question_tag.txt.final.10")

def thetatolibsvm(path, tagpath):
    """Convert a dense theta matrix plus a parallel tag file into libsvm
    format at <path>.svm.

    Tags are mapped to integer labels in order of first appearance; each
    output line is "<label> 1:v1 2:v2 ..." with 1-based feature indices and
    the feature values copied verbatim from the input.
    """
    labels = {}
    with open(path, 'r', encoding='utf-8') as theta_f, \
         open(tagpath, 'r', encoding='utf-8') as tag_f, \
         open(path + ".svm", 'w', encoding='utf-8') as out:
        for theta_line, raw_tag in zip(theta_f, tag_f):
            label = labels.setdefault(raw_tag.strip(), len(labels))
            features = theta_line.split()
            out.write(str(label))
            out.write(''.join(" %d:%s" % (pos + 1, val)
                              for pos, val in enumerate(features)))
            out.write("\n")





def cutData(datapath, tagpath, num=1000):
    """Keep at most *num* documents per tag.

    Reads *datapath* and the parallel *tagpath* line-by-line and writes the
    first *num* occurrences of each tag (and the matching data lines) to
    <datapath>.<num> / <tagpath>.<num>.

    Fixes: the per-tag cap was hard-coded to 1000, silently ignoring *num*
    (the default of 1000 preserves the old behavior); files are now closed
    via `with` even on error.
    """
    with open(datapath, "r", encoding="utf-8") as f_data, \
         open(tagpath, "r", encoding="utf-8") as f_tag, \
         open(datapath + "." + str(num), "w", encoding="utf-8") as w_data, \
         open(tagpath + "." + str(num), "w", encoding="utf-8") as w_tag:
        seen = {}
        for data_line, tag_line in zip(f_data, f_tag):
            seen[tag_line] = seen.get(tag_line, 0) + 1
            if seen[tag_line] > num:  # was hard-coded 1000, ignoring `num`
                continue
            w_data.write(data_line)
            w_tag.write(tag_line)


def cutData_train_test(datapath, tagpath, num=1000):
    """Split the corpus into a train set (first *num* docs per tag) and a
    test set (the next *num* docs per tag); further docs of a tag are dropped.

    The data lines are first converted to sparse svm format via
    read_data_to_svm(). Six files are written:
      <datapath>.train.<num> / <tagpath>.train.<num>  - train data / int labels
      <datapath>.test.<num>  / <tagpath>.test.<num>   - test data / int labels
      <datapath>.med.train / <datapath>.med.test      - "<label> <data>" lines
    Tags are mapped to integer labels in order of first appearance.

    Fixes: the split thresholds were hard-coded to 1000/2000, silently
    ignoring *num* (the default of 1000 preserves the old behavior); all
    handles are now closed via `with` even on error.
    """
    sk_data = read_data_to_svm(datapath)

    with open(tagpath, "r", encoding="utf-8") as f_tag, \
         open(datapath + ".train." + str(num), "w", encoding="utf-8") as w_data, \
         open(tagpath + ".train." + str(num), "w", encoding="utf-8") as w_tag, \
         open(datapath + ".test." + str(num), "w", encoding="utf-8") as w_data2, \
         open(tagpath + ".test." + str(num), "w", encoding="utf-8") as w_tag2, \
         open(datapath + ".med.train", "w", encoding="utf-8") as w_med_train, \
         open(datapath + ".med.test", "w", encoding="utf-8") as w_med_test:
        tag_counts = {}
        tag_ids = {}
        for data_line, tag_line in zip(sk_data, f_tag):
            if tag_line not in tag_counts:
                tag_counts[tag_line] = 0
                tag_ids[tag_line] = len(tag_ids)
            tag_counts[tag_line] += 1
            if tag_counts[tag_line] > 2 * num:    # was hard-coded 2000
                continue
            label = str(tag_ids[tag_line])
            if tag_counts[tag_line] > num:        # was hard-coded 1000: test split
                w_med_test.write(label + " " + data_line + "\n")
                w_data2.write(data_line + "\n")
                w_tag2.write(label + "\n")
            else:                                 # train split
                w_med_train.write(label + " " + data_line + "\n")
                w_data.write(data_line + "\n")
                w_tag.write(label + "\n")

def revet_dict(path, newpath):
    """Swap the first two whitespace-separated columns of each line of *path*
    and write them tab-separated to *newpath* (e.g. "0 word" -> "word\t0")."""
    with open(path, "r", encoding='utf-8') as src, \
         open(newpath, "w", encoding='utf-8') as dst:
        for row in src:
            fields = row.split()
            dst.write("%s\t%s\n" % (fields[1], fields[0]))