import numpy as np
import math
def get_csv_data(csv_path):
    """Parse a '|,|'-delimited report file.

    Each non-empty line is either ``report_id|,|description`` (test data)
    or ``report_id|,|description|,|labels`` (training data).  The
    description is a space-separated token sequence; labels are
    space-separated area ids in [0, 17).

    Returns a list of tuples: ``(report_id, words)`` for unlabeled rows,
    or ``(report_id, words, label)`` where ``label`` is a length-17
    multi-hot 0/1 vector.
    """
    data_list = []
    with open(csv_path, 'r') as file:
        for line in file:
            line = line.strip()
            if not line:
                # Tolerate blank / trailing lines instead of IndexError-ing.
                continue
            groups = line.split('|,|')
            report_id = groups[0]
            words = groups[1].split(' ')
            if len(groups) >= 3:
                # Multi-hot encode the area ids into a fixed 17-slot vector.
                # FIX: np.int was deprecated in NumPy 1.20 and removed in
                # 1.24; plain int is the exact equivalent dtype.
                label = np.zeros(17, dtype=int)
                for area_id in groups[2].split(' '):
                    if area_id != '':
                        label[int(area_id)] = 1
                data_list.append((report_id, words, label))
            else:
                data_list.append((report_id, words))

    return data_list


# Load the labeled training split and the unlabeled test split.
# NOTE(review): paths are relative to the working directory — confirm the
# script is launched from the project root.
training_data_list=get_csv_data(r'data/track1_round1_train_20210222.csv')
testing_data_list=get_csv_data(r'data/track1_round1_testA_20210222.csv')

# print(training_data_list)
# word_set=set()

# Build per-report term counts, collect labels, and turn document
# frequencies into IDF weights: idf(w) = log(N / df(w)).
idf_dict = {}
label_list = []
rpt_word_dict_train = {}
for report_id, words, label in training_data_list:
    # Term counts for this report.
    # BUG FIX: the original tested `words != ''` — a list compared with a
    # string is always unequal, so empty tokens (from repeated spaces)
    # were being counted as words. Test the individual token instead.
    word_count = {}
    for word in words:
        if word != '':
            word_count[word] = word_count.get(word, 0) + 1
    # Document frequency: +1 for each report containing the word.
    for word in word_count:
        idf_dict[word] = idf_dict.get(word, 0) + 1
    rpt_word_dict_train[report_id] = word_count
    label_list.append(label)

# Convert raw document frequencies to IDF weights in place.
n_docs = len(training_data_list)
for word, df in idf_dict.items():
    idf_dict[word] = math.log(n_docs / df)

# Per-report term counts for the test split (no labels available here).
rpt_word_dict_test = {}
for report_id, words in testing_data_list:
    word_count = {}
    for word in words:
        # BUG FIX: the original compared the whole list `words` to ''
        # (always true), so empty tokens were counted; check the token.
        if word != '':
            word_count[word] = word_count.get(word, 0) + 1
    rpt_word_dict_test[report_id] = word_count

def get_tf_idf(idf_dict, rpt_word_dict):
    """Build one dense, L1-normalized TF-IDF row per report.

    Args:
        idf_dict: word -> IDF weight; its key order defines the column
            order of every output row.
        rpt_word_dict: report_id -> {word: count} term counts.

    Returns:
        A list (one entry per report, in dict iteration order) of lists of
        floats, each of length ``len(idf_dict)``: count * idf, normalized
        so each row sums to 1 (all-zero rows stay all-zero).
    """
    word_list = list(idf_dict.keys())
    rpt_tf_idf_list = []
    for word_count in rpt_word_dict.values():
        # count * idf for every vocabulary word; 0 when the word is absent.
        # (The original also re-checked `word in idf_dict`, but word_list is
        # drawn from idf_dict, so that branch was dead.)
        weighted = [word_count.get(word, 0) * idf_dict[word] for word in word_list]
        total = sum(weighted)
        if total == 0:
            # BUG FIX: a report sharing no word with the vocabulary used to
            # raise ZeroDivisionError; emit an all-zero row instead.
            rpt_tf_idf_list.append([0.0] * len(word_list))
        else:
            rpt_tf_idf_list.append([w / total for w in weighted])
    return rpt_tf_idf_list

# Dense feature matrices: one TF-IDF row per report, one column per
# vocabulary word (column order = idf_dict key order).
tf_idf_train= np.array(get_tf_idf(idf_dict,rpt_word_dict_train))
tf_idf_test= np.array(get_tf_idf(idf_dict,rpt_word_dict_test))




import numpy as np
import sklearn.model_selection as ms
import sklearn.svm as svm
import sklearn.metrics as sm
import matplotlib.pyplot as plt

# NOTE(review): c_range/gamma_range are never used below — the grid in
# `params` hard-codes its own C / gamma candidates. Either wire these in
# or delete them.
c_range = np.logspace(-2, 2, 5, base=2)
gamma_range = np.logspace(-2, 2, 5, base=2)
# Rows: training reports; columns: the 17 binary area labels.
label_array = np.array(label_list)
predictions=[]
# One-vs-rest scheme: fit an independent SVM per label column.
for i in range(len(label_list[0])):
    # svc = SVC(kernel='rbf', class_weight='balanced',)

    # Candidate kernels and hyper-parameters for the grid search.
    params = [
        {'kernel': ['linear'], 'C': [1, 10]},
        {'kernel': ['poly'], 'C': [1], 'degree': [2, 3]},
        {'kernel': ['rbf'], 'C': [1, 10], 'gamma': [1, 0.1]}
    ]
    model = ms.GridSearchCV(svm.SVC(probability=True),
                            params,
                            refit=True,
                            return_train_score=True,  # later sklearn versions require True for the score method
                            cv=5)
    # Binary target for this area id across all training reports.
    label = label_array[:,i]
    # clf = svc.fit(tf_idf_train, label)
    model.fit(tf_idf_train, label)
    model_best = model.best_estimator_

    # Hard 0/1 predictions for this label over every test report.
    prediction = model_best.predict(tf_idf_test)
    # print(prediction)
    predictions.append(prediction)

# Emit one line per test report in the input's "<id>|,|<labels>" format,
# with one 0/1 per area, in label-column order.
# BUG FIX: the inner range used len(label_list[i]) — indexing the
# *training* label list with the *test* row index, which raises
# IndexError whenever the test split is larger than the training split.
# The number of label columns is simply len(predictions).
for i, (report_id, _words) in enumerate(testing_data_list):
    result = [str(predictions[j][i]) for j in range(len(predictions))]
    print(report_id + '|,|' + ' '.join(result))