from data_processing import get_from_file
from word_cloud import display_word_cloud
from topic_analysis import do_lda, print_top_words
from clustering import do_kmeans, visualize_kmeans
from classificiation import do_svm
import datetime as dt
from collections import Counter
import matplotlib.pyplot as plt
import numpy as np
import joblib

# Analysis window (inclusive) and corpus language.
begin_date = dt.datetime(2022, 5, 1)
end_date = dt.datetime(2022, 7, 31)
period = "2022.05.01-2022.07.31"
lang = "en"

# Raw input corpora and the cached tokenized output.
weibo_data_path = "data/weibo_2022.02.01-2022.11.30.jsonl"
twitter_data_path = "data/twitter_2022.02.24-2022.11.26.jsonl"
processed_data_path = f"data/{period}_tokenized_{lang}.xlsx"

# Word-cloud mask image and render target.
word_cloud_mask_path = "data/plane.png"
word_cloud_file_path = f"out/word_cloud_{period}_{lang}.png"

# LDA topic-analysis settings and persisted artifacts.
feature_word_cnt = 500
topic_cnt = 9
topic_top_word_cnt = 25
topic_analysis_res_path = f"out/lda_{feature_word_cnt}_{topic_cnt}_{lang}.html"
lda_path = f"out/lda_{period}_{lang}.mdl"
feature_name_path = f"out/feature_name_{period}_{lang}.mdl"

# K-means clustering settings and plot outputs.
cluster_cnt = 8
kmeans_res_path = f"out/kmeans_{period}_{cluster_cnt}_{lang}.png"
tendency_res_path = f"out/tendency_{period}_{lang}.png"
reduced_dimension = 2

# SVM classification settings and persisted artifacts.
kernel = "poly"
clf_path = f"out/classifier_{period}_{lang}.mdl"
confusion_matrix_path = f"out/confusion_matrix_{period}_{kernel}_{lang}.png"

# Load the posts for the configured window/language; presumably returns a pandas
# DataFrame (later code uses data.shape[0] and data.loc[i]["time"]) and caches
# the tokenized result at processed_data_path — confirm against data_processing.
data = get_from_file(weibo_data_path, twitter_data_path, processed_data_path, lang=lang)
# Render a word cloud of the corpus, shaped by the plane mask image.
display_word_cloud(data, word_cloud_mask_path, word_cloud_file_path)

# Fit LDA and persist the model plus its feature vocabulary for later reuse.
# NOTE(review): text_vectors' shape is not visible here; downstream indexing
# (value[i][j] for j < topic_cnt) suggests per-document topic vectors — verify.
lda, feature_names, text_vectors = do_lda(data, feature_word_cnt, topic_cnt, topic_analysis_res_path)
joblib.dump(lda, lda_path)
joblib.dump(feature_names, feature_name_path)
print_top_words(lda, feature_names, topic_top_word_cnt)
# Cluster the document vectors; labels[i] is the cluster id of row i.
labels = do_kmeans(text_vectors, cluster_cnt)
visualize_kmeans(text_vectors, labels, reduced_dimension, kmeans_res_path)

# Get the tendency graph data: for each day in [begin_date, end_date], count how
# many posts fall in each cluster. The original rescanned the whole table once
# per day (O(days * rows)); instead, group labels by day in a single pass.
delta = dt.timedelta(days=1)
y_label = []     # one row per day: [count of cluster 0, ..., count of cluster cluster_cnt-1]
date_label = []  # "%Y-%m-%d" string per day, parallel to y_label

# Single pass: bucket each row's cluster label under its date prefix.
# time[:10] == str_time is equivalent to time.startswith(str_time) because
# a "%Y-%m-%d" string is exactly 10 characters long.
daily_counts = {}
for i in range(data.shape[0]):
    day_key = data.loc[i]["time"][:10]
    daily_counts.setdefault(day_key, Counter())[labels[i]] += 1

while True:
    str_time = begin_date.strftime("%Y-%m-%d")
    date_label.append(str_time)
    # Counter returns 0 for missing cluster ids, matching the original behavior.
    day_count = daily_counts.get(str_time, Counter())
    y_label.append([day_count[j] for j in range(cluster_cnt)])
    if begin_date == end_date:
        break
    begin_date += delta

# Plot one line per cluster: daily post counts across the analysis period.
xs = [dt.datetime.strptime(d, '%Y-%m-%d').date() for d in date_label]
plt.figure(figsize=(10, 6), dpi=100)
plt.tick_params(labelsize=10)
for cluster_idx in range(cluster_cnt):
    # Column cluster_idx of y_label is that cluster's daily count series.
    series = [day_row[cluster_idx] for day_row in y_label]
    plt.plot(xs, series, label="topic %d" % (cluster_idx + 1))
plt.legend()
plt.ylabel("count")
plt.savefig(tendency_res_path)
plt.show()

# Get the cluster words: for every k-means cluster, combine the LDA topics'
# word distributions weighted by the cluster's aggregate vector, then print
# the top words.
# NOTE(review): indexing value[k][t] for t < topic_cnt assumes text_vectors
# are per-document topic weights — confirm against do_lda.
value = [np.sum(text_vectors[np.where(labels == k)], axis=0) for k in range(cluster_cnt)]

for cluster_idx in range(cluster_cnt):
    # Scale each topic's word distribution by this cluster's mass on that topic,
    # then collapse over topics (same stack-then-sum as the original).
    weighted_topics = np.stack(
        [lda.components_[t] * value[cluster_idx][t] for t in range(topic_cnt)]
    )
    weight = np.sum(weighted_topics, axis=0)
    top_indices = weight.argsort()[:-topic_top_word_cnt - 1:-1]
    topic_w = " ".join([feature_names[j] for j in top_indices])
    print("Cluster #%d:" % (cluster_idx + 1))
    print(topic_w)

# Train an SVM (kernel configured above) to predict cluster labels from the
# document vectors; do_svm also writes the confusion-matrix image. The fitted
# classifier is persisted for later reuse.
clf = do_svm(text_vectors, labels, confusion_matrix_path, kernel)
joblib.dump(clf, clf_path)