# -*- coding: utf-8 -*-
"""
# 使用了 [jieba==0.42.1]，遵循其 [MIT] 许可证，原始代码来源：[https://github.com/fxsjy/jieba]
# 使用了 [matplotlib==3.7.4]，遵循其 [PSF] 许可证，原始代码来源：[https://matplotlib.org]
# 使用了 [numpy==1.24.4]，遵循其 [BSD-3-Clause] 许可证，原始代码来源：[https://www.numpy.org]
# 使用了 [openpyxl==3.1.2]，遵循其 [MIT] 许可证，原始代码来源：[https://openpyxl.readthedocs.io]
# 使用了 [pandas==2.0.3]，遵循其 [BSD 3-Clause License] 许可证，原始代码来源：[https://pandas.pydata.org]
# 使用了 [pdfminer.six==20240706]，遵循其 [MIT] 许可证，原始代码来源：[https://github.com/pdfminer/pdfminer.six]
# 使用了 [scikit-learn==1.3.2]，遵循其 [new BSD] 许可证，原始代码来源：[http://scikit-learn.org]
# 使用了 [scipy==1.10.1]，遵循其 [BSD License] 许可证，原始代码来源：[https://scipy.org/]
"""
import datetime
import glob
import math
import os
import re
import shutil
import sys
import time
from concurrent.futures import ThreadPoolExecutor

import jieba.analyse as analyse
import matplotlib.gridspec as gridspec
import matplotlib.patches as mpatches
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from openpyxl.styles import Alignment
from openpyxl.styles import Font
from openpyxl.styles.borders import Border, Side
from pandas.plotting import parallel_coordinates as pc
from pdfminer.high_level import extract_text
from pdfminer.pdfparser import PDFSyntaxError
from scipy import stats
from scipy.sparse import csr_matrix
from sklearn.cluster import KMeans
from sklearn.decomposition import LatentDirichletAllocation as LDA
from sklearn.metrics import silhouette_score
from sklearn.preprocessing import StandardScaler



class Timer:#AI optimization: class decorator, for function timing.
    """Decorator that prints the wrapped function's docstring before the
    call and its elapsed wall time after, then passes the result through."""
    def __init__(self, func):
        self.func = func
    def __call__(self, *args, **kwargs):
        print(self.func.__doc__)  # announce which pipeline step is running
        # perf_counter is monotonic: unlike time.time() it cannot go backwards
        # if the system clock is adjusted mid-run.
        start = time.perf_counter()
        result = self.func(*args, **kwargs)
        end = time.perf_counter()
        # fixed typo: "secends" -> "seconds"
        print(f"  Function: {self.func.__name__}, time cost: {end - start:.2f} seconds.")
        return result
    
class Config:#AI optimize 01: all paths and tunables in one place.
    # --- directories ---
    BASE_DIR = "/media/sq/F:/A_Andrewz/B_Study/CSSCI/"#set your CSSCI directory.
    PDF_DIR = os.path.join(BASE_DIR, "01_input")#input directory.
    TXT_DIR = os.path.join(BASE_DIR, "02_output")#output directory.
    PROCESSED_DIR = os.path.join(BASE_DIR, "03_archive")#archive directory.
    CURRENT_DIR = os.path.dirname(os.path.abspath(__file__))
    STOPWORDS_PATH = os.path.join(CURRENT_DIR, '..', 'docs', 'stopwords.txt')
    TFIDF_PATH = os.path.join(CURRENT_DIR, '..', 'docs', 'tfidf_20250421_empty.xlsx')
    # --- analysis parameters ---
    N_CLUSTER = 8             # KMeans cluster count.
    APPEARTIMES = 5           # minimum document count for a keyword to be kept.
    N_KEYWORDS = 20           # TF-IDF keywords extracted per document.
    SCALE_RATE = 100000000.0  # scaling factor applied to TF-IDF weights.
    P_THRES = 0.001           # ANOVA significance threshold.
    ANALYSIS_KEYS = ['n', 'nr', 'ns', 'nt', 'nw', 'nv', 'v', 'vn', 'an', 'av', 'dv']  # jieba POS filter.
    MIN_TXT_SIZE = 2048       # txt files at or below this size are flagged.
    # --- plotting ---
    FONT_SIZE = 12
    ALPHA_VAL = 0.8
    ROTATION = 80
    COLOR = ['red', 'green', 'blue', 'cyan', 'orange', 'purple']
    GRID_STYLE = {'axis': 'x', 'color': 'lightgray', 'linestyle': ':', 'linewidth': 1.2, 'alpha': 0.7}

# Number of LDA topics; written by lda_analyse() and read by get_kmeans_label().
N_TOPICS=0

# Thin border applied to every cell when DataFrames are exported to Excel.
thin_border=Border(left=Side(style='thin'),
                   right=Side(style='thin'),
                   top=Side(style='thin'),
                   bottom=Side(style='thin'))

def save_text(file_path,text,name):
    """Append *text* to <file_path>/<name>.txt, creating the file if absent.

    Opened in append mode: repeated calls with the same name accumulate.
    """
    fn=os.path.join(file_path,name+".txt")  # was string concat with os.sep
    with open(fn,"a",encoding='utf-8') as w:
        w.write(text)
        # removed redundant w.close(): the with-block already closes the file
        
def save_dataframe(df,prefix=""):
    """Write *df* to ./result/<prefix>_<tag>_<timestamp>.xlsx with header
    freeze, centred 9pt cells and thin borders; return the file path."""
    stamp=datetime.datetime.now().strftime('%Y%m%d_%H%M%S')
    if not os.path.exists(r"./result"):
        os.makedirs(r"./result")
    label=prefix+"_" if prefix!="" else ""
    tag=re.split(r'[_.]',os.path.basename(sys.argv[0]))[1]  # middle piece of the script name
    out_path=r"./result/"+label+tag+"_"+stamp+".xlsx"
    with pd.ExcelWriter(out_path,engine='openpyxl') as writer:
        df.to_excel(writer,index=False)
        book=writer.book
        sheet1=writer.sheets['Sheet1']
        sheet1.freeze_panes='A2'  # keep the header row visible while scrolling
        centred=Alignment(horizontal='center', vertical='center', wrapText=True)
        small_font=Font(name="宋体",sz=9)
        for row in sheet1.iter_rows(min_row=1, max_col=sheet1.max_column, max_row=sheet1.max_row):
            for cell in row:
                cell.alignment=centred
                cell.font=small_font
                cell.border=thin_border
        for sheet in book:
            for idx in range(1, sheet.max_row + 1):
                sheet.row_dimensions[idx].height=12
    return out_path
    
def save_fig(fig,prefix=""):
    """Save *fig* as a 300-dpi PNG under ./result/ with a timestamped name;
    return the saved file path."""
    FIGURE_DPI=300
    stamp=datetime.datetime.now().strftime('%Y%m%d_%H%M%S')
    if not os.path.exists(r"./result"):
        os.makedirs(r"./result")
    label=prefix+"_" if prefix!="" else ""
    tag=re.split(r'[_.]',os.path.basename(sys.argv[0]))[1]  # middle piece of the script name
    out_path=r"./result/"+label+tag+"_"+stamp+".png"
    fig.savefig(out_path,dpi=FIGURE_DPI)
    return out_path


@Timer
def get_txt():
    """Step 01:get pdf from input, convert to txt, move pdf to archive."""
    # Base names (no extension) of txt files produced by earlier runs.
    li_archive_file_names={os.path.splitext(os.path.basename(x))[0] for x in glob.glob(os.path.join(Config.TXT_DIR,"*.txt"))}
    cnt_converted=0;cnt_not_read=0
    for fp_pdf in glob.glob(os.path.join(Config.PDF_DIR,"*.pdf")):
        base_name=os.path.splitext(os.path.basename(fp_pdf))[0]#without path and suffix.
        if base_name in li_archive_file_names:#already converted in a previous run.
            try:#try to move pdf into the archive directory.
                shutil.move(fp_pdf,Config.PROCESSED_DIR)
            except shutil.Error:#a pdf with this name already exists in the archive.
                os.remove(fp_pdf)
            print(f"Exist file: {base_name}.")
        else:
            try:
                text=extract_text(fp_pdf)
                # Strips all whitespace plus "-NN-" / "—NN—" runs (presumably page-number artifacts — confirm).
                text=re.sub(r"\s+|[-—][0-9]{1,2}[-—]","",text.strip())#simple replace function.
                save_text(Config.TXT_DIR,text,base_name)
                cnt_converted+=1
                shutil.move(fp_pdf,Config.PROCESSED_DIR)
                print("Converted new file: %s. Total converted files:%d."%(base_name,cnt_converted))
            except PDFSyntaxError as e:
                cnt_not_read+=1
                print(f"PDF analyse failed: {base_name} - {str(e)}, Total not read files:{cnt_not_read}.")
                continue
            except Exception as e:#best-effort: log and keep converting the remaining PDFs.
                cnt_not_read+=1
                print(f"Unknown failure: {base_name} - {str(e)}, Total not read files:{cnt_not_read}.")
                continue
    # Flag txt files small enough to suggest a failed or partial extraction.
    for txt in glob.glob(os.path.join(Config.TXT_DIR, "*.txt")):
        if os.path.getsize(txt) <= Config.MIN_TXT_SIZE:
            print("Small file: %s."%(os.path.basename(txt)))

def get_tfidf(text_in="",n=Config.N_KEYWORDS):
    """Step 02-02: get TF-IDF keywords and scale.

    Returns a (keywords, weights) pair; both empty when nothing can be
    extracted. Weights are normalised to sum to Config.SCALE_RATE.
    """
    if not text_in.strip():
        return [],[]
    li_tags=analyse.extract_tags(sentence=text_in,topK=n,withWeight=True,allowPOS=Config.ANALYSIS_KEYS)
    if not li_tags:#bug fix: jieba can return nothing -> total would be 0 and divide below would raise.
        return [],[]
    li_tags.sort(key=lambda x:x[1],reverse=True)#sort tags with weight.
    total=sum(w for _,w in li_tags)
    li_o=[(k,w*Config.SCALE_RATE/total) for k,w in li_tags if pd.notna(k)]#change percentage to int for LDA.
    if not li_o:#bug fix: zip(*[]) would make the caller's 2-way unpack fail.
        return [],[]
    return(zip(*li_o))

def process1file(fp: str, exist_set: set) -> dict:
    """Step 02-01: read one txt file and build its TF-IDF row.

    The returned dict maps 'title' to the file name, each extracted keyword
    to its weight, and every keyword in *exist_set* not found here to 0.0.
    """
    with open(fp, 'r', encoding='utf-8') as fh:
        content = ''.join(ln.strip() for ln in fh if ln.strip())
        keys, weights = get_tfidf(content)
    row = {'title': os.path.basename(fp)}
    row.update(zip(keys, weights))
    for missing in exist_set - set(keys):  # pad columns absent from this document
        row[missing] = 0.0
    return row

@Timer
def load_and_make_tfidf_dataframe(fp_tfidf):
    """Step 02：make TF-IDF matrix."""
    analyse.set_stop_words(Config.STOPWORDS_PATH)#register the stop-word list with jieba.
    df_tfidf=pd.read_excel(fp_tfidf,engine='openpyxl').set_index('title')
    exist_set=set(df_tfidf.columns);exist_titles=set(df_tfidf.index)
    # Only txt files that are not already a row in the workbook.
    txt_files=[f for f in os.listdir(Config.TXT_DIR) if f.endswith(".txt") and f not in exist_titles]
    with ThreadPoolExecutor() as executor:#AI optimization: process files in parallel.
        batch_size=100
        futures=[]
        for i in range(0,len(txt_files),batch_size):#step=batch_size.
            batch=txt_files[i:i+batch_size]
            futures.extend(executor.submit(process1file,os.path.join(Config.TXT_DIR,fname),exist_set) for fname in batch)
        new_data = [f.result() for f in futures] if futures else []#blocks until every file is processed.
    if new_data:
        df_final = pd.concat([df_tfidf,pd.DataFrame(new_data).set_index('title')],axis=0,copy=False)
    else:
        df_final = df_tfidf
    # NOTE(review): fillna's downcast= argument is deprecated in pandas >= 2.2 — confirm target version.
    df_final = df_final.reset_index().fillna(0.0, downcast='float')#fill na with 0.0.
    # Keep keywords that are non-zero in at least APPEARTIMES documents.
    # NOTE(review): the 'title' column only survives this mask because strings != 0.0 in every row.
    mask = df_final.ne(0.0).sum().ge(Config.APPEARTIMES)
    return save_dataframe(df_final.loc[:, mask], prefix="TF-IDF")

@Timer
def make_csr_matrix(fp):
    """Step 03-01: make CSR form TF-IDF matrix, return keywords."""
    frame = pd.read_excel(fp, engine='openpyxl')
    n_docs = len(frame)
    keywords = [c for c in frame.columns if c != 'title']  # every column except the document name
    sparse = csr_matrix(frame[keywords].astype(np.float32).values)
    return n_docs, sparse, keywords
    
def sqrt_ceil(n):
    """Return ceil(sqrt(n)) for a non-negative integer n.

    Uses math.isqrt for exact integer arithmetic: the previous
    float-based n ** 0.5 can round incorrectly for large n.
    """
    root = math.isqrt(n)
    return root if root * root == n else root + 1

@Timer
def lda_analyse(fp):
    """Step 03: make CSR form TF-IDF matrix, then do LDA."""
    # Bug fix: the docstring used to follow the global statement, so it was
    # not the function's first statement, __doc__ was None, and the Timer
    # decorator printed "None" instead of the step description.
    global N_TOPICS
    (cnt_paper,cv,li_feature)=make_csr_matrix(fp)#make cv data.
    N_TOPICS=sqrt_ceil(cnt_paper)#heuristic: topic count = ceil(sqrt(paper count)).
    lda=LDA(n_components=N_TOPICS,max_iter=100,doc_topic_prior=1.0/N_TOPICS,topic_word_prior=0.01,learning_method='batch',random_state=0)#create lda model.
    lda_fit=lda.fit_transform(cv)#fit data; rows are per-document topic distributions.
    df_topic=pd.DataFrame(np.array(lda_fit),columns=[f"{i}" for i in range(N_TOPICS)],dtype=np.float32)
    df_components=pd.DataFrame(np.array(lda.components_),columns=li_feature)#topic-word weight matrix.
    fp_topics=save_dataframe(df_topic,"Topics")
    fp_keywords=save_dataframe(df_components,"Keywords_allocation")
    return(fp_topics,fp_keywords)

@Timer
def get_kmeans_label(file_path: str, n_clusters: int = Config.N_CLUSTER) -> str:
    """Step 04: do Kmeans cluster analysis."""
    # Bug fix: the docstring used to follow the global statement, so it was
    # not the function's first statement, __doc__ was None, and the Timer
    # decorator printed "None" instead of the step description.
    global N_TOPICS
    df=pd.read_excel(file_path)
    X=StandardScaler().fit_transform(df.select_dtypes(include='number'))#standardise numeric columns only.
    li_inertia=[]
    li_sihouette=[]
    k_range=N_TOPICS+5#exclusive upper bound for the diagnostic sweep below.
    # Sweep k to plot the elbow (inertia) and silhouette curves so the
    # operator can sanity-check the configured n_clusters.
    for i in range(2,k_range):
        km=KMeans(n_clusters=i,n_init=100,random_state=42)
        labels=km.fit_predict(X)
        score = silhouette_score(X, labels)
        li_inertia.append(km.inertia_)
        li_sihouette.append(score)
    plt.figure(figsize=(10,5))
    ax1 = plt.gca()
    ax1.plot(range(2, k_range), li_inertia, color="g")
    ax1.set_ylabel("Inertia", color="g")#set left y label.
    ax1.tick_params(axis='y', colors="g")
    ax1.set_ylim(min(li_inertia), max(li_inertia))
    ax2 = ax1.twinx()#share the x axis, independent right-hand y axis.
    ax2.plot(range(2, k_range), li_sihouette, color="r")
    ax2.set_ylabel("Silhouette", color="r")#set right y label.
    ax2.tick_params(axis='y', colors="r")
    ax2.set_ylim(min(li_sihouette), max(li_sihouette))

    plt.xticks(ticks=range(2, k_range), labels=[str(i) for i in range(2, k_range)])
    plt.grid(True)
    plt.show()#blocks until the diagnostic figure is closed.

    # Final clustering with the configured k and more restarts.
    km=KMeans(n_clusters,n_init=1000,random_state=42)
    labels=km.fit_predict(X)
    df['label']=labels
    return(save_dataframe(df,prefix="Kmeans"))

@Timer
def f_oneway_check(fp_k: str, p_threshold: float =Config.P_THRES) -> str:
    """Step 05: do F-check, set p, get the significant variables."""
    frame=pd.read_excel(fp_k)
    frame.columns=["T#"+str(c) for c in frame.columns]  # tag topic columns
    frame=frame.rename(columns={"T#label":"label"})     # but keep 'label' untagged
    grouped=frame.groupby('label')
    significant=[]
    for col in frame.columns.drop('label'):
        samples=[grp[col].values for _,grp in grouped if not grp.empty]
        if len(samples)<2:
            continue#skip: ANOVA needs at least two groups.
        f_val,p_val=stats.f_oneway(*samples)
        if p_val<p_threshold:
            significant.append((col,f_val,p_val))
    out=pd.DataFrame(significant,columns=["Topic","F_value","p_value"])
    return save_dataframe(out,prefix="F-check")

def cm2inch(value):
    """Convert a length in centimetres to inches (for matplotlib figsize)."""
    inches = value / 2.54
    return inches

def process_label_data(ax,label,df,topics,draw_keys,display_keys):
    """Step 06-01: draw subplot of Kmeans cluster.

    Draws the strongest topics of cluster *label* as parallel coordinates
    on *ax* and returns (draw_keys, display_keys) extended with them.
    """
    filtered_df=df[df['label']==label][topics]#include significant topics with "label" and "number".
    filtered_df=filtered_df.drop('label',axis=1)#drop 'label' column.
    # nlargest(21): presumably 20 topics plus the 'number' column — confirm.
    top20_features=filtered_df.mean(skipna=True).nlargest(21)#get the largest 20 topics.
    top_features=top20_features.index.tolist()
    draw_set=set(draw_keys).union(top_features)#accumulate across clusters for the summary plot.
    display_set=set(display_keys).union(top_features)
    ax.clear()
    if "number" in top_features:
        pc(pd.DataFrame(top20_features).T,'number',ax=ax,color=Config.COLOR)
        top_features.remove("number")#'number' is the class column, not a topic to label.
    patch_0=mpatches.Patch(label="Cluster#"+str(label),color='white')
    # Legend is created then immediately removed; the subplot title carries the cluster name.
    ax.legend(handles=[patch_0],bbox_to_anchor=(0.8, 0.8),loc='center left').remove()
    ax.set(title="Cluster#"+str(label), xticks=range(len(top_features)),xticklabels=top_features, facecolor='white')
    ax.tick_params(rotation=Config.ROTATION,labelsize=Config.FONT_SIZE)
    ax.grid(**Config.GRID_STYLE)  #get grid style from Config.
    return list(draw_set), list(display_set)

@Timer
def draw_paralle_line(fp_k,fp_f):
    """Step 06: draw the means value of clusters."""
    df_k=pd.read_excel(fp_k)
    df_k.index.name='number'#set index name: number.
    df_k.reset_index(inplace=True)#change index to column.
    df_k.columns=["T#"+str(col) for col in df_k.columns]#match the F-check column naming.
    df_k=df_k.rename(columns={"T#label":"label"})
    df_k=df_k.rename(columns={"T#number":"number"})
    df_f=pd.read_excel(fp_f)
    li_topics=df_f["Topic"].values.tolist()#get the significant topic list.
    li_info=['number','label']
    li_topics.extend(li_info)
    plt.rcParams['font.sans-serif']=['simhei']#SimHei so CJK keyword labels render.
    plt.rcParams.update({'font.size': Config.FONT_SIZE})
    fig=plt.figure(figsize=(cm2inch(24.0),cm2inch(24.0)))
    li_draw_keys=['number','label']
    li_display_keys=[]
    labels = sorted(df_k['label'].unique())
    len_labels=len(labels)
    rows=(len_labels + 1) // 2#two cluster subplots per row.
    gs = gridspec.GridSpec(rows + 1, 2)#make grids; extra row hosts the summary plot.
    axes = [fig.add_subplot(gs[pos//2, pos%2]) for pos in range(len(labels))]
    ax_bt = fig.add_subplot(gs[rows:, :])#bottom axis spans both columns.
    for ax,label in zip(axes,labels):#draw the subplots.
        (li_draw_keys,li_display_keys)=process_label_data(ax,label,df_k,li_topics,li_draw_keys,li_display_keys)
    data00=df_k[li_draw_keys]
    data00=data00.drop('number',axis=1)
    mean00=data00.groupby('label').mean()#per-cluster mean of every drawn topic.
    mean00_df=pd.DataFrame(mean00)
    mean00_df=mean00_df.reset_index()#put index into data.
    li_draw_keys.remove('number')
    li_draw_keys.remove('label')
    # Only topics in li_display_keys get a visible x tick label; others are blanked.
    li_display=[x if x in li_display_keys else "" for x in li_draw_keys]
    pc(mean00_df,'label',ax=ax_bt,color=Config.COLOR)
    ax_bt.legend(loc='center left',bbox_to_anchor=(0.9,0.6))
    ax_bt.set_xticks(range(0,len(li_display)))
    ax_bt.set_xticklabels(li_display,rotation=Config.ROTATION,fontsize=Config.FONT_SIZE)
    styles=['-', '--', ':','-.']#line styles.
    for i, line in enumerate(ax_bt.get_lines()):
        line.set_linestyle(styles[i % len(styles)])#cycle styles so clusters stay distinguishable.
    ax_bt.yaxis.set_tick_params(gridOn=False)
    ax_bt.xaxis.set_tick_params(gridOn=False)
    ax_bt.set_facecolor('white')
    plt.tight_layout()
    plt.show()
    save_fig(fig,prefix="Draw")
    # Strip the "T#" prefix to recover the numeric topic ids to explain in Step 07.
    li_rst=[int(re.sub(r"T#","",x)) for x in li_display_keys if x!="" and x!="number"]
    return(li_rst)
    
def print_topic(array,feature,n_top_words,topics):
    """Step 07-01: print the top-weighted words of each selected topic."""
    for idx, weights in enumerate(array):
        if idx not in topics:
            continue  # only topics flagged as significant are printed
        print("Topic #%d:"%idx)
        order = weights.argsort()[:-n_top_words-1:-1]  # indices of the n largest weights, descending
        print([feature[i] for i in order])
            
@Timer
def explain_lda(fp_allocation,fp_kmeans,li_topics):
    """Step 07: print Top10 words of the significant Topics."""
    alloc=pd.read_excel(fp_allocation)
    features=alloc.columns.values.tolist()
    print_topic(alloc.to_numpy(),features,10,li_topics)  # top 10 words per significant topic
    clusters=pd.read_excel(fp_kmeans)
    print(clusters['label'].value_counts())  # cluster size summary

@Timer
def main_stream(fp_tfidf):
    """main stream."""
    get_txt()#Step 01: convert PDFs to txt and archive them.
    fp_tfidf=load_and_make_tfidf_dataframe(fp_tfidf)#Step 02: get and update tfidf file.
    (fp_t,fp_a)=lda_analyse(fp_tfidf)#Step 03: LDA -> topic and keyword-allocation files.
    fp_k=get_kmeans_label(fp_t)#Step 04: cluster documents by topic distribution.
    fp_f=f_oneway_check(fp_k)#Step 05: ANOVA to find significant topics.
    li_topics=draw_paralle_line(fp_k,fp_f)#Step 06: visualise cluster profiles.
    explain_lda(fp_a,fp_k,li_topics)#Step 07: fp_a a is for allocation.

if __name__=="__main__":
    # Entry point: run the full pipeline starting from the seed TF-IDF workbook.
    main_stream(Config.TFIDF_PATH)
