'''
author:        Wang Chenyang <cy-wang21@mails.tsinghua.edu.cn>
date:          2025-07-30 13:31:54
Copyright © Department of Physics, Tsinghua University. All rights reserved
'''

from turtle import title
import PhysBERT
import literature_format as lf
import numpy as np
import pickle
import os
import csv


TAGGED_ITEM_FNAME = "data/tagged_items_embed.pkl"
VECTORIZED_ITEM_FNAME = "data/tagged_items_embed_vectorized.pkl"


def main_update_tagged_items(
    rewrite_old: bool = False,
    clear_old: bool = False
):
    """Embed title+abstract of every tagged Zotero item and cache the vectors.

    Vectors are L2-normalized and stored as a {item_key: ndarray} dict in
    TAGGED_ITEM_FNAME (pickle).

    Parameters
    ----------
    rewrite_old : bool
        If True, re-embed items that are already present in the cache.
    clear_old : bool
        If True, discard any existing cache file and start from scratch.
    """
    # 1. Get all tagged items
    tagged_items = lf.zotero.get_all_tagged_items()

    # 2. Create BERT embedding model
    bert = PhysBERT.BERTEmbedding()

    # 3. Load the existing cache unless asked to start fresh
    if os.path.exists(TAGGED_ITEM_FNAME) and not clear_old:
        with open(TAGGED_ITEM_FNAME, "rb") as fp:
            embed_vec = pickle.load(fp)
    else:
        embed_vec = {}

    # Fixed: the enumerate() counter was never used — plain iteration suffices.
    for item_key in tagged_items:
        if item_key in embed_vec and not rewrite_old:
            continue
        title, abstract = lf.zotero.get_title_and_abstract(item_key)
        # Skip items with missing metadata — nothing meaningful to embed.
        if title.strip() == "" or abstract.strip() == "":
            continue
        raw_data = title + "\n\n" + abstract
        curr_vec = bert.embed_sentence(raw_data).detach().numpy().flatten()
        norm = np.linalg.norm(curr_vec)
        if norm == 0:
            # Fixed: dividing by a zero norm would store a NaN vector and
            # silently corrupt every later similarity lookup.
            continue
        embed_vec[item_key] = curr_vec / norm
        print("Processing: ", title, "shape = ", embed_vec[item_key].shape)

    # 4. Save
    with open(TAGGED_ITEM_FNAME, "wb") as f:
        pickle.dump(embed_vec, f)


def normalize():
    """Re-normalize every cached embedding to unit L2 norm and rewrite the cache."""
    with open(TAGGED_ITEM_FNAME, "rb") as src:
        cached = pickle.load(src)
    flat = {key: vec.flatten() for key, vec in cached.items()}
    unit = {key: vec / np.linalg.norm(vec) for key, vec in flat.items()}
    with open(TAGGED_ITEM_FNAME, "wb") as dst:
        pickle.dump(unit, dst)


def test_norm():
    """Sanity check: print key, shape and squared L2 norm of each cached vector.

    After normalize() has run, every squared norm should print as ~1.0.
    """
    with open(TAGGED_ITEM_FNAME, "rb") as cache_fp:
        cached = pickle.load(cache_fp)
    for key, vec in cached.items():
        squared_norm = np.sum(vec ** 2)
        print(key, vec.shape, squared_norm)


def lookup_nearest(query_str: str, top_k: int = 100):
    """Print the cached items most similar to *query_str*.

    Embeds the query with the same BERT model used for the cache, scores
    every cached item by dot product (cosine similarity, since the cached
    vectors are unit-normalized), and prints the best matches.

    Parameters
    ----------
    query_str : str
        Free-text query (e.g. a sentence describing the topic of interest).
    top_k : int
        Number of best matches to print. Generalizes the previously
        hard-coded limit of 100; the default preserves old behavior.
    """
    bert = PhysBERT.BERTEmbedding()
    query_vec = bert.embed_sentence(query_str).detach().numpy().flatten()
    query_vec = query_vec / np.linalg.norm(query_vec)

    with open(TAGGED_ITEM_FNAME, "rb") as fp:
        embed_vec = pickle.load(fp)
    print(len(embed_vec))

    # Dot product equals cosine similarity because both sides are unit vectors.
    result_list = [
        (item_key, query_vec.dot(item_vec))
        for item_key, item_vec in embed_vec.items()
    ]
    result_list.sort(key=lambda x: x[1], reverse=True)

    for item_key, dist in result_list[:top_k]:
        title, _ = lf.zotero.get_title_and_abstract(item_key)
        # NOTE(review): titles are printed as UTF-8 bytes — presumably to dodge
        # console codec errors on non-ASCII titles; confirm before changing.
        print(dist, item_key, title.encode("utf-8"))


def vectorize_embedding_base():
    """Convert the {key: vector} cache into a (key_list, matrix) pair and save it.

    Row i of the matrix is the embedding of key_list[i]; the pair is pickled
    to VECTORIZED_ITEM_FNAME for fast batched similarity computations.
    """
    with open(TAGGED_ITEM_FNAME, "rb") as src:
        embed_data = pickle.load(src)
    key_list = list(embed_data.keys())
    embed_matrix = np.stack([embed_data[key] for key in key_list], axis=0)
    print(embed_matrix.shape)
    with open(VECTORIZED_ITEM_FNAME, "wb") as dst:
        pickle.dump((key_list, embed_matrix), dst)


def _load_user_keywords(csv_path: str = "config/user-keywords.csv"):
    """Parse the user keyword CSV into a list of (keyword, weight, description).

    Expected row layout: keyword, weight, description[, more description...].
    Blank lines and lines whose first field starts with '#' are ignored.
    Malformed rows (too few columns, non-numeric weight) are skipped with a
    warning instead of aborting the whole run.
    """
    user_kw = []
    with open(csv_path, "r", encoding='utf-8') as f:
        reader = csv.reader(f, quotechar='"', delimiter=',', skipinitialspace=True)
        for row in reader:
            if not row or row[0].startswith("#"):
                continue
            if len(row) < 3:
                print(f"Warning: Skipping row with insufficient columns: {row}")
                continue

            keyword = row[0].strip()
            try:
                weight = float(row[1].strip())
            except ValueError:
                # Fixed: a non-numeric weight used to crash the whole import,
                # inconsistent with the warn-and-skip handling above.
                print(f"Warning: Skipping row with non-numeric weight: {row}")
                continue
            # If there are extra fields, merge field 3 and onward into the description.
            description = ', '.join(row[2:]).strip().strip('"')
            user_kw.append((keyword, weight, description))
    return user_kw


def embedding_keywords():
    """Embed each user keyword as "keyword: description" and pickle the result.

    Saves (user_kw, kw_embeds) to data/kw-embeds.pkl, where kw_embeds row i
    is the (un-normalized) embedding of user_kw[i].
    """
    user_kw = _load_user_keywords()

    bert = PhysBERT.BERTEmbedding()
    kw_embeds = []
    for keyword, weight, description in user_kw:
        kw_embeds.append(bert.embed_sentence(keyword + ": " + description).detach().numpy().flatten())
    kw_embeds = np.stack(kw_embeds, axis=0)
    print(kw_embeds.shape)

    with open("data/kw-embeds.pkl", "wb") as f:
        pickle.dump((user_kw, kw_embeds), f)


if __name__ == "__main__":
    # Manual entry-point menu: uncomment exactly the pipeline stage you want
    # to run. Typical order: main_update_tagged_items() to (re)build the
    # embedding cache, then vectorize_embedding_base() / embedding_keywords();
    # normalize(), test_norm() and lookup_nearest() are maintenance/debug aids.
    # normalize()
    # test_norm()
    # lookup_nearest(
    #     "We build a general band theory for non-Hermitian lattices"
    # )
    # main_update_tagged_items()
    # vectorize_embedding_base()
    embedding_keywords()