# import findspark
# findspark.init()
import pyspark
from pyspark.sql import SparkSession
from pyspark.sql.functions import col, locate, regexp_extract, array_position, udf, expr, when
from pyspark.sql.types import StructType, StructField, StringType, ArrayType
from pypinyin import lazy_pinyin, Style
import pycantonese
import zhconv
import time
# import sparknlp
# from sparknlp.base import *
# from sparknlp.annotator import *
from pyspark.ml import Pipeline
from sklearn.metrics.pairwise import cosine_similarity

# Single shared Spark session for the whole script.
# NOTE(review): the commented-out .config(...) lines below sit AFTER
# .getOrCreate(), so re-enabling them as-is would have no effect — they
# would need to be moved back into the builder chain before getOrCreate().
spark = SparkSession.builder \
    .appName("CantoneseRhyme") \
    .getOrCreate()
#     .config("spark.driver.memory", "20g") \
#     .config("spark.driver.maxResultSize", "20g") \
#     .config("spark.jars", "spark-nlp-assembly-3.3.4.jar") \

# Corpus DataFrame; columns used below: sentences, pinyin_without_tone,
# pinyin_with_tone, pinyin_without_tone_split, pinyin_with_tone_split.
# Cached because every lookup function filters it repeatedly.
df = spark.read.format("orc").load("rhyme/cantonese.orc").repartition(3).cache()

# # First extract the prerequisites for the NerDLModel
# documentAssembler = DocumentAssembler() \
#     .setInputCol("rhyme") \
#     .setOutputCol("document")

# sentence = SentenceDetector() \
#     .setInputCols(["document"]) \
#     .setOutputCol("sentence")

# tokenizer = Tokenizer() \
#     .setInputCols(["sentence"]) \
#     .setOutputCol("token")

# embeddings = BertEmbeddings.load('bert_wwm') \
#     .setInputCols(['document', 'token']) \
#     .setOutputCol('embeddings')

# pipeline = Pipeline().setStages([
#     documentAssembler,
#     sentence,
#     tokenizer,
#     embeddings,
# ])

def word_parser(word):
    """Extract the Cantonese jyutping finals of *word*.

    The word is converted to Traditional Chinese first (the form
    pycantonese's lexicon expects), then romanised to jyutping and split
    into syllables.

    Returns a tuple of two stringified lists:
    ``(finals without tones, finals with tone digits)`` — the string form
    (rather than the lists themselves) is what the downstream substring
    matching against the ORC pinyin columns relies on.
    """
    pairs = pycantonese.characters_to_jyutping(zhconv.convert(word, 'zh-tw'))
    finals_with_tone = []
    finals_no_tone = []
    for _, jyutping in pairs:
        if jyutping is None:
            # characters_to_jyutping yields None for segments it cannot
            # romanise (punctuation, rare characters); skip them instead of
            # letting parse_jyutping raise on None.
            continue
        for syllable in pycantonese.parse_jyutping(jyutping):
            finals_with_tone.append(syllable.final + syllable.tone)
            finals_no_tone.append(syllable.final)
    return (str(finals_no_tone), str(finals_with_tone))

def find_rhyme_without_tone(word):
    """Find words in the corpus that rhyme with *word*, ignoring tones.

    Matches the tone-less jyutping finals of *word* as a substring of each
    sentence's pinyin string, extracts the corresponding characters from
    the sentence, and returns a DataFrame of (rhyme, count) rows sorted by
    descending count.
    """
    # Finals of the query word with the list brackets stripped, so the
    # value can be matched as a raw substring of the pinyin column.
    target = str(word_parser(word)[0])[1:-1]
    candidates = df.filter(df.pinyin_without_tone.contains(target))
    located = candidates.select(
        df.sentences,
        locate(target, df.pinyin_without_tone).alias('position'),
        df.pinyin_without_tone_split,
    )
    # Translate the character offset of the pinyin match back into an index
    # into the original sentence.
    # NOTE(review): the `position - 3` / array_position arithmetic assumes a
    # particular encoding of pinyin_without_tone_split — confirm against the
    # ORC schema before changing.
    located = located.withColumn('find_result', expr(
        "IF(position==1, 0, array_position(pinyin_without_tone_split, position - 3) + 1)"))
    located = located.withColumn(
        'rhyme', expr(f"substring(sentences, find_result, {len(word)})"))
    return located.groupBy("rhyme").count().sort('count', ascending=False)


def find_rhyme_with_tone(word):
    """Find words in the corpus that rhyme with *word*, tones included.

    Same approach as find_rhyme_without_tone, but matches finals together
    with their tone digits. Returns a DataFrame of (rhyme, count) rows
    sorted by descending count.
    """
    # Tone-carrying finals of the query word, brackets stripped for raw
    # substring matching against the pinyin column.
    target = str(word_parser(word)[1])[1:-1]
    candidates = df.filter(df.pinyin_with_tone.contains(target))
    located = candidates.select(
        df.sentences,
        locate(target, df.pinyin_with_tone).alias('position'),
        df.pinyin_with_tone_split,
    )
    # Translate the character offset of the pinyin match back into an index
    # into the original sentence.
    # NOTE(review): the `position - 3` / array_position arithmetic assumes a
    # particular encoding of pinyin_with_tone_split — confirm against the
    # ORC schema before changing.
    located = located.withColumn('find_result', expr(
        "IF(position==1, 0, array_position(pinyin_with_tone_split, position - 3) + 1)"))
    located = located.withColumn(
        'rhyme', expr(f"substring(sentences, find_result, {len(word)})"))
    return located.groupBy("rhyme").count().sort('count', ascending=False)


def get_cosine_similarity(feature_vec_1, feature_vec_2):
    """Return the cosine similarity of two equal-length numeric vectors.

    Pure-Python replacement for sklearn's ``cosine_similarity`` on a single
    pair of vectors — avoids wrapping each vector in a 1x1 matrix just to
    unwrap the result. Returns 0.0 when either vector has zero magnitude,
    matching sklearn's behaviour for all-zero inputs.
    """
    dot = sum(a * b for a, b in zip(feature_vec_1, feature_vec_2))
    norm_1 = sum(a * a for a in feature_vec_1) ** 0.5
    norm_2 = sum(b * b for b in feature_vec_2) ** 0.5
    if norm_1 == 0.0 or norm_2 == 0.0:
        return 0.0
    return dot / (norm_1 * norm_2)


def find_rhyme_with_tone_similar(word):
    """Rank frequent tone-matched rhymes by embedding similarity to *word*.

    Keeps rhymes occurring more than 5 times, embeds them and the query
    word, and returns a list of (rhyme, similarity) pairs sorted by
    descending similarity.

    NOTE(review): depends on a module-level `pipeline` (the Spark NLP
    pipeline commented out near the top of the file) — this raises
    NameError until that setup is restored.
    """
    frequent = find_rhyme_with_tone(word).filter("count > 5")
    embedded = pipeline.fit(frequent).transform(frequent)
    rhyme_rows = embedded.select(embedded.rhyme, embedded.embeddings).collect()

    query_df = spark.createDataFrame([[word]]).toDF("rhyme")
    query_embedded = pipeline.fit(query_df).transform(query_df)
    query_rows = query_embedded.select(query_embedded.rhyme, query_embedded.embeddings).collect()
    query_vec = query_rows[0].embeddings[0].embeddings

    scores = {
        row.rhyme: get_cosine_similarity(row.embeddings[0].embeddings, query_vec)
        for row in rhyme_rows
    }
    return sorted(scores.items(), key=lambda kv: kv[1], reverse=True)


def find_rhyme_without_tone_similar(word):
    """Rank frequent tone-insensitive rhymes by embedding similarity to *word*.

    Keeps rhymes occurring more than 5 times, embeds them and the query
    word, and returns a list of (rhyme, similarity) pairs sorted by
    descending similarity.

    NOTE(review): depends on a module-level `pipeline` (the Spark NLP
    pipeline commented out near the top of the file) — this raises
    NameError until that setup is restored.
    """
    frequent = find_rhyme_without_tone(word).filter("count > 5")
    embedded = pipeline.fit(frequent).transform(frequent)
    rhyme_rows = embedded.select(embedded.rhyme, embedded.embeddings).collect()

    query_df = spark.createDataFrame([[word]]).toDF("rhyme")
    query_embedded = pipeline.fit(query_df).transform(query_df)
    query_rows = query_embedded.select(query_embedded.rhyme, query_embedded.embeddings).collect()
    query_vec = query_rows[0].embeddings[0].embeddings

    scores = {
        row.rhyme: get_cosine_similarity(row.embeddings[0].embeddings, query_vec)
        for row in rhyme_rows
    }
    return sorted(scores.items(), key=lambda kv: kv[1], reverse=True)
