package toy.keli.edic.data.model

import android.util.Log
import androidx.collection.arraySetOf
import androidx.compose.ui.util.fastForEach
import androidx.lifecycle.viewModelScope
import toy.keli.edic.data.Db
import dagger.hilt.android.lifecycle.HiltViewModel
import kotlinx.coroutines.CoroutineScope
import kotlinx.coroutines.Dispatchers
import kotlinx.coroutines.launch
import toy.keli.edic.ui.main.extend.commonPrefixLengthWith
import java.util.Collections
import java.util.concurrent.CountDownLatch
import java.util.concurrent.TimeUnit
import javax.inject.Inject

/**
 * Word-extension model: treats [word] as a "root" built from a set of letters
 * and loads related words from the dictionary database — words composed of
 * exactly those letters ([data]), words contained in subsets of them
 * ([inword]), and extension words ([extword]).
 */
@HiltViewModel
class WordExtend @Inject constructor() : Model() {

    // The root word this model was built from.
    var word: String = ""
    // Canonical key for [word]: its distinct letters, sorted (see [getname]).
    var name: String = ""
    var esize: Int = 0
    var isize: Int = 0
    // Words composed of exactly these letters.
    var data = listOf<String>()
    // Words contained within subsets of these letters.
    var inword = listOf<String>()
    // Words that extend these letters.
    var extword = listOf<String>()

    /**
     * Loads the extension record for [word] from the DB (blocking the caller
     * via [waitLaunch], bounded by its timeout) and populates the properties.
     * Leaves everything at defaults when no record is found or the query
     * times out.
     */
    constructor(word: String) : this() {
        this.word = word
        name = getname(word)
        val q = self.query.where("name", name).build()
        val wee = self.waitLaunch({
            dao.getWordExtend(q)
        })
        // waitLaunch can time out and yield null despite its declared type.
        if (wee == null || wee.isEmpty()) return
        val w = wee[0]
        esize = w.esize
        isize = w.isize
        if (w.inword.isNotEmpty()) {
            inword = w.inword.split(",")
        }
        if (w.extword.isNotEmpty()) {
            // Drop empty fragments from trailing/duplicate commas.
            extword = w.extword.split(",").filter { it.isNotEmpty() }
        }
        data = w.data.split(",")
    }

    /**
     * Runs [run] on the IO dispatcher and blocks the calling thread until it
     * completes or [ws] milliseconds elapse.
     *
     * NOTE(review): on timeout `rt` is still null and the unchecked cast hands
     * null back through a non-null return type; callers must treat the result
     * as nullable (the secondary constructor already does). Fixing the
     * signature to `T?` would break existing callers, so it is documented
     * rather than changed.
     */
    fun <T> waitLaunch(run: suspend CoroutineScope.() -> T?, ws: Long = 500): T {
        val latch = CountDownLatch(1)
        var rt: T? = null
        viewModelScope.launch(Dispatchers.IO) {
            rt = run()
            latch.countDown()
        }
        // Bounded wait so a stalled query cannot freeze the caller forever.
        latch.await(ws, TimeUnit.MILLISECONDS)
        @Suppress("UNCHECKED_CAST")
        return rt as T
    }

    /** Number of extension words that fully match [word] treated as a regex. */
    fun eSize(): Int {
        // Hoisted out of the loop: compile the pattern once, not per element.
        val pattern = Regex(word)
        return extword.count { it.matches(pattern) }
    }

    /**
     * Cosine similarity of two strings over their character-frequency
     * vectors, in [0.0, 1.0]; returns 0.0 when either string is empty.
     */
    fun cosineSimilarity(s1: String, s2: String): Double {
        val freq1 = s1.groupingBy { it }.eachCount()
        val freq2 = s2.groupingBy { it }.eachCount()

        val shared = freq1.keys.intersect(freq2.keys)
        val dotProduct = shared.sumOf { freq1.getValue(it) * freq2.getValue(it) }

        val norm1 = Math.sqrt(freq1.values.sumOf { it * it }.toDouble())
        val norm2 = Math.sqrt(freq2.values.sumOf { it * it }.toDouble())

        return if (norm1 == 0.0 || norm2 == 0.0) 0.0 else dotProduct / (norm1 * norm2)
    }

    /**
     * Similar words: candidates from the "频率统计top4k.list" article whose
     * length is within ±1 of [word] and whose character cosine similarity
     * exceeds 0.7. Ordered by descending common-prefix length with [word],
     * ties alphabetical.
     */
    fun getSameWords(): List<String> {
        val candidates = Article.findByName("频率统计top4k.list")
            ?.content?.split("\n").orEmpty()
            .filterNot { it.startsWith("top") } // skip section-header lines
            .flatMap { it.split(",") }

        val rt = arrayListOf<String>()
        candidates
            .filter { it.length > word.length - 2 && it.length < word.length + 2 }
            .fastForEach {
                if (cosineSimilarity(it, word) > 0.7 && !rt.contains(it)) rt.add(it)
            }
        // Stable sorts: alphabetical first, then by shared-prefix length.
        return rt.sorted().sortedByDescending { it.commonPrefixLengthWith(word) }
    }

    companion object {
        val dao = Db.dictionary.dictionaryDao
        val self = WordExtend()

        /** Canonical name of [word]: its distinct letters sorted ascending. */
        fun getname(word: String): String = word.toSortedSet().joinToString("")
    }
}