
from collections import Counter

import slimit.lexer

# Lazily-initialised module singleton; populated by init_intelligence_data_js().
_commonAPIs_Js=None
# Single shared slimit lexer instance, reused by tokenize_js() for every call.
_lexers=slimit.lexer.Lexer()
# Characters that disqualify a token when one appears as its first character
# (punctuation, operators and digits) -- consumed by vocab_build().
StopChars=['.','(',')','[',']','{','}',';','<','>','?','/','\\','|','!','@','#','$','%','^','&','*','_','-','+','=','~','`',':','"',"'",',','0','1','2','3','4','5','6','7','8','9']
# Collapsed into one string so `ch in StopChars` is a single-character test.
StopChars=''.join(StopChars)

class CommonAPIs_Js:
    """Holds a cleaned list of common JavaScript API names plus a token
    frequency bag (Counter) built by lexing each name as JavaScript."""

    def __init__(self, raw_list) -> None:
        # word_bags: Counter mapping token -> frequency.
        # common_apis: the length-filtered API name strings.
        self.word_bags, self.common_apis = self.clean(raw_list)

    def clean(self, raw_list: list):
        """Drop implausibly short/long entries and build the token bag.

        Keeps entries whose length is strictly between 2 and 64, then
        tokenizes each survivor with the shared JS lexer.

        Returns:
            (word_bags, clean_result): the token Counter and the kept names.
        """
        clean_result = [e for e in raw_list if 2 < len(e) < 64]
        word_bags = vocab_build(tokenize_js(e) for e in clean_result)
        return word_bags, clean_result

    def load_word_stand(self, word_stand_file):
        """Load per-word (positive, negative) counts from a TSV file.

        Each non-blank line must be ``word<TAB>p<TAB>n``. Blank lines
        (notably a trailing newline at EOF) are skipped instead of raising
        ValueError as the previous unpacking did.

        Returns:
            dict mapping word -> (p, n), also stored as ``self.word_pns``.
        """
        self.word_pns = {}
        with open(word_stand_file, 'r') as f:
            # Iterate the file lazily instead of readlines()-ing it whole.
            for line in f:
                line = line.rstrip('\n')
                if not line:
                    continue  # tolerate blank/trailing lines
                word, p, n = line.split('\t')
                self.word_pns[word] = (int(p), int(n))

        return self.word_pns
    
def tokenize_js(js_code):
    """Lex *js_code* with the shared slimit lexer, yielding each token's value."""
    _lexers.input(js_code)

    while True:
        tok = _lexers.token()
        if not tok:
            return
        yield tok.value
        
def vocab_build(docs):
    """Count token frequencies over an iterable of token streams.

    Tokens shorter than three characters, or whose first character is in
    StopChars (punctuation/digits), are ignored.
    """
    vocab = Counter()
    vocab.update(
        tok
        for doc in docs
        for tok in doc
        if len(tok) > 2 and tok[0] not in StopChars
    )
    return vocab

def commonAPIs_Js():
    """Return the module-level CommonAPIs_Js singleton, failing loudly if unset."""
    if _commonAPIs_Js is not None:
        return _commonAPIs_Js
    raise Exception('Common APIs Js is not initialized.')

def init_intelligence_data_js(file_path):
    """Initialise the module-level CommonAPIs_Js singleton from *file_path*.

    Each line is expected to carry a trailing ",<field>"; everything up to
    the last comma is kept as one raw API entry.

    Fixes: the previous code did ``e[:e.rfind(',')]`` unconditionally, so a
    line with no comma got ``rfind == -1`` and silently lost its last
    character (it only appeared to work because that character was usually
    the newline). Lines without a comma are now kept whole.
    """
    global _commonAPIs_Js
    raw_list = []
    with open(file_path, 'r') as f:
        # Iterate lazily rather than readlines()-ing the whole file.
        for line in f:
            line = line.rstrip('\n')
            last_comma = line.rfind(',')
            if last_comma == -1:
                raw_list.append(line)
            else:
                raw_list.append(line[:last_comma])

    _commonAPIs_Js = CommonAPIs_Js(raw_list)
        