import html
import re
import urllib.parse as urlparse
from html.parser import HTMLParser

import nltk
import numpy as np
from sklearn.externals import joblib


# Load the pre-trained HMM XSS detector (pickled with joblib).
# NOTE(review): hard-coded absolute Windows path — this only works on the
# author's machine; should come from configuration. TODO confirm deploy path.
hmmclf = joblib.load("C:\\Users\\LENOVO\\Desktop\\machineLearning\\Code\\testSite\\myDetector\\detectorTools\\xss-hmm-module.pkl")
print("hmm loaded...")
# Verbose regex for nltk.regexp_tokenize: splits a payload into quoted
# strings, URLs, closing/opening HTML tags, attribute names ("name="),
# function calls such as alert(String.fromCharCode(88,83,83)), and bare words.
tokens_pattern = r'''(?x)
 "[^"]+"
|http://\S+
|</\w+>
|<\w+>
|<\w+
|\w+=
|>
|\w+\([^<]+\) #函数 比如alert(String.fromCharCode(88,83,83))
|\w+
'''
# Minimum length of a parameter value worth processing.
MIN_LEN=10

# Number of hidden HMM states.
N=5
# Maximum-likelihood (log-probability) threshold for flagging a line as XSS.
T=-200
# NOTE(review): the notes below and SEN look like leftovers from a
# character-class encoding scheme (letters / digits -> 1 / <>,:"' / other -> 2);
# neither N nor SEN is referenced in the visible code.
# letters
# digits 1
# <>,:"'
# other characters 2
SEN=['<','>',',',':','\'','/',';','"','{','}','(',')']

index_wordbag=1 # next integer id to assign in the wordbag
wordbag={} # wordbag: token -> integer id


def do_str(line):
    """Tokenise a normalised payload string using the module-level
    `tokens_pattern` regex and return the list of tokens."""
    return nltk.regexp_tokenize(line, tokens_pattern)

def load_wordbag(filename, max=100):
    """Build the global token wordbag from an XSS sample file.

    Each line of *filename* is URL-decoded, HTML-unescaped and normalised
    (numbers -> "8", URLs -> "http://u", /*..*/ comments stripped), then
    tokenised with do_str().  The `max` most frequent tokens are assigned
    increasing integer ids in the module-level `wordbag`.

    Args:
        filename: path to a text file with one XSS payload per line.
        max: cap on how many (most frequent) tokens enter the wordbag.
             (Name shadows the builtin but is kept for caller compatibility.)
    """
    tokens_list = []
    global wordbag
    global index_wordbag

    with open(filename, errors='ignore') as f:
        for line in f:
            line = line.strip('\n')
            # URL-decode the payload.
            line = urlparse.unquote(line)
            # Resolve HTML entities. html.unescape() replaces
            # HTMLParser.unescape(), which was removed in Python 3.9.
            line = html.unescape(line)
            if len(line) < MIN_LEN:
                continue
            # Collapse numeric constants to the literal "8".
            line, _ = re.subn(r'\d+', "8", line)
            # Collapse URLs to the placeholder "http://u".
            line, _ = re.subn(r'(http|https)://[a-zA-Z0-9\.@&/#!#\?:=]+', "http://u", line)
            # Strip /*...*/ comments.
            line, _ = re.subn(r'\/\*.?\*\/', "", line)
            tokens_list += do_str(line)

    # most_common() yields tokens by decreasing frequency.  The old
    # `list(fredist.keys())[:max]` relied on Python-2-era nltk, where keys()
    # was frequency-sorted; modern FreqDist (a Counter) iterates in insertion
    # order, which would select arbitrary tokens instead of the top `max`.
    fredist = nltk.FreqDist(tokens_list)
    for localkey, _count in fredist.most_common(max):
        if localkey not in wordbag:  # keep the id of an already-seen token
            wordbag[localkey] = index_wordbag
            index_wordbag += 1

    print("GET wordbag size(%d)" % index_wordbag)

def test(remodel, article):
    """Score each line of `article` with the HMM and collect suspected XSS.

    Every line is normalised exactly as during training (load_wordbag),
    tokenised, mapped to wordbag ids (-1 for unknown tokens) and scored
    with remodel.score(); lines whose log-likelihood reaches the module
    threshold T are reported.

    Args:
        remodel: fitted hmmlearn-style model exposing .score(ndarray).
        article: iterable of candidate strings (e.g. URL query params).

    Returns:
        list[str]: "SCORE:(...) XSS_URL:(...) ;" entries for flagged lines.
    """
    rst = []
    for line in article:
        line = line.strip('\n')
        # URL-decode the payload.
        line = urlparse.unquote(line)
        # Resolve HTML entities. html.unescape() replaces
        # HTMLParser.unescape(), which was removed in Python 3.9.
        line = html.unescape(line)

        if len(line) < MIN_LEN:
            continue
        # Normalise identically to training: numbers -> "8".
        line, _ = re.subn(r'\d+', "8", line)
        # URLs -> "http://u".  '=' added to the character class so this
        # matches the training-time regex in load_wordbag (it was missing
        # here, making test-time normalisation diverge from training).
        line, _ = re.subn(r'(http|https)://[a-zA-Z0-9\.@&/#!#\?:=]+', "http://u", line)
        # Strip /*...*/ comments.
        line, _ = re.subn(r'\/\*.?\*\/', "", line)
        words = do_str(line)

        # Map tokens to wordbag ids; unknown tokens become -1.  Direct dict
        # membership replaces the O(n) `word in list(wordbag.keys())`.
        vers = [[wordbag[word]] if word in wordbag else [-1] for word in words]

        np_vers = np.array(vers)
        pro = remodel.score(np_vers)
        if pro >= T:
            rst.append("SCORE:(%d) XSS_URL:(%s) ;" % (pro, line))
    return rst

if __name__ == '__main__':
    # Ad-hoc check comparing nltk's sentence tokenizer against a plain
    # regex split on ';' and '.' for a mixed text/HTML sample.
    sample = 'agb,bvhdlajdgh.haukfjq38947,"jdia!".789465132;<div>Leave your message</div>;<td id="paramName">{{paramName}}</td><td id="paramValue">{{value}}</td><td id="funcBtn"><div onclick=ajaxRequest(this,"confirm")>confirm</div></td>;   '
    splitter = re.compile(r';|\.')
    print(nltk.sent_tokenize(sample))
    print(splitter.split(sample))

