#!/root/anaconda2/bin/python
#-*- coding:utf-8 -*-

import pandas as pd
import string
from sklearn.preprocessing import normalize
from sklearn.ensemble import RandomForestClassifier
from sklearn.cross_validation import train_test_split
import re

# Regex fragments (unicode character-class contents) used to build the
# feature-extraction patterns below.

# Whitespace class.
blank = u'\s'
# "Special" symbols frequently used to obfuscate spam (full- and half-width).
spci_syms = u'＠@％＆＊＃\｜#\|\$&\*￥……'
# Bracket characters, both CJK full-width and ASCII variants.
brakets = u'\【\】『』〖\[\]\<\>\《\》≮≯〗〔〕「」'
# Filler characters stripped out when normalizing disguised digit runs.
replace = u'\s　一 .*\-·：'
# Domain suffixes used to spot URL-like substrings (regex alternation).
suffix = u'com|cn|cc|so|net|org|edu|info'
# Ordinary punctuation: ASCII and full-width ranges.
normal_punctation = u'!-/:-@\[\.-`\{-…ˆ‹›«»‘-”！-／：-＠\［-｀\｛-～｡､･、。'
# ASCII and full-width digits.
normal_number = u'0-9０-９'
# ASCII and full-width Latin letters.
normal_char = u'a-zA-Zａ-ｚＡ-Ｚ'
# CJK unified ideographs.
normal_hanzi = u'\u4e00-\u9fff'
# Every known "disguised digit" glyph (circled, CJK, full-width, look-alike
# letters, ...), grouped by the digit it represents, 0 through 9.
numbers = u'OoᴑᴼᴏᵒＯℴ୦௦Оסｏ౦০օΟο０೦ೲഠ๏໐࿀࿁零⚬⚪∘ₒ₀⁰º°' \
  u'ー㈠➊➀❶⓵⑴壹①１¹一l|⒈₁ᘖ' \
  u'二㈡➋➁❷⓶⒉⑵₂贰②２շջ²ᒾᒿᗱ' \
  u'三㈢➌➂❸⓷⒊⑶₃叁③３³౩зЗვჳᗲᗳ' \
  u'四㈣➍➃❹⓸⒋⑷₄⁴肆④４' \
  u'五㈤➎➄❺⓹⒌⑸₅⁵伍⑤５' \
  u'六㈥➏➅❻⓺⒍⑹₆⁶⑥６бᏮᑲ' \
  u'七㈦➐➆❼⓻⒎⑺₇⁷柒⑦７' \
  u'八㈧➑➇❽⓼⒏⑻₈⁸捌⑧８৪' \
  u'九㈨➒➈❾⓽⒐⑼₉⁹玖⑨９୨৭'
# Translation target: normal_table[i] is the ASCII digit that numbers[i]
# stands for — the two strings must stay index-aligned.
normal_table = u'0000000000000000000000000000000000001111111111111111222222222222222222333333333333333333333' \
                 u'4444444444444555555555555666666666666666777777777777788888888888888999999999999999'


def test(filepath):
    """Load the labelled SMS spreadsheet and derive all numeric features.

    Reads columns 0, 1, 9 and 10 from the Excel file (the message content
    plus the two label columns), drops incomplete rows and duplicate
    messages, then adds one feature column per extractor defined in this
    module.  The two label columns are moved to the end of the frame.

    NOTE(review): ``parse_cols`` is the old pandas spelling of ``usecols``;
    this code targets an old pandas / Python 2 stack — confirm before
    upgrading.
    """
    df = pd.read_excel(filepath, parse_cols=[0,1, 9, 10])
    df = df.dropna(axis=0, how='any')
    df = df.drop_duplicates(subset=['content'])

    # 'pure' is the normalized text (disguised digits mapped to ASCII,
    # noise characters removed); number-hunting extractors work on it,
    # the rest on the raw content.
    df['pure'] = df.content.map(translate)
    df['400'] = df.pure.map(get_400)
    df['url'] = df.content.map(get_url)
    df['url_length'] = df.content.map(get_url_length)
    df['parts'] = df.content.map(get_parts)
    df['brakets'] = df.content.map(get_brakets)
    df['specific_syms'] = df.content.map(get_spcific_syms)
    df['%_num'] = df.content.map(get_percent)
    df['name'] = df.content.map(get_name)
    df['strange_num'] = df.content.map(get_strange_num)
    df['money'] = df.content.map(get_money)
    df['cell_phone'] = df.pure.map(get_cellphone)
    df['cell_phone_pos'] = df.pure.map(get_cellphone_position)
    df['phone'] = df.pure.map(get_phone)
    df['phone_pos'] = df.pure.map(get_phone_position)
    df['qq'] = df.pure.map(get_qq)
    df['wechat'] = df.pure.map(get_wechat)
    df['bank'] = df.pure.map(get_bank)
    df['numbers'] = df.pure.map(get_numbers)
    df['origin_length'] = df.content.map(len)
    df['pure_length'] = df.pure.map(get_pure_length)
    df['differ_length'] = df['origin_length'] - df['pure_length']

    # Move the two label columns ("level-1 label", "level-2 label") to the
    # end so the feature columns come first.
    first = df.pop(u'一级标签')
    second = df.pop(u'二级标签')
    df.insert(df.shape[1],u'一级标签', first)
    df.insert(df.shape[1],u'二级标签', second)

    return df


def get_url(str):
    """Count URL-like substrings that end in a known domain suffix.

    Fixes two regex bugs in the original pattern ``(?:.[%s])``:
    - the dot was unescaped, so it matched *any* character instead of a
      literal '.';
    - the ``suffix`` alternation was interpolated inside ``[...]``, which
      turned 'com|cn|...' into a character class of single letters, so
      any one letter counted as a suffix.
    The corrected pattern requires a literal '.' followed by one of the
    whole suffixes.
    """
    p = u"((?:[%s%s%s]+)\\.(?:%s))" % (normal_char, normal_punctation, normal_number, suffix)
    return len(re.findall(p, str))

def get_url_length(str):
    """Total length of the domain-name parts of URL-like substrings.

    Each URL found is split on '.' / '．'; for www/http links the leading
    scheme part and the suffix are dropped, otherwise only the suffix is
    dropped, and the remaining dot-joined text's length is accumulated.

    Fixes:
    - same regex bugs as ``get_url`` (unescaped '.', suffix alternation
      inside a character class);
    - the original reused the name ``tmp`` for both the match list and
      the per-item split result while iterating over it.
    """
    p = u"((?:[%s%s%s]+)\\.(?:%s))" % (normal_char, normal_punctation, normal_number, suffix)
    total = 0
    for match in re.findall(p, str):
        parts = re.split(u'[.．]', match)
        if 'www' in match or 'http' in match:
            total += len('.'.join(parts[1:-1]))
        else:
            total += len('.'.join(parts[0:-1]))
    return total

def get_parts(str):
    """Count message segments obtained by splitting on sentence punctuation.

    Splits on ASCII and full-width comma / question / exclamation marks
    plus the CJK full stop; the result is always >= 1.
    """
    separators = u'[,?!，。？！]'
    return len(re.split(separators, str))

def get_brakets(str):
    """Count bracket *pairs*: total bracket characters floor-divided by 2.

    Fix: uses ``//`` so the result stays an integer under Python 3 too
    (``/`` on ints was floor division in Python 2, where this file
    originated, but true division in Python 3).
    """
    p = u'[%s]' % brakets
    return len(re.findall(p, str)) // 2


def get_spcific_syms(str):
    """Count "strange" characters: runs of known special symbols, plus any
    single character that is not a normal letter, digit, hanzi,
    punctuation mark, bracket, disguised digit, or whitespace.
    """
    pattern = u'([%s]+|[^%s%s%s%s%s%s\\s])' % (
        spci_syms, normal_char, normal_punctation, normal_number,
        normal_hanzi, brakets, numbers)
    # len() of an empty match list is already 0, so no branch is needed.
    return len(re.findall(pattern, str))


def get_percent(str):
    """Count percent signs, both ASCII '%' and full-width '％'."""
    return len(re.findall(u'[%％]', str))


def get_cellphone(str):
    """Count 11-digit mobile numbers starting with 13/15/17/18."""
    matches = re.findall(r'1[3578]\d{9}', str)
    return len(matches)


def get_cellphone_position(str):
    """Relative position (0..1) of the first mobile number in the text.

    Returns the match's start index divided by the text length, or 0 when
    no mobile number is present.

    Fix: the original called ``string.index`` from the Python 2 ``string``
    module, which no longer exists in Python 3; ``re.search`` yields the
    offset directly and avoids a second scan of the text.
    """
    m = re.search(r'1[3578]\d{9}', str)
    if m:
        return m.start() / float(len(str))
    return 0


def get_400(str):
    """Count 400-prefixed hotline numbers (the literal 400 plus 7 digits)."""
    return len(re.findall(r'400\d{7}', str))


def get_phone(str):
    """Count landline numbers: an area code starting with 0 (3 or 4
    digits), an optional '-', then 7 digits."""
    found = re.findall(r'((?:0\d{2,3})(?:\-?\d{7}))', str)
    return len(found)


def get_phone_position(str):
    """Relative position (0..1) of the first landline number in the text.

    Returns the match's start index divided by the text length, or 0 when
    no landline number is present.

    Fix: replaced the Python-2-only ``string.index`` module function with
    ``re.search`` + ``Match.start()`` (equivalent and portable).
    """
    m = re.search(r'((?:0\d{2,3})(?:\-?\d{7}))', str)
    if m:
        return m.start() / float(len(str))
    return 0


def get_qq(str):
    """Count QQ-number mentions: one or more 'Q'/'q' characters (the
    class also admits '|'), up to three CJK characters, then a captured
    run of 7+ digits.

    Fix: removed a dead first pattern assignment that was unconditionally
    overwritten by the second one in the original.
    """
    p = u'[Q|q]+[\u4e00-\u9fff]{0,3}(\\d{7,})'
    return len(re.findall(p, str))


def get_wechat(str):
    """Count WeChat-id mentions matched by the heuristic pattern.

    NOTE(review): ``[wei|微]`` and ``[xin|信]`` are character *classes*,
    not alternations, so this matches any single character from each set
    — the pattern probably intends ``(wei|微)(xin|信)``. Kept as-is to
    preserve behavior.
    """
    pattern = u'[wei|微][xin|信][\u4e00-\u9fff]?(\\w{5,})'
    return len(re.findall(pattern, str))


def get_numbers(str):
    """Count digit runs, where consecutive digit groups may be joined by
    ':', '：', '/' or the date characters 年/月/日 (so '2023年5月' counts
    as a single run)."""
    runs = re.findall(u'((?:\\d+[:：/年月日]?)+)', str)
    return len(runs)


def get_name(str):
    """Count bracketed spans (typically the sender/brand name in an SMS).

    Tries the full-width 【】〖〗『』 bracket pairs first; if none match,
    falls back to the ASCII / other bracket pairs. Returns the number of
    spans found by whichever pattern was used.

    Fix: removed an unused local ``brakets`` that shadowed the
    module-level constant of the same name and was never referenced.
    """
    p1 = u"[【【〖『](.*?)[】】〗』]"
    p2 = u'[\\[\\{\\［\\｛＜<《≮「](.*?)[\\]\\］\\}\\｝＞》>≯」]'
    tmp = re.findall(p1, str)
    if tmp:
        return len(tmp)
    return len(re.findall(p2, str))


def get_money(str):
    """Count money mentions: a number followed by 元/|/$/￥, or $/|/￥
    followed by a number."""
    pattern = u'((?:\\d+\\.?\\d*[元|$￥])|(?:[$|￥]\\d+\\.?\\d*))'
    # An empty match list already has length 0, so no branch is needed.
    return len(re.findall(pattern, str))


def get_bank(str):
    """Count digit runs whose length is exactly 16 or 19 — the typical
    lengths of bank-card numbers."""
    runs = re.findall(r'\d{15,}', str)
    cards = [run for run in runs if len(run) in (16, 19)]
    return len(cards)


def get_pure_length(str):
    """Length of the text after stripping everything except CJK ideographs."""
    hanzi_only = re.sub(u'[^\u4e00-\u9fff]', '', str)
    return len(hanzi_only)


def translate(str):
    """Normalize obfuscated digit sequences to ASCII digits.

    First strips every character outside the "normal" classes (letters,
    hanzi, digits, common punctuation), then locates runs of two or more
    disguised digits (glyphs from ``numbers``), maps each glyph to its
    ASCII digit via ``normal_table``, and finally removes the filler
    characters listed in ``replace`` from the normalized text.

    Fixes:
    - the matched run was passed to ``re.sub`` as a *pattern*, so runs
      containing regex metacharacters (e.g. '.', '-', '|') could corrupt
      the text or raise; a plain ``str.replace`` is what was intended;
    - dropped a redundant ``unicode()`` wrapper (``normal_table[i]`` is
      already a unicode character), which also keeps the code Python 3
      friendly.
    """
    str = re.sub(u'[^%s%s%s%s]' % (normal_char, normal_hanzi, normal_number, normal_punctation), '', str)
    p = u"((?:(?:[%s\\d]+)(?:[\\s　一 .\\-]*)){2,})" % numbers
    tmp = re.findall(p, str)
    if not tmp:
        return str
    # Map each disguised-digit glyph to its ASCII digit (index-aligned tables).
    trans_table = dict((ord(c), normal_table[i]) for i, c in enumerate(numbers))
    for run in tmp:
        str = str.replace(run, run.translate(trans_table))
    return re.sub(u'[%s]*' % replace, '', str)


def get_strange_num(str):
    """Count runs that mix ASCII digits with disguised-digit glyphs,
    optionally separated by punctuation — a common spam obfuscation."""
    pattern = u'((?:[\\d%s]+[\\s%s]*[%s]+[\\d%s\\s%s]*)+)' % (
        numbers, normal_punctation, numbers, normal_punctation, numbers)
    return len(re.findall(pattern, str))


def train(df):
    """Train random-forest classifiers for the two label levels.

    Fits on the full feature matrix, appends in-sample predictions for
    both label columns to ``df`` and prints the in-sample scores, then
    prints hold-out scores from an 80/20 split for each label level and
    the last model's feature importances.  Returns ``df`` with the two
    prediction columns added.

    Fixes:
    - the second ``train_test_split`` passed ``target1`` instead of
      ``target2``, so the level-2 hold-out score was computed against the
      wrong labels;
    - ``print`` is called as a function for Python 2/3 compatibility.

    NOTE(review): in-sample scores from a model fit on all data are
    optimistic; the hold-out scores are the meaningful ones.
    """
    feature = pd.DataFrame(df,columns=['url_length','numbers','origin_length','pure_length','differ_length','parts','url',
                                       'name','brakets','specific_syms','%_num','strange_num','money',
                                       'cell_phone','400','phone','qq','wechat','bank','cell_phone_pos'])
    target1 = df[u'一级标签']
    target2 = df[u'二级标签']
    feature_train1, feature_test1, target_train1, target_test1 = train_test_split(feature, target1, test_size=0.2, random_state=42)
    feature_train2, feature_test2, target_train2, target_test2 = train_test_split(feature, target2, test_size=0.2, random_state=42)

    clf = RandomForestClassifier(20)
    clf.fit(feature, target1)
    df.insert(df.shape[1]-1,u'res_一级', clf.predict(feature))
    print(clf.score(feature, target1))

    clf.fit(feature, target2)
    df.insert(df.shape[1],u'res_二级', clf.predict(feature))
    print(clf.score(feature, target2))

    clf.fit(feature_train1, target_train1)
    print(clf.score(feature_test1, target_test1))

    clf.fit(feature_train2, target_train2)
    print(clf.score(feature_test2, target_test2))

    print(clf.feature_importances_)
    return df


if __name__ == '__main__':
    # NOTE(review): the trailing '.' in the filename looks like a typo —
    # confirm the actual file is 'sms_new.xlsx'.
    filepath='F:/message/sms_new.xlsx.'
    # Extract features from the spreadsheet, then train/score the models.
    df = test(filepath)
    # df = pd.read_csv('F:/sms_feature.csv')
    # feature = pd.DataFrame(df,columns=['numbers','origin_length','url','parts','name','brakets','specific','%_num','strange_num','money,','cell_phone',
    #                                '400','phone','qq','wechat','bank','pure_length',
    #                                'differ_length'])
    # for i in range(feature.shape[0]):
    #     if len(feature.ix[i])!=18:
    #         print len(feature.ix[i]), feature.ix[i]
    # df=pd.DataFrame(df,columns=['origin','content'])
    df = train(df)
    # df.to_csv("F:/sms_feature.csv", encoding='utf-8')
    # s = u'有（成&都)-(发&鰾）１３８８０９７０６８８李会计'
    # print get_strange_num(s)
    # get_spcific_syms(u'有3% ∮ 6% Ⅴ 17%建筑⊙材料可开，可做账Θ报销，13856939938 张')