#coding:utf-8
import re, os, sys
sys.path.append("matchutils")
from utils.utils import *
from utils.jieba_wrapper import cutwords
from utils import pygtrie 
from utils.wordtool import *

"""
就好这一口第三季
《娱乐星周刊》2016年
"""


class VideoNameParser(object):
    """Parse a (mostly Chinese) video/show title into index words plus
    tagged fragments (year, area, language, season/part, people...).

    NOTE: this module is Python 2 code — byte strings and ``unicode``
    are mixed deliberately throughout.
    """

    # A run of Han digits (used by convHan2Dig).
    dig_pattern = re.compile(u'[零一二三四五六七八九十百千]+')
    # "第<han-num><unit>" (session/issue/part/season/...) or a bracketed Han number.
    dig_pattern1 = re.compile(u'第[零一二三四五六七八九十百千]+[届期部季篇辑回册话]|[【（\(\[][ ]?[零一二三四五六七八九十百千]+[ ]?[\]\)）】]')
    # Bracketed ASCII number, e.g. "(12)" / "【3】".
    dig_pattern_en = re.compile(u'[\(（【][ ]?[0123456789]+[ ]?[\)）】]' )
    # Leading "[<2-digit-year><area><国/影/剧/版>]" tag, e.g. "【08日影】".
    year_area_pattern = re.compile(u'([\(\[（【][0-9][0-9][ ]?[意奥澳加德法俄印泰日韩美英台港葡西][国影剧版][】\]\)）]).+' )
    # "第N集" / "全N集" / "共N集" episode-count fragments (deleted outright).
    epi_pattern_del = re.compile(u'[第全共][0-9]+集')
    # "(上)" / " 下篇" / " 中部:" style part markers.
    part_pattern_01 = re.compile(u'[\(【（][上中下][篇部]?[\)】）]| [上中下][部篇]?$| [上中下][部篇][ ：:]')

    # Roman-numeral glyphs and ASCII roman numerals -> canonical lowercase
    # ASCII form.  'x'/'Ⅺ' are deliberately left unmapped ('x' is too
    # ambiguous as a plain letter).
    _romawords = {}
    _romawords['Ⅰ'] = 'i'
    _romawords['Ⅱ'] = 'ii'
    _romawords['Ⅲ'] = 'iii'
    _romawords['Ⅳ'] = 'iv'
    _romawords['Ⅴ'] = 'v'
    _romawords['Ⅵ'] = 'vi'
    _romawords['Ⅶ'] = 'vii'
    _romawords['Ⅷ'] = 'viii'
    _romawords['Ⅸ'] = 'ix'  # BUGFIX: was 'iv' (typo; lowercase 'ⅸ' maps to 'ix')
    _romawords['Ⅹ'] = 'x'
    #_romawords['Ⅺ'] = 'xi'
    _romawords['Ⅻ'] = 'xii'
    _romawords['i'] = 'i'
    _romawords['ii'] = 'ii'
    _romawords['iii'] = 'iii'
    _romawords['iv'] = 'iv'
    _romawords['v'] = 'v'
    _romawords['vi'] = 'vi'
    _romawords['vii'] = 'vii'
    _romawords['viii'] = 'viii'
    _romawords['ix'] = 'ix'  # BUGFIX: was a duplicate 'iv' entry; 'ix' was missing
    #_romawords['x'] = 'x'
    _romawords['xi'] = 'xi'
    _romawords['xii'] = 'xii'
    _romawords['ⅰ'] = 'i'
    _romawords['ⅱ'] = 'ii'
    _romawords['ⅲ'] = 'iii'
    _romawords['ⅳ'] = 'iv'
    _romawords['ⅴ'] = 'v'
    _romawords['ⅵ'] = 'vi'
    _romawords['ⅶ'] = 'vii'
    _romawords['ⅷ'] = 'viii'
    _romawords['ⅸ'] = 'ix'
    _romawords['ⅹ'] = 'x'
    _romawords['ⅺ'] = 'xi'
    _romawords['ⅻ'] = 'xii'

    # When False, "XX卫视(版)" tokens are kept as index words instead of
    # being shunted to lexwords (see parse_full).
    _is_TV_Flag = True

    __slots__ = ('countrys', 'langs', '_quanwords' )
    def __init__(self):
        """Build the area/language lookup tables and load the quantifier
        word list (quan_words.txt; one word per line, '#' starts a comment).
        """
        # Country/area markers; values are dummy flags — only membership is
        # tested.  ('泰国' appeared twice in the original; deduplicated.)
        self.countrys = dict.fromkeys([
            '中国', '美国', '泰国', '日本', '韩国', '意大利', '澳大利亚',
            '加拿大', '英国', '德国', '法国', '俄国',
            '韩版', '台版', '日版', '港版', '泰版', '英版', '陆版', '内地版',
        ], 1)

        # Language markers.
        self.langs = dict.fromkeys([
            '中文', '国语', '粤语', '泰语', '汉语', '韩语', '日语',
            '美语', '英语',
        ], 1)

        # Quantifier words: used by parse_full to decide whether a number is
        # a real word ("三个", "五天") rather than a season/part index.
        self._quanwords = {}
        fname = 'quan_words.txt'
        if dict_dir:
            # dict_dir comes from the star import of utils.utils.
            fname = dict_dir + '/quan_words.txt'
        # BUGFIX: use a with-statement — the original leaked the file
        # handle if an exception occurred while reading.
        with open(fname, 'rb') as f:
            for line in f:
                line = line.strip()
                if line == '' or line.startswith('#'):
                    continue
                self._quanwords[line] = 1

    def gen_feature(self, words): 
        words = words.split()
        result = []
        for word in words:
            word = word.decode('utf-8', 'ignore')
            word = re.sub(u'[0-9一二三四五六七八九十]', u'', word )
            if len(word) == 0:
                continue
            result.append( word )

        result.sort()
        result = u''.join( result )
        if len(result) <= 1:
            ###words.sort()
            return ''.join( words )

        result = result.encode('utf-8', 'ignore')    

        return result
    def parse( self, name, people = None ):
        """Parse *name* and return the kept index words joined by spaces."""
        words, _flags, _lexwords = self.parse_full(name, people)
        return ' '.join(words)

    def _convdigit(self, line ):
        """Normalize Han-digit fragments to ASCII: "第三季" -> "第3季",
        "（三）" -> "（3）"; also drops plain "第N集" episode markers.

        Accepts utf-8 bytes or unicode; always returns utf-8 bytes.
        """
        if isinstance(line, str):
            line = line.decode('utf-8', 'ignore')

        words = self.dig_pattern1.findall( line )
        if len(words) <= 0:
            # No Han-digit fragment: just drop any "第N集" and return.
            line = re.sub(u'第[0-9]+集', u'', line).strip()
            return line.encode('utf-8', 'ignore')
        for word in words:
            if word.startswith(u'第'):
                # "第<han-num><unit>": convert the middle to ASCII digits,
                # keeping the leading 第 and the trailing unit character.
                temp = word[1:-1]
                newword = fromHanDigit( temp )
                newword = newword.decode('utf-8', 'ignore')
                newword = word[0]+newword+word[-1]
                # Replace only the first occurrence so repeated fragments
                # are each handled by their own findall hit.
                line = line.replace( word, newword, 1 )
            else:
                # Bracketed form "（<han-num>）": same, keeping the brackets.
                temp = word[1:-1].strip()
                newword = fromHanDigit( temp )
                newword = newword.decode('utf-8', 'ignore')
                newword = word[0]+newword+word[-1]
                line = line.replace( word, newword, 1 )

        tmp = re.sub(u'第[0-9]+集', u'', line).strip()

        # Keep the original when stripping "第N集" would empty the title.
        if tmp!= u'': line = tmp
        return line.encode('utf-8', 'ignore')

    def deal_people(self, name, peoplelist = []):
        """Find person names from *peoplelist* inside *name* using a
        longest-prefix trie match and return ``(name, matched_names)``
        where matched names are utf-8 byte strings.

        NOTE(review): *name* is returned unchanged — the ``name2``
        accumulator below is built but never used, so matched names are
        reported but not actually removed from the title.  Confirm whether
        returning ``name2`` was intended.
        """

        if len( name) <= 3:
            return name, []

        if len( peoplelist ) <= 0:
            return name, []

        if not isinstance( peoplelist, list ) :
            return name, []

        # Build a character trie over every name variant (>= 2 chars each).
        bFlag = False
        pname_dic = pygtrie.CharTrie()
        for pname in peoplelist:
            pnames = format_peoplename( pname ) 
            for word in pnames:
                word = word.decode('utf-8', 'ignore').strip()
                if len( word ) <= 1:
                    continue
                pname_dic[ word ] = 1
                bFlag = True
        if bFlag == False:
            # No usable variant — nothing to match.
            return name, []

        # Scan: at each position take the longest matching name, otherwise
        # advance one character.
        # NOTE(review): `res is None` assumes a pygtrie version whose
        # longest_prefix returns (None, None) on a miss — verify against
        # the bundled utils.pygtrie.
        i, num = 0, len( name )
        name2 = u''
        del_pnames = []

        while i < num :
            res, val = pname_dic.longest_prefix( name[i:])
            if res is None:
                i += 1
                name2 += name[i-1]
                continue
            i += len(res) 
            del_pnames.append( res.encode('utf-8', 'ignore') ) 
            # Swallow a trailing '版' directly after a matched name
            # ("<name>版" = "<name> edition").
            if i < num and name[i] == u'版':
                i+=1
        return name, del_pnames


    def extract_fragment(self, name ):
        lexwords = {}

        if len(name) <= 3:
            return name, lexwords

        #类别词
        if name.startswith(u'电视剧 '):
            name = name[4:]
            lexwords['电视剧'] = 4
        elif name.startswith(u'电影 '):
            name = name[3:]
            lexwords['电影'] = 4
        elif name.startswith(u' 电视剧'):
            name = name[:-4]
            lexwords['电视剧'] = 4
        elif name.startswith(u' 电影'):
            name = name[:-3]
            lexwords['电影'] = 4

        #特殊处理【08日影】蛇女/咒灵-DVD中字 
        imp_words = []
        if name.endswith(u'-dvd中字'):
            name = name[:-6].strip()
            lexwords['dvd'] = 100
            lexwords['中字'] = 1

        #国家＋年份
        r = self.year_area_pattern.search( name )
        if r is not None:
            word = r.groups()[0]
            name = name.replace(word, u'', 1)
            word = word[1:-1].replace(u' ', u'')

            area = word[2:].encode('utf-8', 'ignore')
            lexwords[ area ] = 2

            year = word[:2].encode('utf-8', 'ignore')
            if year.startswith('0'): 
                year = '20'+year
            elif year.startswith('1'): 
                year = '20'+year
            else: 
                year = '19'+year
            if year >= "1970" and year <= cur_year:
                lexwords[year] = 5

        #地区
        name, area = deal_country(name)
        for word in area:
            lexwords[word] = 2

        #语言
        name, lang = deal_lang( name )
        for word in lang:
            lexwords[word] = 1

        #处理年份
        name, year = deal_year( name)
        for word in year:
            word = word.strip()
            lexwords[word] = 5

        #日期 
        name, days = deal_days(name)
        for word in days:
            lexwords[word] = 5
        #第几季／部／集／
        name, part = deal_part( name )
        for word in part:
            lexwords[word] = 6

        r = self.dig_pattern_en.search( name )
        if r is not None:
            w = r.group()
            name = name.replace( w, u'')
            w = w[1:-1].encode('utf-8', 'ignore').strip()
            if len(w) == 4 and w >= "1970" and w <= cur_year:
                lexwords[w] = 5
            else:
                lexwords[w] = 6
        #版
        if len(name) >= 5 and name.endswith(u'版') and name[-4:] == u' ':
            word = name[-3:].encode('utf-8', 'ignore')
            imp = wordTool.get_word_imps( word ) 
            if imp is not None:
                lexwords[word] = imp
                name = name[:-4].strip()

        return name, lexwords


    def parse_words( self, name, people = [] ):
        """Parse *name* into a list of ``{'word', 'flag'}`` dicts plus the
        lexword info map.  Han digits inside words are normalized to ASCII.
        """
        words, flags, infos = self.parse_full( name, people )

        words = self.convHan2Dig( words )
        if len(words) != len(flags):
            # The word and flag streams must stay aligned; bail out hard.
            sys.exit()

        allwords = [ {'word': w, 'flag': f} for w, f in zip(words, flags) ]
        return allwords, infos

    def parse_full( self, name, people = [] ):
        """Full parse of a video title.

        Returns ``(words, flags, lexwords)``: the kept index words, their
        part-of-speech flags (parallel list), and a map of extracted
        fragment -> tag id (1=language, 2=area, 5=year/date, 6=season/part,
        10=person, 100/200=strong/important rule words; see the ``dmap``
        table at the bottom of this file).
        """
        # Normalize Han digits ("第三季" -> "第3季") and drop "第N集".
        name = self._convdigit( name )
        if isinstance( name, str):
            name = name.decode('utf-8', 'ignore')

        # Drop "第N集"/"全N集"/"共N集" fragments, then lower-case.
        name = self.epi_pattern_del.sub( u'', name )
        name = name.lower()
        # Rule-based fragment extraction.
        name, lexwords = self.extract_fragment( name )

        # Did the rules already find a season/part marker (tag 6)?
        findPart = False
        for w in lexwords:
            if lexwords[w] == 6:
                findPart = True

        # Bracketed / trailing 上/中/下 part markers also count.
        if not findPart:
            parts = self.part_pattern_01.findall( name )
            if parts:
                findPart = True

        # Character-level normalization: '·'/'.' survive only between two
        # digits (as a decimal point); '之'/'&'/'-' become spaces; the
        # full-width colon becomes ASCII.
        allnum = len(name)
        allchars = []
        for i in range(allnum): 
            ch = name[i]
            if ch == u'.' or ch == u'·':
                if i-1>=0 and name[i-1].isdigit()  and i+1< allnum and name[i+1].isdigit():
                    if ch == u'·': ch = u'.'
                else:
                    ch = u' '
            if ch == u'之' or ch == u'&' or ch == u'-':
                ch = u' '
            elif ch == u'：':
                ch = u':'
            allchars.append( ch )
        name = u''.join( allchars ).strip()

        # Person names (tag 10).
        name, del_pnames= self.deal_people( name, people )        
        for pname in del_pnames:
            # BUGFIX: was `lexwords[w] = 10` — `w` is a stale leftover from
            # the findPart loop above; each extracted person name must be
            # the key.
            lexwords[pname] = 10

        # Output words and their POS flags (kept strictly parallel).
        newwords = []
        newflags = []
        # Segment (the jieba wrapper works on utf-8 byte strings).
        name = name.encode('utf-8', 'ignore')
        words = cutwords( name )
        if words is None:
            return [], [], lexwords
        # Single token: keep it unconditionally.
        # (BUGFIX: this `if` was indented with a tab — a tab/space mix.)
        if len(words) == 1:
            word = words[0]
            return [ word['word'] ], [ word['flag'] ], lexwords

        allnum = len( words )
        for i in range( allnum ):
            word = words[i]
            flag = word['flag']
            w = word['word'].strip()

            if w == '': continue
            # Connective/filler single characters are dropped.
            if w == '之' and len(newwords) > 0:
                continue
            if w == '的':
                continue
            if w == '版':
                continue
            # Japanese katakana fragment ("Stray Dogs"): keep as a noun.
            # NOTE(review): `in` on a string is a substring test, so any
            # single katakana character of this string also matches —
            # confirm that is intended.
            if w in 'ストレイドッグスストレイドッグス':
                newwords.append( word['word'] ) 
                newflags.append( 'n' )
                continue
            # Roman numerals (glyph or ASCII).
            if w in self._romawords:
                if len(newwords) > 0 and newwords[-1] == 'part':
                    # "part ii" — the pair is a part marker; drop both.
                    newwords.pop()
                    newflags.pop()
                    continue
                if len(words) <= 0:
                    newwords.append( w )
                    newflags.append( flag )
                    lexwords[w] = 6
                    continue
                if w == 'x': 
                    newwords.append( w )
                    newflags.append( flag )
                    continue
                if len(newwords) > 0 and w == 'v' and newwords[-1] == '视频': 
                    newwords.append( w )
                    newflags.append( flag )
                    continue
                w = self._romawords[w] 
                if len(newwords) == 0 or findPart: # first word, or a part already found
                    newwords.append( w )
                    newflags.append( flag )
                    continue
                pword  = newwords[-1]
                if pword.isalpha(): # directly after an alphabetic word
                    newwords.append( w )
                    newflags.append( flag )
                    continue

                if w == 'v' and pword == '深':
                    newwords.append( w )
                    newflags.append( flag )
                    continue

                if w == 'i' and pword == 'and':
                    newwords.append( w )
                    newflags.append( flag )
                    continue

                if i == allnum-1: # last token -> part number
                    lexwords[w] = 6
                    continue
                nextword = words[i+1]['word'] 
                if nextword in ['领', '形', '型']:
                    newwords.append( w )
                    newflags.append( flag )
                    continue

                if nextword == "" and i+2 < allnum:
                    nextword = words[i+2]['word'] 
                if nextword.isalpha():
                    newwords.append( w )
                    newflags.append( flag )
                    continue
                lexwords[w] = 6
                continue

            # Decimal split by the segmenter: "3" "." "5" -> "3.5".
            if w.isdigit() and i + 2 < allnum:
                if words[i+1]['word'] == '.' and words[i+2]['word'].isdigit():
                    w = w + '.' + words[i+2]['word']
                    newwords.append( w )
                    newflags.append( flag )
                    words[i+2]['word'] = ''
                    continue
            # 上/下 not in tail position and followed by a hanzi: keep.
            if (w == '上'or w == '下') and i+1 < allnum:
                nextchar = words[i+1]['word'].decode('utf-8', 'ignore')
                if nextchar and is_unicode_hanzi( nextchar[0]):
                    newwords.append( w )
                    newflags.append( flag )
                    continue

            imp = wordTool.get_word_imps( w ) 
            # Important words from the word table.
            if imp is not None:
                if not self._is_TV_Flag and imp == 100:
                    # BUGFIX: was `w.endsiwth('卫视版')` — a typo that would
                    # raise AttributeError whenever this branch was reached.
                    if w.endswith('卫视') or w.endswith('卫视版'):
                        newwords.append( w )
                        newflags.append( flag )
                        continue

                lexwords[w] = imp
                continue
            # Language ("...语") known to the language table.
            if w.endswith('语') and langTool.exists( w):
                lexwords[w] = 1
                continue

            # Special cases: canonical 007 / 365.
            if w == '007' or w == '零零七':
                newwords.append( '007' )
                newflags.append( 'n' )
                continue
            if w == '365' or w == '三六五':
                newwords.append( '365' )
                newflags.append( 'n' )
                continue
            if w == '中' and i+1<allnum and words[i+1]['word'] == '字':
                lexwords['中字'] = 1
                # NOTE(review): this `i += 1` has no effect — `for i in
                # range(...)` rebinds i on the next iteration, so the '字'
                # token is still visited.  Confirm before restructuring.
                i += 1
                continue

            # Number + quantifier word: keep as a real word.
            if flag == 'm' and i+1 < allnum:
                nextword = words[i+1]['word']
                if nextword in self._quanwords or nextword == '%':
                    w = fromHanDigit(w)
                    newwords.append( w )
                    newflags.append( flag )
                    continue
                if len(nextword) >= 6:
                    a1 = nextword.decode('utf-8', 'ignore')[0].encode('utf-8', 'ignore')
                    if a1 in ['大', '个', '种', '天']:
                        newwords.append( w )
                        newflags.append( flag )
                        continue

            if w == 'i' and i+1 < allnum:
                if words[i+1]['word'].isalpha():
                    newwords.append( w )
                    newflags.append( flag )
                    continue

            # "N集" (episode N): drop the number...
            if w.isdigit() and i + 1 < allnum:
                if words[i+1]['word'] == '集':
                    continue
            # ...and the counter word itself.
            if w == '集' and i - 1>0 and words[i-1]['flag'] == 'm':
                continue

            # 全/共 ("all"/"total") next to a number: drop.
            if (w == '共' or w == '全') and i + 1 < allnum:
                if words[i+1]['flag']=='m':
                    continue
            if (w == '共' or w == '全') and i - 1 >= 0:
                if words[i-1]['flag'] == 'm':
                    continue

            if w.isdigit(): # ASCII number
                if i > 0 and words[i-1]['word'] in ['（', '(', '【', '[', '〈', '〖']:
                    newwords.append( w )
                    newflags.append( flag )
                    continue

                if i+1 < allnum and words[i+1]['word'] in self._quanwords:
                    newwords.append( w )
                    newflags.append( flag )
                    continue
                if findPart or w.startswith('0') or int(w) >= 20 or len(newwords) <= 0:
                    newwords.append(w)
                    newflags.append( flag )
                    continue
                if words[i-1]['word'] in ['+', '/', '*', '-'] or newwords[-1].isdigit(): 
                    newwords.append(w)
                    newflags.append( flag )
                    continue

                # Score patterns: "3比2" / "2对2" / "8进4" / "1vs1".
                if len(newwords) >= 2 and newwords[-1] in ['对', '比', '进', 'vs'] and newwords[-2].isdigit():
                    newwords.append(w)
                    newflags.append( flag )
                    continue

                if i+1 == allnum:
                    lexwords[w] = 6
                    continue
                flag = 'm'
            elif flag == 'm':  # Han number
                if findPart or len(newwords) <= 0:
                    w = fromHanDigit(w)
                    newwords.append(w)
                    newflags.append( flag )
                    continue
                t = w.decode('utf-8', 'ignore').strip()
                # BUGFIX: the character class was missing 七 (seven); every
                # other Han-digit class in this file includes it.
                t1 = re.sub(u'[零一二三四五六七八九十]+',u'',t )
                if len(t1) != 0 or len(t) > 1:
                    newwords.append( w )
                    newflags.append( flag )
                    continue
            # ASCII number (flag may have just been set to 'm' above).
            if w.isdigit() and i + 1 < allnum:
                pword = newwords[-1]

                # Month + day: "12月 31" (pword is a utf-8 byte string, so
                # [:-3] drops the 3-byte 月).
                if pword.endswith('月') and pword[:-3] in ['1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '11', '12']:
                    newwords.append(w)
                    newflags.append( flag )
                    continue
                if pword == '又' or pword == '仅':
                    newwords.append(w)
                    newflags.append( flag )
                    continue
                # "10对10" — second number of a score.
                if pword in ['比', '对', '进', 'vs'] and len(newwords) >= 2 and newwords[-2].isdigit():
                    newwords.append(w)
                    newflags.append( flag )
                    continue

                nextword = words[i+1]['word']
                if nextword.startswith('多'):
                    newwords.append(w)
                    newflags.append( flag )
                    continue
                if pword in ['巷', '路'] and nextword == '弄':
                    newwords.append(w)
                    newflags.append( flag )
                    continue
                # "10对10" — first number of a score.
                if nextword in ['比', '对', '进', 'vs'] and i+2<allnum and words[i+2]['word'].isdigit():
                    newwords.append(w)
                    newflags.append( flag )
                    continue

                if nextword != "" and nextword == '大':
                    newwords.append(w)
                    newflags.append( flag )
                    continue
                if nextword == '' and i+2< allnum:
                    # BUGFIX: was words[i+1] again — a no-op, since nextword
                    # is already known to be ''; skip over the consumed
                    # token like the roman-numeral branch above does.
                    nextword = words[i+2]['word']
                if nextword in ['*', '/', '+'] or nextword.isdigit(): 
                    newwords.append(w)
                    newflags.append( flag )
                    continue
                if nextword in ['）', ')', '》', ']', '］', '〗', '之', '〕', '〉']:
                    lexwords[w] = 6
                    continue
                if nextword == '':
                    if i+2<allnum and words[i+2]['word'].isalnum(): 
                        newwords.append(w)
                        newflags.append( flag )
                        continue
                    else:
                        lexwords[w] = 6
                        continue
                if nextword != "": # followed by a hanzi
                    if is_unicode_hanzi( nextword.decode('utf-8', 'ignore')[0]):
                        if i+2 == allnum and len(nextword) == 3:
                            newwords.append(w)
                            newflags.append( flag )
                            continue
                        else:
                            lexwords[w] = 6
                            continue
                # Separator before the end -> part number, unless another
                # number follows.
                toks = {'-':1, '—':1, ':':1, '：':1}
                if nextword not in toks: 
                    newwords.append(w)
                    newflags.append( flag )
                    continue
                if i+2 >= allnum:
                    lexwords[w] = 6
                    continue

                a2 = words[i+2]['word']
                if a2.isdigit():
                    newwords.append(w)
                    newflags.append( flag )
                    continue
                lexwords[w] = 6
                continue
            # Han number (converted to ASCII).
            if flag == 'm':
                w = fromHanDigit(w)
                if len(newwords) <= 0 or not w.isdigit() or findPart or int(w)>10:
                    newwords.append(w)
                    newflags.append( flag )
                    continue
                if len(w) == 4 and w >= "1970" and w <= cur_year:
                    lexwords[w] = 5 # year
                    continue

                pword = words[i-1]['word']
                if pword in fame_name:
                    newwords.append(w)
                    newflags.append( flag )
                    continue
                if pword in [ '又', '有'] :
                    newwords.append(w)
                    newflags.append( flag )
                    continue
                if len(newwords) >= 2 and pword in ['比', '对', '进', 'vs']:
                    tmp = newwords[-2]
                    if tmp.isdigit() and int(tmp) <= 32:
                        newwords.append(w)
                        newflags.append( flag )
                        continue
                if len(newwords) >= 2 and w in ['1', '2'] and pword == '说' and newwords[-2] == '有一':
                    newwords.append(w)
                    newflags.append( flag )
                    continue

                # Number at the very end -> part number.
                if i+1==allnum:
                    lexwords[w] = 6
                    continue
                nextword = words[i+1]['word']
                if nextword in ['　', ' ', '', '-', '—', '：', ':', ']', '】', '）', ')', '］', '》', '〗']:
                    lexwords[w] = 6
                    continue

                if is_unicode_hanzi( nextword.decode('utf-8', 'ignore')[0]):
                    newwords.append(w)
                    newflags.append( flag )
                    continue
                lexwords[w] = 6
                continue

            # "...版" (edition): area editions go to lexwords, rest kept.
            if w.endswith('版'):
                if areaTool.exists( w[:-3]) :
                    lexwords[w] = 2
                    continue
                if areaTool.exists( w) :
                    lexwords[w] = 2
                    continue
                newwords.append(w)
                newflags.append( flag )
                continue

            # Language.
            if w in self.langs or w == '粤':
                lexwords[w] = 1
                continue

            # Area/country: tagged only when not embedded between hanzi
            # words (then it is part of the title itself).
            if w in self.countrys:
                if i+1<allnum:
                    nextword = words[i+1]['word']
                    if nextword in ['版本', '版']:
                        lexwords[w] = 2
                        continue
                    if nextword != "" and is_unicode_hanzi( nextword.decode('utf-8', 'ignore')[0]):
                        newwords.append(w)
                        newflags.append( flag )
                        continue
                if i-1>0:
                    pword = words[i-1]['word']
                    if pword != "" and is_unicode_hanzi(pword.decode('utf-8', 'ignore')[0]):
                        newwords.append(w)
                        newflags.append( flag )
                        continue
                lexwords[w] = 2
                continue

            # Merge "<word> 版" into "<word>版".
            if i+1 < allnum and words[i+1]['word'] == '版':
                w = w+'版'
                newwords.append(w)
                newflags.append( flag )
                continue

            # Punctuation.
            if flag == 'w': continue
            if flag == 'x' and not w.isalnum():
                continue
            newwords.append( word['word'] ) 
            newflags.append( flag )
        return newwords, newflags, lexwords


    def convHan2Dig(self, oriwords):
        newwords = []
        for word in oriwords:
            ori = word
            word = word.decode('utf-8', 'ignore')
            digs = self.dig_pattern.findall(word)
            if not digs:
                newwords.append( ori )
                continue
            for temp in digs:
                dig = fromHanDigit( temp )
                if dig.isdigit() and dig!= "0":
                    dig = dig.decode('utf-8', 'ignore')
                    word = word.replace( temp, dig, 1 )
            newwords.append( word.encode('utf-8', 'ignore') )
            
        return newwords

def test_douban():
    """Ad-hoc smoke test: parse every title from the local douban dump and
    print the result.

    Python 2 script helper; requires the sibling ``douban`` module and the
    ``../douban/all_douban.json`` data file — not runnable in isolation.
    """
    from douban import DoubanItem
    pa = VideoNameParser()
    f = open('../douban/all_douban.json', 'rb')
    for line in f:
        line = line.strip()
        if line == '': continue
        item = DoubanItem( line )
        if item.is_empty():
            continue
        # '名称' = title, '主演' = starring (comma-separated names).
        name = item.get_by_ckey('名称')
        starringName = item.get_by_ckey('主演')
        if starringName is None:
            starringName = ''

        result = pa.parse( name, starringName.split(',') ) 
        #print name
        print '\t###', result
        pass
    f.close()
    pass


def deal_alias():
    """Ad-hoc analysis helper: read tab-separated alias pairs from
    ent_map.txt and compare their normalized word sets.

    NOTE(review): the final comparison result and the ``a2`` replacements
    are computed but never used or printed (the prints are commented out)
    — this looks like leftover debugging scaffolding.
    """
    pa = VideoNameParser()
    f = open('ent_map.txt', 'rb')
    for line in f:
        line = line.strip()
        if line == '': continue
        # Normalize traditional -> simplified before splitting.
        line = tradition2simple(line)
        fields = line.split('\t')
        if len(fields) != 2: continue
        w1, w2 = fields
        a1, a2 = fields
        if len(w1.decode('utf-8', 'ignore')) <= 1:continue
        if len(w2.decode('utf-8', 'ignore')) <= 1:continue
        # Keep non-punctuation, non-empty words from the first alias...
        words1, infos1 = pa.parse_words( w1 ) 
        words1 = [ w['word'] for w in words1 if w['flag'] != 'w' and w['word'] != "" ]

        # ...plus long "XX卫视" names, 续/续集 (sequel) markers and
        # importance-200 words from the extracted fragments.
        for w in infos1 :
            if w.endswith('卫视') and len(w) >= 12:
                words1.append( w )
                continue
            if w == '续集' or w == '续':
                words1.append( w )
                continue
            if infos1[w] == 200:
                words1.append( w )
                continue

        # Same treatment for the second alias.
        words2, infos2 = pa.parse_words( w2 ) 
        words2 = [ w['word'] for w in words2 if w['flag'] != 'w' and w['word'] != "" ]
        for w in infos2 :
            if w.endswith('卫视') and len(w) >= 12:
                words2.append( w )
                continue
            if w == '续集' or w == '续':
                words2.append( w )
                continue
            if infos2[w] == 200:
                words2.append( w )
                continue

        # Order-insensitive comparison of the two word sets.
        w1 = ''.join(list(set(words1)))
        w2 = ''.join(list(set(words2)))
        if w1 == w2: 
            #print line
            continue
        pass
        a2 = a2.replace('电影版', '').replace('连续剧版','')
        a2 = a2.replace('动画版', '').replace('美国版','')
        a2 = a2.replace('动画版', '').replace('电视剧版','')
        #print line
    f.close()
    pass

# Global parser instance, built at import time (reads quan_words.txt).
gpa = VideoNameParser()
import time

if __name__ == '__main__':

    # Debug driver: tag id -> human-readable label for the printout below.
    dmap = {1:'语言', 2:'区域', 3:'卫视', 4:'分类', 5:'年份/日期', 6:'季/部', 8:'全集', 10:'人物', 100:'强规则', 200:'重要词'}
    #f = open('data/all_videonames.txt', 'rb')
    #f = open('data/all_videonames.txt', 'rb')
    # NOTE(review): hard-coded absolute path; `f` is opened but the loop
    # below iterates the inline list `l` instead.
    f = open('/letv/zoujiaqi/search_and_rec/knowledge_graph/linking/data/tagnames.txt', 'rb')
    pa = VideoNameParser()
    #l = ['天机·富春山居图']
    l = ['四川大学']
    #for line in f:
    for line in l:
    #for line in ['二百三十', '四是多少', '十五个', '五个', '十来天', '七月半', '八十年', '7月半']:
        if line == '': continue
        xwords, flags ,inf1 = pa.parse_full( line, [] ) 
        xwords = pa.convHan2Dig(xwords)
        if len(xwords) != len(flags):
            # Parallel word/flag lists got out of sync — abort.
            sys.exit()
        flag = False
        # Strip bracketed Han/ASCII part numbers before the tail checks.
        line = line.replace('（一）', '')
        line = line.replace('（二）', '')
        line = line.replace('（三）', '')
        line = line.replace('（四）', '')
        line = line.replace('（五）', '')
        line = line.replace('（六）', '')
        line = line.replace('（七）', '')
        line = line.replace('（八）', '')
        line = line.replace('（九）', '')
        line = line.replace('(一)', '')
        line = line.replace('(二)', '')
        line = line.replace('(三)', '')
        line = line.replace('(四)', '')
        line = line.replace('(五)', '')
        line = re.sub('（[0-9]+）', '', line)
        line = re.sub('\([0-9]+\)', '', line)
        line = re.sub('【[0-9]+】', '', line)
        # Mark titles whose tag-6 (season/part) fragment is "suspicious":
        # not an explicit 第...部/季/期/篇/辑/届/话 form and not simply the
        # title's trailing number.
        for key in inf1: 
            if inf1[key] != 6: continue
            key = key.strip()
            if key.startswith('第') and key.endswith('部'): continue
            if key.startswith('第') and key.endswith('季'): continue
            if key.startswith('第') and key.endswith('期'): continue
            if key.startswith('第') and key.endswith('篇'): continue
            if key.startswith('第') and key.endswith('辑'): continue
            if key.startswith('第') and key.endswith('届'): continue
            if key.startswith('第') and key.endswith('话'): continue
            if key.isdigit() and line.endswith(key): continue
            if key.isalpha() and len(key) >= 2: continue
            if key.isalpha() and line.endswith(key): continue
            flag = True
            break
        #if not flag: continue
        print line 
        print '\t\t',' '.join(xwords)
        for key in inf1:
            t = inf1[key]
            if t not in dmap: continue
            print '\t', dmap[t] ,"\t",key
        
    #f.close()
    pass
