#coding=UTF-8

import codecs
import json
import os
import sys
import traceback
import urllib2

from HTMLParser import HTMLParser

from ictclas import *

# Wikipedia API endpoint returning a JSON 'extracts' payload; the
# URL-encoded page title is appended by fetchHtml().
baseUrl = 'http://zh.wikipedia.org/w/api.php?action=query&format=json&prop=extracts&titles='
# Browser-like request headers -- presumably to avoid servers rejecting
# the default urllib2 User-Agent; confirm whether still required.
headers = {
    'User-Agent':'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.11 (KHTML, like Gecko) Chrome/23.0.1271.97 Safari/537.11',
    'Accept-Charset':'GBK,utf-8;q=0.7,*;q=0.3',
    'Accept-Language':'zh-CN,zh;q=0.8,en-US;q=0.6,en;q=0.4'
}

def fetchHtml(title, timeout):
    """Fetch the raw Wikipedia API response body for *title*.

    title   -- URL-encoded page title, appended directly to baseUrl
    timeout -- socket timeout (seconds) passed to urllib2.urlopen
    Returns the response body as returned by read().
    Raises urllib2.URLError (or socket.timeout) on network failure.
    """
    print('now_parsing@' + title)
    req = urllib2.Request(url=baseUrl + title, headers=headers)
    fp = urllib2.urlopen(req, None, timeout)
    try:
        # try/finally so the connection is released even if read() raises;
        # the original leaked fp on a read error.
        return fp.read()
    finally:
        fp.close()

def extract(txt):
    """Segment *txt* with ICTCLAS and collect candidate name tokens.

    Keeps a token when its main POS attribute is 'n' (noun) and its
    sub-attribute starts with 'r' -- presumably person-name nouns;
    confirm against the ictclas tag set.

    txt -- UTF-8 text to analyze.
    Returns a set of the extracted (stripped) token strings; also prints
    each one with its percent-encoded form.
    """
    cwa = Cwa()
    segmented = cwa.analyzeWord(txt, 'UTF-8')
    names = set()

    for token in segmented.split(' '):
        parts = token.split('/')
        if len(parts) < 2:
            continue  # no POS tag attached to this token

        wa = WordAttr(parts[1])
        if wa.mainAttr() != 'n':
            continue
        sub = wa.subAttr()
        if sub is not None and sub.startswith('r'):
            names.add(parts[0].strip())

    for name in names:
        print('%s:%s' % (name, urllib2.quote(name.encode('utf-8'))))

    return names
        
def parse():
    """Fetch the zh-wiki extract for the hard-coded title and run extract().

    The title '%E9%B2%81%E8%BF%85' is the URL-encoded page name; the API
    returns a 'pages' mapping keyed by page id, of which the first entry's
    'extract' field is used.
    """
    ret = fetchHtml('%E9%B2%81%E8%BF%85', 1000)
    jsonObj = json.loads(ret)
    pages = jsonObj['query']['pages']

    content = None
    # Only the values are needed -- the page-id keys are unused.
    for page in pages.values():
        content = page['extract']
        print(len(content))
        break

    # Guard: an empty 'pages' mapping would leave content None and the
    # original code would crash inside extract().
    if content is not None:
        extract(content)
            
if __name__=='__main__':
    parse()
    # 'pause' is a Windows-only shell builtin that keeps the console
    # window open; requires 'import os' (missing in the original file).
    os.system('pause')