#! -*- coding:utf-8 -*-
'''
www.tcm100.com
get 黄煌的医案
'''
import sys, os, time, uuid, re, codecs
import urllib2
import chardet
from bs4 import BeautifulSoup
from pymongo import MongoClient

# Python 2 hack: site-removed setdefaultencoding is restored via reload(sys)
# so implicit str<->unicode coercions use utf-8 instead of ascii.
# NOTE(review): this is discouraged even in py2 and does not exist in py3.
reload(sys)
sys.setdefaultencoding('utf-8')


def gethtml(url):
    #headers = {'User-Agent': 'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.6) Gecko/20091201 Firefox/3.5.6'}
    #req = urllib2.Request(url=url, headers=headers)
    #data = urllib2.urlopen(req).read()
    content = urllib2.urlopen(url).read()
    typeEncode = sys.getfilesystemencoding()  ##系统默认编码
    print chardet.detect(content)
    infoencode = chardet.detect(content).get('encoding', 'utf-8')  ##通过第3方模块来自动提取网页的编码
    html = content.decode(infoencode, 'ignore').encode("GB18030")
    #content =urllib2.urlopen(url).read().decode("gb2312").encode("utf-8") #decode("UTF-8").encode(systype) #data #.decode("UTF-8") #.encode(systype)
    #print html
    return html

def saveurl(url, file):
    """Download *url* via gethtml() and persist the result to *file* as UTF-8.

    Args:
        url: page to fetch.
        file: destination path (name shadows the py2 builtin; kept so
              existing keyword-argument callers keep working).
    """
    content = gethtml(url)
    # Context manager guarantees the handle is closed even if write() raises
    # (the original leaked the handle on a write error).
    with codecs.open(file, 'w', 'utf-8') as man_file:
        man_file.write(content)

def saveContent(content, file):
    """Write *content* to *file* as UTF-8 text.

    Args:
        content: unicode text to persist.
        file: destination path (name shadows the py2 builtin; kept so
              existing keyword-argument callers keep working).
    """
    # Context manager guarantees the handle is closed even if write() raises
    # (the original leaked the handle on a write error).
    with codecs.open(file, 'w', 'utf-8') as man_file:
        man_file.write(content)

def readdata(file):
    """Return the entire contents of *file* as a string.

    Opened in text mode with the platform default encoding, matching the
    original `open(file, "r")` behavior.

    Args:
        file: path to read (name shadows the py2 builtin; kept so
              existing keyword-argument callers keep working).
    """
    # `with` closes the handle even if read() raises (original leaked it).
    with open(file, "r") as read_file:
        return read_file.read()

def handleYian(htmltext):
    htmltext = re.sub(r'&nbsp;|\xa0|\\xa0|\u3000|\\u3000|\\u0020|\u0020', '', str(htmltext))
    findReturn = BeautifulSoup(htmltext, "html.parser",from_encoding="GB18030")
    d  = findReturn.find('div',{'class' : 'content'})
    print 'content:',str(d.text.encode("GB18030"))
    links = d.find_all('br')
    for hos in links:
        hos.insert_before("\n")
    ret = d.get_text("").replace(" ", "")
    ret.strip()
    title=findReturn.find('table').find('div',{'class':'title'}).find_next('div')
    print 'desc',str(ret.encode("GB18030"))
    return (str(ret.encode("GB18030")),str(title.text.encode("GB18030")))

def save_mongo(uuid, desc, title):
    """Insert one case record into health.kickhtml on the remote MongoDB.

    Args:
        uuid: document id string (parameter shadows the `uuid` module;
              kept so existing keyword-argument callers keep working).
        desc: case body text (utf-8).
        title: case title (utf-8).
    """
    client = MongoClient("okzor.com", 27017)
    db = client.health
    # NOTE(review): hard-coded credentials in source — move to config.
    db.authenticate("hou", "hou@123")
    record = {
        "uuid": uuid,
        "desc": desc,
        "title": title,
        "author": "huanghuang",
    }
    # insert() takes a list of documents, matching the original call shape.
    db.kickhtml.insert([record])

def updateMongo():
    """Backfill author="huanghuang" on every document in health.kickhtml."""
    client = MongoClient("okzor.com", 27017)
    db = client.health
    # NOTE(review): hard-coded credentials in source — move to config.
    db.authenticate("hou", "hou@123")
    # One update() per document, keyed by its uuid, as in the original.
    for doc in db.kickhtml.find({}):
        db.kickhtml.update(
            {"uuid": str(doc['uuid'])},
            {"$set": {"author": "huanghuang"}},
        )
def get_huanghuang():
    url="http://www.tcm100.com/user/hhyian/index.htm"
    path="/root/python/html/tcm100/huanghuang/"
    content =gethtml(url)
    content = re.sub(r'&nbsp;|\xa0|\\xa0|\u3000|\\u3000|\\u0020|\u0020', '', str(content))
    findReturn = BeautifulSoup(content, "html.parser")
    table = findReturn.find("table") #select('table[cellPadding="2"]')
    #print 'table',table
    links = table.find_all('a')
    for hos in links:
        print hos['href']
        docid=str(uuid.uuid1()).replace("-", "")
        filename=''.join([path,docid])
        content =gethtml(hos['href'])
        #saveContent(content.decode("GB18030").encode("utf-8"),filename)
        #saveurl(hos['href'],filename)
        (desc,title)=handleYian(content)
        #print 'save mongodb:',desc,title
        save_mongo(docid,desc.decode("GB18030").encode("utf-8"),title.decode("GB18030").encode("utf-8"))
        time.sleep(1)
        #break

if __name__ == '__main__':
    # Entry point. The actual crawl is currently disabled; running the
    # script only prints a banner.
    #os.mkdir("tcm100")
    print "tcm100_huanghuang"
    #get_huanghuang()






