# -*-coding: utf-8 -*-
import pycurl
import StringIO
import sys
import lxml.html as HTML
import datetime
#import chardet

# HACK (Python 2 only): sys.setdefaultencoding is deleted from the sys module
# at startup; reload(sys) restores it so implicit str<->unicode conversions
# throughout this script assume UTF-8 instead of ASCII.
reload(sys)
sys.setdefaultencoding("utf-8")


def decodeHtmlEntity(s):
    """Re-encode *s* through the locale's default encoding (dropping
    undecodable bytes) and map non-breaking spaces to plain spaces.

    Returns None for empty or None input.
    """
    if not s:  # covers both None and '' (the original `s=='' or not s` was redundant)
        return None
    import locale
    # Hoisted: the original called locale.getdefaultlocale() twice per call.
    encoding = locale.getdefaultlocale()[1]
    # BUG FIX: the original replaced the literal string "xc2xa0" (missing
    # backslashes), which can never appear in real text.  The intent was the
    # UTF-8 byte sequence of U+00A0 (non-breaking space), i.e. "\xc2\xa0".
    return s.decode(encoding, "ignore").encode(encoding).replace("\xc2\xa0", " ")


def logit(content):
    """Append *content* to log.txt, prefixed with a 'yy-mm-dd HH:MM:SS: ' timestamp."""
    # 'with' closes the handle even if write() raises, replacing the manual
    # try/finally; also renamed the local so it no longer shadows the
    # Python 2 builtin 'file'.
    with open('log.txt', 'a') as log_file:
        timestr = datetime.datetime.now().strftime('%y-%m-%d %H:%M:%S') + ': '
        log_file.write(timestr + content + '\r\n')


def getContent(url):
    """Fetch *url* with pycurl and return the raw response body.

    Follows up to 5 redirects, times out after 30 seconds, and also
    logs the page (decoded from GBK) via logit().
    """
    buf = StringIO.StringIO()
    curl = pycurl.Curl()
    curl.setopt(pycurl.URL, url)
    curl.setopt(pycurl.TIMEOUT, 30)
    curl.setopt(pycurl.WRITEFUNCTION, buf.write)
    curl.setopt(pycurl.USERAGENT, 'Mozilla/5.0 (Windows; U; Windows NT 6.1; zh-CN; rv:1.9.2.12) Gecko/20101026 Firefox/3.6.12')
    curl.setopt(pycurl.FOLLOWLOCATION, 1)
    curl.setopt(pycurl.MAXREDIRS, 5)
    # Proxy settings (disabled):
    #curl.setopt(pycurl.PROXY, 'http://11.11.11.11:8080')
    #curl.setopt(pycurl.PROXYUSERPWD, 'aaa:aaa')
    curl.perform()
    body = buf.getvalue()
    logit(body.decode('gbk'))  # keep a decoded copy of the page in the log
    return body


def getItem(doc, exp):
    """Parse *doc* as an HTML document and return the list of nodes
    matched by the XPath expression *exp*."""
    tree = HTML.document_fromstring(doc)
    return tree.xpath(exp)

# --- Ad-hoc script: search Taobao for a keyword and log item titles ---
# Earlier experiment kept for reference:
#doc = """<html><head><title>why len(y) == 1</title><script>var y = 1</script></head>sample.<html>"""
#exp = "//title"
#print getItem(doc, exp)[0].text

# Search keyword (UTF-8 Chinese: roughly "deodorizing bamboo-charcoal insoles").
kword = '除臭竹炭鞋垫'
# NOTE(review): kword is embedded in the URL without percent-encoding;
# presumably the server tolerates raw UTF-8 — confirm, or use urllib.quote().
url = 'http://s.taobao.com/search?q='+kword+'&initiative_id=staobaoz_20120908'
doc = getContent(url)
#logit(doc)
# XPath for the title text of each item in the search-result listing.
exp = '//div[@id="list-content"]/form[@id="bid-form"]/ul/li[@class="list-item"]/h3[@class="summary"]/a[@title]/text()'
content = getItem(doc, exp)
#print chardet.detect(content[0].text)
#logit(str(content))
# Number of result titles found.
count = len(content)
#print chardet.detect(content[0])
print count
for ele in content:
    # The commented lines below record prior encoding experiments.
    #pass
    #print len(ele.xpath('/h3[@class="summary"]/a[@title]'))
    #varvar = ((decodeHtmlEntity(ele)).strip()).decode('gbk')
    #type = sys.getfilesystemencoding()
    #print type
    #print chardet.detect(ele)
    #print ele.decode('gbk').encode('utf-8')
    #ele=unicode(ele, 'gbk', 'ignore').encode('utf-8', 'ignore')
    #print (ele.decode('gbk').encode('utf-8'))
    #logit(ele.decode('gbk').encode('utf-8'))
    #logit(ele.decode('gbk'))
    #logit(ele.decode('ascii'))
    # NOTE(review): titles are assumed to be GBK-encoded bytes; transcode to
    # UTF-8 for console output and the log — confirm against the page charset.
    print ele.decode('gbk').encode('utf-8')
    logit(ele.decode('gbk').encode('utf-8'))

# NOTE(review): dead code preserved as a module-level string literal (no
# runtime effect).  It is an unfinished Django view that scrapes
# read.10086.cn, and contains several latent bugs if ever revived:
# `threading.thread` should be `threading.Thread`, `__int__` should be
# `__init__`, `for i in len(novels)` should iterate `range(len(novels))`,
# and `urllib2`/`re` are never imported here.
'''
from django.http import HttpResponse
from lxml import  etree
import  lxml.html as HTML
import  math
import  threading
from models import readModel
class getDetail(threading.thread):
  def __int__(self,novel):
    threading.thread.__init__(self)
    self.novel=novel
  def run(self):
    #rm=readModel()
    novelClass=u''+self.novel.xpath("//td")[0].textContent()
    #rm.novelClass=novelClass
    #rm.save()
    print novelClass




def getFromLib(requet):
    libUrl='http://read.10086.cn/booklist?nodeId=0&fee=0&order=1&view=1&page=1'
    libContent=urllib2.urlopen(libUrl).read()
    #dom=HTML.document_fromstring(libContent)
   # novels=dom.xpath("//")
    novels=re.split(r'<tr class=',libContent)
    print len(novels)

    for i in len(novels):
        novel=novels[i+1]
        getDetail(novel)
        t = getDetail(novel)
        t.setDaemon(True)
        t.start()
    while threading.activeCount() > 4:
         pass

    return HttpResponse(libContent)

'''
