# -*-coding: utf-8 -*-
import pycurl
import StringIO
import sys
import lxml.html as HTML
import datetime
import time
#import chardet

reload(sys)
sys.setdefaultencoding("utf-8")


def logit(content, filename='log.txt'):
    '''
    Append one timestamped log line to *filename*.

    Each line has the form "yy-mm-dd HH:MM:SS: <content>" and is
    terminated with CRLF.

    :param content: text to log (one line)
    :param filename: log file path, opened in append mode
    '''
    timestr = datetime.datetime.now().strftime('%y-%m-%d %H:%M:%S')
    # 'with' guarantees the handle is closed even if write() raises;
    # also avoids shadowing the builtin name 'file'.
    with open(filename, 'a') as logfile:
        logfile.write(timestr + ': ' + content + '\r\n')


def getContent(url, codec='gbk'):
    '''
    Fetch *url* with pycurl and return the response body decoded
    with *codec* (Taobao pages default to gbk).

    Follows up to 5 redirects with a 30 second timeout.  When the
    global ``debug`` flag is set, the decoded page is dumped via
    ``logit``.  Proxy support is not implemented yet (TODO).

    :param url: URL to fetch
    :param codec: codec used to decode the raw response body
    :return: decoded page content (unicode)
    '''
    c = pycurl.Curl()
    b = StringIO.StringIO()
    try:
        c.setopt(pycurl.URL, url)
        c.setopt(pycurl.TIMEOUT, 30)
        c.setopt(pycurl.WRITEFUNCTION, b.write)
        # Pretend to be a desktop Firefox so the site serves normal HTML.
        ua = 'Mozilla/5.0 (Windows; U; Windows NT 6.1; zh-CN; rv:1.9.2.12) Gecko/20101026 Firefox/3.6.12'
        c.setopt(pycurl.USERAGENT, ua)
        c.setopt(pycurl.FOLLOWLOCATION, 1)
        c.setopt(pycurl.MAXREDIRS, 5)
        # Proxy (TODO, not wired up yet):
        #c.setopt(pycurl.PROXY, 'http://11.11.11.11:8080')
        #c.setopt(pycurl.PROXYUSERPWD, 'aaa:aaa')
        c.perform()
    finally:
        # The original leaked the Curl handle; always release it.
        c.close()
    # Decode exactly once (the original decoded twice when debug was on).
    content = b.getvalue().decode(codec)
    if debug:
        logit(content)
    return content


def getItem(doc, exp):
    '''
    Parse *doc* as an HTML document and return the list of nodes
    matching the XPath expression *exp* (possibly empty).

    :param doc: HTML source text
    :param exp: XPath expression evaluated against the document root
    :return: list of matching lxml nodes
    '''
    document_root = HTML.document_fromstring(doc)
    return document_root.xpath(exp)


def nextPage(kword, num=2):
    '''
    Build the URL of result page *num* of a Taobao search for *kword*.

    Taobao paginates by item offset, 40 results per page, so page
    ``num`` starts at offset ``(num-1)*40``.  *num* must be >= 2
    (page 1 comes from ``getFirstUrl``).

    :param kword: search keyword
    :param num: 1-based page number, >= 2
    :return: search URL for that page
    '''
    base = 'http://s.taobao.com/search?'
    dstr = datetime.date.today().isoformat().replace('-', '')
    iid = '&initiative_id=tbindexz_' + dstr
    cmd = '&commend=all'
    pre = '&newpre=null'
    # 40 items per result page; the fragment keeps the filter bar in view.
    s = '&s=' + str((num - 1) * 40) + '#J_FilterTabBar'
    return base + 'q=' + kword + iid + cmd + pre + s


def getItemLoc(doc, exp, itemid):
    '''
    Find the 1-based position of the target item on one result page.

    *itemid* has the form "id=9743300929" and is matched inside the
    href of each result link as a query parameter ("?id=..." or
    "&id=...") so that e.g. "id=97433" does not match "id=9743300929".

    :param doc: HTML document fetched from a search-result URL
    :param exp: XPath selecting the result links within *doc*
    :param itemid: item id string, e.g. "id=9743300929"
    :return: 1-based position of the item, or False if not present
    '''
    nodes = getItem(doc, exp)
    for position, node in enumerate(nodes, 1):
        nodehref = node.xpath('@href')[0]
        if debug:
            logit(nodehref)
        if ('?' + itemid in nodehref) or ('&' + itemid in nodehref):
            # The original had unreachable debug/print/break statements
            # after this return; they have been removed.
            return position
    return False


def getFirstUrl(kword):
    '''
    Build the URL of the first Taobao search-result page for *kword*.

    :param kword: search keyword
    :return: search URL for page 1
    '''
    today_str = datetime.date.today().isoformat().replace('-', '')
    pieces = [
        'http://s.taobao.com/search?q=', kword,
        '&commend=all&ssid=s5-e&search_type=item&sourceId=tb.index',
        '&initiative_id=tbindexz_', today_str,
    ]
    return ''.join(pieces)

# --- Script configuration -------------------------------------------------
debug = False                    # when True, fetched pages and hrefs are dumped via logit()
kword = '包邮 电饭煲'            # search keyword ("free shipping" + "rice cooker")
itemid = "id=17658096230"        # target item id, matched inside result link hrefs
maxPage = 100                    # give up after scanning this many result pages
page = 1                         # current result page (1-based)

# Announce the run on the console and in the log file.
# NOTE(review): console output is gbk-encoded — presumably targets a
# Chinese Windows terminal; confirm before changing.
print 'start keyword '+kword.encode('gbk')+' search for '+itemid
logit('\r\n start keyword '+kword+' search for '+itemid)
# Fetch the first result page and look for the item there.
url = getFirstUrl(kword)
doc = getContent(url)
# XPath selecting the title link of every search-result entry.
exp = '//li[@class="list-item"]/h3[1]/a[1]'
position = getItemLoc(doc, exp, itemid)
if(not position):
    # Not found on page 1: walk the remaining result pages up to maxPage.
    print 'Page '+str(page)+' not find '+itemid
    logit('Page '+str(page)+' not find '+itemid)
    while page<=maxPage:
        time.sleep(1.5)          # throttle requests between page fetches
        page = page + 1
        url = nextPage(kword, page)
        doc = getContent(url)
        position = getItemLoc(doc, exp, itemid)
        if(position):
            print 'find in position '+str(position)+' on page '+str(page)
            logit('find in position '+str(position)+' on page '+str(page))
            break
        else:
            print 'Page '+str(page)+' not find '+itemid
            logit('Page '+str(page)+' not find '+itemid)
else:
    # Found on the very first page.
    print 'find in position '+str(position)+' on page '+str(page)+'\r\n'
    logit('find in position '+str(position)+' on page '+str(page)+'\r\n')
