import requests
import re
from bs4 import BeautifulSoup as bs
from pymongo import MongoClient
from limitTool import limitToolSingleton
 # Local file names used to snapshot fetched pages for offline debugging.
homePage = 'page.html'
LianTuhomePage = 'liantuHomePage.html'
detailPage = 'detail.html'
detail2Page = 'detail2.html'
# Session cookies captured from a browser session; presumably expired — refresh before running.
cookie = 'ASP.NET_SessionId=grfmzsbhogiiiiui0n3bhwyv'
cookieLiantu = 'Hm_lvt_0635661383aaa290c3690f94520a59f1=1529640573,1529913462'
# Barcode prefix to enumerate (begins with 692, a China country code; exact split of
# country/manufacturer digits not shown here — see appendBarcode/createVerifyNumber).
prefix = '69202028'
def save_html(r_page, name):
    """Write the raw body of *r_page* to the file *name*.

    :param r_page: a requests.Response-like object exposing a ``content``
        bytes attribute.
    :param name: destination file path.
    :return: *r_page* unchanged, so the call can be chained.
    """
    # Context manager guarantees the handle is closed even if write() raises
    # (the original leaked the handle on error).
    with open(name, 'wb') as f:
        f.write(r_page.content)
    return r_page

def save_code(code, name):
    """Increment *code*, zero-pad it to 13 digits and persist it.

    :param code: current code as a decimal string (or int-convertible).
    :param name: file path to write the new code to.
    :return: the incremented, 13-character zero-padded code string.
    """
    next_code = str(int(code) + 1).zfill(13)
    # Context manager closes the file even if write() raises.
    with open(name, 'w') as f:
        f.write(next_code)
    return next_code

def load_html(name):
    """Return the full text content of the file *name*.

    The original bound the result to a local called ``str``, shadowing the
    builtin; renamed for clarity.
    """
    with open(name, 'r') as f:
        return f.read()

def fetchByBarcodeFromLiantu(code):
    """POST *code* to liantu.com's barcode query endpoint.

    Saves the raw response to ``LianTuhomePage`` for offline inspection.

    :param code: 13-digit barcode string.
    :return: True when the JSON response is non-empty (the barcode exists),
        False otherwise.
    """
    print(code)
    headers = {'Accept': '*/*',
               'Accept-Encoding': 'gzip, deflate',
               'Accept-Language': 'zh-CN,zh;q=0.9',
               'Connection': 'keep-alive',
               'Cookie': cookieLiantu,
               # NOTE: the original sent 'Content - Length'/'Content - Type'
               # (spaces inside the header names, fixed length 17) — invalid
               # headers. requests computes correct values for form data, so
               # they are dropped.
               'Host': 'www.liantu.com',
               'Referer': 'http://www.liantu.com/tiaoma/',
               'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.87 Safari/537.36',
               'X-Requested-With': "XMLHttpRequest"}
    url = 'http://www.liantu.com/tiaoma/query.php'
    r = requests.post(url=url, headers=headers, data={'ean': code})
    save_html(r, LianTuhomePage)
    # The endpoint answers JSON; the original also parsed the body with
    # BeautifulSoup, which was dead work and has been removed.
    payload = r.json()
    print(payload)
    if payload:  # non-empty JSON => the barcode has a record; query the goods site next
        print('its no none')
        return True
    else:
        print("its none")
        return False

def fetchByBarcode(code):
    """Look up *code* on search.anccnet.com and dispatch to a detail parser.

    Saves the result page to ``homePage``, picks the last anchor inside the
    ``#results`` list, and routes to :func:`parseDetailBaseId` when the link
    contains ``base_id``, otherwise to :func:`parseDetailAspx`.

    :param code: 13-digit barcode string.
    :return: None (results are persisted via the detail parsers).
    """
    print(code)
    headers = {'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng',
               'Accept-Encoding': 'gzip, deflate',
               'Accept-Language': 'zh-CN,zh;q=0.9',
               'Connection': 'keep-alive',
               'Cookie': cookie,
               'Host': 'search.anccnet.com',
               'Referer': 'http://search.anccnet.com/searchResult2.aspx?keyword=6935490207223',
               'Upgrade-Insecure-Requests': '1',
               'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.87 Safari/537.36'}
    url = 'http://search.anccnet.com/searchResult2.aspx?keyword=' + code
    r = requests.get(url=url, headers=headers)
    save_html(r, homePage)
    soup = bs(r.text, 'lxml')
    ol = soup.find('ol', attrs={'id': 'results'})
    print(ol)
    if ol is None:
        print('ol not usabled, next code')
        return
    anchors = ol.find_all('a')
    for a in anchors:
        print(a)
    if len(anchors) == 0:
        print('ass not usabled, next code')
        return
    # The last anchor is the detail-page link.
    targetUrl = anchors[-1].get('href')
    print('targetUrl is ' + targetUrl)
    if targetUrl.find('base_id') != -1:  # variant 1: base_id-style detail page
        print('it is baseid')
        parseDetailBaseId(targetUrl, code)
    else:  # variant 2: aspx-style detail page
        print('it is aspx')
        parseDetailAspx(targetUrl, code)
# Attribute ids that appear in SetValue('<id>', '<value>') calls inside the
# detail page's inline JavaScript; parseDetailBaseId greps for each of these
# to extract the corresponding product field.
js_name = 'Att_Sys_en-us_141_G'
js_brand = 'Att_Sys_zh-cn_304_G'
js_capacity = 'Att_Sys_zh-cn_332_G'
js_package = 'Att_Sys_zh-cn_35_G'
js_Origin = 'Att_Sys_zh-cn_74_G'

def parseDetailBaseId(url, code):
    """Scrape a base_id-style detail page and persist the product record.

    Product fields live in inline ``SetValue('<attr id>', '<value>')`` JS
    calls; the image lives under ``div#imageListDiv``.

    :param url: absolute detail-page URL.
    :param code: 13-digit barcode, stored alongside the scraped fields.
    """
    headers = {'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
               'Accept-Encoding': 'gzip, deflate',
               'Accept-Language': 'zh-CN,zh;q=0.9',
               'Connection': 'keep-alive',
               'Cookie': cookie,
               'Host': 'search.anccnet.com',
               'Upgrade-Insecure-Requests': '1',
               'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.87 Safari/537.36'}
    # BUG FIX: the headers dict was built but never passed to requests.get.
    r = requests.get(url=url, headers=headers)
    soup = bs(r.text, 'lxml')

    # Map model keys to the JS attribute ids they are stored under.
    # 'barnd' is a historical typo kept for DB compatibility.
    attr_ids = {'name': js_name,
                'barnd': js_brand,
                'capacity': js_capacity,
                'package': js_package,
                'origin': js_Origin}
    # Compile each pattern once instead of rebuilding it per <script> tag.
    patterns = {key: re.compile(r"SetValue\('" + attr_id + r"'.+\)")
                for key, attr_id in attr_ids.items()}
    matches = {key: [] for key in attr_ids}
    print('**********')
    for script in soup.find_all('script'):
        text = script.text
        for key, pattern in patterns.items():
            if not matches[key]:  # keep the first script that mentions the attribute
                matches[key] = pattern.findall(text)

    model = {key: parseValue(found) for key, found in matches.items()}
    model['barcode'] = code
    print(model)

    # Extract the product image URL, if any.
    p_image = ""
    pdiv = soup.find('div', attrs={'id': 'imageListDiv'})
    if pdiv is not None:
        img_tag = pdiv.find('img')
        if img_tag is not None:
            # BUG FIX: the original stored the bs4 Tag object itself whenever
            # a 'src1' attribute was present; prefer src1 (presumably the
            # full-size image) and fall back to src.
            p_image = img_tag.get('src1') or img_tag.get('src') or ""
    print(p_image)
    model['image'] = p_image
    insertToDb(model)

def parseDetailAspx(url, code):
    """Scrape an aspx-style detail page and persist the product record.

    Fields are positional ``span.attrVal`` elements inside the first
    ``div.section`` (index 3 = name, 4 = brand, 5 = capacity, 6 = origin).

    :param url: absolute detail-page URL.
    :param code: 13-digit barcode, stored alongside the scraped fields.
    """
    headers = {'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
               'Accept-Encoding': 'gzip, deflate',
               'Accept-Language': 'zh-CN,zh;q=0.9',
               'Connection': 'keep-alive',
               'Cookie': cookie,
               'Host': 'search.anccnet.com',
               'Upgrade-Insecure-Requests': '1',
               'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.87 Safari/537.36'}
    # BUG FIX: the headers dict was built but never passed to requests.get.
    r = requests.get(url=url, headers=headers)
    soup = bs(r.text, 'lxml')
    divs = soup.find_all('div', attrs={'class': 'section'})
    print('***')
    # Original iterated only to grab divs[0]; an emptiness check is equivalent.
    if not divs:
        print('not useable')
        return
    div = divs[0]
    print('***')
    spans = div.find_all('span', attrs={'class': 'attrVal'})
    print(spans)
    p_image = ''
    img_tag = div.find('img')
    if img_tag is not None:
        src = img_tag.get('src')
        if src is not None:
            p_image = src
    # BUG FIX: the original indexed spans[3..6] unconditionally and crashed
    # with IndexError on pages with fewer attribute spans.
    if len(spans) < 7:
        print('not useable')
        return
    model = {'name': parseStrCheckEmpty(spans[3]),
             'barnd': parseStrCheckEmpty(spans[4]),  # 'barnd' typo kept for DB compatibility
             'capacity': parseStrCheckEmpty(spans[5]),
             'package': '',
             'origin': parseStrCheckEmpty(spans[6]),
             'barcode': code,
             'image': p_image}
    print(model)
    insertToDb(model)

def parseStrCheckEmpty(html):
    """Return the element's text content, or '' when the element is missing."""
    return '' if html is None else html.text
def parseValue(list):
    """Extract the value from a ``SetValue('<id>','<value>')`` regex match.

    :param list: list of matched strings (name kept for caller compatibility,
        although it shadows the builtin).
    :return: the quoted value with quotes and the trailing ')' stripped, or
        '' when no match was found.
    """
    if len(list) == 0:
        return ""
    raw = list[0]
    # Everything after the first comma is the quoted value plus the ')'.
    value = raw[raw.find(',') + 1:]
    value = value.replace("'", "").replace(")", "")
    print(value)
    return value

def insertToDb(model):
    """Insert *model* into mongo ``barcodeDs.barcode_set`` unless a record
    with the same 'barcode' already exists.

    :param model: dict with at least a 'barcode' key.
    """
    print('insert to DB')
    conn = MongoClient('localhost', 27017)
    try:
        barcode_set = conn.barcodeDs.barcode_set
        # count_documents()/insert_one() replace Cursor.count() and
        # Collection.insert(), both removed in PyMongo 4.
        if barcode_set.count_documents({'barcode': model['barcode']}) == 0:
            print('no data')
            barcode_set.insert_one(model)
        else:
            print('existed')
    finally:
        # The original leaked one client connection per call.
        conn.close()

def strTest(str):
    """Debug helper: report whether 'base_id' occurs in *str*."""
    message = 'it has' if 'base_id' in str else 'it has not'
    print(message)

def currentCodeFromFile():
    """Read the stored code from code.txt.

    The file holds only 12 digits; the check digit is re-derived afterwards
    by the caller.
    """
    with open('code.txt', 'r') as handle:
        return handle.read()

def appendBarcode(prefix):
    """Return ``prefix`` + the current factory suffix (12 digits, no check
    digit yet), or None when this prefix has been fully enumerated.

    Barcode layout: 13 digits total = country code (692...) + factory number
    + serial + 1 check digit, so the enumerable suffix has
    ``13 - len(prefix) - 1`` digits.

    Reads the suffix from randomCode.txt; '9999' marks the end of the run,
    an empty file triggers initialisation to all zeros.
    NOTE(review): the helpers consulted here use the module-level ``prefix``,
    not this parameter — they only agree when the argument is that global.
    """
    randomCode = load_RandomFactoryCode()
    if randomCode == '9999':
        # Suffix space exhausted for this prefix: signal the caller to stop.
        return None
    if not randomCode:
        print('randomcode is empty ,need to init')
        # First run for this prefix: start the suffix at all zeros and seed
        # the state file ('0' placeholder stands in for the check digit).
        randomCode = '0' * randomLengthForPrefix()
        save_RandomFactoryCode(fullCode=prefix + randomCode + '0', name='randomCode.txt', baseInit=True)
    print(randomCode)
    return prefix + randomCode

def randomLengthForPrefix():
    """Number of enumerable suffix digits: 13 total minus the module-level
    ``prefix`` minus the 1 trailing check digit."""
    return 13 - len(prefix) - 1

def save_RandomFactoryCode(fullCode, name, baseInit):
    """Persist the factory-suffix portion of *fullCode* and return
    ``prefix`` + suffix (the check digit is NOT included in the return).

    :param fullCode: 13-digit barcode (prefix + suffix + check digit).
    :param name: state file to write the suffix to.
    :param baseInit: when False the suffix is incremented first, so the next
        loop iteration queries the following code; when True (initial seed)
        it is stored as-is.
    :return: ``prefix`` + the (possibly incremented) suffix string.
    """
    print('fullcode is ' + fullCode)
    width = randomLengthForPrefix()
    start = len(prefix)
    randomCode = fullCode[start:start + width]
    if not baseInit:
        randomCode = str(int(randomCode) + 1).zfill(width)
    # Context manager closes the file even if write() raises.
    with open(name, 'w') as f:
        f.write(randomCode)
    return prefix + randomCode

def load_RandomFactoryCode():
    """Return the factory-suffix digits stored in randomCode.txt
    (the barcode prefix is not included)."""
    with open('randomCode.txt', 'r') as handle:
        return handle.read()

def createVerifyNumber(codeNeedSubFix):
    """Append the EAN-13 check digit to a 12-digit barcode string.

    Standard EAN-13 checksum: digits in even 0-based positions (1st, 3rd, ...
    from the left) carry weight 1, digits in odd 0-based positions carry
    weight 3; the check digit brings the weighted sum to a multiple of 10.
    Scanning left-to-right instead of right-to-left gives the same result
    for a fixed 12-digit length.
    """
    print(codeNeedSubFix)
    weighted = 3 * sum(int(codeNeedSubFix[pos]) for pos in range(1, 12, 2))
    plain = sum(int(codeNeedSubFix[pos]) for pos in range(0, 12, 2))
    check = (10 - (weighted + plain) % 10) % 10
    return codeNeedSubFix + str(check)

def startSpider():
    """Main crawl loop: enumerate every barcode under the module-level
    ``prefix``, probing liantu.com first and fetching the full record from
    the goods site on a hit, persisting progress to disk after each code.
    Runs until an exception interrupts the ``while 1`` loop."""
    print('started')
    # Work out the suffix width for this prefix and get the current code.
    barcode = appendBarcode(prefix)  # None, or a 12-digit barcode (no check digit yet)
    if barcode != None:  # None means this prefix has already been fully enumerated
        # Iterate over the remaining suffix space.
        index = 0
        while 1:
        # while index <= 2:
            # index += 1
            limitToolSingleton.wait(barcode[prefix.__len__():barcode.__len__()])  # throttle based on the suffix digits
            barcode = createVerifyNumber(barcode)  # the 12-digit code needs its check digit appended
            result = fetchByBarcodeFromLiantu(barcode)  # probe liantu.com with the completed code
            if result == True:  # hit: pull the detailed record from the China goods site
                print('this code not null ,need to check from china goods info' + barcode)
                fetchByBarcode(barcode)
            else:
                print('this code is null')
            # Hit or miss, persist this code so the next iteration advances.
            barcode = save_RandomFactoryCode(barcode,'code.txt',False)  # stores the suffix only (prefix excluded)
            print('after a loop. barcode is ' + barcode)

if __name__ == '__main__':
    # Ad-hoc single-barcode checks, kept for debugging:
    # fetchByBarcode('8801100128845')
    # fetchByBarcode('6917935002150')
    # fetchByBarcodeFromLiantu('6917935002150')
    startSpider()