import requests
import re
from bs4 import BeautifulSoup as bs
from pymongo import MongoClient
from limitTool import limitToolSingleton
 #保存页面到本地
homePage = 'page.html'
detailPage = 'detail.html'
detail2Page = 'detail2.html'
cookie = 'Hm_lvt_0635661383aaa290c3690f94520a59f1=1529640573,1529913462'
def save_html(r_page, name):
    """Save the raw bytes of an HTTP response to a local file.

    Args:
        r_page: a requests.Response-like object exposing ``.content`` (bytes).
        name: destination file path.

    Returns:
        The same response object, so the call can be chained.
    """
    # Binary mode: write the response bytes verbatim, no re-encoding.
    # `with` guarantees the handle is closed even if the write fails.
    with open(name, 'wb') as f:
        f.write(r_page.content)
    return r_page

def save_code(code, name):
    """Advance a numeric barcode string by one and persist it to a file.

    Args:
        code: current barcode as a decimal string.
        name: file path where the next code is stored.

    Returns:
        The next code, zero-padded to 13 digits.
    """
    print('here code is ' + code)
    # Compute the next 13-digit code BEFORE opening the file: the original
    # opened (and truncated) the file first, so a bad `code` value would
    # leave the resume file empty.
    next_code = str(int(code) + 1).zfill(13)
    with open(name, 'w') as f:
        f.write(next_code)
    return next_code

def load_html(name):
    """Return the full text content of a previously saved HTML file.

    Args:
        name: file path to read.

    Returns:
        The file contents as a string.
    """
    # Original bound the contents to a local named `str`, shadowing the
    # builtin; returning directly avoids that entirely.
    with open(name, 'r') as f:
        return f.read()

def fetchByBarcodeFromLiantu(code):
    """Ask liantu.com whether a barcode is known.

    POSTs the EAN code to the query endpoint, saves the raw response to
    ``homePage`` for offline inspection, and returns True when the JSON
    reply is non-empty (the barcode has a record), else False.

    Args:
        code: barcode digits as a string.

    Returns:
        bool: True when liantu.com returned data for the code.
    """
    print(code)
    # NOTE(review): several header names below contain stray spaces
    # ('Content - Length', 'X - Requested - With', ...) and are unlikely to
    # be honored as real headers; kept verbatim to preserve the exact
    # request shape the site has been accepting.
    headers = {'Accept': '*/*',
               'Accept-Encoding': 'gzip, deflate',
               'Accept-Language': 'zh-CN,zh;q=0.9',
               'Connection': 'keep-alive',
               'Cookie': cookie,
               'Content - Length': '17',
               'Content - Type': 'application / x - www - form - urlencoded;charset = UTF - 8',
               'Host': 'www.liantu.com',
               'Referer': 'http://www.liantu.com/tiaoma/',
               'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.87 Safari/537.36',
               'X - Requested - With': "XMLHttpRequest"}
    url = 'http://www.liantu.com/tiaoma/query.php'
    r = requests.post(url=url, headers=headers, data={'ean': code})
    save_html(r, homePage)  # keep a local copy of the raw response
    soup = bs(r.text, 'lxml')
    print(soup)
    payload = r.json()
    print(payload)
    # Non-empty JSON means this barcode has a matching record, so the goods
    # site can then be queried for details (parseDetail / parseDetail2).
    # Truthiness replaces the original `payload.__len__() != 0`.
    if payload:
        print('its no none')
        return True
    else:
        print("its none")
        return False
# Attribute IDs that appear in the SetValue('<id>', '<value>') calls embedded
# in the detail page's inline <script> blocks; parseDetail matches on these
# to pull out the corresponding product field.
js_name = 'Att_Sys_en-us_141_G'  # product name
js_brand = 'Att_Sys_zh-cn_304_G'  # brand
js_capacity = 'Att_Sys_zh-cn_332_G'  # capacity / volume
js_package = 'Att_Sys_zh-cn_35_G'  # packaging
js_Origin = 'Att_Sys_zh-cn_74_G'  # place of origin

def parseDetail(url, code):
    """Parse a goods-detail page (base_id variant) into a product model.

    Fetches the page, extracts product attributes from ``SetValue(...)``
    calls inside the inline <script> blocks, grabs the first product image
    URL, and builds a dict keyed the same way parseDetail2 does.

    Args:
        url: detail-page URL to fetch.
        code: the barcode the page belongs to (stored in the model).
    """
    # The original also built a `headers` dict here but never passed it to
    # requests.get, so it has been dropped.
    r = requests.get(url=url)
    soup = bs(r.text, 'lxml')
    model = {}
    print('**********')
    # One compiled regex per attribute, hoisted out of the script loop —
    # they never change between iterations. All parts are raw strings (the
    # original mixed raw and non-raw, leaving an invalid '\)' escape).
    patterns = {
        'name': re.compile(r"SetValue\(\'" + js_name + r"'.+\)"),
        # NOTE: 'barnd' typo kept — existing DB records use this key.
        'barnd': re.compile(r"SetValue\(\'" + js_brand + r"'.+\)"),
        'capacity': re.compile(r"SetValue\(\'" + js_capacity + r"'.+\)"),
        'package': re.compile(r"SetValue\(\'" + js_package + r"'.+\)"),
        'origin': re.compile(r"SetValue\(\'" + js_Origin + r"'.+\)"),
    }
    matches = {key: [] for key in patterns}
    for script in soup.find_all('script'):
        for key, pattern in patterns.items():
            if not matches[key]:  # keep the first script that matched
                matches[key] = pattern.findall(script.text)
    for key in patterns:
        model[key] = parseValue(matches[key])
    model['barcode'] = code
    print(model)

    # Product image: src of the first <img> inside the image list container.
    p_image = ""
    pdiv = soup.find('div', attrs={'id': 'imageListDiv'})
    if pdiv is not None:
        img_tag = pdiv.find('img')
        if img_tag is not None:
            src = img_tag.get('src')
            if src is not None:
                # BUGFIX: the original assigned the <img> tag object itself
                # here instead of its src string.
                p_image = src
    model['image'] = p_image
    # insertToDb(model)  # persistence disabled, matching the original

def parseDetail2(url, code):
    """Parse a goods-detail page (.aspx variant) and insert it into MongoDB.

    Reads the product attributes from the positional <span class="attrVal">
    elements of the first 'section' div, plus an optional product image,
    then stores the record via insertToDb.

    Args:
        url: detail-page URL to fetch.
        code: the barcode the page belongs to (stored in the model).
    """
    # The original also built a `headers` dict here but never passed it to
    # requests.get, so it has been dropped.
    r = requests.get(url=url)
    soup = bs(r.text, 'lxml')
    model = {}
    divs = soup.find_all('div', attrs={'class': 'section'})
    print('***')
    # Only the first 'section' div carries the attributes; the original used
    # a one-iteration for-loop to pick it.
    if not divs:
        print('not useable')
        return
    div = divs[0]
    print('***')
    spans = div.find_all('span', attrs={'class': 'attrVal'})
    print(spans)
    # Guard against layout changes: positions 3..6 must exist below.
    if len(spans) < 7:
        print('not useable')
        return
    p_image = ''
    img_tag = div.find('img')
    if img_tag is not None:
        src = img_tag.get('src')
        if src is not None:
            p_image = src
    # Attribute positions are fixed by the page layout of this variant.
    model['name'] = parseStrCheckEmpty(spans[3])
    # NOTE: 'barnd' typo kept — existing DB records use this key.
    model['barnd'] = parseStrCheckEmpty(spans[4])
    model['capacity'] = parseStrCheckEmpty(spans[5])
    model['package'] = ''  # not present on this page variant
    model['origin'] = parseStrCheckEmpty(spans[6])
    model['barcode'] = code
    model['image'] = p_image
    print(model)
    insertToDb(model)

def parseStrCheckEmpty(html):
    """Return the element's text, or '' when the element is missing.

    Args:
        html: an element exposing ``.text`` (e.g. a bs4 Tag), or None.

    Returns:
        The element's text, or the empty string for None.
    """
    # `is None` — identity check, not equality (original used `== None`).
    if html is None:
        return ''
    return html.text
def parseValue(matches):
    """Extract the value argument from a ``SetValue('<id>','<value>')`` match.

    Args:
        matches: list of regex match strings; only the first is used.
            (Parameter renamed from `list`, which shadowed the builtin;
            all in-file callers pass it positionally.)

    Returns:
        The cleaned value string, or "" when there was no match.
    """
    if not matches:  # replaces `list.__len__() == 0`
        return ""
    raw = matches[0]
    # Keep everything after the first comma (the value argument), then strip
    # the surrounding quote and closing-paren noise.
    value = raw[raw.find(',') + 1:].replace("'", "").replace(")", "")
    print(value)
    return value

def insertToDb(model):
    """Insert a product record into MongoDB unless its barcode already exists.

    Connects to a local mongod (localhost:27017), database ``barcodeDs``,
    collection ``barcode_set``, and dedupes on the 'barcode' field.

    Args:
        model: dict with at least a 'barcode' key.
    """
    conn = MongoClient('localhost', 27017)
    try:
        barcode_set = conn.barcodeDs.barcode_set
        # count_documents / insert_one replace Cursor.count() and
        # Collection.insert(), both removed in modern pymongo (4.x).
        if barcode_set.count_documents({'barcode': model['barcode']}) == 0:
            print('no data')
            barcode_set.insert_one(model)
        else:
            print('existed')
    finally:
        # The original leaked a new client connection on every call.
        conn.close()

def strTest(str):
    """Print whether the given string contains the substring 'base_id'."""
    # `in` is equivalent to the find() != -1 test.
    contains_marker = 'base_id' in str
    if contains_marker:
        print('it has')
    else:
        print('it has not')
def currentCodeFromFile():
    """Return the barcode stored in code.txt (the crawl's resume point).

    Raises FileNotFoundError when code.txt is absent, like the original.
    """
    stored = '0000000002900'  # fallback; overwritten by the file contents
    handle = open('code.txt', 'r')
    try:
        stored = handle.read()
    finally:
        handle.close()
    return stored
if __name__=='__main__':
    print('started')
    # Warm-up/sanity query with a known barcode before the crawl loop.
    fetchByBarcodeFromLiantu('6917935002150')


    # Resume crawling from the last barcode persisted in code.txt, then loop
    # forever: rate-limit, query one code, persist the next code.
    code = currentCodeFromFile()
    # index = 0
    while 1:
    # while index <= 2:
        # index += 1
        # NOTE(review): project-local rate limiter; its exact semantics are
        # not visible in this file — presumably it sleeps/throttles per code.
        limitToolSingleton.wait(code)
        fetchByBarcodeFromLiantu(code)
        print(code)
        # save_code writes the incremented code so an interrupted run resumes.
        code = save_code(code,'code.txt')


    # Dead code below (unreachable after the infinite loop): leftovers from
    # an earlier Douban comment scraper and manual parseDetail2 testing.
    # url = 'http://search.anccnet.com/goodsdetail2.aspx?gtin=F25F56A9F703ED74CE68E8EBDADF3A26254462DA35DEF10E3D1304B8FF8474D280AF64787244D367'
    # parseDetail2(url,'8801100128845')
    # cookies = loadCookies()

    # if cookies == None:# cookies empty -> need to log in
    #     loginDouban()# log in and save the cookies
    # # open the Douban page
    # else :
    #     print('has cookies')
    #     print(cookies)
    # # openHomeDouban()
    # allcoments = []
    # x = 0
    # url = 'https://movie.douban.com/subject/26942674/comments'
    # while 1:
    #     if x == 2:# only extract the first x pages
    #         break
    #     commentList,nextPage = fetchComments(url)
    #     for comment in commentList:# append this page's comments to the big list
    #         comment = comment + '\n'
    #         allcoments.append(comment)
    #     if(len(nextPage) == 0):# also break when there is no next page
    #         print('没有下一页了')
    #         break
    #     url = 'https://movie.douban.com/subject/26942674/comments' + nextPage
    #     x += 1
    # print('电影评论读取完毕')
    # print(allcoments)
    #
    # # join all comments into one text
    # with open(fpath, 'w', encoding='utf-8')  as f:
    #     f.writelines(allcoments)
    #     print('ha')
    #
    # print(words)
    # word = words[0]
    # print(word[0])