# coding = utf-8
import string

from bs4 import BeautifulSoup
import re
import urllib.request, urllib.error
from urllib.parse import quote
import xlwt
import ssl
import time

# Disable HTTPS certificate verification for every urllib request in this
# process (monkey-patches the default SSL context module-wide).
# NOTE(review): this weakens TLS security; presumably a workaround for
# certificate errors when fetching jd.com — confirm it is still required.
ssl._create_default_https_context = ssl._create_unverified_context


def main(keywords=None):
    """Crawl JD search results for one keyword and save them to an .xls file.

    Args:
        keywords: Search term. Defaults to None, in which case the user is
            prompted on stdin (the original interactive behavior). Accepting
            the keyword as a parameter fixes the ``main(i)`` call sites in
            ``start_crawler`` and the ``__main__`` block, which previously
            raised TypeError because ``main`` took no arguments.
    """
    if keywords is None:
        keywords = input("请输入关键字：")  # interactive fallback, prompt unchanged
    page = 50  # number of search-result pages to fetch
    savePath = "jd" + keywords + ".xls"
    baseUrl = "https://search.jd.com/Search?keyword=" + keywords + "&page="
    datalist = getData(baseUrl, page)
    saveData(datalist, savePath)

# Pre-compiled regular expressions that pull individual fields out of each
# product's <li> HTML fragment. re.S lets '.' match newlines in the markup.
findImgSrc = re.compile(r'<img.*data-lazy-img="(.*?)"', re.S)  # lazy-loaded image URL
findPrice = re.compile(r'<i>(.*?)</i>', re.S)  # price text inside <i>
findInfo = re.compile(r'<div class="p-name p-name-type-2">(.*?)<em>(.*?)</em>', re.S)  # group 2: tag + description block
findTag = re.compile(r'<span(.*?)>(.*?)</span>', re.S)  # promotional tag; group 2 is the tag text
findStore = re.compile(r'<span class="J_im_icon"><a.*?>(.*?)</a>', re.S)  # store name from the shop link
# Matches the "JD self-operated, quality guaranteed" icon; its presence
# marks a first-party (自营) listing.
findSupply = re.compile(r'<i class="goods-icons J-picon-tips J-picon-fix" data-idx="1" data-tips="京东自营，品质保障">(.*?)</i>',
                        re.S)


def getUrl(askUrl):
    """Fetch *askUrl* and return the response body decoded as UTF-8.

    A desktop-browser User-Agent header is sent so the site serves the
    normal page instead of blocking the scraper. On a URL/HTTP error the
    code and/or reason are printed and "" is returned (best-effort, same
    as the original behavior).

    Args:
        askUrl: Full URL to fetch; non-ASCII characters (e.g. a Chinese
            keyword) are percent-encoded before the request.

    Returns:
        The decoded page body, or "" if the request failed.
    """
    head = {
        # Pretend to be desktop Chrome so JD serves the regular page.
        "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.82 Safari/537.36",
    }
    # Percent-encode only what is not printable ASCII, leaving the URL
    # structure (/, ?, =, &) intact.
    encoded = quote(askUrl, safe=string.printable)
    request = urllib.request.Request(encoded, headers=head)

    html = ""
    try:
        response = urllib.request.urlopen(request)
        html = response.read().decode("utf-8")
    # URLError (and its subclass HTTPError) covers the network failures the
    # original hasattr checks were probing for; a bare Exception would also
    # have hidden genuine programming errors.
    except urllib.error.URLError as e:
        if hasattr(e, "code"):
            print(e.code)
        if hasattr(e, "reason"):
            print(e.reason)

    return html


def getData(baseUrl, page):
    """Scrape *page* pages of JD search results starting from *baseUrl*.

    Args:
        baseUrl: Search URL ending in "&page="; the page index is appended.
        page: Number of pages to fetch.

    Returns:
        A list of rows, each row being
        [image url, price, tag, brand/description, store, supply,
         good id, shop id].
    """
    datalist = []
    for i in range(int(page)):  # one request per result page
        html = getUrl(baseUrl + str(i))
        soup = BeautifulSoup(html, "html.parser")

        for item in soup.find_all("li", class_="gl-item"):  # one <li> per product
            data = []
            goodID = ''
            shopID = ''

            # The product id is embedded in the class of the price <strong>
            # (strip the 2-character prefix).
            for price_div in item.find_all("div", class_="p-price"):
                goodID = price_div.strong.get("class")[0][2:]

            # The shop id is embedded in the shop link's href. Some items
            # lack parts of this structure, so verify each level exists
            # (the original checked item2.span twice; once suffices).
            for shop_div in item.find_all("div", class_='p-shop'):
                if shop_div.span and shop_div.span.a and shop_div.span.a.get("href"):
                    shopID = shop_div.span.a.get("href")[20:-13]

            item = str(item)

            # Guard every findall with a default so one malformed item
            # cannot raise IndexError and abort the whole crawl.
            img_matches = re.findall(findImgSrc, item)
            imgSrc = img_matches[0][2:] if img_matches else ''  # [2:] strips leading "//"
            price_matches = re.findall(findPrice, item)
            price = price_matches[0] if price_matches else ''
            data.append(imgSrc)
            data.append(price)

            info_matches = re.findall(findInfo, item)
            tmpTag = info_matches[0][1] if info_matches else ''
            tag = re.findall(findTag, tmpTag)
            if tag:  # some goods have no tag
                data.append(tag[0][1])
                # Plain string removal: the scraped tag text may contain
                # regex metacharacters, so feeding it to re.sub as a
                # pattern (the original approach) could raise re.error.
                tmpTag = tmpTag.replace(tag[0][1], '')
            else:
                data.append('')

            tmpTag = re.sub('<(.*?)>', '', tmpTag)  # drop leftover markup
            tmpTag = tmpTag.replace('\n', '').replace('\t', '')
            data.append(tmpTag)

            storelist = re.findall(findStore, item)
            data.append(storelist[0] if storelist else ' ')  # ' ' default, as before

            # The self-operated icon only appears on first-party listings;
            # everything else is marked as a third-party seller.
            supply = re.findall(findSupply, item)
            data.append(supply[0] if supply else "第三方")

            data.append(goodID)
            data.append(shopID)
            datalist.append(data)
    return datalist


def saveData(datalist, savePath):
    """Write the scraped rows in *datalist* to an .xls workbook at *savePath*.

    Row 0 holds the Chinese column headers; each following row holds the
    first eight fields of one scraped product.
    """
    headers = ("图片链接", "价格", "标签", "品牌参数&描述", "店铺", "货源", "商品id", "商铺id")
    book = xlwt.Workbook(encoding="utf-8")  # UTF-8 so the Chinese text survives
    sheet = book.add_sheet("jd_products")

    # Header row.
    for col_idx, title in enumerate(headers):
        sheet.write(0, col_idx, title)

    # Data rows start at row 1, directly below the header.
    for row_idx, row in enumerate(datalist, start=1):
        for col_idx in range(8):
            sheet.write(row_idx, col_idx, row[col_idx])

    book.save(savePath)

def start_crawler(in_list):
    """Run the crawler once for each keyword in *in_list*, logging progress."""
    for keyword in in_list:
        print("start_crawler")
        main(keyword)
        print("crawler done")

if __name__ == '__main__':
    # Earlier keyword batches, kept commented out for reference:
    #in_list = ['计算机','球鞋','台灯','插线板','剃须刀','净化器','加湿器','抽湿机','WiFi','乒乓球拍','转接线','扩展坞','酸奶','橘子','老干妈']
    #in_list = ['投影仪', '收银机', '复印机', '碎纸机', '摄像头', '键盘']
    in_list = ['传真机']  # keywords to crawl in this run
    for i in in_list:
        # NOTE(review): the keyword is passed to main here — confirm that
        # main's signature accepts an argument.
        main(i)
        print("Done")
        print(i)