import csv
import json
import os
import re
import time
from operator import itemgetter

import requests
from selenium import webdriver  # drives a real Chrome browser for the QR login



## Output location of the scraped results — change the path here if needed.
# Create the output directory up front: open() raises FileNotFoundError if
# the directory does not exist yet.  os.path.join keeps the path portable
# (the original hard-coded Windows backslashes).
import os
os.makedirs('data', exist_ok=True)
csv_file = open(os.path.join('data', 'demo.csv'), 'w', newline='', encoding='utf-8-sig')
writer = csv.writer(csv_file)


driver = webdriver.Chrome()  # launch a real Chrome window via WebDriver
driver.get('https://login.taobao.com/member/login.jhtml')
# Log in by scanning the QR code shown in the opened browser window.
# The original slept only 1 second before reading cookies, which captures an
# unauthenticated session; block until the user confirms the login finished.
input('Scan the QR code in the browser, then press Enter to continue...')

# Harvest the session cookies so the plain requests.get() calls below are
# authenticated as the logged-in user.
cookie_list = driver.get_cookies()
cookies = {}
print(len(cookie_list))
for cookie in cookie_list:
    cookies[cookie['name']] = cookie['value']
print("已经成功的获取到用户登录的cookies")
print(cookies)
# quit() (not close()) terminates the whole WebDriver session and the
# chromedriver process; close() only closes the window and leaks the driver.
driver.quit()



# HTTP headers that make the search requests look like a normal browser visit.
headers = {
    'Host': 's.taobao.com',
    'User-Agent': 'Mozilla/5.0 (Windows NT 6.3; Win64; x64; rv:63.0) Gecko/20100101 Firefox/63.0',
    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
    'Accept-Language': 'zh-CN,zh;q=0.8,zh-TW;q=0.7,zh-HK;q=0.5,en-US;q=0.3,en;q=0.2',
    'Accept-Encoding': 'gzip, deflate, br',
    'Connection': 'keep-alive',
}
# Search-result URL template: %(key)s is the query, %(pnum)d the item offset
# (Taobao paginates 44 items per page).
list_url = 'http://s.taobao.com/search?q=%(key)s&ie=utf8&s=%(pnum)d'

# Field-extraction patterns for the JSON embedded in the result page.
# Compiled once here because they are applied to every scraped page;
# re.findall accepts compiled patterns, so the call sites are unchanged.
titles = re.compile('"raw_title":"(.*?)"')        # item title
locations = re.compile('"item_loc":"(.*?)"')      # shipping origin
sales = re.compile('"view_sales":"(.*?)人付款"')   # units sold
comments = re.compile('"comment_count":"(.*?)"')  # number of comments
prices = re.compile('"view_price":"(.*?)"')       # sale price
nicks = re.compile('"nick":"(.*?)"')              # shop name
ids = re.compile('"user_id":"(.*?)"')             # shop/user id
nids = re.compile('"nid":"(.*?)"')                # item id — required to build the item URL
# CSV header row: name, origin, units sold, comment count, price, shop, shop id, item URL.
writer.writerow(['商品名称','销售地','销售量','评论数','销售价格','店铺', '店铺ID','商品链接'])



key = input('输入想要爬取的商品名称：')  # search keyword, read interactively
Page = 5 # number of result pages to scrape; adjust as needed
data = []  # one row per item: [title, loc, sales, comments, price, shop, shop_id, url]

for i in range(Page):
    # Taobao paginates by item offset: 44 items per page.
    pnum = i * 44
    url = list_url % {'key': key, 'pnum': pnum}
    print(url)
    res = requests.get(url, headers=headers, cookies=cookies)
    html = res.text

    # Extract the parallel field lists out of the embedded JSON.
    title = re.findall(titles, html)
    location = re.findall(locations, html)
    sale = re.findall(sales, html)
    comment = re.findall(comments, html)
    price = re.findall(prices, html)
    nick = re.findall(nicks, html)
    user_id = re.findall(ids, html)  # renamed from `id` to avoid shadowing the builtin
    nid = re.findall(nids, html)

    # zip() stops at the shortest list, so a page where one field is missing
    # for some items can no longer raise IndexError (the original indexed all
    # lists by range(len(title))).
    for t, loc, s, c, p, shop, uid, item_id in zip(
            title, location, sale, comment, price, nick, user_id, nid):
        goods_url = ('https://detail.m.tmall.com/item.htm'
                     '?spm=a320p.7692363.0.0.12dbc423E7Y7NI&id=' + item_id)
        # NOTE(review): the original fetched goods_url here and discarded the
        # response; that dead per-item network request has been removed.
        s = s[:-1] if s.endswith('+') else s  # strip trailing '+' ("3000+")
        if s.endswith('万'):                   # "1.5万" -> 15000.0
            count = float(s[:-1]) * 10000
        else:
            count = float(s)
        data.append([t, loc, count, c, p, shop, uid, goods_url])

    print('-------Page%s 已经抓取完毕!--------\n\n' % (i + 1))
    time.sleep(2)  # be polite between page requests
# Sort by units sold (column 2), best sellers first, and dump all rows.
# A single reverse sort replaces the original sort()+reverse() pair; the only
# difference is the order of tied rows, which was never meaningful.
data.sort(key=itemgetter(2), reverse=True)
writer.writerows(data)
# Close the file explicitly so buffered rows are flushed to disk.
csv_file.close()