# -*- coding: utf-8 -*-
"""
Spyder Editor

This is a temporary script file.
"""
import urllib.request
import re

import requests 
import json
import pandas as pd
from bs4 import BeautifulSoup 
# ---- Scrape Taobao search results -------------------------------------------
# Scraped records accumulate in `data`; the Excel-export section below reads it.
data = []
# Search keyword and the number of result pages to fetch.
keyword = '茶叶'
length = 2

url = 'https://s.taobao.com/search?initiative_id=staobaoz_20180121'
for x in range(length):
    # Taobao paginates by item offset: data_value 0 -> page 1, 44 -> page 2,
    # 88 -> page 3, and so on.
    payload = {'q': keyword, 'ie': 'utf8', 'data_value': 44 * x}
    # timeout so one hung request cannot stall the whole crawl
    resp = requests.get(url, params=payload, timeout=30)
    html = resp.text

    # The listing data is embedded in the page as a JS assignment
    # `g_page_config = {...}` that precedes `g_srp_loadCss`; extract that
    # object and parse it as JSON.
    found = re.findall(r'g_page_config =.*g_srp_loadCss', html, re.S)
    if not found:
        # Page layout changed or the request was blocked -- nothing to parse.
        break
    context = re.findall(r'{.*}', found[0])[0]
    context_dict = json.loads(context)

    # status == 'hide' means the search returned no items for this keyword.
    item_list = context_dict['mods']['itemlist']
    if item_list['status'] == 'hide':
        print('非常抱歉，没有找到与“%s”相关的宝贝' % keyword)
        break

    for item in item_list['data']['auctions']:
        # Fetch the item's detail page to read the seller's rating indicators.
        detail_url = item['detail_url']
        if detail_url[:2] == '//':
            # protocol-relative URL -- make it absolute
            detail_url = 'http://' + detail_url[2:]

        detail_html = requests.get(detail_url, timeout=30).text
        soup = BeautifulSoup(detail_html, "html.parser")

        dd_list = []
        # 'shop-rate' is present on Tmall shop pages only; its absence means
        # a personal (C2C) shop layout.
        tmall_rates = soup.find_all('div', class_='shop-rate')
        if len(tmall_rates) == 0:
            # Personal shop: one <dd> description per <dl>.
            for tag in soup.find_all('div', class_='tb-shop-info-bd'):
                for dl_index in tag.findAll('dl'):
                    dd = dl_index.findAll('dd')
                    dd_list.append(dd[0].find('a').contents[0].strip())
        else:
            # Tmall shop: three <li> entries -- description match ("描述相符"),
            # service attitude ("服务态度"), shipping speed ("发货速度").
            for tag in tmall_rates:
                li = tag.find('ul').findAll('li')
                dd_list.append(li[0].find('em', class_='count').get_text())
                dd_list.append(li[1].find('em', class_='count').contents[0])
                dd_list.append(li[2].find('em', class_='count').contents[0])

        # Personal shops may yield fewer than three indicators; pad with empty
        # strings so the record below always has every field.
        if len(dd_list) < 3:
            dd_list += [''] * (3 - len(dd_list))

        data.append({
            'price': item['view_price'],
            'name': item['raw_title'],
            'img_url': item['pic_url'],
            'num': item['view_sales'],
            'url': item['detail_url'],
            'index': x + 1,          # 1-based page number
            'descripe': dd_list[0],
            'attitude': dd_list[1],
            'speed': dd_list[2],
        })
            
def find_all(item, attr, c):
    """Return at most one element under *item* with tag *attr* and CSS class *c*."""
    query = {'class': c}
    return item.find_all(attr, attrs=query, limit=1)
      
# ---- Export the scraped records to an Excel sheet ---------------------------
import xlwt

workbook = xlwt.Workbook()
table = workbook.add_sheet('淘宝女装', cell_overwrite_ok=True)

# Header row; this list also fixes the column order for the data rows below.
head = ['index', 'name', 'price', 'num', 'img_url', 'url', 'descripe',
        'attitude', 'speed']
for col, title in enumerate(head):
    table.write(0, col, title)

# One spreadsheet row per scraped record (row 0 is the header).
for row, record in enumerate(data, start=1):
    for col, key in enumerate(head):
        table.write(row, col, record[key])

# NOTE(review): the original filename contained a garbled digit run matching
# corruption seen elsewhere in the file; restored to the plain form.
workbook.save(u'搜索%s的结果.xls' % keyword)


# NOTE(review): the three triple-quoted blocks below are disabled scratch code
# kept for reference. They execute as inert string-literal expression
# statements and have no runtime effect; kept byte-identical here.
'''    
for k in range(0,100):        #100次，就是100个页的商品数据

    payload ['s'] = 44*k+1   #此处改变的url参数为s，s为1时第一页，s为45是第二页，89时第三页以此类推                          
    resp = requests.get(url, params = payload)
    print(resp.url)          #打印访问的网址
    resp.encoding = 'utf-8'  #设置编码
    title = re.findall(r'"raw_title":"([^"]+)"',resp.text,re.I)  #正则保存所有raw_title的内容，这个是书名，下面是价格，地址
    price = re.findall(r'"view_price":"([^"]+)"',resp.text,re.I)    
    loc = re.findall(r'"item_loc":"([^"]+)"',resp.text,re.I)
    x = len(title)           #每一页商品的数量
   # print ('============title')
    #print (price)
file.close()
'''
# Disabled experiment: raw urllib fetch of the search page.
'''
response = urllib.request.urlopen('https://s.taobao.com/search?q=女装')
read = response.read()
html_doc=str(read,'utf-8')
print (html_doc)
'''
# Disabled experiment: image-URL extraction from a Baidu Tieba page.
'''
def getHtml(url):
    page = urllib.request.urlopen(url)
    html = page.read()
    html = html.decode('utf-8')
    return html


def getImg(html):
    reg = r'src="(.+?\.jpg)" pic_ext'
    imgre = re.compile(reg)
    imglist = re.findall(imgre,html)

    return imglist

html = getHtml("http://tieba.baidu.com/p/2460150866")

print (html)
'''
