# -*- coding: utf-8 -*-
"""
Created on Wed Sep 21 15:48:24 2016
淘宝爬虫
利用selenium来爬取淘宝商户信息
爬虫入口为 https://shopsearch.taobao.com/search?app=shopsearch&q=&imgfile=&commend=all&ssid=s5-e&search_type=shop&sourceId=tb.index&spm=a21bo.50862.201856-taobao-item.2&ie=utf8&initiative_id=tbindexz_20160921
        
进去网页后再点击打开【热门类目】底下的各个子类目，进去之后就可以爬取
商家的信息了，主要爬店名 + 淘宝链接
@author: stevenkwong
"""

# coding = utf-8

from selenium import webdriver
import  time  #调入time函数
import numpy as np
import pandas as pd
from multiprocessing import  Pool
import os
import re

def whatstime():
    """Return the current local time formatted as a log prefix.

    Example output: ``[2016-Sep-21  15:48:24]: ``.
    """
    # Use the module-level ``time`` import; the previous function-local
    # ``import time`` was redundant.
    fmt = "[%Y-%b-%d  %H:%M:%S]: "
    return time.strftime(fmt, time.localtime())
    
def get_cat_link(url):
    """Collect top-level category names and their landing-page URLs.

    Opens *url* in Firefox, reads every ``<a>`` tag inside the
    ``level-one-cat-list`` container, and returns a dict mapping the
    link text (category name) to its href.
    """
    browser = webdriver.Firefox()
    browser.get(url)
    # Container that holds all top-level category links.
    container = browser.find_element_by_class_name("level-one-cat-list")
    anchors = container.find_elements_by_tag_name('a')
    # category name -> category landing-page URL
    cat_link_dict = {a.text: a.get_attribute('href') for a in anchors}
    time.sleep(2)
    browser.quit()
    return cat_link_dict

def get_cat_page_num(url):
    """Return the total number of result pages for one category page.

    The active pager item (the one containing a ``current`` child)
    displays text like ``"3/100"``; the number after the slash is the
    total page count.

    Returns 0 when the pager cannot be parsed — the original raised
    ``NameError`` in that case because ``pages_num`` was never bound.
    """
    browser = webdriver.Firefox()
    try:
        browser.get(url)
        pager = browser.find_element_by_class_name('pager')
        items = pager.find_elements_by_class_name('item')
        pages_num = 0
        for item in items:
            try:
                # Only the active pager item has a 'current' child.
                item.find_element_by_class_name('current')
            except Exception:
                continue
            pages_num = int(item.text.split('/')[1])
            break
    finally:
        # Always release the browser, even when the page fails to parse;
        # the original leaked the Firefox process on any exception.
        browser.quit()
    return pages_num

def get_shop_info(cat, url):
    """Scrape every shop's name, URL and sales count for one category.

    Walks all result pages of the category at *url*, scrolling each page
    to the bottom so that lazily-loaded items render, then persists the
    records with save_file().

    Returns a list of dicts with keys 'shop_name', 'shop_website' and
    'sale_num' (NaN when a shop shows no sales figure).
    """
    # time.clock() was removed in Python 3.8; time.time() is a portable
    # replacement for coarse wall-clock timing.
    start = time.time()
    pages_num = get_cat_page_num(url)
    browser = webdriver.Firefox()
    shop_info_dict_list = list()
    try:
        for ii in range(pages_num):
            print(whatstime() + 'catching' + cat + 'page%d' % ii)
            # Taobao paginates with an offset of 20 shops per page.
            page_url = url + '&s=%d' % (ii * 20)
            print(page_url)
            browser.get(page_url)

            # Scroll to the bottom step by step so lazily-loaded content
            # renders; the step size (500px) was found by experiment.
            for num in range(10):
                js = "var q=document.documentElement.scrollTop=%d" % ((num + 1) * 500)
                time.sleep(0.2)
                browser.execute_script(js)
            time.sleep(2)

            # Query the shop cards AFTER scrolling — the original queried
            # before scrolling and could miss lazily-loaded items.
            containers = browser.find_elements_by_class_name('list-item')
            for each_shop in containers:
                shop_h4 = each_shop.find_elements_by_tag_name('h4')[0]
                a = shop_h4.find_elements_by_tag_name('a')[0]
                shop_info_dict = dict()
                shop_info_dict['shop_name'] = a.text
                shop_info_dict['shop_website'] = a.get_attribute('href')
                try:
                    info_sale = each_shop.find_elements_by_class_name('info-sale')[0]
                    shop_info_dict['sale_num'] = \
                        info_sale.find_element_by_tag_name('em').text
                except Exception:
                    # Some shops expose no sales figure.
                    shop_info_dict['sale_num'] = np.nan
                shop_info_dict_list.append(shop_info_dict)
    finally:
        # Release the browser even when a page fails mid-scrape; the
        # original leaked the Firefox process on any exception.
        browser.quit()
    save_file(cat, shop_info_dict_list)
    end = time.time()
    print(whatstime() + cat + '总共耗时：%.2f 秒' % (end - start))

    return shop_info_dict_list

def save_file(cate_name, shop_info_dict_list):
    """Write the scraped shop records for one category to an Excel file.

    The file is written to the fixed output directory and named
    ``<timestamp><category>.xlsx``, with a leading '店铺类别' column
    holding the category name on every row.

    Does nothing when the record list is empty — the original crashed
    with IndexError on ``shop_info_dict_list[0]``.
    """
    if not shop_info_dict_list:
        return
    cols_name = list(shop_info_dict_list[0].keys())
    cols_name.insert(0, '店铺类别')
    data = dict()
    for col in cols_name:
        if col == '店铺类别':
            # Every row belongs to the same category.
            data[col] = [cate_name] * len(shop_info_dict_list)
        else:
            data[col] = [rec[col] for rec in shop_info_dict_list]
    # Build the frame directly from the column dict; concatenating onto
    # an empty DataFrame (the original approach) is wasteful and relies
    # on concat to preserve column order.
    shop_info_table = pd.DataFrame(data, columns=cols_name)

    fmt = "%Y%m%d_%H%M%S"
    timestr = time.strftime(fmt, time.localtime())
    file_path = '/mnt/hgfs/VMWare/crawler_outcome'
    file_name = timestr + cate_name + '.xlsx'
    shop_info_table.to_excel(file_path + '/' + file_name)
    
if __name__ == '__main__':

    # Entry page listing all shop categories.
    url = 'https://shopsearch.taobao.com/search?app=shopsearch'+\
        '&q=&imgfile=&commend=all&ssid=s5-e&search_type=shop'+\
        '&sourceId=tb.index&spm=a21bo.50862.201856-taobao-item.2'+\
        '&ie=utf8&initiative_id=tbindexz_20160921'
    # Map each category name to its landing-page URL.
    cat_link_dict = get_cat_link(url)

    # Skip categories whose output file already exists so an interrupted
    # run can be resumed.  Output files are named <timestamp><category>.xlsx.
    out_dir = '/mnt/hgfs/VMWare/crawler_outcome'
    # The dot before 'xlsx' is escaped; the original pattern's bare '.'
    # matched any character.  Listing the directory directly avoids the
    # original's os.chdir() round-trip, which mutated the process CWD.
    pattern = re.compile(r'(\d+?_\d+)(\D+)\.xlsx')
    existed_cate = list()
    for fname in os.listdir(out_dir):
        match = pattern.match(fname)
        if match is not None:
            existed_cate.append(match.group(2))

    for cate in existed_cate:
        # pop() with a default avoids the KeyError the original's
        # ``del`` raised when a saved category is no longer on the site.
        cat_link_dict.pop(cate, None)

    total_task_num = len(cat_link_dict)
    now_num = 0
    print(whatstime() + '父进程 %s' % os.getpid())
    # Two workers; each worker is recycled after a single task to keep
    # Firefox memory usage bounded.
    p = Pool(processes=2, maxtasksperchild=1)

    for cate in cat_link_dict:
        p.apply_async(get_shop_info, args=(cate, cat_link_dict[cate],))
        now_num = now_num + 1
    print(whatstime() + '等待所有进程完毕... ...')
    p.close()
    p.join()
    print(whatstime() + '把马云的网店都扒光啦！完成！')
    
    

#time.sleep(3)  # 休眠3秒
#browser.quit()