# -*- coding: utf-8 -*-
"""
Created on Wed Sep 30 15:48:24 2016
alibaba爬虫

【工具】
使用PhantomJS

该爬虫用来根据黄页的类别信息，抓取每个种类下的店铺的名称类别
https://page.1688.com/cp/cp1.html?spm=a260k.635.1998396936.6.jNch7m&tracelog=cnindex_service_company

然后在产品搜索页面抓取店铺的 名称 和 网址
使用PhantomJS
@author: stevenkwong
"""

# coding = utf-8

from selenium import webdriver
import time
import pandas as pd
import os
import re
from multiprocessing import  Pool

def whatstime():
    """Return the current local time formatted as a log prefix.

    Example result: "[2016-Sep-30  15:48:24]: " (the month abbreviation
    is locale-dependent).

    Returns
    -------
    str
        Timestamp string ending in "]: ", ready to prepend to a message.
    """
    # The module already does `import time` at the top of the file; the
    # original redundant function-local import was removed.
    fmt = "[%Y-%b-%d  %H:%M:%S]: "
    return time.strftime(fmt, time.localtime())

def get_shop_info(sub_cat, cat):
    """Scrape shop names and 1688.com shop URLs for one sub-category.

    Opens 1688.com in headless PhantomJS, searches for *sub_cat*, pages
    through every result page collecting each shop's name and profile URL,
    then writes the collected rows to an Excel file via save_file() labeled
    with the parent category *cat*.

    Parameters
    ----------
    sub_cat : str
        Sub-category keyword typed into the site's search box.
    cat : str
        Parent category name; used for logging and as the output file label.
    """
    print('LOADING ' + cat + ' ' + sub_cat)
    # PhantomJS binary path is machine-specific (the original author's VM).
    driver = webdriver.PhantomJS(
        executable_path='/home/stevenkwong/文档/web_app/'
                        'phantomjs-2.1.1-linux-x86_64/bin/phantomjs',
        service_args=['--ignore-ssl-errors=true'])
    print(whatstime() + 'opening:\n' + 'https://www.1688.com/' + '... ...')
    driver.get('https://www.1688.com/')
    print(whatstime() + 'got web: \n')
    print(driver.current_url)

    # Submit the search for the sub-category keyword.
    driver.find_element_by_id("alisearch-keywords").send_keys(sub_cat)
    driver.find_element_by_id("alisearch-submit").click()
    time.sleep(3)
    try:
        # Dismiss the promotional overlay if it appears; its absence is fine.
        driver.find_element_by_class_name("s-overlay-widget-ic").click()
    except Exception:
        # Was `except BaseException`, which would also have swallowed
        # KeyboardInterrupt/SystemExit; narrowed to Exception.
        pass

    total_page = int(
        driver.find_element_by_css_selector('.fui-paging-num').text)
    shop_info_dict_list = []
    # BUG FIX: the original `range(1, total_page)` skipped the last page.
    for page_no in range(1, total_page + 1):
        # Scroll down in steps so lazily-loaded items render (PhantomJS
        # uses document.body.scrollTop; Firefox would need
        # document.documentElement.scrollTop).
        for step in range(10):
            time.sleep(0.3)
            driver.execute_script(
                "var q=document.body.scrollTop=%d" % (step * 900))

        offer_list = driver.find_element_by_id("sm-offer-list")
        items = offer_list.find_elements_by_class_name("sm-offer-item")

        for item in items:
            company = item.find_element_by_class_name("sm-offer-companyName")
            shop_name = company.text
            shop_website = company.get_attribute("href")
            shop_info_dict_list.append(
                {'商户名称': shop_name, '阿里巴巴网址': shop_website})
            print(whatstime() + '(' + cat + sub_cat + ' 第%d页' % page_no
                  + '/' + '共%d页' % total_page
                  + ', 有%d个店铺): ' % len(items)
                  + shop_name + " @ " + shop_website)

        # Advance to the next page by clicking the site's own jQuery-bound
        # "next" control; with jQuery this is a no-op on the last page.
        driver.execute_script("$('.fui-next').click()")

    save_file(cat, shop_info_dict_list)
    time.sleep(3)  # brief pause before tearing the browser down
    driver.quit()
    return

def get_cat_link(url):
    """Scrape the 1688.com category index page and return a mapping of
    category name -> list of sub-category names.

    NOTE(review): despite the function name, the collected values are link
    *texts* (category names), not hrefs — see `links.append(each_a.text)`
    and `link = each_dd.text` below. This appears intentional, since
    get_shop_info() feeds these strings into the site search box; confirm.

    Parameters
    ----------
    url : str
        The category index page URL (https://page.1688.com/cp/cp1.html...).

    Returns
    -------
    dict
        {category_name (str): [sub-category name (str), ...]}
    """
    # Fetch each category and its associated sub-categories.
#    browser = webdriver.Firefox()
    browser = webdriver.PhantomJS(executable_path=\
    '/home/stevenkwong/文档/web_app/phantomjs-2.1.1-linux-x86_64/bin/phantomjs',\
    service_args=['--ignore-ssl-errors=true'])
    print(whatstime() + 'opening:\n'+ url +'... ...')
    browser.get(url)
    print(whatstime() + 'got web:\n' )
    print(browser.current_url)
#    browser = webdriver.PhantomJS(executable_path=\
#    '/home/stevenkwong/文档/web_app/phantomjs-2.1.1-linux-x86_64/bin/phantomjs',\
#    service_args=['--ignore-ssl-errors=true'])
#    print(whatstime() + 'opening:\n'+ url)
#    browser.get(url)
#    print(whatstime() + 'got web:\n' + browser.current_url)

# more_cate: container element that holds every top-level category
    more_cate = browser.find_element_by_css_selector('#more-categorate')
# details: containers holding the finer-grained sub-categories
    details = more_cate.find_elements_by_class_name('detail')
    headers = more_cate.find_elements_by_css_selector('.header.fd-clr')
    cates_links_dict = dict()
    for ii in range(len(details)):
        each_header = headers[ii]
        # Click the section header to expand its detail panel before reading.
        each_header.click()
        time.sleep(1)
        # Re-query the detail containers after the click — the DOM may have
        # been re-rendered, which would make stale references fail.
        each_detail = more_cate.find_elements_by_class_name('detail')[ii]
        cell_tags = each_detail.find_elements_by_css_selector('.cell-tags')
        pm_list = each_detail.find_element_by_css_selector('.promotion-list')


        for each_cell_tags in cell_tags:
            cate_name = each_cell_tags.find_element_by_tag_name('a').text
            print(cate_name)
            dd = each_cell_tags.find_element_by_tag_name('dd')
            a_s = dd.find_elements_by_tag_name('a')
            links = list()
            for each_a in a_s:
                #print('-' + each_a.text)
                # Skip the "更多" ("more") catch-all link.
                if each_a.text == '更多':
                    pass
                else:
                    #print(each_a.get_attribute('href'))
                    links.append(each_a.text)
            cates_links_dict[cate_name] = links
      # Extra merchant sub-categories under each section's promotion list.
        dds = pm_list.find_elements_by_tag_name('dd')
        for each_dd in dds:
            links = list()
            # Entries whose text contains "更多" ("more") are skipped via
            # `continue`; entries without it fall through and are recorded.
            if each_dd.text.find('更多') == -1:
                pass
            else:
                continue
            cate_name = each_dd.text
            # NOTE(review): `link` is the element's text, not its href —
            # it becomes this category's single "sub-category" entry.
            link = each_dd.text
            print('-' + cate_name)
            print(link)
            links.append(link)
            cates_links_dict[cate_name] = links
    browser.quit()
    return cates_links_dict

def save_file(cate_name, shop_info_dict_list,
              file_path='/mnt/hgfs/VMWare/crawler_outcome'):
    """Write scraped shop records for one category to a timestamped .xlsx.

    The output file is named "<YYYYmmdd_HHMMSS><cate_name>.xlsx" and gains a
    leading '店铺类别' (shop category) column filled with *cate_name*.

    Parameters
    ----------
    cate_name : str
        Category label; written into every row and into the file name.
    shop_info_dict_list : list of dict
        One dict per shop; all dicts are assumed to share the keys of the
        first element (as produced by get_shop_info()).
    file_path : str, optional
        Output directory. Defaults to the original hard-coded VM share so
        existing callers are unaffected.

    Returns
    -------
    pandas.DataFrame or None
        The table that was written, or None when the input list is empty
        (the original code raised IndexError in that case).
    """
    if not shop_info_dict_list:
        # Nothing scraped — don't crash, don't write an empty file.
        return None

    # Column order: category label first, then the keys of the first record.
    cols_name = ['店铺类别'] + list(shop_info_dict_list[0].keys())
    data = {}
    for col in cols_name:
        if col == '店铺类别':
            data[col] = [cate_name] * len(shop_info_dict_list)
        else:
            data[col] = [row[col] for row in shop_info_dict_list]
    # Build the frame directly instead of concat-ing onto an empty one.
    shop_info_table = pd.DataFrame(data, columns=cols_name)

    timestr = time.strftime("%Y%m%d_%H%M%S", time.localtime())
    file_name = timestr + cate_name + '.xlsx'
    shop_info_table.to_excel(os.path.join(file_path, file_name))
    return shop_info_table

if __name__ == '__main__':
    url = ('https://page.1688.com/cp/cp1.html?spm=a260k.635'
           '.1998396936.6.jNch7m&tracelog=cnindex_service_company')
    cat_link_dict = get_cat_link(url)

    # Resume support: skip categories that already have an output file.
    # File names look like "<YYYYmmdd_HHMMSS><category>.xlsx" (see save_file).
    outcome_dir = '/mnt/hgfs/VMWare/crawler_outcome'
    # BUG FIX: the '.' before "xlsx" is now escaped; the original regex
    # would also have matched e.g. "20160930_120000fooZxlsx".
    pattern = re.compile(r'(\d+?_\d+)(\D+)\.xlsx')
    existed_cate = set()
    # List the directory directly instead of the original os.chdir()
    # round-trip, which mutated process-wide state.
    for file_name in os.listdir(outcome_dir):
        match = pattern.match(file_name)
        if match:
            existed_cate.add(match.group(2))

    for cate in existed_cate:
        print('deleting ' + cate)
        # pop() instead of del: a stray file whose category is not in the
        # freshly scraped dict must not abort the whole run with KeyError.
        cat_link_dict.pop(cate, None)

    # Two worker processes; maxtasksperchild=1 gives each scrape a fresh
    # interpreter (PhantomJS sessions don't survive reuse well).
    p = Pool(processes=2, maxtasksperchild=1)
    for cate, sub_cats in cat_link_dict.items():
        for sub_cat in sub_cats:
            p.apply_async(get_shop_info, args=(sub_cat, cate))

    print(whatstime() + '等待所有进程完毕... ...')
    p.close()
    p.join()
    print(whatstime() + '把马云的网店都扒光啦！完成！')