# -*- coding: utf-8 -*-
"""
Created on Wed Sep 21 15:48:24 2016
alibaba爬虫

没有使用代理ip的爬虫程序，希望通过模仿用户的动作行为来避免被服务器识别为爬虫的问题
【方法】
1.  放慢浏览时间
2.  增加滚轮动作，滑至底部以实现完全加载

@author: stevenkwong
"""

# coding = utf-8
from selenium.webdriver.common.proxy import *
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.action_chains import ActionChains
from selenium import webdriver
import  time  #调入time函数
import numpy as np
import pandas as pd
from multiprocessing import  Pool
import os
import random
import pickle

def whatstime():
    """Return the current local time as a bracketed log prefix.

    Example: ``"[2016-Sep-21  15:48:24]: "`` (month name is
    locale-dependent via %b).
    """
    # BUG FIX: removed the redundant function-local ``import time``;
    # the module-level import already provides it.
    fmt = "[%Y-%b-%d  %H:%M:%S]: "
    return time.strftime(fmt, time.localtime())
    
def change_proxy(browser, url):
    """Route *browser* through a hard-coded HTTP/FTP/SSL proxy, then load *url*.

    Returns the same browser instance so callers can keep using it.

    NOTE(review): mutating the profile of an already-running Firefox
    instance may not take effect until the browser is restarted — confirm.
    """
    my_proxy = '139.129.234.84:8998'
    proxy = Proxy({
        'proxyType': ProxyType.MANUAL,
        'httpProxy': my_proxy,
        'ftpProxy': my_proxy,
        'sslProxy': my_proxy,
        'noProxy': ''  # set this value as desired
        })

    # BUG FIX: set_proxy() returns None, so the original
    # ``browser = browser.profile.set_proxy(...)`` clobbered the driver
    # reference and the following browser.get() would raise
    # AttributeError on None.
    browser.profile.set_proxy(proxy=proxy)
    browser.get(url)
    # Random delay so the traffic looks less like a bot (anti-ban).
    time.sleep(random.randint(3, 6))
    return browser
    
def get_search_widget(browser):
    """Locate the search input box and its submit button on the current page.

    Returns a ``(input_element, button_element)`` tuple; the input box is
    cleared before being returned.
    """
    panel = browser.find_element_by_class_name('search-pannel')
    wrap = panel.find_element_by_class_name('search-i-wrap')
    input_box = wrap.find_element_by_tag_name('input')
    input_box.clear()
    submit_button = panel.find_element_by_class_name('search-i-action')
    return input_box, submit_button
    
def get_cat_link(url):
    """Load the category -> search-page-URL mapping from the local pickle.

    *url* is only needed by the commented-out manual fallback below; the
    normal path just reads ``cat_link_dict.pkl`` from the current directory.

    Returns: dict mapping category name -> search-results URL.
    """
    # BUG FIX: use a context manager so the file handle is closed
    # (the original opened the file and never closed it).
    with open('cat_link_dict.pkl', 'rb') as f:
        cat_link_dict = pickle.load(f)
    # Normally the persisted file is all we need.  If the saved URLs go
    # stale, log in manually and re-run the code below to rebuild and
    # re-pickle the mapping.
#########start -- #1#######################
#    browser = webdriver.Firefox( )
#
#    browser.get(url)
#    # cat_box holds the container of every category link
#    cat_box = browser.find_elements_by_id('nav-sub')[0]
#
#    # every category inside cat_box is an 'a' tag
#    cats_tags = cat_box.find_elements_by_tag_name('a')
#    cats = [each.text for each in cats_tags]
#
#    # open the search page
#    search_url = 'https://s.1688.com/company/-.html'
#
#    # find the input widget
#    browser.get(search_url)
#
#   # cat_link_dict maps each category to its landing-page URL
#    cat_link_dict = dict()
#    ind = 0
#    for ii,each in enumerate(cats):
#        if ii >= 6:
#            pass
#        else:
#            continue
#        ind = ind +1
#        search_widget,search_button = get_search_widget(browser)
#        search_widget.send_keys(each)
#
#        search_button.click()
#        cat_link_dict[each] = browser.current_url
#        time.sleep(5)
#        if ind <= 18:
#            pass
#        else:
#            time.sleep(10)
#    pickle.dump(cat_link_dict,open('cat_link_dict.pkl','wb'))
#    browser.quit()
 #########end-- #1#######################
    return cat_link_dict


def _parse_contact_container(container):
    """Parse every <dl> row inside *container* into a {field: value} dict.

    Each <dl> holds a field label in its <dt> ("联系人", "地址", ...) and
    the value in its <dd>.
    """
    info = dict()
    for dl in container.find_elements_by_tag_name('dl'):
        label = dl.find_element_by_tag_name('dt').text
        # Strip both the ASCII and full-width colon — the page is not
        # consistent about which one it uses.
        label = label.replace(':', '').replace('：', '')
        item_name = ''.join(label.split(' '))
        value = dl.find_element_by_tag_name('dd').text
        if item_name == '联系人':
            # The contact cell carries a trailing "和我联系" link text.
            value = value.replace('和我联系', '').strip()
        if item_name == '地址':
            # Per the ops team's request, addresses carry no spaces.
            value = ''.join(value.split(' '))
        info[item_name] = value
    return info


def get_shop_contact_info(browser, url):
    """Open a shop's sub-page and scrape its contact-information table.

    Returns ``(browser, contact_info)`` where contact_info maps field
    names ("联系人", "地址", ...) to values; it is empty when the page has
    no contact table at all.
    """
    # url is one company's sub-page
    browser.get(url)
    # Long random delay to avoid being flagged / banned.
    time.sleep(random.randint(10, 20))
    # If we were bounced to a login page, re-attach the saved cookies
    # and reload before scraping.
    if browser.current_url.find('login') != -1:
        browser = add_cookies_browser(browser, url)

    contact_info = dict()
    # Some companies expose a big contact table, others only a small
    # one, and a few have none at all.  Try big first, then small.
    # CONSISTENCY FIX: both variants now share one parser, so the small
    # table also gets the "联系人" cleanup the big branch had.
    for selector in ('.mod.mod-contactBig', '.mod.mod-contactSmall'):
        try:
            container = browser.find_element_by_css_selector(selector)
        except NoSuchElementException:
            continue
        contact_info = _parse_contact_container(container)
        break

    return browser, contact_info
    
def add_cookies_browser(browser, url):
    """Load *url*, inject the cookies saved in ``cookies.pkl``, and reload.

    Returns the same browser instance, now carrying the saved session.
    """
    # BUG FIX: context manager closes the pickle file (the original
    # leaked the handle).
    with open('cookies.pkl', 'rb') as f:
        cookies = pickle.load(f)
    # Cookies can only be attached once a page of the target domain is
    # open, so load the URL first.
    browser.get(url)
    # Random delay to avoid being flagged / banned.
    time.sleep(random.randint(3, 6))
    try:
        for each_cook in cookies:
            browser.add_cookie(each_cook)
    except Exception:
        # Best-effort: some cookies may be rejected (e.g. domain
        # mismatch).  BUG FIX: narrowed from BaseException, which also
        # swallowed KeyboardInterrupt/SystemExit.
        pass

    # Reload so the attached session cookies take effect.
    browser.get(url)
    time.sleep(random.randint(3, 6))
    return browser
    
def get_shop_info(cat, url):
    """Crawl every result page of category *cat* starting from *url*.

    For each shop listing, collects the shop name, its 1688 sub-page URL
    and the contact fields, writes everything to an Excel file via
    save_file(), and returns the list of per-shop dicts.
    """
    # BUG FIX: time.clock() was deprecated in Python 3.3 and removed in
    # 3.8; perf_counter() is the documented replacement for elapsed time.
    start = time.perf_counter()
    browser = webdriver.Firefox()
    browser = add_cookies_browser(browser, url)

    # The pager exposes the total page count as a data attribute on its
    # input element.
    total_page = browser.find_element_by_css_selector('.page-op')\
                    .find_element_by_tag_name('input').get_attribute('data-max')

    shop_info_dict_list = list()
    actions = ActionChains(browser)

    for ii in range(int(total_page)):
        print(whatstime() + 'catching' + cat + 'page%d' % ii)
        suffix = '&sortType=pop&pageSize=30&offset=3&beginPage=%d' % (ii)
        page_url = url + suffix
        print(page_url)
        browser.get(page_url)
        # Random delay to avoid being flagged / banned.
        time.sleep(random.randint(3, 6))
        containers = browser.find_elements_by_class_name('list-item-title-text')

        # Open a private-browsing window (Ctrl+Shift+P) so each shop's
        # sub-page is visited in a separate window from the result list.
        actions.key_down(Keys.CONTROL).key_down(Keys.SHIFT).\
                            send_keys('p').key_up(Keys.CONTROL).key_up(Keys.SHIFT).perform()
        handles = browser.window_handles
        original_handle = handles[0]
        # NOTE(review): assumes the private window is always handles[1];
        # WebDriver does not guarantee window_handles order — confirm.
        private_handle = handles[1]

        for each_shop in containers:
            shop_info_dict = dict()
            shop_name = each_shop.get_attribute('title')
            shop_alisite = each_shop.get_attribute('href')
            browser.switch_to_window(private_handle)

            browser, contact = get_shop_contact_info(browser, shop_alisite)
            browser = add_cookies_browser(browser, shop_alisite)

            shop_info_dict['shop_name'] = shop_name
            shop_info_dict['ali_site'] = shop_alisite
            contact_items_list = ['联系人', '电话', '移动电话', '传真', '地址', '邮编', '公司主页']
            for each_item in contact_items_list:
                # Missing fields become a single space so every row
                # carries the full column set (dict.get replaces the
                # original try/except KeyError).
                shop_info_dict[each_item] = contact.get(each_item, ' ')
            print(whatstime() + shop_name)
            shop_info_dict_list.append(shop_info_dict)
            browser.switch_to_window(original_handle)
    browser.quit()
    save_file(cat, shop_info_dict_list)
    end = time.perf_counter()
    print(whatstime() + cat + '总共耗时：%.2f 秒' % (end - start))

    return shop_info_dict_list

def save_file(cate_name, shop_info_dict_list):
    """Write the crawled shop records of one category to a timestamped .xlsx.

    cate_name: category label; prepended as the '店铺类别' column and used
        in the output file name.
    shop_info_dict_list: non-empty list of per-shop dicts that all share
        the same keys.
    """
    cols_name = list(shop_info_dict_list[0].keys())
    cols_name.insert(0, '店铺类别')
    data = {'店铺类别': [cate_name] * len(shop_info_dict_list)}
    for col in cols_name[1:]:
        data[col] = [row[col] for row in shop_info_dict_list]
    # BUG FIX: build the frame directly with the desired column order;
    # the original concatenated onto an empty frame for no effect.
    shop_info_table = pd.DataFrame(data, columns=cols_name)

    fmt = "%Y%m%d_%H%M%S"
    timestr = time.strftime(fmt, time.localtime())
    file_path = '/mnt/hgfs/VMWare/crawler_outcome'
    file_name = timestr + cate_name + '.xlsx'
    shop_info_table.to_excel(file_path + '/' + file_name)
    
if __name__ == '__main__':

    url = 'https://www.1688.com/'
    # Map each category name to its search-results URL.
    cat_link_dict = get_cat_link(url)

    total_task_num = len(cat_link_dict)
    now_num = 0
    print(whatstime() + '父进程 %s' % os.getpid())
    # maxtasksperchild=1 recycles each worker after a single category so
    # a leaked or crashed Firefox cannot poison later tasks.
    p = Pool(processes=2, maxtasksperchild=1)

    for each in cat_link_dict:
        # NOTE(review): apply_async results are discarded, so worker
        # exceptions are silently lost; collect the AsyncResult objects
        # and call .get() if failures should surface.
        p.apply_async(get_shop_info, args=(each, cat_link_dict[each],))
        now_num = now_num + 1
    print(whatstime() + '等待所有进程完毕... ...')
    p.close()
    p.join()
    print(whatstime() + '把马云的网店都扒光啦！完成！')