# -*- coding: utf-8 -*-
"""
Created on Wed Sep 21 15:48:24 2016
alibaba爬虫

【工具】
使用firefox

该爬虫用来根据黄页的类别信息，抓取每个种类下的店铺的名称连接和类别
爬虫入口，黄页主站：

https://page.1688.com/cp/cp1.html?spm=a260k.635.1998396936.6.jNch7m&tracelog=cnindex_service_company
@author: stevenkwong
"""

# coding = utf-8
from selenium.webdriver.common.proxy import Proxy,ProxyType
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.action_chains import ActionChains
from selenium.common.exceptions import NoSuchElementException
from selenium import webdriver
import re
import  time  #调入time函数
import pandas as pd
import os
import pickle

#p = Pool(processes=2,maxtasksperchild=1)

def whatstime():
    """Return the current local time formatted as a log prefix.

    Example output: ``[2016-Sep-21  15:48:24]: `` — used to prefix
    console messages throughout the crawler.
    """
    # NOTE: the previous version re-imported ``time`` locally, shadowing
    # the module-level import at the top of the file; that was redundant.
    fmt = "[%Y-%b-%d  %H:%M:%S]: "
    return time.strftime(fmt, time.localtime())
 
def get_ip_pool():
    """Fetch a fresh list of proxy addresses from the daxiangdaili service.

    Opens a Firefox instance, reads the ``<pre>`` element containing the
    newline-separated proxy list, and returns the addresses as a list of
    ``host:port`` strings.
    """
    url = ('http://vip22.daxiangdaili.com/ip/?tid=557863646314073&num=20'
           '&delay=2&category=2&foreign=none&longlife=5')
    driver = webdriver.Firefox()
    driver.get(url)
    raw_text = driver.find_elements_by_tag_name('pre')[0].text
    driver.quit()
    return raw_text.split('\n')
   
def set_browser_proxy(ip_pool_list,url,needed_elements):
    """Try each proxy in ``ip_pool_list`` until one can load ``url``.

    A proxy counts as usable when, after loading ``url``, at least one of
    the two class names in ``needed_elements`` is present on the page
    (i.e. we got real content, not an anti-bot wall).

    Parameters
    ----------
    ip_pool_list : list of ``host:port`` proxy strings.
    url : probe page loaded through each candidate proxy.
    needed_elements : two-element sequence of CSS class names marking a
        successful page load.

    Returns
    -------
    (browser, flag) : a Firefox instance routed through the working proxy
        and ``True`` on success; ``('none', False)`` when no proxy worked
        (the string sentinel is kept because callers compare against it).
    """
    ele_1 = needed_elements[0]
    ele_2 = needed_elements[1]
    # Initialise before the loop so an empty pool cannot raise NameError
    # (the previous version left ``browser`` unbound in that case).
    browser = 'none'
    flag = False
    for myProxy in ip_pool_list:
        proxy = Proxy({
            'proxyType': ProxyType.MANUAL,
            'httpProxy': myProxy,
            'ftpProxy': myProxy,
            'sslProxy': myProxy,
            'noProxy': ''  # set this value as desired
        })
        browser = webdriver.Firefox(proxy=proxy)
        browser.implicitly_wait(3)
        browser.get(url)
        print(whatstime() + 'opening site.......')
        try:
            # Either marker element proves the page rendered via the proxy.
            browser.find_element_by_class_name(ele_1)
            flag = True
        except NoSuchElementException:
            try:
                browser.find_element_by_class_name(ele_2)
                flag = True
            except NoSuchElementException:
                print(whatstime() + myProxy + ' is unusable! X')
                browser.quit()
                browser = 'none'
                flag = False
        if flag:
            print(whatstime() + myProxy + ' is usable! Y')
            break
    return browser, flag
    
def get_search_widget(browser):
    """Locate the search input box and its submit button on the page.

    Returns a ``(input_element, button_element)`` tuple; the input box is
    cleared before being returned.
    """
    panel = browser.find_element_by_class_name('search-pannel')
    wrap = panel.find_element_by_class_name('search-i-wrap')
    box = wrap.find_element_by_tag_name('input')
    box.clear()
    button = panel.find_element_by_class_name('search-i-action')
    return box, button
    
def get_cat_link(url):
    """Scrape every category name and its link(s) from the yellow-pages
    landing page.

    Returns a dict mapping category name -> list of category URLs.
    """
    browser = webdriver.Firefox()
    browser.get(url)
#    browser = webdriver.PhantomJS(executable_path=\
#    '/home/stevenkwong/文档/web_app/phantomjs-2.1.1-linux-x86_64/bin/phantomjs',\
#    service_args=['--ignore-ssl-errors=true'])
#    print(whatstime() + 'opening:\n'+ url)
#    browser.get(url)
#    print(whatstime() + 'got web:\n' + browser.current_url)

# more_cate: the container holding all category sections
    more_cate = browser.find_element_by_css_selector('#more-categorate')
# details: the containers holding the finer-grained sub-categories
    details = more_cate.find_elements_by_class_name('detail')
    headers = more_cate.find_elements_by_css_selector('.header.fd-clr')
    cates_links_dict = dict()
    for ii in range(len(details)):
        each_header = headers[ii]
        # Clicking the header expands its detail panel; the panel list is
        # re-queried below because the click may re-render the DOM.
        each_header.click()
        time.sleep(1)
        each_detail = more_cate.find_elements_by_class_name('detail')[ii]
        cell_tags = each_detail.find_elements_by_css_selector('.cell-tags')
        pm_list = each_detail.find_element_by_css_selector('.promotion-list')
        
        
        for each_cell_tags in cell_tags:
            cate_name = each_cell_tags.find_element_by_tag_name('a').text
            print(cate_name)
            dd = each_cell_tags.find_element_by_tag_name('dd')
            a_s = dd.find_elements_by_tag_name('a')
            links = list()
            for each_a in a_s:
                #print('-' + each_a.text)
                # Skip the "more" (更多) link — it is not a real category.
                if each_a.text == '更多':
                    pass
                else:
                    #print(each_a.get_attribute('href'))
                    links.append(each_a.get_attribute('href'))
            cates_links_dict[cate_name] = links
      # Extra merchant categories in each section's promotion list
        dds = pm_list.find_elements_by_tag_name('dd')
        for each_dd in dds:
            links = list()
            # Only process entries NOT labelled "more" (更多); skip the rest.
            if each_dd.text.find('更多') == -1:
                pass
            else:
                continue
            cate_name = each_dd.text
            link = each_dd.find_element_by_tag_name('a').\
                                get_attribute('href')
            print('-' + cate_name)
            print(link)
            links.append(link)
            cates_links_dict[cate_name] = links
    browser.quit()
    return cates_links_dict

def browse_web_actions(browser):
    """Simulate human browsing by scrolling down the page, then back up.

    Each step moves the scrollbar ~800px with a 0.3s pause, so a full call
    keeps the browser "active" on the page for roughly 12 seconds (an
    anti-ban measure).
    """
    # Scroll down in 20 steps: 700, 1500, ..., 15900 px.
    for num in range(20):
        js = "var q=document.documentElement.scrollTop=%d" % ((num + 1) * 800 - 100)
        time.sleep(0.3)
        browser.execute_script(js)
    # Scroll back up, retracing the descent.  The previous formula
    # ((10 - num + 1)*800 - 100) started mid-page and went negative for
    # num > 10; (20 - num) mirrors the way down exactly.
    for num in range(20):
        js = "var q=document.documentElement.scrollTop=%d" % ((20 - num) * 800 - 100)
        time.sleep(0.3)
        browser.execute_script(js)
#    time.sleep(3)

def _parse_contact_dls(container, strip_contact_suffix):
    """Parse the <dl> rows inside a contact box into {field_name: value}.

    Each <dl> holds a <dt> (field label, e.g. 联系人/地址) and a <dd>
    (field value).  When ``strip_contact_suffix`` is True the '和我联系'
    suffix is stripped from the 联系人 entry (only the big contact box
    carries it).
    """
    info = dict()
    for dl in container.find_elements_by_tag_name('dl'):
        dt_text = dl.find_element_by_tag_name('dt').text
        dt_text = dt_text.replace(':', '')
        # Handle both half-width and full-width colons, just in case.
        dt_text = dt_text.replace('：', '')
        item_name = ''.join(dt_text.split(' '))
        value = dl.find_element_by_tag_name('dd').text
        if strip_contact_suffix and item_name == '联系人':
            # The contact-person cell carries a trailing "和我联系" link text.
            value = value.replace('和我联系', '').strip()
        if item_name == '地址':
            # Per the ops team's request: addresses must contain no spaces.
            value = ''.join(value.split(' '))
        info[item_name] = value
    return info


def get_shop_contact_info(browser,url):
    """Open a shop's detail page and extract its contact information.

    Parameters
    ----------
    browser : the shared Firefox instance.
    url : the shop's sub-page URL.

    Returns
    -------
    (browser, contact_info) : the (possibly refreshed) browser and a dict
        mapping field names (联系人/电话/地址/...) to values; empty when the
        page exposes no contact box at all.
    """
    print(whatstime() + 'opening:\n' + url + '... ...')
    browser.get(url)
    print(whatstime() + 'got web:\n' + browser.current_url)
    # If we got bounced to a login page, retry with saved cookies.
    if browser.current_url.find('login') != -1:
        browser = add_cookies_browser(browser, url)

    contact_info = dict()
    try:
        # Most shops expose the large contact box.
        big = browser.find_element_by_css_selector('.mod.mod-contactBig')
        contact_info = _parse_contact_dls(big, strip_contact_suffix=True)
    except NoSuchElementException:
        try:
            # Some shops only have the small contact box (no 和我联系 suffix).
            small = browser.find_element_by_css_selector('.mod.mod-contactSmall')
            contact_info = _parse_contact_dls(small, strip_contact_suffix=False)
        except NoSuchElementException:
            # A few shops have no contact box at all.
            pass

    return browser, contact_info
    
def add_cookies_browser(browser,url):
    """If the browser was redirected to a login page, reload ``url`` with
    cookies saved from an earlier manual login.

    Returns the (possibly refreshed) browser instance; a no-op when the
    current URL is not a login page.
    """
    if browser.current_url.find('login') == -1:
        # Not on a login page — nothing to do.
        pass
    else:
        # 'with' guarantees the cookie file is closed (the previous
        # version leaked the file handle).
        with open('cookies20160930.pkl','rb') as f:
            cookies = pickle.load(f)
        print(whatstime() + 'opening:\n'+ url +'... ...')
        browser.get(url)
        print(whatstime() + 'got web:\n' + browser.current_url)
        # Small delay to reduce the risk of the account being banned.
        time.sleep(1)
        try:
            for each_cook in cookies:
                browser.add_cookie(each_cook)
        except Exception:
            # Some cookies may be rejected (wrong domain etc.); best effort.
            pass

        print(whatstime() + 'opening:\n'+ url +'... ...')
        browser.get(url)
        print(whatstime() + 'got web:\n' + browser.current_url)
    return browser
    
def get_shop_info(cat,url):
    """Crawl every result page for category ``cat`` at ``url`` and collect
    each shop's name, 1688 link and address.

    Results are written to disk via ``save_file`` and returned as a list
    of dicts.  Returns 0 when the pager cannot be read, and None when
    proxy recovery times out after hitting the anti-bot wall.
    """
    # time.clock() was removed in Python 3.8; perf_counter is the
    # recommended wall-clock replacement for measuring elapsed time.
    start = time.perf_counter()
    browser = webdriver.Firefox()
    browser.get(url)
    browser = add_cookies_browser(browser, url)

    try:
        # The pager widget carries the total page count in 'data-max'.
        total_page = browser.find_element_by_css_selector('.page-op')\
                        .find_element_by_tag_name('input').get_attribute('data-max')
    except Exception:
        print(whatstime() + cat + ' Error !')
        browser.quit()
        return 0

    shop_info_dict_list = list()

    for ii in range(int(total_page)):
        print(whatstime() + 'catching' + cat + 'page_%d' % ii)
        suffix = '?spm=0.0.0.0.fVZFzk&pageSize=30&offset=3&beginPage=%d' % (ii + 1)
        page_url = url + suffix
        print(whatstime() + 'opening:\n' + page_url + '... ...')
        browser.get(page_url)
        print(whatstime() + 'got web:\n' + browser.current_url)
        browser = add_cookies_browser(browser, page_url)
        current_url = browser.current_url
        # A redirect whose host starts with 'sec' means we hit the
        # captcha / anti-bot wall: save what we have and switch proxy.
        if current_url[:20].find('sec') != -1:
            print( current_url.find('sec') )
            print(whatstime() + current_url + '\n' + '需要验证码登陆！')
            browser.quit()
            if shop_info_dict_list:
                save_file(cat, shop_info_dict_list)
            count_time = 0
            flag = False
            # Try up to 6 fresh proxy pools until one can load the page.
            # (The previous condition used 'or', which kept looping after
            # success and then made the 'count_time <= 5' check below
            # always report failure.)
            while not flag and count_time <= 5:
                print(whatstime() + 'finding valid proxy......')
                needed_elements = ['page-op', 'page-op']
                ip_pool_list = get_ip_pool()
                browser, flag = set_browser_proxy(ip_pool_list, url, needed_elements)
                count_time = count_time + 1

            if flag:
                print(whatstime() + '找到了有效的proxy!')
                # Reset and re-crawl this category with the new proxy.
                shop_info_dict_list = list()
            else:
                # All proxies failed — give up on this category.
                # set_browser_proxy returns the string 'none' on total
                # failure, so only quit a real driver.
                if browser != 'none':
                    browser.quit()
                print(whatstime() + 'timeout ! ')
                return

        # Linger on the page like a human reader to avoid a ban.
        browse_web_actions(browser)
        time.sleep(10)
        containers = browser.find_elements_by_class_name('list-item-title-text')
        list_item_details = browser.find_elements_by_class_name('list-item-detail')
        for jj, each_shop in enumerate(containers):
            shop_info_dict = dict()
            # Shop name and shop link come from the title anchor.
            shop_name = each_shop.get_attribute('title')
            shop_alisite = each_shop.get_attribute('href')
            # Shop address lives in the parallel detail element.
            shop_addr = list_item_details[jj].find_element_by_class_name(
                'sm-offerResult-areaaddress').get_attribute('title')
            # Per the ops team's request: addresses must contain no spaces.
            shop_addr = ''.join(shop_addr.split(' '))
            print(shop_addr)

            shop_info_dict['商户名称'] = shop_name
            shop_info_dict['阿里巴巴网址'] = shop_alisite
            shop_info_dict['商户地址'] = shop_addr
            shop_info_dict_list.append(shop_info_dict)

    browser.quit()
    if shop_info_dict_list:
        save_file(cat, shop_info_dict_list)
    end = time.perf_counter()
    print(whatstime() + cat + '总共耗时：%.2f 秒' % (end - start))

    return shop_info_dict_list

def save_file(cate_name, shop_info_dict_list):
    """Dump the crawled shop records to a timestamped .xlsx file.

    Parameters
    ----------
    cate_name : category name; written into the 店铺类别 column and into
        the output file name.
    shop_info_dict_list : list of per-shop dicts sharing the same keys.
        Nothing is written when the list is empty.
    """
    if not shop_info_dict_list:
        # Nothing crawled (e.g. called right after hitting the captcha
        # wall) — the previous version raised IndexError here.
        return
    # Column order: 店铺类别 first, then the record fields in dict order.
    cols_name = ['店铺类别'] + list(shop_info_dict_list[0].keys())
    data = dict()
    for col in cols_name:
        if col == '店铺类别':
            data[col] = [cate_name] * len(shop_info_dict_list)
        else:
            data[col] = [record[col] for record in shop_info_dict_list]
    # Build the frame directly; the old empty-frame + concat dance added
    # nothing and is deprecated behaviour in newer pandas.
    shop_info_table = pd.DataFrame(data, columns=cols_name)

    # File name: <timestamp><category>.xlsx, e.g. 20160921_154824五金.xlsx
    fmt = "%Y%m%d_%H%M%S"
    timestr = time.strftime(fmt, time.localtime())
    file_path = '/mnt/hgfs/VMWare/crawler_outcome'
    file_name = timestr + cate_name + '.xlsx'
    shop_info_table.to_excel(file_path + '/' + file_name)
    
if __name__ == '__main__':

    url = 'https://page.1688.com/cp/cp1.html?spm=a260k.635'+\
        '.1998396936.6.jNch7m&tracelog=cnindex_service_company'
    # Map every category name to its list of links.
    cat_link_dict = get_cat_link(url)
    result = dict()

    total_task_num = len(cat_link_dict)
    now_num = 0

    # Skip categories that already have an output file on disk.
    origin_path = os.getcwd()
    os.chdir('/mnt/hgfs/VMWare/crawler_outcome')
    files_list = os.listdir(os.getcwd())
    # Output files look like 20160921_154824<category>.xlsx.  The dot
    # before the extension is escaped — unescaped it matched any char.
    pattern = re.compile(r'(\d+?_\d+)(\D+)\.xlsx')
    existed_cate = list()
    for each in files_list:
        match = pattern.match(each)
        if match is not None:
            existed_cate.append(match.group(2))
    os.chdir(origin_path)
    existed_cate = list(set(existed_cate))

    for each in existed_cate:
        print('deleting' + each)
        # pop() with a default so an absent key cannot raise KeyError
        # (del raised if a category file existed but was not in the dict).
        cat_link_dict.pop(each, None)
    # This category is excluded by hand; pop() also avoids a KeyError
    # when it was already removed by the loop above.
    cat_link_dict.pop('医药包装', None)

    for each_cat in cat_link_dict:
        cat_link_list = cat_link_dict[each_cat]
        for each_link in cat_link_list:
            shop_info_dict_list = get_shop_info(each_cat, each_link)
            now_num = now_num + 1
    print(whatstime() + '等待所有进程完毕... ...')
    print(whatstime() + '把马云的网店都扒光啦！Mission Complete！')