# -*- coding: utf-8 -*-
"""
Created on Thu Dec  8 10:13:30 2016
临时脚本，构建代码块。
browser.current_url
需要构建的模块如下：
1、读取标签集
2、任务分块
3、爬取对应信息
4、保存信息至对应文件夹

新增代码块：
1 统计已完成的类别数目
2 将已完成类目下的多级分类合并到一张大表中
@author: Administrator
"""
import time
import numpy as np
import pandas as pd
from selenium.webdriver.common.proxy import Proxy,ProxyType
from selenium.webdriver.common.action_chains import ActionChains
from selenium.common.exceptions import NoSuchElementException,WebDriverException,TimeoutException,\
StaleElementReferenceException
from selenium import webdriver
#加载显式等待所需lib
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
import os
from multiprocessing import Pool
import pickle as pk

# Folder where screenshots of pages that failed to scrape are stored.
fail_shortcut_path = os.path.join(os.getcwd(),'fail_web_shortcut')
# exist_ok: a second run must not crash because the folder is already there
# (plain os.mkdir raised FileExistsError on re-runs).
os.makedirs(fail_shortcut_path, exist_ok=True)

def separate_task(input_mission, seg_num):
#####################################################
#   Split any object that supports len() into chunks and
#   return the (start, end) index pair of each chunk.
#==================================================#
# Parameters:
#   input_mission: any sized object (list, DataFrame, ...)
#   seg_num: number of linspace cut points over [0, len];
#       yields seg_num - 1 index pairs (kept as-is for
#       backward compatibility with existing callers)
# Returns:
#   list of (start, end) int tuples; an empty list when
#   input_mission has no len() (message is printed)
####################################################
    ind_pair = list()
    try:
        total_data_n = len(input_mission)
        d = np.round(np.linspace(0, total_data_n, num=seg_num))
    except Exception:
        # object without len() (or unusable seg_num): keep the original
        # best-effort contract of printing a hint and returning []
        print('请检查输入数据是否含有len属性！')
        return ind_pair

    # iterate to len(d)-1 explicitly instead of relying on a swallowed
    # IndexError (the old loop caught BaseException on the last index)
    for ii in range(len(d) - 1):
        ind_pair.append((int(d[ii]), int(d[ii + 1])))
    return ind_pair
		
		
def whatstime():
#####################################################
#   Build the timestamp prefix used by all log lines.
#==================================================#
# Returns:
#   current local time formatted as "[YYYY-Mon-DD  HH:MM:SS]: "
####################################################
    return time.strftime("[%Y-%b-%d  %H:%M:%S]: ", time.localtime())

def file_time_suffix():
#####################################################
#   Build a timestamp suitable for use in file names.
#==================================================#
# Returns:
#   current local time formatted as "YYYYMonDD_HHMMSS"
####################################################
    return time.strftime("%Y%b%d_%H%M%S", time.localtime())

def get_total_cur_page(browser):
#####################################################
#   Read the current page number and the total page
#   count from the pagination widget of the open page.
#==================================================#
# Parameters:
#   browser: live selenium WebDriver
# Returns:
#   "current/total" string, or the literal marker
#   '没有页码信息！' when the page has no pagination widget
####################################################
    try:
        skip_widget = browser.find_element_by_css_selector('.ui-page-skip')
        total_pages = skip_widget.find_element_by_css_selector(
            'input[name="totalPage"]').get_attribute('value')
        current_page = browser.find_element_by_css_selector('.ui-page-cur').text
        return '%s/%s' % (current_page, total_pages)
    except BaseException:
        # no pagination found: log the url and return the sentinel marker
        print('没有页码信息！%s' % browser.current_url)
        return '没有页码信息！'
    
    
def get_shops_info(browser,input_df,cate_type,fail_list):
#####################################################
#   Scrape every shop listed under each category tag of
#   input_df, following "next page" links until the
#   listing is exhausted.
#==================================================#
# Parameters:
#   browser:
#       live selenium WebDriver used for all page loads
#   input_df:
#       DataFrame of | tag name (level-x) | entry url |,
#       one row per tag to crawl
#   cate_type:
#       tag-level label (分类 / 一级标签 / 二级标签 / 三级标签);
#       becomes the tag column name of the result frame
#   fail_list:
#       accumulator of failure records; appended to in
#       place and also returned
# Returns:
#   (summary_info_df, fail_list): one row per shop with
#   tag, shop name, main brand, location and shop url
####################################################
    shop_name_list = list()
    shop_brand_list = list()
    shop_addr_list = list()
    shop_cate_list = list()
    shop_href_list = list()

    print(input_df.iloc[:,0].unique())
    for each_line in input_df.itertuples():
        existed_next_page = True # is there a next page? True/False, reset once per tag
        
        # each for-iteration handles one category tag
        website = each_line[2] # each tag's url is one crawl entry point
        cate_name = each_line[1]

        while existed_next_page:
            # each while-iteration processes one listing page
            try:
                browser.get(website)
                time.sleep(3.5)
                page_info = get_total_cur_page(browser)
                print(whatstime() + '正在爬取 %s-%s 下的商户信息，%s'%(cate_type,cate_name,page_info))           
                
                # ------ guard: wrong category after paging ------
                # Some pages advertise a next page whose content belongs to a
                # different category; verify the breadcrumb still contains the
                # tag we are crawling before scraping the page.
                crumbSlide = browser.find_element_by_css_selector('.crumbSlide-con.clearfix')
                lis = crumbSlide.find_elements_by_css_selector('li')
                title = ''
                for each_li in lis:
                    title = title + each_li.text
                category_list = [each_word.strip() for each_word in title.split('>')]# breadcrumb parts: category - lv1 - lv2 ...
                print(title)
                if cate_name in category_list:
                    pass
                else:
                    # breadcrumb mismatch: treat as "no next page"
                    print('下一页内容和分类不符')
                    break
                # ------ end of wrong-category guard ------

                # NOTE(review): find_elements_* returns [] rather than raising
                # when nothing matches, so this handler likely fires only on
                # driver-level errors; if it fires on the very first page,
                # shop_headers is still unbound here (NameError) -- confirm.
                try:
                    shop_headers = browser.find_elements_by_css_selector('.shopHeader')
                except Exception as inst:
                    fail_list.append((type(inst),shop_headers,cate_name))
                    pic_path = os.path.join(fail_shortcut_path,'%s_%s.png'%(cate_name,file_time_suffix()))
                    browser.save_screenshot(pic_path)
                
                # collect every shop entry found on the current page
                for each_shop_header in shop_headers:
                    shopheader_info = each_shop_header.find_element_by_css_selector('.shopHeader-info')
                    shop_name = shopheader_info.find_element_by_css_selector('a').text
                    shop_href = shopheader_info.find_element_by_css_selector('a').get_attribute('href')
                    p_s = shopheader_info.find_elements_by_css_selector('p')
                    main_brand,addr = get_shop_brand_addr(p_s)        
                    # record the row
                    shop_name_list.append(shop_name)
                    shop_brand_list.append(main_brand)
                    shop_addr_list.append(addr)
                    shop_cate_list.append(cate_name)
                    shop_href_list.append(shop_href)   
                
                next_page_url = go_to_next_page(browser)
                if next_page_url == 'the_end':
                    existed_next_page = False
                else:
                    if website == next_page_url:
                        print(whatstime() + '出现下一页和当前页相同的错误！')
                        # next page equals current page: effectively no next page
                        existed_next_page = False
                    else:
                        try:
                            if page_info.split('/')[0] == page_info.split('/')[1]:
                            # already on the last page; a "next" link here would
                            # loop back to the first listing page, so stop
                                print(whatstime() + '到最后一页了！')
                                existed_next_page = False
                            else:
                                # a genuine next page exists
                                website = next_page_url
                        except BaseException:
                            # page_info was the '没有页码信息！' marker (no '/'
                            # to split on); keep the current website
                            pass                   
            except TimeoutException:
                # NOTE(review): website is unchanged, so the same page is
                # retried on the next loop iteration with no retry limit --
                # a permanently unreachable page loops forever; confirm intended.
                print(whatstime() + '网页%s读取超时，准备重新打开……'%website)
                pass
            except StaleElementReferenceException:
                print(whatstime() + '网页%s出现过时的引用错误，准备重新打开……'%website)
                pass
                
        print(whatstime() + '完成%s类目下所有商户信息的爬取！'%cate_name)                
    summary_info_df = pd.DataFrame({
                        cate_type:shop_cate_list,
                        '店名':shop_name_list,
                        '主营品牌':shop_brand_list,
                        '所在地':shop_addr_list,
                        '店铺网址':shop_href_list})
    return summary_info_df,fail_list
                            
def go_to_next_page(browser):
#####################################################
#   Check whether the current page has a "next page"
#   button and return its target URL if so.
#==================================================#
# Parameters:
#   browser:
#       live selenium WebDriver on the page to inspect
# Returns:
#   next_page_url:
#       href of the next-page button, or the sentinel
#       string 'the_end' when there is no next page
####################################################
    next_page_url = None
    try:
        next_page_button = browser.find_element_by_css_selector('.ui-page-next')
        next_page_url = next_page_button.get_attribute('href')
    except NoSuchElementException:
        print(whatstime() + '没有下一页了！')

    # get_attribute can also return None; treat that as "no next page" too.
    # (identity check replaces the old `type(x) == type(None)` anti-idiom)
    if next_page_url is None:
        next_page_url = 'the_end'

    return next_page_url
        

def get_shop_brand_addr(input_p_s):
#####################################################
#   Extract a shop's main brand and location from the
#   <p> tags of its shop header.
#==================================================#
# Parameters:
#   input_p_s: list of <p> elements that may carry the
#       "主营品牌" (main brand) and "所在地" (location) text
# Returns:
#   (main_brand, addr) -- either is NaN when missing
####################################################
    def _clean(raw, label):
        # strip the field label and surrounding whitespace
        return raw.replace(label, '').strip()

    count = len(input_p_s)
    if count == 0:
        # neither field present
        return np.NaN, np.NaN
    if count == 2:
        # both fields present: brand first, then location
        return (_clean(input_p_s[0].text, '主营品牌：'),
                _clean(input_p_s[1].text, '所在地：'))
    # single informative <p>: decide which field it carries
    first_text = input_p_s[0].text
    if '所在地' in first_text:
        return np.NaN, _clean(first_text, '所在地：')
    return _clean(first_text, '主营品牌：'), np.NaN

def mission_conduct(data_top_path,each_category,summary_df):
#####################################################
#   Worker entry point (run per process from the Pool):
#   scrape the shop lists for one product category and
#   its level-1/2/3 tags, saving one CSV per tag level
#   plus a pickle of the accumulated failures.
#==================================================#
# Parameters:
#   data_top_path: top-level output folder for scraped data
#   each_category: category name ('/' is replaced by '_'
#       when used in file-system paths)
#   summary_df: tag table with 分类/一级标签/二级标签/三级标签
#       columns and their matching *_网址 url columns
####################################################
    # NOTE(review): PhantomJS path is machine-specific -- adjust per host
    browser = webdriver.PhantomJS(executable_path=\
    '/home/stevenkwong/文档/web_app/phantomjs-2.1.1-linux-x86_64/bin/phantomjs',\
    service_args=['--ignore-ssl-errors=true'])
    browser.set_page_load_timeout(12)
    fail_list = list()

    data_category_path = os.path.join(data_top_path,each_category.replace('/','_'))
    # exist_ok avoids the race/crash of the old exists()+mkdir pair
    os.makedirs(data_category_path, exist_ok=True)

    sub_summary_df = summary_df[summary_df.分类.isin([each_category])]
    # .loc replaces the long-deprecated (and now removed) DataFrame.ix indexer
    cata_df = sub_summary_df.loc[:,['分类','分类_网址']].dropna().drop_duplicates()
    tags_lv1_df = sub_summary_df.loc[:,['一级标签', '一级标签_网址']].dropna().drop_duplicates()
    tags_lv2_df = sub_summary_df.loc[:,['二级标签', '二级标签_网址']].dropna().drop_duplicates()
    tags_lv3_df = sub_summary_df.loc[:,['三级标签', '三级标签_网址']].dropna().drop_duplicates()

    def _scrape_and_save(tag_df, tag_type):
        # scrape one tag level, time it, and persist the resulting CSV
        nonlocal fail_list
        t1 = time.time()
        info_df, fail_list = get_shops_info(browser, tag_df, tag_type, fail_list)
        t2 = time.time()
        print('耗时：%.2f秒'%(t2-t1))
        filename = os.path.join(data_category_path,
                                '%s_%s.csv'%(tag_type, each_category.replace('/','_')))
        info_df.to_csv(filename)

    try:
        # the category-level listing always exists; tag levels may be empty
        _scrape_and_save(cata_df, '分类')
        for tag_df, tag_type in ((tags_lv1_df, '一级标签'),
                                 (tags_lv2_df, '二级标签'),
                                 (tags_lv3_df, '三级标签')):
            if len(tag_df) != 0:
                _scrape_and_save(tag_df, tag_type)

        filename = os.path.join(data_category_path,'fail_list.pkl')
        with open(filename,'wb') as f:   # context manager guarantees close
            pk.dump(fail_list,f)
    finally:
        # always release the browser process, even on failure
        browser.quit()

def mission_conduct_test(data_top_path,each_category,summary_df):
#####################################################
#   Debug variant of mission_conduct: uses a visible
#   Firefox window and scrapes only the top-level 分类
#   (category) entry, skipping the tag levels.
#==================================================#
# Parameters: same as mission_conduct
####################################################
    browser = webdriver.Firefox()
    browser.set_page_load_timeout(12)
    fail_list = list()

    data_category_path = os.path.join(data_top_path,each_category.replace('/','_'))
    # exist_ok avoids the race/crash of the old exists()+mkdir pair
    os.makedirs(data_category_path, exist_ok=True)

    sub_summary_df = summary_df[summary_df.分类.isin([each_category])]
    # .loc replaces the removed DataFrame.ix indexer
    cata_df = sub_summary_df.loc[:,['分类','分类_网址']].dropna().drop_duplicates()

    try:
        #####################
        # category-level info only
        #####################
        t1 = time.time()
        sub_summary_info_df,fail_list = get_shops_info(browser,cata_df,'分类',fail_list)
        t2 = time.time()
        print('耗时：%.2f秒'%(t2-t1))

        filename = os.path.join(data_category_path,'分类_%s.csv'%each_category.replace('/','_'))
        sub_summary_info_df.to_csv(filename)

        filename = os.path.join(data_category_path,'fail_list.pkl')
        with open(filename,'wb') as f:   # context manager guarantees close
            pk.dump(fail_list,f)
    finally:
        # always release the browser, even on failure
        browser.quit()

def detect_mission_schedule(data_top_path,summary_df):
#####################################################
#   Report scraping progress: for each category, compare
#   the number of output files expected from its tag
#   levels with the number actually on disk.
#==================================================#
# Parameters:
#   data_top_path: top folder holding the scraped data
#       (e.g. '../../爬虫数据')
#   summary_df: tag table with 分类/一级标签/二级标签/三级标签 columns
####################################################
    for each_cate in summary_df.分类.unique():
        sub_summary_df = summary_df[summary_df.分类 == each_cate]

        # expected CSV count: one per populated tag level, plus the 分类 csv
        if len(sub_summary_df.三级标签.dropna()) != 0:
            files_num = 4
        else:
            print(whatstime() +'%s下没有三级标签。'%each_cate)
            if len(sub_summary_df.二级标签.dropna()) != 0:
                files_num = 3
            else:
                print(whatstime() +'%s下没有二级标签。'%each_cate)
                if len(sub_summary_df.一级标签.dropna()) != 0:
                    files_num = 2
                else:
                    print(whatstime() +'%s下没有一级标签。'%each_cate)
                    files_num = 1

        # every category folder also holds one fail_list.pkl
        files_num = files_num + 1
        each_cate_path = os.path.join(data_top_path,each_cate.replace('/','_'))
        actual_files_num = len(os.listdir(each_cate_path))
        if files_num != actual_files_num:
            # some outputs of this category are still missing
            print(whatstime() + '%s 分类下应该有%d个结果输出，实际只有%d个。'%(each_cate,files_num,actual_files_num))

def drop_duplicate_col(input_df):
####################################################
#   Fold the duplicated *_l/*_r column pairs produced by
#   an outer merge back into single columns.
#==================================================#
# Merge logic, per the indicator_sign column created by
# pd.merge(..., indicator='indicator_sign'):
#   'right_only' -> take the right-hand (_r) value
#   'left_only'  -> take the left-hand  (_l) value
#   'both'       -> take the left-hand  (_l) value
#   NOTE(review): the old header claimed 'both' takes the
#   right table, but the code has always taken _l -- that
#   behaviour is kept unchanged here.
# Parameters:
#   input_df: merged frame containing, for each of the
#       three fields below, a column pair such as
#       | ... | 所在地_l | 所在地_r | ... |
# Returns:
#   input_df with one fused column per field and the
#   indicator_sign helper column removed
####################################################
    # .loc replaces the removed DataFrame.ix indexer; the loop replaces
    # three identical copy-pasted stanzas
    for col in ('主营品牌', '店铺网址', '所在地'):
        input_df[col] = ''
        for sign, suffix in (('both', '_l'),
                             ('right_only', '_r'),
                             ('left_only', '_l')):
            mask = input_df.indicator_sign == sign
            input_df.loc[mask, col] = input_df.loc[mask, col + suffix]
        input_df = input_df.drop([col + '_l', col + '_r'], axis = 1)
    input_df = input_df.drop('indicator_sign', axis = 1)
    return input_df
    
    
def generate_full_table(data_top_path,summary_df):
#####################################################
#   Merge all per-tag-level CSVs of each category into
#   one wide table per category under 爬虫数据汇总/.
#==================================================#
# Parameters:
#   data_top_path: top folder holding the scraped data
#       (e.g. '../../爬虫数据')
#   summary_df: tag table whose 分类 column drives which
#       category folders are processed
# Output:
#   one '^'-separated CSV per category in 爬虫数据汇总/
####################################################
    # folder collecting all merged results; exist_ok avoids the
    # exists()+mkdir race of the old code
    output_path = os.path.join(os.getcwd(),'爬虫数据汇总')
    os.makedirs(output_path, exist_ok=True)

    category_tags = summary_df.分类.unique()
    title_df = pd.DataFrame(columns = ['分类', '店名', '一级标签', '二级标签', '三级标签',
    '主营品牌', '店铺网址', '所在地'])

    for ii, each_cate in enumerate(category_tags):
        if each_cate == '天猫图书':
            # NOTE(review): 天猫图书 is skipped -- presumably a different
            # page layout; confirm before re-enabling
            continue
        print(whatstime() + '正在写入%s 分类的数据，已完成总进度：%.2f'%(each_cate,(ii/len(category_tags)*100)) + '%')
        each_cate_path = os.path.join(data_top_path,each_cate.replace('/','_'))
        outcome_list = [each_file for each_file in os.listdir(each_cate_path) if '.csv' in each_file]
        df_list = list()
        for each_file_name in outcome_list:
            file_path = os.path.join(each_cate_path,each_file_name)
            tmp_df = pd.read_csv(file_path)
            # drop the unnamed index column written by to_csv
            tmp_df = tmp_df.drop('Unnamed: 0',axis = 1)
            df_list.append(tmp_df)
        each_cate_full_df = df_list[0]

        for each_df in df_list[1:]:
            each_cate_full_df = pd.merge(each_cate_full_df,each_df,on='店名',how='outer',
                                         suffixes=['_l', '_r'],indicator='indicator_sign')
            # fold the duplicated *_l/*_r columns back into one
            each_cate_full_df = drop_duplicate_col(each_cate_full_df)
        # prepend the full header row set (pd.concat replaces the removed
        # DataFrame.append); categories lacking a tag level get NaN columns
        each_cate_full_df = pd.concat([title_df, each_cate_full_df])
        each_cate_full_df = pd.DataFrame(each_cate_full_df,columns = ['分类', '店名', '一级标签', '二级标签', '三级标签',
                                            '主营品牌', '店铺网址', '所在地']) # fixed column order for every category

        # some shops first appear under a level-1 tag and lack the 分类
        # value; forward-fill it (.ffill replaces the deprecated
        # fillna(method='ffill') form)
        each_cate_full_df.分类 = each_cate_full_df.分类.ffill()

        file_name = os.path.join(output_path,each_cate.replace('/','_') + '.csv')
        each_cate_full_df.to_csv(file_name, index=False,sep = '^')

        print(whatstime() + '已完成%s 分类的信息整合。'%each_cate)
    
            
            
if __name__ == '__main__':
    # (redundant `import os` removed -- os is imported at module top)
    filename = 'label_cluster.csv'
    summary_df = pd.read_csv(filename)
    summary_df = summary_df.iloc[:,1:].drop_duplicates()    # first column is a stale index

    # (dead `mission_mode = 'h'` default removed -- it was immediately
    # overwritten by input())
    mission_mode = input('你需要跑全量任务吗？全量:f；半量:[h]\n')
    if mission_mode == 'h':
        # keep only the first half of the task list
        print(whatstime() + '正在跑半量任务……')
        split_index = int(len(summary_df)/2)
        summary_df = summary_df.iloc[:split_index,:]
    elif mission_mode == 'f':
        print(whatstime() + '正在跑全量任务……')
    else:
        # any other answer falls through to a full run
        print(whatstime() + '只能输入f或者h，你输入的什么鬼？！手动再见，请再次启动程序。')
        print(whatstime() + '姑且当你跑全量任务吧……')

    categorys = summary_df.分类.unique()

    # top-level folder holding one sub-folder per category;
    # exist_ok avoids the exists()+mkdir race of the old code
    data_top_path = os.path.join(os.getcwd(),'爬虫数据')
    os.makedirs(data_top_path, exist_ok=True)

    #######################################
    # worker-pool deployment and launch
    #######################################
    p = Pool(4)
    for each_category in categorys:
        # each product category gets its own folder of 分类/lv1/lv2/lv3 data
        p.apply_async(mission_conduct,(data_top_path,each_category,summary_df))
    print(whatstime() + '多线程启动……')
    p.close()
    p.join()
    print(whatstime() + '结束！')

    #######################################
    # completion check and table merge
    #######################################
    detect_mission_schedule(data_top_path,summary_df)
    generate_full_table(data_top_path,summary_df)

