# coding=utf-8


import os
import re
import urlparse
import codecs
import time
import sys
import json
import random
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.by import By
import selenium.webdriver.support.ui as ui
from selenium.webdriver.support import expected_conditions as ec
from selenium.webdriver.support.wait import WebDriverWait


# Python 2 compatibility hack: make utf-8 the process-wide default codec so
# mixed str/unicode operations do not raise UnicodeDecodeError.
reload(sys) 
sys.setdefaultencoding('utf-8')
# Module-level browser instance shared by every GetInfo method below.
driver = webdriver.Firefox(executable_path ='C:\Python27\geckodriver-v0.19.1-win64\geckodriver.exe')    # geckodriver must be downloaded and installed beforehand
#driver.implicitly_wait(10)     # Applies to the driver's whole lifetime, declare once: maximum wait for a condition; as soon as it holds, waiting stops and execution continues.


class GetInfo(object):
    
    def loginwb(self,username, password):
        try:
            login_link ="https://weibo.com/"
            driver.get(login_link)
            time.sleep(15)
            elem_user = driver.find_element_by_xpath(".//*[@id='loginname']")
            elem_pwd = driver.find_element_by_xpath(".//*[@id='pl_login_form']/div/div[3]/div[2]/div/input")
            login_button = driver.find_element_by_xpath(".//div[@class='info_list login_btn']/a")
            elem_user.send_keys(username)
            elem_pwd.send_keys(password)
            login_button.click()
            time.sleep(20)      #如果此处出现验证码，请手动输入验证码
            login_button.click()    #有时不出现验证码，则此处会返回找不到class="W_btn_a btn_32px "的error，忽略即可
        except Exception,e:
            print 'error:',e
        finally:
            print u'Login!\n'
        
    def parser_wb(self,url,page_end,init_page):
        
        '''
            url:微博地址
            page_end:爬取的最大页码
        '''    
        if url!='':
            driver.get(url)
            time.sleep(5)
        page_num = init_page    # 起始页
        iSigma = 0      # 已经爬取的微博总条数
        pSigma = 0      # 已经爬取的微博总页数
        loopSigma = 0     # 一次循环里已经爬取的页数，上限值99，爬取100页微博
        contents = []      # 构造json数组
        
        self.drop_down_loading()
        #driver.page_source
        print 'Starting crawling.........'
        time.sleep(2)                

        while page_num <= page_end:

            print '/+++++++++++++++++++++++++++++++/'
            print 'Page_Num: '+ `page_num`
            infos_cont = driver.find_elements_by_xpath(".//*[@class='WB_cardwrap WB_feed_type S_bg2 WB_feed_like 'and @mid]")
            #print infos_cont
            i=0     # 每页爬取的微博条数
            for value in infos_cont:
                mid_init = value.get_attribute('mid')
                element_text = value.find_element_by_xpath(".//div[@class='WB_text W_f14']").text
                #print element_text
                contents.append({'mid':mid_init,'content':element_text})
                i+=1
            print 'Already crawling '+ `i` + ' weibo_mids & contents'
            iSigma =iSigma+i
            pSigma+=1
            loopSigma+=1
            if loopSigma >99:     # 每100页结果写成一个json文件
                fileName='wb_com_%s.json'%(page_num-99)
                filePath=targetPath+os.path.sep+fileName        # 在前面创建的文件夹内写入json文件
                self.storage(filePath,contents)
                loopSigma = 0     # 重置循环计数
                contents = []  
            try:
                text =u'下一页'
                next_page = WebDriverWait(driver, 2).until(
                ec.visibility_of(driver.find_element_by_link_text(text))
                )
                next_page.click()
                page_num+=1
                if page_num > page_end:
                    fileName='wb_com_%s.json'%(page_num - loopSigma)
                    filePath=targetPath+os.path.sep+fileName        # 在前面创建的文件夹内写入json文件
                    self.storage(filePath,contents)
                    break
                else:
                    time.sleep(5)
                    self.drop_down_loading()
                    #time.sleep(5)
                    #driver.page_source
            except Exception,e:
                fileName='wb_com_%s.json'%(page_num - loopSigma + 1)
                filePath=targetPath+os.path.sep+fileName        # 在前面创建的文件夹内写入json文件
                self.storage(filePath,contents)                
                print e
                print u'已经是最后一页！！！'
                break
        return pSigma,iSigma
    '''
    def format_data(self,mid_init,content):
        
        mid = str(mid_init)
        mid = mid[2:]
        pat_0 = re.compile(r'\[\d{1,5}\]*')     
        pat_1 = re.compile(r'\[.{1,2}\]*')      # 去除 表情符 [心] [鼓励]  
        pat_2 = re.compile(r'\[.{3,4}\]*')      # 去除 表情符 [棒棒哒] [并不简单]
        pat_3 = re.compile(r'.{1,2}(?=\]{1})')  # 去除 表情符残留 心] 鼓励] 匹配汉字后为']'
        pat_4 = re.compile(r'\]')               # 去除 ']'
        pat_5 = re.compile(r'\#')               # 去除 '#'
        pat_6 = re.compile(u'赞.+?Android$')
        pars_1 = pat_0.sub('',content)
        pars_2 = pat_1.sub('',pars_1)
        pars_3 = pat_2.sub('',pars_2)
        pars_4 = pat_3.sub('',pars_3)
        pars_5 = pat_4.sub('',pars_4)
        pars_6 = pat_5.sub('',pars_5)
        pars_7 = pat_6.sub('',pars_6)
        return mid,pars_7  
    '''
    
    def drop_down_loading(self):
        
        next_page_btn = False
                
        while next_page_btn == False:
            js = "window.scrollTo(0, document.body.scrollHeight);"
            driver.execute_script(js)
            time.sleep(5)
            #driver.page_source            
            try:
                text =u'下一页'
                next_page = WebDriverWait(driver, 2).until(
                    ec.visibility_of(driver.find_element_by_link_text(text))
                )                
            except Exception, e:
                next_page_btn = False
            else:
                next_page_btn = True        

    def storage(self,filePath,contents):

        with open(filePath,'w') as fp:
            json.dump(contents, fp=fp,indent=4, ensure_ascii=False)
            print u'文件写入成功！'

    def spider(self,item_id,page_end,init_page,cont_type):

        '''
            item_id:微博地址
            page_end:爬取的最大页码
            init_page:起始页码
            cont_type:爬取的内容类型
        '''    
                
        print u'准备访问个人微博.....'
        '''
        https://weibo.cn/u/5467852665
        https://weibo.cn/u/5467852665?page=2
        https://weibo.com/u/5467852665?is_all=1
        https://weibo.com/u/5467852665?profile_ftype=1&is_all=1#_0
        https://weibo.com/u/5467852665?is_search=0&visible=0&is_all=1&is_tag=0&profile_ftype=1&page=2#feedtop
        '''
        prefix_url ='https://weibo.com/u/'
        if init_page == 1:
            full_url = urlparse.urljoin(prefix_url,item_id) + "?profile_ftype=1&is_" + cont_type + "1#_0"
            print full_url
        else:
            full_url = urlparse.urljoin(prefix_url,item_id) + "?is_search=0&visible=0&is_" + cont_type + "=1&is_tag=0&profile_ftype=1&page=" + str(init_page) + "#feedtop"
            print full_url            
        pSum,iSum = self.parser_wb(full_url,page_end,init_page)
        return pSum,iSum
          
        
if __name__=='__main__':
            
    crawl = GetInfo()

    with open('./usr_pwd.json', 'r') as fp:
        data = json.load(fp)
        #print data
        lines = len(data) 
        print "The Json.File contains %d lines" %lines
        m = random.randrange(0,lines-1)
        select_line = data[m]
        username = select_line['usr']
        password = select_line['pwd']       
    crawl.loginwb(username,password)

    print u"输入需要爬取的微博ID账号："
    item_id =raw_input()
    print u"输入要爬取的内容类型："
    print u"all--->全部\rori--->原创\rpic--->图片\rvideo--->视频\rmusic--->音乐\rarticle--->文章\rtag--->标签"
    cont_type =raw_input()
    print u"输入该微博的爬取起始页码："
    init_page =int(raw_input())
    print u"输入该微博的爬取最大页码："
    page_end =int(raw_input())
    time.sleep(2)
    #item_id ='5467852665'
    
    while item_id!="":
        curPath=os.getcwd()     # 在程序当前目录创建一个数据存储文件夹，命名，构造路径
        tempPath='weibo.com_' + item_id
        targetPath=curPath+os.path.sep+tempPath
        if not os.path.exists(targetPath):
            os.makedirs(targetPath)
        else:
            print u'路径已存在！'
        pSum,iSum= crawl.spider(item_id,page_end,init_page,cont_type)
        break
    print '/++++++++++++++++++++++/\r\n'
    print 'Successful crawling '+`pSum`+' Pages '+`iSum`+' mids & contents'
    print 'Crawling to be End'
    #driver.close()
