# coding=utf-8


import os
import re
import urlparse
import codecs
import time
import sys
import json
import random
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.by import By
import selenium.webdriver.support.ui as ui
from selenium.webdriver.support import expected_conditions as ec
from selenium.webdriver.support.wait import WebDriverWait


# Python 2 hack: re-expose sys.setdefaultencoding and force UTF-8 so that
# implicit str<->unicode conversions of Chinese text do not raise
# UnicodeDecodeError throughout the crawler.
reload(sys) 
sys.setdefaultencoding('utf-8')
# Module-level Selenium Firefox driver shared by all GetInfo methods.
# NOTE(review): geckodriver must be downloaded/installed at this path beforehand.
driver = webdriver.Firefox(executable_path ='C:\Python27\geckodriver-v0.19.1-win64\geckodriver.exe')    # geckodriver must be pre-downloaded/installed
#driver.implicitly_wait(10)


class GetInfo(object):
    
    def loginwb(self,username, password):
        try:
            login_link ="http://passport.weibo.cn/signin/login?entry=mweibo&r=http%3A%2F%2Fweibo.cn%2F&backTitle=%CE%A2%B2%A9&vt="
            driver.get(login_link)
            time.sleep(5)
            elem_user = driver.find_element_by_id('loginName')
            elem_pwd = driver.find_element_by_xpath("//*[@id='loginPassword']")
            login_button = driver.find_element_by_xpath("//*[@class='btn btnRed']")
            elem_user.send_keys(username)
            elem_pwd.send_keys(password)
            login_button.click()
            time.sleep(5)                                      
        except Exception,e:
            print 'error:',e
        finally:
            print u'Login!\n'
        
    def parser_wb(self,url,page_end,init_page):
        
        '''
            url:微博地址
            page_end:爬取的最大页码
        '''    
        if url!='':
            driver.get(url)
            driver.implicitly_wait(3)         
        page_num = init_page    # 起始页
        iSigma = 0      # 已经爬取的微博总条数
        pSigma = 0      # 已经爬取的微博总页数
        loopSigma = 0     # 一次循环里已经爬取的页数，上限值99，爬取100页微博
        contents = []      # 构造json数组
        print 'Starting crawling.........'
        time.sleep(2)

        while page_num <= page_end:

            print '/+++++++++++++++++++++++++++++++/'
            print 'Page_Num: '+ `page_num`
            infos_cont = driver.find_elements_by_xpath(".//*[@class='c' and @id]")
            i=0     # 每页爬取的微博条数
            for value in infos_cont:
                try:
                    #element_cmt = WebDriverWait(driver, 5).until(ec.presence_of_elements_located_of((By.CLASS_NAME,"cmt")))
                    element_cmt = value.find_element_by_class_name('cmt')
                except Exception,e:
                    cmt_exist = False
                    cont = value.find_element_by_class_name('ctt').text
                    #print value.text  该元素下的所有text被提取
                    mid_init = value.get_attribute('id')
                    mid,text = self.format_data(mid_init, cont)
                    contents.append({'mid':mid,'content':text})
                    i+=1                    
                else:
                    cmt_exist = True                
                    cont = value.find_element_by_xpath(".//div[last()]").text
                    conts = cont[5:]
                    mid_init = value.get_attribute('id')
                    mid,text = self.format_data(mid_init, conts)
                    contents.append({'mid':mid,'content':text})
                    i+=1                    
            
            print 'Already crawling '+ `i` + ' weibo_mids & contents'
            iSigma =iSigma+i
            pSigma+=1
            loopSigma+=1
            if loopSigma >99:     # 每100页结果写成一个json文件
                fileName='wb_cn_%s.json'%(page_num-99)
                filePath=targetPath+os.path.sep+fileName        # 在前面创建的文件夹内写入json文件
                self.storage(filePath,contents)
                loopSigma = 0     # 重置循环计数
                contents = []  
            try:
                text =u'下页'
                next_page = WebDriverWait(driver, 3).until(
                ec.visibility_of(driver.find_element_by_link_text(text))
                )
                next_page.click()
                page_num+=1
                if page_num > page_end:
                    fileName='wb_cn_%s.json'%(page_num - loopSigma)
                    filePath=targetPath+os.path.sep+fileName        # 在前面创建的文件夹内写入json文件
                    self.storage(filePath,contents)
                    break
                else:
                    time.sleep(5)
            except Exception,e:
                fileName='wb_cn_%s.json'%(page_num - loopSigma + 1)
                filePath=targetPath+os.path.sep+fileName        # 在前面创建的文件夹内写入json文件
                self.storage(filePath,contents)                
                print e
                print u'已经是最后一页！！！'
                break
        return pSigma,iSigma
    
    def format_data(self,mid_init,content):
        
        mid = str(mid_init)
        mid = mid[2:]
        pat_0 = re.compile(r'\[\d{1,5}\]*')     
        pat_1 = re.compile(r'\[.{1,2}\]*')      # 去除 表情符 [心] [鼓励]  
        pat_2 = re.compile(r'\[.{3,4}\]*')      # 去除 表情符 [棒棒哒] [并不简单]
        pat_3 = re.compile(r'.{1,2}(?=\]{1})')  # 去除 表情符残留 心] 鼓励] 匹配汉字后为']'
        pat_4 = re.compile(r'\]')               # 去除 ']'
        pat_5 = re.compile(r'\#')               # 去除 '#'
        pat_6 = re.compile(u'赞.+?Android$')
        pars_1 = pat_0.sub('',content)
        pars_2 = pat_1.sub('',pars_1)
        pars_3 = pat_2.sub('',pars_2)
        pars_4 = pat_3.sub('',pars_3)
        pars_5 = pat_4.sub('',pars_4)
        pars_6 = pat_5.sub('',pars_5)
        pars_7 = pat_6.sub('',pars_6)
        return mid,pars_7  

    def storage(self,filePath,contents):

        with open(filePath,'w') as fp:
            json.dump(contents, fp=fp,indent=4, ensure_ascii=False)
            print u'文件写入成功！'

    def spider(self,item_id,page_end,init_page):

        '''
            item_id:微博地址
            page_end:爬取的最大页码
            init_page:起始页码
        '''    
                
        print u'准备访问个人微博.....'
        '''
        https://weibo.cn/u/5467852665
        https://weibo.cn/u/5467852665?page=2
        '''
        prefix_url ='https://weibo.cn/u/'
        full_url = urlparse.urljoin(prefix_url,item_id) + "?page=" + str(init_page)
        print full_url
        pSum,iSum = self.parser_wb(full_url,page_end,init_page)
        return pSum,iSum
          
        
if __name__=='__main__':
            
    crawl = GetInfo()

    with open('./usr_pwd.json', 'r') as fp:
        data = json.load(fp)
        #print data
        lines = len(data) 
        print "The Json.File contains %d lines" %lines
        m = random.randrange(0,lines-1)
        select_line = data[m]
        username = select_line['usr']
        password = select_line['pwd']       
        #print username +'//'+ password
    crawl.loginwb(username,password)

    print u"输入需要爬取的微博ID账号："
    item_id =raw_input()
    print u"输入该微博的爬取起始页码："
    init_page =int(raw_input())
    print u"输入该微博的爬取最大页码："
    page_end =int(raw_input())
    time.sleep(2)
    #item_id ='5467852665'
    
    while item_id!="":
        curPath=os.getcwd()     # 在程序当前目录创建一个数据存储文件夹，命名，构造路径
        tempPath='weibo.cn_' + item_id
        targetPath=curPath+os.path.sep+tempPath
        #print(targetPath)
        if not os.path.exists(targetPath):
            os.makedirs(targetPath)
        else:
            print u'路径已存在！'
        pSum,iSum= crawl.spider(item_id,page_end,init_page)
        break
    print '/++++++++++++++++++++++/\r\n'
    print 'Successful crawling '+`pSum`+' Pages '+`iSum`+' mids & contents'
    print 'Crawling to be End'
