# coding=utf-8

import os
import re
import urlparse
import codecs
import time
import sys
import json
import random 
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.by import By
import selenium.webdriver.support.ui as ui
from selenium.webdriver.support import expected_conditions as ec
from selenium.webdriver.support.wait import WebDriverWait

# Python 2 hack: reload() restores sys.setdefaultencoding, which the
# interpreter deletes after startup; forcing UTF-8 papers over implicit
# str<->unicode conversions throughout this script.
reload(sys) 
sys.setdefaultencoding('utf-8')
# NOTE(review): hard-coded absolute geckodriver path — this only works on one
# machine; consider putting geckodriver on PATH or reading the path from config.
# The driver is created at import time, so merely importing this module
# launches a Firefox window.
driver = webdriver.Firefox(executable_path ='C:\Python27\geckodriver-v0.19.1-win64\geckodriver.exe')
#driver.implicitly_wait(10)

class GetInfo(object):
    """Crawl posts (ids and text) from a weibo.cn user page.

    Uses the module-level Selenium ``driver``. Typical flow:
    ``loginwb()`` once, then ``spider(item_id, page_end, init_page)``,
    which crawls pages and persists the result as JSON via ``storage()``.
    """

    def loginwb(self, username, password):
        """Log in to mobile Weibo through the passport form.

        Any Selenium error is printed and swallowed, so the caller must
        judge from the output whether the session is actually usable.
        """
        try:
            login_link = "http://passport.weibo.cn/signin/login?entry=mweibo&r=http%3A%2F%2Fweibo.cn%2F&backTitle=%CE%A2%B2%A9&vt="
            driver.get(login_link)
            time.sleep(3)  # let the login form render
            elem_user = driver.find_element_by_id('loginName')
            elem_pwd = driver.find_element_by_xpath("//*[@id='loginPassword']")
            login_button = driver.find_element_by_xpath("//*[@class='btn btnRed']")
            elem_user.send_keys(username)
            elem_pwd.send_keys(password)
            login_button.click()
            time.sleep(3)  # wait for the post-submit redirect
        except Exception as e:
            print('error: %s' % e)
        finally:
            print(u'Login!\n')

    def parser_wb(self, url, page_end, init_page):
        """Walk pages ``init_page``..``page_end``, collecting posts.

        Returns ``(pages_crawled, posts_crawled, contents)`` where
        ``contents`` is a list of ``{'mid': ..., 'content': ...}`` dicts.
        Stops early when no "next page" link is found.
        """
        if url != '':
            driver.get(url)
            driver.implicitly_wait(3)
        page_num = init_page
        iSigma = 0      # running total of posts collected
        pSigma = 0      # running total of pages visited
        contents = []
        print('Starting crawling.........')

        while page_num <= page_end:
            print('/+++++++++++++++++++++++++++++++/')
            print('Page_Num: %s' % page_num)
            infos_cont = driver.find_elements_by_xpath(".//*[@class='c' and @id]")
            i = 0
            for value in infos_cont:
                mid_0 = value.get_attribute('id')
                try:
                    value.find_element_by_class_name('cmt')
                except Exception:
                    # No 'cmt' node: an original post, text lives in 'ctt'.
                    pars_0 = value.find_element_by_class_name('ctt').text
                else:
                    # Repost: take the last div and drop the 5-char
                    # repost-reason prefix.
                    pars_0 = value.find_element_by_xpath(".//div[last()]").text[5:]
                mid, text = self.format_data(mid_0, pars_0)
                contents.append({'mid': mid, 'content': text})
                i += 1

            print('Already crawling %s weibo_mids & contents' % i)
            iSigma += i
            pSigma += 1
            try:
                next_page = WebDriverWait(driver, 5).until(
                    ec.visibility_of(driver.find_element_by_link_text(u'下页'))
                )
                next_page.click()
                page_num += 1
                time.sleep(5)
            except Exception as e:
                print(e)
                print(u'已经是最后一页！！！')
                break
        return pSigma, iSigma, contents

    def format_data(self, mid_init, content):
        """Normalize a post: strip the 'M_' id prefix and scrub markup.

        Removes bracketed counters/emoticons, stray ']' and '#'
        characters, and the trailing client signature, applying the same
        seven substitutions, in the same order, as the original code.

        Returns ``(mid, cleaned_text)``.
        """
        mid = str(mid_init)[2:]  # element ids look like 'M_xxxxxxxx'
        patterns = (
            re.compile(r'\[\d{1,5}\]*'),      # bracketed numbers, e.g. [123]
            re.compile(r'\[.{1,2}\]*'),       # short emoticons, e.g. [heart]
            re.compile(r'\[.{3,4}\]*'),       # longer emoticons
            re.compile(r'.{1,2}(?=\]{1})'),   # emoticon residue before ']'
            re.compile(r'\]'),                # leftover closing brackets
            re.compile(r'\#'),                # hashtag markers
            re.compile(u'赞.+?Android$'),     # trailing client signature
        )
        for pat in patterns:
            content = pat.sub('', content)
        return mid, content

    def storage(self, contents, item_id=None, init_page=None, page_end=None):
        """Write ``contents`` as JSON under ./weibo.cn_<item_id>/.

        The extra parameters default to the module-level globals that the
        original implementation read implicitly (they are set in
        ``__main__``), so old call sites keep working.
        """
        if item_id is None:
            item_id = globals()['item_id']
        if init_page is None:
            init_page = globals()['init_page']
        if page_end is None:
            page_end = globals()['page_end']
        # Create a per-account folder next to the current working directory.
        targetPath = os.path.join(os.getcwd(), 'weibo.cn_' + item_id)
        if not os.path.exists(targetPath):
            os.makedirs(targetPath)
        else:
            print(u'路径已存在！')
        fileName = 'weibo.cn.%s_%d-%d.json' % (item_id, init_page, page_end)
        filePath = os.path.join(targetPath, fileName)
        # BUG FIX: write UTF-8 explicitly — the original opened the file
        # without an encoding and relied on sys.setdefaultencoding to
        # survive the non-ASCII dump (ensure_ascii=False).
        with codecs.open(filePath, 'w', encoding='utf-8') as fp:
            json.dump(contents, fp, indent=4, ensure_ascii=False)
            print(u'文件写入成功！')

    def spider(self, item_id, page_end, init_page):
        """Crawl one user's pages and persist the result.

        Builds e.g. https://weibo.cn/u/5467852665?page=2, runs
        ``parser_wb``, stores the contents, and returns
        ``(pages_crawled, posts_crawled)``.
        """
        print(u'准备访问个人微博.....')
        prefix_url = 'https://weibo.cn/u/'
        full_url = urlparse.urljoin(prefix_url, item_id) + "?page=" + str(init_page)
        print(full_url)
        pSum, iSum, text = self.parser_wb(full_url, page_end, init_page)
        # Pass the context explicitly instead of leaning on globals.
        self.storage(text, item_id, init_page, page_end)
        return pSum, iSum

if __name__=='__main__':

    crawl = GetInfo()

    with open('./usr_pwd.json', 'r') as fp:
        data = json.load(fp)
        lines = len(data) 
        print "The Json.File contains %d lines" %lines
        m = random.randrange(0,lines-1)
        select_line = data[m]
        username = select_line['usr']
        password = select_line['pwd']
    crawl.loginwb(username,password)

    print u"输入需要爬取的微博ID账号："
    item_id =raw_input()
    print u"输入该微博的爬取起始页码："
    init_page =int(raw_input())
    print u"输入该微博的爬取最大页码："
    page_end =int(raw_input())
    time.sleep(2)

    while item_id!="":
        pSum,iSum= crawl.spider(item_id,page_end,init_page)
        break
    print '/++++++++++++++++++++++/\r\n'
    print 'Successful crawling '+`pSum`+' Pages '+`iSum`+' mids & contents'
    print 'Crawling to be End'