#-*- coding:utf-8 -*-
import re
import time
import os
import hashlib

import requests
from lxml import etree
from weibohelper import DbUtils
import traceback
from proxyhelper import Proxy_DbUtils
from config import Config
from flaghelper import Flag_DbUtils

from datetime import datetime
import os
from time_utils import timeutils
from apscheduler.schedulers.blocking import BlockingScheduler
class myproxy:
    def __init__(self):
        print 'myproxy init...'

    def getIp(self, i):
        #r = requests.get('http://127.0.0.1:8000/?types=0&count=30&country=国内')
        #ip_ports = json.loads(r.text)
        ip_ports = Proxy_DbUtils().queryProxy()
        ip = ip_ports[i][0]
        port = ip_ports[i][1]
        proxies = {
            'http': 'http://%s:%s' % (ip, port),
            'https': 'http://%s:%s' % (ip, port)
        }
        return proxies


    def weibo_check(self, proxies, url, cookies):
        flag = False
        headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.115 Safari/537.36',
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
            'Accept-Language': 'zh-CN,zh;q=0.8',
            'Connection': 'keep-alive',
            'Accept-Encoding': 'gzip, deflate',
        }
        try:
            #r = requests.get(url=url, headers=headers, timeout=3,
            #                proxies=proxies)
            #r.encoding = chardet.detect(r.content)['encoding']

            html = requests.get(url=url, cookies=cookies, headers=headers, proxies=proxies, timeout=3).content
            if html.strip() == "" or len(re.findall(r'Access Denied',html)) or len(re.findall(r'414 Request-URI Too Large',html))  or len(re.findall(r'location.replace',html)):
                flag = False
            else:
                flag = True
        except Exception as e:
            flag = False
            html = ""

        return flag, html


class weibo:
    """Crawler for one weibo.cn account.

    Fetches the user's screen name, profile counters and posts through
    rotating proxies, accumulating results in parallel lists, then persists
    them via writeTxt() / writeDb().
    """
    #proxy_i = 1
    #first_time = True
    #cookie = {"Cookie": "SCF=AlnE5jaK7xjdG9AxuHx3xAprP_D5mWbcJyxa7r7rZs1Dppi0-nht0EC7aLWd2T2gpY3MStH0SbNT7TJgjifekTI.; SUBP=0033WrSXqPxfM725Ws9jqgMF55529P9D9WFMU-CyUqgxpa7s1VnzVKZx5JpX5o2p5NHD95Q0eoBpeonc1K.NWs4Dqcjwi--ciK.Ni-27i--4i-2EiK.R1K.0eoBpeonc1K.N; _T_WM=c4eb91b320e3932decf6f1a967403ee7; SUB=_2A250a2rWDeRhGeVM71MT-CjFwjuIHXVXlHaerDV6PUJbkdANLUvskW0k0ODIXTaf3eevSuQXnOgYI07ANA..; SUHB=0gHiWxzZaOVfeA; SSOLoginState=1500453510"} # replace "your cookie" with your own cookie
    cookie = {}  # class-level cookie dict shared by all requests; set in __init__
    # per-account crawl state initialisation
    def __init__(self,user_id,filter = 0,tmp_cookies={},updateCycle=0,updateNum=0,first_time=True,classify="",medium="",media="",company="",province="",city=""):
            """Set up crawl state for one account.

            NOTE(review): mutable default tmp_cookies={} would be shared
            across calls, but the only visible caller (begin) always passes
            its own dict, so it is harmless here.
            """
            self.user_id = user_id # numeric user id, e.g. 1669879400 for "Dear-迪丽热巴"
            self.filter = filter # 0 (default) = crawl all posts; 1 = original posts only
            self.userName = '' # screen name, e.g. "Dear-迪丽热巴"
            self.weiboNum = 0 # total number of posts the user has
            self.weiboNum2 = 0 # number of posts actually crawled
            self.following = 0 # following count
            self.followers = 0 # follower count
            self.weibos = [] # post texts
            self.num_zan = [] # like counts, parallel to self.weibos
            self.num_forwarding = [] # repost counts, parallel to self.weibos
            self.num_comment = [] # comment counts, parallel to self.weibos
            self.urls = []
            self.times = []

            self.proxy_i = 0  # index into the proxy list; bumped on each failed proxy
            self.updateCycle = updateCycle
            self.updateNum = updateNum

            self.first_time = first_time  # True: crawl all pages; False: only updateNum pages

            self.classify = classify
            self.medium = medium
            self.media = media
            self.company = company
            self.province = province
            self.city = city

            print 'classify: ' + self.classify + 'medium: ' + self.medium + 'media: ' + self.media + 'company: ' + self.company + 'province' + self.province

            weibo.cookie = tmp_cookies  # shared by every request this process makes
    # fetch the user's screen name
    def getUserName(self):
      """Fetch the account's screen name from its info page into self.userName (gbk-encoded)."""
      try:

        # print proxy
        url = 'http://weibo.cn/%d/info'%(self.user_id)

        # rotate through up to 30 proxies until one returns a usable page
        proxy = myproxy().getIp(self.proxy_i)
        flag, html = myproxy().weibo_check(proxy, url,weibo.cookie)
        while (flag == False and self.proxy_i < 30):
              self.proxy_i += 1
              proxy = myproxy().getIp(self.proxy_i)
              flag, html = myproxy().weibo_check(proxy, url, weibo.cookie)
        print '有效代理: ' + str(proxy)
        #print html
        #html = requests.get(url, cookies = weibo.cookie,headers={'Connection':'close'},proxies=proxy).content
        selector = etree.HTML(html)
        userName = selector.xpath("//title/text()")[0]
        # drop the last 3 chars of the <title> (presumably the "的微博" suffix) -- TODO confirm
        self.userName = userName[:-3].encode('gbk')
        print '用户昵称：' + self.userName.decode('gbk').encode('utf-8')
      except Exception,e:
        print "Error: ",e
        traceback.print_exc()

    # fetch the user's post count, following count and follower count
    def getUserInfo(self):
      """Fetch post / following / follower counts from the user's first page."""
      try:

        url = 'http://weibo.cn/u/%d?filter=%d&page=1'%(self.user_id,self.filter)  #,proxies=myproxy().getIp(weibo.proxy_i)

        # rotate through up to 30 proxies until one returns a usable page
        proxy = myproxy().getIp(self.proxy_i)
        flag, html = myproxy().weibo_check(proxy, url, weibo.cookie)
        while (flag == False and self.proxy_i < 30):
            self.proxy_i += 1
            proxy = myproxy().getIp(self.proxy_i)
            flag, html = myproxy().weibo_check(proxy, url, weibo.cookie)
        print '有效代理: ' + str(proxy)
        print html
        #html = requests.get(url, cookies = weibo.cookie,headers={'Connection':'close'},proxies=proxy).content
        selector = etree.HTML(html)
        pattern = r"\d+\.?\d*"  # first number (optionally decimal) inside a stat string

        # post count
        str_wb = selector.xpath("//div[@class='tip2']/span[@class='tc']/text()")[0]
        guid = re.findall(pattern, str_wb, re.S|re.M)
        for value in guid:
            num_wb = int(value)
            break
        self.weiboNum = num_wb
        #print '微博数: ' + str(self.weiboNum)

        # following count
        str_gz = selector.xpath("//div[@class='tip2']/a/text()")[0]
        guid = re.findall(pattern, str_gz, re.M)
        self.following = int(guid[0])
        #print '关注数: ' + str(self.following)

        # follower count
        str_fs = selector.xpath("//div[@class='tip2']/a/text()")[1]
        guid = re.findall(pattern, str_fs, re.M)
        self.followers = int(guid[0])
        #print '粉丝数: ' + str(self.followers)
      except Exception,e:
        print "Error: ",e
        traceback.print_exc()

    # fetch post texts plus the matching like / repost / comment counts
    def getWeiboInfo(self):
      """Crawl the user's post pages and fill the parallel weibos/times/urls/count lists."""
      try:
        url = 'http://weibo.cn/u/%d?filter=%d&page=1'%(self.user_id,self.filter)  #,proxies=myproxy().getIp(weibo.proxy_i)

        proxy = myproxy().getIp(self.proxy_i)
        flag, html = myproxy().weibo_check(proxy, url, weibo.cookie)
        while (flag == False and self.proxy_i < 30):
            self.proxy_i += 1
            proxy = myproxy().getIp(self.proxy_i)
            flag, html = myproxy().weibo_check(proxy, url, weibo.cookie)
        print '有效代理: ' + str(proxy)
        #html = requests.get(url, cookies = weibo.cookie,headers={'Connection':'close'},proxies=proxy).content
        selector = etree.HTML(html)
        # page count comes from the hidden "mp" input; its absence means a single page
        if selector.xpath('//input[@name="mp"]')==[]:
           pageNum = 1
        else:
           pageNum = (int)(selector.xpath('//input[@name="mp"]')[0].attrib['value'])
        pattern = r"\d+\.?\d*"

        # on refresh runs, only re-crawl the newest updateNum pages
        if(self.first_time == True):
         self.first_time = False
        else:
         pageNum=int(self.updateNum)

        #tmp_myproxy = myproxy()
        #pageNum+1
        for page in range(1,pageNum+1):

          print '正在爬取第%d页......' % page
          #proxy = myproxy().getIp(self.proxy_i)
          url2 = 'http://weibo.cn/u/%d?filter=%d&page=%d' % (self.user_id, self.filter, page)
          #if(page % 5 == 0):

            #proxy = myproxy().getIp(self.proxy_i)
          #while (myproxy().weibo_check(proxy, url2) == False and self.proxy_i < 30):
          #        self.proxy_i += 1
          #        proxy = myproxy().getIp(self.proxy_i)
          proxy = myproxy().getIp(self.proxy_i)
          flag, html2 = myproxy().weibo_check(proxy, url2, weibo.cookie)
          # NOTE(review): unlike the other retry loops this one has no proxy_i < 30
          # bound, so it can raise IndexError when the proxy list is exhausted
          while (flag == False):
              self.proxy_i += 1
              proxy = myproxy().getIp(self.proxy_i)
              flag, html2 = myproxy().weibo_check(proxy, url2, weibo.cookie)
          print '有效代理: ' + str(proxy)
          #time.sleep(5)
          #html2 = requests.get(url2, cookies = weibo.cookie,headers={'Connection':'close'},proxies=proxy).content
          selector2 = etree.HTML(html2)
          info = selector2.xpath("//div[@class='c']")
          #print len(info)

          time_utils = timeutils()
          if len(info) > 3:
            # skip the trailing two div.c nodes (presumably page chrome) -- TODO confirm
            for i in range(0,len(info)-2):

              #self.urls.append(url2)

              self.weiboNum2 = self.weiboNum2 + 1

              # post timestamp
              str_time = info[i].xpath("div/span[@class='ct']")[0].text.encode('utf-8','ignore')
              #print str_time
              print time_utils.processTime(str_time)#.decode('gbk', 'ignore').encode('utf-8')
              self.times.append(time_utils.processTime(str_time))

              '''#获取url
              str_url = info[i].xpath("div/span[@class='ctt']/a/@href")[-1].encode('utf-8','ignore')
              print 'url = ' + str_url'''

              # post body
              str_t = info[i].xpath("div/span[@class='ctt']")

              str_cmt = info[i].xpath("div/span[@class='cmt']")
              str_content1 = ""
              str_content2 = ""

              #print 'cmt长度' + str(len(str_cmt))

              # NOTE(review): this inner loop reuses the outer index variable i;
              # everything below that indexes info[i] or str_cmt[i] therefore uses
              # the LAST inner value, not the post being iterated -- looks like a
              # latent bug, confirm before relying on the per-post counters
              for i in range(len(str_cmt)):
                  # repost of someone else's weibo
                  if i == 0 :
                      str_content1 += str_cmt[i].xpath('string(.)').encode('utf-8','ignore')
                  # repost reason
                  if(i == 3):
                      str_content2 += '转发理由:  '
                      str_content2 += str_cmt[i].xpath('../text()')[0].encode('utf-8', 'ignore')


              # round-trip through gbk to drop characters gbk cannot represent
              weibocontent = str_t[0].xpath('string(.)').encode('gbk','ignore')
              weibocontent = str_content1 + weibocontent.decode('gbk', 'ignore').encode('utf-8') + str_content2
              print '微博内容： ' + weibocontent

              self.weibos.append(weibocontent)
              #print '微博内容：'+ weibos

              if (len(str_cmt) > 3):
                  str_zan = str_cmt[i].xpath('../a')[-4].text.encode('utf-8', 'ignore')
                  guid = re.findall(pattern, str_zan, re.M)
                  num_zan = int(guid[0])
                  self.num_zan.append(num_zan)
                  #print '点赞数: ' + str(num_zan)

                  forwarding = str_cmt[i].xpath('../a')[-3].text.encode('utf-8', 'ignore')
                  guid = re.findall(pattern, forwarding, re.M)
                  num_forwarding = int(guid[0])
                  self.num_forwarding.append(num_forwarding)
                  #print '转发数: ' + str(num_forwarding)

                  comment = str_cmt[i].xpath('../a')[-2].text.encode('utf-8', 'ignore')

                  guid = re.findall(pattern, comment, re.M)
                  num_comment = int(guid[0])
                  self.num_comment.append(num_comment)

                  # post url
                  str_url = str_cmt[i].xpath('../a/@href')[-2].encode('utf-8', 'ignore')
                  self.urls.append(str_url)
                  print 'url = ' + str_url
                  #print '评论数: ' + str(num_comment)
              else:
                  # like count
                  str_zan = info[i].xpath("div/a/text()")[-4]
                  guid = re.findall(pattern, str_zan, re.M)
                  num_zan = int(guid[0])
                  self.num_zan.append(num_zan)
                  # print '点赞数: ' + str(str_zan.encode('utf-8', 'ignore'))
                  #print '点赞数: ' + str(num_zan)
                  # repost count
                  forwarding = info[i].xpath("div/a/text()")[-3]
                  guid = re.findall(pattern, forwarding, re.M)
                  num_forwarding = int(guid[0])
                  self.num_forwarding.append(num_forwarding)
                  # print '转发数: ' + str(forwarding.encode('utf-8', 'ignore'))
                  #print '转发数: ' + str(num_forwarding)
                  # comment count
                  comment = info[i].xpath("div/a/text()")[-2]
                  guid = re.findall(pattern, comment, re.M)
                  num_comment = int(guid[0])
                  self.num_comment.append(num_comment)

                  # post url
                  str_url = info[i].xpath("div/a/@href")[-2].encode('utf-8', 'ignore')
                  self.urls.append(str_url)
                  print 'url = ' + str_url
                  # print '评论数: ' + str(comment.encode('utf-8', 'ignore'))
                  #print '评论数: ' + str(num_comment)

              print '准备插入第%d条微博' % (i + 1)

              '''m2 = hashlib.md5()
              m2.update(weibo)     
              flag = m2.hexdigest()
              if DbUtils().queryByFlag(flag):
                  continue
              else:
                  print '插入的微博内容为: %s' % weibocontent.decode('gbk', 'ignore').encode('utf-8')
                  DbUtils().insert(url2,weibocontent.decode('gbk', 'ignore').encode('utf-8'),flag,num_forwarding,num_zan,num_comment)
              '''
        if self.filter == 0:
          print '共'+str(self.weiboNum2)+'条微博'
        else:
          print '共'+str(self.weiboNum)+'条微博，其中'+str(self.weiboNum2)+'条为原创微博'
      except Exception,e:
        print "Error: ",e
        traceback.print_exc()

    # main routine: crawl everything for this account
    def start(self):
      """Run the full crawl: screen name, profile stats, then all posts."""
      try:
        weibo.getUserName(self)
        weibo.getUserInfo(self)
        weibo.getWeiboInfo(self)
        print '信息抓取完毕'
        print '==========================================================================='
      except Exception,e:
        print "Error: ",e

    def writeDb(self):
        """Persist crawled posts: insert new ones, update counters of known ones.

        Posts are deduplicated by the md5 of their content, tracked in the
        flag table (Flag_DbUtils) which maps md5 -> DB row id.
        """

        dbutils = DbUtils()

        flagutils = Flag_DbUtils()
        for i in range(1, self.weiboNum2 + 1):
            '''text = str(i) + ':' + self.weibos[i - 1].decode('gbk', 'ignore').encode('utf-8') + '\n' + '点赞数：' + str(
                self.num_zan[i - 1]) + '	 转发数：' + str(self.num_forwarding[i - 1]) + '	 评论数：' + str(
                self.num_comment[i - 1]) + '\n\n'''

            content = self.weibos[i - 1]
            m2 = hashlib.md5()
            m2.update(content)

            print '准备第%d条微博' % i
            flag = m2.hexdigest()

            num_forwarding = self.num_forwarding[i - 1]
            num_zan = self.num_zan[i - 1]
            num_comment = self.num_comment[i - 1]
            url = self.urls[i - 1]
            dateline = self.times[i - 1]
            #print Flag_DbUtils().queryFlag(flag)
            # md5 flag already recorded -> post exists, just refresh its counters
            if  Flag_DbUtils().queryFlag(flag):

              id = Flag_DbUtils().queryId(flag)
              now_time = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))
              dbutils.update(num_forwarding, num_zan, num_comment, id, now_time)
              print '更新的微博内容为: %s id为: %d 时间为: %s' % (content,id,now_time)
              continue
            else:
              print '插入的微博内容为: %s' % content
              now_time = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))


              insert_id = dbutils.insert(url, content,  num_forwarding, num_zan,
                             num_comment,self.classify, self.medium,self.media, self.company, self.province,now_time, dateline,title=content, city=self.city)
              flagutils.insert(flag,insert_id)
        dbutils.close()
        flagutils.close()
    # write the crawled info to a text file
    def writeTxt(self):
      """Write the crawl summary and all posts to weibo/<user_id>.txt."""
      try:
        if self.filter == 1:
           resultHeader = '\n\n原创微博内容：\n'
        else:
           resultHeader = '\n\n微博内容：\n'
        result = '用户信息\n用户昵称：' + self.userName.decode('gbk', 'ignore').encode('utf-8') + '\n用户id：' + str(self.user_id) + '\n微博数：' + str(self.weiboNum) + '\n关注数：' + str(self.following) + '\n粉丝数：' + str(self.followers) + resultHeader
        for i in range(1,self.weiboNum2 + 1):
          text=str(i) + ':' + self.weibos[i-1] + '\n'+'点赞数：' + str(self.num_zan[i-1]) + '	 转发数：' + str(self.num_forwarding[i-1]) + '	 评论数：' + str(self.num_comment[i-1]) + '\n\n'
          result = result + text
        if os.path.isdir('weibo') == False:
           os.mkdir('weibo')
        f = open("weibo/%s.txt"%self.user_id, "wb")
        f.write(result)
        f.close()
        # NOTE(review): backslash-joined path assumes Windows -- confirm target platform
        file_path=os.getcwd()+"\weibo"+"\%d"%self.user_id+".txt"
        print '微博写入文件完毕，保存路径%s'%(file_path)
      except Exception,e:
        print "Error: ",e
        traceback.print_exc()

    def setFirst_time(self, flag):
        """Override the first_time flag (False => incremental update crawl)."""
        self.first_time = flag




def begin():
    config = Config('spider.conf')

    if config.read()  and config.readWeibo():

        weibo_num = len(config.medias)

        for i in range(0, weibo_num):
          # 使用实例,输入一个用户id，所有信息都会存储在wb实例中
          user_id = int(config.weiboids[i])  # 可以改成任意合法的用户id（爬虫的微博id除外）
          filter = 0  # 值为0表示爬取全部的微博信息（原创微博+转发微博），值为1表示只爬取原创微博
          cookie = {"Cookie": config.tmp_cookies}
          updateCycle = config.updateCycle
          updateNum = config.updateNum
          classify = config.classify
          medium = config.medium

          media = config.medias[i]
          company = config.companys[i]
          province = config.provinces[i]
          city = config.citys[i]

          #print 'classify: ' + config.classify + 'medium: ' + config.medium + 'media: ' + config.media + 'company: ' + config.company + 'province' + config.province

          wb = weibo(user_id, filter, cookie, updateCycle, updateNum,classify=classify,medium=medium,media=media,company=company,province=province,city=city)  # 调用weibo类，创建微博实例wb
          wb.setFirst_time(config.first_time)
          wb.start()  # 爬取微博信息
          print '用户名：' + wb.userName.decode('gbk', 'ignore').encode('utf-8')
          print '全部微博数：' + str(wb.weiboNum)
          print '关注数：' + str(wb.following)
          print '粉丝数：' + str(wb.followers)
          print '最新一条微博为：' + wb.weibos[0]#.decode('gbk', 'ignore').encode('utf-8')  # 若filter=1则为最新的原创微博，如果该用户微博数为0，即len(wb.weibos)==0,打印会出错，下同
          print '最新一条微博获得的点赞数：' + str(wb.num_zan[0])
          print '最新一条微博获得的转发数：' + str(wb.num_forwarding[0])
          print '最新一条微博获得的评论数：' + str(wb.num_comment[0])
          wb.writeTxt()  # wb.writeTxt()只是把信息写到文件里，大家可以根据自己的需要重新编写writeTxt()函数
          wb.writeDb()

    else:

        #db= Flag_DbUtils()
        # for page in range(1, 1200):
        # dbutils.insert('https://weibo.cn/u/3938812452?page=1')
        # dbutils.insert("url", "content", "flag", 1, 1, 1)
        #print db.queryFlag("a9b80b9bbe0a1211c6443c379c9bdd27")
        '''i = 0
        proxy = myproxy().getIp(i)
        print proxy
        while(myproxy().weibo_check(proxy) == False and i < 30):
            i += 1
            proxy = myproxy().getIp(i)
        print myproxy().weibo_check(proxy)
        print i'''
        print '请查看配置是否正确'

if __name__ == '__main__':
    #begin()
    # blocking scheduler re-runs the crawl periodically
    scheduler = BlockingScheduler()

    # run one crawl immediately at startup
    begin()

    config = Config('spider.conf')
    if config.read():

        updateCycle = config.updateCycle
        # schedule follow-up crawls every updateCycle days
        scheduler.add_job(begin, 'interval', seconds=60 * 60 * 24 * int(updateCycle))
    #print('Press Ctrl+{0} to exit'.format('Break' if os.name == 'nt' else 'C'))

        # scheduler.start() blocks until interrupted
        try:
          scheduler.start()
        except (KeyboardInterrupt, SystemExit):
          pass