#! /opt/python/bin/python
# -*- coding: utf8 -*-
# Chinese comments in this file do not trigger encoding warnings (see the coding declaration above).

import codecs
import datetime
import hashlib
import random
import re
import socket
import sys
import time
import urllib2
from urlparse import urljoin

import MySQLdb
from BeautifulSoup import *

import content_extractor
import df_classifier
import html_extractor
from url_utf8_getter import Utf8UrlGetter

reload(sys)
sys.setdefaultencoding('utf8') 

class Author:
  def __init__(self, id, name, title, url):
    self.id_ = id
    self.name_ = name
    self.title_ = title
    self.url_ = url

class Article:
  def __init__(self, title, url, digest, create_time):
    self.title_ = title
    self.url_ = url
    self.digest_ = digest
    self.create_time_ = create_time


class Crawler:
  def __init__(self, classifier):
    self.indexed_ = set()
    index_file = open("value_index", "r")
    for line in index_file: 
      self.indexed_.add(line.strip())
    index_file.close()

    self.classifier_ = classifier

    self.section_patterns_ = {
        "blog.sina.com" : {
          "TTL" : ("div", {"class" : "blog_title"}), 
          "DGT" : ("div", {"class" : "content"}),
          "URL" : ("div", {"class" : "blog_title"}),
          "TSP" : ("span", {"class" : "time SG_txtc"})
        },
        "blog.163.com" : {
          "TTL" : ("a", {"class" : "ztag m2a fc03"}), 
          "DGT" : ("div", {"class" : "btxt nophoto"}),
          "URL" : ("a", {"class" : "ztag m2a fc03"}),
          "TSP" : ("span", {"class" : "ztag xtag"})
        }
      }
    self.url_patterns_ = {
        "blog.sina.com" :  re.compile('^http://blog.sina.com.cn/s/blog_[0-9a-z]+.html'),
        "blog.163.com" :  re.compile('^http://.*.blog.163.com/blog/static/[0-9]+/')
        }

    self.db_conn_ = MySQLdb.connect(host="127.0.0.1",user="root",passwd="",db="xingtan")
    self.db_cursor_ = self.db_conn_.cursor()

  def __del__(self):
    index_file = open("value_index", "w")
    for url in self.indexed_:
      index_file.write(url + '\n')
    index_file.close()
    self.db_cursor_.close()
    self.db_conn_.close()

  def DbInsertArticle(self, author_id, title, url, main_html):
    if len(main_html) <= 0:
      return

    m = hashlib.md5()
    m.update(url)
    print m.hexdigest()

    try:
      ts = datetime.datetime.now()

      sql = "SELECT MAX(id) AS max_id FROM article_content"
      self.db_cursor_.execute(sql)
      article_id = 1
      row = self.db_cursor_.fetchone()
      if row and row[0]:
        article_id = row[0] + 1
      print article_id

      sql = """INSERT IGNORE INTO article_content(id, content) VALUES 
          (%d, '%s')""" % (article_id, self.db_conn_.escape_string(main_html.encode()))
      #print sql
      self.db_cursor_.execute(sql)
      self.db_conn_.commit();
      sql = """INSERT IGNORE INTO articles(id, author, title, create_time, url, url_md5, digest) VALUES 
          (%d, %d, '%s', '%s', '%s', '%s', '%s')""" % (article_id, author_id, 
          self.db_conn_.escape_string(title.encode()), 
          str(ts.strftime("%Y-%m-%d %H:%M:%S")), url.encode(), self.db_conn_.escape_string(m.digest()), '')
      #print sql
      self.db_cursor_.execute(sql)
      self.db_conn_.commit();
    except Exception, e:
      print "insert article error %s" % (str(e))

  def MatchUrlPattern(self, url):
    for site in self.url_patterns_:
      if url.find(site) >= 0:
        return self.url_patterns_[site]
    return None

  def MatchPatterns(self, url):
    for site in self.section_patterns_:
      if url.find(site) >= 0:
        return self.section_patterns_[site]
    return None

  def IsIndexed(self, url):
    if url in self.indexed_:
      return True
    return False

  def GetDateTime(self, soup):
    date = ''
    time = ''
    text = str(soup)
    date_pat = re.compile('\d{4}-\d{1,2}-\d{1,2}')
    g = date_pat.search(text)
    if g: date = g.group()

    time_pat = re.compile('\d{1,2}:\d{1,2}')
    g = time_pat.search(text)
    if g: time = g.group()

    return "%s %s" % (date, time)

  def GetHref(self, soup):
    if soup.name != 'a':
      soup = soup.find('a')
    print str(soup)
    if soup.name == 'a':
      for attr in soup.attrs:
        if attr[0] == 'href':
          return attr[1]
    return None

  # 从html页面中提取文字（不带标签）
  def GetTextOnly(self, soup):
    SPACE = '&nbsp;'
    v = soup.string
    if v == None:
      c = soup.contents
      result = ''
      for t in c:
        subtext = self.GetTextOnly(t)
        if subtext[len(subtext) - len(SPACE):] == SPACE:
          subtext = subtext[0 : len(subtext) - len(SPACE)]

        if len(result) > 0 and result[-1] != ' ':
          result += ' '
        result += subtext 
      return result
    else:
      # 过滤无信息的标签
      if v.parent.name in ('script', 'noscript', 'style'):
        return ''
      return v.strip()

  def ExtractArticles(self, soup, patterns):
    fields = {}
    for pat in patterns:
      pat_soups = soup.findAll(patterns[pat][0], patterns[pat][1])
      if pat_soups:
        for s in pat_soups:
          text = ''
          if pat == 'URL':
            text = self.GetHref(s)
          elif pat == 'TSP':
            text = self.GetDateTime(s)
          else:
            text = self.GetTextOnly(s)

          if pat not in fields:
            fields[pat] = []
          fields[pat].append(text)
      else:
        print patterns[pat][0], 'NOT FOUND'
    count = 0
    for pat in fields:
      count = len(fields[pat])
      break
    print "item count ", count
    articles = []

    for i in range(count):
      article = {}
      if self.IsIndexed(fields['URL'][i]):
        print 'Been indexed before', fields['URL'][i]
        continue
      for pat in fields:
        article[pat] = fields[pat][i]
      articles.append(article)
      # print pat, ":", fields[pat][i]

    return articles

  def GetEncoding(self, content):
    pos = content.find('<meta ')
    while pos > 0:
      end = content.find("/>", pos)
      meta = content[pos : end]
      pos = content.find('<meta ', end)
      pattern = 'charset='
      charset_pos = meta.find(pattern)
      if charset_pos > 0:
        end = charset_pos + len(pattern) + 1
        while meta[end].isalpha() or meta[end].isdigit() or meta[end] == '-':
          end += 1
        encoding = meta[charset_pos + len(pattern) : end]
        try:
          if codecs.lookup(encoding):
            return encoding
        except:
          print "error : unsupported encoding ", encoding 
          return None
    return None

  def GetArticleUrls(self, url):
    url_getter = Utf8UrlGetter()
    html = url_getter.GetHtml(url, 8)
    if len(html) <= 0:
      return;
    html = html_extractor.RemoveAnnotation(html)
    soup = BeautifulSoup(''.join(html))
    article_urls = set()
    av = soup.findAll('a')
    for a in av:
      href = html_extractor.GetHref(a)
      if not href:
        continue
      url_pattern = self.MatchUrlPattern(url)
      m = url_pattern.match(href)
      if m:
        article_urls.add(m.group())

    sv = int(random.random() * 5) + 5 
    print "Blog home sleep ", sv, " seconds."
    time.sleep(sv)

    return article_urls

  def Crawl(self, authors):
    url_getter = Utf8UrlGetter()
    for author in authors:
      urls = self.GetArticleUrls(author.url_)
      if not urls:
        continue
      for url in urls:
        if self.IsIndexed(url):
          print '------ Been indexed url', url 
          continue
        print url
        html = url_getter.GetHtml(url, 4)
        if len(html) <= 0:
          continue;
        title = ""
        main_html = ""
        try:
          title = content_extractor.ExtractTitle(html)
          #main_html = content_extractor.ExtractMainText(html, True)
          main_html = content_extractor.SinaBlogExtractMainText(url, html, True)
        except Exception, e:
          print "insert article error %s" % (str(e))

        self.indexed_.add(url)
        if len(main_html) > 0:
          print '++++++ New url', url 
          self.DbInsertArticle(author.id_, title, url, main_html)
        sv = int(random.random() * 4) + 4
        print "sleep ", sv, " seconds."
        time.sleep(sv)
        # main_text = content_extractor.ExtractMainText(html, True)
        # doc_category = self.classifier_.Classify(title + main_text)
        # print title, doc_category
        # print text, doc_category
        # if doc_category == 2: 
        #   self.DbInsertArticle(article, author.id_)

def LoadAuthors():
  db_conn = MySQLdb.connect(host="127.0.0.1",user="root",passwd="",db="xingtan")
  db_cursor = db_conn.cursor()

  sql = "SELECT id, name, title, url FROM authors ORDER BY RAND();"
  db_cursor.execute(sql)
  row = db_cursor.fetchone()
  authors = []
  while row:
    if (row[0] > 0):
      print "%d %s %s %s" % (row[0], row[1], row[2], row[3])
      authors.append(Author(row[0], row[1], row[2], row[3]))
    row = db_cursor.fetchone()

  db_cursor.close()
  db_conn.close()
  return authors

if __name__ == '__main__':
  # time.sleep(int(random.random() * 3600))

  cats = ('food', 'clothing', 'finance', 'history', 'sports')
  classifier = df_classifier.DocCatProbClassifier(cats, "all.df")
  crawler = Crawler(classifier)

  authors = LoadAuthors()
  #authors = [Author(0, 't', 't', 'http://blog.sina.com/danbin168')]
  #authors = [Author(0, 't', 't', 'http://blog.sina.com.cx/pubanyaobin')]
  #authors = [Author(0, 't', 't', 'http://blog.sina.com/lichi')]
  crawler.Crawl(authors)


