#coding=utf-8
import urllib2,urllib,datetime
import socket,os,sys,re
socket.setdefaulttimeout(60)
import urlparse, urllib, urllib2
import re, pickle
import cookielib

from dateutil.relativedelta import *
# 0 = quiet; passed to urllib2.HTTPHandler(debuglevel=...) to dump HTTP traffic.
debug_level = 0
# SECURITY NOTE(review): real credentials are hard-coded in source; they
# should be moved to a config file or environment variables.
user = 'jayyoung.cn@gmail.com'
password = '101783'

# SSO login endpoint; returntype=TEXT makes the server answer with a
# Python-literal-like body that login() currently eval()s.
login_url = 'https://login.sina.com.cn/sso/login.php?username=%s&password=%s&returntype=TEXT'
# Keyword search page: %s is the (URL-encoded) keyword.
search_url = 'http://t.sina.com.cn/k/%s'
# Same search with all content-type filters enabled plus a start/end time
# window (%s keyword, %s starttime, %s endtime).
search_url_time_filter = 'http://t.sina.com.cn/k/%s&filter_ori=1&filter_ret=1&filter_text=1&filter_pic=1&filter_music=1&filter_video=1&filter_userscope=0&province=0&city=0&starttime=%s&endtime=%s&nickname=&filter_search='
# Form handlers used by publish() and add_comment().
publish_url = 'http://t.sina.com.cn/mblog/publish.php'
add_comment_url = 'http://t.sina.com.cn/comment/addcomment.php'
# Comment list for a post: %s owner uid, %s resource (post) id.
get_comments_url = 'http://t.sina.com.cn/comment/commentlist.php?act=1&from=0&ownerUid=%s&productId=miniblog2&resId=%s&resInfo=&type=1'
# Profile page for a user: %s uid or nickname.
person_info_url = 'http://t.sina.com.cn/%s/info'
# Desktop Firefox UA so the site serves the normal HTML pages.
user_agent = 'Mozilla/5.0 (X11; U; Linux i686; zh-CN; rv:1.9.1.7) Gecko/20100106 Ubuntu/9.10 (karmic) Firefox/3.5.7'

# Sent with every request; some endpoints reject requests without it.
referer = 'http://t.sina.com.cn'

class LoginFailError(Exception):
  """Raised when the SSO login response does not contain a uid."""

  def __init__(self, response=None):
    # BUG FIX: the attribute was stored under the misspelled name
    # 'respose', which made the saved response unreachable as intended.
    self.response = response

  def __str__(self):
    return "Login Fail"

class ParseSearchError(Exception):
  """Raised when expected markup cannot be extracted from a search page."""

  def __init__(self, info=''):
    # Free-form detail about what failed to parse.
    self.info = info

  def __str__(self):
    return "Parse HTML ERROR: %s" % self.info
                
class TsinaRobot(object):
    
  def __init__(self, user=user, password=password):
    self.u = user
    self.p = password
    self.opener = self._build_opener()

    self._add_opener_header('user-agent', user_agent)
    self._add_opener_header('referer', referer)
    
    #print self.opener.addheaders
        
  def _add_opener_header(self, k, v):
    i = 0
    for x, y  in self.opener.addheaders:
        if x.lower() == k.lower():break
        i = i + 1
    if i < len(self.opener.addheaders):
        self.opener.addheaders.remove(self.opener.addheaders[i])
            
    self.opener.addheaders.append((k,v))
            
  def _build_opener(self):
    cookie_support = urllib2.HTTPCookieProcessor()
    opener = urllib2.build_opener(urllib2.HTTPHandler(debuglevel=debug_level), cookie_support)
    return opener
      
  def login(self):
    response = self.opener.open(login_url%(self.u, self.p))
    try:
      self.uid = eval(response.read())['uid']
    except:
      raise LoginFail_Error(response)
      
  def publish(self, content):
    body = {'content': content}
    req = urllib2.Request(publish_url, data=urllib.urlencode(body))

    f = self.opener.open(req)
    resp, content = f.info(), f.read()
    return resp, content
      
  def search(self, keyword, time_filter=None):
    if not time_filter:
      url = search_url%keyword
    else:
      s, e = time_filter 
      url = search_url_time_filter%(keyword, s, e)
      print url, '\n' 
    f = self.opener.open(url)
    resp, content = f.info(), f.read()
    m = re.search(r'话题\((\d+)\)', content)
    return m.group(1)
  
  def add_comment(self, own_id, resource_id, res_content, re_content):
    post_data = {}
    post_data.setdefault('productId', 'miniblog2')
    post_data.setdefault('productName', '新浪微博')
    
    post_data['uid'] =self.uid
    #print data
    post_data['ownerUid'] = own_id
    post_data['resourceId'] = resource_id
    post_data['resTitle'] = res_content
    post_data['content'] = re_content
    
    req = urllib2.Request(add_comment_url, data=urllib.urlencode(post_data))
    f = self.opener.open(req)
    resp, content = f.info(), f.read()
    return resp, content
  
  def _fetch_url(self, url):
    f = self.opener.open(url)
    code, resp, content = f.getcode(), f.info(), f.read()
    return code, resp, content
  
  def get_comments(self, message):
    m = message
    url = get_comments_url%(m.own_id, m.resource_id)
    return self._fetch_url(url)
    
def has_digit(s):
  """Return True if *s* contains at least one decimal digit."""
  return re.search(r'\d', s) is not None

def remove_quotes(s):
  """Return *s* with every single-quote character removed."""
  return s.replace("'", '')

def parse_date(datestring, now):
  """Parse a Sina post timestamp string into a datetime.

  Supported formats as rendered by t.sina.com.cn:
    'N分钟前'          -> *now* minus N minutes
    '今天 HH:MM'       -> today (per *now*) at HH:MM
    'M月D日 HH:MM'     -> this year's month/day at HH:MM
    'YYYY-M-D HH:MM'   -> absolute timestamp

  Returns a datetime truncated to minute precision, or None when
  *datestring* matches none of the formats.
  """
  re_fenzhong = r'(\d{1,2})分钟前'
  re_jintian = r'今天 (\d{1,2}):(\d{1,2})'
  re_yueri = r'(\d{1,2})月(\d{1,2})日 (\d{1,2}):(\d{1,2})'
  re_nian = r'(\d{4})-(\d{1,2})-(\d{1,2}) (\d{1,2}):(\d{1,2})'

  m = re.match(re_fenzhong, datestring)
  if m:
    # Stdlib timedelta replaces the dateutil relativedelta the original
    # used; the result is identical for a plain minutes offset.
    r = now - datetime.timedelta(minutes=int(m.group(1)))
    return datetime.datetime(r.year, r.month, r.day, r.hour, r.minute)

  m = re.match(re_jintian, datestring)
  if m:
    # Renamed from the original's 'm', which shadowed the match object.
    hour, minute = int(m.group(1)), int(m.group(2))
    return datetime.datetime(now.year, now.month, now.day, hour, minute)

  m = re.match(re_yueri, datestring)
  if m:
    month, day, hour, minute = [int(g) for g in m.groups()]
    # Assumes the post is from the current year -- wrong across New Year.
    return datetime.datetime(now.year, month, day, hour, minute)

  m = re.match(re_nian, datestring)
  if m:
    year, month, day, hour, minute = [int(g) for g in m.groups()]
    return datetime.datetime(year, month, day, hour, minute)

  return None
    
def get_created_at(html_content, now):
  """Extract and parse the post timestamps from a search-results page.

  Timestamps live in the text of <cite><a ...>...</a></cite> anchors.
  Strings parse_date cannot understand are silently dropped, so the
  returned list may be shorter than the number of anchors found.
  """
  raw = re.findall(r'<cite><a href=.+?>(.+?)</a></cite>', html_content)
  # PERF FIX: the original called parse_date twice per string (once to
  # filter, once to keep the value); parse each string exactly once.
  parsed = (parse_date(s, now) for s in raw)
  return [d for d in parsed if d]

def get_mes(html_content):
  """Extract the argument lists of scope.loadCommentByRid(...) calls.

  Returns one list of quote-stripped argument strings per call found in
  *html_content*.
  """
  calls = re.findall(r'scope.loadCommentByRid\(.*?\)', html_content)
  messages = []
  for call in calls:
    # Take everything between '(' and the trailing ')', split on commas,
    # and strip the single quotes around each argument.
    args = call.split('(')[1][:-1].split(',')
    messages.append([arg.replace("'", '') for arg in args])
  return messages
  
class Message(object):
  """A single microblog post extracted from a search-results page."""

  def __init__(self, own_id, resource_id, content, created_at):
    # Bulk-assign all four fields in a single tuple unpacking.
    (self.own_id, self.resource_id, self.content, self.created_at) = (
        own_id, resource_id, content, created_at)
          
def parse_search(html_content, now):
  """Build Message objects from a search-results page.

  Pairs each scope.loadCommentByRid(...) argument list (owner id at
  index 0, resource id at index 3, URL-encoded content at index 4) with
  the timestamp parsed from the corresponding <cite> anchor.

  Raises ParseSearchError when the two extractions disagree in length,
  since pairing them positionally would then be wrong.
  """
  data = get_mes(html_content)
  created_at_s = get_created_at(html_content, now)

  if len(data) != len(created_at_s):
    raise ParseSearchError('len(created_at_s) != len(results)')

  results = []
  # zip the two parallel lists instead of indexing with range(len(...)).
  for fields, created_at in zip(data, created_at_s):
    results.append(Message(fields[0].strip(), fields[3].strip(),
                           urllib.unquote(fields[4]), created_at))
  return results
        
# Leftover manual-testing fixtures: these date patterns duplicate the ones
# defined locally inside parse_date, and test_date_string holds sample
# inputs for it. Kept for backward compatibility with any external user.
re_fenzhong = r'(\d{1,2})分钟前'
re_jintian = r'今天 (\d{1,2}):(\d{1,2})'
re_yueri = r'(\d{1,2})月(\d{1,2})日 (\d{1,2}):(\d{1,2})'
re_nian = r'(\d{4})-(\d{1,2})-(\d{1,2}) (\d{1,2}):(\d{1,2})'

test_date_string = ['2009-12-4 15:52', '1月25日 18:51', '3分钟前', '43分钟前', '今天 17:28', '今天 07:28']

# NOTE: a redundant 'import datetime' used to sit here; datetime is
# already imported at the top of the file. Module-level side effect kept
# for compatibility (n is the "now" reference used by manual tests).
n = datetime.datetime.now()

if __name__ == '__main__':
  # Example POST payload accepted by addcomment.php, kept for reference:
  '''
  uid=1688157870&ownerUid=1684183727&resourceId=2011002201805269&productId=miniblog2&productName=%E6%96%B0%E6%B5%AA%E5%BE%AE%E5%8D%9A&resTitle=new%2520weapon%2520ready%25EF%25BC%258C%25E5%2593%2588%25E5%2593%2588&resInfo=&listInDiv=1&content=test
  '''
  # BUG FIX: the class is named TsinaRobot; the original instantiated the
  # non-existent TSinaRobot, which raised NameError at startup.
  t = TsinaRobot()
  t.login()

  reply_content = '淘宝上超可爱植物大战僵尸小东东http://tsina.cndjango.com/1?own_id=%s'%'1'
  mes_content = 'new weapon ready，哈哈'
  own_id = '1684183727'
  resource_id = '2011002201805269'

  # Post the demo comment on the hard-coded sample post.
  t.add_comment(own_id, resource_id, mes_content, reply_content)