#coding=utf-8
import re,datetime,urllib
from tsina.exception import ParseSearchError
from tsina.models import Message
from created_at import _get_created_at

def _remove_quotes(s):
  s = s.replace("'", '')
  return s
  
def _get_nick_names(html_content):
    h = html_content
    data = re.findall(r'<p class=".+?" mid=".+?" type=".+?"><a href="(.+?)" title="(.+?)">', h)
    return [d[1] for d in data]
    
def _get_clients(html_content):
    h = html_content
    clients = []
    data = re.findall(r'来自</strong>(.+?)</div>', h)
    for d in data:
        c = re.findall(r'<a.+?>(.+?)</a>', d)
        if not c:clients.append(d)
        else:
            clients.append(c[0])
    return clients
    
def _get_url_id(html_content):
   h = html_content
   data = re.findall(r'<cite><a href="http://t.sina.com.cn/\d+/(\w+?)">.+?</a></cite>', h)
   data = [d.split('/')[-1] for d in data ]
   return data
   
def _get_mes(html_content):
  """Extract the argument lists of scope.loadCommentByRid(...) calls.

  Each call's parenthesized argument string is split on commas (naively
  — arguments containing commas are NOT handled) and single quotes are
  stripped from every piece. Returns a list of argument lists, one per
  call found in the page.
  """
  calls = re.findall(r'scope.loadCommentByRid\(.*?\)', html_content)
  parsed = []
  for call in calls:
    # Take the text between the first '(' (same as split('(')[1]) and
    # the trailing ')', then split into raw comma-separated pieces.
    raw_args = call.split('(')[1][:-1].split(',')
    parsed.append([_remove_quotes(arg) for arg in raw_args])
  return parsed
            
def parse_search(html_content, now=None):
  """Parse a Sina Weibo search-result page into Message objects.

  Arguments:
    html_content -- raw HTML of a search-result page.
    now -- reference datetime passed to _get_created_at for resolving
           relative timestamps; defaults to the current time *at call
           time*.

  Returns a list of Message instances.
  Raises ParseSearchError when the extracted field lists disagree in
  length.
  """
  # BUG FIX: the original signature used `now=datetime.datetime.now()`,
  # which is evaluated ONCE at import time — every subsequent call that
  # omitted `now` reused that stale timestamp. Resolve the default at
  # call time instead.
  if now is None:
    now = datetime.datetime.now()

  nick_names = _get_nick_names(html_content)
  data = _get_mes(html_content)
  created_at_s = _get_created_at(html_content, now)
  url_ids = _get_url_id(html_content)
  cs = _get_clients(html_content)

  if len(data) != len(created_at_s):
    raise ParseSearchError('len(created_at_s) != len(mes)')
  if len(data) != len(url_ids):
    raise ParseSearchError('len(url_ids) != len(mes)')

  # Debug `print` removed: library code should not write to stdout.
  results = []
  for i in range(len(data)):
    # data[i] fields: [0]=mid, [3]=text-ish, [4]=url-encoded payload —
    # assumed from the positional usage here; TODO confirm against the
    # page's loadCommentByRid argument order.
    m = Message(data[i][0].strip(), nick_names[i], data[i][3].strip(),
                url_ids[i], urllib.unquote(data[i][4]), created_at_s[i], cs[i])
    results.append(m)

  return results

def parse_profile(html_content, nick_name):
  """Parse a user-profile page into Message objects.

  Arguments:
    html_content -- raw HTML of the profile page.
    nick_name -- the profile owner's nick name, applied to every
                 extracted message (profile pages carry a single author).

  Returns a list of Message instances.
  Raises ParseSearchError when the extracted field lists disagree in
  length.
  """
  data = _get_mes(html_content)
  # NOTE(review): unlike parse_search, _get_created_at is called without
  # a `now` argument here — presumably it supplies its own default;
  # confirm against created_at._get_created_at's signature.
  created_at_s = _get_created_at(html_content)
  url_ids = _get_url_id(html_content)
  cs = _get_clients(html_content)

  if len(data) != len(created_at_s):
    raise ParseSearchError('len(created_at_s) != len(mes)')
  if len(data) != len(url_ids):
    raise ParseSearchError('len(url_ids) != len(mes)')

  # Debug `print` removed: library code should not write to stdout.
  results = []
  for i in range(len(data)):
    m = Message(data[i][0].strip(), nick_name, data[i][3].strip(),
                url_ids[i], urllib.unquote(data[i][4]), created_at_s[i], cs[i])
    results.append(m)

  return results