﻿import logging
import re
from datetime import tzinfo, timedelta, datetime
import time
import traceback

from google.appengine.api import urlfetch

import PyRSS2Gen
import feedparser

import models

# Base URL of the NewSMTH BBS; every page URL in this module is built from it.
smthhost = "http://www.newsmth.net"
#smthhost = "http://www.2.newsmth.net"




class TZ(tzinfo):
    """Fixed UTC+8 timezone (China Standard Time) for feed timestamps."""

    def utcoffset(self, dt):
        # Constant +8:00 offset.
        return timedelta(hours=8)

    def dst(self, dt):
        # No daylight saving time in this zone; without this override,
        # datetime.dst() raises NotImplementedError.
        return timedelta(0)

    def tzname(self, dt):
        return "UTC+8"


def converFromGB2312ToUTF8(onestr):
    """Decode a CP936/GB2312 byte string and re-encode it as UTF-8.

    If ``onestr`` is already a unicode object, the decode step raises
    TypeError and is skipped, so unicode input is simply UTF-8 encoded.
    """
    newstr = onestr
    try:
        newstr = unicode(newstr, 'cp936', 'ignore')
    except (TypeError, UnicodeError):
        # Already-decoded text (or an exotic decode failure): fall through
        # and encode whatever we have.  The old bare except also swallowed
        # KeyboardInterrupt/SystemExit, which it should not.
        pass
    return newstr.encode('utf-8', 'ignore')

def getPostId(board_id, article_id, post_type=None):
    """Resolve a relative post reference to a concrete article id.

    board_id / article_id are strings; post_type is one of
    "thread_previous", "thread_next", "previous", "next" or None.
    The BBS answers the &p=... URL with a redirect whose Location header
    carries the id of the target article; we extract it from there.
    Returns article_id unchanged when post_type is None/unknown.
    Raises Exception when the redirect has no Location header or the
    final URL carries no id.
    """
    global smthhost
    url = smthhost + "/bbscon.php?bid=" + board_id + "&id=" + article_id

    # Map the symbolic direction onto the server's query suffix.
    suffixes = {
        "thread_previous": '&p=tp',
        "thread_next": '&p=tn',
        "previous": '&p=p',
        "next": '&p=n',
    }
    if post_type not in suffixes:
        return article_id
    url += suffixes[post_type]

    response = urlfetch.fetch(url=url, follow_redirects=False)
    if response and response.status_code in [301, 302]:
        # Header capitalization varies, so probe both spellings.
        if 'location' in response.headers:
            url = response.headers['location']
        elif 'Location' in response.headers:
            url = response.headers['Location']
        else:
            raise Exception('Could not find location in headers: %r' % (response.headers,))

    match_obj = re.search(r'&id=(\d+)', url)
    if not match_obj:
        raise Exception('Could not find article id in url: %r' % (url,))
    return match_obj.group(1)
    

def getContent(url, method=urlfetch.GET, data=None):
    """Fetch ``url`` (retrying up to twice) and return its body re-encoded
    from CP936 to UTF-8, or None when every attempt fails.

    301/302 redirects are followed manually (at most 5 hops) because the
    BBS sends site-relative Location headers that must be re-anchored on
    smthhost before refetching.
    """
    count = 1
    while count < 3:
        response = None
        try:
            response = urlfetch.fetch(url=url, follow_redirects=False, method=method, payload=data)

            redirect_counts = 0
            while redirect_counts < 5 and response and response.status_code in [301, 302]:
                logging.info(str(count) + "--" + url + ", status_code:" + str(response.status_code))
                # Header capitalization varies, so probe both spellings.
                if 'location' in response.headers:
                    url = response.headers['location']
                elif 'Location' in response.headers:
                    url = response.headers['Location']
                else:
                    raise Exception('Could not find location in headers: %r' % (response.headers,))
                global smthhost
                # Location is site-relative; rebuild an absolute URL.
                url = smthhost + '/' + url
                response = urlfetch.fetch(url=url, follow_redirects=False)
                redirect_counts += 1

        except Exception:
            # Log and retry; a transient fetch error should not kill the
            # caller.  (Was a bare except, which also caught SystemExit.)
            logging.info(traceback.format_exc())
            count = count + 1
            continue
        logging.info(str(count) + "--" + url + ", status_code:" + str(response.status_code))
        if response and response.status_code == 200:
            return converFromGB2312ToUTF8(response.content)
        count = count + 1


def cleanArcicleContent(content):
    """Undo the JavaScript string escaping found in bbscon pages and strip
    the embedded colour control codes, returning plain article text."""
    # Ordered unescape table: the order matters ("\n" before "\\", etc.),
    # so iterate over a list rather than a dict.
    unescapes = [
        ("\\n", "\n"),
        ("\\'", "'"),
        ("\\\"", "\""),
        ("\\\\", "\\"),
        ("\\/", "/"),
    ]
    cleaned = content
    for escaped, plain in unescapes:
        cleaned = cleaned.replace(escaped, plain)
    # Drop colour control sequences such as "\r[1;32m".
    return re.sub("\\\\r\[\\d*?(;\\d*?)*?[a-zA-Z]", "", cleaned)

def getVeryCleanText(content):
    """Return the article text with every piece of boilerplate removed.

    Strips, in order: the signature block, the posting header, any quoted
    reference article, the "reposted from board" banner, and blank lines.
    Unlike getCleanText(), the quoted reference text is discarded.

    (The original wrapped each compiled pattern in ``if match_obj:`` —
    always true for a compile result — so those dead checks are removed.)
    """
    # remove signature: from the "--" separator line to the [FROM: ip] line
    content = re.compile("""^--\\n.*^[^\\n]*?\\[FROM\\:\\s*?\\d*?\\.\\d*?\\.\\d*?\\..*?\\]""", re.MULTILINE | re.DOTALL).sub("", content)

    # remove the posting header (the lines from "发信人" through "站内")
    content = re.compile("""^发信人.*?^发信站.*?站内\\n""", re.MULTILINE | re.DOTALL).sub("", content)

    # remove the quoted reference article (the "【 在 ... 的大作中提到 】"
    # banner plus the ":"-prefixed quoted lines)
    content = re.compile("""^(【 在.*? 的大作中提到.*?】\\n((\\:.*?\n)|(\\n))*)|^(\\:.*?\\n)""", re.MULTILINE | re.DOTALL).sub("", content)

    # remove the "reposted from another board" banner
    content = re.compile("""^【 以下文字转载自.*? 讨论区 】\\n""", re.MULTILINE | re.DOTALL).sub("", content)

    # remove blank lines
    content = re.compile("""^\\n""", re.MULTILINE | re.DOTALL).sub("", content)

    return content

def getCleanText(content):
    """Strip boilerplate from an article body and split out the quoted text.

    Removes the signature, posting header, "reposted from" banner and blank
    lines.  The first quoted reference block (the "【 在 ... 的大作中提到 】"
    banner plus ":"-prefixed lines) is cut out of the body and returned
    separately, wrapped in a <div class="ref"> element.

    Returns (clean_content, ref_html); ref_html is '' when there is no quote.
    """
    # remove signature: from the "--" separator line to the [FROM: ip] line
    content = re.compile("""^--\\n.*^[^\\n]*?\\[FROM\\:\\s*?\\d*?\\.\\d*?\\.\\d*?\\..*?\\]""", re.MULTILINE | re.DOTALL).sub("", content)

    # remove the posting header
    content = re.compile("""^发信人.*?^发信站.*?\\n""", re.MULTILINE | re.DOTALL).sub("", content)

    # split out the quoted reference article
    ref = ''
    match_obj = re.search("""^(【 在.*? 的大作中提到.*?】\\n((\\:.*?\n)|(\\n))*)|^(\\:.*?\\n)""", content, re.MULTILINE | re.DOTALL)
    if match_obj:
        start = match_obj.start()
        end = match_obj.end()
        # BUGFIX: capture the quoted text BEFORE cutting it out of content.
        # The original removed the span first and then sliced the already-
        # shortened string at the old indices, so ref held the wrong text.
        ref = '<div class="ref">' + content[start:end] + '</div>'
        content = content[:start] + content[end:]

    # remove the "reposted from another board" banner
    content = re.compile("""^【 以下文字转载自.*? 讨论区 】\\n""", re.MULTILINE | re.DOTALL).sub("", content)

    # remove blank lines
    content = re.compile("""^\\n""", re.MULTILINE | re.DOTALL).sub("", content)

    return content, ref


def getArticleMetadata(html):
    """Pull a post's metadata out of a bbscon page.

    Parses the conWriter()/prints() javascript calls embedded in ``html``
    and returns the tuple (post_id, thread_id, post_title, pub_time,
    from_ip, user_name, signature, content_text, ref).  pub_time is a
    time.struct_time or None when the header date cannot be parsed.
    """
    # conWriter(...) carries the numeric post and thread ids.
    ids_match = re.search('''conWriter\(\d+,.*?, \d+, (\d+), (\d+),''', html)
    post_id = ids_match.group(1)
    thread_id = ids_match.group(2)

    # Publication time, e.g. "(Sat Jan  1 12:00:00 2011)" in the header.
    time_match = re.search("prints\('.*?发信站.*?\((\w\w\w)\s*?(\w\w\w)\s*?(\d*?)\s*?(\d*?\:\d*?\:\d*?)\s*?(\d*?)\)", html)
    if time_match:
        week, month, day, hms, year = time_match.groups()
        pub_time = time.strptime(month + " " + day + " " + hms + " " + year, "%b %d %H:%M:%S %Y")
    else:
        pub_time = None

    from_ip = ""
    user_name = ""
    post_title = ""

    # The last [FROM: ip] occurrence belongs to the post itself.
    from_matches = re.findall("\[FROM\: (\d*?\.\d*?\.\d*?\..*?)\]", html)
    if from_matches:
        from_ip = from_matches[-1]

    author_match = re.search("prints\('发信人\: (.*?\(.*?\))", html)
    if author_match:
        user_name = author_match.group(1)

    title_match = re.search("标  题: (.*?)\\\\n", html)
    if title_match:
        post_title = title_match.group(1)

    # The article body sits in the prints('...') call that precedes either
    # the closing o.h(0);o.t(); sequence or the first attach(' call.
    body_match = re.search("prints\('(.*?)'\);((o\.h\(0\);o\.t\(\);)|(attach\('))", html)
    content = cleanArcicleContent(body_match.group(1))

    # Signature: text between the "--" separator and the [FROM: ip] line.
    signature = ""
    sig_match = re.search("""^--\\n(.*?)^[^\\n]*?\\[FROM\\:\\s*?\\d*?\\.\\d*?\\.\\d*?\\..*?\\]""", content, re.MULTILINE | re.DOTALL)
    if sig_match:
        signature = sig_match.group(1)

    content_text, ref = getCleanText(content)

    return post_id, thread_id, post_title, pub_time, from_ip, user_name, signature, content_text, ref

def getAttachments(board_id, article_id, html):
    """Collect the attachments referenced by attach() calls in ``html``.

    Returns a list of (name, url) pairs pointing at the attachment server.
    """
    pairs = re.findall("""attach\('(.*?)', \d+, (\d+)\);""", html)
    return [
        (name, "http://att.newsmth.net/att.php?p." + board_id + "." + article_id + "." + attach_id + ".jpg")
        for name, attach_id in pairs
    ]

def getPostContent(board_id, article_id, post_type=None):
    """Fetch one post (optionally relative to article_id via post_type)
    and render its pieces as HTML fragments.

    Returns (post_id, thread_id, post_title, head, content_text, ref,
    attachment_html, signature).
    """
    global smthhost
    url = smthhost + "/bbscon.php?bid=" + board_id + "&id=" + article_id

    logging.info(post_type)

    if post_type == "thread_previous":
        url += '&p=tp'
    elif post_type == "thread_next":
        url += '&p=tn'
    elif post_type == "previous":
        url += '&p=p'
    elif post_type == "next":
        url += '&p=n'

    content = getContent(url)

    attachments = getAttachments(board_id, article_id, content)
    post_id, thread_id, post_title, pub_time, from_ip, user_name, signature, content_text, ref = getArticleMetadata(content)

    # Tolerate pages whose header lacks a parsable date (pub_time is None);
    # the original crashed in strftime here.
    pub_time_str = time.strftime('%Y-%m-%d %a %H:%M:%S', pub_time) if pub_time else ''

    head = '<div class="head"><span class="attr">发信人</span>:<span class="val">' + user_name + '</span>\n'
    head = head + '<span class="attr">时间</span>:<span class="val">' + pub_time_str + '</span>\n<span class="attr">来自</span>:<span class="val">' + from_ip + '</span></div>'

    attachment_html = "\n"
    if attachments and len(attachments) > 0:
        for attach in attachments:
            attachment_html = attachment_html + "\n<br/>" + attach[0] + ":\n<br/><img src='" + attach[1] + "'/>\n"

    logging.info("signature:" + signature)
    # HTML-escape the signature.  BUGFIX: '&' must be replaced FIRST —
    # the original replaced '<'/'>' before '&', double-escaping the freshly
    # produced entities ('&lt;' became '&amp;lt;').
    signature = '<div class="signature">' + signature.rstrip().replace("&", "&amp;").replace('<', '&lt;').replace('>', '&gt;').replace(' ', '&nbsp;') + '</div>'

    return post_id, thread_id, post_title, head, content_text, ref, attachment_html, signature

def getThreadPostList(board_name, thread_id, page_num):
    """Fetch one page of a thread and return its posts.

    Returns (thread_title, posts, page_num, page_count); posts is a list
    of models.Post in reversed page order.

    Fix: the original built an unused ``post_url`` inside the loop, which
    also crashed (str + int) whenever the tconWriter regex failed and
    board_id stayed at its integer default.
    """
    global smthhost
    if page_num:
        url = smthhost + "/bbstcon.php?board=" + board_name + "&gid=" + thread_id + "&start=" + thread_id + "&pno=" + page_num
    else:
        url = smthhost + "/bbstcon.php?board=" + board_name + "&gid=" + thread_id

    content = getContent(url)

    thread_title = ''
    match_obj = re.search('''<h1 class="ttit">(.*?) </h1>''', content)
    if match_obj:
        thread_title = match_obj.group(1)

    board_id = 0
    page_count = 0

    # tconWriter(...) carries the board id plus the paging information.
    match_obj = re.search('''tconWriter\('(.*?)',(\d+),(\d+),\d+,(\d+),(\d+),\d+,.*?,''', content)
    if match_obj:
        board_id = match_obj.group(2)
        page_count = match_obj.group(4)
        page_num = match_obj.group(5)

    posts = []
    matched_objs = re.findall('''\[(\d+),'(.*?)'\]''', content)
    for post_id, post_user in reversed(matched_objs):
        posts.append(models.Post(board_id=int(board_id), post_id=int(post_id), author=post_user))

    return thread_title, posts, page_num, page_count

def getThreadList(board_name, page_num):
    """Fetch one page of a board's thread digest (ftype=6).

    Returns (board_id, threads, page_num) where each thread entry is the
    tuple (thread_id, title, word_count, thread_url).
    """
    global smthhost
    if page_num > 0:
        url = smthhost + "/bbsdoc.php?board=" + board_name + "&ftype=6&page=" + str(page_num)
    else:
        url = smthhost + "/bbsdoc.php?board=" + board_name + "&ftype=6"

    content = getContent(url)

    board_id = 0

    # docWriter(...) carries the board id and the current page number.
    writer_match = re.search('''docWriter\('(.*?)',(\d+),(\d+),\d+,\d+,(\d+),\d+,.*?,''', content)
    if writer_match:
        board_id = writer_match.group(2)
        page_num = writer_match.group(4)

    threads = []
    rows = re.findall('''c\.o\((\d+),(\d+),.*?,.*?,\d+,'(.*?)',(\d+),\d+,\d+\)''', content)
    for article_id, thread_id, title, word_count in reversed(rows):
        thread_url = smthhost + "/bbstcon.php?board=" + board_name + "&gid=" + thread_id
        threads.append((thread_id, title, word_count, thread_url))

    return board_id, threads, page_num

def getPostList(board_name, page_num):
    """Fetch one page of a board's post list.

    Returns (board_id, posts, page_num); posts is a list of models.Post in
    page order.  Rows that fail to parse are logged and skipped.
    """
    global smthhost
    if page_num > 0:
        url = smthhost + "/bbsdoc.php?board=" + board_name + "&page=" + str(page_num)
    else:
        url = smthhost + "/bbsdoc.php?board=" + board_name

    content = getContent(url)

    board_id = 0

    # docWriter(...) carries the board id and the current page number.
    match_obj = re.search('''docWriter\('(.*?)',(\d+),(\d+),\d+,\d+,(\d+),\d+,.*?,''', content)
    if match_obj:
        board_id = match_obj.group(2)
        page_num = match_obj.group(4)

    posts = []
    matched_objs = re.findall('''c\.o\((\d+),(\d+),'(.*?)','(.*?)',(\d+),'(.*?)',(\d+),\d+,\d+\)''', content)
    for matched_obj in matched_objs:
        post_id, thread_id, user, tag, post_time, title, word_count = matched_obj
        try:
            # post_time is an epoch timestamp; +8h presumably converts to
            # Beijing time (App Engine clocks run in UTC) — TODO confirm.
            post = models.Post(post_id=int(post_id), thread_id=int(thread_id), author=unicode(user, 'utf-8', 'ignore'), tag=unicode(tag, 'utf-8', 'ignore'), post_time=( datetime.fromtimestamp(int(post_time))+timedelta(hours=8) ), title=unicode(title, 'utf-8', 'ignore'), word_count=unicode(word_count, 'utf-8', 'ignore'))
            posts.append(post)
        except Exception:
            # Skip the malformed row instead of aborting the whole page.
            # (Was a bare except, which also swallowed SystemExit.)
            logging.info(traceback.format_exc())
            continue

    return board_id, posts, page_num

# extract top 10 articles from RSS of SMTH
def getTop10ArticlesList():
    """Fetch the site-wide top-10 RSS feed and return models.Thread entries.

    Fixes: the original body mixed tabs and spaces (fragile under Python's
    indentation rules), and called int('') — a ValueError aborting the
    whole list — whenever an entry's link lacked board/gid parameters;
    such entries are now logged and skipped.
    """
    global smthhost
    seed = smthhost + "/rssi.php?h=1"
    response = urlfetch.fetch(url=seed)
    content = response.content

    d = feedparser.parse(content)

    threads = []
    # loop over all the entries
    for e in d.entries:
        title = e.title
        author = e.author
        thread_url = e.link

        match_obj = re.search('''board=(.*?)&gid=(\d+)''', thread_url)
        if not match_obj:
            # Malformed entry: int('') would blow up below, so skip it.
            logging.info("skipping RSS entry without board/gid: " + thread_url)
            continue
        board_name = match_obj.group(1)
        gid = match_obj.group(2)

        # The title starts with a "[board]" prefix; drop everything up to ']'.
        threads.append( models.Thread(title=title[title.find(u']')+1:], author=author, board_name=board_name, thread_id=int(gid)) )

    return threads
    
    
# Manual smoke test: fetch one known article when run as a script.
# (Python 2 "print" statement — this module targets the GAE Python 2 runtime.)
if __name__ == "__main__":
    print getContent('http://www.newsmth.net/bbscon.php?bid=874&id=1017193&p=tn')
    