#coding=utf-8

'''
Created on 2011-8-30

@author: chris
'''
import sys
import urllib2
import urllib
import cookielib
from ripper.core import EncodingConvert
from ripper.core.Utils import enable_proxy #@UnresolvedImport
from ripper.core.Utils import disable_proxy #@UnresolvedImport
sys.path.append('/harvester/ripper/source/src/')


from ripper.parser.Parser import Parser
import os.path
from ripper.handler.converter.langconv import Converter
import re
import datetime
import codecs
from ripper.handler.TorrentDownload import canDownload
from ripper.core.Utils import clear_url #@UnresolvedImport
# NOTE(review): gheaders is never used in this chunk — presumably a shared
# HTTP header set filled in elsewhere; confirm against the rest of the file.
gheaders = None

# Python 2 hack: reload(sys) re-exposes sys.setdefaultencoding (which the
# interpreter deletes after startup) so the process-wide default string
# encoding can be forced to UTF-8 for the scraped Chinese content.
reload(sys)
sys.setdefaultencoding('utf-8') #@UndefinedVariable

class SisParser(Parser):

    '''Parser for the sexinsex.net forum picture boards.

    Scrapes post listings from a board index page (parse_obj_list) and
    extracts per-post properties (content, image URLs, dates) from the
    individual thread pages.
    '''
    # Number of listing pages to scan per board.
    POST_PAGES = 5
    # Pattern locating a board (forum section) link on the index page.
    FORUM_PATTERN       = r"<a href='(thread\.php\?fid=[0-9]+)' class='fnamecolor'>[\n.]*<b>(.*?)</b></a><br>"
    # Pattern locating one post row in a board listing page.
    POST_PATTERN        = r"<a target=_blank href='(htm_data.*?)'>(.*?)</a>.*?(\n.*?)*?</td>\n<td class=smalltxt>\n(.*?)\n<br>(.*?)</td>"
    # Pattern locating the post body span.
    THREAD_CONTENT      = r"<span class='tpc_content'>(.*)"
    # Torrent links are obfuscated with whitespace between the characters.
    TORRENT_URL_PATTERN = r'h\s*?t\s*?t\s*?p\s*?:\s*?/\s*?/.*?/.*?\s' # not enough yet, sth like ',' may appears in the tail
    # <img> tags with single-quoted and double-quoted src attributes;
    # group 2 of each match is the URL itself.
    IMAGE_URL_PATTERN1   = r"<img(.*?)src='(.*?)'\s*.*?>"
    IMAGE_URL_PATTERN2   = r'<img(.*?)src="(.*?)"\s*.*?>'


    def goto_page(self, pageUrl, num=1):
        '''Return the URL of listing page *num*; pages are "<base><n>.html".'''
        return pageUrl + str(num) + '.html'

    def parse_obj_list(self, pageIndex, keyProp, otherProps, item):
        '''Yield one dict per post found on the listing page *pageIndex*.

        Each dict carries the absolute post URL, the title (original and
        converted to simplified Chinese), a placeholder author, the legacy
        forum id and the post date (string and datetime).
        '''
        c = Converter('zh-hans')
        # The configured item name selects the legacy forum id to record.
        itemName = self.item.get_property('name')
        # Item name -> board id of the original database.
        forumMap = {
                     u'sis_64' : 64, # pictures, Asian boards
                     u'sis_62' : 64, # pictures, self-shot (merged into 64)
                     u'sis_68' : 68, # pictures, western
                    }

        # Log in only once per parser instance.
        if not self.isLogin:
            self.login_sis()
            self.isLogin = True

        text = self.get_content(pageIndex.url)
        sp = self.get_soup(text=text)

        tables = sp.findAll('table')
        # The second-to-last table holds the regular posts; the other
        # tables contain stickies and advertisements.
        table = list(tables)[-2]

        # Collect [href, title] pairs from the title spans, which carry
        # an id of the form 'thread_<n>'.
        posts = []
        for span in table.findAll('span'):
            if not span.has_key('id'):
                continue
            sid = span['id']
            if sid is None or 'thread_' not in sid:
                continue
            anchors = span.findAll('a')
            if not anchors:
                continue
            a = anchors[0]
            posts.append([a['href'], a.contents[0].__str__()])

        # Post dates sit in <em> tags formatted like YYYY-MM-DD.
        datePattern = r'[0-9]+-[0-9]+-[0-9]+'
        bems = []
        for em in table.findAll('em'):
            value = em.contents and em.contents[0].__str__()
            if value and re.match(datePattern, value):
                bems.append(value)
        # One extra date belongs to a sticky row above the listing; drop it.
        if len(bems) > len(posts):
            bems = bems[1:]

        # zip() pairs posts with their dates and also guards against an
        # IndexError when fewer dates than posts were scraped (the
        # original code assumed the lists were always equal in length).
        for post, postDate in zip(posts, bems):
            postDetailUrl = post[0]
            title = post[1]
            # Convert the title to simplified Chinese; fall back to the
            # original title when conversion fails.
            titleSimple = title
            try:
                titleSimple = c.convert(title)
            except Exception:
                pass
            postDateTime = datetime.datetime.strptime(postDate, '%Y-%m-%d')

            yield {'postDetailUrl' : 'http://www.sexinsex.net/forum/' + postDetailUrl,
                   'title' : title,
                   'titleSimple' : titleSimple,
                   'author' : 'harvester',
                   'forumId' : forumMap[itemName],
                   'postDate' : postDate,
                   'postDateTime' : postDateTime}


    def get_collectDate(self, url, prop, obj):
        '''Property resolver: the moment this post was collected (UTC).'''
        return datetime.datetime.utcnow()

    def get_rawContent(self, url, prop, obj):
        '''Property resolver: raw page content is intentionally not stored.'''
        return 'not available'

    def get_ccontent(self, url, prop, obj):
        '''Property resolver: the HTML body of the post's first floor.'''
        return self.__get_post_content(url)

    def get_urls(self, url, prop, obj):
        '''Property resolver: no outbound URLs are collected for this site.'''
        return []

    def get_torrents(self, url, prop, obj):
        '''Property resolver: no torrents are collected for this site.'''
        return []

    def get_images(self, url, prop, obj):
        '''Property resolver: every image URL referenced in the post body.

        The original implementation appended both pattern scans to the
        same list and carried a dead second list; collapsed into one pass
        over both patterns — the returned list is identical.
        '''
        text = self.__get_post_content(url)
        images = []
        for pattern in (SisParser.IMAGE_URL_PATTERN1, SisParser.IMAGE_URL_PATTERN2):
            for m in re.findall(pattern, text):
                images.append(clear_url(m[1]))
        return images

    def __get_post_content(self, url):
        '''Fetch the thread at *url* and return the first floor's HTML body.

        Authors and per-floor contents are also gathered (kept for parity
        with the original code) but only the first floor is returned.
        '''
        soup = self.get_soup(url=url)
        authors = []
        contents = []
        authorTds = soup.findAll('td', {'class' : 'postauthor'})
        contentDivs = soup.findAll('div', {'class' : 't_msgfont'})
        # Authors: the poster name sits in a <cite>, sometimes wrapped
        # in an <a> link.
        for td in authorTds:
            cites = td.findAll('cite')
            if not cites:
                continue
            links = cites[0].findAll('a')
            if links:
                authors.append(links[0].contents[0].__str__())
            else:
                authors.append(cites[0].contents[0].__str__().strip())
        # Contents: only odd-indexed divs are taken.
        # NOTE(review): presumably the even-indexed divs are quote/preview
        # blocks interleaved with the real floor bodies — confirm against
        # the live page markup.
        for i in range(1, len(contentDivs), 2):
            div = contentDivs[i]
            if div.contents:
                contents.append(''.join(map(lambda a: a.__str__(), div.contents)))
            else:
                contents.append('no content for this')

        # Only the first floor's markup is returned to the caller.
        return ''.join(map(lambda a: a.__str__(), contentDivs[0].contents))
        
def testDetail(url):
    a = SisParser(None, True)
    t = a.get_torrents(url, None)
    print t
    
    
        
if __name__ == '__main__':
    # NOTE(review): parse_obj_list is a generator function, so calling it
    # without iterating the result executes none of its body — this entry
    # point is effectively a no-op as written. Iterating it here would
    # crash on pageIndex=None (pageIndex.url is dereferenced), so real
    # arguments are needed before this can run; confirm intended usage.
    parser = SisParser(None, needProxy=True)
    parser.parse_obj_list(None, None, None, None)
#    testDetail('http://www.aisex.com/bt/htm_data/4/1109/485578.html')
        