#coding=utf-8

'''
Created on 2011-8-30

@author: chris
'''
import sys
import urllib2
import urllib
import cookielib
from ripper.core import EncodingConvert
from ripper.core.Utils import enable_proxy #@UnresolvedImport
from ripper.core.Utils import disable_proxy #@UnresolvedImport
import json
import urlparse
from ripper.parser import BeautifulSoup
import os
sys.path.append('/harvester/ripper/source/src/')


from ripper.parser.Parser import Parser
from ripper.handler.converter.langconv import Converter
import re
import datetime
from ripper.core.Utils import clear_url #@UnresolvedImport
# Shared HTTP headers placeholder (not set anywhere in this chunk;
# presumably populated by login code elsewhere -- TODO confirm).
gheaders = None

# Python 2 trick: reload(sys) re-exposes setdefaultencoding (hidden after
# startup) so implicit str<->unicode conversions below use UTF-8 instead
# of ASCII.
reload(sys)
sys.setdefaultencoding('utf-8') #@UndefinedVariable

# Forum ids whose posts get an extra tag prefix; currently only referenced
# from commented-out code in SisParser.parse_obj_list.
NEED_TAGS = (60 , 249, 6)
# Base URL prepended to relative attachment links.
SIS_BBS_PREFIX = 'http://sexinsex.net/bbs/'

class SisParser(Parser):

    ''' Parser for the SIS (sexinsex) forum: scrapes post lists, thread
    contents, inline images and torrent attachments. '''
    # number of list pages to scan
    POST_PAGES = 5
    # forum-section link pattern
    FORUM_PATTERN       = r"<a href='(thread\.php\?fid=[0-9]+)' class='fnamecolor'>[\n.]*<b>(.*?)</b></a><br>"
    # post-list entry pattern
    POST_PATTERN        = r"<a target=_blank href='(htm_data.*?)'>(.*?)</a>.*?(\n.*?)*?</td>\n<td class=smalltxt>\n(.*?)\n<br>(.*?)</td>"
    # thread body pattern
    THREAD_CONTENT      = r"<span class='tpc_content'>(.*)"
    # torrent link pattern (tolerates whitespace inserted inside "http://")
    TORRENT_URL_PATTERN = r'h\s*?t\s*?t\s*?p\s*?:\s*?/\s*?/.*?/.*?\s' # not enough yet, sth like ',' may appears in the tail
    # image URL patterns: single-quoted and double-quoted src attributes
    IMAGE_URL_PATTERN1   = r"<img(.*?)src='(.*?)'\s*.*?>"
    IMAGE_URL_PATTERN2   = r'<img(.*?)src="(.*?)"\s*.*?>'
    
    # Downloading attachments requires login; retry callback handed to the Engine.
    def relogin(self, isFinishHtml=False):
        ''' Retry logging in (up to 10 times) once HTML fetching finished.
        Exits the process if no attempt succeeds. '''
        # only attempt a (re)login after the HTML download completed
        if False == isFinishHtml:
            return 
        if self.isLogin == False:
            for i in range(0, 10):
                try:
                    self.login_sis()
                    return
                except Exception, ex:
                    print ex, 'retry...'
                    continue
        # NOTE(review): this point is reached both when all 10 attempts
        # failed AND when isLogin was already True -- the latter also kills
        # the process, which looks unintended; confirm against the Engine.
        print 'login failed.'
        sys.exit()
    
    def goto_page(self, pageUrl, num=1):
        ''' Build the URL of list page *num* by appending "<num>.html". '''
        p = str(num) + '.html'
        return pageUrl + p 
        
    # Parse a forum list page.
    def parse_obj_list(self, pageIndex, fid=0):
        ''' Generator: yield one dict per post found on the list page
        *pageIndex* (a ".../forum-<fid>-<page>.html" URL).  The *fid*
        parameter is ignored; the forum id is re-derived from the URL. '''
#        pageIndex.url = pageIndex.url.replace('www.sexinsex.net', '67.220.90.30')
        # traditional -> simplified Chinese converter for titles
        c = Converter('zh-hans')
        
        # legacy config template (item-name -> forum-id mapping, disabled)
#        itemName = self.item.get_property('name')
#        # forum ids as stored in the original database
#        forumMap = {
#                     u'sis_60' : 60, # pictures, cartoon
#                     u'sis_64' : 64, # pictures, oriental girls
#                     u'sis_62' : 64, # pictures, selfies (merged)
#                     u'sis_68' : 68, # pictures, western
#                     u'sis_sw' : 249, # stockings / high heels
#                     u'sis_sm' : 249, # sm
#                    }
#        fid = forumMap[itemName]
        
        # URL path is "/bbs/forum-<fid>-<page>.html"; take the fid part.
        # NOTE: fid ends up a *string* here, not an int.
        fid = urlparse.urlparse(pageIndex)[2].split('-')[1]
        # list pages require a logged-in session
        if self.isLogin == False:
            self.login_sis()
            self.isLogin = True
        
#        text = self.get_content('http://www.sexinsex.net/forum/forumdisplay.php?fid=64')
        text = self.get_content(pageIndex)
        
        
        sp = self.get_soup(text=text)
        
        tables = sp.findAll('table')
        # posts live in the second-to-last table (others are stickies/ads)
        table = list(tables)[-2]
        
        soup = table
        spans = table.findAll('span')
        ths = table.findAll('th')
        
        posts = [] # url, name
        # titles and urls
        for th in ths:
            spans = list(th.findAll('span'))
            if len(spans) == 0 : continue
            # category label (may be absent)
            em = ''
            try:
                em = th.findAll('em')[0].findAll('a')[0].contents[0].__str__()
            except Exception:
                pass
            span = spans[0]
            if not span.has_key('id'):continue
            sid = span['id']
            # real posts carry span ids of the form "thread_<tid>"
            if sid == None or 'thread_' not in sid : continue
            a = span.findAll('a')
            if len(a) == 0 : continue
            a = a[0]
            name = a.contents[0].__str__()
            href = a['href']
            
            posts.append([href, name,em])
            
        # post dates
        ems = soup.findAll('em')
        pt = r'[0-9]+-[0-9]+-[0-9]+'
        bems = []
        for a in ems:
            if a.contents and a.contents[0].__str__() and re.match(pt, a.contents[0].__str__()):
                bems.append(a.contents[0].__str__())
        bems = list(p for p in bems if p != None)
        # one extra date means the first belongs to a sticky -- drop it
        if len(bems) > len(posts):
            bems = bems[1:]
        print len(bems), len(posts)
        ct = 0
        for p in posts:
            p.append(bems[ct])
            ct = ct + 1
        
        # yield the collected posts
        for p in posts:
            
            postDetailUrl = p[0]
            title = p[1]
            titleSimple = title
            # convert title to simple chinese
            try:
                titleSimple = c.convert(title)
            except Exception, ex:
                pass
            tag = p[2]
            postDate = p[3]
            postDateTime = datetime.datetime.strptime(postDate,'%Y-%m-%d')
            
            
            obj = {'postDetailUrl' : 'http://www.sexinsex.net/forum/' + postDetailUrl, 
                   'title' : title, 
                   'titleSimple' : titleSimple, 
#                   'author' : 'harvester', 
                   'forumId' : fid, 
                   'postDate' : postDate, 
                   'tag' : tag
                   }
            
            # tag-prefix decoration for selected forums (disabled)
#            if fid[0] in NEED_TAGS:
#                if itemName == 'sis_sw':
#                    tag = u'丝袜-' + tag
#                if itemName == 'sis_sm':
#                    tag = u'SM-' + tag
#                obj['tag'] = tag
            
            yield obj
    
    def getpid(self, purl):
        ''' Derive a post id ("sis_<tid>") from a thread URL of the form
        .../thread-<tid>-<page>-<x>.html '''
        return 'sis_'+ purl.split('/')[-1].split('-')[1]
    # property resolver: collection timestamp
    def get_collectDate(self, url, prop, obj):
        return str(datetime.datetime.utcnow())
    
    # property resolver: raw page content (disabled, placeholder string)
    def get_rawContent(self, url, prop, obj):
        return 'not available'
#        return self.get_content(url)
    
    # property resolver: cleaned first-floor content
    def get_ccontent(self, url, prop=None, obj=None):
        ct = self.__get_post_content(url)
        return ct
        
    # property resolver: extra urls (none)
    def get_urls(self, url, prop, obj):
        return []
    
    def get_torrents(self, url, pid, destdir):
        '''  Download the torrent attachments of thread *url* into *destdir*,
        suffixing each file name with *pid*.  Returns the list of names.  '''
        names = []
        soup = self.get_soup(url=url)
        attdl = soup.findAll('dl', {'class' : 't_attachlist'})
        if attdl != None:
            for dl in attdl:
                # attachment blocks describing BT torrents carry this caption
                if u'查看BT种子信息' in dl.text:
                    a = dl.findAll('a')[0]
                    # torrent file name: "<base>_<pid>.torrent"
                    fname = a.text
                    fname = '%s_%s.torrent' % (fname.split('.')[0], pid) 
                    turl = SIS_BBS_PREFIX + a['href']
                    self.get_file(turl, destdir, fname)
                    names.append(fname)
        return names
            
    # Extract image URLs (both quote styles) from an HTML fragment,
    # dropping .gif images.
    def get_images(self, text):
#        url = url.replace('www.sexinsex.net', '67.220.90.30')
        list_imgs1 = []
        list_imgs2 = []
        for m in re.findall(SisParser.IMAGE_URL_PATTERN1,text):
            list_imgs1.append(clear_url(m[1]))
        for m in re.findall(SisParser.IMAGE_URL_PATTERN2,text):
            # NOTE(review): appends to list_imgs1 (not list_imgs2), so
            # list_imgs2 stays empty and the extend() below is a no-op --
            # harmless, but probably not what was meant; confirm.
            list_imgs1.append(clear_url(m[1]))
        list_imgs1.extend(list_imgs2)
        list_imgs1 = [i for i in list_imgs1 if '.gif' not in i.lower()]
        return list_imgs1
    
    def __get_post_content(self, url):
        ''' Return the content of the first floor of thread *url*. '''
        contents = self.get_all_content(url)
        
        ct = ''
#        try:    
#            ct = ''.join(map(lambda a : a.__str__(), contentDivs[0].contents))
#        except Exception, err:
#            print 'SisParser.__get_post_content => %s, %s fails' %(str(err), url)
        print contents[0]['author']
        return contents[0]['content']
    
    def get_all_content(self, url):
        ''' Scrape every floor of thread *url*.  Returns a list of dicts
        {'author': ..., 'content': ...}; the first entry additionally gets
        an 'images' key holding the first floor's image URLs. '''
        if self.isLogin == False:
            self.login_sis()
            self.isLogin = True
            
        c = Converter('zh-hans')
        soup =self.get_soup(url=url)
        authors = []
        contents = []
        authorTds = soup.findAll('td', {'class' : 'postauthor'})
        contentDivs = soup.findAll('div', {'class' : 't_msgfont'})
        # authors
        for i in range(0, len(authorTds)):
            td = authorTds[i]
            cites = td.findAll('cite')
            if len(cites) == 0 : continue
            if len(cites[0].findAll('a')) == 0 : 
                authors.append(cites[0].contents[0].__str__().strip())
            else:
                a = cites[0].findAll('a')[0]
                authors.append(a.contents[0].__str__())
        # contents
#        for i in range(1, len(contentDivs), 2):
        for i in range(0, len(contentDivs)):
            div = contentDivs[i]
            if len(div.contents) != 0:
                # NOTE(review): iterates div.text character by character and
                # re-joins -- effectively just div.text; confirm before
                # simplifying.
                contents.append(''.join(map(lambda a:a.__str__(), div.text)))
            else:
                contents.append('no content for this')
                
        if len(contents) == 0 : # one floor:
            # "arena" boards use different markup (td.postcontent)
            if len(contentDivs) == 0:
                contentDivs = soup.findAll('td', {'class' : 'postcontent'})
                content = contentDivs[0].text
                images = self.get_images(contentDivs[0].__str__())
                return [{'author': None, 'content': content, 'images': images}, ]
                
            # else
            contents = [contentDivs[0].__str__(), ]
            try:
                contents[0] = c.convert(contents[0])
            except Exception, ex:
                pass
            return [ {'author': authors[0], 'content': contents[0]} ]
        
        content_list = []
        
        ###
        ### Counts can legitimately differ:
        ###    if a floor's author was muted by a moderator, that floor's
        ###    content is empty -- t_msgfont is missing and class="notice"
        ###    replaces it.  Work around it by padding the contents list.
        lct, lau =  len(contents) , len(authors)
        if lct < lau:
            gap = lau - lct
            for i in range(0, gap):
                contents.append('  ')
            
        assert len(contents) == len(authors)
        for i, text in enumerate(contents):
            ctdict = {}
            ctdict['author'] = authors[i]
            try:
                text = c.convert(text)
            except Exception, ex:
                pass
            ctdict['content'] = text
            content_list.append(ctdict)
        # first-floor images
        content_list[0]['images'] = self.get_images(contentDivs[0].__str__())
        content_list[0]['content'] = contentDivs[0].text
        ct = content_list[0]['content']
        ### strip trailing junk
        # download links: images + torrents
        
        if 'HTTP 免空下載' in ct:
            content_list[0]['content'] = ct.split('HTTP 免空下載')[0]
            with open('1.txt', 'w') as f:
                f.write(content_list[0]['content'])
                
        return content_list
        
def testDetail(url): 
    a = SisParser(None, needProxy=True)
#    a.login_sis()
#    with open('json.txt', 'w') as f:
#        for obj in a.parse_obj_list('http://sexinsex.net/bbs/forum-64-2.html'):
#            ct = json.dumps(obj, ensure_ascii=False)
#            f.write(ct + '\n')
    ct = None
    error_log = 'errors.txt'
    try:
        ct = a.get_all_content(url)
    except Exception, err:
        # 解析时出现问题
        if not os.path.exists(error_log):
            with open(error_log, 'w') as f:
                f.write('%s, %s \n' % (err, url) )
        else:
            with open(error_log, 'a') as f:
                f.write('%s, %s \n' % (err, url) )
    a.get_torrents(url, 'sis_123', 'e:/')
#    cc = '\n'.join(map(lambda x: x['content'], ct))
#    print cc
#    with open('11.txt', 'w') as f:
#        f.write(cc)
    
def test_text():
    fn = 'torrenttest.html'
    ct = open(fn).read()
    soup = BeautifulSoup.BeautifulSoup(ct)
    attdl = soup.findAll('dl', {'class' : 't_attachlist'})
    dl = attdl[0]
    if u'查看BT种子信息' in dl.text:
        a = dl.findAll('a')[0]
        # 种子文件名
        torrentfilename = a.text
        turl = SIS_BBS_PREFIX + a['href']
        print turl
    
        
if __name__ == '__main__':
    # Ad-hoc test entry point: swap in one of the commented calls below
    # to exercise a different thread.
#    parser = SisParser(None, needProxy=True)
#    parser.parse_obj_list(None, None, None, None)
#    testDetail('http://www.aisex.com/bt/htm_data/4/1109/485578.html')
#    testDetail('http://www.sexinsex.net/bbs/thread-1307060-5-1.html')        
#    testDetail('http://sexinsex.net/bbs/thread-4776774-1-1.html')        
#    a = open('json.txt').read().split('\n')[0]
#    posts = json.loads(a)
#    print posts['title']
#    testDetail('http://sexinsex.net/bbs/thread-4536351-1-1.html')
#    testDetail('http://sexinsex.net/bbs/viewthread.php?tid=3940994&highlight=HAVD')
#    testDetail('http://sexinsex.net/bbs/thread-4774084-1-2.html') # page needing higher privileges
    testDetail('http://sexinsex.net/bbs/thread-4784055-1-3.html') # junk stripping
#    test_text()
    
