#coding=utf-8

'''
Created on 2011-8-30

@author: chris
'''
import sys
import urllib2
import urllib
import cookielib
from ripper.core import EncodingConvert
from ripper.core.Utils import enable_proxy #@UnresolvedImport
from ripper.core.Utils import disable_proxy #@UnresolvedImport
sys.path.append('/harvester/ripper/source/src/')


from ripper.parser.Parser import Parser
import os.path
from ripper.handler.converter.langconv import Converter
import re
import datetime
import codecs
from ripper.handler.TorrentDownload import canDownload
from ripper.core.Utils import clear_url #@UnresolvedImport
# Default HTTP request headers shared across requests.
# NOTE(review): parse_index() assigns a *local* variable named gheaders,
# so this module-level value is never updated from None -- confirm intent.
gheaders = None

class AisexParser(Parser):

    ''' aisex.com 论坛的分析器 '''
    # 扫描页数
    POST_PAGES = 5
    # 论坛板块样式
    FORUM_PATTERN       = r"<a href='(thread\.php\?fid=[0-9]+)' class='fnamecolor'>[\n.]*<b>(.*?)</b></a><br>"
    # 帖子列表样式
    POST_PATTERN        = r"<a target=_blank href='(htm_data.*?)'>(.*?)</a>.*?(\n.*?)*?</td>\n<td class=smalltxt>\n(.*?)\n<br>(.*?)</td>"
    # 帖子内容样式
    THREAD_CONTENT      = r"<span class='tpc_content'>(.*)"
    # 种子链接的地址
    TORRENT_URL_PATTERN = r'h\s*?t\s*?t\s*?p\s*?:\s*?/\s*?/.*?/.*?\s' # not enough yet, sth like ',' may appears in the tail
    # 图片地址样式
    IMAGE_URL_PATTERN   = "<img(.*?)src='(.*?)'\s*.*?>"
    
    
    def goto_page(self, pageUrl, num=1):
        p = '&page=' + str(num)
        return pageUrl + p 
        
    # 解析列表
    def parse_obj_list(self, pageIndex, keyProp, otherProps, item):
        c = Converter('zh-hans')
        # 配置模板
        itemName = self.item.get_property('name')
        # 对应原数据库里面的版面ID
        forumMap = {
                    'aisex_asia' : 1,
                    'aisex_euro' : 2,
                    'aisex_hentai' : 3,
                    'aisex_game' : 4,
                    'aisex_pic_euro' : 5,
                    'aisex_pic_asia' : 6,
                    }
        
#        self.login_aisex()
        text = self.get_content(pageIndex.url)
        sp = self.get_soup(text=text)
        trs = sp.findAll('tr', {'class':'t_one'})
        
        for tr in trs:
            
            postDateTime = ''
            titleSimple = ''
            
            tds = tr.findAll('td')
            td1 = tds[1]
            postDetailUrl = os.path.join('http://www.aisex.com/bt/', self.get_soup_attr(td1.findAll('a')[0], 1, 1, 'href'))
            title = td1.findAll('a')[0].text
            
            b = tds[2].text # xxxx2009-01-02
            s = b.split('-')
            author = s[0][:-4]
            postDate = '-'.join([ s[0][-4:], s[1], s[2] ])
            
            author = author.replace('\r\n', '')
            postDate = postDate.replace('\r\n', '')
            
            # convert title to simple chinese
            if title :
                titleSimple = c.convert(title)
    
            # convert old str date col to new datime col
            postDateTime = None
            try:
                postDateTime = datetime.datetime.strptime(postDate,'%Y-%m-%d')
            except Exception, ex:
                pass
            
                
            yield {'postDetailUrl' : postDetailUrl, 
                   'title' : title, 
                   'titleSimple' : titleSimple, 
                   'author' : author, 
                   'forumId' : forumMap[itemName], 
                   'postDate' : postDate, 
                   'postDateTime' : postDateTime}
    
    
    # 解析属性
    def get_collectDate(self, url, prop, obj):
        return datetime.datetime.utcnow()
    
    # 解析属性
    def get_forumId(self, url, prop, obj):
        return int(re.findall(r'fid=([0-9])', AisexParser._pageIndex.url)[0])
    
    # 解析属性
    def get_rawContent(self, url, prop, obj):
        return 'not available'
#        return self.get_content(url)
    
    # 解析属性
    def get_ccontent(self, url, prop, obj):
        soup = None
        try:
            soup = self.get_soup(url=url)
            floors = soup.findAll('span', {'class' : 'tpc_content'})
            ct = floors[0].__str__()
            return ct
        except Exception , ex:
            return u'Error getting content data'
        except RuntimeError, err:
            return u'Error getting content data'
        finally:
            pass
        
    # 解析属性
    def get_urls(self, url, prop, obj):
        text = self.get_content(url)
        images = self.get_images(url, prop, obj)
        pageUrls = []
        text = text.replace('@', '')
        for m in re.findall(AisexParser.TORRENT_URL_PATTERN,text):
            x = m.strip()
            # url 的地址不和图片地址重复
            if x not in images\
                    and '.css' not in x.lower() :
                pageUrls.append(clear_url(x))
        return list(set(pageUrls))
    
    # 解析属性
    def get_torrents(self, url, prop, obj):
        torrentUrls = []
        urls = self.get_urls(url, prop, obj) 
        print urls
        for url in urls:
            if canDownload(url) == True:
                torrentUrls.append(clear_url(url))
        return torrentUrls
    
    # 解析属性
    def get_images(self, url, prop, obj):
        text = self.get_content(url)
        list_imgs = []
        lzIdx = text.index('<a href="javascript:scroll(0,0)">')
        for m in re.findall(AisexParser.IMAGE_URL_PATTERN,text):
            if text.index(m[1]) < lzIdx:
                list_imgs.append(clear_url(m[1]))
    
        
        list_imgs = list(set(list_imgs))
        return list(set(list_imgs))
    
    def parse_index(self):
#        text = open('h:/aisexlist.htm').read()
        enable_proxy()
        loginTarget='http://www.aisex.com/bt/login.php'
        SITE_HEADER='http://www.aisex.com/'
        
        loginText = self.get_content(loginTarget)
        #cookie
        cookieJar = cookielib.CookieJar()
        cookie_support= urllib2.HTTPCookieProcessor(cookieJar)
        opener = urllib2.build_opener(cookie_support, urllib2.HTTPHandler)
        urllib2.install_opener(opener)
        #login
        postdata=urllib.urlencode({
             'loginuser':'outlookxp',
             'loginpwd':'123123',
             'hideid':'0',
             'cktime':'31536000',
             'jumpurl':'http://www.aisex.com/bt/thread.php?fid=4',
             'loginpwd':'123123',
             'step':'2',
        })
        gheaders = {
             'User-Agent':'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.6) Gecko/20091201 Firefox/3.5.6',
             'referer':'http://www.aisex.com/bt'
        }
        
        req = urllib2.Request(
              url = 'http://www.aisex.com/bt/login.php',
              data = postdata,
              headers = gheaders
        )
        
        
        text = urllib2.urlopen(req).read()
        
        text = EncodingConvert.zh2utf8(text)[1]

        text = self.get_content('http://70.85.48.252/bt/thread.php?fid=4&search=&page=2')
        c = Converter('zh-hans')
        for m in re.findall(AisexParser.POST_PATTERN,text):
            postDateTime = ''
            titleSimple = ''
            pageUrl, title, author,postDate \
                                    = os.path.join('http://www.aisex.com/bt/', m[0]), m[1], m[-2],m[-1]
            
            # convert title to simple chinese
            if title :
                titleSimple = c.convert(codecs.getdecoder('utf-8')(title)[0])
    
            # convert old str date col to new datime col
            postDateTime = datetime.datetime.strptime(postDate,'%Y-%m-%d')
                
            print pageUrl, title, titleSimple, author, postDate, postDateTime
            
    
    def parse_detail(self):
        text = open('h:/Best of Five Dynatec Ti Reviews.htm').read()
        soup = self.get_soup(text=text)
        ratings = soup.findAll('td', {'class' : 'rating'})
        fratings = soup.findAll('td', {'class' : 'rating firstrating'})
        td = ratings[0]
             
        img = soup.findAll('meta', {'property' : 'og:image'})
        print img[0]['content']
        
        priceSpan = soup.findAll('span', {'id' : 'actual_price'})
        price = priceSpan[0].text
        print price
        
def testDetail(url):
    a = AisexParser(None, True)
    t = a.get_torrents(url, None)
    print t
    
    
        
if __name__ == '__main__':
    # Script entry point: build a proxy-enabled parser, log in, and dump
    # one hard-coded listing page (see parse_index).
    parser = AisexParser(None, needProxy=True)
    parser.parse_index()
#    testDetail('http://www.aisex.com/bt/htm_data/4/1109/485578.html')
        