#coding=utf-8

'''
Created on 2011-8-30

@author: chris
'''
import sys
sys.path.append('/harvester/ripper/source/src/')
from ripper.core.DataTypes import createImage
from ripper.core.Utils import getRegFromText #@UnresolvedImport


from ripper.parser.Parser import Parser
import os.path
from ripper.handler.converter.langconv import Converter
import re
import datetime
import codecs
from ripper.handler.TorrentDownload import canDownload

class HentaiSchoolParser(Parser):

    ''' Parser for http://hentaischool.com '''
    
    
    def goto_page(self, pageUrl, num=1):
        # Build the URL of listing page *num*: the site paginates in steps
        # of 10, and expects the item offset appended to the base URL.
        p = str((num-1)*10)
        return pageUrl + p 
        
    # Parse the gallery listing page: yields one dict per gallery entry
    # (title, cover image, detail-page URL, author placeholder, empty date).
    def parse_obj_list(self, pageIndex, keyProp, otherProps, item):
        
#        text = open('h:/Mangas - Hentai School - Page 20.htm').read()
#        soup = self.get_soup(text)
        soup = self.get_soup(url=pageIndex.url)
        
        # Listing rows alternate between the 'row1' and 'row2' CSS classes.
        # NOTE(review): the trailing space inside the class strings appears
        # deliberate — it matches the site's emitted markup; do not strip it.
        xx = soup.findAll('div', {'class' : 'idm_category_row clearfix row1 '})
        xx2 = soup.findAll('div', {'class' : 'idm_category_row clearfix row2 '})
        divs = []
        divs.extend(list(xx))
        divs.extend(list(xx2))
        
        c = 0
        header = ''
        for x in divs:
            
            href = None
            image = None
            title = None
            
            # Each attribute is read twice: `attrs[0][1]` (positional pair
            # list) with a fallback to `attrs['name']` (dict) — presumably to
            # cope with two BeautifulSoup versions' attr representations.
            # TODO(review): confirm which representation the deployed BS uses.
            try:
                href = header + x.findAll('a')[0].attrs[0][1]
            except Exception:
                href = header + x.findAll('div')[0].findAll('a')[0].attrs['href']
            
            try:
                image = x.findAll('a')[0].findAll('img')[0].attrs[0][1]
            except Exception:
                image = x.findAll('a')[0].findAll('img')[0].attrs['src']
            
            try:
                title = x.findAll('a')[0].attrs[1][1]
            except Exception:
                title = x.findAll('a')[0].attrs['title']

            
            
            # The site exposes no per-item author/date; fill placeholders.
            author = 'harvester'
            
            dateStr = ''
            
#            fakeLength = -1
#            for p in item.get_property('properties'):
#                if p['name'] == 'titleImage':
#                    fakeLength = p['fakeLength']
            
            # Drop any fragment so the detail URL is canonical.
            href = href.split('#')[0]
            
            yield {'title' : title,
                   'titleImage' : createImage(image),
                   'gIndexUrl' : href,
                   'author' : author,
                   'pubDate' : dateStr
                   }
            
    
    # Property resolver: cover image was already built during list parsing.
    def get_titleImage(self, url, prop, obj):
        return obj['titleImage']
    
    # Property resolver: collection timestamp is "now" in UTC.
    def get_collectDate(self, url, prop, obj):
        return datetime.datetime.utcnow()
    
    # Property resolver: collect the final URLs of every image in a gallery.
    # Returns a list of URLs, [] when there is no "View Online" link, or
    # None when no images were found (so the item is re-checked later).
    def get_images(self, url, prop, obj):
        images = []
        
#        htmlContent = open('h:/k-ricchan-ni-furimawasarete-mi-youcopyshi-daioujou.htm').read()
#        htmlContent = open('h:/ariha-toyukai-nakama-tachi-omoni-rpg-soushuuhen.htm').read()
#        htmlContent = open('h:/319.htm').read()
#        htmlContent = open('h:/Rei - One Student Compilation 2 - Hentai School.htm').read()
#        htmlContent = open('h:/Isutoshi, Ero Isu - Chapter 5 - Hentai School0.htm').read()
#        htmlContent = open('h:/Isutoshi, Ero Isu - Chapter 5 - Hentai School.htm').read()
        
        htmlContent = self.get_content(url)
        # read img list page #1 
        onlineUrl = None
        soup = self.get_soup(htmlContent)
        a = soup.findAll('a', {'class':'download_button right'})[0]
        # Same dual attr-access pattern as in parse_obj_list (BS version).
        try:
            onlineUrl = a.attrs[0][1]
        except Exception:
            onlineUrl = a.attrs['href']
        
        preImgLinks = [] # links to every per-image page
        
        # Collect all image links from the gallery
        
        # Probe-until-404 approach to find the pages holding the full images
        _onlineUrl = onlineUrl
        # After following the download link, the URL redirects when it jumps
        # straight to the actual gallery.
        __0, onlineUrl = self.get_content(onlineUrl, needUrl=True)
        
        
        # If the URL did not change, one more intermediate hop is required:
        # find the "View Online" button on the page we just fetched.
        if onlineUrl == _onlineUrl:
            finalUrl = None
            sp = self.get_soup(text=__0)
            aList = sp.findAll('a', {'class' : 'download_button'})
            for a in aList:
                txt = a.text
                if 'Online' in txt:
                    try:
                        finalUrl = a.attrs[0][1]
                    except Exception:
                        finalUrl = a.attrs['href']
            if finalUrl == None:
                return [] # no "View Online" link available
            else:
                __0, onlineUrl = self.get_content(finalUrl, needUrl=True)
                
        
        # Gallery URL
        print 'onlineUrl', onlineUrl        
        imagePageUrl = onlineUrl
        ptOrgImgUrl = r'<img src="(.*?)".*?border="0">'
        preContents = []
        # Walk image1.html, image2.html, ... until a fetch fails (404) or
        # the page no longer contains an image link. 5000 is a hard ceiling.
        for i in xrange(1, 5000):
            imagePageUrl = onlineUrl + '/imagepages/image%d.html' % i
            imagePageUrl = imagePageUrl.replace('index.html', '')
            try:
                pageContent = self.get_content(imagePageUrl, needException=True)
                orgLnk = getRegFromText(ptOrgImgUrl, pageContent)
                if orgLnk == None :
                    break
                preImgLinks.append(orgLnk)
                preContents.append([imagePageUrl, pageContent])
            except Exception, e:
                print 'Error :', e, imagePageUrl
                break
        
        totalImgCount = len(preImgLinks)
        print 'total preLinkPages:', totalImgCount
        
        if totalImgCount == 0:
            return None
        
        # Guess the final image URLs from a fixed layout pattern first
        # (cheap: no per-image page parsing needed if the guess holds).
        for lnk in preImgLinks:
            lnk = onlineUrl.rpartition('/')[0] + '/FAKE/' + lnk
            images.append(lnk)
        
        # Verify the guessed URLs; if they are wrong, fall back to parsing
        # each pre-fetched image page for the real URL.
        if not self.__verify_image_url(images):
            images = self.__load_image_url(preContents)
        
        # No images at all: return None so the item is re-checked next run.
        if images == []:
            return None
        
        return images
    
    # Validate the guessed image URLs by downloading a sample.
    def __verify_image_url(self, images):
        # Sample the first and last URL; if both serve an image MIME type,
        # accept the whole guessed list.
        try:
            print 'veryfing.. '
            fn, header1 = self.get_file(images[0])
            fn, header2 = self.get_file(images[-1])
            tp1 = str(header1.gettype())
            tp2 = str(header2.gettype())
            if self._check_is_img(tp1) and self._check_is_img(tp2):
                print 'correct.'
                return True
        except Exception, ex:
            print ex
            print 'failed.'
            return False
        
        print 'failed'
        return False
        
    # Fallback: extract each real image URL from its pre-fetched page.
    def __load_image_url(self, preContents):
        print 'loading img from preLinks...'
        images = []
        for imagePageUrl, pageContent in preContents:
            soup = self.get_soup(text=pageContent)
            imgs = list(soup.findAll('img'))
            # The full-size image is the last <img> on the page.
            img = imgs[-1]
            url = ''
            try:
                url = img.attrs[0][1]
            except Exception:
                url = img.attrs['src']
            # The src is relative to the image page's directory.
            imageUrl = imagePageUrl.rpartition('/')[0] + '/' + url
            images.append(imageUrl)
        return images
    
     
    # True when the MIME type looks like an image (jpeg/gif/bmp/png) or a
    # generic binary stream (some servers mislabel images that way).
    def _check_is_img(self, mimeType):
        mimeType = mimeType.lower()
        if re.match('.*?jp[e]*g.*',mimeType):
            return True
        elif re.match('.*?gif.*',mimeType):
            return True
        elif re.match('.*?bmp.*',mimeType):
            return True
        elif re.match('.*?png.*',mimeType):
            return True
        elif mimeType == 'application/octet-stream':
            return True
        return False
    
    # Debug helper: print the last <img> src found in a saved HTML file.
    def test_image(self, fname):
        soup= self.get_soup(open(fname).read())
        imgs = list(soup.findAll('img'))
        img = imgs[-1]
        url = ''
        try:
            url = img.attrs[0][1]
        except Exception:
            url = img.attrs['src']
        print url
        
    # Debug helper: fetch a listing page and print how many rows it has.
    def test_index(self, url):
        soup = self.get_soup(url=url)
            
        xx = soup.findAll('div', {'class' : 'idm_category_row clearfix row1 '})
        xx2 = soup.findAll('div', {'class' : 'idm_category_row clearfix row2 '})
        divs = []
        divs.extend(list(xx))
        divs.extend(list(xx2))
        
        print 'len',len(divs)
        
        # NOTE(review): early return — everything below is dead code, kept
        # only as a scratch copy of the parse_obj_list extraction loop.
        return
        c = 0
        header = ''
        for x in divs:
            
            href = None
            image = None
            title = None
            
            try:
                href = header + x.findAll('a')[0].attrs[0][1]
            except Exception:
                href = header + x.findAll('div')[0].findAll('a')[0].attrs['href']
            
            try:
                image = x.findAll('a')[0].findAll('img')[0].attrs[0][1]
            except Exception:
                image = x.findAll('a')[0].findAll('img')[0].attrs['src']
            
            try:
                title = x.findAll('a')[0].attrs[1][1]
            except Exception:
                title = x.findAll('a')[0].attrs['title']
    
            
            
            author = 'harvester'
            
            dateStr = ''
            
            href = href.split('#')[0]

def testDetail(url):
    a = HentaiSchoolParser(None, True)
    t = a.get_images(url, None, None)
#    t = a.test_image('h:/image10.html')
    print t
    
def testIndex(url):
    # Manual smoke test for the listing parser.
    #
    # Fix: the *url* argument used to be ignored entirely in favor of a
    # hard-coded category URL. It is now honored when given; passing a
    # falsy value (as the __main__ driver does with None) still falls back
    # to the historical default URL, so existing callers are unaffected.
    default = ('http://hentaischool.com/files/category/5-mangas/'
               'page__sort_by__DESC__sort_key__file_submitted__num__10__st__')
    parser = HentaiSchoolParser(None, True)
    parser.test_index(url or default)


        
if __name__ == '__main__':
    # Manual entry point: exercise the listing parser against the live site
    # (testIndex falls back to its built-in default URL when passed None).
#    parser = AisexParser(None, needProxy=True)
#    parser.parse_index()
#    testDetail('http://www.aisex.com/bt/htm_data/4/1109/485578.html')
    testIndex(None)
#     testDetail(None)