#coding=utf-8

'''
Created on 2011-8-30
HTTP helpers and DOM-parsing helpers.
@author: chris
'''
from ripper.core.Exceptions import ParseException, DownloadException
import urllib, os
import cookielib
from ripper.core.EncodingConvert import zh2gbk, zh2utf8, zh218030
from cookielib import FileCookieJar

try:
    from bs4 import BeautifulSoup
except Exception:
    from BeautifulSoup import BeautifulSoup
#from bs4 import BeautifulSoup

from ripper.core.Utils import enable_proxy, disable_proxy #@UnresolvedImport
import HTMLParser
import re

from ripper.parser import BeautifulSoup as bs32

#from bs4 import BeautifulSoup

import urllib2
import time
from ripper.core import EncodingConvert

# Monkey-patch the attribute-matching regex used by the stdlib HTMLParser
# (which BeautifulSoup drives underneath): the extra characters excluded in
# the unquoted-value branch ([^\s>^\[\]{}\|\'\"]*) keep malformed attribute
# values containing [, ], {, }, | or ^ from aborting the parse.
# NOTE(review): this mutates HTMLParser globally for the whole process.
HTMLParser.attrfind = re.compile(
r'\s*([a-zA-Z_][-.:a-zA-Z_0-9]*)(\s*=\s*'
r'(\'[^\']*\'|"[^"]*"|[^\s>^\[\]{}\|\'\"]*))?') 



class Parser(object):
    
    '''
            网页文本解析器
    '''

#    def __init__(self, url='', text=''):
#        '''
#        Constructor
#        '''
#        if '' != text:
#            self.content = text
#        elif '' != url and '' == text:
#            self.content = self.__request_content(url)
#        else :
#            raise ParseException('解析异常, 没有可解析的内容或URL')

    def __init__(self, item=None, needProxy=False, needConvert=True):
        self.item = item
        self.curUrl = ''
        self.isLogin = False
        self.needProxy = needProxy
        self.cache = {} # url:content
        cookieJar = cookielib.CookieJar()
        cookie_support= urllib2.HTTPCookieProcessor(cookieJar)
        opener = urllib2.build_opener(cookie_support, urllib2.HTTPHandler)
        urllib2.install_opener(opener)
        self.needConvert = needConvert
        
    def login_aisex(self):
        #login
        postdata=urllib.urlencode({
             'loginuser':'outlookxp',
             'loginpwd':'123123',
             'hideid':'0',
             'cktime':'31536000',
             'jumpurl':'http://www.aisex.com/bt/thread.php?fid=4',
             'loginpwd':'123123',
             'step':'2',
        })
        gheaders = {
             'User-Agent':'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.6) Gecko/20091201 Firefox/3.5.6',
             'referer':'http://www.aisex.com/bt'
        }
        
        req = urllib2.Request(
              url = 'http://www.aisex.com/bt/login.php',
              data = postdata,
              headers = gheaders
        )
        enable_proxy()
        r = urllib2.urlopen(req)
        text = r.read()
        text = EncodingConvert.zh2utf8(text)[1]
        disable_proxy()
        return    
        
    def login_mySite(self):
        #login
        postdata=urllib.urlencode({
             'user':'dajiji',
             'pass':'1',
        })
        gheaders = {
             'User-Agent':'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.6) Gecko/20091201 Firefox/3.5.6',
        }
        
        req = urllib2.Request(
              url = 'http://h.pphime.com/z/mlist/',
              data = postdata,
              headers = gheaders
        )
        r = urllib2.urlopen(req)
        text = r.read()
        text = EncodingConvert.zh2utf8(text)[1]
        return    
    
    def login_sis(self):
        #login
        pattern = re.compile("<input type=\"hidden\" name=\"formhash\" value=\"\w*\" \/>")
        content = self.get_content('http://sexinsex.net/forum/logging.php?action=login')
        formhash = pattern.findall(content)
        formhash = formhash[0][-12:-4]
        postdata=urllib.urlencode({
             'loginfield':'username',
             'username':'outlookxx123',
             'password':'bjitsm123456',
             'referer': 'http://sexinsex.net/forum',
             'formhash':formhash,
             'questionid':'0',
             'answer':''
        })
        self.gheaders = {
             'User-Agent':'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.6) Gecko/20091201 Firefox/3.5.6',
             'referer':'http://sexinsex.net'
        }
        
        req = urllib2.Request(
              url = 'http://sexinsex.net/forum/logging.php?action=login&loginsubmit=yes&inajax=1',
              data = postdata,
              headers = self.gheaders
        )
        if self.needProxy == True:
            enable_proxy()
#        filename = 'cookie.txt'
#        ckjar = cookielib.MozillaCookieJar(filename) 
#        ckproc = urllib2.HTTPCookieProcessor(ckjar)
#        opener = urllib2.build_opener(ckproc)
        r = urllib2.urlopen(req)
#        r = opener.open(req)
        text = r.read()
#        print zh2utf8(text)[1]
#        ckjar.save(ignore_discard=True, ignore_expires=True)
        return
    
    def login_dg(self):
        print 'loggin in dg2012...'
        #login
        pattern = re.compile("<input type=\"hidden\" name=\"formhash\" value=\"\w*\" \/>")
        content = self.get_content('http://bbs.dg2012.com/logging.php?action=login')
        formhash = pattern.findall(content)
        formhash = formhash[0][-12:-4]
        postdata=urllib.urlencode({
             'loginfield':'username',
             'username': zh2gbk('牙疼小狐狸')[1],
             'password':'123',
             'referer': 'index.php',
             'cookietime': '2592000',
             'formhash':formhash,
             'questionid':'0',
             'answer':'',
             'styleid':'',
             'loginmode':'',
             'loginsubmit':'true',
        })
        self.gheaders = { 
             'User-Agent':'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.6) Gecko/20091201 Firefox/3.5.6',
             'referer':'http://bbs.dg2012.com'
        }
        
        req = urllib2.Request(
              url = 'http://bbs.dg2012.com/logging.php?action=login',
              data = postdata,
              headers = self.gheaders
        )
        r = urllib2.urlopen(req)
        text = r.read()
        self.isLogin = True
        return
    
    def get_soup_attr(self, obj, idx1=None, idx2=None, name=None):
        try:
            return obj.attrs[idx1][idx2]
        except Exception, ex:
            return obj.attrs[name]
    
    # needUrl : 返回 [内容, 可能的302转接后的实际url]
    def get_content(self, url, needException=False, needUrl=False, gheaders=None, needContent=True):
        # 重复请求缓冲
        if self.cache.has_key(url):
            return self.cache[url]
        try:
            ct, burl = self.get_page_html(url, needException, needUrl=True, gheaders=gheaders, needContent=needContent)
            self.cache[url] = ct
            if needUrl:
                return ct, burl
            else:
                return ct
        except ParseException, ex:
            print ex
            raise ex
        except DownloadException, ex: # needed by uHentai
            raise ex
        self.curUrl = url
        self.ct = self.ct.replace('\r\n','\n')
        return self.ct
    
    def get_soup(self, text=None, url=None):
        soup = None
        if text != None :
            soup = self.__get_soup(text)
            return soup
        if url != None:
            text = self.get_content(url)
            soup = self.__get_soup(text)
            return soup
        
    def __get_soup(self, text):
        soup = None
        try:
            soup = BeautifulSoup(text)
        except HTMLParser.HTMLParseError, ex:
            print ex 
            print 'switch to 3.2'
            soup = bs32.BeautifulSoup(text)
        return soup
    
    # 取得某页列表的url, 返回分页后的url
    # pageUrl 不 包含分页参数
    def goto_page(self, pageUrl, num=1):
        return pageUrl
    
    #生成实体数据列表和每个实体数据的唯一标识(名称, url, etc...)    
    def parse_obj_list(self, pageIndex, keyProp, otherProps, needProxy=False):
        pass
    
    # 解析属性
    def parse_property(self, property):
        pass
    
    # 请求web页面
    def get_page_html(self, uri, retrys=10, needException=False, needUrl=False, gheaders=None, needContent=True):
        #i_lock.acquire()
        #time.sleep(WORK_INTERVAL)
        if self.needProxy == True:
            # 开启代理服务器
            enable_proxy()
        try:
            ct, url = self._get_page_html(uri, gheaders=None, needContent=needContent)
            if needUrl:
                return ct, url
            else:
                return ct
        except Exception, ex:
            if needException==True:
                raise DownloadException('needException')
            print 'error getting', uri
            print ex
            if retrys > 0:
                print 'retry'
                time.sleep(5)
                return self.get_page_html(uri, retrys=retrys-1)
            else :
                msg = 'already retry %d times, unable to fetch.' % retrys
                print msg
                raise ParseException(msg) 
        finally:
            pass
            if self.needProxy == True:
                disable_proxy()
            #i_lock.release()
            
    def _get_page_html(self, uri, gheaders=None, needContent=True):
        print 'fetching ', uri
        gheaders = {
             'User-Agent':'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.6) Gecko/20091201 Firefox/3.5.6',
        }
        req = None
#        urlOpener = urllib2.build_opener \
#                    (urllib2.HTTPCookieProcessor(FileCookieJar('cookie.txt')))
                    
        if gheaders != None:
            try:
                req = urllib2.Request(url=uri, headers=gheaders)
            except AttributeError, err:
                raise ParseException(str(err))
        else:
            try:
                req = urllib2.Request(uri)
            except AttributeError, err:
                raise ParseException(str(err))
        if needContent == False:
            return None, None
        
        breq = urllib2.urlopen(req)
#        breq = urlOpener.open(req)
        actualUrl = breq.url
        ct = breq.read()
        if self.needConvert == True:
            ct = zh2utf8(ct)[1]
        return ct,actualUrl
    
    def get_file(self, uri, dir, fname):
        if self.needProxy:
            enable_proxy()
        print 'fetching ', uri
        gheaders = {
             'User-Agent':'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.6) Gecko/20091201 Firefox/3.5.6',
        }
        req = None
        if gheaders != None:
            try:
                req = urllib2.Request(url=uri, headers=gheaders)
            except AttributeError, err:
                raise ParseException(str(err))
        else:
            try:
                req = urllib2.Request(uri)
            except AttributeError, err:
                raise ParseException(str(err))
        breq = urllib2.urlopen(req)
        ct = breq.read()
        fn = os.path.join(dir, fname)
        f = open(fn, 'wb')
        f.write(ct)
        f.close()
        if self.needProxy == True:
            disable_proxy()
        return fn,len(ct)
    
    def get_cache(self, key):
        if self.cache.has_key(key):
            return self.cache[key]
        else:
            return None
        