#! /usr/bin/env python
# -*- coding: utf-8 -*-
from urllib import urlencode
import urllib2
from StringIO import StringIO
import cookielib
import gzip
import json
import os
import random
import re
import sys
import threading
import time
from xml.etree import ElementTree as ET

import config
def encode_str(str, format=None):
    """Re-encode a UTF-8 byte string into another encoding.

    Arguments:

    str -- UTF-8 encoded byte string to convert.  (Parameter shadows the
           builtin ``str``; name kept for backward compatibility.)
    format -- target encoding.  Default is the filesystem encoding.
    """
    # Bug fix: ``sys`` was referenced below but never imported at module
    # level, so the default branch raised NameError.
    target = format if format else sys.getfilesystemencoding()
    return str.decode('UTF-8').encode(target)
def filter_tags(htmlstr):
    """Strip markup from an HTML string, leaving only its text.

    Removes CDATA sections, <script> and <style> blocks, HTML tags and
    comments; <br> becomes a newline and runs of blank lines collapse to
    one.  Character entities are resolved via replaceCharEntity().

    Arguments:

    htmlstr -- the HTML string to clean.
    """
    # (pattern, replacement) pairs, applied in order.
    rules = [
        (re.compile('//<!\[CDATA\[[^>]*//\]\]>', re.I), ''),                # CDATA sections
        (re.compile('<\s*script[^>]*>[^<]*<\s*/\s*script\s*>', re.I), ''),  # script blocks
        (re.compile('<\s*style[^>]*>[^<]*<\s*/\s*style\s*>', re.I), ''),    # style blocks
        (re.compile('<br\s*?/?>'), '\n'),                                   # <br> -> newline
        (re.compile('</?\w+[^>]*>'), ''),                                   # remaining tags
        (re.compile('<!--[^>]*-->'), ''),                                   # HTML comments
        (re.compile('\n+'), '\n'),                                          # collapse blank lines
    ]
    text = htmlstr
    for pattern, replacement in rules:
        text = pattern.sub(replacement, text)
    # Finally resolve &amp;, &lt;, ... into literal characters.
    return replaceCharEntity(text)

def replaceCharEntity(htmlstr):
    """Replace common HTML character entities with literal characters.

    Unknown entities are deleted.  Add new entries to ENTITY_MAP to
    handle more entities.

    Arguments:

    htmlstr -- HTML string possibly containing entities like &gt;.
    """
    ENTITY_MAP = {'nbsp':' ','160':' ',
                'lt':'<','60':'<',
                'gt':'>','62':'>',
                'amp':'&','38':'&',
                'quot':'"','34':'"',}

    pattern = re.compile(r'&#?(?P<name>\w+);')
    # Replace one entity at a time, re-scanning from the start each round
    # (so entities produced by a replacement are resolved too).
    match = pattern.search(htmlstr)
    while match:
        # Unknown entity names map to the empty string (i.e. dropped).
        replacement = ENTITY_MAP.get(match.group('name'), '')
        htmlstr = pattern.sub(replacement, htmlstr, 1)
        match = pattern.search(htmlstr)
    return htmlstr

def repalce(s, re_exp, repl_string):
    """Substitute every match of compiled pattern *re_exp* in *s*.

    NOTE(review): the name is misspelled ("repalce") but is kept as-is
    for existing callers.
    """
    result = re_exp.sub(repl_string, s)
    return result

def strip_html_tags(text):
    """Replace every HTML tag in *text* with a single space."""
    tag_pattern = re.compile(r'<[^>]*?>')
    return tag_pattern.sub(' ', text)

def makeEasyTag(dom, tagname, value, type='text'):
    """Create a minidom element named *tagname* containing *value*.

    for minidom only

    Arguments:

    dom -- xml.dom.minidom Document used as the node factory.
    tagname -- name of the element to create.
    value -- text content for the element.
    type -- 'text' for a text node, 'cdata' for a CDATA section.

    A value containing ']]>' cannot legally live in a CDATA section, so
    it is forced into a text node.  Bug fix: an unrecognised *type* used
    to raise NameError (the local ``text`` was never bound); it now falls
    back to a text node as well.

    NOTE(review): for text nodes, '&' and '<' are pre-escaped *before*
    minidom's own serialisation escaping, producing double-escaped
    output — kept as-is for compatibility with existing consumers.
    """
    tag = dom.createElement(tagname)
    if value.find(']]>') > -1:
        type = 'text'
    if type == 'cdata':
        text = dom.createCDATASection(value)
    else:
        # 'text' and any unknown type both produce an escaped text node.
        value = value.replace('&', '&amp;')
        value = value.replace('<', '&lt;')
        text = dom.createTextNode(value)
    tag.appendChild(text)
    return tag

def getR():
    """Return a random request token.

    Format: '0.' + 16 random decimal digits + '_' + current time in
    milliseconds.
    """
    digits = ''.join(random.choice('1234567890') for _ in range(16))
    millis = int(time.time() * 1000)
    return '0.%s_%d' % (digits, millis)


def get_verify(url):
    """GET *url* and extract the page's g_verify token.

    Returns [verify, uid] where uid is the portion of the token before
    the first '_'.
    """
    response = httprequest.openurl(url, method='Get')
    body = response['data']
    verify = re.search('var g_verify = "(?P<verify>.*)";', body).group('verify')
    uid = verify[:verify.find('_')]
    return [verify, uid]

def gardenVerify():
    """Return the [verify, uid] token pair for the garden page."""
    return get_verify('http://www.kaixin001.com/!house/garden/index.php')
def ranchVerify():
    """Return the [verify, uid] token pair for the ranch page."""
    return get_verify('http://www.kaixin001.com/!house/ranch/index.php')
def fishingVerify():
    """Return the [verify, uid] token pair for the fishing page."""
    return get_verify('http://www.kaixin001.com/!fish/index.php')

class Httpsocket:
    """Build for Make a full HttpRequest via POST/GET.

    Cookies are kept in an LWPCookieJar; when config.currentUser is set
    they are persisted to '<currentUser>.txt' across runs.
    """

    def __init__(self):
        self.cj = cookielib.LWPCookieJar()
        if config.currentUser:
            # Per-user cookie file; reload it when it already exists.
            self.COOKIEFILE = config.currentUser + '.txt'
            if os.path.isfile(self.COOKIEFILE):
                self.cj.load(self.COOKIEFILE)
        else:
            self.COOKIEFILE = ''
        self.cookies = urllib2.HTTPCookieProcessor(self.cj)
        self.opener = urllib2.build_opener(self.cookies)
        self.opener.addheaders = [
                ('User-agent', 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-us; rv:1.9.0.10) Gecko/2009042316 Firefox/3.0.10'),
                ('Accept-encoding', 'gzip'),
                # ('Connection', 'keep-alive') is not supported by urllib2
                ('Keep-Alive', '300')
                ]
        #urllib2.install_opener(self.opener)

    def connect(self, loginurl, param=None):
        """POST *param* to *loginurl* and return the raw response object.

        Used for login so that the shared cookie jar gets populated.
        """
        self.loginurl = loginurl
        # Bug fix: the mutable default argument ({}) was shared between
        # calls; use None as the sentinel instead.
        self.param = param if param is not None else {}
        self.encodeparam = urlencode(self.param)
        result = self.opener.open(self.loginurl, self.encodeparam)
        return result

    def openurl(self, url, param=None, method='Post'):
        """Request *url* and return a dict describing the response.

        Keys: 'data' (body, gunzipped if needed), 'etag',
        'lastmodified', 'url' (final URL after redirects) and 'status'
        (200 unless the handle reports otherwise).
        """
        # Global throttle between requests.
        time.sleep(config.global_connet_delay)
        self.loginurl = url
        # Bug fix: mutable default argument ({}) was shared between calls.
        self.param = param if param is not None else {}
        self.encodeparam = urlencode(self.param)
        result = {}
        if method == 'Get':
            if self.param:
                f = self.opener.open(self.loginurl + '?' + self.encodeparam)
            else:
                f = self.opener.open(self.loginurl)
        else:
            f = self.opener.open(self.loginurl, self.encodeparam)
        try:
            result['data'] = f.read()
            if hasattr(f, 'headers'):
                # save ETag, if the server sent one
                result['etag'] = f.headers.get('ETag')
                # save Last-Modified header, if the server sent one
                result['lastmodified'] = f.headers.get('Last-Modified')
                if f.headers.get('content-encoding', '') == 'gzip':
                    # data came back gzip-compressed, decompress it
                    result['data'] = gzip.GzipFile(fileobj=StringIO(result['data'])).read()
            if hasattr(f, 'url'):
                result['url'] = f.url
                result['status'] = 200
            if hasattr(f, 'status'):
                result['status'] = f.status
            if self.COOKIEFILE:
                # Persist session cookies for the current user.
                self.cj.save(self.COOKIEFILE)
        finally:
            # Bug fix: the response handle leaked when reading or
            # decompressing raised; always close it.
            f.close()
        return result

# Shared module-level HTTP session, created at import time (loads the
# current user's cookie file as a side effect).
httprequest = Httpsocket()

def logout():
    """Log out of kaixin001.

    NOTE(review): the bare ``return`` below disables this function — the
    request code after it is unreachable.  Presumably logout was switched
    off deliberately; confirm before removing the early return.
    """
    return
    url = 'http://www.kaixin001.com/login/logout.php'
    f = httprequest.openurl( url, method = 'Get' )
    if f['url'] == 'http://www.kaixin001.com/':
        return True

class accountInfo():
    """Accounts read from account.xml; performs kaixin001 login.

    account.xml is expected to contain <account> elements with <user>
    and <password> children (assumed from usage — confirm schema).
    """

    def __init__(self, parent=None):
        self.accountfile = ET.parse('account.xml')
        self.users = self.accountfile.findall('account')
        # Plain list of user names, e.g. for populating a UI chooser.
        self.userlist = [elm.find('user').text for elm in self.users]

    def login(self, username, param=None):
        """Log *username* in via the shared httprequest session.

        Arguments:
        username -- account name; must exist in account.xml.
        param -- optional extra POST fields (not mutated).

        Returns True on success (redirected to a 'home' URL), None
        otherwise.  Raises KeyError for an unknown username (bug fix:
        previously an unbound local raised NameError instead).
        """
        password = None
        for elm in self.users:
            if username == elm.find('user').text:
                password = elm.find('password').text
        if password is None:
            raise KeyError('unknown account: %r' % username)
        # Bug fix: ``param={}`` was a shared mutable default that got the
        # credentials written into it on every call; copy instead so
        # neither the default nor the caller's dict is mutated.
        user_data = dict(param) if param else {}
        user_data['email'] = username
        user_data['password'] = password
        user_data['remember'] = '1'

        url = 'http://www.kaixin001.com/login/login.php'
        #url = 'http://wml.kaixin001.com/login/login.php'
        f = httprequest.connect( url, user_data )
        if f.geturl().find('home') > 0:
            src = f.read()
            return True
        else:
            src = f.read()
            if hasattr(f, 'headers'):
                if f.headers.get('content-encoding', '') == 'gzip':
                    src = gzip.GzipFile(fileobj=StringIO(src)).read()
            # On failure the page exposes an error code via
            # $("prompt" + N); 10/11/12 are known codes (handling TBD).
            match = re.search(r'var obj = \$\("prompt" \+ "(?P<find>\d*)"', src)
            if match:
                prompt = match.group('find')
                if prompt in ['10','11','12']:
                    pass
                #print getR()


def getAllFriend():
    """Return {uid: real_name} for every suggested friend with a uid."""
    resp = httprequest.openurl(
        'http://www.kaixin001.com/interface//suggestfriend.php',
        {'pars':'f1', 'type':'all'}, 'Get')
    payload = json.loads(resp['data'])
    # Entries with a falsy uid are placeholders; skip them.
    return dict((elm['uid'], elm['real_name']) for elm in payload if elm['uid'])
