from google.appengine.ext import db
import datetime
import urllib,urllib2
from BeautifulSoup import BeautifulSoup
import logging
import string
import xmw
from google.appengine.api import memcache
from xmw.db.counter import PostCounter
import global_setting
import xmlrpclib

class TianyaThread(xmw.Thread):
    """A Tianya forum thread tracked in the GAE datastore.

    Extends xmw.Thread with Tianya-specific page fetching and scanning
    state.  Properties such as thread_id, offset, page_cnt, lst_page_url
    and status are presumably declared on xmw.Thread -- confirm against
    that module.
    """

    # Timestamp (China local time, via xmw.get_cn_now_time) of the last scan.
    last_refresh = db.DateTimeProperty()

    def on_page_finish(self, page):
        """Advance the scan cursor after `page` finished a scan, then persist."""
        # Only step forward while there are still unscanned pages.
        if page.offset + 1 < self.page_cnt:
            self.offset = self.offset + 1
        self.last_refresh = xmw.get_cn_now_time()
        self.put()

    def get_offset_page(self):
        """Fetch the page the scan cursor currently points at."""
        return self.get_page(self.offset)

    def on_RUN(self):
        """RUN-state hook: grow page_cnt if the thread gained new pages."""
        page = self.get_page(self.offset)
        cnt = page.get_form_cnt()
        # page_cnt only ever grows; a thread cannot lose pages.
        if cnt > self.page_cnt:
            self.page_cnt = cnt
        return

    def on_INIT(self):
        """INIT-state hook called by the refresher.

        Loads the first page, fills in title/author/page list, positions
        the cursor on the latest page, then registers the thread with the
        remote db manager over XML-RPC (which takes over persistence).
        """
        if self.thread_id == "NA":
            self.thread_id = xmw.get_uuid_hex()
        page_url = self.lst_page_url[0]
        page = TianyaPage(url=page_url)
        page.thread_id = self.thread_id
        page.reload()

        self.title = page.get_title()
        self.chrAuthor = page.get_chrAuthor()
        self.lst_page_url = page.get_lst_page_url()
        self.page_cnt = page.get_form_cnt()

        # Start scanning from the latest page.
        self.offset = self.page_cnt - 1

        self.status = xmw.Thread_Status.RUN
        # No local self.put(): the remote manager owns persistence of
        # newly initialised threads.
        s = xmlrpclib.Server("http://" + global_setting.db_host + "/xmlrpc/db_mgr")
        s.thread_mgr.add_thread(self.to_thread_dict())
        return

    def get_default_page_key(self):
        """Memcache key under which this thread's default page is stored."""
        return self.thread_id + '_default'

    def get_default_cached_page(self):
        """Return the cached default page, or None on a memcache miss."""
        return memcache.get(key=self.get_default_page_key())

    def refresh(self):
        """Re-read thread metadata from the last page and persist it."""
        page = self.get_last_page()

        self.title = page.get_title()
        self.chrAuthor = page.get_chrAuthor()
        self.lst_page_url = page.get_lst_page_url()
        # BUG FIX: the original assigned this to a dead local `offset`,
        # so the cursor was never actually moved to the newest page.
        self.offset = page.get_form_cnt() - 1

        page.put()
        self.put()
        return

    def get_last_page(self):
        """Fetch the final page of the thread."""
        return self.get_page(self.page_cnt - 1)

    def get_first_page(self):
        """Fetch and parse page 0 directly from its URL."""
        url = self.lst_page_url[0]
        page = TianyaPage(url=url)
        page.reload()
        return page

    def get_page(self, page_number):
        """Fetch page `page_number` (0-based) of the thread.

        Page 0 is a plain GET; later pages are POSTed to the same URL with
        hidden-form parameters scraped from the first page (Tianya's
        FORM-style pagination).
        """
        url = self.lst_page_url[0]
        first_page = self.get_first_page()
        if page_number == 0:
            return first_page

        # Hidden form fields required by Tianya's paging endpoint.
        # Empirically only apn and pID are strictly needed.
        rs_strTitle_aa = first_page.get_rs_strTitle_aa()
        intLogo = first_page.get_intLogo()
        rs_permission = first_page.get_rs_permission()
        apn = first_page.get_apn()

        params_dict = {
            'rs_strTitle_aa': repr(rs_strTitle_aa),
            'intLogo': intLogo,
            'rs_permission': rs_permission,
            'apn': apn,
            'pID': str(page_number),
        }
        logging.info("send post req:%s" % params_dict)
        params = urllib.urlencode(params_dict)

        # Close the socket even if read() raises.
        sock = urllib.urlopen(url, params)
        try:
            html = sock.read()
        finally:
            sock.close()

        # NOTE(review): offset is stored as page_number-1 here while page 0
        # uses offset 0 -- looks inconsistent; confirm against callers
        # before changing.
        page = TianyaPage(html, url, self.thread_id, page_number - 1)
        return page

    @staticmethod
    def query_thread_by_url(url):
        """Look up the thread whose page-URL list contains `url`."""
        # BUG FIX: Model.all() is a class-level query factory; instantiating
        # TianyaThread() just to call all() was wasteful and can fail when
        # required properties have no defaults.
        thread_q = TianyaThread.all()
        thread_q.filter('lst_page_url =', url)
        return thread_q.get()

    @staticmethod
    def add_thread(url, thread_id=None):
        """Create and persist a new thread for `url`, generating an id if needed."""
        if thread_id is None:
            thread_id = xmw.get_uuid_hex()
        thread = TianyaThread(thread_id=thread_id, lst_page_url=[url])
        thread.put()
        return thread
        
class PAGE_TYPE:
    """String constants naming Tianya's two pagination styles."""
    LINK = "LINK"  # pagination via a list of page-id links (idArticleslist)
    FORM = "FORM"  # pagination via the hidden pageForm POST form
    
def unicode_to_datetime(unicode_str):
    """Coerce a unicode timestamp to ASCII, then parse it into a datetime."""
    return ascii_to_datetime(unicode_str.encode('ascii'))

def ascii_to_datetime(ascii_str):
    """Parse a Tianya timestamp into a datetime.

    Some floors carry only a date with no time part (e.g. floor 1095 of
    http://www.tianya.cn/techforum/content/16/619502.shtml); a bare date
    is at most 10 characters, a full timestamp 19, so the length decides
    which format to use.
    """
    if len(ascii_str) > 12:
        fmt = '%Y-%m-%d %H:%M:%S'
    else:
        fmt = '%Y-%m-%d'
    return datetime.datetime.strptime(ascii_str, fmt)


class Head_Table():
    """Wrapper around the header <table> above each post.

    Exposes the poster's name, the post timestamp and the floor label.
    Uses the BeautifulSoup 3 API (fetchNextSiblings / findChild); Python 2
    str/unicode handling throughout.
    """
    def __init__(self,table,page):
        # page: owning TianyaPage; table: this post's header table tag.
        self.page=page
        self.table=table
        
        #print "!%s!"%repr(table)
        #logging.log(logging.INFO, "!%s!"%repr(table))
        self.parse()
        
    def parse(self):
        """Locate the author and floor cells among the first row's <td>s."""
        first_td=self.table.tr.td
        follow_tds=first_td.fetchNextSiblings()
        # The floor cell is the last sibling; the author cell is the one
        # right before it.
        self.floor_info=follow_tds.pop()
        self.author_info=follow_tds.pop()

        
    def get_post_date(self):
        """Extract the post timestamp from the author cell.

        The cell text contains a full-width colon (u'\\uff1a') before the
        timestamp; split on it and parse the right-hand side.
        """
        front=self.author_info.findChild()
        d =front.contents[2]
        #logging.log(logging.INFO, "D%sD"%repr(d))
        #print d
        ss=d.split(u"\uff1a")
        #ss=d.split(u'\xba')
        post_date=unicode_to_datetime(ss[1])
        
        return post_date
    def get_author(self):
        """Return the poster's display name as a unicode string."""
        front=self.author_info.findChild()
        #print front.contents
        a=front.findChild()
        #logging.info("author type:%s"%type(a.contents))
        #return str(a.contents[0]).decode('gb18030')
        #a.contents.append(str(a.contents[0]).decode('gb18030'))
        return str(a.contents[0]).decode('utf-8')

    def get_floor(self):
        """Return the floor label (post position text) as a unicode string."""
        front=self.floor_info.findChild()
        #print self.floor_info
        floor=front.contents[0]
        
        #logging.info("floor type:%s"%type(floor))
        return str(floor).decode('utf-8')
 

 
class Post(db.Model):
    """A single forum post (one floor of a thread) in the GAE datastore."""
    # Display name of the poster.
    author=db.StringProperty()
    # Timestamp parsed from the page (see ascii_to_datetime).
    date=db.DateTimeProperty( )
    # Floor label text (position of the post within the thread).
    floor=db.StringProperty()
    # Post body (concatenated HTML/text of the content div).
    content=db.TextProperty()    

    # URL of the thread page this post was scraped from.
    url=db.StringProperty()
    
    #post_id=db.StringProperty(multiline=False, required=True,default=xmw.get_uuid_hex())
    post_id     =db.StringProperty(multiline=False, required=True)
    thread_id   = db.StringProperty (multiline=False, required=True)
    # 0-based index of the page the post was found on.
    offset  =   db.IntegerProperty()
    
    #inced when a post posted
    seq         =db.IntegerProperty(default=0)
    
    def save(self):
        """Persist the post, stamping a global sequence number from PostCounter."""
        self.seq=PostCounter.inc_and_get()
        self.put()
    
class TianyaPage(xmw.Page):
    """One HTML page of a Tianya forum thread.

    Holds the raw HTML (served as GB18030) plus a BeautifulSoup 3 tree, and
    scrapes the title, thread author, post list and the hidden pagination
    form fields.
    NOTE(review): get_form_cnt() reads self.page_type and calls
    self.reset_page_type(), neither defined in this class -- presumably
    provided by xmw.Page; confirm.
    """
    def __init__(self,html=None,url=None,thread_id=None,offset=None):
        # html may be None: construct with just a url, then call reload()
        # to fetch the content later.
        self.url=url
        self.thread_id=thread_id
        
        self.html=html
        
        if html is not None:
            self.soup=BeautifulSoup(html, fromEncoding="GB18030")
        
        # 0-based page index within the thread; drives get_lst_post() layout.
        self.offset=offset
    
    
        
    def get_lst_page_url(self):
        """
            Return the URL list for the thread (currently just this page's URL).
        """
        return [self.url]
    
    def reload(self):
        """Fetch self.url and (re)build the soup from the response body."""
        #reload from url
        sock = urllib2.urlopen(self.url)
        #page = urllib2.urlopen(url)
        
        self.html=sock.read()
        #soup = BeautifulSoup(page)
        sock.close()
        self.soup = BeautifulSoup(self.html, fromEncoding="GB18030")

    def get_lst_post(self):
        """Scrape every reply post on this page into unsaved Post entities.

        Pairs each content <div> with its header <table>.  Per the disabled
        comment below, the first page's markup yields one extra table, and
        on later pages the first content div is skipped -- hence the
        offset-dependent indexing.
        """
        lst_table   =self.soup.findAll('table',{'align':'center','width':'100%'})
        lst_content =self.soup.findAll('div',{'class':'content','style':'WORD-WRAP:break-word'})
        logging.info("read %d tables"%len(lst_table))
        logging.info("read %d contents"%len(lst_content))
        post_cnt=len(lst_content)
        
        lst_post=[]
        index=0
        """
        print "table:%s"%lst_table[len(lst_table)-1]
        print "content:%s"%lst_content[len(lst_content)-1]
        return lst_post
        """
        length=0
        if self.offset ==0:
            length=post_cnt
        else:
            length =post_cnt -1
            
        while index < length:
            if self.offset==0:
                content=lst_content[index]
            else:
                content=lst_content[index+1]
                
            table=lst_table[index+1]
            """
            #in first page,table counts 1 more
            if self.offset ==0:
                table=lst_table[index+1]
            else:
                table=lst_table[index]
            """
            #print "<B>TABLE %d</B>:%s"%(index,table)
            #print "<B>CONTENT %d</B>:%s"%(index,content)
            
            
            ht=Head_Table(table,self)
            

            #logging.info("CONTENT:%s"%repr(content))
            # Concatenate every child node of the content div into one
            # unicode string (Python 2: str -> unicode via utf-8).
            cc=""
            for c in content.contents:
                cc =cc+ str(c).decode("utf-8")
            #content1=str(content.contents[0]).decode("utf-8")
            content1    =cc
            #content2=content1.encode('ascii')
            #logging.info("url=%s"%self.url)
            #print len(content1)
            #ht.get_author(),\
            logging.info("author:%s,floor:%s"%(ht.get_author(),ht.get_floor()))
            uid=xmw.get_uuid_hex()
            post=Post(author=ht.get_author(),\
                      date=ht.get_post_date(),\
                      floor=ht.get_floor(),\
                      #floor="floor",\
                      content=content1,\
                      #content="content",\
                      url=(self.url).encode('ascii'),\
                      post_id=uid,\
                      thread_id = self.thread_id,\
                      offset = self.offset)
            #post.put()
            lst_post.append(post)
            index=index+1
        
        return lst_post

         
    def is_BAD(self):
        """Return True when the page contains a <div class="x"> element
        (presumably Tianya's error/blocked markup -- confirm), else False."""
        div=self.soup.find('div',{'class':'x'})
        #print type(div)
        if div is None:
            return False
        else:
            return True
        
        

    
    def get_chrAuthor(self):
        """Return the thread author's name as unicode.

        Read from the hidden chrAuthor <input>; when BeautifulSoup leaves
        the value empty (malformed markup), fall back to slicing the raw
        GB18030 HTML around the attribute.
        """
        author_input=self.soup.find('input', {'name':'chrAuthor'})
        #print author_input
        chrAuthor=author_input['value']
        #print chrAuthor
        #print "author is %s"%chrAuthor
        #print "len =%s"%str(len(chrAuthor))
        is_from_soup=True
        if len(chrAuthor) ==0:
            is_from_soup=False
            #print "len ==0"
            ss=self.html.split("name=\"chrAuthor\" value=")[1]
            
            ss=ss.split(">")
            #print "ss=%s"%str(ss)
            chrAuthor=(ss[0]).decode("GB18030")
        
        if is_from_soup is True:
            chrAuthor=chrAuthor.decode('utf-8')
            
        return chrAuthor
        
    def get_title(self):
        """Return the page <title> text as unicode."""
        title_tag = self.soup.find('title')
        title=str(title_tag.contents[0])
        #print str(title_tag.contents[0])
        
        #print "title=%s"%title.decode('utf-8')
        return title.decode('utf-8')
        #return self.get_rs_strTitle_aa()
        
    def get_page_type(self):
        """Classify the pagination style: FORM when the hidden pageForm
        exists, otherwise LINK."""
        lst_pageForm = self.soup.find('form', id='pageForm')
    
        if (lst_pageForm is not None):
            return PAGE_TYPE.FORM 
        else:
            return PAGE_TYPE.LINK
    
    def get_form_cnt(self):
        """Return the thread's total page count for either pagination style.

        NOTE(review): self.page_type / self.reset_page_type() are not
        defined in this class -- confirm they come from xmw.Page.
        """
        cnt = None
        if self.page_type is None:
            self.reset_page_type()
        if self.page_type == PAGE_TYPE.FORM:
            cnt = self.get_page_cnt_Form()
        
        if self.page_type == PAGE_TYPE.LINK:
            cnt = self.get_page_cnt_LINK()
            
        return cnt
    
    def get_idArticleslist_LINK(self):
        """Return (and cache on self) the comma-separated idArticleslist
        hidden-input value, or None when the input is absent."""
        idArticleslist_input = self.soup.find('input',{'name':'idArticleslist'})
        if idArticleslist_input is None:
            return None
        idArticleslist=idArticleslist_input['value']
        
        self.idArticleslist = idArticleslist
        
        return idArticleslist 
    
    def get_page_cnt_LINK(self):
        """Page count for LINK-style pagination.

        The hidden value ends with a trailing comma (see example below), so
        splitting yields one empty trailing element -- hence len-1.
        """
        #<input type="hidden" value="412933,430222,438557,452467,494059,601073,678931,703820,733555,754878," name="idArticleslist">
        #idArticleslist_input = self.soup.find('input',{'name':'idArticleslist'})
        #if idArticleslist_input is None:
        #    return 1
        #idArticleslist=idArticleslist_input['value']
        idArticleslist = self.get_idArticleslist_LINK()
        #print idArticleslist 
        lst_page_id=string.split(idArticleslist, ',')
        
        return len(lst_page_id)-1
    
    def get_page_cnt_Form(self):
        """Page count for FORM-style pagination: number of comma-separated
        entries in the hidden 'apn' input."""
        apn = self.soup.find('input', {'name':'apn'})
        lst_pages = string.split(apn['value'], ',')
        #print "len=%d"%len(lst_pages)
        return len(lst_pages)

    
    def get_rs_strTitle_aa(self):
        """Value of the hidden rs_strTitle_aa input (POSTed for later pages)."""
        
        rs_strTitle_aa = self.soup.find('input', {'name':'rs_strTitle_aa'})

        return rs_strTitle_aa['value']
    def get_intLogo(self):
        """Value of the hidden intLogo input."""
        intLogo = self.soup.find('input', {'name':'intLogo'})
        return intLogo['value']

    def get_rs_permission(self):
        """Value of the hidden rs_permission input."""
        rs_permission = self.soup.find ('input', {'name':'rs_permission'})
        return rs_permission['value']
    def get_apn(self):
        """Value of the hidden apn input (comma-separated list of page ids)."""
        apn = self.soup.find('input', {'name':'apn'})
        return apn['value']    