from google.appengine.ext import db
import string
import datetime
import md5
import urllib
from BeautifulSoup import BeautifulSoup
import urllib2
import xmw  
import logging

class Head_Table():
    """Header <table> of a single Tianya post.

    Locates the author/date cell and the floor cell in the table's
    first row and exposes accessors for author, post date and floor.
    """
    def __init__(self,table,page):
        # table: BeautifulSoup <table> node of one post's header row.
        # page: the TianyaPage this table was found on (kept for context).
        self.page=page
        self.table=table
        
        #print "!%s!"%repr(table)
        #logging.log(logging.INFO, "!%s!"%repr(table))
        self.parse()
        
    def parse(self):
        """Find the floor-info and author-info cells.

        After the row's first <td>, the LAST sibling <td> is the floor
        cell and the one before it is the author/date cell (hence the
        two pop() calls from the end of the sibling list).
        """
        first_td=self.table.tr.td
        follow_tds=first_td.fetchNextSiblings()
        self.floor_info=follow_tds.pop()
        self.author_info=follow_tds.pop()

        
    def get_post_date(self):
        """Return the post's timestamp as a datetime.

        The third content node of the author cell's first child holds
        text ending in a full-width colon (U+FF1A) followed by the
        date, which is parsed via unicode_to_datetime().
        """
        front=self.author_info.findChild()
        d =front.contents[2]
        #logging.log(logging.INFO, "D%sD"%repr(d))
        #print d
        ss=d.split(u"\uff1a")
        #ss=d.split(u'\xba')
        post_date=unicode_to_datetime(ss[1])
        
        return post_date
    def get_author(self):
        """Return the author name as a unicode string (utf-8 decoded)."""
        front=self.author_info.findChild()
        #print front.contents
        a=front.findChild()
        #logging.info("author type:%s"%type(a.contents))
        #return str(a.contents[0]).decode('gb18030')
        #a.contents.append(str(a.contents[0]).decode('gb18030'))
        return str(a.contents[0]).decode('utf-8')

    def get_floor(self):
        """Return the floor label as a unicode string (utf-8 decoded)."""
        front=self.floor_info.findChild()
        #print self.floor_info
        floor=front.contents[0]
        
        #logging.info("floor type:%s"%type(floor))
        return str(floor).decode('utf-8')
 
 
 
class Post(db.Model):
    """One scraped Tianya post (a single floor of a thread)."""
    author=db.StringProperty()    # author's display name
    date=db.DateTimeProperty( )   # when the post was made
    floor=db.StringProperty()     # floor label as shown on the page
    content=db.TextProperty()     # post body (child nodes joined as unicode)

    url=db.StringProperty()       # URL of the page the post was scraped from
    
    #post_id=db.StringProperty(multiline=False, required=True,default=xmw.get_uuid_hex())
    post_id     =db.StringProperty(multiline=False, required=True)    # unique id (uuid hex, assigned by caller)
    thread_id   = db.StringProperty (multiline=False, required=True)  # id of the owning thread
    offset  =   db.IntegerProperty()                                  # 0-based page index the post came from

def ascii_to_datetime(ascii_str):
    """Parse a Tianya timestamp string into a datetime.

    Most posts carry 'YYYY-MM-DD HH:MM:SS', but some old floors (e.g.
    floor 1095 of /techforum/content/16/619502.shtml) have only the
    date part, so strings too short to hold a time component fall back
    to date-only parsing.
    """
    # A date-only string is 10 chars; date+time is 19 — 12 separates them.
    if len(ascii_str) > 12:
        return datetime.datetime.strptime(ascii_str, '%Y-%m-%d %H:%M:%S')
    return datetime.datetime.strptime(ascii_str, '%Y-%m-%d')

class TianyaPage:
    """One fetched HTML page of a Tianya forum thread.

    Wraps the raw GB18030 HTML in BeautifulSoup and exposes helpers for
    extracting posts, hidden form fields and pagination information.
    """
    def __init__(self,html,url,thread_id,offset):
        # html: raw page source (GB18030 bytes); url: source URL;
        # thread_id: id of the owning thread; offset: 0-based page index.
        self.html=html
        self.url=url
        self.soup=BeautifulSoup(html, fromEncoding="GB18030")
        self.thread_id=thread_id
        self.offset=offset
    
    def get_lst_post(self):
        """Parse this page into a list of (unsaved) Post entities.

        Each post pairs a content <div> with a header <table>. The
        table list always has one extra leading entry; on non-first
        pages the content list also has one extra leading entry, so
        the index arithmetic below skips those.
        """
        lst_table   =self.soup.findAll('table',{'align':'center','width':'100%'})
        lst_content =self.soup.findAll('div',{'class':'content','style':'WORD-WRAP:break-word'})
        logging.info("read %d tables"%len(lst_table))
        logging.info("read %d contents"%len(lst_content))
        post_cnt=len(lst_content)
        
        lst_post=[]
        index=0
        """
        print "table:%s"%lst_table[len(lst_table)-1]
        print "content:%s"%lst_content[len(lst_content)-1]
        return lst_post
        """
        length=0
        if self.offset ==0:
            length=post_cnt
        else:
            # non-first pages: the leading content div is skipped below,
            # leaving one fewer post to build
            length =post_cnt -1
            
        while index < length:
            if self.offset==0:
                content=lst_content[index]
            else:
                content=lst_content[index+1]
                
            # the first table on every page is not a post header
            table=lst_table[index+1]
            """
            #in first page,table counts 1 more
            if self.offset ==0:
                table=lst_table[index+1]
            else:
                table=lst_table[index]
            """
            #print "<B>TABLE %d</B>:%s"%(index,table)
            #print "<B>CONTENT %d</B>:%s"%(index,content)
            
            
            ht=Head_Table(table,self)
            

            #logging.info("CONTENT:%s"%repr(content))
            # concatenate every child node of the content div into one
            # unicode string (bytes -> unicode via utf-8)
            cc=""
            for c in content.contents:
                cc =cc+ str(c).decode("utf-8")
            #content1=str(content.contents[0]).decode("utf-8")
            content1    =cc
            #content2=content1.encode('ascii')
            #logging.info("url=%s"%self.url)
            #print len(content1)
            #ht.get_author(),\
            logging.info("author:%s,floor:%s"%(ht.get_author(),ht.get_floor()))
            uid=xmw.get_uuid_hex()
            post=Post(author=ht.get_author(),\
                      date=ht.get_post_date(),\
                      floor=ht.get_floor(),\
                      #floor="floor",\
                      content=content1,\
                      #content="content",\
                      url=(self.url).encode('ascii'),\
                      post_id=uid,\
                      thread_id = self.thread_id,\
                      offset = self.offset)
            #post.put()
            lst_post.append(post)
            index=index+1
        
        return lst_post

         
    def is_BAD(self):
        """Return True when the page contains a <div class='x'>.

        NOTE(review): that div appears to mark a bad/unavailable
        thread page — confirm against live pages.
        """
        div=self.soup.find('div',{'class':'x'})
        #print type(div)
        if div is None:
            return False
        else:
            return True
        
        

    
    def get_chrAuthor(self):
        """Return the thread author from the hidden 'chrAuthor' input.

        Falls back to splitting the raw HTML (decoded as GB18030) when
        BeautifulSoup yields an empty attribute value.
        """
        author_input=self.soup.find('input', {'name':'chrAuthor'})
        #print author_input
        chrAuthor=author_input['value']
        #print chrAuthor
        #print "author is %s"%chrAuthor
        #print "len =%s"%str(len(chrAuthor))
        is_from_soup=True
        if len(chrAuthor) ==0:
            is_from_soup=False
            #print "len ==0"
            ss=self.html.split("name=\"chrAuthor\" value=")[1]
            
            ss=ss.split(">")
            #print "ss=%s"%str(ss)
            chrAuthor=(ss[0]).decode("GB18030")
        
        if is_from_soup is True:
            chrAuthor=chrAuthor.decode('utf-8')
            
        return chrAuthor
        
    def get_title(self):
        """Return the page <title> text as a unicode string."""
        title_tag = self.soup.find('title')
        title=str(title_tag.contents[0])
        #print str(title_tag.contents[0])
        
        #print "title=%s"%title.decode('utf-8')
        return title.decode('utf-8')
        #return self.get_rs_strTitle_aa()
        
    def get_page_type(self):
        """Return PAGE_TYPE.FORM when a #pageForm exists, else PAGE_TYPE.LINK."""
        lst_pageForm = self.soup.find('form', id='pageForm')
    
        if (lst_pageForm is not None):
            return PAGE_TYPE.FORM 
        else:
            return PAGE_TYPE.LINK
    
    def get_form_cnt(self):
        """Return the page count: entries in the comma-separated 'apn' input."""
        apn = self.soup.find('input', {'name':'apn'})
        lst_pages = string.split(apn['value'], ',')
        #print len(lst_pages)
        return len(lst_pages)

    
    def get_rs_strTitle_aa(self):
        """Return the hidden 'rs_strTitle_aa' form value."""
        rs_strTitle_aa = self.soup.find('input', {'name':'rs_strTitle_aa'})

        return rs_strTitle_aa['value']
    def get_intLogo(self):
        """Return the hidden 'intLogo' form value."""
        intLogo = self.soup.find('input', {'name':'intLogo'})
        return intLogo['value']

    def get_rs_permission(self):
        """Return the hidden 'rs_permission' form value."""
        rs_permission = self.soup.find ('input', {'name':'rs_permission'})
        return rs_permission['value']
    def get_apn(self):
        """Return the hidden 'apn' form value (comma-separated page list)."""
        apn = self.soup.find('input', {'name':'apn'})
        return apn['value']

class Bot_Thread(db.Model):
    """Per-thread scraper state machine.

    status moves INIT -> RUN -> SYN; on_SYN falls back to RUN when new
    pages appear, and BAD marks threads that cannot be scraped.
    """
    thread_id=db.StringProperty(multiline=False, required=True)
    
    title=db.StringProperty()            
    created =db.DateTimeProperty(auto_now_add=True)    
    lst_page_url=db.StringListProperty()    # lst_page_url[0] is the thread's first-page URL
    page_cnt=db.IntegerProperty(required=True,default=0)    # if FORM page,it means form counts
    page_type=db.StringProperty(multiline=False)  #LINK or FORM    
    last_scan_post=db.DateTimeProperty()    
    last_scan_thread=db.DateTimeProperty()
    latest_post_date=db.DateTimeProperty(default    =   ascii_to_datetime("1970-01-01 00:00:00"))
    
    
    status=db.StringProperty(required=True,default  = xmw.Thread_Status.INIT)
    chrAuthor=db.StringProperty()
    offset =db.IntegerProperty(default =0)    # pages scanned so far (compared to page_cnt in on_RUN)

    def on_INIT(self):
        """
            1.fetch basic thread info
            2.set thread status to RUN
        """
        s=urllib2.urlopen(self.lst_page_url[0]) 
        html=s.read()
        s.close()
        #soup=BeautifulSoup(html,fromEncoding="GB18030")
        page=TianyaPage(html,self.lst_page_url[0],thread_id=self.thread_id,offset=0)

        is_bad=page.is_BAD()

        if is_bad:
            self.status = xmw.Thread_Status.BAD
            return
        self.title=page.get_title()
        # NOTE(review): 'last_scan' is not a declared property on this
        # model, so this value is never persisted — probably meant
        # last_scan_thread, which is set at the end of this method anyway.
        self.last_scan=datetime.datetime.today()
        self.page_type=page.get_page_type()
        logging.info("page type:%s"%self.page_type)
        self.page_cnt=page.get_form_cnt()
        self.chrAuthor=page.get_chrAuthor()
        self.status=xmw.Thread_Status.RUN
        
        self.last_scan_thread = datetime.datetime.now()
    def on_RUN(self):
        """
            1.refresh thread info
            2.if offset == page_cnt it shoud be SYN
            3.test page type again
        """
        s=urllib2.urlopen(self.lst_page_url[0]) 
        html=s.read()
        s.close()
        #soup=BeautifulSoup(html,fromEncoding="GB18030")
        page=TianyaPage(html,self.lst_page_url[0],self.thread_id,offset=0)
        self.page_cnt=page.get_form_cnt()
        # every known page scanned -> thread is in sync
        if self.page_cnt == self.offset:
            self.status=xmw.Thread_Status.SYN
        
        #when it has more than 2 pages
        self.page_type=page.get_page_type()
        self.last_scan_thread = datetime.datetime.now()
        pass
    def on_SYN(self):
        """Re-check the thread; if new pages appeared, go back to RUN."""
        s=urllib2.urlopen(self.lst_page_url[0]) 
        html=s.read()
        s.close()
        #soup=BeautifulSoup(html,fromEncoding="GB18030")
        page=TianyaPage(html,self.lst_page_url[0],self.thread_id,offset=0)
        page_cnt=page.get_form_cnt()
        if page_cnt > self.page_cnt:
            self.page_cnt = page_cnt
            self.status=xmw.Thread_Status.RUN
            
        self.last_scan_thread = datetime.datetime.now()

    def walk_to_page(self,page_number):
        """Fetch page *page_number* (0-based) of a FORM-paginated thread.

        Page 0 is a plain GET via get_first_page(); later pages are
        fetched by POSTing the hidden pagination-form fields read from
        the first page.
        """
        #assert self.page_type ="FORM"
        #page_number < self.page_cnt
        #page_number >=0
        url = self.lst_page_url[0]
        first_page = self.get_first_page()
        if page_number ==0:
            return first_page
        
        #read post params      
        rs_strTitle_aa = first_page.get_rs_strTitle_aa()
        intLogo = first_page.get_intLogo()
        rs_permission = first_page.get_rs_permission()
        apn = first_page.get_apn()
        
        str_page_number = "" + str(page_number)
        
        #I found that only apn & pID are needed
        # NOTE(review): rs_strTitle_aa goes through repr() here, which
        # sends the quoted/escaped form — confirm the server expects it.
        params_dict={
                               'rs_strTitle_aa':repr(rs_strTitle_aa),\
                               'intLogo':intLogo,\
                               'rs_permission':rs_permission,\
                               'apn':apn,\
                               'pID':str_page_number
                               }
        logging.info("send post req:%s"%params_dict)
        params = urllib.urlencode(params_dict)
        
        sock = urllib.urlopen(url, params)
        html = sock.read()
        #self.soup = BeautifulSoup(soc, fromEncoding="GB18030")
        sock.close()
        
        page    =TianyaPage(html,url,self.thread_id,page_number-1)    # NOTE(review): offset page_number-1 — verify the off-by-one is intended
        return page
        
    def get_last_page(self):
        """Return the thread's last page (index page_cnt-1)."""
        page = self.walk_to_page(self.page_cnt-1)
        return page
    
    def get_first_page(self):
        """GET the thread's first page and wrap it in a TianyaPage."""
        url = self.lst_page_url[0]
        sock = urllib.urlopen(url)
        html = sock.read()            
        sock.close()
        page    =TianyaPage(html,url,thread_id=self.thread_id,offset=0)

        return page
    
class Post_Update_Request(db.Model):
    """A queued request to re-scan a thread's posts."""
    url =db.StringProperty(multiline=False, required=True)    # thread URL to scan
    latest=db.DateTimeProperty(required=True)    # presumably the newest post date known at request time — confirm
    #thread_id =db.StringProperty(multiline=False, required=True)
    accept  =db.DateTimeProperty(auto_now_add=True)    # when the request was created/accepted
    #if True,means we can find it in Progress
    is_in_progress=db.BooleanProperty(default=False)
    is_finished =db.BooleanProperty(default=False)    # True once processing completed

class Progress(db.Model):
    """Scan progress for one thread."""
    thread_id   =db.StringProperty(multiline=False, required=True)
    # NOTE(review): 'setp' is likely a typo for 'step', but renaming it
    # would change the stored schema and any callers, so it stays as-is.
    setp    =db.IntegerProperty(default=1)
    is_finished =db.BooleanProperty(default=False)    
        
class Page(db.Model):
    """A crawled page of a thread (one entity per page URL)."""
    url =db.StringProperty(multiline=False, required=True)
    title   =db.StringProperty(multiline=False, required=True)
    seq =db.IntegerProperty(required=True)    # page sequence number within the thread
    sibling =db.StringListProperty()    # presumably URLs of the thread's other pages — confirm
    lastPostDate    =db.DateTimeProperty()    # date of the newest post on this page

class ThreadRequest(db.Model):
    """A queued request to start scanning a thread."""
    seq =    db.IntegerProperty(required=True)    # ordering of requests
    #thread_id =db.StringProperty(multiline=False, required=True)
    url =db.StringProperty(multiline=False, required=True)    # thread URL to scan
    latest=db.DateTimeProperty(required=True)
    accepted   =db.BooleanProperty(required=True,default=False)    # True once a worker picked it up

class Task_ScanPost_Status:
    """String states a Task_ScanPost can be in."""
    WAIT = 'WAIT'          # queued, not yet picked up
    FINISHED = 'FINISHED'  # processing completed
    WORK = 'WORK'          # currently being processed
    
class Task_ScanPost(db.Model):
    """A unit of post-scanning work for one page (or form offset)."""
    #the following 2 should be assigned when initialized
    type=db.StringProperty(multiline=False, required=True)    # LINK or FORM page type
    page_url=db.StringListProperty(required=True)
    page_cnt=db.IntegerProperty(required=True)
    
    # NOTE(review): this default is evaluated ONCE at class-definition
    # time, so every entity that omits task_id shares the same uuid.
    # Callers must always pass task_id explicitly (the Post model avoids
    # the same trap by requiring post_id with no default).
    task_id =db.StringProperty(multiline=False, required=True,default=xmw.get_uuid_hex())
    #for FORM page to post.useless if its LINK type. 
    offset  =db.IntegerProperty(required=True,default=0)    
    created =db.DateTimeProperty(auto_now_add=True)
    #is_locked=db.BooleanProperty(required=True,default=False)
    status=db.StringProperty(multiline=False, required=True,default=Task_ScanPost_Status.WAIT)
    
    
"""
class LinkThread(db.Model):
    type=db.StringProperty(multiline=False, required=True)  #LINK or FORM
    offset=db.IntegerProperty(required=True)    #
"""

class URL(db.Model):
    """A URL queued for crawling, with a scanned flag."""
    url =db.StringProperty(multiline=False, required=True)
    scaned=db.BooleanProperty()    # True once fetched (sic: 'scaned')

def get_url():
    """Return one URL entity that has not been scanned yet, or None."""
    # db.Query.filter returns the query itself, so the calls chain.
    return URL.all().filter("scaned = ", False).get()

def put_url(url):
    """Persist *url* as a new, not-yet-scanned URL entity."""
    entity = URL(url=url, scaned=False)
    entity.put()


        

def get_chrAuthor(soup):
    """Return the value of the hidden 'idItem' input in *soup*.

    NOTE(review): despite the name, this looks up 'idItem', not
    'chrAuthor' (TianyaPage.get_chrAuthor uses 'chrAuthor') — confirm
    which field is intended.
    """
    author_input=soup.find('input', {'name':'idItem'})
    print author_input
    chrAuthor=author_input['value']
    #print chrAuthor
    return chrAuthor

def get_rs_strTitle_aa(soup):
    """Return the value of the hidden 'rs_strTitle_aa' input in *soup*."""
    title_input = soup.find('input', {'name': 'rs_strTitle_aa'})
    return title_input['value']
    

def get_intLogo(soup):
    """Return the value of the hidden 'intLogo' input in *soup*."""
    logo_input = soup.find('input', {'name': 'intLogo'})
    value = logo_input['value']
    return value
class PAGE_TYPE:
    """Pagination styles a thread page can use."""
    LINK = "LINK"  # plain anchor-based pagination
    FORM = "FORM"  # POST-form based pagination
    
def get_page_type(soup):
    """Classify *soup*: FORM when a #pageForm exists, otherwise LINK."""
    page_form = soup.find('form', id='pageForm')
    if page_form is None:
        return PAGE_TYPE.LINK
    return PAGE_TYPE.FORM

def get_form_cnt(soup):
    """Return the page count of the thread.

    The hidden 'apn' input holds a comma-separated list with one entry
    per page, so the count is the number of entries.
    """
    apn = soup.find('input', {'name': 'apn'})
    # str.split replaces the long-deprecated string.split helper.
    return len(apn['value'].split(','))
    
def get_rs_permission(soup):
    """Return the value of the hidden 'rs_permission' input in *soup*."""
    permission_input = soup.find('input', {'name': 'rs_permission'})
    value = permission_input['value']
    return value

def get_apn(soup):
    """Return the raw comma-separated 'apn' value from *soup*."""
    return soup.find('input', {'name': 'apn'})['value']

def get_title(soup):
    """Return the thread title, taken from the 'rs_strTitle_aa' input."""
    return get_rs_strTitle_aa(soup)


def unicode_to_datetime(unicode_str):
    """Convert a unicode date string to a datetime via its ASCII form."""
    return ascii_to_datetime(unicode_str.encode('ascii'))

def get_last_post_date(soup):
    """Return the datetime of the last post on the page.

    Navigates the final header <table>: the first row's 4th child is
    the author/date cell; its first child's third content node holds
    the date text after a full-width colon (U+FF1A).
    """
    
    lst_table=soup.findAll('table',{'align':'center','width':'100%'})
    t=lst_table.pop()    # last header table == last post on the page
    tr=t.tr
    td=tr.contents[3]
    c=td.contents
    s=c[0]

    d=s.contents[2]
    ss=d.split(u"\uff1a")
    post_date=unicode_to_datetime(ss[1])
    return post_date 

def get_md5(value):
    """Return the hex MD5 digest of repr(value).

    The digest is taken over repr() of the argument (so get_md5('a')
    hashes "'a'", not "a") — preserved for compatibility with any
    stored digests. Fixes over the original: the leftover debug print,
    the no-op m.digest() call, and shadowing the builtin 'str' are
    removed, and the Py2-only md5 module is replaced by hashlib.
    """
    import hashlib  # local import: the module header does not import hashlib
    digest = hashlib.md5()
    # repr() output is ASCII, so utf-8 encoding is loss-free and yields
    # the same bytes the old md5-module code hashed.
    digest.update(repr(value).encode('utf-8'))
    return digest.hexdigest()

def now():
    """Return the current local time as 'YYYY-MM-DD HH:MM:SS'."""
    current = datetime.datetime.today()
    return current.strftime("%Y-%m-%d %H:%M:%S")
def main():
    """Module entry point; nothing to do when run directly."""
    pass


if __name__ == '__main__':
    main()