import glob
import re
import os
import datetime

from BeautifulSoup import BeautifulSoup
from BeautifulSoup import Tag
from BeautifulSoup import Comment as Co


_DEBUG_flag = 1


def _DEBUG(msg):
    global _DEBUG
    if _DEBUG_flag == 1:
        print msg

class NError(Exception):
    '''Generic numbered error: pairs a numeric id with a message string.'''

    def __init__(self, id, strerror):
        # keep both pieces accessible to callers/loggers
        self.id = id
        self.strerror = strerror
    
class InputError(Exception):
    '''Raised when a constructor is given an unusable argument combination.'''

    def __init__(self, string):
        self.msg = string

    def __str__(self):
        # render the stored message, whatever its type
        return str(self.msg)

class ParseError(Exception):
    '''
    Raised when a crawled page cannot be parsed.

    Carries a numeric error id plus optional context: a short title, the
    source url and the offending markup fragment (``code``), all of which
    save_error() uses when logging.
    '''

    def __init__(self, id, title=None, url=None, code=None):
        self.id = id
        self.url = 'no info.' if url is None else url
        self.title = title
        self.code = code
        self.strerror = 'no message'

    def __str__(self):
        return str(self.id)

    def show_code(self):
        '''Return a pretty-printed dump of the stored markup, if any.'''
        if self.code is None:
            return 'no code'
        return self.code.prettify()

    def set_url(self, url):
        self.url = url

    def set_title(self, title):
        self.title = title

    def set_id(self, id):
        self.id = id

    def set_message(self, message):
        self.strerror = message

    def set_code(self, code):
        self.code = code

class CheckError(ParseError):
    '''
    Raised by DataCheck when crawled data fails a consistency check.

    *floor* identifies the offending comment; it is folded into the
    error message so save_error() records it.
    '''

    def __init__(self, id, path, floor, title=None):
        self.id = id
        self.url = path
        # BUG FIX: the original never initialised ``code``, so the
        # inherited show_code() raised AttributeError on CheckError.
        self.code = None
        self.strerror = 'no message'
        self.set_message('floor  = ' + str(floor))
        # 'NA' keeps the log format aligned when no title is supplied.
        self.title = title if title is not None else 'NA'


        

class missComment(ParseError):
    '''
    Marks a deleted or hidden comment slot; Topic._comment counts these
    (badCmt) instead of logging them.
    '''

    def __init__(self, id, title=None, code=None):
        self.id = id
        self.url = 'no info.'
        self.title = title
        self.code = code
        # CONSISTENCY FIX: ParseError always carries strerror; without it
        # save_error() would fail if a missComment were ever logged.
        self.strerror = 'no message'

    
class Topic:
    '''
    One crawled verycd.com topic, loaded from files saved on disk:
    <dir>/topic.html (metadata), <dir>/pageview.html (view counter) and
    <dir>/comments/*.html (comment pages).
    '''

    def __init__(self, id=None, path=None, url=None):

        '''
        Load a topic from disk; at least one of path/url is required
        (InputError otherwise) and missing pieces are derived from the
        one given.

        Attributes set here and by the _time/_author/_view helpers:
        # self.path string (topic directory, with trailing '/')
        # post_time unicode _time()
        # update_time unicode _time()
        # id str _author()
        '''
        
        if path == None and url == None:
            raise InputError

        if path != None:
            if path.endswith('/'):
                path = path[:-1]#remove '/' at the end

        if url != None and path == None:
            path = url[:url.rfind('/')]
        
        if id == None:
            id = path[path.rfind('/')+1:]

        if url == None:
            url = path+'/topic.html'

        path = path + '/'
        self.path = path
        self.id = id
        self.numComments = 0
        
        
        with open(url,'r') as file:
            html = file.read()
        soup = BeautifulSoup(html)

        try:
            self._time(soup)
            self._author(soup)
            self._view(path)
        except ParseError, e:
            e.set_url(path)
            if e.code != None:
                # Distinguish pages the site itself served broken/deleted
                # from our own parse failures; both markers are Chinese
                # phrases in the saved markup.
                temp1 = e.code.find(text=\
                                    re.compile(u'\u6709\u9519')) #error on web
                temp2 = e.code.find(text=\
                                    re.compile(u'\u5f88\u62b1\u6b49')) #deleted
                if temp1 == None and temp2 == None:
                    e.set_url(path)
                    save_error(e)
                else:
                    # identified errors, jump to call for summary info
                    raise ParseError(id = 2222, title= 'Failed Web') 
            raise # other errors
            save_error(e)  # NOTE(review): unreachable -- the raise above always exits first

        try:
            self._comment(path)
            # sort comments ascending by floor (Python-2 cmp-style comparator)
            self.comments.sort(lambda x,y: cmp(x.floor, y.floor))
        except ParseError, e:
            e.set_url(url)
            '''
            if e.code != None:
                temp1 = e.code.find(text=\
                                    re.compile(u'\u6709\u9519')) #error on web
                temp2 = e.code.find(text=\
                                    re.compile(u'\u5f88\u62b1\u6b49')) #deleted
                if temp1 == None and temp2 == None:
                    e.set_url(path)
                    raise # unexpected Error
                else:
                    raise ParseError(id = 1000,title='Topic():  ')
            raise # other errors
            '''
            save_error(e)
        except IOError, e:
            raise ParseError (1018,'No pageview file')
            

    def write(self, file):
        '''
        Append one tab-separated summary line for this topic to *file*
        and dump its comments into <dir>/<self.id>/comments.txt.

        Columns: id, numComments, badCmt, post_time, update_time, au_id,
        comments-file name, pageview.
        NOTE(review): when an attribute is missing a separator is written
        AND the unconditional separator below still runs, producing a
        double tab.
        '''

        separator = '\t'

        path = file.name[:file.name.rfind('/')+1] #keep '/'
        try:
            os.makedirs(path+str(self.id))
        except OSError, e:
            pass # path exist already
        except Exception, e:
            print e
        try:
            self.comments  # attribute probe: AttributeError when _comment never ran
        except AttributeError, e:
            raise ParseError(1011,'Error: no comments')
        
        try:
            cmtfile = open(path+str(self.id)+'/comments.txt','w')
        except IOError, e:
            print e


        # 1: topic id
        try:
            file.write(self.id)
        except AttributeError:
            file.write(separator)

        file.write(separator)

        # 2: number of comments (highest floor seen)
        try:
            file.write(str(self.numComments))
        except AttributeError:
            file.write(separator)

        file.write(separator)
        
        # 3: count of deleted/hidden comment slots
        if self.numComments != 0:
            try:
                file.write(str(self.badCmt))
            except AttributeError:
                pass 
        else:
            file.write('NA')

        file.write(separator)

        # 4: post time (epoch-seconds string)
        try:
            file.write(self.post_time)
        except AttributeError:
            file.write(separator)

        file.write(separator)

        # 5: update time (epoch-seconds string)
        try:
            file.write(self.update_time)
        except AttributeError:
            file.write(separator)

        file.write(separator)

        # 6: author id
        try:
            file.write(self.au_id)
        except AttributeError:
            file.write(separator)

        file.write(separator)
        '''
        try:
            file.write(self.issue_time.show_year())
        except AttributeError:
            file.write('    ')
        
        file.write(separator)
        '''
        # 7: name of the per-topic comments file
        if self.comments != 'NA':
            try:
                file.write(cmtfile.name)
            except AttributeError:
                file.write(separator)
        else:
            file.write('NA\t\t\t')
        file.write(separator)

        # 8: pageview counter
        try:
            file.write(self.pageview)
        except AttributeError:
            file.write(separator)

        file.write('\n')
        file.flush  # NOTE(review): missing () -- this does not actually flush

        '''
        cmtfile.write('floor\t')
        cmtfile.write('        time    \t')
        cmtfile.write(' user id\t')
        cmtfile.write('timestamp\n')
        '''
        if self.comments != 'NA':
            for cmt in self.comments:
                try:
                    cmt.write(cmtfile)
                except IOError, e:
                    print e
        cmtfile.close()


    def conver_time(self, dtime):
        '''
        Convert a 'YYYY/MM/DD hh:mm:ss' string (e.g. 2010/08/13 16:21:01)
        into a datetime.datetime.

        NOTE(review): duplicates Comment._time; the two are candidates
        for a shared module-level helper.
        '''
        date = dtime[:dtime.find(' ')]
        time = dtime[dtime.find(' ')+1:]

        year = int(date[:date.find('/')])
        month = int(date[date.find('/')+1:date.rfind('/')])
        day = int(date[date.rfind('/')+1:])

        hr = int(time[:time.find(':')])
        min = int(time[time.find(':')+1:time.rfind(':')])
        sec = int(time[time.rfind(':')+1:])
        time =datetime.datetime(year,month,day,hr,min,sec)
        return time


        
    '''
    _time will find the post time, update time of the topic and issue time of
    the content. Attributes post_time, update_time and issue_time are set.
    '''
    def _time(self, soup):
        # Flags guard against duplicate post/update blocks on one page.
        post_fg = 0
        updt_fg = 0

        '''
        It's gonna find 'date-time' span tags followed by post and update.
        '''

        timelist = soup.findAll('span',attrs={'class':'date-time'})
        
        if len(timelist) < 1: #no target block
            self.post_time = 'NA'
            self.update_time = 'NA'
            raise ParseError(1001,\
                            title = 'time: no date-time block',\
                            code = soup)

        for time in timelist:
            if time.nextSibling == u' \u53d1\u5e03 | ': #Chinese-post
                if post_fg == 1:# overwrite prevention
                    self.post_time = 'NA'
                    self.update_time = 'NA'
                    raise ParseError(1002,\
                                     title = 'no post time or update time',\
                                     code = soup)
                
                self.post_time = unicode(time.contents[0])

                # '%s' is a glibc strftime extension: epoch seconds as a string.
                dt1 = self.conver_time(self.post_time)
                self.post_time = dt1.strftime('%s')

                post_fg = 1

            if time.nextSibling == u' \u66f4\u65b0': #Chinese-update
                if updt_fg == 1: #overwrite prevention
                    self.post_time = 'NA'
                    self.update_time = 'NA'
                    raise ParseError(1003,\
                                     title='no post time or update time',\
                                    code = soup)
                self.update_time = unicode(time.contents[0])

                dt2 = self.conver_time(self.update_time)
                self.update_time = dt2.strftime('%s')

                updt_fg = 1


        if post_fg == 0 or updt_fg == 0:
            self.post_time = 'NA'
            self.update_time = 'NA'
            raise ParseError(1004,\
                             title='no post time or update time',\
                            code = soup)
        
        '''
        Following code finds a time containg year,month,day after keyword issue
        time in Chinese.  Once a target format is found, break out the for loop
        in case of user-post simular content.

        It assumes the issue-info appears before user content and year is 
        obligate while month and day could be omitted.

        '''
        issuelist = soup.findAll(text=u'\u53d1\u884c\u65e5\u671f')
        # Chinese- issue time
        return True #skip issue time in this version

        # NOTE(review): dead code below -- kept for when issue-time parsing
        # is re-enabled.
        try:
            for key in issuelist:
                self.issue_time = Issue(key)
                break
        except ParseError:
            raise

    def _view(self, path):
        '''
        Read the pageview counter from <path>/pageview.html, whose body
        looks like:  var current_page_views='15527'

        Sets self.pageview to the quoted value (kept as a string), or to
        'NA' before raising ParseError 1019 when the file is missing.
        '''
        try: 
            # NOTE(review): file handle is never closed on the success path.
            file = open(path+'pageview.html','r')
            s = file.read()
        
            # value between the first and last single quote
            self.pageview = s[s.find('\'')+1:s.rfind('\'')]
        except IOError, e:
            self.pageview = 'NA'
            raise ParseError(id=1019, title = 'no page viewfile')


    '''

    _author locate author block by div with id = 'weluser'. The correct
    format should contain one author block only. user id and name are in strong
    tag. 

    '''
    def _author(self, soup):

        authorlist = soup.findAll('div', id='weluser')

        if len(authorlist) > 1:
            # logged but not fatal: the first block is still used below
            print 'Error: invalid author'
            save_error(ParseError(id=888, title='length of the authorlist >1',\
                                  code = soup))
        if len(authorlist) < 1:
            self.au_id = 'NA'
            raise ParseError(1005,\
                             title='author: no author tag',\
                             code = soup)

        author = authorlist[0].find('strong',id='username')

        
        # href looks like '/members/<id>/': strip the prefix and trailing '/'
        id = author.contents[0]['href'][9:-1]
        if len(id) < 8:
            id = id + '\t'  # for format

        self.au_id = id
        '''
        skip author's name
        '''
        try:
            if len(author.contents[0].contents) >= 1:
               self.au_name = author.contents[0].contents[0]
        except Exception, e:
            pass


    def _comment(self, path):
        '''
        Parse every <path>/comments/*.html page into Comment objects.

        Sets self.comments (list, or 'NA' when nothing parsed), self.badCmt
        (count of deleted/hidden slots) and raises self.numComments to the
        highest floor seen.
        '''

        comments = []
        badCmt = 0
        pathlist = glob.glob(path+'comments/*.html')

        if len(pathlist) < 1:
            # NOTE(review): bare raise with no active exception is itself
            # an error (TypeError/RuntimeError) at runtime.
            raise 

        for path in pathlist:
            try:
                file = open(path,'r')
                html = file.read()

                soup = BeautifulSoup(html)
                cmtlist = soup.findAll('div',\
                            {'class':re.compile(\
                                'post-comments ')})
            except IOError, e:
                # NOTE(review): on IOError cmtlist keeps its previous value
                # (or is unbound on the first file) yet the loop continues.
                print e
            for soup in cmtlist: 
                try:
                    cmt = Comment(soup)
                    comments.append(cmt)
                    if cmt.floor > self.numComments:
                        self.numComments = cmt.floor
                except missComment, e:
                    badCmt += 1
                    continue 

                except ParseError, e:
                    if e.id == 1016:
                        continue
                    # NOTE(review): this assignment clobbers the bound
                    # method; almost certainly meant e.set_url(self.path).
                    e.set_url = self.path
                    save_error(e)
                    continue

        if len(comments) < 1:
            self.comments = 'NA'
            self.numComments = 0
            raise ParseError(1006,title='Comment: no ')
                
        self.comments = comments
        self.badCmt = badCmt 

class Comment:
    def __init__(self, soup):
        '''
        Parse one comment block (a div found by class 'post-comments...').

        Attributes set:
        arrive time of comments --time unicode
        timestamp -- epoch-seconds string (strftime '%s', a glibc extension)
        author id--author str
        sequence number of comment--floor int

        Raises missComment for deleted/hidden slots and ParseError for
        malformed markup.
        '''
        #soup.findAll(text=re.compile(u'\u697c\u5df2'))
        if soup == None:
            raise ParseError(1007,\
                             title='Comment: None input',\
                            code=soup)

        date = soup.find('span',{'class':'date-time'})
        if date == None: # invalid comment
            # A deleted/hidden slot still shows 'floor N has been...' text.
            tag = soup.find(text=re.compile(u'\d+\u697c\u5df2'))

            if tag == None:
                raise ParseError(1008,\
                                 title='Comment: None input',\
                                code=soup)
            else:
                raise missComment(1012, title='deleted or hided comment',\
                                  code=soup) #expected err,deleted or hided cmt

        self.time = unicode(date.contents[0])
        
        dt = self._time(self.time)
        self.timestamp = dt.strftime('%s')

        floor = soup.find('span',{'class':'date'}).nextSibling
        self.floor = int(floor[12:floor.find(u'\u697c')])#skip 2 &nbsp,

        contentlist = soup.findAll('div',{'class':'post-contents'})

        # NOTE(review): the two raises below pass *soup* positionally into
        # ParseError's ``url`` slot, not ``code``.
        if len(contentlist) == 0:
            raise ParseError(1013,'Comment: none content',soup)
        if len(contentlist) > 1:
            raise ParseError(1014,'Comment: incorrect format',soup)

        #length = self._extract(soup)

        #if length == 0:
        #    raise ParseError(1016,'tag only comment is posted')
        
        #self.size = length

        user = soup.find('a', href=re.compile('/members/'))
        if user == None:
            raise ParseError(1009,'Comment: None input',code=soup)

        # href '/members/<id>/' -> '<id>'
        self.author = str(user['href'][9:-1])

    def _extract(self, soup):
        '''Dispatch size counting on how many inner_content blocks exist.'''
        innerlist = soup.findAll('p',{'class':'inner_content'})
        if len(innerlist) == 1:
            return self._normal(innerlist[0])
        if len(innerlist) == 2:
            return self._double(soup)
        raise ParseError(1017,'unexpected comment format')


    def _double(self, soup):
        '''
        Size a quote-plus-reply comment (two inner_content blocks).
        Sets self.lines and returns the estimated size in bytes.
        '''
        if len(soup.contents) != 5:
            raise ParseError(1015,'Error in format', soup)
        tag = soup.contents[3].contents
        strings = []
        counter = 0
        # Strip Tag/Comment nodes in place; extract() shifts the list, so
        # the index is stepped back to re-examine the same position.
        while counter < len(tag):
            if isinstance(tag[counter], Tag):
                tag[counter].extract()
                counter -= 1
            elif isinstance(tag[counter], Co):
                tag[counter].extract()
                counter -= 1
            else:
                if tag[counter].lstrip().rstrip() != u'':
                    strings.append(tag[counter].lstrip().rstrip())
            counter += 1

        chars = 0
        # NOTE(review): strings[i-1] starts from the last element; the total
        # still counts every element exactly once.
        for i in range(len(strings)):
            chars += len(strings[i-1])
        chars *= 2  # rough size estimate: 2 bytes per character

        l = len(strings)
        if l == 0:
            l = 1
        self.lines = l

        return chars
    
    def _normal(self, soup):
        '''
        Count characters in a comment that has a single inner_content tag.

        Sets self.lines (number of non-empty text chunks, minimum 1) and
        returns the size of the comment in terms of bytes (2 per char).
        '''
        strings = []
        counter = 0
        # Same in-place Tag/Comment stripping as _double (see note there).
        while(counter < len(soup.contents)):
            if isinstance(soup.contents[counter], Tag):
                soup.contents[counter].extract()
                counter -= 1
            elif isinstance(soup.contents[counter], Co):
                soup.contents[counter].extract()
                counter -= 1
            else:
                if soup.contents[counter].lstrip().rstrip()!= u'':
                    strings.append(soup.contents[counter].lstrip().rstrip())
            counter += 1

        chars = 0
        for i in range(len(strings)):
            chars += len(strings[i-1])
        chars *=2

        l = len(strings)
        if l == 0:
            l = 1
        self.lines = l

        return chars




    def write(self, file):
        '''Append this comment as one tab-separated line to *file*.'''

        separator = '\t'

        file.write(str(self.floor))
        file.write(separator)
        
        file.write(self.time)
        file.write(separator)
        
        file.write(self.author)
        file.write(separator)

       # file.write(str(self.size))
       # file.write(separator)

       # file.write(str(self.lines))
       # file.write(separator)

        file.write(self.timestamp)
        file.write('\n')

        file.flush  # NOTE(review): missing () -- this does not actually flush

    def _time(self, dtime):
        '''
        # convert format like 2010/06/09 10:20:40
        # into datetime object
        # NOTE(review): duplicate of Topic.conver_time.
        '''
        date = dtime[:dtime.find(' ')]
        time = dtime[dtime.find(' ')+1:]

        year = int(date[:date.find('/')])
        month = int(date[date.find('/')+1:date.rfind('/')])
        day = int(date[date.rfind('/')+1:])

        hr = int(time[:time.find(':')])
        min = int(time[time.find(':')+1:time.rfind(':')])
        sec = int(time[time.rfind(':')+1:])
        time =datetime.datetime(year,month,day,hr,min,sec)
        return time

            
class Issue:
    '''
    Release ("issue") date of a topic's content, parsed from the text node
    that follows the Chinese "issue date" label.  Parts are kept as
    strings (month/day zero-padded to two chars); missing parts are None.
    '''

    def __init__(self, date):
        '''
        *date* is the label text node found by Topic._time; the value
        string is the nextSibling of its grandparent, e.g.
        u'2010<year-char>08<month-char>13<day-char>'.  Raises ParseError
        1010 when neither a year nor a month marker is present.
        '''
        # hoist the repeated navigation; every branch reads the same node
        text = date.parent.parent.nextSibling
        year = text.find(u'\u5e74')   #Chinese-year
        month = text.find(u'\u6708')  #Chinese-month
        day = text.find(u'\u65e5')    # Chinese-day
        if year != -1:
            self.year = text[:year]
            if month != -1:
                self.month = text[year+1:month]
                if len(self.month) < 2:
                    self.month = '0' + self.month
            else:
                self.month = None
            if day != -1:
                self.day = text[month+1:day]
                if len(self.day) < 2:
                    self.day = '0' + self.day
            else:
                self.day = None
        elif month != -1:
            # BUG FIX: the original assigned the local ``year`` instead of
            # ``self.year``, so show()/show_year() crashed with an
            # AttributeError for month-only dates.
            self.year = None
            self.month = text[:month]
            if len(self.month) < 2:
                self.month = '0' + self.month
            if day != -1:
                self.day = text[month+1:day]
                if len(self.day) < 2:
                    self.day = '0' + self.day
            else:
                self.day = None
        else:
            raise ParseError(1010, title='no issue time',code=date)


    def show(self):
        '''Format as 'YYYY.MM.DD'; blanks stand in for a missing year/day.'''
        if self.year == None:
            date = '     '
        else:
            date = self.year

        if self.month != None:
            if date != '     ':
                date = date + '.' + self.month
            else:
                date = date + self.month

        if self.day != None:
            date = date + '.' + self.day
        else:
            date = date + '   '
        return date

    def show_year(self):
        '''Return the year, or 'MM.DD' when only month/day are known.'''
        if self.year != None:
            date = self.year
        else:
            date = self.month + '.' + self.day

        return date




def readTopic(file_url): 
   
    '''
    feed the web to soup
    cut id out of file url
    create a topic instance
    '''
    try:
        input = open(file_url,'r')
        html = input.read()
        soup = BeautifulSoup(html)
        id = file_url[file_url.rfind('/')+6:\
                              file_url.rfind('.')]
    except IOError, e:
        print e


    path = file_url[:file_url.rfind('/')+1]
    try:
        topic = Topic(int(id), path)
    except ValueError:
        print 'ID = %s is not a pure number' %id

    return topic


def write_title(file):
    '''Write the tab-separated column header row for topics.txt.'''
    columns = (
        'topic',
        'cmt',
        '   posted date   ',
        '   updated date   ',
        'author  ',
    )
    for label in columns:
        file.write(label + '\t')
    file.write('comments url \n')

def urlcmp(x, y):
    '''
    Comparator for archive paths like '.../No.15401-15500/293982/topic.html'.

    Orders primarily by the range start after 'No.' (15401 above) and, on
    a tie, by the id in the last directory component (293982 above).
    Returns -1/0/1 following the classic cmp() contract.
    '''
    def _cmp(a, b):
        # COMPAT FIX: equivalent of the Python-2-only cmp() builtin for
        # ints; identical results, also valid on Python 3.
        return (a > b) - (a < b)

    result = _cmp(int(x[x.find('No.')+3:x.find('-')]),
                  int(y[y.find('No.')+3:y.find('-')]))
    if result == 0:
        xtag = x.rfind('/')
        ytag = y.rfind('/')
        result = _cmp(int(x[x.rfind('/',0,xtag)+1:xtag]),
                      int(y[y.rfind('/',0,ytag)+1:ytag]))
    return result

def save_error(e):

    try:
        errf = open('./err_2.txt', 'a')
    except IOError, e:
        print e
    try:
        errf.write('error id= ' + str(e.id) +'\t' + str(e.title) + '\n')
        errf.write('url = '+ e.url +'\n')
        errf.write(e.strerror + '\n')
        errf.write('\n')
        errf.flush()
    except Exception, e:
        pass
        
    errf.close()

class DataCheck:
    '''
    # DataCheck does different kinds of verification of the data crawled
    # from verycd.com
    '''
    def __init__(self):
        '''Stateless checker; nothing to initialise.'''
        pass

    def verify_timestamp(self, topic):
        '''
        # Check every comment arrived no earlier than the topic was posted:
        # topic.post_time (epoch string) vs each comment.timestamp (epoch
        # string).  Raises CheckError(100, ...) naming the first offending
        # comment's floor.
        
        # Useful attributes of topic: 
            post_time(timestamp - str),
            comments(instance list - list)
        # Useful attributes of comment:
            timestamp(timestamp - str)
            floor (sequnce number - int)
        '''
        # (The original kept an unused ``counter`` local; dropped.)
        for cmt in topic.comments:
            if int(topic.post_time) - int(cmt.timestamp) < 0:
                raise CheckError(100, topic.path, cmt.floor,\
                                 'Wrong Comment Timestamp')


def parse():
    '''
    glob out all topics under movie folder
    sort them according to its urlcmp in reverse order. Newest in the first

    '''
    topiclist = glob.glob('./archives/movie/*/*/topic.html')
    topiclist.sort(lambda x,y: urlcmp(x,y))

    topiclist.reverse()

    ch = CheckError()
    #write in dataX dir
    try:
        os.makedirs('./data5/')
    except:
        pass

    try:
        file = open('./data5/topics.txt','w')
    except IOError, e:
        raise
    
    FailedWeb = 0
    Web = len(topiclist)

    for i in topiclist:
        try:
            t = Topic(url=i)
            t.write(file)
            ch.verify_timestamp(t)
            file.flush()
        except CheckError, e:
            save_error(e)
            file.flush()
            continue
    
        except ParseError, e:
            if e.id == 2222:
                FailedWeb += 1 
                continue # expected error, do not save
            save_error(e)
    file.close()



    print 'Summary:\n'
    print 'There are %d bad Topics out of %d Topics in total\n'%(FailedWeb, Web)

    return True
'''
Compensate exception of data3
For the rest data, generate data4
'''
def parse_new():

    topiclist = glob.glob('./archives/movie/*/*/topic.html')
    topiclist.sort(lambda x,y: urlcmp(x,y))


    #write in dataX dir
    try:
        os.makedirs('./data5/')
    except:
        pass

    try:
        file = open('./data5/topics.txt','w')
    except IOError, e:
        raise
    
    FailedWeb = 0
    Web = len(topiclist)

    for i in range(0, 13500):
        try:
            t = Topic(url = topiclist[i])
            t.write(file)

            file.flush()
    
        except ParseError, e:
            if e.id == 2222:
                FailedWeb += 1 
                continue # expected error, do not save
            save_error(e)
    file.close()

    print 'Summary:\n'
    print 'There are %d bad Topics out of %d Topics in total\n'%(FailedWeb, Web)
    return True

def test():
    '''Manual smoke test: build a Topic from one known local archive dir.'''
    sample = '/home/hugh/code/cmtverycd/archives/movie/No.27701-27800/2817786/'
    return Topic(path=sample)

if __name__ == '__main__':
    # Script entry point: parse every crawled topic and write data5/topics.txt.
    i = parse()
    

    '''
    html=urllib2.urlopen('http://www.verycd.com/topics/2826555/comments/page2')
    soup = BeautifulSoup(html)
    cmtlist = soup.findAll('div',{'class':re.compile('post-comments ')})
    al = []
    for i in cmtlist:
        temp = Comment(i)
        al.append(temp)
    '''


