#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Kevin Hanson
#
# 2010-12-04
#
# Notes:
# Alternatives to try: the html2text Python module, driving lynx from Python, or the Beautiful Soup library for Python
#
# Open question: how to execute the JavaScript embedded in HTML pages in order to extract the data it generates
#

import sys
import re
import sqlite3
import logging
import httplib
import urllib
from itertools import groupby
from HTMLParser import HTMLParser
from HTMLParser import HTMLParseError

HEADERS = {'Connection':'keep-alive', 'User-Agent':'Firefox', 'Content-Type':'application/x-www-form-urlencoded'}
SITEALL = ('bbs.sjtu.edu.cn', 'book.dangdang.com',
        'bbs.sjtu.edu.cn/bbsqry?userid=KevinHanson',
        'bbs.sjtu.edu.cn/bbstdoc,board,PPPerson.html',
        'http://bbs.sjtu.edu.cn/bbstopb10',
        'http://bbs.sjtu.edu.cn/bbsall',
        'http://bbs.sjtu.edu.cn/bbsdoc,board,Association.html',
        'http://bbs.sjtu.edu.cn/bbscon,board,Arsenal,file,M.1291458628.A.html',
        'http://bbs.sjtu.edu.cn/bbscon,board,water,file,M.1291463106.A.html'
        )
SITE = {'bbs_sjtu':'bbs.sjtu.edu.cn', 'dangdang':'book.dangdang.com'}
TABLENAME = 'bbsuserinfo'
COLUMN = ['id', 'name', 'login', 'net_age', 'constellation', 'date', 'ip1', 'ip2', 'ip3', 'article', 'state']
USER_INFO = {COLUMN[0] : COLUMN[1:],
             'KevinHanson' : [''] * (len(COLUMN) - 1)
             }

DEPTH = 4
PAGECODEC = 'gb2312'
CODEC = 'utf8'

DIRECTORYSITE = 'wwwin-tools.cisco.com/dir'
DETAILS = 'http://wwwin-tools.cisco.com/dir/details/haiyuzha'
REPORTS = 'http://wwwin-tools.cisco.com/dir/reports/xuawang'

class HTMLParser(HTMLParser):
    ''' Site pages parser
    '''
    def __init__(self):
        HTMLParser.__init__(self)
        self.charset = ''
        self.rawdata = ''
        self.usrname = ''
        
    def handle_starttag(self, tag, attrs):
        if tag == 'meta':
            if isinstance(attrs, list):
                self.charset = attrs[1][1].split('=')[1];
            elif isinstance(attrs, str):
                print 'String', attrs
        elif tag == 'a':
            print 'A hyper-link', attrs

    def handle_endtag(self, tag):
        pass

    def print_usr_name(self):
        print self.usrname

class UserInfoParser(HTMLParser):
    '''
    User information pages from BBS of SJTU site parser
    '''
    def __init__(self):
        HTMLParser.__init__(self)
        self.charset = ''
        self.rawdata = ''
        self.user_info = {'id' : '', 'name' : '', 'login' : 0, 'age' : 0, 
                          'constellation' : '', 'date' : '', 'ip' : '', 'ip2' : '', 'ip3' : '', 'article' : 0}
        self.user_info2 = {7  : ['id', ''],
                           8  : ['name', ''],
                           10 : ['login', ''],
                           12 : ['age', 0],
                           14 : ['constellation', ''],
                           16 : ['date', ''],
                           18 : ['ip', ''],
                           28 : ['article', 0]}
        self.flag_in = {'table' : False, 'pre' : False, 'font' : False}
        self.data_index = {}
        self.index = 0

    def handle_starttag(self, tag, attrs):
        if tag == 'meta':
            if isinstance(attrs, list):
                self.charset = attrs[1][1].split('=')[1];
            elif isinstance(attrs, str):
                self.charset = str
            print 'Page charset: ', self.charset
        elif tag == 'table':
            self.flag_in['table'] = True
#            print 'A hyper-link', attrs
        elif tag == 'pre':
            self.flag_in['pre'] = True
        elif tag == 'font':
            self.flag_in['font'] = True

    def handle_endtag(self, tag):
        if tag == 'table':
            self.flag_in['table'] = False
        elif tag == 'pre':
            self.flag_in['pre'] = False
        elif tag == 'font':
            self.flag_in['font'] = False

    def handle_data(self, data):
        if self.index == 7:
            data = data[1:]
        try:
            self.user_info2[self.index][1] = data.split(' ')[0].strip().decode(PAGECODEC).encode(CODEC)
#            print self.index, '---->', self.user_info2[self.index][1]
        except:
            pass
        self.index += 1

    def print_usr_name(self):
        print self.usrname
        
    def feed(self, data):
        HTMLParser.feed(self, data)

    def get_user_info(self):
        for i in self.user_info2:
            self.user_info[self.user_info2[i][0]] = self.user_info2[i][1]
        return self.user_info

    def reset(self):
        HTMLParser.reset(self)

class UIDSpider(object):
    '''UIDSpider, A spider crawls throught directory collection user ID
    informations of all employees.
    '''
    def __init__(self):
        object.__init__(self)

        self.logger = logging.getLogger('%s.%s' % (__file__, 
                                                   self.__class__.__name__))

        self.name = 'UIDSpider'
        self.http_conn = httplib.HTTPConnection(DIRECTORYSITE)
        COOKIE = {'Connection':'keep-alive', 'User-Agent':'IE8', 'Cookie':''}
        self.db_conn = sqlite3.connect('./uid.sqlite')
        self.cursor = self.db_conn.cursor()

    def __str__(self):
        return self.name

    def db_reset(conn):
        cursor = conn.cursor()
        try:
            cursor.execute('''CREATE TABLE %s  (id            TEXT UNIQUE,        
                                                name          TEXT,
                                                login         TEXT,
                                                net_age       INT,
                                                constellation TEXT,
                                                date          TEXT,
                                                ip1           TEXT,
                                                ip2           TEXT,
                                                ip3           TEXT,
                                                article       INT,
                                                state         TEXT)''' % TABLENAME)
        except sqlite3.DatabaseError, e:
            print e
        conn.commit()

    def get_id_info(conn):
        cursor1 = conn.cursor()
        cursor2 = conn.cursor()

        try:
            cursor1.execute("SELECT id FROM %s" % TABLENAME)
        except sqlite3.DatabaseError, e:
            print e

        uip = UserInfoParser()
        loop = 0
        for id in cursor1.fetchall():
#
#        loop += 1
#        if loop > 10:
#            return
            user_info_url = 'http://bbs.sjtu.edu.cn/bbsqry?userid=' + id[0].decode(PAGECODEC)
            user_info_page = urllib.urlopen(user_info_url)
            content = user_info_page.read()

#        print len(content), content[:1100].decode(PAGECODEC)
        try:
            uip.feed(content[:1200].decode(PAGECODEC))
        except UnicodeError, e:
            print e
        except HTMLParseError, e:
            print e
        
        user_info = uip.get_user_info()
        assert user_info

        try:
            cursor2.execute("UPDATE %s SET name = '%s', login = %d, net_age = %d, constellation = '%s',    \
                                           date = '%s', ip1 = '%s', article = %d WHERE id = '%s'"
                            % (TABLENAME, user_info['name'], int(user_info['login']), int(user_info['age']), user_info['constellation'], 
                               user_info['date'], user_info['ip'], int(user_info['article']), user_info['id']))
        except sqlite3.DatabaseError, e:
            print e

        self.uip.reset()
        self.conn.commit()

    def get_online_ids():
        ''' Search BBS online-users bulletin for all online users.
        '''
        online_user_url = 'http://bbs.sjtu.edu.cn/bbsufind?search=*'
        online_user_page = urllib.urlopen(online_user_url)
        content = online_user_page.read()
        user_ids = re.findall('bbsqry.userid=[a-zA-Z]+', content)
        new_user_ids = [key.split('=')[-1] for key, _ in groupby(user_ids)]

        new_id_amount = 0
        id_amount = 0
        for id in new_user_ids:
            print 'User ', id
            try:
                cursor.execute("INSERT INTO %s VALUES ('%s', '%s', %d, '%s', '%s', '%s', '%s', '%s', '%s', %d, '%s')" \
                                   % (TABLENAME, id, '', 0,  0, '', '', '', '', '', 0, ''))
            except sqlite3.IntegrityError, e:       # ignore duplicated user id
                print e
            except sqlite3.DatabaseError, e:        # catch any database error
                print e
            else:
                new_id_amount += 1
                id_amount += 1
                print 'Add %d new user ids\tTotal %d' % (new_id_amount, id_amount)

        self.db_conn.commit()
        self.cursor.close()

    def get_posts():
        section_url = 'http://bbs.sjtu.edu.cn/bbsall'
        section_page = urllib.urlopen(section_url)
        content = section_page.read()
        print content.decode(PAGECODEC)

    def set_cookies(r1):
        ''' Get cookies from http response.
        '''
        cookies = re.findall('Set.Cookie.*', str(r1.msg))
        print cookies
        new_cookie = [c.split(' ')[1] + ' ' for c in cookies]   
        COOKIE['Cookie'] = COOKIE['Cookie'].join(new_cookie)    # Not sure of it: utmpnum=137; utmpkey=287755515; utmpuserid=KevinHanson; 
        print COOKIE['Cookie'] 

    def login(self):
        self.http_conn.request('POST', '/bbslogin', 'id=kevinhanson&pw=951753&submit=', HEADERS)
        r1 = self.http_conn.getresponse()
        print r1.status, r1.reason

        #self.http_conn.request('GET', '/bbsinfo','',{'Connection':'keep-alive','Cookie':'utmpnum=76; utmpkey=845081348; utmpuserid=KevinHanson', 'User-Agent':'IE8'})

    def collect(self):
        self.login()
        #print cursor.execute('.table')

        self.db_reset(db_conn)
        #self.get_online_ids()
        self.get_id_info(db_conn)
        #self.get_posts()
    
        pass

def main():
    s = UIDSpider()
    s.collect()

if __name__ == '__main__':
    main()

