# -*- coding: utf8 -*-

import urllib
import re
import pickle
from datetime import datetime
from time import sleep
from DisciplinesList import DisciplinesList


class UfrgsCrawler:
    '''Crawls through UFRGS academic-information pages.

    Builds the period string (the 'sem' query parameter) from the
    current date and exposes helpers that scrape course, qualification,
    curriculum and discipline-abstract data from the public site.
    '''

    def __init__(self):
        # first semester runs Jan-Jun ('01'), second Jul-Dec ('02')
        month = int(datetime.now().month)
        semester = '01' if (month <= 6) else '02'
        # NOTE(review): the trailing '2' is kept from the original code;
        # it yields e.g. '2024' + '01' + '2' -- confirm this matches the
        # site's expected 'sem' parameter format
        self.period = '%s%s2' % (datetime.now().year, semester)

        base_url = 'http://www1.ufrgs.br/graduacao/xInformacoesAcademicas'
        self.COURSES_URL = '%s/cursos.php' % base_url
        self.QUALIFICATIONS_URL = '%s/habilitacoes.php' % base_url
        self.DISCIPLINES_URL = '%s/curriculo.php' % base_url
        self.ABSTRACT_URL = '%s/sumula.php' % base_url

        self.CHARSET = 'ISO-8859-1'  # didn't find charset in headers :(

    def _fetch_data(self, url, regex, all=False):
        '''Return the first regex match in the document at *url*.

        With all=True, returns a list of every match instead of the
        first one.  (The 'all' parameter name shadows the builtin; kept
        for backward compatibility with existing keyword callers.)
        '''

        try:
            sock = urllib.urlopen(url)
        except IOError:
            # one best-effort retry after a short pause; a second
            # failure propagates to the caller
            print('Error while connecting to %s\n retrying in 5 seconds...'
                  % url)
            sleep(5)
            sock = urllib.urlopen(url)

        pattern = re.compile(regex)
        data = [] if all else None
        try:
            for line in sock.readlines():
                # pages are Latin-1; decode before matching
                line = unicode(line, self.CHARSET)
                match = pattern.findall(line)
                if match:
                    if not all:
                        data = match[0]
                        break
                    data.extend(match)
        finally:
            # close the connection even if decoding/matching raises
            sock.close()
        return data

    def get_course_code(self, course_name):
        '''Return the course code for *course_name* (case-insensitive).'''

        course_name = course_name.upper()
        regex = r'.*CodCurso=(\d+)"\>%s.*' % course_name
        return self._fetch_data(self.COURSES_URL, regex)

    def get_qualification_code(self, course_code):
        '''Return the first listed qualification code of a course.'''

        url = '%s?CodCurso=%s' % (self.QUALIFICATIONS_URL, course_code)
        regex = r'.*CodHabilitacao=(\d+).*'
        return self._fetch_data(url, regex)

    def get_curriculum_code(self, course_code, qualification_code):
        '''Return the first listed curriculum code of a qualification.'''

        url = '%s?CodHabilitacao=%s&CodCurso=%s&sem=%s' % \
              (self.QUALIFICATIONS_URL, qualification_code,
               course_code, self.period)
        regex = r'.*CodCurriculo=(\d+).*'
        return self._fetch_data(url, regex)

    def _get_disciplines(self, course_code, qualification_code,
            curriculum_code):
        '''Return [(activity_code, discipline_code, name), ...] tuples.'''

        url = '%s?CodCurso=%s&CodHabilitacao=%s&CodCurriculo=%s&sem=%s' % \
              (self.DISCIPLINES_URL, course_code, qualification_code,
               curriculum_code, self.period)

        codes_regex = r".*'sumula.php'.*,'%s','(\d+)'.*" % self.period
        codes = self._fetch_data(url, codes_regex, all=True)

        disciplines_list = DisciplinesList(url, self.CHARSET).data

        # zip guards against a length mismatch between the two scrapes
        # (indexing codes[i] could raise IndexError on a short list)
        return [(code, disc_tuple[0], disc_tuple[1])
                for code, disc_tuple in zip(codes, disciplines_list)]

    def _get_abstract_text(self, discipline_code):
        '''Return the docket (sumula) text of one discipline.'''

        url = '%s?CodCurriculo=&CodHabilitacao=&sem=%s&codatividadeensino=%s'\
                % (self.ABSTRACT_URL, self.period, discipline_code)
        regex = r'<br><font face="verdana" size="2">(.*)</font>'
        return self._fetch_data(url, regex)

    def get_abstract_dict(self, course_code, qualification_code,
            curriculum_code):
        '''
        Return a dict, indexed by discipline codes,
        containing (name, abstract_text) pairs.
        '''

        codes_names = self._get_disciplines(course_code, qualification_code,
                                            curriculum_code)
        abstract_dict = {}
        for code, disc_code, name in codes_names:
            abstract_dict[disc_code] = (name, self._get_abstract_text(code))
        return abstract_dict

class CrawlerInfo:
    '''Holds data fetched by UfrgsCrawler and persists it via pickle.'''

    def __init__(self, course_name):
        # resolve course -> qualification -> curriculum codes up front
        # (each call performs a network request)
        self.ufrgs_crawler = UfrgsCrawler()
        self.course_code = self.ufrgs_crawler.get_course_code(course_name)
        self.qualification_code = \
            self.ufrgs_crawler.get_qualification_code(self.course_code)
        self.curriculum_code = \
            self.ufrgs_crawler.get_curriculum_code(self.course_code,
                                                   self.qualification_code)
        self.abstract_dict = None  # filled lazily by load_abstract_dict()
        self.save_file = '%s_abstracts.pkl' % course_name

    def load_abstract_dict(self):
        '''
        Fetch the abstract dict from the UFRGS site
        and cache it on the instance.
        '''

        self.abstract_dict = \
            self.ufrgs_crawler.get_abstract_dict(self.course_code,
                                                 self.qualification_code,
                                                 self.curriculum_code)

    def dump_to_file(self, file_name=None):
        '''
        Dump abstract_dict to *file_name* (defaults to self.save_file),
        fetching it first if it has not been loaded yet.
        '''

        if not self.abstract_dict:
            self.load_abstract_dict()
        file_name = file_name or self.save_file
        # 'with' guarantees the file is closed even if pickling fails
        with open(file_name, 'wb') as output:
            pickle.dump(self.abstract_dict, output)
        print('saved abstract dict in %s' % file_name)

    def load_from_file(self, file_name=None):
        '''
        Load a previously saved abstract dict
        from *file_name* (defaults to self.save_file).
        '''

        file_name = file_name or self.save_file
        with open(file_name, 'rb') as pickled:
            self.abstract_dict = pickle.load(pickled)
        print('loaded abstract dict from %s' % file_name)

if __name__ == '__main__':
    print('a python crawls into a bar')
    # Example usage (performs network requests):
    #   c = CrawlerInfo(u'ciência da computação')
    #   c.load_abstract_dict()
    #   c.abstract_dict