﻿#!/usr/bin/env python
# -*- coding: utf-8 -*-

import hashlib
import os
import re
import string
import sys

import BeautifulSoup
import html2text

import connections

from additional import convert_date, enum

def parse_person_site(id):
    """Scrape one staff member's detail page.

    Parameters:
        id: numeric staff identifier appended to the details URL.
            (The name shadows the builtin ``id`` but is kept for
            backward compatibility with existing callers.)

    Returns:
        dict with 'id', 'imie' (given names), 'nazwisko' (surname),
        u'zak\u0142ad' (department) plus one entry per row of the detail
        table (label cell -> value cell).
    """
    soup = connections.soup_from_url('http://stafflist.wmi.amu.edu.pl/details/' + str(id))
    dane = dict()
    dane['id'] = id
    name = soup.find(name='h2').contents[0].strip()
    # Surname is the last whitespace-separated token; the given name(s)
    # are everything before it, minus the separating space.
    dane['nazwisko'] = name.split()[-1]
    # Fix: str.rfind replaces string.rfind, which the string module no
    # longer provides in Python 3; behavior is identical.
    dane['imie'] = name[:name.rfind(dane['nazwisko']) - 1]
    dept = soup.find(name='h3').contents[0].strip()
    dane[u'zakład'] = dept
    rows = soup.find(name='table').findChildren('tr')
    for row in rows:
        # contents[1]/contents[3] are the label and value cells —
        # presumably contents[0]/[2] are whitespace nodes; confirm
        # against the live page markup.
        dane[row.contents[1].text] = row.contents[3].text
    return dane

def consulting_hours(h_dict, person):
    """Return *person*'s consulting hours from *h_dict*, or "" if absent.

    Bug fix: the original else-branch evaluated the bare expression ""
    without a ``return``, so missing persons yielded ``None`` instead of
    the clearly intended empty string.
    """
    return h_dict.get(person, "")

def parse_worker_list():
    """Scrape the staff list and pair each worker with consulting hours.

    Downloads the staff-list page and the consulting-hours page, builds a
    "degree name" -> hours lookup from the latter, then walks the staff
    table five cells at a time.

    Returns:
        (workers, md5hex) — workers is a list of dicts with keys
        'degree', 'id', 'name', u'room', 'phone', 'email', 'c_hours';
        md5hex fingerprints str(workers) for cheap change detection.
    """
    soup = connections.soup_from_url('http://stafflist.wmi.amu.edu.pl/')
    hours_soup = connections.soup_from_url('http://www.wmi.amu.edu.pl/pl/dyzury-pracownikow')
    dane = []
    wo_soup = soup.find('table')
    hours_soup = hours_soup.find('table')
    h_rows = []
    # Cells alternate name / hours. The counter starts at -1 so the very
    # first <td> is skipped (presumably a header cell — confirm against
    # the live page).
    temp_rounder = -1
    temp_row = []
    for row in hours_soup.findChildren('td'):
        if (temp_rounder == 0):
            # Name cell: a bolded "degree name" string.
            temp_row.append(row.find('strong').contents[0].strip())
        if (temp_rounder == 1):
            # Hours cell: keep only the plain-text fragments, joined one
            # per line; nested tags are dropped.
            temp_temp_rows = []
            for element in row.contents:
                if type(element) == BeautifulSoup.NavigableString:
                    temp_temp_rows.append(element.strip())
            temp_row.append('\n'.join(temp_temp_rows))
            h_rows.append(temp_row)
            temp_row = []
        temp_rounder += 1
        temp_rounder = temp_rounder % 2
    # Lookup: "degree name" -> consulting-hours text.
    h_dict = dict(h_rows)
    itera = 0
    next_w = dict()
    # Staff table cells cycle with period 5: name, room, phone, email,
    # then a fifth cell that closes the record.
    for td in wo_soup.findChildren('td'):
        if itera == 0:
            next_w['degree'] = td.find(name='span').contents[0].strip()
            idstr = td.find(name='a')['href']
            # Keep everything after the first "/" — presumably the href
            # is "details/<id>" (cf. parse_person_site's URL); confirm.
            next_w['id'] = idstr[string.find(idstr, "/") + 1:]
            next_w['name'] = td.find(name='a').contents[0].strip()
            # Deprecated
            # next_w['nazwisko'] = name.split()[-1]
            # next_w['imie'] = name[:string.rfind(name, next_w['nazwisko']) - 1]
        elif itera == 1:
            next_w[u'room'] = td.contents[0].strip()
        elif itera == 2:
            telefon = ""
            if td.find(name='span') != None:
                # Phone number is a <span> prefix plus trailing text node.
                telefon = td.find(name='span').contents[0].strip() + td.contents[1].strip()
            next_w['phone'] = telefon
        elif itera == 3:
            mailto = td.find(name='a')['href']
            # Drop the "mailto:" scheme — keep everything after the ":".
            next_w['email'] = mailto[string.find(mailto, ":") + 1:]
        else:
            # Fifth cell: attach hours looked up by "degree name" and
            # emit the completed record.
            next_w['c_hours'] = consulting_hours(h_dict, next_w['degree'] + " " + next_w['name'])
            dane.append(next_w)
            next_w = dict()
        itera = (itera + 1) % 5
    v_hash = hashlib.md5()
    v_hash.update(str(dane))
    return (dane, v_hash.hexdigest())

term = enum(SUMMER = 'lato', WINTER = 'zima')

def parse_schedule(term_t):
    """Scrape the class schedule for the given term.

    Parameters:
        term_t: term string used in the host name, e.g. a ``term`` value
            ('lato' / 'zima').

    Returns:
        (classes, md5hex) — classes is a list of dicts with keys 'name',
        'code', 'group', 'weekday', 'startTime', 'room', 'teacher' and a
        synthetic 'id' of "code.group.room"; md5hex fingerprints
        str(classes) for change detection.
    """
    soup = connections.soup_from_url('http://plan-' + term_t + '.wmi.amu.edu.pl/')
    dane = []
    sch_soup = soup.findAll('table')[3]
    # Column order in the schedule table; cells repeat with period 7.
    fields = ('name', 'code', 'group', 'weekday', 'startTime', 'room', 'teacher')
    next_cl = dict()
    for class_row in sch_soup.findChildren('tr'):
        for pos, td in enumerate(class_row.findChildren('td')):
            col = pos % 7
            next_cl[fields[col]] = td.contents[0].strip()
            if col == 6:
                # Last column closes the record.
                next_cl['id'] = next_cl['code'] + "." + next_cl['group'] + "." + next_cl['room']
                dane.append(next_cl)
                next_cl = dict()
    # Drop the header row that was parsed as a fake first record.
    dane.pop(0)
    v_hash = hashlib.md5()
    v_hash.update(str(dane))
    return (dane, v_hash.hexdigest())

def remove_markups(text):
    """Render HTML *text* as plain UTF-8 bytes without emphasis markers.

    Links and images are dropped and no line wrapping is applied; the
    '_' and '**' emphasis markers that html2text emits are removed.
    """
    converter = html2text.HTML2Text()
    converter.ignore_links = True
    converter.ignore_images = True
    converter.body_width = None
    plain = converter.handle(text)
    for marker in ('_', '**'):
        plain = plain.replace(marker, '')
    return plain.strip().encode('utf-8')

def create_summary(text):
    """Build a short summary from an HTML-escaped news body.

    Takes the second CRLF-separated line of *text*, truncates it at the
    first escaped ``<br`` tag, and guarantees the fragment ends with an
    escaped ``</p>`` closing tag.

    Raises:
        IndexError: if *text* contains no "\\r\\n" separator.
    """
    text = text.split("\r\n")[1].split("&lt;br")[0]
    # Fix: str.find replaces string.find, which the string module no
    # longer provides in Python 3; behavior is identical.
    if text.find("&lt;/p&gt;") == -1:
        text += "&lt;/p&gt;"
    return text

def relative_links_buildup(html):
    """Absolutize relative href/src URLs and downgrade h1 headings to h3.

    Every ``href="..."`` or ``src="..."`` value that does not already
    start with "http" gets "http://www.wmi.amu.edu.pl/" prepended.
    Finally every literal "h1" substring is replaced by "h3".
    """
    result = html
    for marker in ('href="', 'src="'):
        width = len(marker)
        pos = result.find(marker)
        while pos != -1:
            value_start = pos + width
            if not result.startswith("http", value_start):
                result = result[:value_start] + "http://www.wmi.amu.edu.pl/" + result[value_start:]
            pos = result.find(marker, value_start)
    return result.replace("h1", "h3")

def parse_news():
    """Parse the faculty Atom news feed.

    Returns:
        (entries, md5hex) — entries is a list of dicts with keys 'name',
        'url', 'id' (as str), 'published', 'updated', 'summary', 'text';
        md5hex hashes the highest numeric article id seen, so the hash
        changes when a new article appears.
    """
    dane = []
    atom_feed = connections.easy_download("http://www.wmi.amu.edu.pl/pl/?format=feed&type=atom")
    atom_soup = BeautifulSoup.BeautifulSoup(atom_feed)
    max_id = 0
    next_n = dict()
    for one_n in atom_soup.findChildren("entry"):
        try:
            next_n['name'] = one_n.find("title").contents[0].strip()
            next_n['url'] = one_n.find('link')['href']
            # Numeric article id: leading number of the URL's last path
            # segment (e.g. ".../123-slug" -> 123).
            next_n['id'] = int(one_n.find('link')['href'].split('/')[-1].split('-')[0])
            if next_n['id'] > max_id:
                max_id = next_n['id']
            next_n['published'] = convert_date(one_n.find("published").contents[0].strip())
            next_n['updated'] = convert_date(one_n.find("updated").contents[0].strip())
            next_n['summary'] = one_n.find("summary").contents[0].strip()
            next_n['text'] = one_n.find("content").contents[0].strip()
            # When the feed repeats the full text as the summary, derive
            # a shorter one ourselves.
            if next_n['summary'] == next_n['text']:
                next_n['summary'] = create_summary(next_n['summary'])
            # Summary conversion: drop both links and images.
            h = html2text.HTML2Text()
            h.ignore_links = True
            h.ignore_images = True
            h.body_width = None
            next_n['summary'] = h.handle(next_n['summary']).strip()
            # Body conversion: drop links but keep images, no wrapping.
            h = html2text.HTML2Text()
            h.ignore_links = True
            h.body_width = None
            next_n['text'] = h.handle(next_n['text']).strip()
            next_n['text'] = relative_links_buildup(next_n['text'])
            next_n['summary'] = remove_markups(next_n['summary'])
            next_n['id'] = str(next_n['id'])
            dane.append(next_n)
            next_n = dict()
        except Exception:
            # NOTE(review): best-effort — any malformed entry is reported
            # and skipped; a narrower except plus real logging would hide
            # fewer bugs.
            print "Problem with(" + one_n.find('link')['href'] + ")"
    v_hash = hashlib.md5()
    v_hash.update(str(max_id))
    return (dane, v_hash.hexdigest())
