#!/usr/bin/python

# Copyright 2009, Fei Yu, feiyu [at] berkeley [dot] edu

# This file is part of Topics-Portal.
#
# Topics-Portal is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Topics-Portal is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with PyGrams.  If not, see <http://www.gnu.org/licenses/>.

from django.shortcuts import render_to_response, get_object_or_404
from django.http import Http404, HttpResponseRedirect
from django.core.urlresolvers import reverse
from django.conf import settings
# from ims.sti.models import *
# from couchdbkit import *
import bibserver
import re
import copy
import difflib
import json
import urllib
import requests
from collections import defaultdict
##############################################################
# updated on August 3, urllib2 no longer works as expected....
#import urllib2
import httplib2
##############################################################
# maximum number of hits returned for a user-facing search
SEARCH_LIMIT = 100
# effectively "no limit" page size for internal bulk index queries
BIGNUMBER = 5000
QUERY_HEADERS = {'content-type': 'application/json'}
# elasticsearch-style _search endpoint of the bibserver index
QUERY_HEAD = settings.BIBSERVER_HOST + settings.BIBSERVER_DATABASE_NAME + '/_search'
#server = client.Server(settings.BIBSERVER_HOST)
#db = server[settings.BIBSERVER_DATABASE_NAME]
#
#url_head = settings.COUCHDB_HOST + settings.COUCHDB_DATABASE_NAME
#view_doc = {'all': url_head + "/_fti/_design/lucene2/all",
#            'allFields': url_head + "/_fti/_design/lucene2/allf",
#            'allDocs': url_head + "/_all_docs",
#            'bookIndex': url_head + "/_design/lucene2/_view/bookIndex",
#            'topicIndex': url_head + "/_design/lucene2/_view/topicIndex",
#            'courseIndex': url_head + "/_design/lucene2/_view/courseIndex",
#            'topicRefInBooks': url_head + "/_design/lucene2/_view/topicRefInBooks",
#            'topicRefInLectures': url_head + "/_design/lucene2/_view/topicRefInLectures",
#    }
    
    
def unicode_x_to_u00(s):
    """Convert unicode escapes of the form ``\\xEE`` to ``\\u00EE``.

    This conversion is necessary for lucene queries to work
    properly with non-ascii characters.  Non-unicode input is
    returned unchanged.

    NOTE(review): Python 2 only — relies on the exact ``u'...'``
    repr format of unicode strings.
    """
    if type(s) == type(unicode('a')):
        s1 = repr(s)
        s1 = s1[2:-1]    # strip the leading u' and trailing ' of the repr: u'...'
        # \xEE and \u00EE name the same code point; lucene wants the \u form
        s1 = s1.replace('\\x', '\\u00')
        return unicode(s1)
    return s


def bulkGetUniqueDocs(all_doc_ids, doc_type):
    """Fetch the full documents of the given type for a list of ids.

    Given a list of document ids (not necessarily unique) and a
    document type (e.g. 'person', 'book', 'publisher'), query the
    bibserver index once and return a dict mapping each found id to
    its source document, with the document's 'id' key overwritten by
    the index '_id'.
    """
    unique_doc_ids = list(set(all_doc_ids))
    # An empty "or" filter is not a meaningful query; nothing to fetch.
    if not unique_doc_ids:
        return {}
    query = {
        "size": BIGNUMBER, 
        "query": {
            "filtered": {
                "query": {
                    "term": { "type": doc_type}
                },
                "filter": {
                    # one exact-match clause per requested id
                    "or":  [{"term": {"id.exact": doc_id}} for doc_id in unique_doc_ids]
                }
            }
        }
    }
    resp = requests.post(QUERY_HEAD, data=json.dumps(query), headers=QUERY_HEADERS)
    docs = dict([(doc['_source']['id'], doc['_source']) for doc in resp.json['hits']['hits']])
    # overwrite the business 'id' with the index '_id' so templates can
    # build record URLs from it
    for key, val in docs.iteritems():
        docs[key]['id'] = val['_id']
    return docs

def bulkGetUniqueTopics(all_topic_ids):
    """Fetch 'id', 'name' and '_id' for a list of topic ids.

    Given a list of topic ids (not necessarily unique), query the
    bibserver index once and return a dict mapping each found topic id
    to its fields, with the topic's 'id' key overwritten by the index
    '_id'.
    """
    unique_topic_ids = list(set(all_topic_ids))
    # An empty "or" filter is not a meaningful query; nothing to fetch.
    if not unique_topic_ids:
        return {}
    query = {
        "size": BIGNUMBER, 
        "query": {
            "filtered": {
                "query": {
                    "term": { "type": "topic"}
                },
                "filter": {
                    # one exact-match clause per requested topic id
                    "or":  [{"term": {"id.exact": topic_id}} for topic_id in unique_topic_ids]
                }
            }
        },
        "fields": ["id", "name", "_id"]
    }
    resp = requests.post(QUERY_HEAD, data=json.dumps(query), headers=QUERY_HEADERS)
    topics = dict([(doc['fields']['id'], doc['fields']) for doc in resp.json['hits']['hits']])
    # overwrite the business 'id' with the index '_id' so templates can
    # build record URLs from it
    for key, val in topics.iteritems():
        topics[key]['id'] = val['_id']
    return topics



def home(request):
    """Render the landing page."""
    context = {'title': 'Welcome'}
    return render_to_response('home.html', context)


def topicIndex(request):
    """List topics whose id starts with the requested letter.

    The letter comes from the 'page' GET parameter (default 'A');
    matching is done case-insensitively by a server-side script filter.
    """
    ###### pagination
    page = request.GET.get('page', 'A')
    start = page.lower()
    query = {
        "query": {
            "filtered": {    
                "query": {
                    "bool" : {
                        "must": {"term": { "type": "topic"}}
                    }
                },
                "filter": {
                    # server-side script: keep docs whose id starts with `letter`
                    "script" : {
                        "script": 'if (!doc["id.exact"].empty && doc["id.exact"].stringValue.length() > 0 && doc["id.exact"].stringValue.toLowerCase().substring(0,1) == letter) {true} else {false}' ,
                        "params": {
                            "letter": start                        
                        }
                    }
                }
            }
        },
        "size": BIGNUMBER, 
        "fields": ["name"]
    }
    resp = requests.post(QUERY_HEAD, data=json.dumps(query), headers=QUERY_HEADERS)
    topics = [{'id': tt['_id'], 'name': tt['fields']['name']}
              for tt in resp.json['hits']['hits']]
    # key= is clearer and faster than the old cmp= comparator
    topics.sort(key=lambda t: t['name'].lower())
    context = {
        "topics": topics,
        "current_letter": page,
        "title": "Topic Index"
        }
    return render_to_response('topicIndex.html', context)



def personIndex(request):
    """List every person and the topics they know, sorted by last name."""
    query = {
        "size": BIGNUMBER, 
        "query": {
            "bool" : {
                "must":{"term": { "type": "person"}}                        
            }
        },
    }    
    resp = requests.post(QUERY_HEAD, data=json.dumps(query), headers=QUERY_HEADERS)
    person_docs = [doc['_source'] for doc in resp.json['hits']['hits']]
    # collect every topic id referenced by anyone, then resolve them in
    # a single bulk query
    all_topic_ids = []
    for doc in person_docs:
        if 'knowledge' in doc:
            all_topic_ids += [kk['id'] for kk in doc['knowledge']]
    unique_topics = bulkGetUniqueTopics(all_topic_ids)    
    ###### generate data for template
    data = []
    for doc in person_docs:
        if not 'knowledge' in doc:    
            continue
        # skip references to topics the index did not return
        topics = [unique_topics[k['id']] for k in doc['knowledge']
                  if k['id'] in unique_topics]
        # key= sort replaces the Python-2-only positional cmp argument
        topics.sort(key=lambda t: t['id'].lower())
        data.append((doc['firstname'], doc['lastname'], topics))
    context = {'title': 'Person Index',
                'data': sorted(data, key=lambda entry: entry[1])
                }
    return render_to_response('personIndex.html', context)




def courseIndex(request):
    """Render the index of all courses."""
    query = {
        "size": BIGNUMBER, 
        "query": {
            "bool" : {
                "must":{"term": { "type": "course"}}                        
            }
        },
        'fields': ['description', 'semester', 'name', '_id']
    }
    resp = requests.post(QUERY_HEAD, data=json.dumps(query), headers=QUERY_HEADERS)
    # expose the index '_id' under 'id' for template URL building
    courses = []
    for hit in resp.json['hits']['hits']:
        record = hit['fields']
        record['id'] = record['_id']
        courses.append(record)
    context = {
        "courses": courses,
        "title": "Course Index"
        }
    return render_to_response('courseIndex.html', context)




def bookIndex(request):
    """List all books with resolved author and publisher names, sorted by title."""
    query = {
        "size": BIGNUMBER, 
        "query": {
            "bool" : {
                "must":{"term": { "type": "book"}}                        
            }
        },
        'fields': ['author', 'year', 'title', '_id', 'publisher']
    }    
    resp = requests.post(QUERY_HEAD, data=json.dumps(query), headers=QUERY_HEADERS)
    books = [doc['fields'] for doc in resp.json['hits']['hits']]
    for ii, doc in enumerate(books):
        books[ii]['id'] = doc['_id']
    # resolve all author/publisher references with two bulk queries
    all_author_ids = []
    all_publisher_ids = []
    for bk in books:
        all_author_ids += [doc['id'] for doc in  bk['author']]
        all_publisher_ids.append(bk['publisher']['id'])
    unique_authors = bulkGetUniqueDocs(all_author_ids, 'person')
    unique_publishers = bulkGetUniqueDocs(all_publisher_ids, 'publisher')
    ###### generate data for template
    data = []    
    for bk in books:
        # skip author ids the index did not return instead of raising KeyError
        authors = [unique_authors[aa['id']] for aa in bk['author']
                   if aa['id'] in unique_authors]
        authors = [' '.join([a['firstname'] ,a['lastname']]) for \
                    a in authors]
        # bug fix: direct indexing raised KeyError before the falsy check
        # below could ever run; .get() makes a missing publisher yield None
        publisher = unique_publishers.get(bk['publisher']['id'])
        publisher = publisher['name'] if publisher else None
        data.append((bk, authors, publisher))
    # key= sort replaces the Python-2-only cmp= comparator
    data.sort(key=lambda entry: entry[0]['title'])
    context = {
        "data": data,
        "title": "Book Index",
        }
    return render_to_response('bookIndex.html', context)





def course(request, courseID, choice='lecture'):
    """Render a course page with either lecture data or book data.

    Key arguments:
    courseID -- the course document id in the bibserver index
    choice -- signify whether to return book data or lecture data
                (default is lecture data)

    Raises Http404 if the course record cannot be fetched.
    """
    def availability(course_doc):
        """Report which data the course has: 'lecture', 'book', 'both' or 'none'."""
        has_lecture = bool(course_doc.get('lecture', []))
        has_book = bool(course_doc.get('book', []))
        if has_lecture and has_book:
            return 'both'
        elif has_lecture:
            return 'lecture'
        elif has_book:
            return 'book'
        else:
            return 'none'
    url = settings.BIBSERVER_HOST + settings.BIBSERVER_DATABASE_NAME + '/record/' + courseID
    resp = requests.get(url)
    if not resp.status_code == 200:
        # bug fix: Http404 was a bare expression here and was never raised
        raise Http404
    course_doc = resp.json['_source']    
    course_doc['id'] = course_doc['_id']
    if choice == 'lecture':
        # resolve the topic references inside every lecture in one bulk query
        all_topic_ids = []
        for lecture in course_doc['lecture']:
            all_topic_ids += [ee['id'] for ee in lecture['topic']]
        unique_topics = bulkGetUniqueTopics(all_topic_ids)    
        for ii, lecture in enumerate(course_doc['lecture']):
            course_doc['lecture'][ii]['topic'] = [unique_topics[ee['id']] for ee in lecture['topic'] if ee['id'] in unique_topics]
        all_book_ids = [ee['id'] for ee in course_doc['book']]
        unique_books = bulkGetUniqueDocs(all_book_ids, 'book')
        book_data = unique_books.values()
    else:      
        # book view: expand each book with authors/publisher/topics resolved
        all_book_ids = [ee['id'] for ee in course_doc['book']]
        unique_books = bulkGetUniqueDocs(all_book_ids, 'book')
        book_data = [generate_book_data(bk) for bk in unique_books.itervalues()]
    semester = "[%s]" % course_doc.get('semester', '')
    name = course_doc.get('name', '')
    description = "-- " + course_doc['description'] if course_doc['description'] else ""
    context = {
        'choice' : choice,
        'availability' : availability(course_doc),
        'course' : course_doc,
        'title' : ' '.join([semester, name, description]),
        'books' : book_data
        }
    return render_to_response('course.html', context)





def generate_book_data(book):
    """Return a deep copy of *book* with topic, author and publisher
    references replaced by the full documents from the index."""
    def collect_topic_ids(node, bucket):
        # walk the TOC tree, accumulating every referenced topic id
        bucket += [ee['id'] for ee in node['topic']]
        for child in node['child']:
            collect_topic_ids(child, bucket)

    def resolve_topics(node, lookup):
        # replace topic references in place throughout the TOC tree
        node['topic'] = [lookup[ee['id']] for ee in node['topic']]
        for child in node['child']:
            resolve_topics(child, lookup)

    book = copy.deepcopy(book)
    topic_ids = []
    for chapter in book['toc']:
        collect_topic_ids(chapter, topic_ids)
    topic_lookup = bulkGetUniqueTopics(topic_ids)
    for chapter in book['toc']:
        resolve_topics(chapter, topic_lookup)
    author_ids = [ee['id'] for ee in book['author']]
    author_lookup = bulkGetUniqueDocs(author_ids, doc_type='person')
    book['author'] = author_lookup.values()
    publisher_lookup = bulkGetUniqueDocs([book['publisher']['id'], ],
                                         doc_type='publisher')
    book['publisher'] = publisher_lookup.values()[0]
    return book



def book(request, bookID):
    """Render a book page together with the courses that use the book.

    Raises Http404 if the book record cannot be fetched.
    """
    url = settings.BIBSERVER_HOST + settings.BIBSERVER_DATABASE_NAME + '/record/' + bookID
    resp = requests.get(url)
    if not resp.status_code == 200:
        # bug fix: Http404 was a bare expression here and was never raised
        raise Http404
    book_doc = resp.json['_source']  
    # get courses that use this book
    query = {
        "size": BIGNUMBER, 
        "query": {
            "bool" : {
                "must": {"term": { "type": "course"}}
                    }
        },
        'fields': ['semester','name', 'description', 'book', 'title', '_id', 'id']
    }    
    resp = requests.post(QUERY_HEAD, data=json.dumps(query), headers=QUERY_HEADERS)
    course_docs = [doc['fields'] for doc in resp.json['hits']['hits']] 
    # keep only courses whose book list references this book
    course_docs = [doc for doc in course_docs if book_doc['id'] in [ee['id'] for ee in doc['book']] ]
    for ii, doc in enumerate(course_docs):
        course_docs[ii]['id'] = doc['_id']
    
    context = {
        'book' : generate_book_data(book_doc),
        'title': book_doc['title'],
        'courses' : course_docs
        }
    return render_to_response('book.html', context) 
    
    

def topic(request, topicID):
    """Render a topic page with its references in books and lectures.

    Raises Http404 if the topic record cannot be fetched.
    """
    def getBookRef(topic_doc):
        """Find all occurences of the topic in all the books."""
        def search_page(parent, topic, page_bucket):
            # recursively walk the TOC tree collecting page numbers where
            # the topic is referenced
            for ch_doc in parent:
                topic_ids = [ee['id'] for ee in ch_doc['topic']]
                if topic in topic_ids:
                    page_bucket.append(ch_doc.get('page', ''))
                if 'child' in ch_doc and ch_doc['child']:
                    search_page(ch_doc['child'], topic, page_bucket)
        
        # search for book docs: the index flattens nested TOC levels into
        # paths like toc.child.child...topic.id, so generate one "should"
        # clause per nesting depth up to TOC_DEPTH
        TOC_DEPTH = 5 
        toc_search_names = ['toc.%stopic.id' % ''.join(['child.'] * ii ) for ii in xrange(TOC_DEPTH)]
        toc_part = [{"term": { nn: topic_doc['id']}} for nn in toc_search_names]
        query = {
            "size": BIGNUMBER, 
            "query": {
                "bool": {
                    "must": {"term": { "type": 'book'}},
                    'should': toc_part,
                    'minimum_number_should_match': 1
                }
            }
        }
        resp = requests.post(QUERY_HEAD, data=json.dumps(query), headers=QUERY_HEADERS)
        book_docs = [ee['_source'] for ee in resp.json['hits']['hits']]
        required_fields = ['_id', 'title']
        
        # find page numbers
        references = []
        for bk_doc in book_docs:
            pages = []
            search_page(bk_doc['toc'], topic_doc['id'], pages)
            pages = [ee for ee in pages if len(ee) > 0]
            doc = dict([(key, bk_doc[key]) for key in required_fields if key in bk_doc])
            doc['id'] = doc['_id']
            references.append((doc, pages))
        references.sort(cmp=lambda x,y: cmp(x[0]['title'], y[0]['title']))
        return references
    
    def getLectureRef(topic_doc):
        """Find all occurences of the topic in all the courses."""
        def search_lec(docs, topic, bucket):
            for doc in docs:
                topic_ids = [ee['id'] for ee in doc['topic']]
                if topic in topic_ids:
                    bucket.append(doc)
        
        def cmp_ref(y, x):
            # note the swapped (y, x) parameters: courses with a semester
            # sort newest-first; courses without one fall back to title
            if 'semester' in x[0] and 'semester' in y[0] and \
                x[0]['semester'] and y[0]['semester']:
                return cmp(re.sub('[^0-9]', '', x[0]['semester']), 
                           re.sub('[^0-9]', '', y[0]['semester']))
            if 'semester' in x[0] and x[0]['semester']:
                return 1
            if 'semester' in y[0] and y[0]['semester']:
                return -1
            return cmp(x[0]['title'], y[0]['title'])
        
        query = {
            "size": BIGNUMBER, 
            "query": {
                "bool": {
                    "must": [
                        {"term": { "type": 'course'}},
                        {"term": { "lecture.topic.id": topic_doc['id']}},                    
                    ]
                }
            }
        }
        resp = requests.post(QUERY_HEAD, data=json.dumps(query), headers=QUERY_HEADERS)
        course_docs = [ee['_source'] for ee in resp.json['hits']['hits']]
        required_fields = ['_id', 'semester', 'name', 'description']
        
        # find lectures
        references = []
        for cr_doc in course_docs:
            lec_bucket = []
            search_lec(cr_doc['lecture'], topic_doc['id'], lec_bucket)
            doc = dict([(key, cr_doc[key]) for key in required_fields if key in cr_doc])
            doc['id'] = doc['_id']
            references.append((doc, lec_bucket))
        references.sort(cmp=cmp_ref)
        return references
    
    url = settings.BIBSERVER_HOST + settings.BIBSERVER_DATABASE_NAME + '/record/' + topicID
    resp = requests.get(url)
    if not resp.status_code == 200:
        # bug fix: Http404 was a bare expression here and was never raised
        raise Http404
    topic_doc = resp.json['_source']
    topic_doc_fix_id = copy.deepcopy(topic_doc)
    topic_doc_fix_id['id'] = topic_doc_fix_id['_id']
    context = {
        'topic': topic_doc_fix_id,
        'title': topic_doc_fix_id['name'],
        'references_book': getBookRef(topic_doc),
        'references_lecture': getLectureRef(topic_doc),        
        }
    return render_to_response('topic.html', context)    

def search(request):
    """Search topics by name: exact query_string first, fuzzy fallback."""
    search_term = request.GET['q'].lower()

    def run_query(should_clause):
        # run one bool query with the given "should" part and return the hits
        q = {
            "size": SEARCH_LIMIT, 
            "query": {
                "bool" : {
                    "must":  {"term": { "type": "topic"}},
                    "should": should_clause
                }
            },
            "fields": ["name", "_id"]
        }
        resp = requests.post(QUERY_HEAD, data=json.dumps(q), headers=QUERY_HEADERS)
        return [ee['fields'] for ee in resp.json['hits']['hits']]

    matches = run_query({
        "query_string": {
            "query": search_term,
            "default_field": "name",
            "default_operator": "AND"
        }
    })
    if not matches:
        # no exact matches: retry with a fuzzy query
        matches = run_query({
            "fuzzy_like_this": {
                "like_text": search_term,
                "fields": ["name"],
            }
        })
    if not matches:
        return render_to_response('noResults.html')
    for doc in matches:
        doc['id'] = doc['_id']
    context = {'matches':matches}
    return render_to_response('searchResults.html', context)

def redirect(request,ext_url):
    """Redirect the client to *ext_url*.

    NOTE(review): ext_url comes straight from the request URL, so this
    is an open redirect — consider validating the destination against a
    whitelist of allowed hosts.
    """
    return HttpResponseRedirect(ext_url)

def about(request):
    """Render the About page."""
    context = {'title': 'About'}
    return render_to_response('about.html', context)

