# -*- coding: UTF-8 -*-
'''
Created on June 2, 2014

@author: areshero
'''

import urllib2
import json
import datetime
from home.models import Ontology,Tag
import threading
import time
import traceback
import ontologies


REST_URL = "http://data.bioontology.org"
API_KEY = "1b466c88-c956-4682-8319-5e01f905b4f4"


class DataDownloadThread(threading.Thread):
    def __init__(self, beginIndex, endIndex,annotations):
        super(DataDownloadThread, self).__init__()
        self.beginIndex = beginIndex
        self.endIndex = endIndex
        self.annotations = annotations
        self.recommend_list = []
        self.ontologoies = []

    def run(self):
        #processing get annotations from recommender
        
        self.recommend_list[:] = []
        self.ontologoies[:] = []
        
        for resultIndex in range(self.beginIndex, self.endIndex):
            
            tag_detail = dict()
            try:
                current_ontology = self.get_json(self.annotations[resultIndex]["ontology"]["@id"])
                
                ontology_object = dict()
                ontology_link = self.annotations[resultIndex]["ontology"]["@id"]
                ontology_object['virtual_id'] = ontology_link
                ontology_object['name'] = current_ontology["name"]
                self.ontologoies.append(ontology_object)
            except urllib2.HTTPError,e:
                print e.reason
                pass
            if len(self.annotations[resultIndex]["annotatedClasses"]) > 0:
                for index2 in range(len(self.annotations[resultIndex]["annotatedClasses"])):
                    try:
                        current_tag = self.get_json(self.annotations[resultIndex]["annotatedClasses"][index2]["links"]["self"])
                        
                        tag_detail = dict()
                        tag_detail["id_in_database"] = 0
                        tag_detail["tag_prefLabel"] =  current_tag["prefLabel"]
                        tag_detail["id_in_bioportal"] = current_tag["@id"]
                        tag_detail["ontology_link"] = ontology_link
                        tag_detail["parent_tag_link"] = current_tag["links"]["parents"]
                        self.recommend_list.append(tag_detail)
                    except urllib2.HTTPError,e:
                        print e.reason
                        print self.annotations[resultIndex]["annotatedClasses"][index2]["links"]["self"]
                        traceback.print_exc(limit=None, file=None)
                        pass
        return self.recommend_list
    
    
    
    def get_json(self,url):
        opener = urllib2.build_opener()
        opener.addheaders = [('Authorization', 'apikey token=' + API_KEY)]
        result = json.loads(opener.open(url).read())
        time.sleep(1)
        return result

#multi thread entrance
#Uses multiple threads to query the remote data. Due to time constraints, we
#currently query only once, take that first result, then analyze it to update
#the database and return the data to the frontend.
def startGetAnnotationsFromRecommender(figureLegend,ontology_ids):
    '''Query the BioPortal recommender once and return the raw annotations.

    figureLegend -- free text to annotate (URL-quoted before sending)
    ontology_ids -- comma-separated ontology acronyms to restrict the search
    '''
    query_url = (REST_URL +
                 "/recommender?&include_classes=true&text=" +
                 urllib2.quote(figureLegend) +
                 "&ontologies=" +
                 ontology_ids)
    # A dummy thread instance is used purely for its get_json helper.
    downloader = DataDownloadThread(1, 1, [])
    return downloader.get_json(query_url)

def startGetAnnotationsFromRecommender1(figureLegend,threadNUM,ontology_ids):
    '''Query the recommender, then fan out detail downloads over threads.

    figureLegend -- free text to annotate (URL-quoted before sending)
    threadNUM    -- approximate number of worker threads to use
    ontology_ids -- comma-separated ontology acronyms to restrict the search

    Returns (recommend_list, ontologies_result): the merged tag-detail dicts
    and ontology dicts collected by all workers.
    '''
    fetcher = DataDownloadThread(1, 1, [])
    annotations = fetcher.get_json(
                                   REST_URL +
                                   "/recommender?&include_classes=true&text=" +
                                   urllib2.quote(figureLegend) +
                                   "&ontologies=" +
                                   ontology_ids
                                   )

    total = len(annotations)
    # Python 2 integer division; +1 so threadNUM threads roughly cover total.
    per_thread = total / threadNUM + 1

    # Partition [0, total) into contiguous slices, one worker per slice.
    # BUG FIX: the original built the final worker with end index total-1,
    # and since the worker's range() end is exclusive, the last annotation
    # was never processed.
    thread_arr = []
    begin = 0
    while begin < total:
        end = min(begin + per_thread, total)
        thread_arr.append(DataDownloadThread(begin, end, annotations))
        begin = end

    for worker in thread_arr:
        worker.start()
        # Stagger startup to avoid bursting requests at the API.
        time.sleep(2)
    for worker in thread_arr:
        worker.join()

    # All workers finished; merge their per-thread results.
    result = []
    ontologies_result = []
    for worker in thread_arr:
        result.extend(worker.recommend_list)
        ontologies_result.extend(worker.ontologoies)

    return result,ontologies_result
