# coding=utf-8

# Create your tests here.
import urllib2
import socket
import json
import threading
import time


from home.models import Ontology,Tag
# Base URL of the BioPortal REST API.
REST_URL = "http://data.bioontology.org"
# NOTE(review): API key is hard-coded in source control; consider moving it
# to Django settings or an environment variable.
API_KEY = "1b466c88-c956-4682-8319-5e01f905b4f4"

# Maps each tag axis (UI category) to the BioPortal ontology acronym that
# should be queried for terms on that axis.
axes2ontologies={
                 "Anatomy":"SNOMEDCT",
                 "Disease & Symptoms":"SNOMEDCT",
                 "Genetics, Proteins & Processes":"GO",
                 "Imaging":"NCIT",
                 "Medical Intervention":"SNOMEDCT",
                 "Pharmaceutical Agent":"RXNORM"
                 }
# NOTE(review): everything inside the triple-quoted block below is disabled
# code (old threaded page-download implementation). As a bare string literal
# at module level it is a no-op expression statement and is never executed.
# Kept byte-identical here; consider deleting it outright.
'''
axes = "Anatomy", "Disease & Symptoms","Genetics, Proteins & Processes", 
"Imaging","Medical Intervention", "Pharmaceutical Agent"
ontologies = "SNOMEDCT", "SNOMEDCT", "GO","NCIT", "SNOMEDCT", "RXNORM"
        
class PageDownloadThread(threading.Thread):
    def __init__(self,page,name):
        super(PageDownloadThread, self).__init__()
        self.page = page
        self.name = name
        self.allPages = []
        
    def run(self):
        print "start"
        firstPage = get_json(self.page)
        resultFile = open(self.name+'.json','w')
        self.allPages.append(firstPage)
        nextPageLink = firstPage["links"]["nextPage"]
        while nextPageLink != "null":
            print nextPageLink
            nextPageContent = get_json(nextPageLink)
            self.allPages.append(nextPageContent)
            nextPageLink = nextPageContent["links"]["nextPage"]
            print >> resultFile,nextPageContent
        resultFile.close()
        
#     def get_json(self,url):
#         print url
#         opener = urllib2.build_opener()
#         opener.addheaders = [('Authorization', 'apikey token=' + API_KEY)]
#         result = json.loads(opener.open(url).read)
#         print result
#         return result
class DataDownloadThread(threading.Thread):
    def __init__(self, pages):
        super(DataDownloadThread, self).__init__()
        self.pages = pages

    def run(self):
        """
        save the tag without the parent tag restriction
        data format:
            id_in_database
            tag_prefLabel
            id_in_bioportal
            ontology_link
            parent_tag_link
        """
        for currentPage in self.pages:
            
            for currentTagIndex in len(currentPage["collection"]):
                currentOntology = Ontology.objects.get(virtual_id = currentPage["collection"][currentTagIndex]["links"]["ontology"])
                currentTag = Tag(name = currentPage["collection"][currentTagIndex]["prefLabel"],
                                 tag_type_id = '0',
                                 fullId = currentPage["collection"][currentTagIndex]["@id"],
                                 ontology_id = currentOntology.id,
                                 parentId = currentPage["collection"][currentTagIndex]["links"]["parents"]
                                 )
                currentTag.save()
                print currentTag.id
                
        return self.result
    

'''    
def get_json(url, delay=10):
    """Fetch *url* from the BioPortal REST API and return the raw response body.

    Despite the name, this returns the raw response string, NOT a parsed
    object -- callers (e.g. startGetAnnotationsFromRecommender) run
    json.loads() on the result themselves.

    url   -- full BioPortal REST URL to fetch.
    delay -- seconds to sleep after the request, as crude rate limiting
             against the API (defaults to the original hard-coded 10s).
    """
    # Very generous global timeout: some ontology class pages are slow.
    socket.setdefaulttimeout(2000)
    opener = urllib2.build_opener()
    opener.addheaders = [('Authorization', 'apikey token=' + API_KEY)]
    response = opener.open(url)
    try:
        body = response.read()
    finally:
        # Close the response explicitly so the underlying socket is not leaked.
        response.close()
    opener.close()
    # Throttle successive requests to avoid hammering the API.
    time.sleep(delay)
    return body

# Function that used to download JSON over the network; now deprecated because of network restrictions.
def startGetAnnotationsFromRecommender(ontologyLinks):
    '''
    #pages = []
    #allPages = []
    #thread_arr_pageDownload = []
    #download all the pages
    print "get from file"
    resultFile = file("result.json")
    r = resultFile.readlines()
    r1 = r[0].replace('u\'','\'')
    s = json.load(r1)
    print type(s)
    
    
    
    resultFile = file("Cresult.json")
    k = resultFile.readline()
    print type(k)
    f = json.loads(k)
    print f
    
    
    
    '''
    #print "download all the pages"
    resultFile = open('NEWResult.json','a')
    nextPageLink = "http://data.bioontology.org/ontologies/BAO/classes?page=1"
    #ontology = Ontology.objects.get(virtual_id="http://data.bioontology.org/ontologies/NCIT")
    while nextPageLink != "null":
        nextPageContent = get_json(nextPageLink)

        nextPageContentJson = json.loads(nextPageContent)
        nextPageLink = nextPageContentJson["links"]["nextPage"]

        for i in nextPageContentJson["collection"]:
            currentTag = Tag(name = i["prefLabel"],tag_type_id = "4",
                             fullId = i["@id"],
                            ontology_id = '12')
            currentTag.save()
#             currentTag = Tag.objects.get(fullId = i["@id"])
#             currentTag.ontology_id = '12'
#             currentTag.save()
        print nextPageContent
        print >> resultFile,nextPageContent
        #print nextPageLink
        time.sleep(10)
        #save the page content
        
#     for index in ontologyLinks:
#         print ontologyLinks[index]
# #         pageDownloadTread = PageDownloadThread(index,ontologyLinks[index])
# #         thread_arr_pageDownload.append(pageDownloadTread)
#         currentOntologyClassesFirstPage = get_json(index)
#         pages.append(currentOntologyClassesFirstPage)
#         allPages.append(currentOntologyClassesFirstPage)
#         print >> resultFile,currentOntologyClassesFirstPage
    
#     for i in thread_arr_pageDownload:
#         i.start()
#         
#     for j in thread_arr_pageDownload:
#         j.join()
#     
#     for k in thread_arr_pageDownload:
#         allPages.extend(k.allPages)
    
#     
#     for index1 in pages:
#         nextPageLink = index1["links"]["nextPage"]
#         while nextPageLink != "null":
#             print nextPageLink
#             nextPageContent = get_json(nextPageLink)
#             allPages.append(nextPageContent)
#             nextPageLink = nextPageContent["links"]["nextPage"]
#             time.sleep(1)
#             print >> resultFile,nextPageContent
            
        
    #print "download all pages done"
    
    resultFile.close()
    '''
    print len(allPages)
    #download page content and save to database
    print "download page content and save to database"    
    thread_arr = []
    index2 = 0
    currentIndex = 0
    while index2 < len(allPages):
        DataDownlaodThreadInstance = DataDownloadThread(allPages[index2:index2+len(allPages)/3])
        currentIndex = index2
        index2 = index2 + len(allPages)/3
        thread_arr.append(DataDownlaodThreadInstance)
    
    DataDownlaodThreadInstance = DataDownloadThread(allPages[currentIndex:len(allPages)])
    thread_arr.append(DataDownlaodThreadInstance)
    
    for i in thread_arr:
        i.start()
        time.sleep(2)
    for j in thread_arr:
        j.join()
    '''
    return True
# 
# class recommend:
#     def __init__(self):
#         self.recommend_list = []
#     
#        
#     def saveTagToDB1(self,info_list,figureID):
#         """
#         save the tag without the parent tag restriction
#         data format:
#             id_in_database
#             tag_prefLabel
#             id_in_bioportal
#             ontology_link
#             parent_tag_link
#         """
#         for index in range(len(info_list)):
#             currentTagInfo = info_list[index]
#             currentOntology = Ontology.objects.get(virtual_id = currentTagInfo["ontology_link"])
#             currentTag = Tag(name = currentTagInfo["tag_prefLabel"],tag_type_id = "2",fullId = currentTagInfo["id_in_bioportal"],
#                              ontology_id = currentOntology.name, parent_id = currentTagInfo["parent_tag_link"])
#             currentTag.save()
#             Recommendations.objects.get_or_create(tag_id =currentTag.id.id,image_id = figureID )
#             info_list[index]["id_in_database"] = currentTag.id
#         
#         return info_list                   
#     
# 
# def saveTagToDB(result):
#     """
#     save the tag without the parent tag restriction
#     data format:
#         id_in_database
#         tag_prefLabel
#         id_in_bioportal
#         ontology_link
#         parent_tag_link
#     """
#     
#     return True

# USE
#get_json("http://data.bioontology.org/ontologies/GO/classes")

def gogogo():
    ontologyLinks = {"http://data.bioontology.org/ontologies/SNOMEDCT/classes":"SNOMEDCT",
                     "http://data.bioontology.org/ontologies/GO/classes":"GO",
                     "http://data.bioontology.org/ontologies/NCIT/classes":"NCIT",
                     "http://data.bioontology.org/ontologies/RXNORM/classes":"RXNORM"}
    
    startGetAnnotationsFromRecommender(ontologyLinks)
    
    print 'over'

#gogogo()
