# intermine2os.py
#
# Export InterMine class data into OpenSearch.
#
# Reads the per-class document counts for NAMESPACE from the
# 'intermine-classe' OpenSearch index, then streams every row of every
# class from the InterMine web service into the
# 'intermine-<namespace>-object' index via bulk requests.

from opensearchpy import OpenSearch
from opensearch_dsl import Search
import opensearch_py_ml as oml
import pandas as pd
from os_credential import credentials
import json
from datetime import datetime
from intermine.webservice import Service
import time

# Mine to export.  Alternatives: 'wormmine', 'mousemine', 'humanmine'.
# (The original assigned NAMESPACE three times; only the last took effect.)
NAMESPACE = 'flymine'


def get_os_client():
    """Create an OpenSearch client from the credentials in os_credential.

    Returns:
        OpenSearch: client with SSL/TLS enabled but certificate and
        hostname verification disabled (the target cluster apparently
        uses a self-signed certificate).
    """
    (OS_HOST, OS_PORT, OS_USER, OS_PASSWORD) = credentials()
    client = OpenSearch(
        hosts=[{'host': OS_HOST, 'port': OS_PORT}],
        http_compress=True,  # gzip compression for request bodies
        http_auth=(OS_USER, OS_PASSWORD),
        use_ssl=True,
        verify_certs=False,
        ssl_assert_hostname=False,
        ssl_show_warn=False,
    )
    return client


os_client = get_os_client()

# Load the list of classes (and their document counts) for NAMESPACE
# from the 'intermine-classe' index.
oml_classes = oml.DataFrame(os_client, 'intermine-classe')
pd_classes = oml.opensearch_to_pandas(oml_classes)
pd_classes_name = (
    pd_classes.get(['classe', 'namespace', 'count'])
    .query('namespace == "' + NAMESPACE + '"')
    .sort_values('classe')
)
print('max', pd_classes_name['count'].max())
print('sum', pd_classes_name['count'].sum())
# Each element is [classe, namespace, count].
list_classes_name = pd_classes_name.values.tolist()


def list2doc(data_views):
    """Convert an InterMine row (parallel 'views'/'data' lists) to a dict.

    View names are dotted paths such as 'UniProtFeature.begin'; only the
    first attribute after the class name is kept as the key.  None values
    are dropped.

    Example:
        >>> list2doc({'data': [1, 'desc', None],
        ...           'views': ['F.begin', 'F.description', 'F.end'],
        ...           'index_map': None})
        {'begin': 1, 'description': 'desc'}

    Args:
        data_views (dict): mapping with parallel 'views' and 'data' lists.

    Returns:
        dict: field name -> value, with None values omitted.
    """
    data_obj = {}
    for view, value in zip(data_views['views'], data_views['data']):
        if value is not None:
            data_obj[view.split('.')[1]] = value
    return data_obj


def clase2obj(intermine_service, intermine_class_name, end_pos=10, start_pos=0):
    """Yield (document, counter) pairs for every row of an InterMine class.

    Args:
        intermine_service (Service): InterMine web-service handle.
        intermine_class_name (str): class (table) to query.
        end_pos (int): 1-based row counter after which iteration stops.
        start_pos (int): rows with a 1-based counter below this are skipped.

    Yields:
        tuple(dict, int): the row converted by list2doc() and its counter.
    """
    query = intermine_service.new_query(intermine_class_name)
    ctr = 0
    for row in query.rows():
        ctr += 1
        if ctr < start_pos:
            continue
        if ctr > end_pos:
            break
        yield list2doc(row.__dict__), ctr


class OSBulkBuffer:
    """Accumulate documents and flush them to OpenSearch in bulk.

    Documents are buffered until `size` of them are queued, then sent in
    a single bulk request.  Call close() when done to flush the
    remainder and close the connection.
    """

    def __init__(self, size, length, index_name, os_connection, debug_mode=False):
        """
        Args:
            size (int): number of documents per bulk request.
            length (int): intended maximum payload size in characters
                (tracked via str_length but currently never enforced).
            index_name (str): target OpenSearch index.
            os_connection (OpenSearch): client used for bulk requests.
            debug_mode (bool): print bulk responses when True.
        """
        self.buffer = []
        self.str_length = 0
        self.head = 0  # number of documents queued since the last flush
        self.max_size = size
        self.max_length = length
        self.index = index_name
        self.connection = os_connection
        self.debug_mode = debug_mode

    def is_full(self):
        """Return True when the buffer holds max_size documents."""
        return len(self.buffer) == self.max_size

    def transmit(self):
        """Send all buffered documents in one bulk request and reset.

        Each document must carry a 'doc_id' key, used as the OpenSearch
        _id.  On failure the request is retried once after 3 minutes.
        """
        lines = []
        for doc in self.buffer:
            operation = {"index": {"_index": self.index, "_id": doc['doc_id']}}
            lines.append(json.dumps(operation))
            lines.append(json.dumps(doc))
        # NDJSON: one action line per document line, with a trailing
        # newline as the bulk API requires (the original body started
        # with a stray newline and lacked the trailing one).
        os_buffer = '\n'.join(lines) + '\n'
        try:
            response = self.connection.bulk(os_buffer)
            if self.debug_mode:
                print(response)
        except Exception as exc:  # transient connection/timeout errors
            print("OS retry 3 minutes", exc)
            time.sleep(180)
            response = self.connection.bulk(os_buffer)
            if self.debug_mode:
                print(response)
        self.buffer = []
        self.str_length = 0
        self.head = 0

    def put(self, data):
        """Queue one document, flushing first if the buffer is full.

        Args:
            data (dict): document to index; must contain 'doc_id'.
        """
        if self.is_full():
            self.transmit()
        self.head += 1
        self.buffer.append(data)

    def close(self):
        """Flush any remaining documents and close the connection.

        Returns:
            OpenSearch or None: the (closed) connection, or None when
            nothing was pending.
        """
        if self.head == 0:
            # Nothing pending; still release the connection (the
            # original returned early here and leaked it).
            self.connection.close()
            return None
        self.transmit()
        self.connection.close()
        print('close')
        return self.connection


# --- main export loop -------------------------------------------------

services = {
    'humanmine': "https://www.humanmine.org/humanmine/service",
    'flymine': "https://www.flymine.org/flymine/service",
    'mousemine': "https://www.mousemine.org/mousemine/service",
    'wormmine': "http://intermine.wormbase.org/tools/wormmine/service",
}
intermine_service = Service(services[NAMESPACE])

log_freq = 100 * 1000
namespace = NAMESPACE
my_buffer = OSBulkBuffer(1000, 10000000,
                         'intermine-' + namespace + '-object', os_client, False)

for class1 in list_classes_name:
    print(NAMESPACE, class1[0], class1[2])
    class_name = class1[0]
    namespace = class1[1]
    # NOTE(review): this skips every class whose name sorts before 'ZZZ',
    # i.e. effectively all of them.  It looks like a resume/debug
    # leftover; kept as-is to preserve behavior -- confirm before removing.
    if class_name < 'ZZZ':
        continue
    for (row_dict, ctr) in clase2obj(intermine_service, class_name,
                                     end_pos=100 * 1000 * 1000, start_pos=0):
        # Deterministic _id so re-runs overwrite rather than duplicate.
        row_dict['doc_id'] = namespace + '-' + class_name + '-' + str(row_dict['id'])
        # NOTE(review): datetime.utcnow() is naive and deprecated in 3.12;
        # kept to preserve the stored timestamp format.
        row_dict['timestamp'] = datetime.utcnow().isoformat()
        my_buffer.put(row_dict)
        if ctr % log_freq == 0 or ctr == 1:
            print('clase2obj', NAMESPACE, ctr, datetime.utcnow().isoformat())

my_buffer.close()