from fastapi import FastAPI from fastapi.staticfiles import StaticFiles from fastapi.responses import FileResponse from fastapi import FastAPI, BackgroundTasks, HTTPException, Query from fastapi.responses import StreamingResponse from starlette.concurrency import run_in_threadpool from datasets import load_dataset import random import json from genson import SchemaBuilder from pathvalidate import sanitize_filename from openai import OpenAI import hashlib from pprint import pprint import asyncio import importlib.util import sys import json import jsonschema # import aiosqlite from utils import extract_code import numpy as np app = FastAPI() # DATABASE_FILE = "samples.db" client = OpenAI( base_url="https://openrouter.ai/api/v1", api_key=os.environ.get('OPENROUTER_KEY') ) # async def setup_database(): # async with aiosqlite.connect(DATABASE_FILE) as db: # await db.execute(""" # CREATE TABLE IF NOT EXISTS samples ( # hash TEXT PRIMARY KEY, # data TEXT NOT NULL, # dataset TEXT NOT NULL # ) # """) # await db.commit() # async def insert_sample(hash: str, data: str, dataset: str): # async with aiosqlite.connect(DATABASE_FILE) as db: # # Check if a record with the same hash already exists # cursor = await db.execute("SELECT COUNT(*) FROM samples WHERE hash = ?", (hash,)) # count = await cursor.fetchone() # if count[0] == 0: # # Insert the new record since it doesn't exist # await db.execute("INSERT INTO samples (hash, data, dataset) VALUES (?, ?, ?)", (hash, data, dataset)) # await db.commit() # else: # # A record with the same hash already exists # print("Record with the same hash already exists in the database.") # async def get_sample_by_hash(hash: str): # async with aiosqlite.connect(DATABASE_FILE) as db: # cursor = await db.execute("SELECT data, dataset FROM samples WHERE hash = ?", (hash,)) # row = await cursor.fetchone() # return row def is_sharegpt(sample): schema={'$schema': 'http://json-schema.org/schema#', 'type': 'object', 'properties': {'conversations': {'type': 'array', 'items': {'type': 'object', 'properties': {'from': { 'type': 'string', 'enum': ['human', 'gpt', 'system'] }, 'value': {'type': 'string'}}, 'required': ['from', 'value']}}}, 'required': ['conversations']} try: jsonschema.validate(instance=sample, schema=schema) return True except jsonschema.exceptions.ValidationError as e: return False def sha256(string): # Create a hashlib object for SHA-256 sha256_hash = hashlib.sha256() # Update the hash object with your string encoded as bytes sha256_hash.update(string.encode('utf-8')) return sha256_hash.hexdigest() def get_adapter_name(sample): builder = SchemaBuilder() builder.add_object(sample) schema = builder.to_schema() return sha256(json.dumps(schema)) def has_adapter(sample): adapter_name = get_adapter_name(sample) module_name = f"dataset_adapters.{adapter_name}" module_spec = importlib.util.find_spec(module_name) if module_spec is None: return False return True def auto_tranform(sample): adapter_name = get_adapter_name(sample) if not has_adapter(sample): create_adapter(sample, adapter_name) module_name = f"dataset_adapters.{adapter_name}" spec = importlib.util.spec_from_file_location(module_name, f"dataset_adapters/{adapter_name}.py") dynamic_module = importlib.util.module_from_spec(spec) sys.modules[module_name] = dynamic_module spec.loader.exec_module(dynamic_module) # Use the function from the dynamically imported module transformed_data = dynamic_module.transform_data(sample) if isinstance(transformed_data, list): return {'conversations' : transformed_data} return transformed_data # def create_adapter(sample, adapter_name): # builder = SchemaBuilder() # builder.add_object(sample) # schema = builder.to_schema() # code_string = """def transform_data(data): # raise Exception('')""" with open(f"dataset_adapters/{adapter_name}.py", 'w') as file: file.write(code_string) def create_adapter(sample, adapter_name): builder = SchemaBuilder() builder.add_object(sample) schema = builder.to_schema() prompt = f"""Make me minimal and efficient python code to convert data in the shape of initial data shape ==========➡️📑📐========== ```jsonschema {schema} ``` ==========➡️📑📐========== to equivalent data in the form final data shape ==========⬇️📑📐========== ```jsonschema {{'$schema': 'http://json-schema.org/schema#', 'type': 'object', 'properties': {{'conversations': {{'type': 'array', 'items': {{'type': 'object', 'properties': {{'from': {{ 'type': 'string', 'enum': ['human', 'gpt', 'system'] }}, 'value': {{'type': 'string'}}}}, 'required': ['from', 'value']}}}}}}, 'required': ['conversations']}} ``` ==========⬇️📑📐========== the data to transform is ```json {sample} ``` Inside the data to transform, `input` and `instruction` is usually associated with `"from" : "human"` while `output` is usually associated with `"from" : "gpt"` For transforming the data you shall use python. Make robust and elegant python code that will do the transformation your code will contain a function `def transform_data(data):` that does the transformation and outputs the newly shaped data. Only the data, no schema. Your code snippet will include only the function signature and body. I know how to call it. You won't need to import anything, I will take care of parsing and dumping json. You work with dicts. Remember to be careful if you iterate over the data because I want the output conversation to always start with the prompt. In other words, always process "input" before "output" and "instruction" before "output". Such heuristics are very important. If there is "instruction" and "input" and the "input" is not empty, concat the input at the end of the first message. If the data contains no "system" message, human always speaks first. If it contains a "system" message, the "system" message is first, then human, then gpt, then alternating if needed "human" ALWAYS SPEAKS BEFORE "gpt", if you suspect your code makes "gpt speak first, fix it MOST IMPORTANT IS THAT YOU look at the initial data shape (➡️📑📐) to ground your transformation into final data shape (⬇️📑📐) Your output should contain only the code for `def transform_data(data):`, signature and body. Put the code inside markdown code block""" response = client.chat.completions.create( model="openai/gpt-4-1106-preview", # Optional (user controls the default) messages=[ { "role": "system", "content": """You are ChatGPT, a large language model trained by OpenAI, based on the GPT-4 architecture. Knowledge cutoff: 2023-04 Current date: 2023-11-05 Image input capabilities: Enabled""" }, # {"role": "user", "content": f"""Make me minimal and efficient python code to convert data in the shape of # ```jsonschema # {json.dumps(schema)} # ``` # to equivalent data in the form ``` # {{'$schema': 'http://json-schema.org/schema#', 'type': 'object', 'properties': {{'conversations': {{'type': 'array', 'items': {{'type': 'object', 'properties': {{'from': {{ 'type': 'string', 'enum': ['human', 'gpt', 'system'] }}, 'value': {{'type': 'string'}}}}, 'required': ['from', 'value']}}}}}}, 'required': ['conversations']}} # ``` # the input is # ``` # {json.dumps(sample)} # ``` # `input` is usually associated with `"from" : "human"` while `output` is usually associated with `"from" : "gpt"` # don't transform, make robust and elegant python code that will do the transformation # your code will contain a function `def transform_data(data):` that does the transformation and outputs the newly shaped data. Only the data, no schema. Your code snippet will include only the function signature and body. I know how to call it. You won't need to import anything, I will take care of parsing and dumping json. You work with dicts. Remember to be careful if you iterate over the data because I want the output conversation to always start with the prompt. In other words, always process "input" before "output" and "instruction" before "output". Such heuristics are very important. If there is "instruction" and "input" and the "input" is not empty, concat the input at the end of the first message.""" # } {"role": "user", "content": prompt} ] ) val = response.choices[0].message.content # index = val.index('def transform_data(data)') # def get_code_start(): # for i in range(index,0,-1): # if val[i:i+3] == "```": # idx = val[i:].index('\n') # return i + (idx) + 1 # def get_code_end(): # for i in range(index, len(val)): # if val[i:i+3] == "```": # return i-1 # code_string = val[get_code_start():get_code_end()] # print("###", val) code_string = extract_code(val) if code_string is None: raise Exception("hey la") with open(f"dataset_adapters/{adapter_name}.py", 'w') as file: file.write(code_string) @app.get("/sample") async def get_sample(hash: str = Query(..., alias="hash")): res = await get_sample_by_hash(hash) if res is None: raise HTTPException(status_code=404, detail="Item not found") data, dataset = res sample= auto_tranform(json.loads(data)) return {'sample': sample, 'dataset': dataset} @app.get("/random-sample-stream") async def get_random_sample(background_tasks: BackgroundTasks, dataset_name: str = Query(..., alias="dataset-name"), index: str = Query(None, alias="index")): queue = asyncio.Queue() def event_stream(queue): yield f"data: {json.dumps({'status': 'grab_sample'})}\n\n" try: # dataset = load_dataset(dataset_name,streaming=True) # split = [key for key in dataset.keys() if "train" in key] import requests headers = {"Authorization": f"Bearer {os.environ.get('HF_TOKEN')}"} API_URL = f"https://datasets-server.huggingface.co/info?dataset={dataset_name}" def query(): response = requests.get(API_URL, headers=headers) return response.json() data = query() splits = data['dataset_info']['default']['splits'] split = next(iter(splits.values())) num_samples = split['num_examples'] split_name = split['name'] # dataset = load_dataset(dataset_name, split=split_name, streaming=True) idx = random.randint(0, num_samples) if index is None else int(index) API_URL = f"https://datasets-server.huggingface.co/rows?dataset={dataset_name}&config=default&split=train&offset={idx}&length=1" def query(): headers = {"Authorization": f"Bearer {os.environ.get('HF_TOKEN')}"} response = requests.get(API_URL, headers=headers) if response.status_code != 200: raise Exception("hugging face api error") return response.json() data = query() random_sample = data['rows'][0]['row'] # pprint(random_sample) # selected = dataset.skip(idx) # random_sample = next(iter(selected))#random.choice(samples_buffer) hashed = sha256(json.dumps(random_sample)) # insert_sample(hashed, json.dumps(random_sample), dataset_name) # background_tasks.add_task(insert_sample, hashed, json.dumps(random_sample), dataset_name) except Exception as e: message = "" if hasattr(e, 'message'): message = e.message else: message = str(e) print("error : ", message) yield f"data: {json.dumps({'status': 'error', 'message' : message })}\n\n" transformed_data = random_sample success = True if not is_sharegpt(random_sample): try: if not has_adapter(random_sample): yield f"data: {json.dumps({'status': 'creating_adapter'})}\n\n" transformed_data = auto_tranform(random_sample) except Exception as e: success = False if hasattr(e, 'message'): print("error : ", e.message) else: print("error : ", e) yield f"data: {json.dumps({'status': 'error'})}\n\n" if success: yield f"data: {json.dumps({'status': 'done', 'data' : transformed_data, 'index' : str(idx)})}\n\n" return StreamingResponse(event_stream(queue), media_type="text/event-stream") @app.get("/random-sample") async def get_random_sample(dataset_name: str = Query(..., alias="dataset-name")): try: dataset = load_dataset(dataset_name,streaming=True) split = [key for key in dataset.keys() if "train" in key] dataset = load_dataset(dataset_name, split=split[0], streaming=True) buffer_size = 100 # Define a reasonable buffer size samples_buffer = [sample for _, sample in zip(range(buffer_size), dataset)] random_sample = random.choice(samples_buffer) hashed = sha256(json.dumps(random_sample)) sanitized = sanitize_filename(dataset_name) module_name = f"dataset_adapters.{sanitized}" module_spec = importlib.util.find_spec(module_name) if module_spec is None: create_adapter(random_sample, sanitized) spec = importlib.util.spec_from_file_location(module_name, f"dataset_adapters/{sanitized}.py") dynamic_module = importlib.util.module_from_spec(spec) sys.modules[module_name] = dynamic_module spec.loader.exec_module(dynamic_module) # Use the function from the dynamically imported module transformed_data = dynamic_module.transform_data(random_sample) return transformed_data except FileNotFoundError: raise HTTPException(status_code=404, detail="Dataset not found") except Exception as e: raise HTTPException(status_code=500, detail=str(e)) # @app.on_event("startup") # async def startup_event(): # await setup_database() @app.get("/") def index() -> FileResponse: return FileResponse(path="/app/static/index.html", media_type="text/html")