# ChatStudio/scripts/try_model.py
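"""Quick experiment: load an open LLM via the transformers pipeline API and prompt it to
translate a natural-language question into a Kusto query (few-shot examples, schema in the prompt)."""
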
import torch
import transformers
from transformers import AutoTokenizer  # only needed by the commented-out explicit pipeline below
# model="databricks/dolly-v2-3b"
model = "tiiuae/falcon-7b"
# model = "gpt2"
# model = "bigcode/santacoder"
# model = "bigscience/bloom-560m"
print(f'Loading model: {model}')
generate_text = transformers.pipeline(
    "text-generation",
    model=model,
    torch_dtype=torch.bfloat16,
    trust_remote_code=True,  # Falcon ships custom modelling code in the model repo
    device_map="auto",
)
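
# Rough sizing (an assumption, not measured): in bfloat16 the falcon-7b weights are about
# 7B params * 2 bytes ~= 14 GB; device_map="auto" (via the accelerate package) places them
# across whatever GPU/CPU memory is available.

# The prompt below is a few-shot prompt: Kusto table schemas plus example question/query
# pairs, followed by the user's question; the model is asked to reply with a query only.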
prompt = """
# EngSys Studio
EngSys Studio is a tool to help you extract data from Kusto with a simple query.
## Consider Following Kusto Schema:
```
android_build_time (EventInfo_Time, EventInfo_Name, EventInfo_BaseType, EventInfo_Source,
PipelineInfo_AccountId, Username, UserEmail, modelName, totalCores, Build_Date, Build_Time, taskData,
processorName, task, deviceId, memory, timezone, l3Cache, processorSpeed, modelIdentifier,
numberOfProcessor, PipelineInfo_IngestionTime, PipelineInfo_ClientIp, PipelineInfo_ClientCountry, os,
MachineName, sha, cleanBuild, totalTaskTime, versionName, EventInfo_SdkVersion,
PipelineInfo_RoutingGeo, modulesChanged);
android_test_telemetry (test_case_name, test_case_path, phase_name, source_branch, test_case_outcome
(enum: success, failure), build_id, commit_sha, test_case_rerun_count, test_user_account,
test_case_type, sign_in_flow, pipeline_name, testcase_index, time_taken, device_id, pull_request_id,
pipeline_id, build_rerun_count, PipelineInfo_IngestionTime, PipelineInfo_ClientIp,
PipelineInfo_ClientCountry, stacktrace, error_message, pre_signin, PipelineInfo_RoutingGeo, testGuid,
goldenGuid, diffGuid, rerun, EventInfo_Time); shield_prod (EventInfo_Time, EventInfo_Name,
EventInfo_BaseType, EventInfo_Source, PipelineInfo_AccountId, org, project, repository,
pull_request_id, pr_title, committer_name, committer_alias, source_branch, target_branch, work_items,
area_paths, name, overrides, overridden_by, status, message, exception, error, event_time,
PipelineInfo_IngestionTime, PipelineInfo_ClientIp, PipelineInfo_ClientCountry, callback_results,
suggestion, PipelineInfo_RoutingGeo); build_metadata (EventInfo_Time, EventInfo_Name,
EventInfo_BaseType, EventInfo_Source, PipelineInfo_AccountId, EventInfo_SdkVersion, plans, buildNumber,
status, result, queueTime, startTime, finishTime, url, definition_path, definition_type,
definition_name, definition_url, definition_project_id, definition_project_name,
definition_project_url, definition_project_state, project_id, project_name, project_url, project_state,
uri, sourceBranch, sourceVersion, queue_pool_name, queue_name, priority, reason, requestedFor_id,
requestedFor_displayName, requestedFor_uniqueName, requestedFor_url, requestedFor_imageUrl,
requestedBy_id, requestedBy_displayName, requestedBy_uniqueName, requestedBy_url, requestedBy_imageUrl,
lastChangedDate, lastChangedBy_id, lastChangedBy_displayName, lastChangedBy_uniqueName,
lastChangedBy_url, lastChangedBy_imageUrl, orchestrationPlan_planId, logs_type, logs_url,
repository_id, repository_type, EventInfo_OriginalTime, PipelineInfo_IngestionTime,
PipelineInfo_ClientIp, PipelineInfo_ClientCountry, PipelineInfo_IngestionPath, id, definition_revision,
definition_id);
```
## Examples:
Q: Last 5 build times for Android:
A: android_build_time | take 5
Q: What's the reliability of the android test "testEnterpriseE3NavigateSavedMessagesPageDetails"
A: android_test_telemetry
| where test_case_name == "testEnterpriseE3NavigateSavedMessagesPageDetails"
| where test_case_outcome == "success" or test_case_outcome == "failure"
| summarize success_count=countif(test_case_outcome == "success"),
failure_count=countif(test_case_outcome == "failure")
| extend reliability = success_count * 1.0 / (success_count + failure_count)
Q: Build time for iOS:
A: ios_build_time
## Notes:
- For iOS, use the same table schema; just replace android with ios in the table name
- Also, iOS test case names have a "()" suffix
## User Input:
when did "testEnterpriseE3NavigateSavedMessagesPageDetails" android test last fail? What where the
error messages, build id.
## Please make sure you return Kusto Query without any explaination
"""
# prompt = 'Write a code in python to print hello world in loop for 100 times'
response = generate_text(prompt)
print(f'{response=}')
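
# The text-generation pipeline returns a list of dicts keyed by "generated_text".
# A minimal sketch (the generation settings below are assumptions, not tuned values)
# that bounds the completion length and prints only the newly generated Kusto query:
#
# result = generate_text(
#     prompt,
#     max_new_tokens=200,      # the default generation length is short; leave room for the query
#     do_sample=False,         # greedy decoding for a more deterministic query
#     return_full_text=False,  # drop the echoed prompt, keep only the completion
# )
# print(result[0]["generated_text"])

# The commented-out block below appears to mirror the text-generation example from the
# tiiuae/falcon-7b model card, building the pipeline explicitly from a tokenizer.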
# tokenizer = AutoTokenizer.from_pretrained(model)
# pipeline = transformers.pipeline(
# "text-generation",
# model=model,
# tokenizer=tokenizer,
# torch_dtype=torch.bfloat16,
# trust_remote_code=True,
# device_map="auto",
# )
# sequences = pipeline(
# "Girafatron is obsessed with giraffes, the most glorious animal on the face of this Earth. Giraftron believes all other animals are irrelevant when compared to the glorious majesty of the giraffe.\nDaniel: Hello, Girafatron!\nGirafatron:",
# max_length=200,
# do_sample=True,
# top_k=10,
# num_return_sequences=1,
# eos_token_id=tokenizer.eos_token_id,
# )
# for seq in sequences:
# print(f"Result: {seq['generated_text']}")