| row_id (int64, 0-48.4k) | init_message (string, 1-342k chars) | conversation_hash (string, 32 chars) | scores (dict) |
|---|---|---|---|
44,077
|
hey
|
14d17ed72e5a0856302c80263f414c81
|
{
"intermediate": 0.33180856704711914,
"beginner": 0.2916048467159271,
"expert": 0.3765866458415985
}
|
44,078
|
hi
|
861f393adbfa1429011f64e84e476e19
|
{
"intermediate": 0.3246487081050873,
"beginner": 0.27135494351387024,
"expert": 0.40399640798568726
}
|
44,079
|
hi
|
99604018df0972154699e5059b44b0fa
|
{
"intermediate": 0.3246487081050873,
"beginner": 0.27135494351387024,
"expert": 0.40399640798568726
}
|
44,080
|
Error C2280 'model::custom::v1::recording::Ecg::Ecg(const model::custom::v1::recording::Ecg &)': attempting to reference a deleted function
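C2280 means the compiler tried to use a copy constructor that is deleted, most often because the class (here Ecg) owns a move-only member such as std::unique_ptr. A minimal reduced sketch (the member is hypothetical, for illustration only):
#include <memory>
#include <utility>

struct Ecg {
    std::unique_ptr<int> samples;  // move-only member deletes the implicit copy ctor
};

int main() {
    Ecg a;
    // Ecg b = a;          // error C2280: attempting to reference a deleted function
    Ecg b = std::move(a);  // pass/return by reference, or move instead of copying
    return 0;
}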
|
ec5db10d752eaddb0c06ed7876ac6db7
|
{
"intermediate": 0.34613561630249023,
"beginner": 0.456290602684021,
"expert": 0.19757382571697235
}
|
44,081
|
This is the command guide for using whisper in my terminal, but I still don't understand: how do I make it so that I put in a video in Japanese and it translates into English?
make base.en
cc -I. -O3 -std=c11 -pthread -DGGML_USE_ACCELERATE -c ggml.c -o ggml.o
c++ -I. -I./examples -O3 -std=c++11 -pthread -c whisper.cpp -o whisper.o
c++ -I. -I./examples -O3 -std=c++11 -pthread examples/main/main.cpp whisper.o ggml.o -o main -framework Accelerate
./main -h
usage: ./main [options] file0.wav file1.wav ...
options:
-h, --help [default] show this help message and exit
-t N, --threads N [4 ] number of threads to use during computation
-p N, --processors N [1 ] number of processors to use during computation
-ot N, --offset-t N [0 ] time offset in milliseconds
-on N, --offset-n N [0 ] segment index offset
-d N, --duration N [0 ] duration of audio to process in milliseconds
-mc N, --max-context N [-1 ] maximum number of text context tokens to store
-ml N, --max-len N [0 ] maximum segment length in characters
-sow, --split-on-word [false ] split on word rather than on token
-bo N, --best-of N [5 ] number of best candidates to keep
-bs N, --beam-size N [5 ] beam size for beam search
-wt N, --word-thold N [0.01 ] word timestamp probability threshold
-et N, --entropy-thold N [2.40 ] entropy threshold for decoder fail
-lpt N, --logprob-thold N [-1.00 ] log probability threshold for decoder fail
-debug, --debug-mode [false ] enable debug mode (eg. dump log_mel)
-tr, --translate [false ] translate from source language to english
-di, --diarize [false ] stereo audio diarization
-tdrz, --tinydiarize [false ] enable tinydiarize (requires a tdrz model)
-nf, --no-fallback [false ] do not use temperature fallback while decoding
-otxt, --output-txt [false ] output result in a text file
-ovtt, --output-vtt [false ] output result in a vtt file
-osrt, --output-srt [false ] output result in a srt file
-olrc, --output-lrc [false ] output result in a lrc file
-owts, --output-words [false ] output script for generating karaoke video
-fp, --font-path [/System/Library/Fonts/Supplemental/Courier New Bold.ttf] path to a monospace font for karaoke video
-ocsv, --output-csv [false ] output result in a CSV file
-oj, --output-json [false ] output result in a JSON file
-ojf, --output-json-full [false ] include more information in the JSON file
-of FNAME, --output-file FNAME [ ] output file path (without file extension)
-ps, --print-special [false ] print special tokens
-pc, --print-colors [false ] print colors
-pp, --print-progress [false ] print progress
-nt, --no-timestamps [false ] do not print timestamps
-l LANG, --language LANG [en ] spoken language ('auto' for auto-detect)
-dl, --detect-language [false ] exit after automatically detecting language
--prompt PROMPT [ ] initial prompt
-m FNAME, --model FNAME [models/ggml-base.en.bin] model path
-f FNAME, --file FNAME [ ] input WAV file path
-oved D, --ov-e-device DNAME [CPU ] the OpenVINO device used for encode inference
-ls, --log-score [false ] log best decoder scores of tokens
-ng, --no-gpu [false ] disable GPU
bash ./models/download-ggml-model.sh base.en
Downloading ggml model base.en ...
ggml-base.en.bin 100%[========================>] 141.11M 6.34MB/s in 24s
Done! Model 'base.en' saved in 'models/ggml-base.en.bin'
You can now use it like this:
$ ./main -m models/ggml-base.en.bin -f samples/jfk.wav
===============================================
Running base.en on all samples in ./samples ...
===============================================
----------------------------------------------
[+] Running base.en on samples/jfk.wav ... (run 'ffplay samples/jfk.wav' to listen)
----------------------------------------------
whisper_init_from_file: loading model from 'models/ggml-base.en.bin'
whisper_model_load: loading model
whisper_model_load: n_vocab = 51864
whisper_model_load: n_audio_ctx = 1500
whisper_model_load: n_audio_state = 512
whisper_model_load: n_audio_head = 8
whisper_model_load: n_audio_layer = 6
whisper_model_load: n_text_ctx = 448
whisper_model_load: n_text_state = 512
whisper_model_load: n_text_head = 8
whisper_model_load: n_text_layer = 6
whisper_model_load: n_mels = 80
whisper_model_load: f16 = 1
whisper_model_load: type = 2
whisper_model_load: mem required = 215.00 MB (+ 6.00 MB per decoder)
whisper_model_load: kv self size = 5.25 MB
whisper_model_load: kv cross size = 17.58 MB
whisper_model_load: adding 1607 extra tokens
whisper_model_load: model ctx = 140.60 MB
whisper_model_load: model size = 140.54 MB
system_info: n_threads = 4 / 10 | AVX = 0 | AVX2 = 0 | AVX512 = 0 | FMA = 0 | NEON = 1 | ARM_FMA = 1 | F16C = 0 | FP16_VA = 1 | WASM_SIMD = 0 | BLAS = 1 | SSE3 = 0 | VSX = 0 |
main: processing 'samples/jfk.wav' (176000 samples, 11.0 sec), 4 threads, 1 processors, lang = en, task = transcribe, timestamps = 1 ...
[00:00:00.000 --> 00:00:11.000] And so my fellow Americans, ask not what your country can do for you, ask what you can do for your country.
whisper_print_timings: fallbacks = 0 p / 0 h
whisper_print_timings: load time = 113.81 ms
whisper_print_timings: mel time = 15.40 ms
whisper_print_timings: sample time = 11.58 ms / 27 runs ( 0.43 ms per run)
whisper_print_timings: encode time = 266.60 ms / 1 runs ( 266.60 ms per run)
whisper_print_timings: decode time = 66.11 ms / 27 runs ( 2.45 ms per run)
whisper_print_timings: total time = 476.31 ms
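Based on the help text above, translation needs the -tr flag, Japanese input needs -l ja (or auto), and a multilingual model (plain base, not base.en, which is English-only). Since ./main takes WAV input, the video's audio has to be extracted first, e.g. with ffmpeg (a sketch; input_video.mp4 is a placeholder name):
ffmpeg -i input_video.mp4 -ar 16000 -ac 1 -c:a pcm_s16le japanese_audio.wav
bash ./models/download-ggml-model.sh base
./main -m models/ggml-base.bin -f japanese_audio.wav -l ja -tr -otxt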
|
9bd773858c67b18be40a5a248e2658ad
|
{
"intermediate": 0.3895935118198395,
"beginner": 0.3747090995311737,
"expert": 0.23569732904434204
}
|
44,082
|
Реши функцию на языке Python:
|
a70bdb29845b44ea2aaf37be3544d3fd
|
{
"intermediate": 0.3070864677429199,
"beginner": 0.29600200057029724,
"expert": 0.39691150188446045
}
|
44,083
|
import {Component, OnInit, ViewChild} from '@angular/core';
import {VehiculeService} from "../services/vehicule.service";
import {ActivatedRoute} from "@angular/router";
import {EntretienService} from "../services/entretien.service";
import {SinistreService} from "../services/sinistre.service";
import {PretvehiculeService} from "../services/pretvehicule.service";
import {MatDialog} from "@angular/material/dialog";
import {OptionsService} from "../services/options.service";
import {KeycloakService} from "keycloak-angular";
import {TrajetService} from "../services/trajet.service";
import {MatPaginator} from "@angular/material/paginator";
import {DriverService} from "../services/driver.service";
import {ToastrService} from "ngx-toastr";
import {FormBuilder, FormControl, FormGroup, Validators} from "@angular/forms";
@Component({
selector: 'app-vehiculesdetails',
templateUrl: './vehiculesdetails.component.html',
styleUrls: ['./vehiculesdetails.component.scss'],
})
export class VehiculesdetailsComponent implements OnInit {
@ViewChild(MatPaginator) paginator!: MatPaginator;
id = this.route.snapshot.params['id'];
vehicle: any;
entretiens: any;
prets: any;
vehicules: any;
sinistres: any;
pretInfosConducteur: any
options: any;
username: string | undefined;
drivers: any;
trajets: any;
email: any;
isAuthenticated: boolean = false;
submitted: boolean = false;
vehicles: any;
driverdetail: any;
sinistresDisplayedColumns: string[] = ['Type', 'Date', 'Description', 'montant'];
optionsDisplayedColumns: string[] = ['Type'];
trajetsDisplayedColumns: string[] = ['Adresse_Depart', 'Adresse_Arrivee', 'Km_depart', 'Km_arrivee', 'Prix_carburant', "type_carburant", "Date_depart", "Date_arrivee", "Statut"];
entretiensDisplayedColumns: string[] = ['Type', 'Date', 'Description', 'Montant', 'Statut'];
pretsDisplayedColumns: string[] = ['NameDriver', 'DateDebut', 'DateFin', 'Statut'];
vehicleForm: FormGroup = this.fb.group({
Id: new FormControl('', [Validators.required]),
IdConducteur: new FormControl('', [Validators.required]),
});
constructor(
private toastr: ToastrService, private keycloackService: KeycloakService, public vehiculeService: VehiculeService, public driverService: DriverService, public trajetService: TrajetService, private route: ActivatedRoute, public entretienService: EntretienService, public sinistreService: SinistreService, public pretVehiculeService: PretvehiculeService, public optionsService: OptionsService,
public dialog: MatDialog, public fb: FormBuilder) {
}
ngOnInit() {
this.GetDetailVehicule();
this.GetDetailEntretien();
this.GetDetailSinistre();
this.GetInfosConducteurPret();
this.GetInfosGeneralPret();
this.GetdetailsOption();
this.GetDetailTrajet();
this.GetDetailDriver();
this.GetAllVehicles();
this.GetAllDrivers();
//this.nameuserkeycloak();
}
/*nameuserkeycloak() {
this.keycloackService.isLoggedIn().then(isAuthenticated => {
this.isAuthenticated = isAuthenticated;
this.keycloackService.loadUserProfile().then(async (userProfile) => {
this.username = userProfile.username;
});
});
}*/
GetDetailTrajet() {
this.trajetService.getDetailsTrajet(this.id).subscribe((res: any) => {
this.trajets = res;
console.log(this.trajets);
})
}
GetDetailVehicule() {
this.vehiculeService.detailsVehicle(this.id).subscribe((res: any) => {
this.vehicules = [res];
console.log(this.vehicules);
})
}
GetAllDrivers() {
this.driverService.getDrivers().subscribe((res: any) => {
this.drivers = res;
console.log(this.drivers);
})
}
GetDetailEntretien() {
this.entretienService.getEntretien(this.id).subscribe((res: any) => {
this.entretiens = [res];
console.log(this.entretiens);
})
}
GetDetailSinistre() {
this.sinistreService.getDetailsSinistre(this.id).subscribe((res: any) => {
this.sinistres = [res];
console.log(this.sinistres);
})
}
GetInfosConducteurPret() {
this.pretVehiculeService.getDetailsPretVehicule(this.id).subscribe((res: any) => {
this.pretInfosConducteur = [res];
console.log(this.pretInfosConducteur);
})
}
GetInfosGeneralPret() {
this.pretVehiculeService.getDetailsPretVehicule(this.id).subscribe((res: any) => {
this.prets = [res];
console.log(this.prets);
})
}
GetdetailsOption() {
this.optionsService.detailOptions(this.id).subscribe((res: any) => {
this.options = res;
console.log(this.options);
})
}
GetAllVehicles() {
this.vehiculeService.getVehicles().subscribe((res: any) => {
this.vehicles = res;
console.log(this.vehicles);
})
}
GetDetailDriver() {
this.vehiculeService.getDetailVehicle(this.id).subscribe((data: any) => {
this.driverdetail = data;
console.log(this.driverdetail);
})
}
onSubmit() {
this.vehicleForm.setValue({
Id: this.vehicleForm.value.Id,
IdConducteur: this.vehicleForm.value.IdConducteur
});
this.vehiculeService.putVehicle(this.vehicleForm.value).subscribe(
{
next: () => {
this.toastr.success('Conducteur modifié avec succès');
this.resetForm();
},
error: () => {
this.toastr.error('Erreur lors de la modification du conducteur');
}
}
);
}
private resetForm() {
this.submitted = false;
this.vehicleForm.reset();
this.vehicleForm.setValue({
Id: '',
IdConducteur: ''
});
}
}
With this code, I want to add the option to add an IdConducteur if it has not been created.
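A hedged sketch of one way to do this (postDriver is a hypothetical DriverService method for creating a driver; the real endpoint name may differ): create the conductor first when none is selected, then run the existing update.
onSubmit() {
  const value = this.vehicleForm.value;
  if (!value.IdConducteur) {
    // No conductor selected: create one first, then update the vehicle
    this.driverService.postDriver({ /* new driver fields */ }).subscribe({
      next: (driver: any) => {
        this.vehicleForm.patchValue({ IdConducteur: driver.Id });
        this.updateVehicle();
      },
      error: () => this.toastr.error('Erreur lors de la création du conducteur')
    });
  } else {
    this.updateVehicle();
  }
}

private updateVehicle() {
  this.vehiculeService.putVehicle(this.vehicleForm.value).subscribe({
    next: () => {
      this.toastr.success('Conducteur modifié avec succès');
      this.resetForm();
    },
    error: () => this.toastr.error('Erreur lors de la modification du conducteur')
  });
}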
|
5898c13e7fd4578dd9b0597b401b1c8d
|
{
"intermediate": 0.34097638726234436,
"beginner": 0.4319721460342407,
"expert": 0.22705146670341492
}
|
44,084
|
c++ quicksort
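One possible answer (a plain recursive quicksort with Lomuto partitioning; only one of several common variants):
#include <utility>
#include <vector>

int partition(std::vector<int>& a, int lo, int hi) {
    int pivot = a[hi];              // last element as pivot
    int i = lo - 1;
    for (int j = lo; j < hi; ++j)
        if (a[j] < pivot) std::swap(a[++i], a[j]);
    std::swap(a[i + 1], a[hi]);
    return i + 1;                   // final pivot position
}

void quicksort(std::vector<int>& a, int lo, int hi) {
    if (lo < hi) {
        int p = partition(a, lo, hi);
        quicksort(a, lo, p - 1);    // sort left of the pivot
        quicksort(a, p + 1, hi);    // sort right of the pivot
    }
}
// usage: quicksort(v, 0, (int)v.size() - 1);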
|
8583857ad3c9c7c7182c27409ce3cb3c
|
{
"intermediate": 0.2909535765647888,
"beginner": 0.3208329379558563,
"expert": 0.38821351528167725
}
|
44,085
|
I need to write a check for React that tests whether window.location.pathname starts with the string '/${EReactRouterPaths.DASHBOARD}'. Keep in mind that after .DASHBOARD there will be a slash and some id.
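One way to write it (a sketch; the trailing slash keeps paths like '/dashboardfoo' from matching while '/dashboard/someid' still does):
// EReactRouterPaths is assumed to be the project's own enum of route segments
const isDashboardRoute = window.location.pathname.startsWith(
  `/${EReactRouterPaths.DASHBOARD}/`
);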
|
966542969d9d319f220ab92cedd2ec73
|
{
"intermediate": 0.5537343621253967,
"beginner": 0.16039805114269257,
"expert": 0.2858675718307495
}
|
44,086
|
I need to write a check for React that tests whether window.location.pathname starts with the string ‘/${EReactRouterPaths.DASHBOARD}’. Keep in mind that after .DASHBOARD there will be a slash and some id.
|
502a6eb2afa9b983245fc9f6cefbe41c
|
{
"intermediate": 0.570316731929779,
"beginner": 0.1746833771467209,
"expert": 0.25499987602233887
}
|
44,087
|
Give me a best-case prediction model for the given data:
X = [[A1,A2,A3,…],[B1,B2,B3,…],…]
y = [[A],[B],[C],…]
X has the shape of (1394, 13)
y has the shape of (1394, 1)
each value in sample of x and y are floats
The model is used for tabular data
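There is no single "best" model without trying a few, but for a small float tabular regression problem of this shape, gradient-boosted trees are a strong baseline; a hedged sklearn sketch with cross-validation (random arrays stand in for the real X and y):
import numpy as np
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.model_selection import cross_val_score

X = np.random.rand(1394, 13)   # placeholder for the real features
y = np.random.rand(1394, 1)    # placeholder for the real targets

model = GradientBoostingRegressor()
scores = cross_val_score(model, X, y.ravel(), cv=5, scoring="r2")
print(scores.mean())           # compare against other baselines the same way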
|
ef1b1e9f173d6f4bf50b2120a91546c1
|
{
"intermediate": 0.3314158618450165,
"beginner": 0.12924495339393616,
"expert": 0.5393391847610474
}
|
44,088
|
Look at this code first please : import time
import numpy as np
import socket
import base64
# This might be useful with the exploitation of the device at some point!
#import lascar
HOST = '0.0.0.0' # This must be changed to the corresponding value of the live instance
PORT = 1337 # This must be changed to the corresponding value of the live instance
# This function is used to decode the base64 transmitted power trace (which is a NumPy array)
# The function should only be called for the response of the 1. option and on the data received
# after we send the plaintext (as seen in the example code below)
def b64_decode_trace(leakage):
byte_data = base64.b64decode(leakage)
return np.frombuffer(byte_data) # convert binary data into a NumPy array
# This function is used to communicate with the remote machine (Laptop-2) via socket
def connect_to_socket(option, data):
# Initialize a socket connection
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
s.connect((HOST, PORT))
resp_1 = s.recv(1024)
s.sendall(option)
resp_2 = s.recv(1024) # Receive response
# Send the data
# option one: binary plaintext
# option two: Hex encoded AES KEY
s.sendall(data)
# Receive response
# option one: receive base64 encoded binary data
# that represented the power traces as a Numpy array
# option two: receive an ASCII Message
# (if the key is correct the flag will be returned)
resp_data = b''
while True:
temp_data = s.recv(8096)
if not temp_data:
break
resp_data += temp_data
s.close()
# The print commands can be used for debugging in order to observe the responses
# The following print commands can be commented out.
print(resp_1.decode('ascii'))
print(option)
print(resp_2.decode('ascii'))
print(data)
#print(resp_data)
return resp_data
# Sample binary plaintext
plaintext = b'0123456789ABCDEF'
# Example use of option 1
print("Option 1:")
leakage = connect_to_socket(b'1', plaintext)
power_trace = b64_decode_trace(leakage)
print("Length of power trace: {}".format(power_trace.shape))
print(power_trace) # Outputs the NumPy array that represents the power trace.
# Always use a delay between each connection
# in order to have a stable connection
time.sleep(0.1)
# Sample HEX encoded AES KEY
KEY = b'00112233445566778899AABBCCDDEEFF'
print("\nOption 2:")
# Example use of option 2
response = connect_to_socket(b'2', KEY)
print(response)
|
ecd1638b098a547062115a91974ffacd
|
{
"intermediate": 0.3744634985923767,
"beginner": 0.43183061480522156,
"expert": 0.19370588660240173
}
|
44,089
|
How does a CONV layer in torch process an N-channel image into X filters?
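Each of the X filters has shape N×k×k: it convolves across all N input channels and sums the results into a single output channel, so the layer maps N channels in to X channels out. A short PyTorch shape check (a sketch):
import torch
import torch.nn as nn

# in_channels=3, out_channels=8: eight filters, each spanning all 3 channels
conv = nn.Conv2d(in_channels=3, out_channels=8, kernel_size=5)
x = torch.randn(1, 3, 32, 32)   # one 3-channel 32x32 image
print(conv.weight.shape)        # torch.Size([8, 3, 5, 5])
print(conv(x).shape)            # torch.Size([1, 8, 28, 28])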
|
eca40c8a71b81c35ccfd1d40d57e7f86
|
{
"intermediate": 0.25367605686187744,
"beginner": 0.10495772957801819,
"expert": 0.6413663029670715
}
|
44,090
|
do rewrite "George VI ordered the jewels to be hidden in the grounds of the castle and the secret was so closely guarded the Queen has only recently found out.
The bid to save them from the Nazis was revealed in a set of letters from a former royal librarian that feature in a BBC documentary.
The "eclectic set of letters" reveal how the gems were hidden in a Bath Oliver biscuit tin and buried in the grounds of Windsor Castle The Times reports.
During the works the excavations had to be covered at night with tarpaulins so when German aircraft flew over they wouldn’t know anything was going on.
Grass was then left to regrow to conceal the hiding place at one of the secure entries to the castle.
The Crown Jewels were then locked inside two chambers built with steel doors - with access only available through a trapdoor.
Royal commentator Alastair Bruce discovered the incredible story and spoke with the Queen for the show about the Coronation to be broadcast this weekend."
|
c17ee641f49c3417c0bfef9e988da330
|
{
"intermediate": 0.31582802534103394,
"beginner": 0.32237863540649414,
"expert": 0.3617933392524719
}
|
44,091
|
I have a React component using the React IMask Plugin library. I want to be able to set the value attribute of the input element inside of it, but I don't know how to do it. Can you write the code that will allow me to do that?
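Assuming the component wraps IMaskInput from 'react-imask', the value prop (paired with onAccept to receive edits) controls the underlying input's value; a hedged sketch:
import { useState } from 'react';
import { IMaskInput } from 'react-imask';

export function PhoneField() {
  const [value, setValue] = useState('1234567890');
  return (
    <IMaskInput
      mask="(000) 000-0000"
      value={value}                          // sets the input's value
      onAccept={(v: string) => setValue(v)}  // keeps state in sync with edits
    />
  );
}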
|
faec0ec1498350c2cb17b37e3b734114
|
{
"intermediate": 0.8114774227142334,
"beginner": 0.098396435379982,
"expert": 0.09012613445520401
}
|
44,092
|
help me improve my code
def addTwoNumbers(self, l1: Optional[ListNode], l2: Optional[ListNode]) -> Optional[ListNode]:
out = ListNode()
tmp = out
rest = 0
while l1 or l2:
l1Val, l2Val = 0, 0
if l1:
l1Val = l1.val
l1 = l1.next
print(l1Val)
if l2:
l2Val = l2.val
l2 = l2.next
print(l2Val)
sumNode = l1Val + l2Val + rest
rest = 0
if sumNode >= 10:
rest = 1
sumNode -= 10
tmp.next = ListNode(sumNode)
tmp = tmp.next
if rest > 0:
tmp.next = ListNode(rest)
return out.next
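A tightened version of the same algorithm (a sketch): fold the carry into the loop condition, use divmod for the digit/carry split, and drop the debug prints.
def addTwoNumbers(self, l1, l2):
    dummy = tail = ListNode()
    carry = 0
    while l1 or l2 or carry:      # carry in the condition handles the final digit
        if l1:
            carry += l1.val
            l1 = l1.next
        if l2:
            carry += l2.val
            l2 = l2.next
        carry, digit = divmod(carry, 10)
        tail.next = ListNode(digit)
        tail = tail.next
    return dummy.next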
|
30e615a88f8968e4e595f00d5db6b3f3
|
{
"intermediate": 0.38076791167259216,
"beginner": 0.3503842353820801,
"expert": 0.268847793340683
}
|
44,093
|
write a python code to convert a json object into a datatable
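A minimal sketch using the datatable package (assuming the JSON object is a dict of equal-length columns, which is what datatable.Frame expects):
import json
import datatable as dt

json_obj = '{"a": [1, 2, 3], "b": ["x", "y", "z"]}'  # example input
frame = dt.Frame(json.loads(json_obj))               # one column per key
print(frame)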
|
f6b9d350237d28e36cbaef92fa7196c4
|
{
"intermediate": 0.6699255704879761,
"beginner": 0.13191188871860504,
"expert": 0.1981625258922577
}
|
44,094
|
I need to modify the edge features completely in the following code based on the below given requirements,
# Define edge colors based on terminal types
edge_colors = {'D': 'blue', 'G': 'red', 'S': 'green', 'B': 'grey'}
def add_vertex_components_features(G, components):
for component_type, component_list in components.items():
for component in component_list:
name = component['device_name']
print("name", name)
num_component = int(name[1:]) # Extract component number from name
nets = component['nets']
print("nets", nets)
if component_type.startswith(('NMOS', 'PMOS')):
# Designate all transistors under a general ‘transistor’ type but with specific attributes
device_type = 'transistor' # All transistors are considered as 'transistor'
num_terminals = 4 # Number of terminals for transistors
G.add_node(name, type=component_type, vertex_type=component_type[:4], device_type=device_type, nets=nets)
# Add features based on transistor type (NMOS or PMOS)
G.nodes[name]['D_terminal'] = nets[0]
G.nodes[name]['G_terminal'] = nets[1]
G.nodes[name]['S_terminal'] = nets[2]
G.nodes[name]['B_terminal'] = nets[3]
G.nodes[name]['w_value'] = component['width']
G.nodes[name]['l_value'] = component['length']
G.nodes[name]['size'] = 'high' if component['width'] / component['length'] > 380e-9 else \
'medium' if 380e-9 >= component['width'] / component['length'] > 35e-9 else 'low'
# Add edges between transistor nodes and net nodes
for i, terminal in enumerate(['D', 'G', 'S', 'B']):
edge_type = f'{terminal}'
edge_features = {
'device_type': device_type,
'terminal_number': f'{terminal}{num_component}',
'edge_label': f'{terminal}{num_component}',
'connection_detail': f'{name} -> {nets[i]}',
'has_parallel_edges': False # Initially set to False
}
G.add_edge(name, nets[i], edge_type=terminal, label='{' + terminal + '}', color=edge_colors[terminal], edge_features=edge_features)
# Update the edge feature for existing parallel edges
if G.has_edge(name, nets[i]):
G.edges[name, nets[i], 0]['has_parallel_edges'] = True # Set to True for multi-edge
I have a bipartite_layout multigraph, where one side of the nodes is the 'device components' (M0, M1, M2, M3, M4, M5, M6, M7, C0, C1, R0, L0, I0, V0, V1) present in the netlist and the other side is the 'nets' (Vbias, VOUT, net1, net2, net3, IN1, IN2, vdd!, 0) present in the circuit netlist. I need to build edge features in the graph data, based on my requirements below.
Edge Features:
For each vertex_type (NMOS, PMOS, R, L, C, I, V), I require the edge features as follows.
If the vertex_type is NMOS/PMOS: there are 8 components (transistors) M0 through M7. Each component has four terminals ['D', 'G', 'S', 'B'], and each terminal connects to its respective 'net' on the other side of the nodes.
Sample for name M0: it connects to the nets ['net3', 'IN1', 'net1', '0'] respectively. The four transistor terminals ['D', 'G', 'S', 'B'] connect with the corresponding nets in order: terminal 'D0' connects to net 'net3', terminal 'G0' to net 'IN1', terminal 'S0' to net 'net1', and terminal 'B0' to net '0'.
In some cases there are parallel edges between the same terminal and the same net.
Sample for name M7: it connects to the nets ['Vbias', 'Vbias', '0', '0'] respectively; the terminals ['D7', 'G7', 'S7', 'B7'] connect with the corresponding nets: 'D7' to 'Vbias', 'G7' to 'Vbias', 'S7' to '0', and 'B7' to '0'. In this case we have parallel edges, and we need to flag which edges have parallel edges: 'T' (true) if parallel edges are present, else 'F' (false). Each edge keeps its respective color: edge_colors = {'D': 'blue', 'G': 'red', 'S': 'green', 'B': 'grey'}.
I need my edge features in the following format, with attribute names [device type, device, number of terminals, terminal ('D', 'G', 'S', 'B')]:
device type: vertex_type (NMOS, PMOS, R, L, C, I, V)
device: component 'name' (M0, M1, M2, M3, M4, M5, M6, M7, C0, C1, R0, L0, I0, V0, V1)
number of terminals: based on the number of nets, nets = component['nets']
terminal ('D', 'G', 'S', 'B'): (edge pair, edge_colors, T/F)
Sample terminals for device M0 connected with nets ['net3', 'IN1', 'net1', '0']: D0: (M0, net3, blue, F), G0: (M0, IN1, red, F), S0: (M0, net1, green, F), B0: (M0, 0, grey, F).
Sample terminals for device M7 connected with nets ['Vbias', 'Vbias', '0', '0']: D7: (M7, Vbias, blue, T), G7: (M7, Vbias, red, T), S7: (M7, 0, green, T), B7: (M7, 0, grey, T). Here terminal D7 has edge pair 'M7, Vbias' and terminal G7 has edge pair 'M7, Vbias', so the parallel-edge flag is 'T' for both; similarly, terminals S7 and B7 have parallel edges, so they are 'T' as well.
For all the nets (Vbias, VOUT, net1, net2, net3, IN1, IN2, vdd!, 0), the edge features are:
nets: device type: nets, Device: Nil, Number of terminals: Nil, terminal: Nil.
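One hedged sketch of this feature format on a networkx MultiGraph (the keyed edges and the second pass for the parallel-edge flag are my additions; number_of_edges(u, v) > 1 detects the parallel case):
import networkx as nx

edge_colors = {'D': 'blue', 'G': 'red', 'S': 'green', 'B': 'grey'}
terminals = ['D', 'G', 'S', 'B']

def add_transistor_edges(G, name, vertex_type, nets):
    num = name[1:]
    # first pass: one keyed edge per terminal, parallel flag defaults to 'F'
    for term, net in zip(terminals, nets):
        G.add_edge(name, net, key=f'{term}{num}',
                   color=edge_colors[term],
                   device_type=vertex_type,
                   device=name,
                   number_of_terminals=len(nets),
                   terminal=(name, net, edge_colors[term], 'F'))
    # second pass: once all edges exist, mark parallel ones with 'T'
    for term, net in zip(terminals, nets):
        flag = 'T' if G.number_of_edges(name, net) > 1 else 'F'
        G.edges[name, net, f'{term}{num}']['terminal'] = (name, net, edge_colors[term], flag)

G = nx.MultiGraph()
add_transistor_edges(G, 'M7', 'NMOS', ['Vbias', 'Vbias', '0', '0'])
# D7 and G7 both map M7 -> Vbias, so their flags come out 'T'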
|
cfdcf10b9a7549c1765100dbcd068ccb
|
{
"intermediate": 0.26971766352653503,
"beginner": 0.52886962890625,
"expert": 0.20141275227069855
}
|
44,095
|
########################################################################
# HELPER FUNCTIONS #
########################################################################
def test_color(color):
""" Tests to see if color exists """
color_tester = codesters.Point(1000, 1000)
color_tester.set_color(color)
def create_landscape():
""" Creates the rest of the landscape """
stage.set_background_color("black")
moon = codesters.Circle(-200, 200, 50, "white")
mountain1 = codesters.TriangleIso(-125, -150, 300, 200, "gray")
mountain_top1 = codesters.TriangleIso(-125, -75, 75, 50, "white", "gray")
mountain2 = codesters.TriangleIso(125, -150, 300, 200, "gray")
mountain_top2 = codesters.TriangleIso(125, -75, 75, 50, "white", "gray")
x = -225
for counter in range(16):
trunk = codesters.Rectangle(x, -240, 5, 20, "brown")
tree = codesters.TriangleIso(x, -220, 30, 30, "green")
x += 30
def create_stars(number, color):
""" Creates the stars for the landscape """
for counter in range(number):
x = random.randint(-250, 250)
y = random.randint(-250, 250)
star = codesters.Star(x, y, 5, 3, color)
def get_user_input():
""" Gets the user's input from the console """
number = int(input("How many stars should there be?"))
color = input("What color should the stars be?")
test_color(color)
return [number, color]
def handle_user_errors():
""" Handles the user's errors """
while True:
print("Infinite loop!")
stage.wait(1)
try:
user_input = get_user_input()
return user_input
except ValueError as e:
print("ERROR: Please enter an integer.")
except Exception as e:
# this custom error message will appear in the console
print("ERROR: Please enter a valid color.")
########################################################################
# MAIN FUNCTION #
########################################################################
def main():
""" Sets up the program and calls other functions """
user_input = handle_user_errors()
create_stars(user_input[0], user_input[1])
create_landscape()
main()
Now customize your program! Be sure to meet these minimum technical requirements:
Use another Input an Integer inside get_user_input() to ask for the star's size or number of points.
Add the user's choice inside the list you return from get_user_input()!
Pass the new input to create_stars() and add it to the code where the star sprites are created.
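One way to meet these requirements (a sketch; only the changed functions are shown, and the new prompt wording is illustrative):
def get_user_input():
    """ Gets the user's input from the console """
    number = int(input("How many stars should there be?"))
    color = input("What color should the stars be?")
    size = int(input("How big should the stars be?"))  # new integer input
    test_color(color)
    return [number, color, size]

def create_stars(number, color, size):
    """ Creates the stars for the landscape """
    for counter in range(number):
        x = random.randint(-250, 250)
        y = random.randint(-250, 250)
        star = codesters.Star(x, y, size, 3, color)  # user's size replaces the fixed 5

# and in main(): create_stars(user_input[0], user_input[1], user_input[2])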
|
6b4b7fee82cbbbeaf6e802bd1bf8f414
|
{
"intermediate": 0.30892008543014526,
"beginner": 0.502569854259491,
"expert": 0.18851007521152496
}
|
44,096
|
Why is the search-box hidden by the map element with these HTML and CSS styles? '<html lang="en">
<head>
<meta charset="UTF-8" />
<meta name="viewport" content="width=device-width, initial-scale=1.0" />
<title>Interactive Map with Leaflet</title>
<link
rel="stylesheet"
href="https://unpkg.com/leaflet@1.9.4/dist/leaflet.css"
integrity="sha256-p4NxAoJBhIIN+hmNHrzRCf9tD/miZyoHS5obTRR9BMY="
crossorigin=""
/>
<script
src="https://unpkg.com/leaflet@1.9.4/dist/leaflet.js"
integrity="sha256-20nQCchB9co0qIjJZRGuk2/Z9VM+kNiyxNV1lvTlZBo="
crossorigin=""
></script>
<link rel="stylesheet" href="/style.css" />
</head>
<body>
<div id="map-container">
<div id="search-box">
<input type="text" id="search-input" placeholder="Search Location" />
</div>
<div id="map"></div>
</div>
<script src="script.js"></script>
</body>
</html>
' 'body,
html,
#map {
height: 100%;
width: 100%;
margin: 0;
padding: 0;
}
#map-container {
position: relative;
height: 100vh; /* Added height */
}
#search-box {
position: absolute;
top: 10px;
left: 10px;
padding: 5px;
background-color: #fff;
border: 1px solid #ccc;
z-index: 1; /* Added z-index */
}
'
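Leaflet's internal panes carry z-index values in the hundreds (the map pane sits at 400), so z-index: 1 on #search-box places it beneath the rendered map. Raising it above the panes fixes the overlap; a sketch of the adjusted rule:
#search-box {
  position: absolute;
  top: 10px;
  left: 10px;
  padding: 5px;
  background-color: #fff;
  border: 1px solid #ccc;
  z-index: 1000; /* above Leaflet's panes (map pane is 400) */
}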
|
151b65b5af1ca3243989b472906f3ba2
|
{
"intermediate": 0.42420443892478943,
"beginner": 0.3577134907245636,
"expert": 0.21808208525180817
}
|
44,097
|
TypeError: Cannot create column: element at index 9 is of type <class 'str'>, whereas previous elements were int8, when creating a datatable using the datatable library
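The error says the column's values started as int8 and then a str appeared at index 9. One hedged fix is to coerce any mixed column to strings before constructing the Frame:
import datatable as dt

column = [0, 1, 2, 3, 4, 5, 6, 7, 8, "n/a"]   # illustrative mixed column
if len({type(v) for v in column}) > 1:
    column = [str(v) for v in column]          # 0 becomes "0"
frame = dt.Frame({"mixed": column})            # now a uniform str column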
|
7312f54e68d7b4e3a88853cfad4f67e3
|
{
"intermediate": 0.6884171366691589,
"beginner": 0.11140438914299011,
"expert": 0.20017851889133453
}
|
44,098
|
import base64
import acrcloud
import os
import eyed3
import eyed3.id3.frames
from eyed3.id3.frames import UserTextFrame
import requests
import json
import re
from bs4 import BeautifulSoup
from genius_api import GeniusApi
from my_shazam_utility import shazam_recognize_song
from applemusic_api import AppleMusicApi
from Acrcloudretrieve import recognize_song, set_id3_tags_mp3
from Retrieve_lyrics import get_lyrics
from erhalten_alb_covers import save_and_embed_album_cover
def load_config():
with open('D:/Eurydice/Encompassing Data by discerning/config/config.json', 'r') as config_file:
config_data = json.load(config_file)
return config_data
# Load the configuration on script start
config = load_config()
# Now also load Spotify credentials
CLIENT_ID = config['Spotify']['CLIENT_ID']
CLIENT_SECRET = config['Spotify']['CLIENT_SECRET']
# Instantiate Genius Api
genius_api = GeniusApi()
def get_user_choice():
# Display a header
print("=" * 50)
print("Welcome to the Song Recognition Service!")
print("=" * 50)
# Provide instructions and options
print("\nPlease select the recognition service you'd like to use:\n")
print(" 1: YoutubeACR - Fast and accurate music recognition")
print(" 2: Shazam - Discover music, artists, and lyrics in seconds")
# Separator for aesthetic purposes
print("-" * 50)
# Input prompt
choice = input("Enter your choice (1 or 2) and press Enter: ")
# More flair to indicate processing/input received
print("\n" + "." * 25 + " Processing " + "." * 25 + "\n")
return choice
def add_or_update_txxx_frame(audiofile, description, value):
found = False
frames = audiofile.tag.frame_set.get(eyed3.id3.frames.USERTEXT_FID, [])
for frame in frames:
if frame.description == description:
frame.text = value
found = True
break
if not found:
# Create a new TXXX frame without specifying encoding
new_frame = eyed3.id3.frames.UserTextFrame(description=description, text=value)
# Previously: When encoding was being passed
# Now: Encoding isn't specified as it's not required or not supported based on the error
if not frames: # If it's the first frame of this type
audiofile.tag.frame_set[eyed3.id3.frames.USERTEXT_FID] = [new_frame]
else:
frames.append(new_frame) # Append to existing list of USERTEXT frames
# Your Spotify authentication and song search functions:
def authenticate_spotify(client_id, client_secret):
auth_url = 'https://accounts.spotify.com/api/token'
client_creds = f"{client_id}:{client_secret}"
client_creds_b64 = base64.b64encode(client_creds.encode())
headers = {'Authorization': f'Basic {client_creds_b64.decode()}'}
data = {'grant_type': 'client_credentials'}
response = requests.post(auth_url, headers=headers, data=data)
access_token = response.json().get('access_token')
return access_token
def search_spotify_for_song(access_token, artist_name, title):
base_url = "https://api.spotify.com/v1/search"
query = f"{title} artist:{artist_name}"
headers = {"Authorization": f"Bearer {access_token}"}
params = {"q": query, "type": "track", "limit": 1}
response = requests.get(base_url, headers=headers, params=params)
results = response.json()
try:
track_info = results['tracks']['items'][0]
return track_info
except IndexError:
print("Song not found on Spotify.")
return None
# Function to fetch lyrics from Genius
def get_lyrics_from_genius(artist_name, title):
# Search for the song on Genius
results = genius_api.get_search_by_songs(f"{artist_name} {title}")
if results:
song_info = results[0]['result'] # Take the most relevant result
song_id = str(song_info['id'])
song_details = genius_api.get_song_by_id(song_id, text_format='plain')
return song_details.get('lyrics', "Lyrics not available.")
return "Song not found on Genius."
def save_lyrics_to_lrc(lyrics, lrc_path):
with open(lrc_path, 'w', encoding='utf-8') as lrc_file:
lrc_file.write(lyrics)
def get_high_quality_album_art_url(song_info):
images = song_info['album']['images'] # Get the list of image dicts
if not images:
return None # No images available
# Sort the images by size, pick the largest
highest_quality_image = max(images, key=lambda x: x['width']*x['height'])
return highest_quality_image['url']
def save_high_quality_album_art(image_url, file_path):
try:
response = requests.get(image_url, stream=True)
if response.status_code == 200:
with open(file_path, 'wb') as out_file:
for chunk in response.iter_content(1024):
out_file.write(chunk)
print(f"High quality album art saved: {file_path}")
return True # Indicate success
else:
print("Could not download the album art.")
except Exception as e:
print(f"Error saving high-quality album art: {e}")
return False # Indicate failure
def embed_album_art_to_song(file_path, image_path):
try:
audiofile = eyed3.load(file_path)
if audiofile.tag is None: # If the file has no tags, create a new tag
audiofile.initTag()
with open(image_path, 'rb') as img_file:
audiofile.tag.images.set(3, img_file.read(), 'image/jpeg')
audiofile.tag.save()
print("High quality album art embedded into song.")
except FileNotFoundError:
print(f"Failed to embed album art - No such file: {image_path}")
def process_audio_file_with_spotify_search(audio_file_path):
shazam_data = shazam_recognize_song(audio_file_path)
if shazam_data:
artist_name = shazam_data['track']['subtitle']
title = shazam_data['track']['title']
print(f"Identified Song: {artist_name} - {title}")
access_token = authenticate_spotify(CLIENT_ID, CLIENT_SECRET)
song_info = search_spotify_for_song(access_token, artist_name, title)
if song_info:
print(json.dumps(song_info, indent=4)) # For debugging
print("\n///////////////////////////////\n")
album_name = song_info['album']['name']
album_url = song_info['album']['external_urls']['spotify']
track_number = song_info['track_number']
release_date = song_info['album']['release_date']
isrc = song_info.get('external_ids', {}).get('isrc', "Not Available")
label = song_info['label'] if 'label' in song_info else "Not Available"
explicit = str(song_info['explicit']) if 'explicit' in song_info else "Not Available" # Convert to string
genres = ", ".join(song_info['genres']) if 'genres' in song_info else "Not Available"
author_url = song_info['artists'][0]['external_urls']['spotify'] if 'artists' in song_info else "Not Available"
spotify_url = song_info['external_urls']['spotify']
print(f"Track Number on Spotify: {track_number}")
audiofile = eyed3.load(audio_file_path)
if audiofile.tag is None: # If the file has no tags, create a new tag
audiofile.initTag(version=eyed3.id3.ID3_V2_3)
# Set standard tags
audiofile.tag.artist = artist_name
audiofile.tag.album = album_name
audiofile.tag.album_artist = artist_name
audiofile.tag.title = title
audiofile.tag.recording_date = release_date
# Using helper function to add or update TXXX frames
add_or_update_txxx_frame(audiofile, "Album URL", album_url)
add_or_update_txxx_frame(audiofile, "Eurydice", "True")
add_or_update_txxx_frame(audiofile, "Compilation", "KK")
add_or_update_txxx_frame(audiofile, "Genre", genres)
add_or_update_txxx_frame(audiofile, "Author URL", author_url)
add_or_update_txxx_frame(audiofile, "Label", label)
add_or_update_txxx_frame(audiofile, "Explicit", explicit)
add_or_update_txxx_frame(audiofile, "ISRC", isrc)
add_or_update_txxx_frame(audiofile, "Spotify URL", spotify_url)
audiofile.tag.comments.set(f"ISRC: {isrc}, Label: {label}, Explicit: {explicit}")
audiofile.tag.save() # Save the metadata to the file
print(f"Metadata embedded into the file: {audio_file_path}")
# Fetch high-quality album art URL
high_res_image_url = get_high_quality_album_art_url(song_info)
if high_res_image_url:
# Determine paths
image_file_path = os.path.splitext(audio_file_path)[0] + ".jpg"
# Save and embed album art
if save_high_quality_album_art(high_res_image_url, image_file_path):
embed_album_art_to_song(audio_file_path, image_file_path)
else:
print("Skipping album art embed due to download failure.")
else:
print("No album art available.")
new_file_name = f"{track_number:02d}. {title} - {artist_name} - {album_name} - {isrc}.mp3"
new_file_name = re.sub(r'[/:*?"<>|]', '', new_file_name) # Clean up characters not allowed in file names
new_file_path = os.path.join(os.path.dirname(audio_file_path), new_file_name)
os.rename(audio_file_path, new_file_path) # Rename file
print(f"File has been renamed to: {new_file_name}")
new_image_file_path = os.path.splitext(new_file_path)[0] + ".jpg"
os.rename(image_file_path, new_image_file_path)
print(f"Album art file has been renamed to: {os.path.basename(new_image_file_path)}")
new_lyrics_file_path = os.path.split(new_file_path)
lyrics = get_lyrics_from_genius(artist_name, title)
if 'plain' in lyrics:
lyrics_plain_text = lyrics['plain']
print("Lyrics:\n", lyrics_plain_text)
# base_directory = os.path.dirname(audio_file_path)
lyrics_file_path = os.path.splitext(audio_file_path)[0] + ".lrc"
with open(lyrics_file_path, 'w', encoding='utf-8') as lrc_file:
lrc_file.write(lyrics_plain_text)
# save_lyrics_to_lrc(lyrics_plain_text, track_number, title, artist_name, album_name, isrc, base_directory)
# save_lyrics_to_lrc(lyrics_plain_text, new_file_name)
# print(f"Lyrics saved to: {new_file_name}")
print(f"Saved LRC lyrics to: {lyrics_file_path}")
else:
print("No lyrics available to save.")
else:
print("Song not found on Spotify.")
else:
print("Song could not be identified.")
if __name__ == "__main__":
user_choice = get_user_choice()
audio_file_path = 'D:/Eurydice/Encompassing Data by discerning/Test_file/Unknown_file.mp3'
if user_choice == '1':
print("\n" + "." * 15 + " ᴜsɪɴɢ YᴏᴜᴛᴜʙᴇACR " + "." * 15 + "\n")
song_tags = recognize_song(audio_file_path)
if song_tags:
print(f'Song identified: {song_tags}')
set_id3_tags_mp3(audio_file_path, song_tags)
artist_name = song_tags.get('artists')[0].get('name')
song_title = song_tags.get('title')
safe_artist_name = re.sub(r'[/\:?"<>|]', '', artist_name)
safe_song_title = re.sub(r'[/\:?"<>|]', '', song_title)
new_file_name = f"{safe_artist_name} - {safe_song_title}.mp3"
new_file_path = os.path.join(os.path.dirname(audio_file_path), new_file_name)
os.rename(audio_file_path, new_file_path)
print(f"File has been renamed to: {new_file_name}")
else:
print('Could not identify the song in YᴏᴜᴛᴜʙᴇACR.')
apple_music_api = AppleMusicApi(Exception) # Initialize AppleMusicApi with necessary authentication
apple_music_api.get_access_token()
track_results = apple_music_api.search('songs', f"{artist_name} - {song_title}")
if track_results:
track_id = track_results[0]['id']
album_artwork_url_template = track_results[0]['attributes']['artwork']['url']
save_and_embed_album_cover(new_file_path, artist_name, song_title, album_artwork_url_template)
else:
print("Song not found on Apple Music.")
lrc_lyrics = get_lyrics(safe_artist_name, safe_song_title)
if lrc_lyrics:
lrc_file_path = os.path.join(os.path.dirname(audio_file_path), f"{safe_artist_name} - {safe_song_title}.lrc")
with open(lrc_file_path, 'w', encoding='utf-8') as lrc_file:
lrc_file.write(lrc_lyrics)
print(f"Saved LRC file to: {lrc_file_path}")
else:
print("Could not get the lyrics.")
elif user_choice == '2':
print("\n" + "." * 15 + " ᴜsɪɴɢ Sʜᴀᴢᴀᴍ " + "." * 15 + "\n")
song_tags = shazam_recognize_song(audio_file_path)
print(song_tags)
process_audio_file_with_spotify_search(audio_file_path)
else:
print("Invalid choice. Exiting....")
exit()
After fetching the lyrics from Genius, can you rename the file to the track number, track name, artist name, album name, and ISRC tag?
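A hedged sketch of the requested reordering inside process_audio_file_with_spotify_search: fetch the Genius lyrics first, save the .lrc next to the original file, then rename the audio, art, and lyrics together using the track-number/title/artist/album/ISRC pattern the code already builds:
lyrics = get_lyrics_from_genius(artist_name, title)
lyrics_text = lyrics['plain'] if isinstance(lyrics, dict) else lyrics  # tolerate both shapes
old_base = os.path.splitext(audio_file_path)[0]
with open(old_base + ".lrc", 'w', encoding='utf-8') as lrc_file:
    lrc_file.write(lyrics_text)
new_base = f"{track_number:02d}. {title} - {artist_name} - {album_name} - {isrc}"
new_base = re.sub(r'[/:*?"<>|]', '', new_base)       # strip illegal filename characters
directory = os.path.dirname(audio_file_path)
os.rename(audio_file_path, os.path.join(directory, new_base + ".mp3"))
os.rename(old_base + ".jpg", os.path.join(directory, new_base + ".jpg"))
os.rename(old_base + ".lrc", os.path.join(directory, new_base + ".lrc"))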
|
45519d6c12a628c8d02b8817ade6c820
|
{
"intermediate": 0.3295801281929016,
"beginner": 0.555325448513031,
"expert": 0.11509445309638977
}
|
44,099
|
I want to split the files up and feed them in all at once; how should I organize this? A file like the one below is located at "DMAC/RTL/DMAC_CFG.sv".
module DMAC_CFG
(
input wire clk,
input wire rst_n, // _n means active low
// AMBA APB interface
input wire psel_i,
input wire penable_i,
input wire [11:0] paddr_i,
input wire pwrite_i,
input wire [31:0] pwdata_i,
output reg pready_o,
output reg [31:0] prdata_o,
output reg pslverr_o,
// configuration registers
output reg [31:0] src_addr_o,
output reg [31:0] dst_addr_o,
output reg [15:0] byte_len_o,
output wire start_o,
input wire done_i
);
// Configuration register to read/write
reg [31:0] src_addr;
reg [31:0] dst_addr;
reg [15:0] byte_len;
//----------------------------------------------------------
// Write
//----------------------------------------------------------
// an APB write occurs when PSEL & PENABLE & PWRITE
// clk : __--__--__--__--__--__--__--__--__--__--
// psel : ___--------_____________________________
// penable : _______----_____________________________
// pwrite : ___--------_____________________________
// wren : _______----_____________________________
//
// DMA start command must be asserted when APB writes 1 to the DMA_CMD
// register
// clk : __--__--__--__--__--__--__--__--__--__--
// psel : ___--------_____________________________
// penable : _______----_____________________________
// pwrite : ___--------_____________________________
// paddr : |DMA_CMD|
// pwdata : | 1 |
// start : _______----_____________________________
wire wren;
assign wren = // fill your code here
always @(posedge clk) begin
// fill
// your
// code
// here
end
wire start;
assign start = // fill your code here
// Read
reg [31:0] rdata;
//----------------------------------------------------------
// READ
//----------------------------------------------------------
// an APB read occurs when PSEL & PENABLE & !PWRITE
// To make read data a direct output from register,
// this code shall buffer the muxed read data into a register
// in the SETUP cycle (PSEL & !PENABLE)
// clk : __--__--__--__--__--__--__--__--__--__--
// psel : ___--------_____________________________
// penable : _______----_____________________________
// pwrite : ________________________________________
// reg update : ___----_________________________________
//
always @(posedge clk) begin
// fill
// your
// code
// here
end
// output assignments
assign pready_o = 1'b1;
assign prdata_o = rdata;
assign pslverr_o = 1'b0;
assign src_addr_o = src_addr;
assign dst_addr_o = dst_addr;
assign byte_len_o = byte_len;
assign start_o = start;
endmodule
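One common layout (a sketch; -f is how simulators such as Synopsys VCS consume filelists, and $LAB_PATH is assumed to be set in the environment) keeps one filelist per directory and feeds them all in a single invocation:
// DMAC/RTL/filelist.f
$LAB_PATH/RTL/DMAC_CFG.sv
// DMAC/SIM/TB/filelist.f
$LAB_PATH/SIM/TB/timescale.v
$LAB_PATH/SIM/TB/AXI_INTF.sv
$LAB_PATH/SIM/TB/DMAC_CFG_TB.sv
// run everything in one command, e.g. with VCS:
vcs -sverilog -f $LAB_PATH/RTL/filelist.f -f $LAB_PATH/SIM/TB/filelist.f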
|
c3d1330536985d3293933543717c8293
|
{
"intermediate": 0.4187358319759369,
"beginner": 0.40527838468551636,
"expert": 0.17598573863506317
}
|
44,100
|
// DMAC/RTL/DMAC_CFG.sv
module DMAC_CFG
(
input wire clk,
input wire rst_n, // _n means active low
// AMBA APB interface
input wire psel_i,
input wire penable_i,
input wire [11:0] paddr_i,
input wire pwrite_i,
input wire [31:0] pwdata_i,
output reg pready_o,
output reg [31:0] prdata_o,
output reg pslverr_o,
// configuration registers
output reg [31:0] src_addr_o,
output reg [31:0] dst_addr_o,
output reg [15:0] byte_len_o,
output wire start_o,
input wire done_i
);
// Configuration register to read/write
reg [31:0] src_addr;
reg [31:0] dst_addr;
reg [15:0] byte_len;
//----------------------------------------------------------
// Write
//----------------------------------------------------------
// an APB write occurs when PSEL & PENABLE & PWRITE
// clk : __--__--__--__--__--__--__--__--__--__--
// psel : ___--------_____________________________
// penable : _______----_____________________________
// pwrite : ___--------_____________________________
// wren : _______----_____________________________
//
// DMA start command must be asserted when APB writes 1 to the DMA_CMD
// register
// clk : __--__--__--__--__--__--__--__--__--__--
// psel : ___--------_____________________________
// penable : _______----_____________________________
// pwrite : ___--------_____________________________
// paddr : |DMA_CMD|
// pwdata : | 1 |
// start : _______----_____________________________
wire wren;
assign wren = // fill your code here
always @(posedge clk) begin
// fill
// your
// code
// here
end
wire start;
assign start = // fill your code here
// Read
reg [31:0] rdata;
//----------------------------------------------------------
// READ
//----------------------------------------------------------
// an APB read occurs when PSEL & PENABLE & !PWRITE
// To make read data a direct output from register,
// this code shall buffer the muxed read data into a register
// in the SETUP cycle (PSEL & !PENABLE)
// clk : __--__--__--__--__--__--__--__--__--__--
// psel : ___--------_____________________________
// penable : _______----_____________________________
// pwrite : ________________________________________
// reg update : ___----_________________________________
//
always @(posedge clk) begin
// fill
// your
// code
// here
end
// output assignments
assign pready_o = 1'b1;
assign prdata_o = rdata;
assign pslverr_o = 1'b0;
assign src_addr_o = src_addr;
assign dst_addr_o = dst_addr;
assign byte_len_o = byte_len;
assign start_o = start;
endmodule
// DMAC/RTL/filelist.f
-sverilog $LAB_PATH/RTL/DMAC_CFG.sv
// DMAC/SIM/TB/AXI_INTF.sv
interface APB (
input clk
);
logic psel;
logic penable;
logic [31:0] paddr;
logic pwrite;
logic [31:0] pwdata;
logic pready;
logic [31:0] prdata;
logic pslverr;
modport master (
input clk,
input pready, prdata, pslverr,
output psel, penable, paddr, pwrite, pwdata
);
task init();
psel = 1'b0;
penable = 1'b0;
paddr = 32'd0;
pwrite = 1'b0;
pwdata = 32'd0;
endtask
task write(input int addr,
input int data);
#1
psel = 1'b1;
penable = 1'b0;
paddr = addr;
pwrite = 1'b1;
pwdata = data;
@(posedge clk);
#1
penable = 1'b1;
@(posedge clk);
while (pready==1'b0) begin
@(posedge clk);
end
psel = 1'b0;
penable = 1'b0;
paddr = 'hX;
pwrite = 1'bx;
pwdata = 'hX;
endtask
task read(input int addr,
output int data);
#1
psel = 1'b1;
penable = 1'b0;
paddr = addr;
pwrite = 1'b0;
pwdata = 'hX;
@(posedge clk);
#1
penable = 1'b1;
@(posedge clk);
while (pready==1'b0) begin
@(posedge clk);
end
data = prdata;
psel = 1'b0;
penable = 1'b0;
paddr = 'hX;
pwrite = 1'bx;
pwdata = 'hX;
endtask
endinterface
// DMAC/SIM/TB/DMAC_CFG_TB.sv
`define SRC_ADDR 32'h100
`define DST_ADDR 32'h104
`define LEN_ADDR 32'h108
`define STAT_ADDR 32'h110
`define START_ADDR 32'h10c
`define TIMEOUT_CYCLE 10000000
module DMAC_CFG_TB ();
reg clk;
reg rst_n;
// clock generation
initial begin
clk = 1'b0;
forever #10 clk = !clk;
end
// reset generation
initial begin
rst_n = 1'b0; // active at time 0
repeat (3) @(posedge clk); // after 3 cycles,
rst_n = 1'b1; // release the reset
end
// enable waveform dump
initial begin
$dumpvars(0, u_DUT);
$dumpfile("dump.vcd");
end
// timeout
initial begin
#`TIMEOUT_CYCLE $display("Timeout!");
$finish;
end
APB apb_if (.clk(clk));
reg [31:0] test_vector;
initial begin
int data;
apb_if.init();
@(posedge rst_n); // wait for a release of the reset
repeat (10) @(posedge clk); // wait another 10 cycles
apb_if.read(32'h0, data);
$display("---------------------------------------------------");
$display("IP version: %x", data);
if (data!=='h0001_2024)
$display("Wrong IP version");
$display("---------------------------------------------------");
$display("---------------------------------------------------");
$display("Reset value test");
$display("---------------------------------------------------");
apb_if.read(`SRC_ADDR, data);
if (data===0)
$display("DMA_SRC(pass): %x", data);
else begin
$display("DMA_SRC(fail): %x", data);
@(posedge clk);
$finish;
end
apb_if.read(`DST_ADDR, data);
if (data===0)
$display("DMA_DST(pass): %x", data);
else begin
$display("DMA_DST(fail): %x", data);
@(posedge clk);
$finish;
end
apb_if.read(`LEN_ADDR, data);
if (data===0)
$display("DMA_LEN(pass): %x", data);
else begin
$display("DMA_LEN(fail): %x", data);
@(posedge clk);
$finish;
end
apb_if.read(`STAT_ADDR, data);
if (data===1)
$display("DMA_STATUS(pass): %x", data);
else begin
$display("DMA_STATUS(fail): %x", data);
@(posedge clk);
$finish;
end
$display("---------------------------------------------------");
$display("Configuration test");
$display("---------------------------------------------------");
test_vector = 32'h1000;
apb_if.write(`SRC_ADDR, test_vector);
apb_if.read(`SRC_ADDR, data);
if (data===test_vector)
$display("DMA_SRC(pass): %x", data);
else begin
$display("DMA_SRC(fail): %x", data);
@(posedge clk);
$finish;
end
test_vector = 32'h2000;
apb_if.write(`DST_ADDR, test_vector);
apb_if.read(`DST_ADDR, data);
if (data===test_vector)
$display("DMA_DST(pass): %x", data);
else begin
$display("DMA_DST(fail): %x", data);
@(posedge clk);
$finish;
end
test_vector = 32'h100;
apb_if.write(`LEN_ADDR, test_vector);
apb_if.read(`LEN_ADDR, data);
if (data===test_vector)
$display("DMA_LEN(pass): %x", data);
else begin
$display("DMA_LEN(fail): %x", data);
@(posedge clk);
$finish;
end
$display("---------------------------------------------------");
$display("DMA start");
$display("---------------------------------------------------");
test_vector = 32'h1;
apb_if.write(`START_ADDR, test_vector);
$display("---------------------------------------------------");
$display("Wait for a DMA completion");
$display("---------------------------------------------------");
data = 0;
while (data != 1) begin
apb_if.read(`STAT_ADDR, data);
repeat (100) @(posedge clk);
$write(".");
end
$display("");
@(posedge clk);
$display("---------------------------------------------------");
$display("DMA completed");
$display("---------------------------------------------------");
$finish;
end
DMAC_CFG u_DUT (
.clk (clk),
.rst_n (rst_n),
// APB interface
.psel_i (apb_if.psel),
.penable_i (apb_if.penable),
.paddr_i (apb_if.paddr[11:0]),
.pwrite_i (apb_if.pwrite),
.pwdata_i (apb_if.pwdata),
.pready_o (apb_if.pready),
.prdata_o (apb_if.prdata),
.pslverr_o (apb_if.pslverr),
.src_addr_o (/* FLOATING */),
.dst_addr_o (/* FLOATING */),
.byte_len_o (/* FLOATING */),
.start_o (/* FLOATING */),
.done_i (1'b1)
);
endmodule
// DMAC/SIM/TB/filelist.f
-sverilog $LAB_PATH/SIM/TB/timescale.v
-sverilog $LAB_PATH/SIM/TB/AXI_INTF.sv
-sverilog $LAB_PATH/SIM/TB/DMAC_CFG_TB.sv
// DMAC/SIM/TB/timescale.v
`timescale 1ns/1ps
|
897bc89a18abfce89ce4b320a9824d56
|
{
"intermediate": 0.4368215501308441,
"beginner": 0.4042208194732666,
"expert": 0.15895763039588928
}
|
44,101
|
// DMAC/RTL/DMAC_CFG.sv
module DMAC_CFG
(
input wire clk,
input wire rst_n, // _n means active low
// AMBA APB interface
input wire psel_i,
input wire penable_i,
input wire [11:0] paddr_i,
input wire pwrite_i,
input wire [31:0] pwdata_i,
output reg pready_o,
output reg [31:0] prdata_o,
output reg pslverr_o,
// configuration registers
output reg [31:0] src_addr_o,
output reg [31:0] dst_addr_o,
output reg [15:0] byte_len_o,
output wire start_o,
input wire done_i
);
// Configuration register to read/write
reg [31:0] src_addr;
reg [31:0] dst_addr;
reg [15:0] byte_len;
//----------------------------------------------------------
// Write
//----------------------------------------------------------
// an APB write occurs when PSEL & PENABLE & PWRITE
// clk : __--__--__--__--__--__--__--__--__--__--
// psel : ___--------_____________________________
// penable : _______----_____________________________
// pwrite : ___--------_____________________________
// wren : _______----_____________________________
//
// DMA start command must be asserted when APB writes 1 to the DMA_CMD
// register
// clk : __--__--__--__--__--__--__--__--__--__--
// psel : ___--------_____________________________
// penable : _______----_____________________________
// pwrite : ___--------_____________________________
// paddr : |DMA_CMD|
// pwdata : | 1 |
// start : _______----_____________________________
wire wren;
assign wren = // fill your code here
always @(posedge clk) begin
// fill
// your
// code
// here
end
wire start;
assign start = // fill your code here
// Read
reg [31:0] rdata;
//----------------------------------------------------------
// READ
//----------------------------------------------------------
// an APB read occurs when PSEL & PENABLE & !PWRITE
// To make read data a direct output from register,
// this code shall buffer the muxed read data into a register
// in the SETUP cycle (PSEL & !PENABLE)
// clk : __--__--__--__--__--__--__--__--__--__--
// psel : ___--------_____________________________
// penable : _______----_____________________________
// pwrite : ________________________________________
// reg update : ___----_________________________________
//
always @(posedge clk) begin
// fill
// your
// code
// here
end
// output assignments
assign pready_o = 1'b1;
assign prdata_o = rdata;
assign pslverr_o = 1'b0;
assign src_addr_o = src_addr;
assign dst_addr_o = dst_addr;
assign byte_len_o = byte_len;
assign start_o = start;
endmodule
// DMAC/RTL/filelist.f
-sverilog $LAB_PATH/RTL/DMAC_CFG.sv
// DMAC/SIM/TB/AXI_INTF.sv
interface APB (
input clk
);
logic psel;
logic penable;
logic [31:0] paddr;
logic pwrite;
logic [31:0] pwdata;
logic pready;
logic [31:0] prdata;
logic pslverr;
modport master (
input clk,
input pready, prdata, pslverr,
output psel, penable, paddr, pwrite, pwdata
);
task init();
psel = 1'b0;
penable = 1'b0;
paddr = 32'd0;
pwrite = 1'b0;
pwdata = 32'd0;
endtask
task write(input int addr,
input int data);
#1
psel = 1'b1;
penable = 1'b0;
paddr = addr;
pwrite = 1'b1;
pwdata = data;
@(posedge clk);
#1
penable = 1'b1;
@(posedge clk);
while (pready==1'b0) begin
@(posedge clk);
end
psel = 1'b0;
penable = 1'b0;
paddr = 'hX;
pwrite = 1'bx;
pwdata = 'hX;
endtask
task read(input int addr,
output int data);
#1
psel = 1'b1;
penable = 1'b0;
paddr = addr;
pwrite = 1'b0;
pwdata = 'hX;
@(posedge clk);
#1
penable = 1'b1;
@(posedge clk);
while (pready==1'b0) begin
@(posedge clk);
end
data = prdata;
psel = 1'b0;
penable = 1'b0;
paddr = 'hX;
pwrite = 1'bx;
pwdata = 'hX;
endtask
endinterface
// DMAC/SIM/TB/DMAC_CFG_TB.sv
`define SRC_ADDR 32'h100
`define DST_ADDR 32'h104
`define LEN_ADDR 32'h108
`define STAT_ADDR 32'h110
`define START_ADDR 32'h10c
`define TIMEOUT_CYCLE 10000000
module DMAC_CFG_TB ();
reg clk;
reg rst_n;
// clock generation
initial begin
clk = 1'b0;
forever #10 clk = !clk;
end
// reset generation
initial begin
rst_n = 1'b0; // active at time 0
repeat (3) @(posedge clk); // after 3 cycles,
rst_n = 1'b1; // release the reset
end
// enable waveform dump
initial begin
$dumpvars(0, u_DUT);
$dumpfile("dump.vcd");
end
// timeout
initial begin
#`TIMEOUT_CYCLE $display("Timeout!");
$finish;
end
APB apb_if (.clk(clk));
reg [31:0] test_vector;
initial begin
int data;
apb_if.init();
@(posedge rst_n); // wait for a release of the reset
repeat (10) @(posedge clk); // wait another 10 cycles
apb_if.read(32'h0, data);
\$display("---------------------------------------------------");
\$display("IP version: %x", data);
if (data!=='h0001_2024)
\$display("Wrong IP version");
\$display("---------------------------------------------------");
\$display("---------------------------------------------------");
\$display("Reset value test");
\$display("---------------------------------------------------");
apb_if.read(`SRC_ADDR, data);
if (data===0)
\$display("DMA_SRC(pass): %x", data);
else begin
\$display("DMA_SRC(fail): %x", data);
@(posedge clk);
\$finish;
end
apb_if.read(`DST_ADDR, data);
if (data===0)
\$display("DMA_DST(pass): %x", data);
else begin
\$display("DMA_DST(fail): %x", data);
@(posedge clk);
\$finish;
end
apb_if.read(`LEN_ADDR, data);
if (data===0)
\$display("DMA_LEN(pass): %x", data);
else begin
\$display("DMA_LEN(fail): %x", data);
@(posedge clk);
\$finish;
end
apb_if.read(`STAT_ADDR, data);
if (data===1)
\$display("DMA_STATUS(pass): %x", data);
else begin
\$display("DMA_STATUS(fail): %x", data);
@(posedge clk);
\$finish;
end
\$display("---------------------------------------------------");
\$display("Configuration test");
\$display("---------------------------------------------------");
test_vector = 32'h1000;
apb_if.write(`SRC_ADDR, test_vector);
apb_if.read(`SRC_ADDR, data);
if (data===test_vector)
\$display("DMA_SRC(pass): %x", data);
else begin
\$display("DMA_SRC(fail): %x", data);
@(posedge clk);
\$finish;
end
test_vector = 32'h2000;
apb_if.write(`DST_ADDR, test_vector);
apb_if.read(`DST_ADDR, data);
if (data===test_vector)
\$display("DMA_DST(pass): %x", data);
else begin
\$display("DMA_DST(fail): %x", data);
@(posedge clk);
\$finish;
end
test_vector = 32'h100;
apb_if.write(`LEN_ADDR, test_vector);
apb_if.read(`LEN_ADDR, data);
if (data===test_vector)
\$display("DMA_LEN(pass): %x", data);
else begin
\$display("DMA_LEN(fail): %x", data);
@(posedge clk);
\$finish;
end
\$display("---------------------------------------------------");
\$display("DMA start");
\$display("---------------------------------------------------");
test_vector = 32'h1;
apb_if.write(`START_ADDR, test_vector);
\$display("---------------------------------------------------");
\$display("Wait for a DMA completion");
\$display("---------------------------------------------------");
data = 0;
while (data != 1) begin
apb_if.read(`STAT_ADDR, data);
repeat (100) @(posedge clk);
\$write(".");
end
\$display("");
@(posedge clk);
\$display("---------------------------------------------------");
\$display("DMA completed");
\$display("---------------------------------------------------");
\$finish;
end
DMAC_CFG u_DUT (
.clk (clk),
.rst_n (rst_n),
// APB interface
.psel_i (apb_if.psel),
.penable_i (apb_if.penable),
.paddr_i (apb_if.paddr[11:0]),
.pwrite_i (apb_if.pwrite),
.pwdata_i (apb_if.pwdata),
.pready_o (apb_if.pready),
.prdata_o (apb_if.prdata),
.pslverr_o (apb_if.pslverr),
.src_addr_o (/* FLOATING */),
.dst_addr_o (/* FLOATING */),
.byte_len_o (/* FLOATING */),
.start_o (/* FLOATING */),
.done_i (1'b1)
);
endmodule
// DMAC/SIM/TB/filelist.f
-sverilog $LAB_PATH/SIM/TB/timescale.v
-sverilog $LAB_PATH/SIM/TB/AXI_INTF.sv
-sverilog $LAB_PATH/SIM/TB/DMAC_CFG_TB.sv
// DMAC/SIM/TB/timescale.v
`timescale 1ns/1ps
|
bd4c0593d3490af485b2a9db0099dae0
|
{
"intermediate": 0.4368215501308441,
"beginner": 0.4042208194732666,
"expert": 0.15895763039588928
}
|
44,102
|
i have a button with an id sound. Give me the javascript to play a sound with the file name 'https://cdn.glitch.global/6675993b-1800-4b5c-aa90-9a81cff1e845/bc19f19f130-2cbc-4284-b16c-b7d8ab121883.wav?v=1711459365896' when the button is clicked
|
a73d5ecb669817178063123150fe486f
|
{
"intermediate": 0.4584246575832367,
"beginner": 0.20411735773086548,
"expert": 0.33745795488357544
}
|
44,103
|
generate code for my ai saas
|
bc6fbba345f4e1cd7869bd0ef9d84933
|
{
"intermediate": 0.2703869342803955,
"beginner": 0.22781480848789215,
"expert": 0.5017983317375183
}
|
44,104
|
The following function creates a JSON object from an Excel file. The problem is that some columns in the sheet contain both int and string values, so the JSON serializes the ints as ints and the strings as strings. Instead, I want it to check whether a column's values are mixed between int and string and, if so, make the whole column strings, so that 0 becomes "0" in the JSON.
import pandas as pd
import json
import os
def excel_to_json(excel_file_path):
"""Convert a given Excel file into a Dictionary where each one of the sheets in
the object is represented by a key,value pair.
Args:
excel_file_path (str): path to a single excel file.
Returns:
Dict: A dictionary containing the JSON data.
"""
json_data = {}
xls = pd.ExcelFile(excel_file_path)
for sheet_name in xls.sheet_names:
df = pd.read_excel(excel_file_path, sheet_name=sheet_name)
sheet_json = df.to_dict(orient="list")
json_data[sheet_name] = sheet_json
return json_data
def process_excel_files(directory):
"""Convert all the Excel files in a directory to JSON.
Args:
directory (str): path to the directory containing the Excel files.
"""
output_directory = "database/data_temp/json_files"
if not os.path.exists(output_directory):
os.makedirs(output_directory)
for filename in os.listdir(directory):
if filename.endswith(".xlsx") or filename.endswith(".xls"):
excel_file_path = os.path.join(directory, filename)
json_file_path = os.path.join(
output_directory, f"{os.path.splitext(filename)[0]}.json"
)
json_output = excel_to_json(excel_file_path)
with open(json_file_path, "w") as json_file:
json.dump(json_output, json_file, indent=4)
print(f"Converted '{filename}' to JSON: '{json_file_path}'")
directory = "database/data_temp/excel_files"
process_excel_files(directory)
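A minimal sketch of one way to do this, assuming the rule is: any column whose non-null values mix strings with other types gets cast entirely to strings (the helper name coerce_mixed_columns is invented for illustration):
import pandas as pd

def coerce_mixed_columns(df: pd.DataFrame) -> pd.DataFrame:
    # For each column, look at the Python types of the non-null values.
    for col in df.columns:
        types = {type(v) for v in df[col].dropna()}
        # A column mixing strings with any other type is cast entirely to str,
        # so an int 0 serializes as "0".
        if str in types and len(types) > 1:
            df[col] = df[col].apply(lambda v: v if pd.isna(v) else str(v))
    return df

# Hook it into excel_to_json right after read_excel:
# df = coerce_mixed_columns(df)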
|
bc6db423302fedabd0e001d5fe0d1be3
|
{
"intermediate": 0.6057863235473633,
"beginner": 0.2188241183757782,
"expert": 0.1753895878791809
}
|
44,105
|
In C++, what is the purpose to use explicit keyword?
|
63dca3d763f553e69914b3bb59f51b84
|
{
"intermediate": 0.42740124464035034,
"beginner": 0.30230119824409485,
"expert": 0.2702975869178772
}
|
44,106
|
hi
|
e0e277ca079895703776b1eb0f3b15ea
|
{
"intermediate": 0.3246487081050873,
"beginner": 0.27135494351387024,
"expert": 0.40399640798568726
}
|
44,107
|
please explain the following like i am a layperson
|
d3af431ff8c4c11701cc69761873796c
|
{
"intermediate": 0.37345048785209656,
"beginner": 0.4007735252380371,
"expert": 0.22577600181102753
}
|
44,108
|
does this sound good "Duties included, but not limited to: framing walls, finishing/detail work, assisting with window and door installations."
|
f34e98615734957489dc0aa70363916c
|
{
"intermediate": 0.32702699303627014,
"beginner": 0.3495163917541504,
"expert": 0.3234565854072571
}
|
44,109
|
write me a code to convert coordinates from yolo to coco
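A minimal sketch under the usual conventions — YOLO boxes as normalized (x_center, y_center, width, height), COCO boxes as absolute (x_min, y_min, width, height); the function name and the example image size are illustrative:
def yolo_to_coco(box, img_w, img_h):
    # YOLO: normalized (x_center, y_center, width, height).
    xc, yc, w, h = box
    abs_w = w * img_w
    abs_h = h * img_h
    # COCO: absolute (x_min, y_min, width, height).
    x_min = xc * img_w - abs_w / 2
    y_min = yc * img_h - abs_h / 2
    return [x_min, y_min, abs_w, abs_h]

print(yolo_to_coco((0.5, 0.5, 0.2, 0.4), 640, 480))  # [256.0, 144.0, 128.0, 192.0]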
|
c310156c3b0de49056f970bbfb458bfb
|
{
"intermediate": 0.45270127058029175,
"beginner": 0.14757968485355377,
"expert": 0.3997190594673157
}
|
44,110
|
hi
|
4f4594fafb571e0c4ff22c42302034da
|
{
"intermediate": 0.3246487081050873,
"beginner": 0.27135494351387024,
"expert": 0.40399640798568726
}
|
44,111
|
In this code, the result of 'Parallel edges present' comes out as 'T' for all the edges. The code needs to check each edge's (u, v) node pair against all other edge pairs among the 40 edges: if the pair is repeated, the result should be 'T', else 'F'.
Number of nodes: 21
Number of edges: 40
List of nodes: ['M7', 'Vbias', '0', 'M6', 'VOUT', 'M4', 'net1', 'M1', 'net2', 'IN2', 'M0', 'net3', 'IN1', 'M5', 'vdd!', 'M3', 'M2', 'C1', 'C0', 'I0', 'V1']
List of edges: [('M7', 'Vbias'), ('M7', 'Vbias'), ('M7', '0'), ('M7', '0'), ('Vbias', 'M6'), ('Vbias', 'M4'), ('Vbias', 'I0'), ('0', 'M6'), ('0', 'M6'), ('0', 'M4'), ('0', 'M4'), ('0', 'M1'), ('0', 'M0'), ('0', 'C1'), ('0', 'V1'), ('M6', 'VOUT'), ('VOUT', 'M5'), ('VOUT', 'C1'), ('VOUT', 'C0'), ('M4', 'net1'), ('net1', 'M1'), ('net1', 'M0'), ('M1', 'net2'), ('M1', 'IN2'), ('net2', 'M5'), ('net2', 'M3'), ('net2', 'C0'), ('IN2', 'V1'), ('M0', 'net3'), ('M0', 'IN1'), ('net3', 'M3'), ('net3', 'M2'), ('net3', 'M2'), ('M5', 'vdd!'), ('M5', 'vdd!'), ('vdd!', 'M3'), ('vdd!', 'M3'), ('vdd!', 'M2'), ('vdd!', 'M2'), ('vdd!', 'I0')]
def get_edge_features(G):
edge_features = []
edge_list = list(G.edges(data=True))
# Check and adjust the edge pairing order
for u, v, data in edge_list:
device_type_u = G.nodes[u].get('vertex_type', 'unknown')
device_type_v = G.nodes[v].get('vertex_type', 'unknown')
if device_type_v in ['NMOS', 'PMOS', 'R', 'L', 'C', 'I', 'V'] and device_type_u not in ['NMOS', 'PMOS', 'R', 'L', 'C', 'I', 'V']:
# Swap u and v if device is at the beginning
u, v = v, u
device = u
net = v
label_name = data.get('label', 'unknown')
terminal_name = label_name[0]
edge_pairs = f"({u}, {v})"
edge_color = edge_colors.get(terminal_name, 'black')
parallel_edges = len([key for key in G[u][v] if G[u][v][key]['label'] == label_name and key != (u, v)]) > 0
parallel_edges_present = 'T' if parallel_edges else 'F'
features = {
'device_type': G.nodes[device].get('vertex_type', 'unknown'),
'device': device,
'terminal_name': terminal_name,
'Edge pairs': edge_pairs,
'edge_colors': edge_color,
'Parallel edges present': parallel_edges_present
}
edge_features.append(features)
return edge_features
def netlist_to_graph(netlist_content):
# Create an empty graph
G = nx.MultiGraph()
# Call the function to add vertex features
add_vertex_components_features(G, components)
add_vertex_nets_features(G, components)
# Print the number of nodes and edges
print("Number of nodes:", G.number_of_nodes())
print("Number of edges:", G.number_of_edges())
# Print the list of nodes and edges
print("List of nodes:", G.nodes())
print("List of edges:", G.edges())
edge_features = get_edge_features(G)
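For reference, in a networkx MultiGraph the pair check can lean on number_of_edges(u, v), which counts every parallel edge between two endpoints; a self-contained sketch with a toy graph taken from the edge list above:
import networkx as nx

G = nx.MultiGraph()
G.add_edges_from([("M7", "Vbias"), ("M7", "Vbias"), ("M7", "0"), ("Vbias", "M6")])

for u, v in G.edges():
    # number_of_edges(u, v) counts all edges between the two endpoints,
    # so a count above 1 means a parallel edge exists.
    flag = "T" if G.number_of_edges(u, v) > 1 else "F"
    print(u, v, flag)  # both (M7, Vbias) edges print T, the rest F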
|
40dd3b0503a3a3efd1821b7d9e24ca1b
|
{
"intermediate": 0.3186406195163727,
"beginner": 0.3440687358379364,
"expert": 0.33729061484336853
}
|
44,112
|
please help: : ""PYPI MANAGER
neural-compressor
Error when performing an action.
Reason given:
Error: ERROR: Could not open requirements file: [Errno 13] Permission denied: 'C:\\Users\\bower\\AppData\\Local\\Temp\\tmp4lm2grnh'""
|
80d763f4800d15f83b5ecafd2f515a8c
|
{
"intermediate": 0.2806510329246521,
"beginner": 0.11581157892942429,
"expert": 0.6035373210906982
}
|
44,113
|
generate me an Astra DB time-series schema
|
d9748eee610cdf76cfdec8463369ae2d
|
{
"intermediate": 0.3636859059333801,
"beginner": 0.2674926519393921,
"expert": 0.3688214421272278
}
|
44,114
|
generate an Astra DB schema for a TF2 market that holds data for TF2 skin prices across different digital markets
|
9323192636611989e0676df348f0cdda
|
{
"intermediate": 0.27256378531455994,
"beginner": 0.09441225975751877,
"expert": 0.6330239772796631
}
|
44,115
|
In the edge features I have '40' edge features for the respective edge pairs. After computing all the other features, we should be able to compute the value for ''Parallel edges present': parallel_edges_present' at the edge_features.append(features) step, i.e. whether the device has the same repeated edge pair or not.
def get_edge_features(G):
edge_features = []
edge_list = list(G.edges(data=True))
rearranged_edges = []
# Check and adjust the edge pairing order
for u, v, data in edge_list:
device_type_u = G.nodes[u].get('vertex_type', 'unknown')
device_type_v = G.nodes[v].get('vertex_type', 'unknown')
if device_type_v in ['NMOS', 'PMOS', 'R', 'L', 'C', 'I', 'V'] and device_type_u not in ['NMOS', 'PMOS', 'R', 'L', 'C', 'I', 'V']:
# Swap u and v if device is at the beginning
u, v = v, u
edge_pair = (u, v)
print("re arranged edge_pair", edge_pair)
device = u
net = v
label_name = data.get('label', 'unknown')
terminal_name = label_name[0]
edge_pairs = f"({u}, {v})"
edge_color = edge_colors.get(terminal_name, 'black')
parallel_edges_present = 'F'
if edge_pair in rearranged_edges:
parallel_edges_present = 'T'
features = {
'device_type': G.nodes[device].get('vertex_type', 'unknown'),
'device': device,
'terminal_name': data.get('label'),
'Edge pairs': edge_pairs,
'edge_colors': edge_color,
'Parallel edges present': parallel_edges_present
}
edge_features.append(features)
rearranged_edges.append(edge_pair)
return edge_features
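One likely gotcha in the loop above: it marks 'T' only for the second and later occurrences of a pair, because rearranged_edges is filled as the loop goes. A hedged two-pass sketch that flags every member of a repeated pair, assuming the pairs have already been rearranged device-first:
from collections import Counter

def mark_parallel(edge_pairs):
    # Pass 1: count how often each (u, v) pair occurs.
    counts = Counter(edge_pairs)
    # Pass 2: every occurrence of a repeated pair gets 'T'.
    return ["T" if counts[p] > 1 else "F" for p in edge_pairs]

pairs = [("M7", "Vbias"), ("M7", "Vbias"), ("M7", "0")]
print(mark_parallel(pairs))  # ['T', 'T', 'F']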
|
b7244cb4b2f54487883996b6db16a8ce
|
{
"intermediate": 0.37637755274772644,
"beginner": 0.4305262565612793,
"expert": 0.19309623539447784
}
|
44,116
|
generate an Astra DB schema for a TF2 market that holds time-series data for TF2 skin prices across different digital markets such as Skinport, DMarket, and Steam
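A hedged sketch of one plausible layout, expressed through the DataStax Python driver; the keyspace, table, and column names are invented for illustration, and the Astra connection details are deliberately left commented out:
from cassandra.cluster import Cluster  # DataStax Python driver

CREATE_TABLE = """
CREATE TABLE IF NOT EXISTS tf2_market.skin_prices (
    skin_name   text,
    market      text,       -- e.g. skinport, dmarket, steam
    observed_at timestamp,
    price       decimal,
    currency    text,
    PRIMARY KEY ((skin_name, market), observed_at)
) WITH CLUSTERING ORDER BY (observed_at DESC);
"""

# Partitioning by (skin_name, market) keeps each skin/market price history in
# one partition, ordered newest-first for typical time-series reads.
# cluster = Cluster(cloud={"secure_connect_bundle": "..."})  # Astra bundle path elided
# session = cluster.connect()
# session.execute(CREATE_TABLE)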
|
48e7cb55d4e98e6863384d3348173ae3
|
{
"intermediate": 0.33045458793640137,
"beginner": 0.12820935249328613,
"expert": 0.5413360595703125
}
|
44,117
|
how to get the Hugging Face transformers version
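A quick check, runnable as-is:
import transformers

print(transformers.__version__)  # prints the installed version string
# Or from a shell: pip show transformers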
|
874cce1895182e6d8dc4114894215287
|
{
"intermediate": 0.23868687450885773,
"beginner": 0.21553291380405426,
"expert": 0.5457801818847656
}
|
44,118
|
How can I run an application locally with gradle?
|
593f1fedfc9d2698a8665e329270b135
|
{
"intermediate": 0.49405330419540405,
"beginner": 0.14685314893722534,
"expert": 0.3590935170650482
}
|
44,119
|
should i install the non-installed servers? what's the use? ""Microsoft Windows [Version 10.0.22621.3296]
(c) Microsoft Corporation. All rights reserved.
C:\Users\bower>jupyter notebook
[I 2024-03-26 22:14:37.318 ServerApp] jupyter_lsp | extension was successfully linked.
[I 2024-03-26 22:14:37.326 ServerApp] jupyter_server_terminals | extension was successfully linked.
[I 2024-03-26 22:14:37.338 ServerApp] jupyterlab | extension was successfully linked.
[I 2024-03-26 22:14:37.346 ServerApp] notebook | extension was successfully linked.
[I 2024-03-26 22:14:37.877 ServerApp] notebook_shim | extension was successfully linked.
[I 2024-03-26 22:14:37.919 ServerApp] notebook_shim | extension was successfully loaded.
[I 2024-03-26 22:14:37.923 ServerApp] jupyter_lsp | extension was successfully loaded.
[I 2024-03-26 22:14:37.924 ServerApp] jupyter_server_terminals | extension was successfully loaded.
[I 2024-03-26 22:14:37.927 LabApp] JupyterLab extension loaded from C:\Users\bower\AppData\Local\Programs\Python\Python311\Lib\site-packages\jupyterlab
[I 2024-03-26 22:14:37.927 LabApp] JupyterLab application directory is C:\Users\bower\AppData\Local\Programs\Python\Python311\share\jupyter\lab
[I 2024-03-26 22:14:37.928 LabApp] Extension Manager is 'pypi'.
[I 2024-03-26 22:14:38.315 ServerApp] jupyterlab | extension was successfully loaded.
[I 2024-03-26 22:14:38.321 ServerApp] notebook | extension was successfully loaded.
[I 2024-03-26 22:14:38.322 ServerApp] Serving notebooks from local directory: C:\Users\bower
[I 2024-03-26 22:14:38.322 ServerApp] Jupyter Server 2.13.0 is running at:
[I 2024-03-26 22:14:38.322 ServerApp] http://localhost:8888/tree?token=63212d384d99b1ed0041fb8f9d875740472e851c353b83c1
[I 2024-03-26 22:14:38.323 ServerApp] http://127.0.0.1:8888/tree?token=63212d384d99b1ed0041fb8f9d875740472e851c353b83c1
[I 2024-03-26 22:14:38.323 ServerApp] Use Control-C to stop this server and shut down all kernels (twice to skip confirmation).
[C 2024-03-26 22:14:38.369 ServerApp]
To access the server, open this file in a browser:
file:///C:/Users/bower/AppData/Roaming/jupyter/runtime/jpserver-12468-open.html
Or copy and paste one of these URLs:
http://localhost:8888/tree?token=63212d384d99b1ed0041fb8f9d875740472e851c353b83c1
http://127.0.0.1:8888/tree?token=63212d384d99b1ed0041fb8f9d875740472e851c353b83c1
[I 2024-03-26 22:14:39.687 ServerApp] Skipped non-installed server(s): bash-language-server, dockerfile-language-server-nodejs, javascript-typescript-langserver, jedi-language-server, julia-language-server, pyright, python-language-server, python-lsp-server, r-languageserver, sql-language-server, texlab, typescript-language-server, unified-language-server, vscode-css-languageserver-bin, vscode-html-languageserver-bin, vscode-json-languageserver-bin, yaml-language-server""
|
4b45da552797f0a048f5815aff10730e
|
{
"intermediate": 0.41242527961730957,
"beginner": 0.37691643834114075,
"expert": 0.21065829694271088
}
|
44,120
|
What here controls the spacing between the lines? I need to increase it.
<div class="container">
<div class="page-success">
<div class="box-title text-center" style="text-align: center;">${caption}</div>
<table class="table form-pay-table table-success-pay">
|
5cf6386b42fe27a9b2c3a69765f17b99
|
{
"intermediate": 0.30600911378860474,
"beginner": 0.4472801387310028,
"expert": 0.24671074748039246
}
|
44,121
|
podman run --name postgres-checkmarx -e POSTGRES_PASSWORD=postgrescheckmarx -e POSTGRES_DB=checkmarx -d -p 5433:5432 postgres
how can I specify a schema here?
|
c88a305aaf7ae020eb94ad119623f952
|
{
"intermediate": 0.5014427304267883,
"beginner": 0.2631121277809143,
"expert": 0.23544514179229736
}
|
44,122
|
Invoke-RestMethod -Uri $url -Method Post -Body $body -ContentType "application/json" -OutFile "$downloadFolder\image.jpg"
Set a 1-minute delay and retry 3 times.
|
ca7b8c3d5c1000f2890e1986ce6d4a02
|
{
"intermediate": 0.33285224437713623,
"beginner": 0.39541950821876526,
"expert": 0.2717282474040985
}
|
44,123
|
Invoke-RestMethod -Uri $url -Method Post -Body $body -ContentType "application/json" -OutFile "$downloadFolder\image.jpg"
Set a maximum wait time of 1 minute.
|
f77c87bfbe8e9b818b63e50f33118453
|
{
"intermediate": 0.3801344633102417,
"beginner": 0.33127960562705994,
"expert": 0.2885858416557312
}
|
44,124
|
def get_most_similar_question(userText, history):
print(userText)
inp_emb = retrieval_model.encode([userText])
corr = np.inner(inp_emb, data_emb)
flat_list = [item for sublist in corr for item in sublist]
top_1_idx = np.argsort(flat_list)[-20:]
top_1_values = [flat_list[i] for i in top_1_idx]
print('fdfd',top_1_values)
# intermediate_model(top_1_idx)
# data_emb_1 = retrieval_model.encode(top_1_values)
# corr = np.inner(inp_emb, data_emb_1)
# flat_list = [item for sublist in corr for item in sublist]
# top_1_idx = np.argsort(flat_list)[-2:]
# top_1_values = [flat_list[i] for i in top_1_idx]
# print('fava',top_1_values)
if top_1_values[0] < 0.7:
return predict(userText, history)
else:
n_data = new_df.iloc[top_1_idx, [1]]
df_html_with_sql = n_data.to_html(index=False)
return df_html_with_sql
After calculating the top-20 matches, I want to create embeddings for those 20 entries again, re-compute their similarity with userText, and return the top result. Make the required changes in the method.
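A hedged sketch of that second stage, with the model passed in explicitly; note the commented-out attempt above encodes top_1_values (similarity scores), whereas the re-encoding has to run on the candidate texts themselves, e.g. the new_df rows selected by top_1_idx:
import numpy as np

def rerank(model, user_text, cand_texts, cand_idx):
    # Stage 2: re-encode only the top-k candidate texts and re-score them.
    query_emb = model.encode([user_text])
    cand_emb = model.encode(list(cand_texts))
    scores = np.inner(query_emb, cand_emb)[0]
    best = int(np.argmax(scores))
    # Return the original row index of the winner plus its score.
    return cand_idx[best], float(scores[best])

# e.g. rerank(retrieval_model, userText, new_df.iloc[top_1_idx, 0], top_1_idx)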
|
ae44e53e40bae60e488ff59c55bcd897
|
{
"intermediate": 0.4277929961681366,
"beginner": 0.2280111163854599,
"expert": 0.3441959023475647
}
|
44,125
|
I am making a c++ sdl based game engine, currently doing the EventManager, and I need your help finishing it.
I currently have the design on how it will be, it is a Pub/Sub architecture, where the EventManager is the message broker, and the other managers or any other class can subscribe for an event and then when it is published the event will be pushed to subscriber.
class EventManager
{
public:
~EventManager();
EventManager(const EventManager&) = delete;
EventManager operator=(const EventManager&) = delete;
static EventManager& GetInstance() noexcept;
template<typename T>
void Subscribe(std::function<void(std::shared_ptr<T>)> handler);
void Publish(std::shared_ptr<Event> event);
void Update();
//...
}
I have everything set to finish it but I got a question, in original SDL_Event, each event could have at least one type but can have multiple types of the same event. I want to change this to make one sub event class per type, so for example here:
SDL_KeyboardEvent
SDL_KEYDOWN
SDL_KEYUP
I would make 2 sub event classes, KeyboardKeyDownEvent and KeyboardKeyUpEvent, and then I would handle them in two different methods:
And the usage would be like this:
InputManager(EventManager& eventManager) {
// Subscribe to specific event types
eventManager.Subscribe<KeyboardKeyDownEvent>([this](std::shared_ptr<Event> event){ this->HandleKeyboardKeyDownEvent(std::static_pointer_cast<KeyboardKeyDownEvent>(event)); });
eventManager.Subscribe<KeyboardKeyUpEvent>([this](std::shared_ptr<Event> event){ this->HandleKeyboardKeyUpEvent(std::static_pointer_cast<KeyboardKeyUpEvent>(event)); });
}
void HandleKeyboardKeyDownEvent(std::shared_ptr<KeyboardEvent> event) {
// Process the keyboard down event
}
void HandleKeyboardKeyUpEvent(std::shared_ptr<KeyboardEvent> event) {
// Process the keyboard down event
}
Is this a good design or not?
|
297adb501afbc0e9e08aef56c664bac0
|
{
"intermediate": 0.5783541202545166,
"beginner": 0.29391494393348694,
"expert": 0.12773096561431885
}
|
44,126
|
C:\Users\bower>conda install -c intel intel_extension_for_transformers
'conda' is not recognized as an internal or external command,
operable program or batch file.
C:\Users\bower>
|
4a1b0793cc4de5dd4c7f81e2d0a4e7dd
|
{
"intermediate": 0.3791077435016632,
"beginner": 0.2688116133213043,
"expert": 0.3520806133747101
}
|
44,127
|
what is this github page about? : ""
## Before you start
ITREX LLM C++ Runtime (`Neural Speed`) already supports some popular models like `LLAMA`, `GPT-J`, `GPT-NEOX`, `DOLLY`, etc. These LLMs have similar architectures, and some of them share the same architecture (`DOLLY` and `GPT-NEOX`). Before adding a new model, you can check whether its architecture (from the Huggingface `config.json`) is in our [supported list](./neural_speed/models/model_utils/model_types.h#L68).
However, LLM inference is complicated. A model may have its own: 1. special tokenizer (or vocab); 2. architecture (or forward pipeline); 3. operators (or kernels). Generally speaking, the first and second points come up frequently for transformer LLMs. I will show you how to run a new model as quickly as possible when your model has none of the problems above, or only problem 1. The next sections will discuss problem 2; problem 3 is beyond the scope of this document.
For simplicity, we take [polyglot](https://huggingface.co/EleutherAI/polyglot-ko-5.8b) as the example model. It has the same architecture as `GPT-NEOX` but only fewer layers.
Firstly, we need to add its temp buffer in its [related model-arch header file](neural_speed/models/gptneox/gptneox.h) and [re-compile](README.md#Install).
|
b83b50b3ddf2d8c142369673178148d3
|
{
"intermediate": 0.2547188401222229,
"beginner": 0.19333551824092865,
"expert": 0.5519456267356873
}
|
44,128
|
Explain what this GitHub page is documenting like I am a layperson: ""## Advanced Usage
### One-click scripts
Argument description of run.py ([supported MatMul combinations](#supported-matrix-multiplication-data-types-combinations)):
| Argument | Description |
| -------------- | --------------------------------------------------------------------- |
| model | Directory containing model file or model id: String |
| --weight_dtype | Data type of quantized weight: int4/int8/fp8(=fp8_e4m3)/fp8_e5m2/fp4(=fp4e2m1)/nf4 (default int4) |
| --alg | Quantization algorithm: sym/asym (default sym) |
| --group_size | Group size: Int, 32/128/-1 (per channel) (default: 32) |
| --scale_dtype | Data type of scales: fp32/bf16/fp8 (default fp32) |
| --compute_dtype | Data type of Gemm computation: int8/bf16/fp16/fp32 (default: fp32) |
| --use_ggml | Enable ggml for quantization and inference |
| -p / --prompt | Prompt to start generation with: String (default: empty) |
| -f / --file | Path to a text file containing the prompt (for large prompts) |
| -n / --n_predict | Number of tokens to predict: Int (default: -1, -1 = infinity) |
| -t / --threads | Number of threads to use during computation: Int (default: 56) |
| -b / --batch_size_truncate | Batch size for prompt processing: Int (default: 512) |
| -c / --ctx_size | Size of the prompt context: Int (default: 512, can not be larger than specific model's context window length) |
| -s / --seed | NG seed: Int (default: -1, use random seed for < 0) |
| --repeat_penalty | Penalize repeat sequence of tokens: Float (default: 1.1, 1.0 = disabled) |
| --color | Colorise output to distinguish prompt and user input from generations |
| --keep | Number of tokens to keep from the initial prompt: Int (default: 0, -1 = all) |
| --shift-roped-k | Use [ring-buffer](./docs/infinite_inference.md#shift-rope-k-and-ring-buffer) and thus do not re-computing after reaching ctx_size (default: False) |
| --token | Access token ID for models that require it (e.g: LLaMa2, etc..) |
### 1. Conversion and Quantization
Neural Speed assumes the compatible model format as [llama.cpp](https://github.com/ggerganov/llama.cpp) and [ggml](https://github.com/ggerganov/ggml). You can also convert the model by following the below steps:
|
19cdb77000e45cf2028d68e75db1e353
|
{
"intermediate": 0.41057276725769043,
"beginner": 0.37647703289985657,
"expert": 0.212950199842453
}
|
44,129
|
Rewrite the following as a Vue 3 module
class Queue {
constructor() {
this.items = [];
this.queueRunning = false;
this.playQueued = this.playQueued.bind(this);
this.playQueuedEnded = this.playQueuedEnded.bind(this);
}
isEmpty() {
return this.items.length === 0;
}
addToQueue(item) {
item.fromQueue=true;
item.step = 3;
this.items.push(item);
console.log('adding item to queue: ' + this.items.length, "queueRunning: " + this.queueRunning, this.items);
if (!this.queueRunning) {
console.log('added To Queue, start play cause queue is running ', "queueRunning: " + this.queueRunning);
this.playQueued();
}
}
playQueued() {
this.queueRunning = true;
console.log('playQueued, queue length is: ', this.items.length);
// fifo
var item = this.items.shift();
item.step = 4;
console.log("playQueued trigger eventBus", item.type, item);
this.$eventBus.emit( item.type, item );
}
playQueuedEnded(event){
event.target.removeEventListener('ended', this.playQueuedEnded, true);
if (this.isEmpty()) {
this.queueRunning = false;
console.log(
'playQueuedEnded no more entries in queue: ' + this.items.length,
this.queueRunning,
);
} else {
console.log(
'playQueuedEnded item ended, remaining items: ' + this.items.length,
this.queueRunning,
);
console.log('setting timer for next run: ', this.$config.settings.alerts.queueTimeDistance);
var queueTimeDistanceTimeout = window.setTimeout(
this.playQueued,
this.$config.settings.alerts.queueTimeDistance
);
}
}
}
export default new Queue();
|
4684acf3c776bce8d105076c4dfd63bb
|
{
"intermediate": 0.34681692719459534,
"beginner": 0.3755306303501129,
"expert": 0.27765244245529175
}
|
44,130
|
const groups = ref<{ [key: string]: number[] }>({})
const next = () => {
const groups: { [key: string]: number[] } = {}
for (let i = 1; i <= unref(count); i++) {
const group = (this.$refs['group' + i][0] as Group).generateGroup()
Object.assign(groups, group)
}
this.groups = groups
emit('next-step', groups)
}
how can this be rewritten for Vue 3?
|
70b2e71b1cc8a136bfe73646e0804c01
|
{
"intermediate": 0.3407509922981262,
"beginner": 0.44989636540412903,
"expert": 0.20935267210006714
}
|
44,131
|
Rewrite the following as a Vue 3 module
class Queue {
constructor() {
this.items = [];
this.queueRunning = false;
this.playQueued = this.playQueued.bind(this);
this.playQueuedEnded = this.playQueuedEnded.bind(this);
}
isEmpty() {
return this.items.length === 0;
}
addToQueue(item) {
item.fromQueue=true;
item.step = 3;
this.items.push(item);
console.log('adding item to queue: ' + this.items.length, "queueRunning: " + this.queueRunning, this.items);
if (!this.queueRunning) {
console.log('added To Queue, start play cause queue is running ', "queueRunning: " + this.queueRunning);
this.playQueued();
}
}
playQueued() {
this.queueRunning = true;
console.log('playQueued, queue length is: ', this.items.length);
// fifo
var item = this.items.shift();
item.step = 4;
console.log("playQueued trigger eventBus", item.type, item);
this.$eventBus.emit( item.type, item );
}
playQueuedEnded(event){
event.target.removeEventListener('ended', this.playQueuedEnded, true);
if (this.isEmpty()) {
this.queueRunning = false;
console.log(
'playQueuedEnded no more entries in queue: ' + this.items.length,
this.queueRunning,
);
} else {
console.log(
'playQueuedEnded item ended, remaining items: ' + this.items.length,
this.queueRunning,
);
console.log('setting timer for next run: ', this.$config.settings.alerts.queueTimeDistance);
var queueTimeDistanceTimeout = window.setTimeout(
this.playQueued,
this.$config.settings.alerts.queueTimeDistance
);
}
}
}
Can you turn this into a Vue 3 Composition module?
|
9a880e0bab3d094113ae7b983be8a822
|
{
"intermediate": 0.328203946352005,
"beginner": 0.35181379318237305,
"expert": 0.31998226046562195
}
|
44,132
|
Hi! can you create a simple Linux terminal gtts-cli "hello" Python script using British male English?
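A minimal sketch, with one caveat: gTTS only exposes Google Translate voices, so there is no real male/female switch; the closest knob is the accent, via the tld parameter (co.uk gives British English):
from gtts import gTTS

tts = gTTS(text="hello", lang="en", tld="co.uk")  # British-English accent
tts.save("hello.mp3")
# Then play it from the terminal with any player, e.g. mpg123 hello.mp3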
|
da346bdd4b937428e019d4856cfc3e15
|
{
"intermediate": 0.2563568949699402,
"beginner": 0.45435160398483276,
"expert": 0.28929150104522705
}
|
44,133
|
Hi! can you create a simple Linux terminal gtts-cli "hello" Python script using a British male voice?
|
a924e8ddfef137d76a32cc95271be911
|
{
"intermediate": 0.31384897232055664,
"beginner": 0.36223477125167847,
"expert": 0.3239162266254425
}
|
44,134
|
lua defold script with html5.run()
|
5d47dc75dc499b357363c979a75a46bf
|
{
"intermediate": 0.34348204731941223,
"beginner": 0.4094332158565521,
"expert": 0.24708469212055206
}
|
44,135
|
fileallocation.c:
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <stddef.h>
#define MAX 100
#define FREE 0
#include "LinkedList.h"
typedef struct File
{
char name[21];
int size;
int start_block;
int end_block;
int *indices;
int length;
} File;
typedef struct Directory
{
File f[MAX];
int size;
} Directory;
typedef struct Block
{
int id;
unsigned status : 1;
struct Block *next_file_blk;
} Block;
void init_file(File *const);
void init_dir(Directory *const);
void init_block(Block *const);
void contiguous(File *const, const int, const int, const int);
void linked(File *const, const int, const int, const int);
void indexed(File *const, const int, const int, const int);
void init_file(File *const f)
{
strcpy(f->name, "");
f->start_block = -1;
f->end_block = -1;
f->size = -1;
f->indices = NULL;
f->length = -1;
}
void init_dir(Directory *const d)
{
d->size = 0;
for (int i = 0; i < MAX; i++)
init_file(&(d->f[i]));
}
void init_block(Block *const b)
{
b->status = FREE;
b->id = -1;
b->next_file_blk = NULL;
}
void contiguous(File *const f, const int n_files, const int blk_size, const int num_blk)
{
List list = createEmptyList();
Block b;
init_block(&b);
Node *ptr, *tmp;
int blocks_visited;
int flag, id;
int counter, blk_req;
int start, end;
for (int i = 0; i < num_blk; i++)
{
b.id = i;
insertLast(list, b);
}
for (int i = 0; i < n_files; i++)
{
blocks_visited = 0;
flag = 0;
blk_req = f[i].size / blk_size;
if (f[i].size % blk_size)
{
blk_req++;
}
while (blocks_visited < num_blk && !flag)
{
id = random() % num_blk;
ptr = search(list, id);
if (ptr->d.status != FREE)
{
blocks_visited++;
continue;
}
counter = 0;
start = ptr->d.id;
tmp = ptr;
while (tmp)
{
if (tmp->d.status == FREE)
{
counter++;
if (counter == blk_req)
{
flag = 1;
break;
}
}
else
{
break;
}
tmp = tmp->next;
}
if (flag)
{
f[i].start_block = start;
f[i].length = blk_req;
tmp = ptr;
for (int i = 0; i < blk_req; i++)
{
tmp->d.status = 1;
tmp = tmp->next;
}
}
else
{
blocks_visited++;
}
}
if (!flag)
printf(" Unable to allocate File : %s !! \n", f[i].name);
}
printf("\n\t\tDIRECTORY STRUCTURE\n");
printf(" +----------------------+-------+--------+\n");
printf(" | File Name | Start | Length |\n");
printf(" +----------------------+-------+--------+\n");
for (int i = 0; i < n_files; i++)
{
if (f[i].length > 0)
{
printf(" | %-20s | %-5d | %-6d |\n", f[i].name, f[i].start_block, f[i].length);
}
}
printf(" +----------------------+-------+--------+\n");
}
void linked(File *const f, const int n_files, const int blk_size, const int num_blk)
{
List list = createEmptyList();
Block b;
init_block(&b);
Node *ptr, *tmp, *left, *right;
int blocks_visited, flag, id, counter, blk_req;
for (int i = 0; i < num_blk; i++)
{
b.id = i;
insertLast(list, b);
}
for (int i = 0; i < n_files; i++)
{
counter = 0;
blocks_visited = 0;
flag = 0;
blk_req = f[i].size / blk_size;
if (f[i].size % blk_size)
{
blk_req++;
}
int *allocated = (int *)calloc(blk_req, sizeof(int));
while (blocks_visited < num_blk && !flag)
{
id = random() % num_blk;
ptr = search(list, id);
if (ptr->d.status != FREE)
{
blocks_visited++;
continue;
}
ptr -> d.status = 1;
allocated[counter++] = id;
if (counter == blk_req)
flag = 1;
}
if (!flag)
{
printf(" Unable To Allocate File : %s !! \n\n", f[i].name);
for(int i = 0; i < counter; i++)
{
ptr = search(list, allocated[i]);
ptr -> d.status = FREE;
}
free(allocated);
}
else
{
f[i].start_block = allocated[0];
f[i].end_block = allocated[blk_req - 1];
f[i].length = blk_req;
for (int i = 0; i < blk_req - 1; i++)
{
left = search(list, allocated[i]);
right = search(list, allocated[i + 1]);
left->d.next_file_blk = &(right->d);
left->d.status = 1;
}
right->d.next_file_blk = NULL;
free(allocated);
}
}
printf("\n\t\tDIRECTORY STRUCTURE\n");
printf(" +----------------------+-------------+-----------+\n");
printf(" | File Name | Start Block | End Block |\n");
printf(" +----------------------+-------------+-----------+\n");
for (int i = 0; i < n_files; i++)
{
if (f[i].end_block >= 0)
{
printf(" | %-20s | %-2d | %-2d |\n",
f[i].name, f[i].start_block, f[i].end_block);
}
}
printf(" +----------------------+-------------+-----------+\n");
printf("\n");
for (int i = 0; i < n_files; i++)
if (f[i].start_block >= 0)
{
printf("\n\n File Name : %s\n ",f[i].name);
ptr = search(list, f[i].start_block);
Block *b = &(ptr->d);
while (b)
{
printf("%-2d ", b->id);
b = b->next_file_blk;
}
}
}
void indexed(File *const f, const int n_files, const int blk_size, const int num_blk)
{
List list = createEmptyList();
Block b;
init_block(&b);
Node *ptr, *tmp;
int blocks_visited, flag, id, counter, blk_req;
int start, end;
for (int i = 0; i < num_blk; i++)
{
b.id = i;
insertLast(list, b);
}
for (int i = 0; i < n_files; i++)
{
blocks_visited = 0;
flag = 0;
blk_req = f[i].size / blk_size;
if (f[i].size % blk_size)
{
blk_req++;
}
f[i].indices = (int *)calloc(blk_req + 1, sizeof(int));
f[i].length = blk_req;
counter = 0;
while (blocks_visited < num_blk && !flag)
{
id = random() % num_blk;
ptr = search(list, id);
if (ptr->d.status == FREE)
{
f[i].indices[counter++] = id;
if (counter == blk_req + 1)
{
flag = 1;
break;
}
}
else
{
blocks_visited++;
}
}
if (!flag)
{
printf(" Unable to allocate memory for File : %s !! \n\n", f[i].name);
free(f[i].indices);
f[i].indices = NULL;
}
}
printf("\n\t\tDIRECTORY STRUCTURE\n");
printf(" +----------------------+-------------+\n");
printf(" | File Name | Index Block |\n");
printf(" +----------------------+-------------+\n");
for (int i = 0; i < n_files; i++)
{
if (f[i].indices)
{
printf(" | %-20s | %-2d |\n", f[i].name, f[i].indices[0]);
}
}
printf(" +----------------------+-------------+\n");
printf("\n\n");
printf(" +----------------------+----------------+\n");
printf(" | File Name | Blocks Indexed |\n");
printf(" +----------------------+----------------+\n");
for (int i = 0; i < n_files; i++)
{
if (f[i].indices)
{
for (int j = 1; j <= f[i].length; j++)
{
printf(" | %-20s | %-2d |\n", ((j > 1) ? "" : f[i].name), f[i].indices[j]);
}
}
printf(" +----------------------+----------------+\n");
}
}
int main()
{
int mem_size;
int blk_size;
int num_blks;
int num_file;
int choice;
File f[MAX];
printf("\n\n File Allocation Techniques \n\n");
printf(" Enter Memory Size : ");
scanf("%d", &mem_size);
printf(" Enter Block Size : ");
scanf("%d", &blk_size);
num_blks = mem_size / blk_size;
printf(" Enter Number Of Files : ");
scanf("%d", &num_file);
getchar();
for (int i = 0; i < num_file; i++)
{
printf(" Enter File Name : ");
scanf("%[^\n]", f[i].name);
printf(" Enter File Size : ");
scanf("%d", &f[i].size);
getchar();
}
while (1)
{
printf("\n\n File Allocation Techniques \n\n");
printf(" 1) Contiguous\n");
printf(" 2) Linked\n");
printf(" 3) Indexed\n");
printf(" 4) Exit\n");
printf(" Enter Your choice : ");
scanf("%d", &choice);
switch (choice)
{
case 4 :
{
printf(" Exit \n\n");
printf("\n\n The End \n\n");
exit(0);
}
case 1 :
{
printf("\n\n Contiguous \n\n");
contiguous(f, num_file, blk_size, num_blks);
break;
}
case 2 :
{
printf("\n\n Linked \n\n");
linked(f, num_file, blk_size, num_blks);
break;
}
case 3 :
{
printf("\n\n Indexed \n\n");
indexed(f, num_file, blk_size, num_blks);
break;
}
default :
{
printf(" Invalid Input!! \n\n");
}
}
}
printf("\n\n The End \n\n");
return 0;
}
LinkedList.h:
typedef Block Data;
typedef struct Node
{
Data d;
struct Node *next;
} Node;
typedef Node *List;
extern void init_block(Block *const);
List createEmptyList()
{
Node *head = (Node *)malloc(sizeof(Node));
init_block(&(head -> d));
head->next = NULL;
return head;
}
void insertLast(List head, const Data d)
{
Node *new = (Node *)malloc(sizeof(Node));
new->d = d;
Node *tmp = head;
while (tmp->next)
{
tmp = tmp->next;
}
new->next = NULL;
tmp->next = new;
}
void insertFirst(List head, const Data d)
{
Node *new = (Node *)malloc(sizeof(Node));
new->d = d;
new->next = head->next;
head->next = new;
}
Data delete (List prev)
{
Data rVal;
if (!prev)
{
return rVal;
}
if (!prev->next)
{
return rVal;
}
Node *tmp = prev->next;
rVal = tmp->d;
prev->next = prev->next->next;
free(tmp);
return rVal;
}
Data deleteFirst(List head)
{
Data rVal;
if (head->next == NULL)
{
printf(" Empty List! \n\n");
return rVal;
}
delete (head);
}
Data deleteLast(List head)
{
Data rVal;
if (head->next == NULL)
{
printf(" Empty List! \n\n");
return rVal;
}
Node *tmp = head;
while (tmp->next->next != NULL)
{
tmp = tmp->next;
}
delete (tmp);
}
void display(List head)
{
Node *tmp = head->next;
if (tmp == NULL)
{
printf(" Empty! \n\n");
return;
}
while (tmp)
{
printf(" BID : %-2d\tStatus: %d\n\n", tmp->d.id, tmp->d.status);
tmp = tmp->next;
}
}
int length(List head)
{
Node *tmp = head->next;
if (tmp == NULL)
{
return 0;
}
int count = 0;
while (tmp)
{
tmp = tmp->next;
count++;
}
return count;
}
Node* search(List head, const int id)
{
if (head->next == NULL)
{
return NULL;
}
Node *tmp = head -> next;
while (tmp)
{
if (tmp->d.id == id)
{
return tmp;
}
tmp = tmp->next;
}
return NULL;
}
errors:
cc fileallocation.c -std=c99
In file included from fileallocation.c:8:0:
LinkedList.h:1:1: error: unknown type name ‘Block’
typedef Block Data;
^
LinkedList.h:11:24: error: unknown type name ‘Block’
extern void init_block(Block *const);
^
LinkedList.h: In function ‘createEmptyList’:
LinkedList.h:17:5: warning: implicit declaration of function ‘init_block’ [-Wimplicit-function-declaration]
init_block(&(head -> d));
^
LinkedList.h: In function ‘display’:
LinkedList.h:119:53: error: request for member ‘id’ in something not a structure or union
printf(" BID : %-2d\tStatus: %d\n\n", tmp->d.id, tmp->d.status);
^
LinkedList.h:119:64: error: request for member ‘status’ in something not a structure or union
printf(" BID : %-2d\tStatus: %d\n\n", tmp->d.id, tmp->d.status);
^
LinkedList.h: In function ‘search’:
LinkedList.h:154:19: error: request for member ‘id’ in something not a structure or union
if (tmp->d.id == id)
^
fileallocation.c: At top level:
fileallocation.c:34:6: warning: conflicting types for ‘init_block’ [enabled by default]
void init_block(Block *const);
^
In file included from fileallocation.c:8:0:
LinkedList.h:17:5: note: previous implicit declaration of ‘init_block’ was here
init_block(&(head -> d));
^
fileallocation.c: In function ‘contiguous’:
fileallocation.c:81:9: error: incompatible type for argument 2 of ‘insertLast’
insertLast(list, b);
^
In file included from fileallocation.c:8:0:
LinkedList.h:24:6: note: expected ‘Data’ but argument is of type ‘Block’
void insertLast(List head, const Data d)
^
fileallocation.c:99:13: warning: implicit declaration of function ‘random’ [-Wimplicit-function-declaration]
id = random() % num_blk;
^
fileallocation.c:101:23: error: request for member ‘status’ in something not a structure or union
if (ptr->d.status != FREE)
^
fileallocation.c:109:27: error: request for member ‘id’ in something not a structure or union
start = ptr->d.id;
^
fileallocation.c:115:27: error: request for member ‘status’ in something not a structure or union
if (tmp->d.status == FREE)
^
fileallocation.c:138:27: error: request for member ‘status’ in something not a structure or union
tmp->d.status = 1;
^
fileallocation.c: In function ‘linked’:
fileallocation.c:181:9: error: incompatible type for argument 2 of ‘insertLast’
insertLast(list, b);
^
In file included from fileallocation.c:8:0:
LinkedList.h:24:6: note: expected ‘Data’ but argument is of type ‘Block’
void insertLast(List head, const Data d)
^
fileallocation.c:205:23: error: request for member ‘status’ in something not a structure or union
if (ptr->d.status != FREE)
^
fileallocation.c:210:21: error: request for member ‘status’ in something not a structure or union
ptr -> d.status = 1;
^
fileallocation.c:222:25: error: request for member ‘status’ in something not a structure or union
ptr -> d.status = FREE;
^
fileallocation.c:237:24: error: request for member ‘next_file_blk’ in something not a structure or union
left->d.next_file_blk = &(right->d);
^
fileallocation.c:238:24: error: request for member ‘status’ in something not a structure or union
left->d.status = 1;
^
fileallocation.c:240:21: error: request for member ‘next_file_blk’ in something not a structure or union
right->d.next_file_blk = NULL;
^
fileallocation.c:265:24: warning: initialization from incompatible pointer type [enabled by default]
Block *b = &(ptr->d);
^
fileallocation.c: In function ‘indexed’:
fileallocation.c:288:9: error: incompatible type for argument 2 of ‘insertLast’
insertLast(list, b);
^
In file included from fileallocation.c:8:0:
LinkedList.h:24:6: note: expected ‘Data’ but argument is of type ‘Block’
void insertLast(List head, const Data d)
^
fileallocation.c:314:23: error: request for member ‘status’ in something not a structure or union
if (ptr->d.status == FREE)
^
fix the code
|
656a62280e81b98356a42d6469c78d00
|
{
"intermediate": 0.5360240340232849,
"beginner": 0.3589269518852234,
"expert": 0.10504899173974991
}
|
44,136
|
Give me an example of getsockopt SO_RCVBUF on Linux in C. Answer briefly
|
5865b312ca57bb1bb53c43237be957ed
|
{
"intermediate": 0.4694051742553711,
"beginner": 0.3137867748737335,
"expert": 0.216808021068573
}
|
44,137
|
Sup!!
|
0fe11a4277d214a112834a898ed51677
|
{
"intermediate": 0.3346889615058899,
"beginner": 0.2636716067790985,
"expert": 0.4016394019126892
}
|
44,138
|
hi! can you make a bootstrap5-css-html5-json TTS based on torchaudio, using all possible available voices from a dropdown; on play, the audio is saved automatically in a directory. Show only code.
|
5eb6f727cad78abe8db5a654aab92fcf
|
{
"intermediate": 0.44499045610427856,
"beginner": 0.24920761585235596,
"expert": 0.30580195784568787
}
|
44,139
|
hi! can you make a bootstrap5-css-html5-json gTTS app using all possible available voices from a dropdown; on play, the audio is saved automatically in a directory. Show only code.
|
107e619abaaadabdb1f1e48e14f4a607
|
{
"intermediate": 0.4001700282096863,
"beginner": 0.20084881782531738,
"expert": 0.3989811837673187
}
|
44,140
|
Hi! can you create a simple bootstrap5-css-json gTTS app with all possible languages to select, saving the audio automatically to a local machine directory when played?
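For the language list the dropdown would need, gTTS can report what it supports; a hedged sketch (the output filename is illustrative):
from gtts import gTTS
from gtts.lang import tts_langs

# tts_langs() returns a dict of supported codes, e.g. {'en': 'English', ...}
for code, name in sorted(tts_langs().items()):
    print(code, name)

gTTS(text="hello", lang="en").save("speech.mp3")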
|
1bec7953c76b4ecb39a6142a91b9496f
|
{
"intermediate": 0.4406929016113281,
"beginner": 0.2666664719581604,
"expert": 0.2926405966281891
}
|
44,141
|
what are the tree functions in the standard C and C++ libraries? Answer briefly
|
673dc2c14ee19f3c19405b5bc2c3b4f1
|
{
"intermediate": 0.6130505800247192,
"beginner": 0.22198933362960815,
"expert": 0.1649600863456726
}
|
44,142
|
@Prop() readonly defaultForm: Record<number, boolean>
how can this be rewritten with the Vue 3 Composition API?
|
605a670c6d8902db64eba23b0072db71
|
{
"intermediate": 0.5330133438110352,
"beginner": 0.3121228516101837,
"expert": 0.15486378967761993
}
|
44,143
|
explain this page like I am a layperson: ""intel / neural-speed (Public)
Folders and files
Name
Latest commit
Zhenzhong1
Zhenzhong1
Update convert_chatglm.py (#185)
1051182
·
4 days ago
History
.github
[BesTLA] New thread pool and hybrid dispatcher (#118)
3 weeks ago
bestla
mha enhance (#180)
last week
docker
add docker file and readme (#14)
3 months ago
docs
Gemma-7b&&Gemma-2b (#171)
4 days ago
neural_speed
Update convert_chatglm.py (#185)
4 days ago
scripts
[Model Enabling] Support ChatGLM3 (#182)
5 days ago
tests
Gemma-7b&&Gemma-2b (#171)
4 days ago
third_party
reorg directory
3 months ago
.clang-format
update readme path and copy hidden files (#185)
3 months ago
.clang-tidy
[Neural Speed] Cont Batching in Offline and Server (GPT-J & Beam Sear…
2 months ago
.editorconfig
update readme path and copy hidden files (#185)
3 months ago
.gitignore
fix clang-tidy issues (#123)
last month
.gitmodules
reorg directory
3 months ago
.pre-commit-config.yaml
enable pre-commit CI (#113)
last month
CMakeLists.txt
[BesTLA] New thread pool and hybrid dispatcher (#118)
3 weeks ago
CMakePresets.json
[BesTLA] New thread pool and hybrid dispatcher (#118)
3 weeks ago
CODE_OF_CONDUCT.md
add code_of_conduct, contributing agreement, and security.md file
4 months ago
CONTRIBUTING.md
add code_of_conduct, contributing agreement, and security.md file
4 months ago
LICENSE
add license file (#34)
2 months ago
README.md
Improve readme (#166)
2 weeks ago
clang-format.py
Init ns doc (#9)
3 months ago
developer_document.md
[Runtime Enhence] Extend long input tokens length (#157)
2 weeks ago
requirements.txt
Improvements Targeting Windows (#136)
last month
security.md
add code_of_conduct, contributing agreement, and security.md file
4 months ago
setup.py
Improvements Targeting Windows (#136)
last month
Repository files navigation
README
Code of conduct
Apache-2.0 license
Security
Neural Speed
Neural Speed is an innovative library designed to support the efficient inference of large language models (LLMs) on Intel platforms through the state-of-the-art (SOTA) low-bit quantization powered by Intel Neural Compressor. The work is inspired by llama.cpp and further optimized for Intel platforms with our innovations in NeurIPS' 2023
Key Features
Highly optimized low-precision kernels on CPUs with ISAs (AMX, VNNI, AVX512F, AVX_VNNI and AVX2). See details
Up to 40x performance speedup on popular LLMs compared with llama.cpp. See details
Tensor parallelism across sockets/nodes on CPUs. See details
Neural Speed is under active development so APIs are subject to change.
Supported Hardware
Hardware Supported
Intel Xeon Scalable Processors ✔
Intel Xeon CPU Max Series ✔
Intel Core Processors ✔
Supported Models
Support almost all the LLMs in PyTorch format from Hugging Face such as Llama2, ChatGLM2, Baichuan2, Qwen, Mistral, Whisper, etc. File an issue if your favorite LLM does not work.
Support typical LLMs in GGUF format such as Llama2, Falcon, MPT, Bloom etc. More are coming. Check out the details.
Installation
Install from binary
pip install neural-speed
Build from Source
pip install -r requirements.txt
pip install .
Note: GCC requires version 10+
Quick Start (Transformer-like usage)
Install Intel Extension for Transformers to use Transformer-like APIs.
PyTorch Model from Hugging Face
from transformers import AutoTokenizer, TextStreamer
from intel_extension_for_transformers.transformers import AutoModelForCausalLM
model_name = "Intel/neural-chat-7b-v3-1" # Hugging Face model_id or local model
prompt = "Once upon a time, there existed a little girl,"
tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
inputs = tokenizer(prompt, return_tensors="pt").input_ids
streamer = TextStreamer(tokenizer)
model = AutoModelForCausalLM.from_pretrained(model_name, load_in_4bit=True)
outputs = model.generate(inputs, streamer=streamer, max_new_tokens=300)
GGUF Model from Hugging Face
from transformers import AutoTokenizer, TextStreamer
from intel_extension_for_transformers.transformers import AutoModelForCausalLM
# Specify the GGUF repo on the Hugginface
model_name = "TheBloke/Llama-2-7B-Chat-GGUF"
# Download the the specific gguf model file from the above repo
model_file = "llama-2-7b-chat.Q4_0.gguf"
# make sure you are granted to access this model on the Huggingface.
tokenizer_name = "meta-llama/Llama-2-7b-chat-hf"
prompt = "Once upon a time"
tokenizer = AutoTokenizer.from_pretrained(tokenizer_name, trust_remote_code=True)
inputs = tokenizer(prompt, return_tensors="pt").input_ids
streamer = TextStreamer(tokenizer)
model = AutoModelForCausalLM.from_pretrained(model_name, model_file = model_file)
outputs = model.generate(inputs, streamer=streamer, max_new_tokens=300)
PyTorch Model from Modelscope
import sys
from modelscope import AutoTokenizer
from transformers import TextStreamer
from neural_speed import Model
model_name = "qwen/Qwen1.5-7B-Chat" # modelscope model_id or local model
prompt = "Once upon a time, there existed a little girl,"
tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
inputs = tokenizer(prompt, return_tensors="pt").input_ids
streamer = TextStreamer(tokenizer)
model = Model()
model.init(model_name, weight_dtype="int4", compute_dtype="int8", model_hub="modelscope")
outputs = model.generate(inputs, streamer=streamer, max_new_tokens=300)
Quick Start (llama.cpp-like usage)
Single (One-click) Step
python scripts/run.py model-path --weight_dtype int4 -p "She opened the door and see"
Multiple Steps
Convert and Quantize
# skip the step if GGUF model is from Hugging Face or generated by llama.cpp
python scripts/convert.py --outtype f32 --outfile ne-f32.bin EleutherAI/gpt-j-6b
Inference
# Linux and WSL
OMP_NUM_THREADS=<physic_cores> numactl -m 0 -C 0-<physic_cores-1> python scripts/inference.py --model_name llama -m ne-q4_j.bin -c 512 -b 1024 -n 256 -t <physic_cores> --color -p "She opened the door and see"
# Windows
python scripts/inference.py --model_name llama -m ne-q4_j.bin -c 512 -b 1024 -n 256 -t <physic_cores|P-cores> --color -p "She opened the door and see"
Please refer to Advanced Usage for more details.
Advanced Topics
New model enabling
You can consider adding your own models, please follow the document: graph developer document.
Performance profiling
Enable NEURAL_SPEED_VERBOSE environment variable for performance profiling.
Available modes:
0: Print full information: evaluation time and operator profiling. Need to set NS_PROFILING to ON and recompile.
1: Print evaluation time. Time taken for each evaluation.
2: Profile individual operator. Identify performance bottleneck within the model. Need to set NS_PROFILING to ON and recompile.
About
An innovative library for efficient LLM inference via low-bit quantization
github.com/intel/neural-speed
Topics
sparsity cpu gpu int8 low-bit int4 fp8 llamacpp llm-inference gaudi2 nf4 fp4 mxformat llm-fine-tuning
Resources
Readme
License
Apache-2.0 license
Code of conduct
Code of conduct
Security policy
Security policy
Activity
Custom properties
Stars
130 stars
Watchers
7 watching
Forks
18 forks
Report repository
Releases 4
Intel® Neural Speed v1.0a Release
Latest
4 days ago
+ 3 releases
Packages
No packages published
Contributors
29
@zhenwei-intel
@Zhenzhong1
@intellinjun
@DDEle
@VincyZhang
@a32543254
@luoyu-intel
@zhentaoyu
@airMeng
@zhewang1-intc
@kevinintel
@yuchengliu1
@hshen14
@aahouzi
+ 15 contributors
Languages
C++
65.1%
C
17.8%
Python
15.5%
Other
1.6%
""
|
75d3031b9afed4ea30691b1e279c22ff
|
{
"intermediate": 0.417652428150177,
"beginner": 0.26557672023773193,
"expert": 0.3167707920074463
}
|
44,144
|
can you help me create a windows app to check internet availability?
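For the availability check itself, a TCP probe is a common approach; a hedged Python sketch (the probe host, port, and timeout are arbitrary choices, and a Windows UI such as tkinter could sit on top of this function):
import socket

def internet_available(host="8.8.8.8", port=53, timeout=3.0) -> bool:
    # Try a TCP connection to a well-known public DNS server.
    try:
        with socket.create_connection((host, port), timeout=timeout):
            return True
    except OSError:
        return False

print("online" if internet_available() else "offline")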
|
1f39bcf960f069dad1011f12ebb9d79a
|
{
"intermediate": 0.5519528985023499,
"beginner": 0.18058370053768158,
"expert": 0.26746341586112976
}
|
44,145
|
Make a simple html5-bootstrap5-python3 TTS for espeak on local Ubuntu Linux, with all possible voices of the world selectable from a dropdown list; when played back, the file is automatically saved to the local home directory. Show only code.
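For the espeak side, a hedged Python sketch of the backend call (the voice id and output path are illustrative; `espeak --voices` lists what is installed):
import subprocess
from pathlib import Path

def speak(text, voice="en-gb", out=Path.home() / "speech.wav"):
    # -v picks the voice, -w writes a WAV file instead of playing aloud.
    subprocess.run(["espeak", "-v", voice, "-w", str(out), text], check=True)
    return out

print(speak("hello world"))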
|
1218c2a6df88565f9b6e306c63a9edbc
|
{
"intermediate": 0.36545345187187195,
"beginner": 0.3007418215274811,
"expert": 0.33380475640296936
}
|
44,146
|
Make a simple html5-bootstrap5-json gTTS front end for espeak on local Ubuntu Linux, with all possible voices of the world selectable from a dropdown list and a clear-text button; when played back, the file is automatically saved to the local home directory. Show only code.
|
a81929a6d11429199ca98f1f5eff3e14
|
{
"intermediate": 0.341205358505249,
"beginner": 0.3156157433986664,
"expert": 0.3431788980960846
}
|
44,147
|
please help: ""PS C:\Users\bower\augmentoolkit> & C:/Users/bower/AppData/Local/Programs/Python/Python311/python.exe "c:/Users/bower/Videos/AI Agent/model test/modeltest2.py"
2024-03-27 04:01:51 [INFO] Using Neural Speed to load the GGUF model...
alphamonarch-7b.Q6_K.gguf: 100%|███████████████████████████████████████████████████████████████████████████████████████████████████████████████| 5.94G/5.94G [06:59<00:00, 14.2MB/s]
Traceback (most recent call last):
File "C:\Users\bower\AppData\Local\Programs\Python\Python311\Lib\site-packages\urllib3\connectionpool.py", line 445, in _make_request
six.raise_from(e, None)
File "<string>", line 3, in raise_from
File "C:\Users\bower\AppData\Local\Programs\Python\Python311\Lib\site-packages\urllib3\connectionpool.py", line 440, in _make_request
httplib_response = conn.getresponse()
^^^^^^^^^^^^^^^^^^
File "C:\Users\bower\AppData\Local\Programs\Python\Python311\Lib\http\client.py", line 1390, in getresponse
response.begin()
File "C:\Users\bower\AppData\Local\Programs\Python\Python311\Lib\http\client.py", line 325, in begin
version, status, reason = self._read_status()
^^^^^^^^^^^^^^^^^^^
File "C:\Users\bower\AppData\Local\Programs\Python\Python311\Lib\http\client.py", line 286, in _read_status
line = str(self.fp.readline(_MAXLINE + 1), "iso-8859-1")
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "C:\Users\bower\AppData\Local\Programs\Python\Python311\Lib\socket.py", line 706, in readinto
return self._sock.recv_into(b)
^^^^^^^^^^^^^^^^^^^^^^^
File "C:\Users\bower\AppData\Local\Programs\Python\Python311\Lib\ssl.py", line 1314, in recv_into
return self.read(nbytes, buffer)
^^^^^^^^^^^^^^^^^^^^^^^^^
File "C:\Users\bower\AppData\Local\Programs\Python\Python311\Lib\ssl.py", line 1166, in read
return self._sslobj.read(len, buffer)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
TimeoutError: The read operation timed out
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "C:\Users\bower\AppData\Local\Programs\Python\Python311\Lib\site-packages\requests\adapters.py", line 486, in send
resp = conn.urlopen(
^^^^^^^^^^^^^
File "C:\Users\bower\AppData\Local\Programs\Python\Python311\Lib\site-packages\urllib3\connectionpool.py", line 755, in urlopen
retries = retries.increment(
^^^^^^^^^^^^^^^^^^
File "C:\Users\bower\AppData\Local\Programs\Python\Python311\Lib\site-packages\urllib3\util\retry.py", line 532, in increment
raise six.reraise(type(error), error, _stacktrace)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "C:\Users\bower\AppData\Local\Programs\Python\Python311\Lib\site-packages\urllib3\packages\six.py", line 770, in reraise
raise value
File "C:\Users\bower\AppData\Local\Programs\Python\Python311\Lib\site-packages\urllib3\connectionpool.py", line 699, in urlopen
httplib_response = self._make_request(
^^^^^^^^^^^^^^^^^^^
File "C:\Users\bower\AppData\Local\Programs\Python\Python311\Lib\site-packages\urllib3\connectionpool.py", line 447, in _make_request
self._raise_timeout(err=e, url=url, timeout_value=read_timeout)
File "C:\Users\bower\AppData\Local\Programs\Python\Python311\Lib\site-packages\urllib3\connectionpool.py", line 336, in _raise_timeout
raise ReadTimeoutError(
urllib3.exceptions.ReadTimeoutError: HTTPSConnectionPool(host='huggingface.co', port=443): Read timed out. (read timeout=10)
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "C:\Users\bower\AppData\Local\Programs\Python\Python311\Lib\site-packages\huggingface_hub\file_download.py", line 1261, in hf_hub_download
metadata = get_hf_file_metadata(
^^^^^^^^^^^^^^^^^^^^^
File "C:\Users\bower\AppData\Local\Programs\Python\Python311\Lib\site-packages\huggingface_hub\utils\_validators.py", line 118, in _inner_fn
return fn(*args, **kwargs)
^^^^^^^^^^^^^^^^^^^
File "C:\Users\bower\AppData\Local\Programs\Python\Python311\Lib\site-packages\huggingface_hub\file_download.py", line 1667, in get_hf_file_metadata
r = _request_wrapper(
^^^^^^^^^^^^^^^^^
File "C:\Users\bower\AppData\Local\Programs\Python\Python311\Lib\site-packages\huggingface_hub\file_download.py", line 385, in _request_wrapper
response = _request_wrapper(
^^^^^^^^^^^^^^^^^
File "C:\Users\bower\AppData\Local\Programs\Python\Python311\Lib\site-packages\huggingface_hub\file_download.py", line 408, in _request_wrapper
response = get_session().request(method=method, url=url, **params)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "C:\Users\bower\AppData\Local\Programs\Python\Python311\Lib\site-packages\requests\sessions.py", line 589, in request
resp = self.send(prep, **send_kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "C:\Users\bower\AppData\Local\Programs\Python\Python311\Lib\site-packages\requests\sessions.py", line 703, in send
r = adapter.send(request, **kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "C:\Users\bower\AppData\Local\Programs\Python\Python311\Lib\site-packages\huggingface_hub\utils\_http.py", line 67, in send
return super().send(request, *args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "C:\Users\bower\AppData\Local\Programs\Python\Python311\Lib\site-packages\requests\adapters.py", line 532, in send
raise ReadTimeout(e, request=request)
requests.exceptions.ReadTimeout: (ReadTimeoutError("HTTPSConnectionPool(host='huggingface.co', port=443): Read timed out. (read timeout=10)"), '(Request ID: 2cc1a2e1-0b59-4318-ae02-193daf950288)')
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "c:\Users\bower\Videos\AI Agent\model test\modeltest2.py", line 15, in <module>
model = AutoModelForCausalLM.from_pretrained(model_name, model_file = model_file)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "C:\Users\bower\AppData\Local\Programs\Python\Python311\Lib\site-packages\intel_extension_for_transformers\transformers\modeling\modeling_auto.py", line 167, in from_pretrained
model_config = hf_hub_download(pretrained_model_name_or_path, filename="config.json")
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "C:\Users\bower\AppData\Local\Programs\Python\Python311\Lib\site-packages\huggingface_hub\utils\_validators.py", line 118, in _inner_fn
return fn(*args, **kwargs)
^^^^^^^^^^^^^^^^^^^
File "C:\Users\bower\AppData\Local\Programs\Python\Python311\Lib\site-packages\huggingface_hub\file_download.py", line 1406, in hf_hub_download
raise LocalEntryNotFoundError(
huggingface_hub.utils._errors.LocalEntryNotFoundError: An error happened while trying to locate the file on the Hub and we cannot find the requested files in the local cache. Please check your connection and try again or make sure your Internet connection is on.
PS C:\Users\bower\augmentoolkit> ""
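A possible mitigation sketch: the traceback is a plain network read timeout while fetching config.json from huggingface.co, so retrying with a longer timeout often gets past it. HF_HUB_DOWNLOAD_TIMEOUT is an assumption (recent huggingface_hub releases honour it, but check the installed version), and the repo id below is a placeholder.

import os
import time

# Assumption: recent huggingface_hub versions read this env var; the default is 10 s.
os.environ["HF_HUB_DOWNLOAD_TIMEOUT"] = "60"

from huggingface_hub import hf_hub_download

def download_with_retries(repo_id, filename, attempts=3):
    # Retry transient read timeouts with a short linear backoff.
    for attempt in range(attempts):
        try:
            return hf_hub_download(repo_id, filename=filename)
        except Exception:
            if attempt == attempts - 1:
                raise
            time.sleep(5 * (attempt + 1))

# Placeholder repo id - substitute the one the script actually loads.
path = download_with_retries("someuser/alphamonarch-7b-GGUF", "config.json")
print(path)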
|
61041b2ac6e5b38bc0e76dc045d388c8
|
{
"intermediate": 0.36704185605049133,
"beginner": 0.34118038415908813,
"expert": 0.29177772998809814
}
|
44,148
|
create detailed steps necessary to sort a 1 dimensional array of values with a quick sort for a generic computer programming language
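A minimal sketch of those steps in Python (Lomuto partition; any language with arrays and recursion maps onto it the same way):

def quicksort(arr, lo=0, hi=None):
    # Sorts arr[lo..hi] in place.
    if hi is None:
        hi = len(arr) - 1
    if lo < hi:
        pivot = arr[hi]                # 1. choose a pivot (last element here)
        i = lo - 1
        for j in range(lo, hi):        # 2. partition: smaller values go left
            if arr[j] <= pivot:
                i += 1
                arr[i], arr[j] = arr[j], arr[i]
        arr[i + 1], arr[hi] = arr[hi], arr[i + 1]  # place the pivot
        p = i + 1
        quicksort(arr, lo, p - 1)      # 3. recurse on the left part
        quicksort(arr, p + 1, hi)      # 4. recurse on the right part
    return arr

print(quicksort([5, 2, 9, 1, 7]))      # [1, 2, 5, 7, 9]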
|
26e0e322170f65d363c0b0c8b0607dfa
|
{
"intermediate": 0.3075975179672241,
"beginner": 0.1429169625043869,
"expert": 0.549485445022583
}
|
44,149
|
create detailed steps necessary to sort a 1 dimensional array of values with a quick sort for a generic computer programming language and create one sample implementation in QB64 BASIC, another in FreeBASIC, and another in PureBASIC ( Pure BASIC )
|
f1a3d7573d74d64fcf70d1ee232009f0
|
{
"intermediate": 0.38975536823272705,
"beginner": 0.17192494869232178,
"expert": 0.43831968307495117
}
|
44,150
|
Make a 3d cube in JavaScript with Shaders and a ground
|
43c11537f6b66b2ad0e3c53e1ed72a45
|
{
"intermediate": 0.4661915898323059,
"beginner": 0.23006825149059296,
"expert": 0.30374014377593994
}
|
44,151
|
will you please replace GM xhr (GM_xmlhttpRequest) with standard XHR in this (function () {
'use strict'
const gistName = document.querySelector('.gist-blob-name').innerText
const rawUrl = document.querySelector('.file-actions')
const a = rawUrl.querySelector('a')
const href = a.getAttribute('href')
const button = document.createElement('button')
const rawButton = document.querySelector('.file-actions a')
const rawButtonStyles = getComputedStyle(a)
button.textContent = rawButton.textContent
button.style.cssText = rawButtonStyles.cssText
button.style.marginLeft = '5px'
button.style.padding = '5px'
button.style.backgroundColor = 'green'
button.innerText = 'Download'
button.style.borderRadius = '10px'
rawUrl.appendChild(button)
button.addEventListener('click', () => {
GM_xmlhttpRequest({
method: 'GET',
url: href,
onload: function (response) {
const blob = new Blob([response.responseText], {
type: 'text/plain',
})
const url = URL.createObjectURL(blob)
const a = document.createElement('a')
a.href = url
a.download = gistName
a.click()
},
})
})
})()
|
52f81b3ecfb6cadba9a1263ab4d532e9
|
{
"intermediate": 0.39145252108573914,
"beginner": 0.3310886323451996,
"expert": 0.2774588167667389
}
|
44,152
|
Change label font at runtime (Defold engine)
|
b5bf8f8ed4086e6a907642542716aab7
|
{
"intermediate": 0.3223264217376709,
"beginner": 0.25998440384864807,
"expert": 0.4176892340183258
}
|
44,153
|
<?php
function send_initial_players($unique_id)
{
$nr_players=0;
$to_send=[];
$to_send[0]=8;
$c=1;
for($i=1;$i<=10;$i++)
{
$u_id=get_unique_id_by_local($i);
if($u_id!=0)
{
$position=get_position($u_id);
$x=$position[0];
$y=$position[1];
$rot=$position[2];
$to_send[$c]=$x;
$c++;
$to_send[$c]=$y;
$c++;
$to_send[$c]=$rot;
$c++;
$to_send[$c]=get_player_username($u_id);
$c++;
$to_send[$c]=get_active($u_id);
$c++;
}else{
$to_send[$c]=0;
$c++;
$to_send[$c]=0;
$c++;
$to_send[$c]=0;
$c++;
$to_send[$c]="Offline";
$c++;
$to_send[$c]=0;
$c++;
}
}
send_data_to_player($unique_id,$to_send,10);
}
?>
PHP to Python
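A direct Python translation sketch; the helpers (get_unique_id_by_local, get_position, get_player_username, get_active, send_data_to_player) are assumed to exist elsewhere, exactly as in the PHP original:

def send_initial_players(unique_id):
    to_send = [8]                      # message id 8 in slot 0
    for i in range(1, 11):             # local slots 1..10
        u_id = get_unique_id_by_local(i)
        if u_id != 0:
            x, y, rot = get_position(u_id)
            to_send += [x, y, rot, get_player_username(u_id), get_active(u_id)]
        else:
            to_send += [0, 0, 0, "Offline", 0]
    send_data_to_player(unique_id, to_send, 10)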
|
223aae817843682dfaaf9113922a9bc0
|
{
"intermediate": 0.4077363610267639,
"beginner": 0.3587917983531952,
"expert": 0.2334718108177185
}
|
44,154
|
When running a Playwright script, some elements on the page get stuck loading
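A sketch of the usual mitigations in Playwright for Python, assuming the stalls come from slow third-party resources (the URL, selector and route patterns below are placeholders):

from playwright.sync_api import sync_playwright

with sync_playwright() as p:
    browser = p.chromium.launch()
    page = browser.new_page()
    # Abort requests that commonly hang the "load" event (media, fonts).
    page.route("**/*.{png,jpg,woff2}", lambda route: route.abort())
    # Wait only for the DOM instead of the full load event.
    page.goto("https://example.com", wait_until="domcontentloaded", timeout=30000)
    # Then wait for the one element actually needed.
    page.wait_for_selector("#content")
    browser.close()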
|
0d4026fec74580baeb0b7580d03f79dc
|
{
"intermediate": 0.24416567385196686,
"beginner": 0.480039119720459,
"expert": 0.2757951617240906
}
|
44,155
|
create detailed steps necessary to sort a 1 dimensional array of values with a quick sort for a generic computer programming language and create one sample implementation in FreeBASIC
|
e0e5b9b45b7d8eed5df424152e3f5962
|
{
"intermediate": 0.4714275598526001,
"beginner": 0.0950341746211052,
"expert": 0.4335382282733917
}
|
44,156
|
Write me a song about peanut butter
|
0dd2e74b89d60cdef85bd803127d62b9
|
{
"intermediate": 0.3624684810638428,
"beginner": 0.38967862725257874,
"expert": 0.24785293638706207
}
|
44,157
|
Hey, I want to record the master output to a track in REAPER, just like Ableton's resample mode. Answer super briefly
|
c20fefc0387ffaf39d528eb58feb526e
|
{
"intermediate": 0.5438866019248962,
"beginner": 0.16085688769817352,
"expert": 0.29525649547576904
}
|
44,158
|
Hi, can I build an AI agent without using the OpenAI API?
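Yes. One hedged sketch of how, using a local open-weights model through Hugging Face transformers instead of any hosted API (the model name is just an example and needs enough RAM/VRAM):

from transformers import pipeline

generator = pipeline("text-generation", model="microsoft/phi-2")

def agent_step(history, user_msg):
    # Append the user turn, generate, then strip the prompt back off.
    prompt = history + f"\nUser: {user_msg}\nAssistant:"
    out = generator(prompt, max_new_tokens=128, do_sample=False)
    reply = out[0]["generated_text"][len(prompt):].strip()
    return prompt + " " + reply, reply

history, reply = agent_step("", "Hello, what can you do?")
print(reply)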
|
b1f45d36beceecd9bccde5a22674c2ed
|
{
"intermediate": 0.4331309199333191,
"beginner": 0.07940085977315903,
"expert": 0.4874682128429413
}
|
44,159
|
What is the best way to create and work with a database from Python code?
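A common starting point is the standard-library sqlite3 module, which needs no server; for larger apps an ORM such as SQLAlchemy is the usual next step. A minimal sketch:

import sqlite3

con = sqlite3.connect("app.db")
cur = con.cursor()
cur.execute("CREATE TABLE IF NOT EXISTS users (id INTEGER PRIMARY KEY, name TEXT)")
cur.execute("INSERT INTO users (name) VALUES (?)", ("Alice",))  # parameterized, never string-formatted
con.commit()
for row in cur.execute("SELECT id, name FROM users"):
    print(row)
con.close()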
|
99c64739e540d44218f81a51295241e0
|
{
"intermediate": 0.6212336421012878,
"beginner": 0.16096943616867065,
"expert": 0.2177969366312027
}
|
44,160
|
make a Linux ffmpeg 6.0 arg using pcm_s16le, 48000 Hz, stereo, s16, bitrate 320 kb/s and -ac 2, making this arg better: ffmpeg -f lavfi -i flite=text='New':voice=awb -o ffm_flt_out.wav
ffplay ffm_flt_out.wav
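A sketch of the corrected invocation, driven from Python for convenience. Two caveats: ffmpeg has no -o flag (the output path is positional), and pcm_s16le is uncompressed, so a 320 kb/s bitrate does not apply to it; the flite filter also requires an ffmpeg build with libflite enabled (assumption).

import subprocess

cmd = [
    "ffmpeg", "-y",
    "-f", "lavfi", "-i", "flite=text='New':voice=awb",
    "-c:a", "pcm_s16le",   # signed 16-bit little-endian samples (s16)
    "-ar", "48000",        # 48000 Hz sample rate
    "-ac", "2",            # stereo
    "ffm_flt_out.wav",     # output path is positional, not -o
]
subprocess.run(cmd, check=True)
subprocess.run(["ffplay", "-autoexit", "ffm_flt_out.wav"], check=True)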
|
fb0b44d2f7b8b3892c7bb449b7209e54
|
{
"intermediate": 0.4454067051410675,
"beginner": 0.2578096091747284,
"expert": 0.2967836558818817
}
|
44,161
|
modify the following code for me to fix the problem <type object 'Slider' has no attribute 'update'>:
import os
import shutil
import pathlib
import gradio as gr
import roop.utilities as util
import roop.globals
import ui.globals
from roop.face_util import extract_face_images
from roop.capturer import get_video_frame, get_video_frame_total, get_image_frame
from roop.ProcessEntry import ProcessEntry
from roop.FaceSet import FaceSet
last_image = None
IS_INPUT = True
SELECTED_FACE_INDEX = 0
SELECTED_INPUT_FACE_INDEX = 0
SELECTED_TARGET_FACE_INDEX = 0
input_faces = None
target_faces = None
face_selection = None
selected_preview_index = 0
is_processing = False
list_files_process : list[ProcessEntry] = []
no_face_choices = ["Use untouched original frame","Retry rotated", "Skip Frame"]
def faceswap_tab():
global no_face_choices
with gr.Tab("🎭 Face Swap"):
with gr.Row(variant='panel'):
with gr.Column(scale=2):
with gr.Row():
with gr.Column(min_width=160):
input_faces = gr.Gallery(label="Input faces", allow_preview=True, preview=True, height=128, object_fit="scale-down")
with gr.Accordion(label="Advanced Settings", open=False):
mask_top = gr.Slider(0, 256, value=0, label="Offset Face Top", step=1.0, interactive=True)
mask_bottom = gr.Slider(0, 256, value=0, label="Offset Face Bottom", step=1.0, interactive=True)
bt_remove_selected_input_face = gr.Button("❌ Remove selected", size='sm')
bt_clear_input_faces = gr.Button("💥 Clear all", variant='stop', size='sm')
with gr.Column(min_width=160):
target_faces = gr.Gallery(label="Target faces", allow_preview=True, preview=True, height=128, object_fit="scale-down")
bt_remove_selected_target_face = gr.Button("❌ Remove selected", size='sm')
bt_add_local = gr.Button('Add local files from', size='sm')
local_folder = gr.Textbox(show_label=False, placeholder="/content/", interactive=True)
with gr.Row(variant='panel'):
bt_srcfiles = gr.Files(label='Source File(s)', file_count="multiple", file_types=["image", ".fsz"], elem_id='filelist', height=233)
bt_destfiles = gr.Files(label='Target File(s)', file_count="multiple", file_types=["image", "video"], elem_id='filelist', height=233)
with gr.Row(variant='panel'):
gr.Markdown('')
forced_fps = gr.Slider(minimum=0, maximum=120, value=0, label="Video FPS", info='Overrides detected fps if not 0', step=1.0, interactive=True, container=True)
with gr.Column(scale=2):
previewimage = gr.Image(label="Preview Image", height=576, interactive=False)
with gr.Row(variant='panel'):
fake_preview = gr.Checkbox(label="Face swap frames", value=False)
bt_refresh_preview = gr.Button("🔄 Refresh", variant='secondary', size='sm')
bt_use_face_from_preview = gr.Button("Use Face from this Frame", variant='primary', size='sm')
with gr.Row():
preview_frame_num = gr.Slider(0, 0, value=0, label="Frame Number", step=1.0, interactive=True)
with gr.Row():
text_frame_clip = gr.Markdown('Processing frame range [0 - 0]')
set_frame_start = gr.Button("⬅ Set as Start", size='sm')
set_frame_end = gr.Button("➡ Set as End", size='sm')
with gr.Row(visible=False) as dynamic_face_selection:
with gr.Column(scale=2):
face_selection = gr.Gallery(label="Detected faces", allow_preview=True, preview=True, height=256, object_fit="scale-down")
with gr.Column():
bt_faceselect = gr.Button("☑ Use selected face", size='sm')
bt_cancelfaceselect = gr.Button("Done", size='sm')
with gr.Column():
gr.Markdown(' ')
with gr.Row(variant='panel'):
with gr.Column(scale=1):
selected_face_detection = gr.Dropdown(["First found", "All faces", "Selected face", "All female", "All male"], value="First found", label="Select face selection for swapping")
max_face_distance = gr.Slider(0.01, 1.0, value=0.65, label="Max Face Similarity Threshold")
video_swapping_method = gr.Dropdown(["Extract Frames to media","In-Memory processing"], value="In-Memory processing", label="Select video processing method", interactive=True)
no_face_action = gr.Dropdown(choices=no_face_choices, value=no_face_choices[0], label="Action on no face detected", interactive=True)
vr_mode = gr.Checkbox(label="VR Mode", value=False)
with gr.Column(scale=1):
ui.globals.ui_selected_enhancer = gr.Dropdown(["None", "Codeformer", "DMDNet", "GFPGAN", "GPEN", "Restoreformer"], value="None", label="Select post-processing")
ui.globals.ui_blend_ratio = gr.Slider(0.0, 1.0, value=0.65, label="Original/Enhanced image blend ratio")
with gr.Group():
autorotate = gr.Checkbox(label="Auto rotate horizontal Faces", value=True)
roop.globals.skip_audio = gr.Checkbox(label="Skip audio", value=False)
roop.globals.keep_frames = gr.Checkbox(label="Keep Frames (relevant only when extracting frames)", value=False)
roop.globals.wait_after_extraction = gr.Checkbox(label="Wait for user key press before creating video ", value=False)
with gr.Column(scale=1):
chk_useclip = gr.Checkbox(label="Use Text Masking", value=False)
clip_text = gr.Textbox(label="List of objects to mask and restore back on fake image", value="cup,hands,hair,banana" ,elem_id='tooltip')
gr.Dropdown(["Clip2Seg"], value="Clip2Seg", label="Engine")
bt_preview_mask = gr.Button("👥 Show Mask Preview", variant='secondary')
with gr.Row(variant='panel'):
with gr.Column():
bt_start = gr.Button("▶ Start", variant='primary')
gr.Button("👀 Open Output Folder", size='sm').click(fn=lambda: util.open_folder(roop.globals.output_path))
with gr.Column():
bt_stop = gr.Button("⏹ Stop", variant='secondary')
with gr.Column(scale=2):
gr.Markdown(' ')
with gr.Row(variant='panel'):
with gr.Column():
resultfiles = gr.Files(label='Processed File(s)', interactive=False)
with gr.Column():
resultimage = gr.Image(type='filepath', label='Final Image', interactive=False )
resultvideo = gr.Video(label='Final Video', interactive=False, visible=False)
previewinputs = [preview_frame_num, bt_destfiles, fake_preview, ui.globals.ui_selected_enhancer, selected_face_detection,
max_face_distance, ui.globals.ui_blend_ratio, chk_useclip, clip_text, no_face_action, vr_mode, autorotate]
input_faces.select(on_select_input_face, None, None).then(fn=on_preview_frame_changed, inputs=previewinputs, outputs=[previewimage, mask_top, mask_bottom])
bt_remove_selected_input_face.click(fn=remove_selected_input_face, outputs=[input_faces])
bt_srcfiles.change(fn=on_srcfile_changed, show_progress='full', inputs=bt_srcfiles, outputs=[dynamic_face_selection, face_selection, input_faces])
mask_top.input(fn=on_mask_top_changed, inputs=[mask_top], show_progress='hidden')
mask_bottom.input(fn=on_mask_bottom_changed, inputs=[mask_bottom], show_progress='hidden')
target_faces.select(on_select_target_face, None, None)
bt_remove_selected_target_face.click(fn=remove_selected_target_face, outputs=[target_faces])
forced_fps.change(fn=on_fps_changed, inputs=[forced_fps], show_progress='hidden')
bt_destfiles.change(fn=on_destfiles_changed, inputs=[bt_destfiles], outputs=[preview_frame_num, text_frame_clip], show_progress='hidden').then(fn=on_preview_frame_changed, inputs=previewinputs, outputs=[previewimage, mask_top, mask_bottom], show_progress='full')
bt_destfiles.select(fn=on_destfiles_selected, outputs=[preview_frame_num, text_frame_clip, forced_fps], show_progress='hidden').then(fn=on_preview_frame_changed, inputs=previewinputs, outputs=[previewimage, mask_top, mask_bottom], show_progress='hidden')
bt_destfiles.clear(fn=on_clear_destfiles, outputs=[target_faces])
resultfiles.select(fn=on_resultfiles_selected, inputs=[resultfiles], outputs=[resultimage, resultvideo])
face_selection.select(on_select_face, None, None)
bt_faceselect.click(fn=on_selected_face, outputs=[input_faces, target_faces, selected_face_detection])
bt_cancelfaceselect.click(fn=on_end_face_selection, outputs=[dynamic_face_selection, face_selection])
bt_clear_input_faces.click(fn=on_clear_input_faces, outputs=[input_faces])
bt_add_local.click(fn=on_add_local_folder, inputs=[local_folder], outputs=[bt_destfiles])
bt_preview_mask.click(fn=on_preview_mask, inputs=[preview_frame_num, bt_destfiles, clip_text], outputs=[previewimage])
start_event = bt_start.click(fn=start_swap,
inputs=[ui.globals.ui_selected_enhancer, selected_face_detection, roop.globals.keep_frames, roop.globals.wait_after_extraction,
roop.globals.skip_audio, max_face_distance, ui.globals.ui_blend_ratio, chk_useclip, clip_text,video_swapping_method, no_face_action, vr_mode, autorotate],
outputs=[bt_start, resultfiles])
after_swap_event = start_event.then(fn=on_resultfiles_finished, inputs=[resultfiles], outputs=[resultimage, resultvideo])
bt_stop.click(fn=stop_swap, cancels=[start_event, after_swap_event], queue=False)
bt_refresh_preview.click(fn=on_preview_frame_changed, inputs=previewinputs, outputs=[previewimage, mask_top, mask_bottom])
fake_preview.change(fn=on_preview_frame_changed, inputs=previewinputs, outputs=[previewimage, mask_top, mask_bottom])
preview_frame_num.change(fn=on_preview_frame_changed, inputs=previewinputs, outputs=[previewimage, mask_top, mask_bottom], show_progress='hidden')
bt_use_face_from_preview.click(fn=on_use_face_from_selected, show_progress='full', inputs=[bt_destfiles, preview_frame_num], outputs=[dynamic_face_selection, face_selection, target_faces, selected_face_detection])
set_frame_start.click(fn=on_set_frame, inputs=[set_frame_start, preview_frame_num], outputs=[text_frame_clip])
set_frame_end.click(fn=on_set_frame, inputs=[set_frame_end, preview_frame_num], outputs=[text_frame_clip])
def on_mask_top_changed(mask_offset):
global SELECTED_INPUT_FACE_INDEX
if len(roop.globals.INPUT_FACESETS) > SELECTED_INPUT_FACE_INDEX:
roop.globals.INPUT_FACESETS[SELECTED_INPUT_FACE_INDEX].faces[0].mask_offsets[0] = mask_offset
def on_mask_bottom_changed(mask_offset):
global SELECTED_INPUT_FACE_INDEX
if len(roop.globals.INPUT_FACESETS) > SELECTED_INPUT_FACE_INDEX:
roop.globals.INPUT_FACESETS[SELECTED_INPUT_FACE_INDEX].faces[0].mask_offsets[1] = mask_offset
def on_add_local_folder(folder):
files = util.get_local_files_from_folder(folder)
if files is None:
gr.Warning("Empty folder or folder not found!")
return files
def on_srcfile_changed(srcfiles, progress=gr.Progress()):
from roop.face_util import norm_crop2
global SELECTION_FACES_DATA, IS_INPUT, input_faces, face_selection, last_image
IS_INPUT = True
if srcfiles is None or len(srcfiles) < 1:
return gr.Column.update(visible=False), None, ui.globals.ui_input_thumbs
thumbs = []
for f in srcfiles:
source_path = f.name
if source_path.lower().endswith('fsz'):
progress(0, desc="Retrieving faces from Faceset File", )
unzipfolder = os.path.join(os.environ["TEMP"], 'faceset')
if os.path.isdir(unzipfolder):
files = os.listdir(unzipfolder)
for file in files:
os.remove(os.path.join(unzipfolder, file))
else:
os.makedirs(unzipfolder)
util.mkdir_with_umask(unzipfolder)
util.unzip(source_path, unzipfolder)
is_first = True
face_set = FaceSet()
for file in os.listdir(unzipfolder):
if file.endswith(".png"):
filename = os.path.join(unzipfolder,file)
progress.update()
SELECTION_FACES_DATA = extract_face_images(filename, (False, 0))
for f in SELECTION_FACES_DATA:
face = f[0]
face.mask_offsets = (0,0)
face_set.faces.append(face)
if is_first:
image = util.convert_to_gradio(f[1])
ui.globals.ui_input_thumbs.append(image)
is_first = False
face_set.ref_images.append(get_image_frame(filename))
if len(face_set.faces) > 0:
if len(face_set.faces) > 1:
face_set.AverageEmbeddings()
roop.globals.INPUT_FACESETS.append(face_set)
elif util.has_image_extension(source_path):
progress(0, desc="Retrieving faces from image", )
roop.globals.source_path = source_path
SELECTION_FACES_DATA = extract_face_images(roop.globals.source_path, (False, 0))
progress(0.5, desc="Retrieving faces from image")
for f in SELECTION_FACES_DATA:
face_set = FaceSet()
face = f[0]
face.mask_offsets = (0,0)
face_set.faces.append(face)
image = util.convert_to_gradio(f[1])
ui.globals.ui_input_thumbs.append(image)
roop.globals.INPUT_FACESETS.append(face_set)
progress(1.0)
# old style with selecting input faces commented out
# if len(thumbs) < 1:
# return gr.Column.update(visible=False), None, ui.globals.ui_input_thumbs
# return gr.Column.update(visible=True), thumbs, gr.Gallery.update(visible=True)
return gr.Column.update(visible=False), None, ui.globals.ui_input_thumbs
def on_select_input_face(evt: gr.SelectData):
global SELECTED_INPUT_FACE_INDEX
SELECTED_INPUT_FACE_INDEX = evt.index
def remove_selected_input_face():
global SELECTED_INPUT_FACE_INDEX
if len(roop.globals.INPUT_FACESETS) > SELECTED_INPUT_FACE_INDEX:
f = roop.globals.INPUT_FACESETS.pop(SELECTED_INPUT_FACE_INDEX)
del f
if len(ui.globals.ui_input_thumbs) > SELECTED_INPUT_FACE_INDEX:
f = ui.globals.ui_input_thumbs.pop(SELECTED_INPUT_FACE_INDEX)
del f
return ui.globals.ui_input_thumbs
def on_select_target_face(evt: gr.SelectData):
global SELECTED_TARGET_FACE_INDEX
SELECTED_TARGET_FACE_INDEX = evt.index
def remove_selected_target_face():
if len(roop.globals.TARGET_FACES) > SELECTED_TARGET_FACE_INDEX:
f = roop.globals.TARGET_FACES.pop(SELECTED_TARGET_FACE_INDEX)
del f
if len(ui.globals.ui_target_thumbs) > SELECTED_TARGET_FACE_INDEX:
f = ui.globals.ui_target_thumbs.pop(SELECTED_TARGET_FACE_INDEX)
del f
return ui.globals.ui_target_thumbs
def on_use_face_from_selected(files, frame_num):
global IS_INPUT, SELECTION_FACES_DATA
IS_INPUT = False
thumbs = []
roop.globals.target_path = files[selected_preview_index].name
if util.is_image(roop.globals.target_path) and not roop.globals.target_path.lower().endswith(('gif')):
SELECTION_FACES_DATA = extract_face_images(roop.globals.target_path, (False, 0))
if len(SELECTION_FACES_DATA) > 0:
for f in SELECTION_FACES_DATA:
image = util.convert_to_gradio(f[1])
thumbs.append(image)
else:
gr.Info('No faces detected!')
roop.globals.target_path = None
elif util.is_video(roop.globals.target_path) or roop.globals.target_path.lower().endswith(('gif')):
selected_frame = frame_num
SELECTION_FACES_DATA = extract_face_images(roop.globals.target_path, (True, selected_frame))
if len(SELECTION_FACES_DATA) > 0:
for f in SELECTION_FACES_DATA:
image = util.convert_to_gradio(f[1])
thumbs.append(image)
else:
gr.Info('No faces detected!')
roop.globals.target_path = None
if len(thumbs) == 1:
roop.globals.TARGET_FACES.append(SELECTION_FACES_DATA[0][0])
ui.globals.ui_target_thumbs.append(thumbs[0])
return gr.Row.update(visible=False), None, ui.globals.ui_target_thumbs, gr.Dropdown.update(value='Selected face')
return gr.Row.update(visible=True), thumbs, gr.Gallery.update(visible=True), gr.Dropdown.update(visible=True)
def on_select_face(evt: gr.SelectData): # SelectData is a subclass of EventData
global SELECTED_FACE_INDEX
SELECTED_FACE_INDEX = evt.index
def on_selected_face():
global IS_INPUT, SELECTED_FACE_INDEX, SELECTION_FACES_DATA
fd = SELECTION_FACES_DATA[SELECTED_FACE_INDEX]
image = util.convert_to_gradio(fd[1])
if IS_INPUT:
face_set = FaceSet()
fd[0].mask_offsets = (0,0)
face_set.faces.append(fd[0])
roop.globals.INPUT_FACESETS.append(face_set)
ui.globals.ui_input_thumbs.append(image)
return ui.globals.ui_input_thumbs, gr.Gallery.update(visible=True), gr.Dropdown.update(visible=True)
else:
roop.globals.TARGET_FACES.append(fd[0])
ui.globals.ui_target_thumbs.append(image)
return gr.Gallery.update(visible=True), ui.globals.ui_target_thumbs, gr.Dropdown.update(value='Selected face')
# bt_faceselect.click(fn=on_selected_face, outputs=[dynamic_face_selection, face_selection, input_faces, target_faces])
def on_end_face_selection():
return gr.Column.update(visible=False), None
def on_preview_frame_changed(frame_num, files, fake_preview, enhancer, detection, face_distance, blend_ratio, use_clip, clip_text, no_face_action, vr_mode, auto_rotate):
global SELECTED_INPUT_FACE_INDEX, is_processing
from roop.core import live_swap
mask_offsets = (0,0)
if len(roop.globals.INPUT_FACESETS) > SELECTED_INPUT_FACE_INDEX:
if not hasattr(roop.globals.INPUT_FACESETS[SELECTED_INPUT_FACE_INDEX].faces[0], 'mask_offsets'):
roop.globals.INPUT_FACESETS[SELECTED_INPUT_FACE_INDEX].faces[0].mask_offsets = mask_offsets
mask_offsets = roop.globals.INPUT_FACESETS[SELECTED_INPUT_FACE_INDEX].faces[0].mask_offsets
if is_processing or files is None or selected_preview_index >= len(files) or frame_num is None:
return None, mask_offsets[0], mask_offsets[1]
filename = files[selected_preview_index].name
# time.sleep(0.3)
if util.is_video(filename) or filename.lower().endswith('gif'):
current_frame = get_video_frame(filename, frame_num)
else:
current_frame = get_image_frame(filename)
if current_frame is None:
return None, mask_offsets[0], mask_offsets[1]
if not fake_preview or len(roop.globals.INPUT_FACESETS) < 1:
return util.convert_to_gradio(current_frame), mask_offsets[0], mask_offsets[1]
roop.globals.face_swap_mode = translate_swap_mode(detection)
roop.globals.selected_enhancer = enhancer
roop.globals.distance_threshold = face_distance
roop.globals.blend_ratio = blend_ratio
roop.globals.no_face_action = index_of_no_face_action(no_face_action)
roop.globals.vr_mode = vr_mode
roop.globals.autorotate_faces = auto_rotate
if use_clip and clip_text is None or len(clip_text) < 1:
use_clip = False
roop.globals.execution_threads = roop.globals.CFG.max_threads
current_frame = live_swap(current_frame, roop.globals.face_swap_mode, use_clip, clip_text, SELECTED_INPUT_FACE_INDEX)
if current_frame is None:
return None, mask_offsets[0], mask_offsets[1]
return util.convert_to_gradio(current_frame), mask_offsets[0], mask_offsets[1]
def gen_processing_text(start, end):
return f'Processing frame range [{start} - {end}]'
def on_set_frame(sender:str, frame_num):
global selected_preview_index, list_files_process
idx = selected_preview_index
if list_files_process[idx].endframe == 0:
return gen_processing_text(0,0)
start = list_files_process[idx].startframe
end = list_files_process[idx].endframe
if sender.lower().endswith('start'):
list_files_process[idx].startframe = min(frame_num, end)
else:
list_files_process[idx].endframe = max(frame_num, start)
return gen_processing_text(list_files_process[idx].startframe,list_files_process[idx].endframe)
def on_preview_mask(frame_num, files, clip_text):
from roop.core import preview_mask
global is_processing
if is_processing:
return None
filename = files[selected_preview_index].name
if util.is_video(filename) or filename.lower().endswith('gif'):
current_frame = get_video_frame(filename, frame_num)
else:
current_frame = get_image_frame(filename)
if current_frame is None:
return None
current_frame = preview_mask(current_frame, clip_text)
return util.convert_to_gradio(current_frame)
def on_clear_input_faces():
ui.globals.ui_input_thumbs.clear()
roop.globals.INPUT_FACESETS.clear()
return ui.globals.ui_input_thumbs
def on_clear_destfiles():
roop.globals.TARGET_FACES.clear()
ui.globals.ui_target_thumbs.clear()
return ui.globals.ui_target_thumbs
def index_of_no_face_action(dropdown_text):
global no_face_choices
return no_face_choices.index(dropdown_text)
def translate_swap_mode(dropdown_text):
if dropdown_text == "Selected face":
return "selected"
elif dropdown_text == "First found":
return "first"
elif dropdown_text == "Single face frames only [auto-rotate]":
return "single_face_frames_only"
elif dropdown_text == "All female":
return "all_female"
elif dropdown_text == "All male":
return "all_male"
return "all"
def start_swap( enhancer, detection, keep_frames, wait_after_extraction, skip_audio, face_distance, blend_ratio,
use_clip, clip_text, processing_method, no_face_action, vr_mode, autorotate, progress=gr.Progress(track_tqdm=False)):
from ui.main import prepare_environment
from roop.core import batch_process
global is_processing, list_files_process
if list_files_process is None or len(list_files_process) <= 0:
return gr.Button.update(variant="primary"), None
if roop.globals.CFG.clear_output:
shutil.rmtree(roop.globals.output_path)
prepare_environment()
roop.globals.selected_enhancer = enhancer
roop.globals.target_path = None
roop.globals.distance_threshold = face_distance
roop.globals.blend_ratio = blend_ratio
roop.globals.keep_frames = keep_frames
roop.globals.wait_after_extraction = wait_after_extraction
roop.globals.skip_audio = skip_audio
roop.globals.face_swap_mode = translate_swap_mode(detection)
roop.globals.no_face_action = index_of_no_face_action(no_face_action)
roop.globals.vr_mode = vr_mode
roop.globals.autorotate_faces = autorotate
if use_clip and clip_text is None or len(clip_text) < 1:
use_clip = False
if roop.globals.face_swap_mode == 'selected':
if len(roop.globals.TARGET_FACES) < 1:
gr.Error('No Target Face selected!')
return gr.Button.update(variant="primary"), None
is_processing = True
yield gr.Button.update(variant="secondary"), None
roop.globals.execution_threads = roop.globals.CFG.max_threads
roop.globals.video_encoder = roop.globals.CFG.output_video_codec
roop.globals.video_quality = roop.globals.CFG.video_quality
roop.globals.max_memory = roop.globals.CFG.memory_limit if roop.globals.CFG.memory_limit > 0 else None
batch_process(list_files_process, use_clip, clip_text, processing_method == "In-Memory processing", progress)
is_processing = False
outdir = pathlib.Path(roop.globals.output_path)
outfiles = [item for item in outdir.rglob("*") if item.is_file()]
if len(outfiles) > 0:
yield gr.Button.update(variant="primary"),gr.Files.update(value=outfiles)
else:
yield gr.Button.update(variant="primary"),None
def stop_swap():
roop.globals.processing = False
gr.Info('Aborting processing - please wait for the remaining threads to be stopped')
def on_fps_changed(fps):
global selected_preview_index, list_files_process
if len(list_files_process) < 1 or list_files_process[selected_preview_index].endframe < 1:
return
list_files_process[selected_preview_index].fps = fps
def on_destfiles_changed(destfiles):
global selected_preview_index, list_files_process
if destfiles is None or len(destfiles) < 1:
list_files_process.clear()
return gr.Slider.update(value=0, maximum=0), ''
for f in destfiles:
list_files_process.append(ProcessEntry(f.name, 0,0, 0))
selected_preview_index = 0
idx = selected_preview_index
filename = list_files_process[idx].filename
if util.is_video(filename) or filename.lower().endswith('gif'):
total_frames = get_video_frame_total(filename)
else:
total_frames = 0
list_files_process[idx].endframe = total_frames
if total_frames > 0:
return gr.Slider.update(value=0, maximum=total_frames), gen_processing_text(list_files_process[idx].startframe,list_files_process[idx].endframe)
return gr.Slider.update(value=0, maximum=total_frames), ''
def on_destfiles_selected(evt: gr.SelectData):
global selected_preview_index, list_files_process
if evt is not None:
selected_preview_index = evt.index
idx = selected_preview_index
filename = list_files_process[idx].filename
fps = list_files_process[idx].fps
if util.is_video(filename) or filename.lower().endswith('gif'):
total_frames = get_video_frame_total(filename)
if list_files_process[idx].endframe == 0:
list_files_process[idx].endframe = total_frames
else:
total_frames = 0
if total_frames > 0:
return gr.Slider.update(value=list_files_process[idx].startframe, maximum=total_frames), gen_processing_text(list_files_process[idx].startframe,list_files_process[idx].endframe), fps
return gr.Slider.update(value=0, maximum=total_frames), gen_processing_text(0,0), fps
def on_resultfiles_selected(evt: gr.SelectData, files):
selected_index = evt.index
filename = files[selected_index].name
if util.is_video(filename):
return gr.update(visible=False), gr.update(visible=True, value=filename)
else:
if filename.lower().endswith('gif'):
current_frame = get_video_frame(filename)
else:
current_frame = get_image_frame(filename)
return gr.update(visible=True, value=util.convert_to_gradio(current_frame)), gr.update(visible=False)
def on_resultfiles_finished(files):
selected_index = 0
if files is None or len(files) < 1:
return None, None
filename = files[selected_index].name
if util.is_video(filename):
return gr.update(visible=False), gr.update(visible=True, value=filename)
else:
if filename.lower().endswith('gif'):
current_frame = get_video_frame(filename)
else:
current_frame = get_image_frame(filename)
return gr.update(visible=True, value=util.convert_to_gradio(current_frame)), gr.update(visible=False)
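For context, the error comes from Gradio 4.x removing the per-component update() classmethods this code uses. A minimal sketch of the fix, assuming a Gradio 4.x install: replace each SomeComponent.update(...) call with the generic gr.update(...) helper, which still exists.

import gradio as gr

# Gradio 3.x style (raises "type object 'Slider' has no attribute 'update'" on 4.x):
#   return gr.Slider.update(value=0, maximum=0), ''
# Gradio 4.x style:
def on_destfiles_cleared():
    return gr.update(value=0, maximum=0), ''

The same one-for-one substitution applies to the gr.Column.update, gr.Row.update, gr.Gallery.update, gr.Dropdown.update, gr.Button.update and gr.Files.update calls in the code above.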
|
61d6261ec65b0d54423684e36380cff5
|
{
"intermediate": 0.3777521252632141,
"beginner": 0.44872158765792847,
"expert": 0.17352630198001862
}
|
44,162
|
<div class="click-closed"></div>
<!--/ Form Search Star /-->
<div class="box-collapse">
<div class="title-box-d">
<h3 class="title-d">Search Property</h3>
</div>
<span class="close-box-collapse right-boxed bi bi-x"></span>
<div class="box-collapse-wrap form">
<form class="form-a">
<div class="row">
<div class="col-md-12 mb-2">
<div class="form-group">
<label class="pb-2" for="Type">Keyword</label>
<input
type="text"
class="form-control form-control-lg form-control-a"
placeholder="Keyword"
/>
</div>
</div>
<div class="col-md-6 mb-2">
<div class="form-group mt-3">
<label class="pb-2" for="Type">Type</label>
<select
class="form-control form-select form-control-a"
id="Type"
>
<option>Per Month</option>
<option>Per Day</option>
</select>
</div>
</div>
<div class="col-md-6 mb-2">
<div class="form-group mt-3">
<label class="pb-2" for="city">City</label>
<select
class="form-control form-select form-control-a"
id="city"
>
<option>All City</option>
<option>Fes</option>
<option>Marrakech</option>
<option>Casablanca</option>
<option>Essouira</option>
</select>
</div>
</div>
<div class="col-md-6 mb-2">
<div class="form-group mt-3">
<label class="pb-2" for="bedrooms">Bedrooms</label>
<select
class="form-control form-select form-control-a"
id="bedrooms"
>
<option>Any</option>
<option>01</option>
<option>02</option>
<option>03</option>
</select>
</div>
</div>
<div class="col-md-6 mb-2">
<div class="form-group mt-3">
<label class="pb-2" for="garages">Garages</label>
<select
class="form-control form-select form-control-a"
id="garages"
>
<option>Any</option>
<option>01</option>
<option>02</option>
<option>03</option>
<option>04</option>
</select>
</div>
</div>
<div class="col-md-6 mb-2">
<div class="form-group mt-3">
<label class="pb-2" for="bathrooms">Bathrooms</label>
<select
class="form-control form-select form-control-a"
id="bathrooms"
>
<option>Any</option>
<option>01</option>
<option>02</option>
<option>03</option>
</select>
</div>
</div>
<div class="col-md-6 mb-2">
<div class="form-group mt-3">
<label class="pb-2" for="price">Min Price</label>
<select
class="form-control form-select form-control-a"
id="price"
>
<option>Unlimite</option>
<option>1200 DH</option>
<option>1500 DH</option>
<option>2000 DH</option>
<option>2500 DH</option>
</select>
</div>
</div>
<div class="col-md-12">
<button type="submit" class="btn btn-b">Search Property</button>
</div>
</div>
</form>
</div>
</div>
I want database ("base de données") code to link it with the search bar
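A minimal backend sketch in Python (Flask + sqlite3) the form could POST to; the properties table, its columns and the /search route are assumptions to adapt:

import sqlite3
from flask import Flask, request, jsonify

app = Flask(__name__)

@app.route("/search", methods=["POST"])
def search():
    # Read the form fields by name; "All City" / "Any" mean "no filter".
    city = request.form.get("city", "All City")
    bedrooms = request.form.get("bedrooms", "Any")
    sql, params = "SELECT * FROM properties WHERE 1=1", []
    if city != "All City":
        sql += " AND city = ?"
        params.append(city)
    if bedrooms != "Any":
        sql += " AND bedrooms = ?"
        params.append(int(bedrooms))
    con = sqlite3.connect("properties.db")
    rows = con.execute(sql, params).fetchall()
    con.close()
    return jsonify(rows)

if __name__ == "__main__":
    app.run(debug=True)

The form then needs method="post", action="/search" and name attributes on the selects for the browser to submit it there.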
|
c8c0f23818f5a70dc2d0aabf7357339d
|
{
"intermediate": 0.29415279626846313,
"beginner": 0.5840485692024231,
"expert": 0.12179870158433914
}
|
44,163
|
<!--/ Form Search Star /-->
<div class="box-collapse">
<div class="title-box-d">
<h3 class="title-d">Search Property</h3>
</div>
<span class="close-box-collapse right-boxed bi bi-x"></span>
<div class="box-collapse-wrap form">
<form class="form-a">
<div class="row">
<div class="col-md-12 mb-2">
<div class="form-group">
<label class="pb-2" for="Type">Keyword</label>
<input
type="text"
class="form-control form-control-lg form-control-a"
placeholder="Keyword"
/>
</div>
</div>
<div class="col-md-6 mb-2">
<div class="form-group mt-3">
<label class="pb-2" for="Type">Type</label>
<select
class="form-control form-select form-control-a"
id="Type"
>
<option>Per Month</option>
<option>Per Day</option>
</select>
</div>
</div>
<div class="col-md-6 mb-2">
<div class="form-group mt-3">
<label class="pb-2" for="city">City</label>
<select
class="form-control form-select form-control-a"
id="city"
>
<option>All City</option>
<option>Fes</option>
<option>Marrakech</option>
<option>Casablanca</option>
<option>Essouira</option>
</select>
</div>
</div>
<div class="col-md-6 mb-2">
<div class="form-group mt-3">
<label class="pb-2" for="bedrooms">Bedrooms</label>
<select
class="form-control form-select form-control-a"
id="bedrooms"
>
<option>Any</option>
<option>01</option>
<option>02</option>
<option>03</option>
</select>
</div>
</div>
<div class="col-md-6 mb-2">
<div class="form-group mt-3">
<label class="pb-2" for="garages">Garages</label>
<select
class="form-control form-select form-control-a"
id="garages"
>
<option>Any</option>
<option>01</option>
<option>02</option>
<option>03</option>
<option>04</option>
</select>
</div>
</div>
<div class="col-md-6 mb-2">
<div class="form-group mt-3">
<label class="pb-2" for="bathrooms">Bathrooms</label>
<select
class="form-control form-select form-control-a"
id="bathrooms"
>
<option>Any</option>
<option>01</option>
<option>02</option>
<option>03</option>
</select>
</div>
</div>
<div class="col-md-6 mb-2">
<div class="form-group mt-3">
<label class="pb-2" for="price">Min Price</label>
<select
class="form-control form-select form-control-a"
id="price"
>
<option>Unlimite</option>
<option>1200 DH</option>
<option>1500 DH</option>
<option>2000 DH</option>
<option>2500 DH</option>
</select>
</div>
</div>
<div class="col-md-12">
<button type="submit" class="btn btn-b">Search Property</button>
</div>
</div>
</form>
</div>
</div>
I want database ("base de données") code to link it with the search bar
|
311b3c03babe493bc211ae603e2aedb5
|
{
"intermediate": 0.3237345218658447,
"beginner": 0.4274188280105591,
"expert": 0.2488466054201126
}
|
44,164
|
1) Define an abstract class Forme that represents a geometric shape. This class
must provide the following abstract methods:
perimetre(): computes the perimeter of a Forme object.
aire(): computes the area of a Forme object.
afficher(): displays the information (attributes, perimeter and area) of a
Forme object.
2) Implement a Rectangle class that inherits from the Forme class. This class must have a
constructor taking the length and width as parameters, and it must implement
all the abstract methods.
3) Implement a Cercle class that also inherits from the Forme class. This class must
have a constructor taking the radius as a parameter, and it must implement all
the abstract methods.
4) Test your implementations.
rectangle1 = Rectangle(5, 10)
print(rectangle1.perimetre())  # 30.0
print(rectangle1.aire())  # 50.0
rectangle1.afficher()
cercle1 = Cercle(7)
print(cercle1.perimetre())  # 43.96
print(cercle1.aire())  # 153.8
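A sketch of one possible solution, keeping the exercise's French identifiers:

import math
from abc import ABC, abstractmethod

class Forme(ABC):
    @abstractmethod
    def perimetre(self): ...
    @abstractmethod
    def aire(self): ...
    @abstractmethod
    def afficher(self): ...

class Rectangle(Forme):
    def __init__(self, longueur, largeur):
        self.longueur, self.largeur = longueur, largeur
    def perimetre(self):
        return 2.0 * (self.longueur + self.largeur)
    def aire(self):
        return float(self.longueur * self.largeur)
    def afficher(self):
        print(f"Rectangle {self.longueur}x{self.largeur}: "
              f"perimeter={self.perimetre()}, area={self.aire()}")

class Cercle(Forme):
    def __init__(self, rayon):
        self.rayon = rayon
    def perimetre(self):
        return 2 * math.pi * self.rayon
    def aire(self):
        return math.pi * self.rayon ** 2
    def afficher(self):
        print(f"Cercle r={self.rayon}: perimeter={self.perimetre():.2f}, "
              f"area={self.aire():.2f}")

(The expected 43.96 / 153.8 in the comments assume pi = 3.14; math.pi gives 43.98 and 153.94.)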
|
b30a50494884e8f68cd1b7be0339c4f9
|
{
"intermediate": 0.3617388606071472,
"beginner": 0.3315008580684662,
"expert": 0.3067602217197418
}
|
44,165
|
I am trying to use transformers[torch] and am getting this error:
OSError: [WinError 126] The specified module could not be found. Error loading "C:\Users\Zach\Documents\AI\Gradio\venv\Lib\site-packages\torch\lib\c10.dll" or one of its dependencies.
|
4dfe23071ce531c2b44723cfa38dbe51
|
{
"intermediate": 0.57341068983078,
"beginner": 0.20649704337120056,
"expert": 0.22009220719337463
}
|
44,166
|
I am using the mkdir function inside a C program. For the second argument I am passing in 0775, but I want to be able to use the symbolic aliases for the mode instead. What would the corresponding value be?
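For 0775 the composition is S_IRWXU | S_IRWXG | S_IROTH | S_IXOTH (owner rwx, group rwx, others r-x). A quick sketch verifying it - Python's stat module exposes the same S_* names and values as C's <sys/stat.h>:

import stat

mode = stat.S_IRWXU | stat.S_IRWXG | stat.S_IROTH | stat.S_IXOTH
assert mode == 0o775
print(oct(mode))  # 0o775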
|
a89b51d8ee0b82ee8a911b48cea93d58
|
{
"intermediate": 0.3947329819202423,
"beginner": 0.2930365204811096,
"expert": 0.31223052740097046
}
|
44,167
|
print("Recording…")
add an animation cycling through self.steps = ["⢿", "⣻", "⣽", "⣾", "⣷", "⣯", "⣟", "⡿"]
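A minimal sketch of such a spinner: cycle the frames on a daemon thread while the recording call blocks, then stop it with an event:

import itertools
import sys
import threading
import time

def spin(stop_event):
    steps = ["⢿", "⣻", "⣽", "⣾", "⣷", "⣯", "⣟", "⡿"]
    for c in itertools.cycle(steps):
        if stop_event.is_set():
            break
        sys.stdout.write(f"\rRecording… {c}")
        sys.stdout.flush()
        time.sleep(0.1)
    sys.stdout.write("\rRecording… done\n")

stop = threading.Event()
t = threading.Thread(target=spin, args=(stop,), daemon=True)
t.start()
time.sleep(3)   # stand-in for the blocking recording work
stop.set()
t.join()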
|
a06adad9d3ee25ff9d07ac4d95c41a60
|
{
"intermediate": 0.30765703320503235,
"beginner": 0.4573844075202942,
"expert": 0.23495855927467346
}
|
44,168
|
create and activate a virtual environment
|
0131ce284c46cf84cb8282ec271bd087
|
{
"intermediate": 0.19807825982570648,
"beginner": 0.1351262480020523,
"expert": 0.6667954921722412
}
|
44,169
|
A data engineer has ingested a JSON file into a table raw_table with the following schema:
transaction_id STRING,
payload ARRAY<STRUCT<customer_id:STRING, date:TIMESTAMP, store_id:STRING>>
The data engineer wants to efficiently extract the date of each transaction into a table with the following schema:
transaction_id STRING,
date TIMESTAMP
Which of the following commands should the data engineer run to complete this task?
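The answer options are not reproduced here, but the usual pattern for pulling a field out of an array of structs is explode. A hedged PySpark sketch of that approach (if payload were a plain STRUCT rather than an ARRAY, a simple payload.date projection would suffice):

from pyspark.sql import SparkSession
from pyspark.sql.functions import explode, col

spark = SparkSession.builder.getOrCreate()
result = (
    spark.table("raw_table")
         .select("transaction_id", explode("payload").alias("p"))
         .select("transaction_id", col("p.date").alias("date"))
)
# SQL equivalent:
# SELECT transaction_id, p.date AS date
# FROM raw_table LATERAL VIEW explode(payload) t AS p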
|
8c122b192b253e8b47a2a3893341bb1c
|
{
"intermediate": 0.4218820333480835,
"beginner": 0.23532523214817047,
"expert": 0.3427927494049072
}
|
44,170
|
create a Bootstrap 5 CSS HTML5 script on Ubuntu Linux to build English TTS voices for Flite, enhancing how these args fit into the created new arg: voice=awb -c:a pcm_s16le -ar 48000 -ac 2 -b:a 320k
|
0214d8c087561888114e099b6a8ad567
|
{
"intermediate": 0.3717195391654968,
"beginner": 0.3385540246963501,
"expert": 0.2897263765335083
}
|
44,171
|
I started building an AI agent that:
- provides reliable answers to frequently asked health questions
- notifies users when it’s time to take their medications
- provides emergency assistance. Installed Python, created a virtual environment, and initialized a Rasa project.
|
81666ee563662d1258b494e43a5492e5
|
{
"intermediate": 0.16951364278793335,
"beginner": 0.19734714925289154,
"expert": 0.6331391930580139
}
|
44,172
|
in loader.py:
from itertools import cycle
from shutil import get_terminal_size
from threading import Thread
from time import sleep
class Loader:
"""Busy symbol.
Can be called inside a context:
with Loader("This take some Time..."):
# do something
pass
"""
def __init__(self, chan, desc="Loading...", end='', timeout=0.1, mode='prog'):
"""
A loader-like context manager
Args:
desc (str, optional): The loader's description. Defaults to "Loading...".
end (str, optional): Final print. Defaults to "".
timeout (float, optional): Sleep time between prints. Defaults to 0.1.
"""
self.desc = desc
self.end = end
self.timeout = timeout
self.channel = chan
self._thread = Thread(target=self._animate, daemon=True)
if mode == 'std1':
self.steps = ["⢿", "⣻", "⣽", "⣾", "⣷", "⣯", "⣟", "⡿"]
elif mode == 'std2':
self.steps = ["◜","◝","◞","◟"]
elif mode == 'std3':
self.steps = ["😐 ","😐 ","😮 ","😮 ","😦 ","😦 ","😧 ","😧 ","🤯 ","💥 ","✨ ","\u3000 ","\u3000 ","\u3000 "]
elif mode == 'prog':
self.steps = ["[∙∙∙]","[●∙∙]","[∙●∙]","[∙∙●]","[∙∙∙]"]
self.done = False
def start(self):
self._thread.start()
return self
def _animate(self):
for c in cycle(self.steps):
if self.done:
break
Printer.print_loader(self.channel, f"\r\t{c} {self.desc} ")
sleep(self.timeout)
def __enter__(self):
self.start()
def stop(self):
self.done = True
cols = get_terminal_size((80, 20)).columns
Printer.print_loader(self.channel, "\r" + " " * cols)
if self.end != "":
Printer.print_loader(self.channel, f"\r{self.end}")
def __exit__(self, exc_type, exc_value, tb):
# handle exceptions with those variables ^
self.stop()
import base64
import acrcloud
import os
import eyed3
import eyed3.id3.frames
from eyed3.id3.frames import UserTextFrame
import requests
import json
import re
import sounddevice as sd
import numpy as np
import scipy.io.wavfile
from bs4 import BeautifulSoup
from genius_api import GeniusApi
import time
import threading
import time
from my_shazam_utility import shazam_recognize_song
from applemusic_api import AppleMusicApi
from Acrcloudretrieve import recognize_song, set_id3_tags_mp3
from Retrieve_lyrics import get_lyrics
from erhalten_alb_covers import save_and_embed_album_cover
def load_config():
with open('D:/Eurydice/Encompassing Data by discerning/config/config.json', 'r') as config_file:
config_data = json.load(config_file)
return config_data
config = load_config()
CLIENT_ID = config['Spotify']['CLIENT_ID']
CLIENT_SECRET = config['Spotify']['CLIENT_SECRET']
genius_api = GeniusApi()
def get_audio_source_choice(duration=10):
border = "=" * 50
title = "AUDIO SOURCE SELECTION"
padded_title = title.center(len(border))
print(f"\n{border}")
print(padded_title)
print(border)
box_width = max(len(s) for s in ["Microphone - Live audio capture",
"Internal Sound - Detect sounds playing internally on the device",
"File - Detect through an internally saved file"]) + 6
print("\nPlease select the audio source you'd like to use:\n")
print(f"+{'-' * (box_width - 2)}+")
print(f"| 1: Microphone - Live audio capture{' ' * (box_width - len(' 1: Microphone - Live audio capture') - 3)}|")
print(f"| 2: Internal Sound - Detect sounds playing internally on the device{' ' * (box_width - len(' 2: Internal Sound - Detect sounds playing internally on the device') - 3)}|")
print(f"| 3: File - Detect through an internally saved file{' ' * (box_width - len(' 3: File - Detect through an internally saved file') - 3)}|")
print(f"+{'-' * (box_width - 2)}+")
choice = input("Enter your choice (1, 2, or 3) and press Enter: ")
print(f"{border}\n")
return choice
def capture_internal_audio(device, duration=10, sample_rate=44100, filename="internal_audio.wav"):
device_info = sd.query_devices(device, 'input')
max_input_channels = device_info.get('max_input_channels', 1)
channels = min(2, max_input_channels)
print(f"Capturing internal audio using {channels} channel(s).\n Please play the audio you'd like to identify…")
recording = sd.rec(int(duration * sample_rate), samplerate=sample_rate, channels=2, dtype='float64', device=device)
sd.wait()
scipy.io.wavfile.write(filename, sample_rate, (recording * 32767).astype(np.int16))
print("Capture complete.")
print(f"Recording shape (samples, channels): {recording.shape}")
print(recording, sample_rate)
print(filename)
return filename
def capture_and_save_audio_from_mic(duration=10, sample_rate=44100, filename="temp_captured_audio_file.wav"):
print("Recording…")
recording = sd.rec(int(duration * sample_rate), samplerate=sample_rate, channels=2, dtype='int16')
animation = AudioCaptureAnimation(mode='std2')
animation_thread = threading.Thread(target=animation.start_animation, daemon=True)
animation_thread.start()
time.sleep(duration)
animation.stop_animation()
animation_thread.join()
sd.wait()
print("Recording stopped.")
scipy.io.wavfile.write(filename, sample_rate, recording)
print(f"Recorded (samples, channels): {recording.shape}")
print(recording, sample_rate)
print(filename)
return filename
def get_user_choice(duration=10):
print("=" * 50)
print("Welcome to the Song Recognition Service!")
print("=" * 50)
print("\nPlease select the recognition service you'd like to use:\n")
print(" 1: YoutubeACR - Fast and accurate music recognition")
print(" 2: Shazam - Discover music, artists, and lyrics in seconds")
print("-" * 50)
choice = input("Enter your choice (1 or 2) and press Enter: ")
print("\n" + "." * 25 + " Processing " + "." * 25 + "\n")
return choice
def add_or_update_txxx_frame(audiofile, description, value):
found = False
frames = audiofile.tag.frame_set.get(eyed3.id3.frames.USERTEXT_FID, [])
for frame in frames:
if frame.description == description:
frame.text = value
found = True
break
if not found:
new_frame = eyed3.id3.frames.UserTextFrame(description=description, text=value)
if not frames: # If it's the first frame of this type
audiofile.tag.frame_set[eyed3.id3.frames.USERTEXT_FID] = [new_frame]
else:
frames.append(new_frame)
def authenticate_spotify(client_id, client_secret):
auth_url = 'https://accounts.spotify.com/api/token'
client_creds = f"{client_id}:{client_secret}"
client_creds_b64 = base64.b64encode(client_creds.encode())
headers = {'Authorization': f'Basic {client_creds_b64.decode()}'}
data = {'grant_type': 'client_credentials'}
response = requests.post(auth_url, headers=headers, data=data)
access_token = response.json().get('access_token')
return access_token
def search_spotify_for_song(access_token, artist_name, title):
base_url = "https://api.spotify.com/v1/search"
query = f"{title} artist:{artist_name}"
headers = {"Authorization": f"Bearer {access_token}"}
params = {"q": query, "type": "track", "limit": 1}
response = requests.get(base_url, headers=headers, params=params)
results = response.json()
try:
track_info = results['tracks']['items'][0]
return track_info
except IndexError:
print("Song not found on Spotify.")
return None
def get_lyrics_from_genius(artist_name, title):
results = genius_api.get_search_by_songs(f"{artist_name} {title}")
if results:
song_info = results[0]['result'] # Take the most relevant result
song_id = str(song_info['id'])
song_details = genius_api.get_song_by_id(song_id, text_format='plain')
return song_details.get('lyrics', "Lyrics not available.")
return "Song not found on Genius."
def save_lyrics_to_file(audio_file_path, track_number, title, artist_name, album_name, isrc, lyrics):
base_directory = os.path.dirname(audio_file_path)
file_name_format = f"{track_number:02d}. {title} - {artist_name} - {album_name} - {isrc}.lrc"
safe_file_name = re.sub(r'[/:*?"<>|]', '', file_name_format)
lyrics_file_path = os.path.join(base_directory, safe_file_name)
with open(lyrics_file_path, 'w', encoding='utf-8') as lrc_file:
lrc_file.write(lyrics)
print(f"Lyrics saved as: {safe_file_name}")
def get_high_quality_album_art_url(song_info):
images = song_info['album']['images']
if not images:
return None
highest_quality_image = max(images, key=lambda x: x['width']*x['height'])
return highest_quality_image['url']
def save_high_quality_album_art(image_url, file_path):
try:
response = requests.get(image_url, stream=True)
if response.status_code == 200:
with open(file_path, 'wb') as out_file:
for chunk in response.iter_content(1024):
out_file.write(chunk)
print(f"High quality album art saved: {file_path}")
return True
else:
print("Could not download the album art.")
except Exception as e:
print(f"Error saving high-quality album art: {e}")
return False
def embed_album_art_to_song(file_path, image_path):
try:
audiofile = eyed3.load(file_path)
if audiofile.tag is None:
audiofile.initTag()
with open(image_path, 'rb') as img_file:
audiofile.tag.images.set(3, img_file.read(), 'image/jpeg')
audiofile.tag.save()
print("High quality album art embedded into song.")
except FileNotFoundError:
print(f"Failed to embed album art - No such file: {image_path}")
def process_audio_file_with_spotify_search(audio_file_path):
shazam_data = shazam_recognize_song(audio_file_path)
if shazam_data:
artist_name = shazam_data['track']['subtitle']
title = shazam_data['track']['title']
print(f"Identified Song: {artist_name} - {title}")
access_token = authenticate_spotify(CLIENT_ID, CLIENT_SECRET)
song_info = search_spotify_for_song(access_token, artist_name, title)
if song_info:
print(json.dumps(song_info, indent=4))
print("\n///////////////////////////////\n")
album_name = song_info['album']['name']
album_url = song_info['album']['external_urls']['spotify']
track_number = song_info['track_number']
release_date = song_info['album']['release_date']
isrc = song_info.get('external_ids', {}).get('isrc', "Not Available")
label = song_info['label'] if 'label' in song_info else "Not Available"
explicit = str(song_info['explicit']) if 'explicit' in song_info else "Not Available" # Convert to string
genres = ", ".join(song_info['genres']) if 'genres' in song_info else "Not Available"
author_url = song_info['artists'][0]['external_urls']['spotify'] if 'artists' in song_info else "Not Available"
spotify_url = song_info['external_urls']['spotify']
print(f"Track Number on Spotify: {track_number}")
audiofile = eyed3.load(audio_file_path)
if audiofile.tag is None:
audiofile.initTag(version=eyed3.id3.ID3_V2_3)
audiofile.tag.artist = artist_name
audiofile.tag.album = album_name
audiofile.tag.album_artist = artist_name
audiofile.tag.title = title
audiofile.tag.recording_date = release_date
add_or_update_txxx_frame(audiofile, "Album URL", album_url)
add_or_update_txxx_frame(audiofile, "Eurydice", "True")
add_or_update_txxx_frame(audiofile, "Compilation", "KK")
add_or_update_txxx_frame(audiofile, "Genre", genres)
add_or_update_txxx_frame(audiofile, "Author URL", author_url)
add_or_update_txxx_frame(audiofile, "Label", label)
add_or_update_txxx_frame(audiofile, "Explicit", explicit)
add_or_update_txxx_frame(audiofile, "ISRC", isrc)
add_or_update_txxx_frame(audiofile, "Spotify URL", spotify_url)
audiofile.tag.comments.set(f"ISRC: {isrc}, Label: {label}, Explicit: {explicit}")
audiofile.tag.save()
print(f"Metadata embedded into the file: {audio_file_path}")
high_res_image_url = get_high_quality_album_art_url(song_info)
if high_res_image_url:
image_file_path = os.path.splitext(audio_file_path)[0] + ".jpg"
if save_high_quality_album_art(high_res_image_url, image_file_path):
embed_album_art_to_song(audio_file_path, image_file_path)
else:
print("Skipping album art embed due to download failure.")
else:
print("No album art available.")
new_file_name = f"{track_number:02d}. {title} - {artist_name} - {album_name} - {isrc}.mp3"
new_file_name = re.sub(r'[/:*?"<>|]', '', new_file_name)
new_file_path = os.path.join(os.path.dirname(audio_file_path), new_file_name)
os.rename(audio_file_path, new_file_path) # Rename file
print(f"File has been renamed to: {new_file_name}")
new_image_file_path = os.path.splitext(new_file_path)[0] + ".jpg"
os.rename(image_file_path, new_image_file_path)
print(f"Album art file has been renamed to: {os.path.basename(new_image_file_path)}")
lyrics = get_lyrics_from_genius(artist_name, title)
if 'plain' in lyrics:
lyrics_plain_text = lyrics['plain']
print("Printing Lyrics:\n", lyrics_plain_text)
save_lyrics_to_file(audio_file_path, track_number, title, artist_name, album_name, isrc, lyrics_plain_text)
print("Lyrics file saved")
else:
print("No lyrics available to save.")
else:
print("Song not found on Spotify.")
else:
print("Song could not be identified.")

if __name__ == "__main__":
    audio_source_choice = get_audio_source_choice(duration=10)

    if audio_source_choice == '3':
        user_choice = get_user_choice(duration=10)
        audio_file_path = 'D:/Eurydice/Encompassing Data by discerning/Test_file/Unknown_file.mp3'

        if user_choice == '1':
            print("\n" + "." * 15 + " ᴜsɪɴɢ YᴏᴜᴛᴜʙᴇACR " + "." * 15 + "\n")
            song_tags = recognize_song(audio_file_path)

            if song_tags:
                print(f'Song identified: {song_tags}')
                set_id3_tags_mp3(audio_file_path, song_tags)

                artist_name = song_tags.get('artists')[0].get('name')
                song_title = song_tags.get('title')
                # Strip characters Windows does not allow in file names
                safe_artist_name = re.sub(r'[\\/:?"<>|]', '', artist_name)
                safe_song_title = re.sub(r'[\\/:?"<>|]', '', song_title)

                new_file_name = f"{safe_artist_name} - {safe_song_title}.mp3"
                new_file_path = os.path.join(os.path.dirname(audio_file_path), new_file_name)
                os.rename(audio_file_path, new_file_path)
                print(f"File has been renamed to: {new_file_name}")

                # The Apple Music and lyrics lookups depend on artist_name and
                # song_title, so they only run when recognition succeeded.
                apple_music_api = AppleMusicApi(Exception)
                apple_music_api.get_access_token()
                track_results = apple_music_api.search('songs', f"{artist_name} - {song_title}")
                if track_results:
                    track_id = track_results[0]['id']
                    album_artwork_url_template = track_results[0]['attributes']['artwork']['url']
                    save_and_embed_album_cover(new_file_path, artist_name, song_title, album_artwork_url_template)
                else:
                    print("Song not found on Apple Music.")

                lrc_lyrics = get_lyrics(safe_artist_name, safe_song_title)
                if lrc_lyrics:
                    lrc_file_path = os.path.join(os.path.dirname(audio_file_path), f"{safe_artist_name} - {safe_song_title}.lrc")
                    with open(lrc_file_path, 'w', encoding='utf-8') as lrc_file:
                        lrc_file.write(lrc_lyrics)
                    print(f"Saved LRC file to: {lrc_file_path}")
                else:
                    print("Could not get the lyrics.")
            else:
                print('Could not identify the song in YᴏᴜᴛᴜʙᴇACR.')

        elif user_choice == '2':
            print("\n" + "." * 15 + " ᴜsɪɴɢ Sʜᴀᴢᴀᴍ " + "." * 15 + "\n")
            song_tags = shazam_recognize_song(audio_file_path)
            print(song_tags)
            process_audio_file_with_spotify_search(audio_file_path)
        else:
            print("Invalid choice. Exiting....")
            exit()

    elif audio_source_choice == '1':
        audio_file_path = capture_and_save_audio_from_mic(duration=10, sample_rate=44100)

        print("Attempting to recognize using YᴏᴜᴛᴜʙᴇACR first…\n")
        song_tags = recognize_song(audio_file_path)
        use_acrcloud = True
        if song_tags is None:
            print("YᴏᴜᴛᴜʙᴇACR couldn't identify the song. Attempting recognition with Sʜᴀᴢᴀᴍ…\n")
            song_tags = shazam_recognize_song(audio_file_path)
            use_acrcloud = False

        if song_tags:
            if use_acrcloud:
                artist_name = song_tags.get('artists')[0].get('name')
                song_title = song_tags.get('title')
                print(f"Song recognized successfully from youtubeACR!\n Artist: {artist_name}, Song: {song_title}\n")
            else:
                artist_name = song_tags['track']['subtitle']
                title = song_tags['track']['title']
                access_token = authenticate_spotify(CLIENT_ID, CLIENT_SECRET)
                song_info = search_spotify_for_song(access_token, artist_name, title)
                if song_info:
                    album_name = song_info['album']['name']
                    track_number = song_info['track_number']
                    isrc = song_info.get('external_ids', {}).get('isrc', "Not Available")
                    print(f"Song recognized successfully by sha-spo!\n Artist: {artist_name}, Song: {track_number:02d}. {title}, Album: {album_name}, ISRC tag: {isrc}\n")
                else:
                    print(f"Song recognized successfully by Shazam!\n Artist: {artist_name}, Song: {title}\n")
        else:
            print("Failed to recognize the song from the service.\n")

    elif audio_source_choice == '2':
        print("\nAvailable audio devices for capture:\n")
        devices = sd.query_devices()
        # The original compared a sample rate to a latency value to mark the
        # default device, which never matches; compare against sd.default.device.
        default_input = sd.default.device[0]
        for index, device in enumerate(devices):
            print(f"{index}: {device['name']} {'(Default)' if index == default_input else ''}")

        device_selection = input("Please enter the device index or name you wish to use for the capture: ").strip()
        try:
            device_selection = int(device_selection)
        except ValueError:
            pass  # keep the string; sounddevice accepts device names too

        audio_file_path = capture_internal_audio(device=device_selection, duration=10, sample_rate=44100)
        print("Waiting....\n")
        print("Attempting to recognize using YᴏᴜᴛᴜʙᴇACR first…\n")
        song_tags = recognize_song(audio_file_path)
        use_acrcloud = True
        if song_tags is None:
            print("YᴏᴜᴛᴜʙᴇACR couldn't identify the song. Attempting recognition with Sʜᴀᴢᴀᴍ…\n")
            song_tags = shazam_recognize_song(audio_file_path)
            use_acrcloud = False

        if song_tags:
            if use_acrcloud:
                artist_name = song_tags.get('artists')[0].get('name')
                song_title = song_tags.get('title')
                print(f"Song recognized successfully from youtubeACR!\n Artist: {artist_name}, Song: {song_title}\n")
            else:
                artist_name = song_tags['track']['subtitle']
                title = song_tags['track']['title']
                access_token = authenticate_spotify(CLIENT_ID, CLIENT_SECRET)
                song_info = search_spotify_for_song(access_token, artist_name, title)
                if song_info:
                    album_name = song_info['album']['name']
                    track_number = song_info['track_number']
                    isrc = song_info.get('external_ids', {}).get('isrc', "Not Available")
                    print(f"Song recognized successfully by sha-spo!\n Artist: {artist_name}, Song: {track_number:02d}. {title}, Album: {album_name}, ISRC tag: {isrc}\n")
                else:
                    print(f"Song recognized successfully by Shazam!\n Artist: {artist_name}, Song: {title}\n")
        else:
            print("Failed to recognize the song from the service.\n")
    else:
        exit()
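
The microphone ('1') and internal-audio ('2') branches above repeat the same recognize-then-report logic verbatim. A small helper would keep the two in sync; this is a sketch that assumes the surrounding functions keep their current signatures, and the name recognize_and_report is mine, not from the original script:

def recognize_and_report(audio_file_path):
    """Try YᴏᴜᴛᴜʙᴇACR first, fall back to Sʜᴀᴢᴀᴍ, then report what was found."""
    print("Attempting to recognize using YᴏᴜᴛᴜʙᴇACR first…\n")
    song_tags = recognize_song(audio_file_path)
    if song_tags:
        artist_name = song_tags.get('artists')[0].get('name')
        song_title = song_tags.get('title')
        print(f"Song recognized successfully from youtubeACR!\n Artist: {artist_name}, Song: {song_title}\n")
        return
    print("YᴏᴜᴛᴜʙᴇACR couldn't identify the song. Attempting recognition with Sʜᴀᴢᴀᴍ…\n")
    song_tags = shazam_recognize_song(audio_file_path)
    if not song_tags:
        print("Failed to recognize the song from the service.\n")
        return
    artist_name = song_tags['track']['subtitle']
    title = song_tags['track']['title']
    access_token = authenticate_spotify(CLIENT_ID, CLIENT_SECRET)
    song_info = search_spotify_for_song(access_token, artist_name, title)
    if song_info:
        album_name = song_info['album']['name']
        track_number = song_info['track_number']
        isrc = song_info.get('external_ids', {}).get('isrc', "Not Available")
        print(f"Song recognized successfully by sha-spo!\n Artist: {artist_name}, Song: {track_number:02d}. {title}, Album: {album_name}, ISRC tag: {isrc}\n")
    else:
        print(f"Song recognized successfully by Shazam!\n Artist: {artist_name}, Song: {title}\n")

Each capture branch then reduces to a call to recognize_and_report(audio_file_path) after its capture function returns.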
Add a loader (progress indicator) to this code.
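
A minimal sketch of such a loader, assuming a console spinner is what is wanted; the Loader class, its default message, and the 0.1-second frame interval are illustrative choices, not part of the original script:

import itertools
import sys
import threading
import time

class Loader:
    """Console spinner that animates while a long-running call is in progress."""

    def __init__(self, message="Working"):
        self.message = message
        self._stop_event = threading.Event()
        self._thread = threading.Thread(target=self._spin, daemon=True)

    def _spin(self):
        # Cycle through four frames until the stop event fires
        for frame in itertools.cycle("|/-\\"):
            if self._stop_event.is_set():
                break
            sys.stdout.write(f"\r{self.message}... {frame}")
            sys.stdout.flush()
            time.sleep(0.1)
        sys.stdout.write("\r" + " " * (len(self.message) + 6) + "\r")  # clear the line

    def __enter__(self):
        self._thread.start()
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self._stop_event.set()
        self._thread.join()

Usage, wrapped around any of the slow recognition calls:

with Loader("Recognizing with Sʜᴀᴢᴀᴍ"):
    song_tags = shazam_recognize_song(audio_file_path)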
/*
* This file is part of CounterStrikeSharp.
* CounterStrikeSharp is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* CounterStrikeSharp is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with CounterStrikeSharp. If not, see <https://www.gnu.org/licenses/>. *
*/
#include <ios>
#include <cstring>  // for strlen used in the parse call below
#include <sstream>
#include "core/log.h"
#include "protobuf/generated/cstrike15_usermessages.pb.h"
#include <igameeventsystem.h>
#include <networksystem/inetworkserializer.h>
#include <networksystem/inetworkmessages.h>
#include "scripting//callback_manager.h"
#include "scripting/autonative.h"
#include "scripting/script_engine.h"
#include "natives_protobufs.h"
#include "google/protobuf/message.h"
#include "google/protobuf/message_lite.h"
#include <google/protobuf/dynamic_message.h>
#include <google/protobuf/compiler/importer.h>
namespace counterstrikesharp {

enum Protobuf_t {
    cstrike15_usermessages,
    netmessages,
};

void SendProtobuf(ScriptContext &script_context)
{
    auto client = script_context.GetArgument<int>(0);
    auto is_filter = script_context.GetArgument<bool>(1);
    auto msg = script_context.GetArgument<const char*>(2);
    auto data = script_context.GetArgument<const char*>(3);
    auto protobuf_enum = script_context.GetArgument<Protobuf_t>(4);

    CSSHARP_CORE_INFO("{0} | {1} | {2} | {3} | {4}", client, is_filter, msg, data, protobuf_enum);

    CRecipientFilter filter;
    if (is_filter)
    {
        CPlayerSlot PlayerSlot = CPlayerSlot(client);
        filter.AddRecipient(PlayerSlot);
    }

    std::string protobuf_file_name;
    switch (protobuf_enum) {
        case cstrike15_usermessages:
            protobuf_file_name = "cstrike15_usermessages";
            break;
        case netmessages:
            protobuf_file_name = "netmessages";
            break;
    }

    INetworkSerializable* message_type = globals::networkMessages->FindNetworkMessagePartial(msg);

    ::google::protobuf::compiler::DiskSourceTree source_tree;
    source_tree.MapPath("", ".");
    ::google::protobuf::compiler::Importer importer(&source_tree, nullptr);

    char import_path[100];
    snprintf(import_path, sizeof(import_path), "src/protobuf/%s.proto", protobuf_file_name.c_str());
    importer.Import(import_path);

    const ::google::protobuf::Descriptor* descriptor = importer.pool()->FindMessageTypeByName(msg);
    if (descriptor)
    {
        ::google::protobuf::DynamicMessageFactory factory;
        std::unique_ptr<::google::protobuf::Message> message(factory.GetPrototype(descriptor)->New());

        // Bug fix: sizeof(data) measured the pointer (8 bytes), not the payload.
        // strlen works for text payloads but stops at the first NUL byte.
        if (message->ParseFromArray(data, strlen(data)))
        {
            globals::gameEventSystem->PostEventAbstract(0, false, &filter, message_type, message.get(), 0);
            CSSHARP_CORE_INFO("Successfully parsed message of type");
        }
        else
        {
            CSSHARP_CORE_INFO("Failed to parse message of type");
        }
    }
    else
    {
        CSSHARP_CORE_INFO("Unknown message type");
    }
}

REGISTER_NATIVES(protobufs, {
    ScriptEngine::RegisterNativeHandler("SEND_PROTOBUF", SendProtobuf);
})

} // namespace counterstrikesharp
Fix this.
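
One caveat with the strlen-based fix above: serialized protobuf payloads routinely contain NUL bytes, so strlen only works for plain-text data. A sketch of a more robust variant would pass the byte count through the script context; the fifth argument index below is hypothetical and not part of the existing native signature:

// Hypothetical extra argument carrying the payload length in bytes, so
// binary protobuf data with embedded NULs parses correctly.
auto data_len = script_context.GetArgument<int>(5);  // assumed new argument
if (message->ParseFromArray(data, data_len))
{
    globals::gameEventSystem->PostEventAbstract(0, false, &filter, message_type, message.get(), 0);
}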
In file included from /home/fabius/plugins/metamod-source/samples/counterstrikesharp/src/scripting/natives/natives_protobufs.cpp:26:
/home/fabius/plugins/metamod-source/samples/counterstrikesharp/src/scripting/autonative.h:43:78: warning: backslash-newline at end of file
43 | #define CREATE_SETTER_FUNCTION(type_name, get_type, name, from_type, setter) \
|
/home/fabius/plugins/metamod-source/samples/counterstrikesharp/src/scripting/natives/natives_protobufs.cpp: In member function ‘virtual void Nativesprotobufs::OnAllInitialized()’:
/home/fabius/plugins/metamod-source/samples/counterstrikesharp/src/scripting/natives/natives_protobufs.cpp:104:5: error: ‘ScriptEngine’ has not been declared
104 | ScriptEngine::RegisterNativeHandler("SEND_PROTOBUF", SendProtobuf);
| ^~~~~~~~~~~~
/home/fabius/plugins/metamod-source/samples/counterstrikesharp/src/scripting/autonative.h:29:42: note: in definition of macro ‘REGISTER_NATIVES’
29 | void OnAllInitialized() override method \
| ^~~~~~
/home/fabius/plugins/metamod-source/samples/counterstrikesharp/src/scripting/natives/natives_protobufs.cpp:104:58: error: ‘SendProtobuf’ was not declared in this scope; did you mean ‘counterstrikesharp::SendProtobuf’?
104 | ScriptEngine::RegisterNativeHandler("SEND_PROTOBUF", SendProtobuf);
| ^~~~~~~~~~~~
/home/fabius/plugins/metamod-source/samples/counterstrikesharp/src/scripting/autonative.h:29:42: note: in definition of macro ‘REGISTER_NATIVES’
29 | void OnAllInitialized() override method \
| ^~~~~~
/home/fabius/plugins/metamod-source/samples/counterstrikesharp/src/scripting/natives/natives_protobufs.cpp:42:10: note: ‘counterstrikesharp::SendProtobuf’ declared here
42 | void SendProtobuf(ScriptContext& script_context) {
| ^~~~~~~~~~~~
/home/fabius/plugins/metamod-source/samples/counterstrikesharp/src/scripting/natives/natives_protobufs.cpp: At global scope:
/home/fabius/plugins/metamod-source/samples/counterstrikesharp/src/scripting/natives/natives_protobufs.cpp:106:1: error: expected declaration before ‘}’ token
106 | } // namespace counterstrikesharp
| ^
make[2]: *** [CMakeFiles/counterstrikesharp.dir/build.make:472: CMakeFiles/counterstrikesharp.dir/src/scripting/natives/natives_protobufs.cpp.o] Error 1
make[1]: *** [CMakeFiles/Makefile2:646: CMakeFiles/counterstrikesharp.dir/all] Error 2
make: *** [Makefile:156: all] Error 2
/*
* This file is part of CounterStrikeSharp.
* CounterStrikeSharp is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* CounterStrikeSharp is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with CounterStrikeSharp. If not, see <https://www.gnu.org/licenses/>. *
*/
#include <ios>
#include <cstring>  // for strlen used in the parse call below
#include <sstream>
#include "core/log.h"
#include "protobuf/generated/cstrike15_usermessages.pb.h"
#include <igameeventsystem.h>
#include <networksystem/inetworkserializer.h>
#include <networksystem/inetworkmessages.h>
#include "scripting//callback_manager.h"
#include "scripting/autonative.h"
#include "scripting/script_engine.h"
#include "natives_protobufs.h"
#include "google/protobuf/message.h"
#include "google/protobuf/message_lite.h"
#include <google/protobuf/dynamic_message.h>
#include <google/protobuf/compiler/importer.h>
namespace counterstrikesharp {

enum Protobuf_t {
    cstrike15_usermessages,
    netmessages,
};

void SendProtobuf(ScriptContext& script_context) {
    int client = script_context.GetArgument<int>(0);
    bool is_filter = script_context.GetArgument<bool>(1);
    const char* msg_type_name = script_context.GetArgument<const char*>(2);
    const char* data = script_context.GetArgument<const char*>(3);
    auto protobuf_enum = script_context.GetArgument<Protobuf_t>(4);
    // CSSHARP_CORE_INFO("{0} | {1} | {2} | {3} | {4}", client, is_filter, msg_type_name, data, protobuf_enum);

    CRecipientFilter filter;
    if (is_filter) {
        CPlayerSlot PlayerSlot(client);
        filter.AddRecipient(PlayerSlot);
    }

    std::string protobuf_file_name = protobuf_enum == cstrike15_usermessages ? "cstrike15_usermessages" : "netmessages";

    INetworkSerializable* message_type = globals::networkMessages->FindNetworkMessagePartial(msg_type_name);
    if (!message_type) {
        CSSHARP_CORE_INFO("Message type not found in registry.");
        return;
    }

    ::google::protobuf::compiler::DiskSourceTree source_tree;
    source_tree.MapPath("", "./src/protobuf/");
    ::google::protobuf::compiler::Importer importer(&source_tree, nullptr);

    // The source tree maps the virtual root to ./src/protobuf/, so the import
    // path must be relative to that root rather than repeating the prefix.
    std::string import_path = protobuf_file_name + ".proto";
    const ::google::protobuf::FileDescriptor* file_descriptor = importer.Import(import_path);
    if (!file_descriptor) {
        CSSHARP_CORE_INFO("Failed to import protobuf definition.");
        return;
    }

    const ::google::protobuf::Descriptor* descriptor = importer.pool()->FindMessageTypeByName(msg_type_name);
    if (!descriptor) {
        CSSHARP_CORE_INFO("Unknown message type.");
        return;
    }

    ::google::protobuf::DynamicMessageFactory factory;
    std::unique_ptr<::google::protobuf::Message> message(factory.GetPrototype(descriptor)->New());

    // strlen stops at the first NUL byte, so this only handles text payloads;
    // binary protobuf data needs an explicit length passed in instead.
    if (message->ParseFromArray(data, strlen(data)))
    {
        globals::gameEventSystem->PostEventAbstract(0, false, &filter, message_type, message.get(), 0);
        CSSHARP_CORE_INFO("Successfully parsed and sent message of type {}", msg_type_name);
    }
    else
    {
        CSSHARP_CORE_INFO("Failed to parse message of type {}", msg_type_name);
    }
}

REGISTER_NATIVES(protobufs, {
    ScriptEngine::RegisterNativeHandler("SEND_PROTOBUF", SendProtobuf);
})

} // namespace counterstrikesharp
Fix it.
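
All three compiler errors trace back to the second REGISTER_NATIVES block that was pasted after the namespace's closing brace (removed above): at global scope, ScriptEngine and SendProtobuf no longer resolve, and the final } has nothing left to close. The backslash-newline warning in autonative.h is a separate, harmless issue; trimming the trailing \ on the last line of that header would silence it. The file should end with a single registration inside the namespace:

REGISTER_NATIVES(protobufs, {
    ScriptEngine::RegisterNativeHandler("SEND_PROTOBUF", SendProtobuf);
})
} // namespace counterstrikesharp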
hello
Can you resolve NFR (non-functional requirement) issues?