row_id
int64 0
48.4k
| init_message
stringlengths 1
342k
| conversation_hash
stringlengths 32
32
| scores
dict |
|---|---|---|---|
46,787
|
in this javascript for leafletjs I want to add an event listener for the buyRoads button with the condition if money >= 50,000 console log 'buy roads' - 'var popupContent2 = `
<br/>Buy the land around ${secondCity} Station to start earning rent every day<br/>
<button id="stationtwo" class="trainbutton">Buy Land for £100,000</button>
<br/>Buy roads for ${secondCity} Station to start earning road tax every day<br/>
<button id="buyRoads" class="trainbutton">Buy Roads for £5,000</button>
`;
// Add a popup to the greenCircleMarker2
greenCircleMarker2.bindPopup(popupContent2).openPopup();
// Event listener for the ‘Buy Land’ button in the popup
greenCircleMarker2.on("popupopen", function () {
document
.getElementById("stationtwo")
.addEventListener("click", function () {
if (money >= 100000) {
money -= 100000;
dailybonus += 50000;
menuElement.textContent = `Daily Income: £${dailybonus}`;
// Check if the building data is already fetched
if (buildingData2) {
// Process the existing building data to create polylines with green color
buildingData2.elements.forEach((element) => {
if (element.type === "way") {
const coordinates = element.nodes.map(
(nodeId) => {
const node =
buildingData2.elements.find(
(node) => node.id === nodeId
);
return [node.lat, node.lon];
}
);
// Create a polyline for the building footprint with green color
const polyline = L.polyline(coordinates, {
color: "green",
weight: 1,
fillColor: "green", // Set the fill color to green
fillOpacity: 0.5,
}).addTo(map);
}
});
}
}
});
});'
|
325a339334e0d738a1b83a46c06f1863
|
{
"intermediate": 0.3737408220767975,
"beginner": 0.4375252425670624,
"expert": 0.18873390555381775
}
|
46,788
|
how can i check my tensorflow version
|
beec08b4b0cfd5ac7dce8b87c114954c
|
{
"intermediate": 0.4028242826461792,
"beginner": 0.15463542938232422,
"expert": 0.44254034757614136
}
|
46,789
|
//
// Source code recreated from a .class file by IntelliJ IDEA
// (powered by FernFlower decompiler)
//
package org.springframework.batch.core;
import java.io.IOException;
import java.io.ObjectInputStream;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.Date;
import java.util.HashSet;
import java.util.Iterator;
import java.util.LinkedHashSet;
import java.util.List;
import java.util.Set;
import java.util.concurrent.CopyOnWriteArrayList;
import org.springframework.batch.item.ExecutionContext;
import org.springframework.lang.Nullable;
public class JobExecution extends Entity {
private final JobParameters jobParameters;
private JobInstance jobInstance;
private volatile Collection<StepExecution> stepExecutions;
private volatile BatchStatus status;
private volatile Date startTime;
private volatile Date createTime;
private volatile Date endTime;
private volatile Date lastUpdated;
private volatile ExitStatus exitStatus;
private volatile ExecutionContext executionContext;
private transient volatile List<Throwable> failureExceptions;
private final String jobConfigurationName;
public JobExecution(JobExecution original) {
this.stepExecutions = Collections.synchronizedSet(new LinkedHashSet());
this.status = BatchStatus.STARTING;
this.startTime = null;
this.createTime = new Date(System.currentTimeMillis());
this.endTime = null;
this.lastUpdated = null;
this.exitStatus = ExitStatus.UNKNOWN;
this.executionContext = new ExecutionContext();
this.failureExceptions = new CopyOnWriteArrayList();
this.jobParameters = original.getJobParameters();
this.jobInstance = original.getJobInstance();
this.stepExecutions = original.getStepExecutions();
this.status = original.getStatus();
this.startTime = original.getStartTime();
this.createTime = original.getCreateTime();
this.endTime = original.getEndTime();
this.lastUpdated = original.getLastUpdated();
this.exitStatus = original.getExitStatus();
this.executionContext = original.getExecutionContext();
this.failureExceptions = original.getFailureExceptions();
this.jobConfigurationName = original.getJobConfigurationName();
this.setId(original.getId());
this.setVersion(original.getVersion());
}
public JobExecution(JobInstance job, Long id, @Nullable JobParameters jobParameters, String jobConfigurationName) {
super(id);
this.stepExecutions = Collections.synchronizedSet(new LinkedHashSet());
this.status = BatchStatus.STARTING;
this.startTime = null;
this.createTime = new Date(System.currentTimeMillis());
this.endTime = null;
this.lastUpdated = null;
this.exitStatus = ExitStatus.UNKNOWN;
this.executionContext = new ExecutionContext();
this.failureExceptions = new CopyOnWriteArrayList();
this.jobInstance = job;
this.jobParameters = jobParameters == null ? new JobParameters() : jobParameters;
this.jobConfigurationName = jobConfigurationName;
}
public JobExecution(JobInstance job, JobParameters jobParameters, String jobConfigurationName) {
this(job, (Long)null, jobParameters, jobConfigurationName);
}
public JobExecution(Long id, JobParameters jobParameters, String jobConfigurationName) {
this((JobInstance)null, id, jobParameters, jobConfigurationName);
}
public JobExecution(JobInstance job, JobParameters jobParameters) {
this(job, (Long)null, jobParameters, (String)null);
}
public JobExecution(Long id, JobParameters jobParameters) {
this((JobInstance)null, id, jobParameters, (String)null);
}
public JobExecution(Long id) {
this((JobInstance)null, id, (JobParameters)null, (String)null);
}
public JobParameters getJobParameters() {
return this.jobParameters;
}
public Date getEndTime() {
return this.endTime;
}
public void setJobInstance(JobInstance jobInstance) {
this.jobInstance = jobInstance;
}
public void setEndTime(Date endTime) {
this.endTime = endTime;
}
public Date getStartTime() {
return this.startTime;
}
public void setStartTime(Date startTime) {
this.startTime = startTime;
}
public BatchStatus getStatus() {
return this.status;
}
public void setStatus(BatchStatus status) {
this.status = status;
}
public void upgradeStatus(BatchStatus status) {
this.status = this.status.upgradeTo(status);
}
public Long getJobId() {
return this.jobInstance != null ? this.jobInstance.getId() : null;
}
public void setExitStatus(ExitStatus exitStatus) {
this.exitStatus = exitStatus;
}
public ExitStatus getExitStatus() {
return this.exitStatus;
}
public JobInstance getJobInstance() {
return this.jobInstance;
}
public Collection<StepExecution> getStepExecutions() {
return Collections.unmodifiableList(new ArrayList(this.stepExecutions));
}
public StepExecution createStepExecution(String stepName) {
StepExecution stepExecution = new StepExecution(stepName, this);
this.stepExecutions.add(stepExecution);
return stepExecution;
}
public boolean isRunning() {
return this.startTime != null && this.endTime == null;
}
public boolean isStopping() {
return this.status == BatchStatus.STOPPING;
}
/** @deprecated */
@Deprecated
public void stop() {
Iterator var1 = this.stepExecutions.iterator();
while(var1.hasNext()) {
StepExecution stepExecution = (StepExecution)var1.next();
stepExecution.setTerminateOnly();
}
this.status = BatchStatus.STOPPING;
}
public void setExecutionContext(ExecutionContext executionContext) {
this.executionContext = executionContext;
}
public ExecutionContext getExecutionContext() {
return this.executionContext;
}
public Date getCreateTime() {
return this.createTime;
}
public void setCreateTime(Date createTime) {
this.createTime = createTime;
}
public String getJobConfigurationName() {
return this.jobConfigurationName;
}
void addStepExecution(StepExecution stepExecution) {
this.stepExecutions.add(stepExecution);
}
public Date getLastUpdated() {
return this.lastUpdated;
}
public void setLastUpdated(Date lastUpdated) {
this.lastUpdated = lastUpdated;
}
public List<Throwable> getFailureExceptions() {
return this.failureExceptions;
}
public synchronized void addFailureException(Throwable t) {
this.failureExceptions.add(t);
}
public synchronized List<Throwable> getAllFailureExceptions() {
Set<Throwable> allExceptions = new HashSet(this.failureExceptions);
Iterator var2 = this.stepExecutions.iterator();
while(var2.hasNext()) {
StepExecution stepExecution = (StepExecution)var2.next();
allExceptions.addAll(stepExecution.getFailureExceptions());
}
return new ArrayList(allExceptions);
}
private void readObject(ObjectInputStream stream) throws IOException, ClassNotFoundException {
stream.defaultReadObject();
this.failureExceptions = new ArrayList();
}
public String toString() {
return super.toString() + String.format(", startTime=%s, endTime=%s, lastUpdated=%s, status=%s, exitStatus=%s, job=[%s], jobParameters=[%s]", this.startTime, this.endTime, this.lastUpdated, this.status, this.exitStatus, this.jobInstance, this.jobParameters);
}
public void addStepExecutions(List<StepExecution> stepExecutions) {
if (stepExecutions != null) {
this.stepExecutions.removeAll(stepExecutions);
this.stepExecutions.addAll(stepExecutions);
}
}
}
package com.mns.oms.batch.domain;
import java.time.LocalDateTime;
import java.util.Date;
import java.util.List;
import org.springframework.data.annotation.CreatedDate;
import org.springframework.data.annotation.Id;
import org.springframework.data.annotation.LastModifiedDate;
import org.springframework.data.mongodb.core.mapping.Document;
import com.mns.oms.batch.model.JobStepDetails;
import lombok.Data;
/**
* @author Mrinmoy Mandal
*
* Module: WISMR
*
*/
@Data
@Document(collection = "job-statistics")
public class JobStatistics {
@Id
private String id;
private Long jobId;
private String jobName;
private String jobStatus;
private String jobExitStatus;
private String jobExitDescription;
private List<JobStepDetails> stepDetails;
private List<String> fileList;
private LocalDateTime startTime;
private Date testStartTime;
private LocalDateTime endTime;
@CreatedDate
private LocalDateTime createdDate;
@LastModifiedDate
private LocalDateTime lastModifieDate;
}
below here ......jobStats.setTestStartTime(inn);.....is working
but this code is giving jobStats.setTestStartTime(jobExecution.getStartTime());....incompatible types: java.time.LocalDateTime cannot be converted to java.util.Date
Date inn = new Date();
jobStats.setTestStartTime(inn);
jobStats.setTestStartTime(jobExecution.getStartTime());
how to fix this issue in jobStats.setTestStartTime(jobExecution.getStartTime());
|
5e41d03eb096d2b09036f74be3dbb07f
|
{
"intermediate": 0.33688443899154663,
"beginner": 0.4590712785720825,
"expert": 0.20404428243637085
}
|
46,790
|
write a c code for this "w24fz size1 size2
o The serverw24 must return to the client temp.tar.gz that contains all the files
in the directory tree rooted at its ~ whose file-size in bytes is >=size1 and
<=size2
size1 < = size2 (size1>= 0 and size2>=0)
o If none of the files of the specified size are present, the serverw24 sends “No
file found” to the client (which is then printed on the client terminal by the
client)
o Ex: client24$ w24fz 1240 12450"
|
63674e6bc412c3fbb58e3f022e4575c5
|
{
"intermediate": 0.39427173137664795,
"beginner": 0.31679484248161316,
"expert": 0.2889333963394165
}
|
46,791
|
C:\Users\arisa>python3 -m pip install tensorflow[and-cuda]
Python was not found; run without arguments to install from the Microsoft Store, or disable this shortcut from Settings > Manage App Execution Aliases.
|
625257b44d6b4fe928ce76ebaa6789ac
|
{
"intermediate": 0.41257598996162415,
"beginner": 0.17179465293884277,
"expert": 0.4156293272972107
}
|
46,792
|
i installed followings in conda:
conda install -c conda-forge cudatoolkit=11.2 cudnn=8.1.0
how can i uninstall them
|
c80616965e61b1b7f2c31fbbca7ed911
|
{
"intermediate": 0.521062433719635,
"beginner": 0.22611132264137268,
"expert": 0.2528262436389923
}
|
46,793
|
corrige ce code en faisant en sorte que il est deux inputs :
# Import necessary modules and packages
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Input, LSTM, Embedding, Dense, Masking
from tensorflow.keras.optimizers import Adam
# Define hyperparameters
embedding_dim = 200
lstm_units = 512
vocab_size_en = len(en_vocab) + len(reserved_tokens)
vocab_size_fr = len(fr_vocab) + len(reserved_tokens)
encoder_inputs = Input(shape=(200,)) # None indicates variable-length sequences
encoder_embedding = Embedding(input_dim=vocab_size_en, output_dim=embedding_dim, mask_zero=True)(encoder_inputs)
encoder_outputs, state_h, state_c = LSTM(lstm_units, return_state=True)(encoder_embedding)
encoder_states = [state_h, state_c]
decoder_embedding = Embedding(input_dim=vocab_size_fr, output_dim=embedding_dim, mask_zero=True)(encoder_outputs)
decoder_lstm = LSTM(lstm_units, return_sequences=True, return_state=True)
decoder_outputs, _, _ = decoder_lstm(decoder_embedding, initial_state=encoder_states)
decoder_dense = Dense(200, activation='softmax')
x= Embedding(200, output_dim=200, mask_zero=True)
decoder_outputs = (decoder_outputs)
# Define the complete model that combines the encoder and decoder
model = Model(encoder_inputs, decoder_outputs)
# Compile the model with the specified optimizer, loss function, and metrics
model.compile(optimizer=Adam(), loss='sparse_categorical_crossentropy', metrics=['accuracy'])
# Summarize the model architecture
model.summary()
|
1d644ae1e453864c06d60a270ffa3490
|
{
"intermediate": 0.3025532364845276,
"beginner": 0.15775826573371887,
"expert": 0.5396884083747864
}
|
46,794
|
//
// Source code recreated from a .class file by IntelliJ IDEA
// (powered by FernFlower decompiler)
//
package org.springframework.batch.core;
import java.io.IOException;
import java.io.ObjectInputStream;
import java.time.LocalDateTime;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.HashSet;
import java.util.Iterator;
import java.util.LinkedHashSet;
import java.util.List;
import java.util.Set;
import java.util.concurrent.CopyOnWriteArrayList;
import org.springframework.batch.item.ExecutionContext;
import org.springframework.lang.Nullable;
public class JobExecution extends Entity {
private final JobParameters jobParameters;
private JobInstance jobInstance;
private volatile Collection<StepExecution> stepExecutions;
private volatile BatchStatus status;
private volatile LocalDateTime startTime;
private volatile LocalDateTime createTime;
private volatile LocalDateTime endTime;
private volatile LocalDateTime lastUpdated;
private volatile ExitStatus exitStatus;
private volatile ExecutionContext executionContext;
private transient volatile List<Throwable> failureExceptions;
public JobExecution(JobExecution original) {
this.stepExecutions = Collections.synchronizedSet(new LinkedHashSet());
this.status = BatchStatus.STARTING;
this.startTime = null;
this.createTime = LocalDateTime.now();
this.endTime = null;
this.lastUpdated = null;
this.exitStatus = ExitStatus.UNKNOWN;
this.executionContext = new ExecutionContext();
this.failureExceptions = new CopyOnWriteArrayList();
this.jobParameters = original.getJobParameters();
this.jobInstance = original.getJobInstance();
this.stepExecutions = original.getStepExecutions();
this.status = original.getStatus();
this.startTime = original.getStartTime();
this.createTime = original.getCreateTime();
this.endTime = original.getEndTime();
this.lastUpdated = original.getLastUpdated();
this.exitStatus = original.getExitStatus();
this.executionContext = original.getExecutionContext();
this.failureExceptions = original.getFailureExceptions();
this.setId(original.getId());
this.setVersion(original.getVersion());
}
public JobExecution(JobInstance job, Long id, @Nullable JobParameters jobParameters) {
super(id);
this.stepExecutions = Collections.synchronizedSet(new LinkedHashSet());
this.status = BatchStatus.STARTING;
this.startTime = null;
this.createTime = LocalDateTime.now();
this.endTime = null;
this.lastUpdated = null;
this.exitStatus = ExitStatus.UNKNOWN;
this.executionContext = new ExecutionContext();
this.failureExceptions = new CopyOnWriteArrayList();
this.jobInstance = job;
this.jobParameters = jobParameters == null ? new JobParameters() : jobParameters;
}
public JobExecution(JobInstance job, JobParameters jobParameters) {
this(job, (Long)null, jobParameters);
}
public JobExecution(Long id, JobParameters jobParameters) {
this((JobInstance)null, id, jobParameters);
}
public JobExecution(Long id) {
this((JobInstance)null, id, (JobParameters)null);
}
public JobParameters getJobParameters() {
return this.jobParameters;
}
@Nullable
public LocalDateTime getEndTime() {
return this.endTime;
}
public void setJobInstance(JobInstance jobInstance) {
this.jobInstance = jobInstance;
}
public void setEndTime(LocalDateTime endTime) {
this.endTime = endTime;
}
@Nullable
public LocalDateTime getStartTime() {
return this.startTime;
}
public void setStartTime(LocalDateTime startTime) {
this.startTime = startTime;
}
public BatchStatus getStatus() {
return this.status;
}
public void setStatus(BatchStatus status) {
this.status = status;
}
public void upgradeStatus(BatchStatus status) {
this.status = this.status.upgradeTo(status);
}
public Long getJobId() {
return this.jobInstance != null ? this.jobInstance.getId() : null;
}
public void setExitStatus(ExitStatus exitStatus) {
this.exitStatus = exitStatus;
}
public ExitStatus getExitStatus() {
return this.exitStatus;
}
public JobInstance getJobInstance() {
return this.jobInstance;
}
public Collection<StepExecution> getStepExecutions() {
return List.copyOf(this.stepExecutions);
}
public StepExecution createStepExecution(String stepName) {
StepExecution stepExecution = new StepExecution(stepName, this);
this.stepExecutions.add(stepExecution);
return stepExecution;
}
public boolean isRunning() {
return this.status.isRunning();
}
public boolean isStopping() {
return this.status == BatchStatus.STOPPING;
}
public void setExecutionContext(ExecutionContext executionContext) {
this.executionContext = executionContext;
}
public ExecutionContext getExecutionContext() {
return this.executionContext;
}
public LocalDateTime getCreateTime() {
return this.createTime;
}
public void setCreateTime(LocalDateTime createTime) {
this.createTime = createTime;
}
void addStepExecution(StepExecution stepExecution) {
this.stepExecutions.add(stepExecution);
}
@Nullable
public LocalDateTime getLastUpdated() {
return this.lastUpdated;
}
public void setLastUpdated(LocalDateTime lastUpdated) {
this.lastUpdated = lastUpdated;
}
public List<Throwable> getFailureExceptions() {
return this.failureExceptions;
}
public synchronized void addFailureException(Throwable t) {
this.failureExceptions.add(t);
}
public synchronized List<Throwable> getAllFailureExceptions() {
Set<Throwable> allExceptions = new HashSet(this.failureExceptions);
Iterator var2 = this.stepExecutions.iterator();
while(var2.hasNext()) {
StepExecution stepExecution = (StepExecution)var2.next();
allExceptions.addAll(stepExecution.getFailureExceptions());
}
return new ArrayList(allExceptions);
}
private void readObject(ObjectInputStream stream) throws IOException, ClassNotFoundException {
stream.defaultReadObject();
this.failureExceptions = new ArrayList();
}
public String toString() {
String var10000 = super.toString();
return var10000 + String.format(", startTime=%s, endTime=%s, lastUpdated=%s, status=%s, exitStatus=%s, job=[%s], jobParameters=[%s]", this.startTime, this.endTime, this.lastUpdated, this.status, this.exitStatus, this.jobInstance, this.jobParameters);
}
public void addStepExecutions(List<StepExecution> stepExecutions) {
if (stepExecutions != null) {
this.stepExecutions.removeAll(stepExecutions);
this.stepExecutions.addAll(stepExecutions);
}
}
}.....................package com.mns.oms.batch.domain;
import java.time.LocalDateTime;
import java.util.List;
import org.springframework.data.annotation.CreatedDate;
import org.springframework.data.annotation.Id;
import org.springframework.data.annotation.LastModifiedDate;
import org.springframework.data.mongodb.core.mapping.Document;
import com.mns.oms.batch.model.JobStepDetails;
import lombok.Data;
/**
* @author Mrinmoy Mandal
*
* Module: WISMR
*
*/
@Data
@Document(collection = "job-statistics")
public class JobStatistics {
@Id
private String id;
private Long jobId;
private String jobName;
private String jobStatus;
private String jobExitStatus;
private String jobExitDescription;
private List<JobStepDetails> stepDetails;
private List<String> fileList;
private LocalDateTime startTime;
private LocalDateTime endTime;
@CreatedDate
private LocalDateTime createdDate;
@LastModifiedDate
private LocalDateTime lastModifieDate;
}
.........jobStats.setStartTime(jobExecution.getStartTime().toInstant().atZone(ZoneId.systemDefault()).toLocalDateTime());.....giving compile time issue.....fix it
|
504f5b51493d39cd3c2104cd87b9d641
|
{
"intermediate": 0.3504513204097748,
"beginner": 0.4806583821773529,
"expert": 0.16889025270938873
}
|
46,795
|
The kernel failed to start as 'TypeAliasType' could not be imported from 'c:\Users\arisa\AppData\Local\Programs\Python\Python310\lib\site-packages\typing_extensions.py'.
Click here for more info.
|
72a281af466e5d0057dccd07e2d07ce1
|
{
"intermediate": 0.3358921408653259,
"beginner": 0.31427884101867676,
"expert": 0.3498290181159973
}
|
46,796
|
import matplotlib.pyplot as plt
from tensorflow.keras.utils import plot_model
# Create the model
encoder_inputs = Input(shape=(200,))
encoder_embedding = Embedding(input_dim=vocab_size_en, output_dim=embedding_dim, mask_zero=True)(encoder_inputs)
encoder_outputs, state_h, state_c = LSTM(lstm_units, return_state=True)(encoder_embedding)
encoder_states = [state_h, state_c]
decoder_inputs = Input(shape=(200,))
decoder_embedding = Embedding(input_dim=vocab_size_fr, output_dim=embedding_dim, mask_zero=True)(decoder_inputs)
decoder_lstm = LSTM(lstm_units, return_sequences=True, return_state=True)
decoder_outputs, _, _ = decoder_lstm(decoder_embedding, initial_state=encoder_states)
decoder_dense = Dense(vocab_size_fr, activation='softmax')
decoder_outputs = decoder_dense(decoder_outputs)
model = Model([encoder_inputs, decoder_inputs], decoder_outputs)
model.compile(optimizer=Adam(), loss='sparse_categorical_crossentropy', metrics=['accuracy'])
model.summary()
history = model.fit([x_train,x2_train], y_train, epochs=10, batch_size=2)
|
723aee6d5c77296b58c0766305be5cdb
|
{
"intermediate": 0.3867577612400055,
"beginner": 0.26395630836486816,
"expert": 0.34928590059280396
}
|
46,797
|
How do i install wine on astra linux?
|
c20cedf1b7cfdf0a49943b75eddc177f
|
{
"intermediate": 0.5421147346496582,
"beginner": 0.24283619225025177,
"expert": 0.21504908800125122
}
|
46,798
|
porter_stemmer =PorterStemmer()
#--------------------------------------
def fn_preprocess_question(question):
return ' '.join([porter_stemmer.stem(word) for word in question.split()])
dict_predefined_answers ={
"Who are you":"I am the SAMS bot, your Virtual Sales Assistant. I’m here to help you navigate through SAMS databases and provide the information you need.",
"Hi":"Hello, Welcome to SAMS Virtual Sales Assistant. I am designed to assist you in retrieving information from various SAMS databases. Please feel free to ask your queries, such as, 'What is the total sellout across India for July 16, 2022? among others.",
"What can you do":"I am equipped to offer you comprehensive insights and data from SAMS databases. Whether you need sales figures or specific reports, just ask, and I’ll provide the most accurate information available.Although my training is still going on.",
"How can I use this service":"Using this service is simple. Just type in your query about any sales or database-related information, like ‘Show the sales trend for product XYZ during the last month,' and I'll fetch the details for you.",
"I'm not sure what to ask.":"No problem at all. You can start with general queries like 'What were the top-selling products last month?'' or 'Update me on the current monthly sales report.' I am here to guide you through accessing the precise information you need.",
"Thank you":"You're most welcome! If you have any more questions or need further assistance, I'm here to help. Your success is my priority.",
"Goodbye":"Farewell! If you ever require any assistance or have more queries in the future, don't hesitate to reach out. Have a great day ahead!",
"Bye":"Farewell! If you ever require any assistance or have more queries in the future, don't hesitate to reach out. Have a great day ahead!",
"How are you":"I am good,Please ask anything related to SAMS and I will try to provide you best possible solution.Thanks"
}
def generate(tokenizer,prompt):
eos_token_id = tokenizer.convert_tokens_to_ids(["
|
66d2be6c59f536b467ad40734e6d6dd4
|
{
"intermediate": 0.41154348850250244,
"beginner": 0.32434946298599243,
"expert": 0.2641070485115051
}
|
46,799
|
Добавь в этот код возможность использовать прокси: param (
[Parameter(Mandatory=$true)]
[string]$UrlListFilePath
)
if (-not $UrlListFilePath) {
Write-Host "Path to the file is not provided."
exit 1
}
if (-not (Test-Path -Path $UrlListFilePath)) {
Write-Host "File not found: $UrlListFilePath"
exit 1
}
function Fetch-Url {
param (
[string]$Url
)
try {
$response = Invoke-WebRequest -Uri $Url -TimeoutSec 10
Write-Host "URL: $Url -> Status code: $($response.StatusCode)"
} catch {
Write-Host "URL: $Url -> Failed to fetch. Error: $($_.Exception.Message)"
}
}
while ($true) {
Get-Content -Path $UrlListFilePath | ForEach-Object {
Fetch-Url -Url $_
}
}
|
a6d7ffde00e30905c8934ed81b051ef0
|
{
"intermediate": 0.40210750699043274,
"beginner": 0.454921156167984,
"expert": 0.14297132194042206
}
|
46,800
|
Hi! Come up with fully working c++ program which uses djikstra algorithm with priority queue using binary heap as implementation. You are not allowed to use external libraries. You laso have to implement class graph, edge, vertex and priority queue with binary heap.
Task: Given an oriented graph, determine if it has a cycle of negative weight and print it (if exists).
Input
Input's first line has number N (1 ≤ N ≤ 100) — number of vertices in the graph. Each of the next N lines contains N numbers — all representing an adjacency matrix. All weights are strictly less than 10000 by absolute value. If there is no edge, corresponding value will be exactly 100000.
Output
First line should YES if there exists a negative cycle, or NO otherwise. If the cycle exists, second line should should contain the number of vertices in that cycle and third line should contain indices of those vertices (in cycle order).
Example
inputCopy
2
0 -1
-1 0
outputCopy
YES
2
2 1
|
1449636314679fb90993d93442d1e02c
|
{
"intermediate": 0.42127564549446106,
"beginner": 0.2429807037115097,
"expert": 0.33574363589286804
}
|
46,801
|
In typescript, the type Record<K, T> represents an object type with property keys that are K. But I fail to see how you could use anything else but string or number for this, or some subset of these two types. What would it even mean for an object to have keys that are of type, say Foo: {one: number, two: string}? Is my understanding correct here?
|
570e4438e2e0eeea4e9d3c38fed0db17
|
{
"intermediate": 0.5337580442428589,
"beginner": 0.2312050759792328,
"expert": 0.23503687977790833
}
|
46,802
|
I am making a SDL based C++ game engine, and I need help on transforming all my SDL raw pointers into smart pointers.
1) How is the format for creating them?
- TTF_Font* font; transform into std::unique_ptr<TTF_Font, TTF_CloseFont> font; ?
- SDL_Texture* texture; transform into ???
- SDL_Window* window; transform into ???
- SDL_Renderer* renderer; transform into ???
And my own object which doesn't relate to an SDL so doesn't have a Close method but uses it's destructor, how would that transform into?
Rect* srcRect;
|
aefae869020456376d032f195779866f
|
{
"intermediate": 0.6915609836578369,
"beginner": 0.1801348179578781,
"expert": 0.12830419838428497
}
|
46,803
|
split the audio file of wav file in most quality into vocals, guitar, drums , piano etc without loss
|
0d28fe187792c0f3dbfe15328737aafa
|
{
"intermediate": 0.39845338463783264,
"beginner": 0.20206958055496216,
"expert": 0.3994770646095276
}
|
46,804
|
import pandas as pd
from pymystem3 import Mystem
import re
data = pd.read_table('SMSSpamCollection',sep='\t')
data.reset_index(inplace = True)
data = data.rename(columns = {'index': 'ham'})
corpus = data['text'].values.astype('U')
|
c0e2e167af3bbec6a7e1ee3e7d59d212
|
{
"intermediate": 0.4843616485595703,
"beginner": 0.2938971221446991,
"expert": 0.221741184592247
}
|
46,805
|
напиши покупку, регистрацию и отмену билетов
from tkinter import *
# Паспорт пассажира
class Passport:
def __init__(self, name, surname, series, number):
self.__series = series
self.__number = number
self.__name = name
self.__surname = surname
# Геттер серии паспорта
@property
def series(self):
return self.__series
# Геттер номера паспорта
@property
def number(self):
return self.__number
# Геттер имени
@property
def name(self):
return self.__name
# Геттер фамилии
@property
def surname(self):
return self.__surname
# Пассажир
class Passenger:
def __init__(self, passport, phone_number, email):
self.__passport = passport
self.__phone_number = phone_number
self.__email = email
# Геттер телефона пассажира
@property
def phone_number(self):
return self.__phone_number
# Геттер почты пассажира
@property
def email(self):
return self.__email
# Геттер имени пассажира
@property
def name(self):
return self.__passport.name
# Геттер фамилии пассажира
@property
def surname(self):
return self.__passport.surname
# Геттер серии паспорта
@property
def series(self):
return self.__passport.series
# Геттер номера паспорта
@property
def number(self):
return self.__passport.number
# Место в самолете
class Seat:
def __init__(self, price, boarding_group):
self.__boarding_group = boarding_group
self.__price = price
self.__status = 0
# Геттер цены места
@property
def price(self):
return self.__price
# Геттер посадочной группы места
@property
def boarding_group(self):
return self.__boarding_group
# Геттер статуса места (0 - свободно, 1 - куплено, 2 - зарегестрировано)
@property
def status(self):
return self.__status
# Сеттер статуса места
@status.setter
def status(self, val):
if val in (0, 1, 2):
self.__status = val
else:
print("Недопустимый статус.")
# Аэропорт
class Airport:
def __init__(self):
self.__flights = [] # Список рейсов
# Удаление рейса из списка
def cancel_flight(self, flight):
if flight in self.__flights:
self.__flights.remove(flight)
print(f"Рейс номер {flight.flight_number} отменён.")
else:
print("Рейс не найден.")
# Добавление рейса в список
def add_flight(self, flight):
self.__flights.append(flight)
print(f"Рейс номер {flight.flight_number} добавлен.")
# Вывод текущих рейсов
def display_flights(self):
if self.__flights:
for flight in self.__flights:
print(f"Рейс номер: {flight.flight_number} - Назначение: "
f"{flight.destination} - Время отправления: {flight.departure_time} - Свободные места: "
f"{flight.available_seats}")
# Самолёт
class Flight:
def __init__(self, flight_number, destination, departure_time, total_seats):
self.__flight_number = flight_number # Номер рейса
self.__destination = destination # Место назначения
self.__departure_time = departure_time # Время отправки
self.__total_seats = total_seats # Всего мест в самолете
self.__available_seats = total_seats # Доступных мест
self.__occupied_seats = 0 # Занятых мест
self.__registered_seats = 0 # Зарегестрированных мест
self.__passengers = [] # Список пассажиров
# Регистрация пассажира
def register_passenger(self, passenger):
print(f"Пассажир {passenger.name} зарегестрирован на рейс номер {self.__flight_number}.")
# Покупка билета
def buy_ticket(self, passenger, seat_price, boarding_group):
print(f"Билет куплен {passenger.name} на рейс номер {self.__flight_number}.")
# Возврат билета
def cancel_ticket(self, passenger):
print(f"Возврат билета пассажиром {passenger.name}.")
# Геттер номера рейса
@property
def flight_number(self):
return self.__flight_number
# Геттер времени отправления
@property
def departure_time(self):
return self.__departure_time
# Геттер количества свободных мест
@property
def available_seats(self):
return self.__available_seats
# Геттер места прибытия
@property
def destination(self):
return self.__destination
# Геттер общего количества мест
@property
def total_seats(self):
return self.__total_seats
# Геттер количества занятых мест
@property
def occupied_seats(self):
return self.__occupied_seats
# Геттер количества зарегестрированных мест
@property
def registered_seats(self):
return self.__registered_seats
class Interface:
airport = Airport
mainscreen = Tk()
# def __init__(self):
# Пример
passport1 = Passport("Боб", "Боб", "4517", "357645") # Содание объекта паспорт
passenger1 = Passenger(passport1, "89131331224", "bob@gmail.com") # Создание объекта пассажир
airport = Airport() # Создание объекта аэропорт
flight1 = Flight("F123", "Москва", "12:00", 100) # Создание объекта самолёт
flight2 = Flight("A200", "Новосибирск", "09:00", 300) # Создание объекта самолёт
airport.add_flight(flight1) # Добавление самолёта в список рейсов
airport.add_flight(flight2) # Добавление самолёта в список рейсов
flight1.buy_ticket(passenger1, 200, "Group A") # Покупка билета
flight1.register_passenger(passenger1) # Регистрация билета
flight1.cancel_ticket(passenger1) # Отмена билета
airport.display_flights() # Отображение всех рейсов
airport.cancel_flight(flight1) # Отмена рейса
airport.display_flights() # Отображение всех рейсов
|
8a675fbe7f04114e898eaa223d6393f7
|
{
"intermediate": 0.20966340601444244,
"beginner": 0.6283963322639465,
"expert": 0.16194027662277222
}
|
46,807
|
Hi! Please, adjust my program for the given task. You have to implement Djikstra algorithm using Priority queue which is implemented using binary heap. You are not allowed to use additional libraries.
Task:
Given an oriented graph, determine if it has a cycle of negative weight and print it (if exists).
Input
Input's first line has number N (1 ≤ N ≤ 100) — number of vertices in the graph. Each of the next N lines contains N numbers — all representing an adjacency matrix. All weights are strictly less than 10000 by absolute value. If there is no edge, corresponding value will be exactly 100000.
Output
First line should YES if there exists a negative cycle, or NO otherwise. If the cycle exists, second line should should contain the number of vertices in that cycle and third line should contain indices of those vertices (in cycle order).
Example
inputCopy
2
0 -1
-1 0
outputCopy
YES
2
2 1
Program to be adjusted:
#include <iostream>
#include <vector>
#include <list>
template<typename T, typename V>
class Edge;
template<typename T, typename V>
class Vertex {
private:
T label;
bool visited;
std::list<Edge<T, V>*> adjacentList;
public:
Vertex(T _label) : label(_label), visited(false) {}
int getDegree() const { return this->adjacentList.size(); }
T getLabel() const { return this->label; }
void addEdge(Edge<T, V>* edge) { this->adjacentList.push_back(edge); }
const std::list<Edge<T, V>*>& getAdjacentList() const { return this->adjacentList; }
bool isVisited() const { return this->visited; }
void setVisited(bool visited) { this->visited = visited; }
};
template<typename T, typename V>
class Edge {
private:
Vertex<T, V>& sVertex;
Vertex<T, V>& dVertex;
V weight;
bool visited;
public:
Edge(Vertex<T, V>& _sVertex, Vertex<T, V>& _dVertex, V _weight) : sVertex(_sVertex), dVertex(_dVertex), weight(_weight), visited(false) {}
const Vertex<T, V>& getAdjacentVertex(const Vertex<T, V>& vertex) const {
return (vertex.getLabel() != sVertex.getLabel()) ? sVertex : dVertex;
}
bool operator>(const Edge<T, V>& other) const { return this->weight > other.weight; }
T getLabelSource() const { return sVertex.getLabel(); }
T getLabelDest() const { return dVertex.getLabel(); }
V getWeight() const { return this->weight; }
bool isVisited() const { return this->visited; }
void setVisited(bool val) { this->visited = val; }
};
template<typename T, typename V>
class Graph : public virtual iDisjointSet<T> {
private:
int numOfVertices;
std::list<Vertex<T, V>*> vertices;
std::list<Edge<T, V>*> edges;
std::list<std::list<Edge<T, V>*>> trees;
std::list<int> degrees;
std::vector<int> parents;
int singleVertices;
void depthFirstSearch(Vertex<T, V>* vertex, std::list<Edge<T, V>*>& tree) {
vertex->setVisited(true);
const std::list<Edge<T, V>*>& adjEdges = vertex->getAdjacentList();
for (Edge<T, V>* adjEdge : adjEdges) {
Vertex<T, V>* adjacentVertex = const_cast<Vertex<T, V>*>(&adjEdge->getAdjacentVertex(*vertex));
if (!adjEdge->isVisited()) { tree.push_back(adjEdge); }
adjEdge->setVisited(true);
if (!adjacentVertex->isVisited()) {
depthFirstSearch(adjacentVertex, tree);
}
}
}
void getTrees() {
for (Vertex<T, V>* vertex : vertices) {
if (!vertex->isVisited()) {
std::list<Edge<T, V>*> tree;
depthFirstSearch(vertex, tree);
if (!tree.empty()) {
trees.push_back(tree);
}
}
}
}
void sortEdges(std::list<Edge<T,V>*>& edges) {
if (edges.empty()) { return; }
V minWeight = std::numeric_limits<V>::max();
V maxWeight = std::numeric_limits<V>::min();
for (auto edge : edges) {
minWeight = std::min(minWeight, edge->getWeight());
maxWeight = std::max(maxWeight, edge->getWeight());
}
std::vector<int> count(maxWeight - minWeight + 1, 0);
for (Edge<T,V>* edge : edges) { count[edge->getWeight() - minWeight]++; }
for (int i = 1; i < count.size(); i++)
count[i] += count[i - 1];
std::vector<Edge<T,V>*> sortedEdges(edges.size());
for (auto it = edges.rbegin(); it != edges.rend(); ++it) {
sortedEdges[count[(*it)->getWeight() - minWeight] - 1] = *it;
count[(*it)->getWeight() - minWeight]--;
}
edges.clear();
for (Edge<T,V>* edge : sortedEdges) { edges.push_back(edge); }
}
std::list<Edge<T, V>*> getMinimumSpanningTree(std::list<Edge<T, V>*>& listOfEdges) {
std::list<Edge<T, V>*> minimumSpanningTree;
sortEdges(listOfEdges);
for (Edge<T, V>* edge : listOfEdges) {
int setStart = find(edge->getLabelSource());
int setEnd = find(edge->getLabelDest());
if (setStart != setEnd) {
makeUnion(setStart, setEnd);
minimumSpanningTree.push_back(edge);
}
}
return minimumSpanningTree;
}
T find(int index) {
if (this->parents[index] == 0) { return index; }
this->parents[index] = find(this->parents[index]);
return this->parents[index];
}
void makeUnion(int indexStart, int indexEnd) { this->parents[find(indexStart)] = find(indexEnd); }
T getNumberOfVertices(std::list<Edge<T, V>*>& tree) {
std::vector<T> labels(100000, 0);
for (Edge<T, V>* edge : tree) {
T v1 = edge->getLabelSource();
T v2 = edge->getLabelDest();
labels[v1] = 1;
labels[v2] = 1;
}
T sum = 0;
for (auto i : labels) { sum += i; }
return sum;
}
void buildMinimumSpanningForest() {
getTrees();
std::cout << this->trees.size() + this->singleVertices << std::endl;
for (std::list<Edge<T, V>*>& tree : trees) {
Edge<T, V>* firstEdge = *tree.begin();
std::cout << getNumberOfVertices(tree) << ' ' << firstEdge->getLabelDest() << std::endl;
std::list<Edge<T, V>*> mst = getMinimumSpanningTree(tree);
for (Edge<T, V>* edge : mst) {
if (edge != nullptr)
std::cout << edge->getLabelSource() << " " << edge->getLabelDest() << " " << edge->getWeight() << std::endl;
}
}
}
public:
Graph(int _numOfVertices) : numOfVertices(_numOfVertices) { this->parents.resize(100000); this->singleVertices = 0; }
Graph() = default;
std::list<std::list<Edge<T, V>*>>& decomposeGraph() {
getTrees();
return this->trees;
}
Vertex<T, V>* addVertex(T label) {
Vertex<T, V>* newVertex = new Vertex<T, V>(label);
vertices.push_back(newVertex);
return newVertex;
}
Edge<T, V>* addEdge(Vertex<T, V>& sVertex, Vertex<T, V>& dVertex, V weight) {
Edge<T, V>* newEdge = new Edge<T, V>(sVertex, dVertex, weight);
edges.push_back(newEdge);
sVertex.addEdge(newEdge);
dVertex.addEdge(newEdge);
return newEdge;
}
void minimumSpanningForest() {
buildMinimumSpanningForest();
}
void addSingleVertex() { this->singleVertices++; }
};
int main(void) {
int numOfVertices, temp;
const int LIMIT = 100000;
std::cin >> numOfVertices;
Graph<int, int> graph(numOfVertices);
for (int i = 0; i < numOfVertices; i++) {
for (int j = 0; j < numOfVertices; j++) {
std::cin >> temp;
if (temp != 0) {
Vertex<int, int>* v1 = graph.addVertex(i + 1);
Vertex<int, int>* v2 = graph.addVertex(j + 1);
graph.addEdge(*v1, *v2, temp);
}
}
}
return 0;
}
|
2082ec04245889ed4027497627c47450
|
{
"intermediate": 0.413147896528244,
"beginner": 0.35242441296577454,
"expert": 0.23442766070365906
}
|
46,808
|
onuploadprogress axios typescript example
|
8c62f016ea2e97311baac4e40cae2cf6
|
{
"intermediate": 0.23916547000408173,
"beginner": 0.43786683678627014,
"expert": 0.3229677081108093
}
|
46,809
|
Defaulting to user installation because normal site-packages is not writeable
ERROR: Could not find a version that satisfies the requirement tensorflow<2.11 (from versions: 2.12.0rc0, 2.12.0rc1, 2.12.0, 2.12.1, 2.13.0rc0, 2.13.0rc1, 2.13.0rc2, 2.13.0, 2.13.1, 2.14.0rc0, 2.14.0rc1, 2.14.0, 2.14.1, 2.15.0rc0, 2.15.0rc1, 2.15.0, 2.15.1, 2.16.0rc0, 2.16.1)
ERROR: No matching distribution found for tensorflow<2.11
|
859d95d14b189f7df982e75566ced4f8
|
{
"intermediate": 0.3208305835723877,
"beginner": 0.3705481290817261,
"expert": 0.30862125754356384
}
|
46,810
|
{
"cells": [
{
"cell_type": "markdown",
"metadata": {
"id": "3UeycYCyxDfE"
},
"source": [
"# TRANSLATOR"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "8BfUjVxBcz5N"
},
"source": [
"## instalation"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "WXqM38xBRHu2"
},
"outputs": [],
"source": [
"%%time\n",
"!pip install -q -U tensorflow-text\n",
"!pip install datasets\n",
"!pip install -q tensorflow_datasets\n",
"!pip install pydot\n",
"!cd /content\n",
"!clear"
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {
"id": "Ukvs1XfMG7aG"
},
"outputs": [],
"source": [
"import tensorflow as tf\n",
"import tensorflow_text as tf_text\n",
"import tensorflow_datasets as tfds\n",
"import numpy as np\n",
"import matplotlib.pyplot as plt\n",
"import requests\n",
"import functools\n",
"import collections\n",
"import os\n",
"import pathlib\n",
"import re\n",
"import string\n",
"import tempfile\n",
"import time\n",
"import matplotlib.pyplot as plt\n",
"import os\n",
"import re\n",
"import shutil\n",
"import string\n",
"import tensorflow as tf\n",
"\n",
"from tensorflow.keras import layers\n",
"from tensorflow.keras import losses\n",
"import pydot"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "7V7igFwpc6Hs"
},
"source": [
"## dataset"
]
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {
"id": "ZaMtoUtAREzs",
"colab": {
"base_uri": "https://localhost:8080/"
},
"outputId": "67fb1777-9e10-4197-88dc-1187b08082ac"
},
"outputs": [
{
"output_type": "stream",
"name": "stdout",
"text": [
"Nombre de phrases en français : 127085\n",
"Nombre de phrases en anglais : 127085\n"
]
}
],
"source": [
"from datasets import load_dataset\n",
"\n",
"dataset = load_dataset("Helsinki-NLP/opus_books", "en-fr")\n",
"data = dataset["train"]\n",
"\n",
"french_sentences = [example["fr"] for example in data["translation"][:127085]]\n",
"english_sentences = [example["en"] for example in data["translation"][:127085]]\n",
"dataset = tf.data.Dataset.from_tensor_slices((french_sentences, english_sentences))\n",
"\n",
"french_sentences_decoded = []\n",
"english_sentences_decoded = []\n",
"\n",
"for french_sentence, english_sentence in dataset.take(127085):\n",
" french_sentences_decoded.append("b '"+french_sentence.numpy().decode('utf-8'))\n",
" english_sentences_decoded.append("b '"+english_sentence.numpy().decode('utf-8'))\n",
"\n",
"print("Nombre de phrases en français :", len(french_sentences_decoded))\n",
"print("Nombre de phrases en anglais :", len(english_sentences_decoded))"
]
},
{
"cell_type": "code",
"execution_count": 4,
"metadata": {
"id": "2tb7H5uFQoBA"
},
"outputs": [],
"source": [
"train_fr = french_sentences\n",
"train_en = english_sentences"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "NzS8h0budHWv"
},
"source": [
"## vocab"
]
},
{
"cell_type": "code",
"execution_count": 5,
"metadata": {
"id": "72uMXsQhFIx8"
},
"outputs": [],
"source": [
"from tensorflow_text.tools.wordpiece_vocab import bert_vocab_from_dataset as bert_vocab\n",
"\n",
"bert_tokenizer_params = dict(lower_case=True)\n",
"reserved_tokens = ["[PAD]", "[UNK]", "[START]", "[END]"]\n",
"\n",
"bert_vocab_args = {\n",
" 'vocab_size': 8000,\n",
" 'reserved_tokens': reserved_tokens,\n",
" 'bert_tokenizer_params': bert_tokenizer_params,\n",
" 'learn_params': {},\n",
"}\n",
"\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "q6f3mrA-DK0n"
},
"outputs": [],
"source": [
"%%time\n",
"en_vocab = bert_vocab.bert_vocab_from_dataset(\n",
" tf.data.Dataset.from_tensor_slices(english_sentences).batch(1000).prefetch(2),\n",
" **bert_vocab_args\n",
")"
]
},
{
"cell_type": "code",
"execution_count": 7,
"metadata": {
"id": "EJZzz1x5YY0x",
"colab": {
"base_uri": "https://localhost:8080/"
},
"outputId": "1e627917-410a-479f-d27d-ca31480d12bb"
},
"outputs": [
{
"output_type": "stream",
"name": "stdout",
"text": [
"CPU times: user 3min 38s, sys: 1.51 s, total: 3min 39s\n",
"Wall time: 3min 40s\n"
]
}
],
"source": [
"%%time\n",
"fr_vocab = bert_vocab.bert_vocab_from_dataset(\n",
" tf.data.Dataset.from_tensor_slices(french_sentences).batch(1000).prefetch(2),\n",
" **bert_vocab_args\n",
")"
]
},
{
"cell_type": "code",
"execution_count": 8,
"metadata": {
"id": "1hmXuHHNcBHg"
},
"outputs": [],
"source": [
"def write_vocab_file(filepath, vocab):\n",
" with open(filepath, 'w') as f:\n",
" for token in vocab:\n",
" print(token, file=f)\n",
"write_vocab_file('en_vocab.txt', en_vocab)\n",
"write_vocab_file('fr_vocab.txt', fr_vocab)"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "kLTf_mEvfNR9"
},
"source": [
"#TOKENIZER\n",
"\n",
"\n",
"\n"
]
},
{
"cell_type": "code",
"execution_count": 9,
"metadata": {
"id": "jGshnQ2idy8I"
},
"outputs": [],
"source": [
"fr_tokenizer = tf_text.BertTokenizer('fr_vocab.txt', **bert_tokenizer_params)\n",
"en_tokenizer = tf_text.BertTokenizer('en_vocab.txt', **bert_tokenizer_params)"
]
},
{
"cell_type": "code",
"execution_count": 10,
"metadata": {
"id": "bbQyYKhHkkDe",
"colab": {
"base_uri": "https://localhost:8080/"
},
"outputId": "ef58d907-9286-471c-e286-2fa1cda19af9"
},
"outputs": [
{
"output_type": "stream",
"name": "stdout",
"text": [
"[3092, 501, 202, 151, 107, 2890, 177, 4626]\n"
]
}
],
"source": [
"# Tokenize the examples -> (batch, word, word-piece)\n",
"en_tokenizere = en_tokenizer.tokenize("hello how are you Vadim")\n",
"# Merge the word and word-piece axes -> (batch, tokens)\n",
"en_tokenizere= en_tokenizere.merge_dims(-2,-1)\n",
"\n",
"for ex in en_tokenizere.to_list():\n",
" print(ex)\n"
]
},
{
"cell_type": "code",
"source": [
"words = en_tokenizer.detokenize(token_batch)\n",
"tf.strings.reduce_join(words, separator=' ', axis=-1)"
],
"metadata": {
"id": "k0m1461Gwy3e"
},
"execution_count": null,
"outputs": []
},
{
"cell_type": "markdown",
"source": [
"## model"
],
"metadata": {
"id": "BjoPdwoxBWw2"
}
},
{
"cell_type": "code",
"execution_count": 12,
"metadata": {
"id": "7OqoDOybbwi6"
},
"outputs": [],
"source": [
"max_length = 200\n",
"\n",
"fr_sequences = [fr_tokenizer.tokenize(french_sentence.numpy().decode('utf-8')).merge_dims(-2,-1)\n",
" for french_sentence, _ in dataset.take(1000)]\n",
"fr_ragged = tf.ragged.stack(fr_sequences)\n",
"fr_padded = fr_ragged.to_tensor(default_value=0, shape=[None, None, max_length])\n",
"\n",
"fr_sequencesdeocde = [fr_tokenizer.tokenize("[START]"+french_sentence.numpy().decode('utf-8')+"[END]").merge_dims(-2,-1)\n",
" for french_sentence, _ in dataset.take(1000)]\n",
"fr_raggeddecode = tf.ragged.stack(fr_sequences)\n",
"fr_paddeddecode = fr_ragged.to_tensor(default_value=0, shape=[None, None, max_length])\n",
"\n",
"en_sequences = [en_tokenizer.tokenize(english_sentence.numpy().decode('utf-8')).merge_dims(-2,-1)\n",
" for _, english_sentence in dataset.take(1000)]\n",
"en_ragged = tf.ragged.stack(en_sequences)\n",
"en_padded = en_ragged.to_tensor(default_value=0, shape=[None, None, max_length])\n",
"\n",
"x_train = fr_padded\n",
"x2_train = fr_paddeddecode\n",
"y_train = en_padded\n",
"\n"
]
},
{
"cell_type": "code",
"source": [
"inputs = tf.keras.Input(shape=(1,200))\n",
"embedding_dim = 200\n",
"lstm_units = 512\n",
"vocab_size_en = len(en_vocab) + len(reserved_tokens)\n",
"vocab_size_fr = len(fr_vocab) + len(reserved_tokens)\n",
"\n",
"x = layers.LSTM(200, return_sequences=True)(inputs)\n",
"x = layers.Embedding(input_dim=vocab_size_fr, output_dim=embedding_dim, mask_zero=True)(x)\n",
"x = layers.LSTM(200, return_sequences=True)(x)\n",
"x = layers.LSTM(1024, return_sequences=True)(x)\n",
"x = layers.LSTM(500, return_sequences=True)(x)\n",
"x = layers.LSTM(400, return_sequences=True)(x)\n",
"x = layers.LSTM(300, return_sequences=True)(x)\n",
"outputs = layers.Dense(200)(x)\n",
"\n",
"model = tf.keras.Model(inputs=inputs, outputs=outputs)\n",
"model.summary()"
],
"metadata": {
"id": "NFo9fU5lw2IR"
},
"execution_count": null,
"outputs": []
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "X--5AeJ7GgPp"
},
"outputs": [],
"source": [
"model.compile(tf.keras.optimizers.Adam(beta_1=0.9, beta_2=0.98,\n",
" epsilon=1e-9),\n",
" loss='categorical_crossentropy',\n",
" metrics=['accuracy'])"
]
},
{
"cell_type": "code",
"source": [
"inputs = tf.keras.Input(shape=(1,200))\n",
"embedding_dim = 200\n",
"lstm_units = 512\n",
"vocab_size_en = len(en_vocab) + len(reserved_tokens)\n",
"vocab_size_fr = len(fr_vocab) + len(reserved_tokens)\n",
"\n",
"encoder_inputs = tf.keras.layers.Input(shape=(200,))\n",
"encoder_embedding = tf.keras.layers.Embedding(input_dim=vocab_size_en, output_dim=embedding_dim, mask_zero=True)(encoder_inputs)\n",
"encoder_outputs, state_h, state_c = tf.keras.layers.LSTM(lstm_units, return_state=True)(encoder_embedding)\n",
"encoder_states = [state_h, state_c]\n",
"\n",
"decoder_inputs = tf.keras.layers.Input(shape=(200,))\n",
"decoder_embedding = tf.keras.layers.Embedding(input_dim=vocab_size_fr, output_dim=embedding_dim, mask_zero=True)(decoder_inputs)\n",
"decoder_lstm = tf.keras.layers.LSTM(lstm_units, return_sequences=True, return_state=True)\n",
"decoder_outputs, _, _ = decoder_lstm(decoder_embedding, initial_state=encoder_states)\n",
It looks like you're building a sequence-to-sequence model for English to French translation using TensorFlow and Keras. Here are a few suggestions to complete the model:
Add a dense output layer to the decoder to generate the predicted French words:
decoder_dense = tf.keras.layers.Dense(vocab_size_fr, activation='softmax')(decoder_outputs)
Define the model inputs and outputs:
model = tf.keras.models.Model(inputs=[encoder_inputs, decoder_inputs], outputs=decoder_dense)
Compile the model with an appropriate loss function and optimizer:
model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])
Prepare the training data:
Convert the English and French sentences to integer sequences using the respective tokenizers.
Pad the sequences to a fixed length.
Split the data into training and validation sets.
Train the model:
model.fit([encoder_input_data, decoder_input_data], decoder_target_data,
batch_size=batch_size,
epochs=epochs,
validation_split=0.2)
Evaluate the model on a test set and calculate metrics like BLEU score to assess the translation quality.
Use the trained model for inference:
Tokenize and encode the input English sentence.
Use the encoder to generate the initial states for the decoder.
Iteratively predict the French words using the decoder until an end token is generated or a maximum length is reached.
Decode the predicted French sequence back to text.
Here's an example of how the inference step could look:
def translate_sentence(sentence):
# Tokenize and encode the input sentence
input_seq = en_tokenizer.tokenize(sentence).merge_dims(-2,-1)
input_seq = input_seq.to_tensor(default_value=0, shape=[None, max_length])
# Generate the initial states for the decoder
states_value = encoder_model.predict(input_seq)
# Initialize the decoder input with the start token
target_seq = np.zeros((1, 1))
target_seq[0, 0] = fr_tokenizer.tokenize('[START]')[0]
stop_condition = False
decoded_sentence = ''
while not stop_condition:
output_tokens, h, c = decoder_model.predict([target_seq] + states_value)
sampled_token_index = np.argmax(output_tokens[0, -1, :])
sampled_token = fr_tokenizer.detokenize([sampled_token_index])
if sampled_token == '[END]' or len(decoded_sentence.split()) > max_length:
stop_condition = True
else:
decoded_sentence += sampled_token.numpy().decode('utf-8') + ' '
target_seq = np.zeros((1, 1))
target_seq[0, 0] = sampled_token_index
states_value = [h, c]
return decoded_sentence
These are the main steps to complete your English to French translation model. Make sure to handle text preprocessing, tokenization, and padding appropriately. Experiment with different model architectures, hyperparameters, and training strategies to improve the translation quality.
what i should replace by this cells ? :
inputs = tf.keras.Input(shape=(1,200))
embedding_dim = 200
lstm_units = 512
vocab_size_en = len(en_vocab) + len(reserved_tokens)
vocab_size_fr = len(fr_vocab) + len(reserved_tokens)
encoder_inputs = tf.keras.layers.Input(shape=(200,))
encoder_embedding = tf.keras.layers.Embedding(input_dim=vocab_size_en, output_dim=embedding_dim, mask_zero=True)(encoder_inputs)
encoder_outputs, state_h, state_c = tf.keras.layers.LSTM(lstm_units, return_state=True)(encoder_embedding)
encoder_states = [state_h, state_c]
decoder_inputs = tf.keras.layers.Input(shape=(200,))
decoder_embedding = tf.keras.layers.Embedding(input_dim=vocab_size_fr, output_dim=embedding_dim, mask_zero=True)(decoder_inputs)
decoder_lstm = tf.keras.layers.LSTM(lstm_units, return_sequences=True, return_state=True)
decoder_outputs, _, _ = decoder_lstm(decoder_embedding, initial_state=encoder_states)
decoder_dense = tf.keras.layers.Dense(vocab_size_fr, activation='softmax')
decoder_outputs = decoder_dense(decoder_outputs)
model = tf.keras.Model([encoder_inputs, decoder_inputs], decoder_outputs)
model.compile(optimizer=tf.keras.optimizers.Adam(), loss='sparse_categorical_crossentropy', metrics=['accuracy'])
model.summary()
Based on the code you provided, it seems like you have already implemented the necessary changes I suggested. Let's go through the code and make sure everything is in place:
You have defined the encoder inputs, embedding layer, and LSTM layer:
encoder_inputs = tf.keras.layers.Input(shape=(200,))
encoder_embedding = tf.keras.layers.Embedding(input_dim=vocab_size_en, output_dim=embedding_dim, mask_zero=True)(encoder_inputs)
encoder_outputs, state_h, state_c = tf.keras.layers.LSTM(lstm_units, return_state=True)(encoder_embedding)
encoder_states = [state_h, state_c]
You have defined the decoder inputs, embedding layer, LSTM layer, and dense output layer:
decoder_inputs = tf.keras.layers.Input(shape=(200,))
decoder_embedding = tf.keras.layers.Embedding(input_dim=vocab_size_fr, output_dim=embedding_dim, mask_zero=True)(decoder_inputs)
decoder_lstm = tf.keras.layers.LSTM(lstm_units, return_sequences=True, return_state=True)
decoder_outputs, _, _ = decoder_lstm(decoder_embedding, initial_state=encoder_states)
decoder_dense = tf.keras.layers.Dense(vocab_size_fr, activation='softmax')
decoder_outputs = decoder_dense(decoder_outputs)
You have defined the model inputs and outputs:
model = tf.keras.Model([encoder_inputs, decoder_inputs], decoder_outputs)
You have compiled the model with the appropriate loss function and optimizer:
model.compile(optimizer=tf.keras.optimizers.Adam(), loss='sparse_categorical_crossentropy', metrics=['accuracy'])
You have printed the model summary:
model.summary()
The code you provided looks good and follows the necessary steps to build the sequence-to-sequence model for translation. You don't need to replace anything in these cells.
The next steps would be to prepare your training data, train the model, evaluate its performance, and use it for inference as described in my previous response.
Remember to preprocess and tokenize your training data, pad the sequences to a fixed length, and split the data into training and validation sets before training the model. You can use the fit() method to train the model and pass the appropriate input and target data.
After training, you can evaluate the model on a test set and calculate metrics like BLEU score to assess the translation quality. Finally, you can use the trained model for inference by following the steps outlined in the previous response to translate new English sentences to French.
history = model.fit([x_train,x2_train], y_train, epochs=10, batch_size=2)
Epoch 1/10
ValueError Traceback (most recent call last)
in <cell line: 1>()
----> 1 history = model.fit([x_train,x2_train], y_train, epochs=10, batch_size=2)
1 frames
/usr/local/lib/python3.10/dist-packages/keras/src/utils/traceback_utils.py in error_handler(*args, **kwargs)
120 # To get the full stack trace, call:
121 # keras.config.disable_traceback_filtering()
--> 122 raise e.with_traceback(filtered_tb) from None
123 finally:
124 del filtered_tb
/usr/local/lib/python3.10/dist-packages/keras/src/layers/input_spec.py in assert_input_compatibility(input_spec, inputs, layer_name)
243 if spec_dim is not None and dim is not None:
244 if spec_dim != dim:
--> 245 raise ValueError(
246 f'Input {input_index} of layer "{layer_name}" is '
247 "incompatible with the layer: "
ValueError: Input 0 of layer "functional_3" is incompatible with the layer: expected shape=(None, 200), found shape=(2, 1, 200)
The error message suggests that there is a mismatch between the expected input shape of the model and the actual shape of the input data you are providing.
The model expects the input shape to be (None, 200), but the shape of x_train is (2, 1, 200). It seems like x_train has an extra dimension of size 1.
To resolve this issue, you can try reshaping x_train and x2_train to remove the extra dimension. You can use the reshape() function from NumPy to do this:
x_train = x_train.reshape((x_train.shape[0], x_train.shape[-1]))
x2_train = x2_train.reshape((x2_train.shape[0], x2_train.shape[-1]))
This will reshape x_train and x2_train to have the shape (num_samples, 200), which should match the expected input shape of the model.
After reshaping the input data, you can try running the fit() method again:
history = model.fit([x_train, x2_train], y_train, epochs=10, batch_size=2)
Make sure that y_train also has the appropriate shape (num_samples, 200) and matches the output shape of the model.
If you still encounter any shape mismatch errors, double-check the shapes of your input and target data and ensure they align with the model's expected shapes.
Additionally, ensure that the number of samples in x_train, x2_train, and y_train are the same.
If the issue persists, please provide more information about the shapes of your input and target data, and I'll be happy to assist you further.
no can you change the model for accepted data like x1 and x2 now
|
5371ec069a20efac38d207a4e1a1694b
|
{
"intermediate": 0.3376249372959137,
"beginner": 0.42616572976112366,
"expert": 0.23620924353599548
}
|
46,811
|
make it 1 liner
for session, session_info in sessions.items():
if session_info[‘posts sent’]:
length_with_posts+=1
|
2dc721306e48612a34075619a613ec47
|
{
"intermediate": 0.27735060453414917,
"beginner": 0.5434809923171997,
"expert": 0.17916841804981232
}
|
46,812
|
pip install sklearn
Collecting sklearn
Downloading sklearn-0.0.post12.tar.gz (2.6 kB)
Preparing metadata (setup.py) ... error
error: subprocess-exited-with-error
× python setup.py egg_info did not run successfully.
│ exit code: 1
╰─> [15 lines of output]
The 'sklearn' PyPI package is deprecated, use 'scikit-learn'
rather than 'sklearn' for pip commands.
Here is how to fix this error in the main use cases:
- use 'pip install scikit-learn' rather than 'pip install sklearn'
- replace 'sklearn' by 'scikit-learn' in your pip requirements files
(requirements.txt, setup.py, setup.cfg, Pipfile, etc ...)
- if the 'sklearn' package is used by one of your dependencies,
it would be great if you take some time to track which package uses
'sklearn' instead of 'scikit-learn' and report it to their issue tracker
- as a last resort, set the environment variable
SKLEARN_ALLOW_DEPRECATED_SKLEARN_PACKAGE_INSTALL=True to avoid this error
More information is available at
https://github.com/scikit-learn/sklearn-pypi-package
[end of output]
note: This error originates from a subprocess, and is likely not a problem with pip.
error: metadata-generation-failed
× Encountered error while generating package metadata.
╰─> See above for output.
note: This is an issue with the package mentioned above, not pip.
hint: See above for details.
|
4f1326d445089b9afa0ac3581b1f5639
|
{
"intermediate": 0.30493244528770447,
"beginner": 0.336834579706192,
"expert": 0.35823291540145874
}
|
46,813
|
Given the following byte stream represented in binary:
0 0 1 1 1 0 0 1
1 1 0 0 0 0 1 0
1 0 1 1 0 0 0 1
1 1 0 0 0 0 1 0
1 0 1 1 0 1 0 1
1 1 1 1 0 0 0 0
1 0 0 1 1 1 1 1
1 0 0 0 1 1 0 0
1 0 1 0 1 1 0 1
Assuming that the above bytes constitute a valid UTF-8 byte stream, how many Unicode characters does it contain?
6.
7.
5.
8.
4.
|
105855430fef51cce3785a1559534af7
|
{
"intermediate": 0.41113999485969543,
"beginner": 0.333779513835907,
"expert": 0.25508052110671997
}
|
46,814
|
which type response must be
function uploadFile(){
if(state.selectedFiles !== undefined){
let currentFile = state.selectedFiles[0];
setState({
...state,
progress: 0,
currentFile: currentFile,
})
upload(currentFile, (event) => {
setState({
...state,
progress: Math.round((100 * event.loaded || 1) / (event.total || 1)),
})
}).then(response){
}
}
}
|
56f9c0035c3179853c0fda85338e6386
|
{
"intermediate": 0.37335050106048584,
"beginner": 0.3539656400680542,
"expert": 0.2726838290691376
}
|
46,815
|
python extract filename from filepath
|
398cc71e89239bf74472622b56910f89
|
{
"intermediate": 0.44093990325927734,
"beginner": 0.20821142196655273,
"expert": 0.3508486747741699
}
|
46,816
|
how to see directories in path system variable on windows on terminal?
|
75c43ae8ec1ffd084cd37f8162a8c24e
|
{
"intermediate": 0.38917994499206543,
"beginner": 0.345529705286026,
"expert": 0.2652903199195862
}
|
46,817
|
create auto login python script for a site, this is the source :
<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN"><html><head><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"><meta http-equiv="X-UA-Compatible" content="IE=9"><title>LOGIN</title><link rel="stylesheet" type="text/css" href="main.css"><script src="main.js" type="text/javascript"></script><STYLE>
.othd {border-width: 3px; border-style: solid; border-spacing:0px; padding:0px; margin:10px; width:254px; position: relative; display: inline-table;margin-right:0px;
}
table.othd thead tr{color:black; font-family:arial; height: 30px; font-size:14px; font-weight:bold; padding-left:5px; background: RGB(180,203,214);vertical-align:middle; }
table.othd thead td{padding-left:5px; border: 1px solid RGB(100,150,170); padding-right: 25px; }
table.othd tfoot td{padding-left:0px; height:10px; vertical-align: bottom; }
table.othd tbody td{background-color: #E7EEF1; padding-left: 5px; color:red; font-family:arial; font-size:13px; font-weight:bold; border: 1px solid RGB(100,150,170); border-top: 0px; }
table.othd thead img {background-image: url("3round_16x16.png");
background-repeat: no-repeat;width:16px;height:16px;position:absolute;right:10px;top:10px;}
table.othd thead A{text-decoration:none;color:blue;}table.othd thead A:hover{text-decoration:underline;}.oth_ok { background-position: 0px 0px; }
.oth_err { background-position: -32px 0px; }
.oth_xxx { background-position: -16px 0px; }
table.othd tfoot img {background-image: url("2xtrikampiai.png");
width:9px;height:5px;background-repeat: no-repeat; display: flex; margin-left:auto; margin-right: auto;cursor:pointer;}
.trikampis1 { background-position: 0px 0px; }
.trikampis2 { background-position: -9px 0px; }
#OthContainer {text-align:left; float:left;width:820px;margin-top:20px;font-family:arial;font-size:13px;border: 0px dashed RGB(100,150,170); border-top-width: 1px; padding: 0px; padding-top: 10px;}</STYLE>
<SCRIPT>
function expand_msg(Item, IP)
{ var OnOff = 0;
if (Item.className=="trikampis1") { OnOff = 1; }
GetAjaxSync("ajax.cgi", "cmd=othd&ip="+IP+"&on="+OnOff);
}
</SCRIPT></head><BODY><FORM ACTION="run.cgi" METHOD=POST style="width: 100%; text-align: center;"><INPUT TYPE=hidden NAME="cmd" VALUE="login"><INPUT TYPE=hidden NAME="id" VALUE="1"><TABLE CLASS="groupbox3" style="margin-top: 100px;"><THEAD><TR><TD COLSPAN=2>Login</TD></TR></THEAD><TBODY><TR><TD><DIV CLASS="paramTitle">Username:</DIV></TD><TD COLSPAN=1><INPUT TYPE=text CLASS="inpdig" NAME="usr" MAXLENGTH="15" VALUE=""></TD></TR><TR><TD><DIV CLASS="paramTitle">Password:</DIV></TD><TD COLSPAN=1><INPUT TYPE=password CLASS="inpdig" NAME="psw" maxlength="15"></TD></TR><TFOOT><TR class="bglightgray"><TD COLSPAN=2 STYLE="text-align:center;"><INPUT TYPE=submit CLASS="msg_buttons" VALUE="Login"></TD></TR></TFOOT></TABLE></FORM></BODY>
<script type="text/javascript">
if (document.title != "LOGIN") {ddlevelsmenu.setup("ddtopmenubar", "topbar");}
</script></HTML>
|
28e1417e9ebd4b1c33aaca60e9f0b9e8
|
{
"intermediate": 0.30604150891304016,
"beginner": 0.4853309690952301,
"expert": 0.20862749218940735
}
|
46,818
|
I have a list of columns. I want to extqract the columns of df that are not in that list
|
790e8baa775541a7a2be0c0bb66b95f2
|
{
"intermediate": 0.39271867275238037,
"beginner": 0.26389360427856445,
"expert": 0.34338775277137756
}
|
46,819
|
When getting rid of a hard drive, is it better to write it full of zeroes or random data?
|
83b5d56830c4b99e3b2e8765466796f2
|
{
"intermediate": 0.3166959285736084,
"beginner": 0.28032997250556946,
"expert": 0.40297406911849976
}
|
46,820
|
# Step 1: Ensure you’ve got the necessary packages installed and loaded:
if (!require(quadprog)) install.packages("quadprog")
if (!require(PerformanceAnalytics)) install.packages("PerformanceAnalytics")
library(quadprog)
library(PerformanceAnalytics)
# Step 2: Prepare your data based on the selected assets from both strategies.
# For demonstration purposes, let's assume selected_returns1 and selected_returns2 are data frames of daily returns for the selected assets under Strategy 1 and Strategy 2, respectively.
# You should replace these placeholders with your own data as obtained from previous steps.
# Step 3: Implement Global Minimum Variance Portfolio without short selling:
calculate_GMVP <- function(returns) {
Dmat <- 2 * cov(returns)
dvec <- rep(0, ncol(returns))
Amat <- rbind(c(1, rep(0, ncol(returns))), diag(ncol(returns)))
bvec <- c(1, rep(0, ncol(returns)))
meq <- 1
solve.QP(Dmat, dvec, Amat, bvec, meq)$solution
}
calculate_GMVP <- function(returns) {
# This is a placeholder, adjust according to the actual function definition
# Ensure this function is capable of calculating and returning the GMVP weights based on returns
}
# Calculate for both strategies
weights_GMVP1 <- calculate_GMVP(selected_returns1)
weights_GMVP2 <- calculate_GMVP(selected_returns2)
# Step 4: Implement Tangency Portfolio with short selling:
calculate_tangency_portfolio <- function(returns) {
avg_returns <- colMeans(returns)
inv_cov_matrix <- solve(cov(returns))
ones_vector <- rep(1, ncol(returns))
portfolio_weights <- inv_cov_matrix %*% avg_returns
portfolio_weights <- portfolio_weights / (ones_vector %*% inv_cov_matrix %*% avg_returns)
return(portfolio_weights)
}
calculate_tangency_portfolio <- function(selected_returns) {
avg_returns <- colMeans(selected_returns)
cov_matrix <- cov(selected_returns)
inv_cov_matrix <- solve(cov_matrix)
ones_vector <- matrix(1, nrow = ncol(selected_returns), ncol = 1)
numerator <- inv_cov_matrix %% avg_returns
denominator <- t(ones_vector) %% inv_cov_matrix %*% avg_returns
weights <- numerator / denominator
return(weights)
}
# Calculate for both strategies
weights_TP1 <- calculate_tangency_portfolio(selected_returns1)
weights_TP2 <- calculate_tangency_portfolio(selected_returns2)
# Step 5: Output the weight allocations and optionally calculate portfolio return and risk measures.
cat("Global Minimum Variance Portfolio Weights (Strategy 1):", weights_GMVP1, "\n")
cat("Tangency Portfolio Weights (Strategy 1):", weights_TP1, "\n")
cat("Global Minimum Variance Portfolio Weights (Strategy 2):", weights_GMVP2, "\n")
cat("Tangency Portfolio Weights (Strategy 2):", weights_TP2, "\n")
# Optional: Calculate Portfolio Return and Risk for a demonstration
portfolio_return_GMVP1 <- sum(colMeans(selected_returns1) * weights_GMVP1)
portfolio_risk_GMVP1 <- sqrt(t(weights_GMVP1) %*% cov(selected_returns1) %*% weights_GMVP1)
cat("Portfolio Return (GMVP Strategy 1):", portfolio_return_GMVP1, "\n")
cat("Portfolio Risk (GMVP Strategy 1):", portfolio_risk_GMVP1, "\n")
# Calculate for both strategies
> weights_TP1 <- calculate_tangency_portfolio(selected_returns1)
Error in t(ones_vector)%%inv_cov_matrix : non-conformable arrays
>
|
5c114eaeefba7b4e07654a719e16317f
|
{
"intermediate": 0.40354013442993164,
"beginner": 0.32102280855178833,
"expert": 0.27543705701828003
}
|
46,821
|
User
is their any way in R studio to find out how the table in enviorment was created ?
|
16c683f1a7ebd09ad5f02999b7eba3d0
|
{
"intermediate": 0.4673639237880707,
"beginner": 0.1454220414161682,
"expert": 0.3872140347957611
}
|
46,822
|
How to use ocrmypdf?
|
bd3493b9b96c0eebd3c1af18eceee17f
|
{
"intermediate": 0.23292656242847443,
"beginner": 0.14065755903720856,
"expert": 0.6264158487319946
}
|
46,823
|
my code:
# %%
from sklearn.preprocessing import StandardScaler
import pandas as pd
import numpy as np
from tensorflow import keras
def calculate_features_scaling_params(file_path, features_to_drop):
scaler = StandardScaler()
for chunk in pd.read_csv(file_path, chunksize=10000): # Adjust chunksize based on your memory capacity
filtered_chunk = chunk.drop(features_to_drop, axis=1)
scaler.partial_fit(filtered_chunk) # Accumulate means and variances
return scaler.mean_, scaler.var_
def calculate_targets_scaling_params(file_path):
scaler = StandardScaler()
for chunk in pd.read_csv(file_path, chunksize=10000): # Adjust chunksize based on your memory capacity
filtered_chunk = chunk[['y_High_1d'
, 'y_Low_1d', 'y_Priority_1d',
'y_High_2d', 'y_Low_2d', 'y_Priority_2d',
'y_High_3d', 'y_Low_3d', 'y_Priority_3d',
'y_High_5d', 'y_Low_5d', 'y_Priority_5d'
]]
scaler.partial_fit(filtered_chunk) # Accumulate means and variances
return scaler.mean_, scaler.var_
# %%
import tensorflow as tf
if tf.test.gpu_device_name():
print('Default GPU Device: {}'.format(tf.test.gpu_device_name()))
else:
print('Please install GPU version of TF')
# %%
file_path = r"C:\Users\arisa\Desktop\combined_day.csv"
batch_size = 128
# %%
features_to_drop = ['Date', 'Symbol',
'y_High_1d', 'y_Low_1d', 'y_Priority_1d',
'y_High_2d', 'y_Low_2d', 'y_Priority_2d',
'y_High_3d', 'y_Low_3d', 'y_Priority_3d',
'y_High_5d', 'y_Low_5d', 'y_Priority_5d']
f_mean_, f_var_ = calculate_features_scaling_params(file_path, features_to_drop)
t_mean_, t_var_ = calculate_targets_scaling_params(file_path)
# %%
# Suppose mean_ and var_ have been obtained as above
x_scaler = StandardScaler()
x_scaler.mean_ = f_mean_
x_scaler.var_ = f_var_
x_scaler.scale_ = np.sqrt(f_var_)
y_scaler = StandardScaler()
y_scaler.mean_ = t_mean_
y_scaler.var_ = t_var_
y_scaler.scale_ = np.sqrt(t_var_)
# %%
# %%
row_counter = 0
# %%
def data_generator(file_path, batch_size, data_type, x_scaler, y_scaler):
chunksize = batch_size
total_rows = 301617 # Adjust with your dataset's actual number of rows
split_ratio = 0.92 # Assuming 80% for train, 20% for validation
train_rows = int(total_rows * split_ratio)
while True: # Loop forever, so the generator never terminates
for chunk in pd.read_csv(file_path, chunksize=chunksize):
if data_type == 'train' and row_counter >= train_rows:
continue # Skip the rest if we are fetching training data but have reached the end of the train set
elif data_type == 'val' and row_counter < train_rows:
row_counter += len(chunk)
continue # Skip this chunk if we are fetching validation data but are still in the train range
# Assuming your CSV has headers that match features/targets
# Normalizing the features
filtered_c = chunk.drop(['Date', 'Symbol'], axis=1)
feature_data = filtered_c.drop([
'y_High_1d', 'y_Low_1d', 'y_Priority_1d',
'y_High_2d', 'y_Low_2d', 'y_Priority_2d',
'y_High_3d', 'y_Low_3d', 'y_Priority_3d',
'y_High_5d', 'y_Low_5d', 'y_Priority_5d'], axis=1)
target_data = filtered_c[['y_High_1d'
, 'y_Low_1d', 'y_Priority_1d',
'y_High_2d', 'y_Low_2d', 'y_Priority_2d',
'y_High_3d', 'y_Low_3d', 'y_Priority_3d',
'y_High_5d', 'y_Low_5d', 'y_Priority_5d'
]]
feature_data_scaled = pd.DataFrame(x_scaler.transform(feature_data), columns=feature_data.columns)
# Assuming target_data also needs to be scaled, apply scaler separately
target_data_scaled = pd.DataFrame(y_scaler.transform(target_data), columns=target_data.columns)
# Now, feature_data_scaled and target_data_scaled are both DataFrames, scaled and ready to use
yield feature_data_scaled.values, target_data_scaled.values
row_counter += len(chunk)
# %%
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout, Input
import tensorflow as tf
def build_model():
input_shape = (6427,)
model = Sequential([
Dense(6427, activation='relu', input_shape = input_shape),
Dropout(0.25),
Dense(3200, activation='relu'),
Dropout(0.20),
Dense(1800, activation='relu'),
Dropout(0.15),
Dense(1024, activation='relu'),
Dropout(0.10),
Dense(512, activation='relu'),
Dropout(0.05),
Dense(256, activation='relu'),
Dense(128, activation='relu'),
Dense(64, activation='relu'),
Dense(32, activation='relu'),
Dense(12),
])
model.compile(optimizer='adam',
loss='mse', # Use Mean Squared Error for regression
metrics=['mae']) # Mean Absolute Error as an additional metric
return model
# %%
# Instantiate the model
model = build_model()
model.summary()
# %%
train_generator = data_generator(file_path, batch_size, 'train',x_scaler=x_scaler,y_scaler=y_scaler)
val_generator = data_generator(file_path, batch_size, 'val',x_scaler=x_scaler,y_scaler=y_scaler)
total_samples = 301617 # Assuming same example size
train_samples = int(total_samples * 0.92)
val_samples = total_samples - train_samples
steps_per_epoch = train_samples // batch_size
validation_steps = val_samples // batch_size
# Modify the model fitting call to include validation data
model.fit(
train_generator,
steps_per_epoch=steps_per_epoch,
epochs=1000,
validation_data=val_generator,
validation_steps=validation_steps
)
error:
{
"name": "UnboundLocalError",
"message": "local variable 'row_counter' referenced before assignment",
"stack": "---------------------------------------------------------------------------
UnboundLocalError Traceback (most recent call last)
Cell In[21], line 12
9 validation_steps = val_samples // batch_size
11 # Modify the model fitting call to include validation data
---> 12 model.fit(
13 train_generator,
14 steps_per_epoch=steps_per_epoch,
15 epochs=1000,
16 validation_data=val_generator,
17 validation_steps=validation_steps
18 )
File c:\\Users\\arisa\\.conda\\envs\\tf\\lib\\site-packages\\keras\\utils\\traceback_utils.py:70, in filter_traceback.<locals>.error_handler(*args, **kwargs)
67 filtered_tb = _process_traceback_frames(e.__traceback__)
68 # To get the full stack trace, call:
69 # `tf.debugging.disable_traceback_filtering()`
---> 70 raise e.with_traceback(filtered_tb) from None
71 finally:
72 del filtered_tb
Cell In[20], line 9, in data_generator(file_path, batch_size, data_type, x_scaler, y_scaler)
6 while True: # Loop forever, so the generator never terminates
7 for chunk in pd.read_csv(file_path, chunksize=chunksize):
----> 9 if data_type == 'train' and row_counter >= train_rows:
10 continue # Skip the rest if we are fetching training data but have reached the end of the train set
11 elif data_type == 'val' and row_counter < train_rows:
UnboundLocalError: local variable 'row_counter' referenced before assignment"
}
|
f749471a106353a7713789073a8c06a9
|
{
"intermediate": 0.2623293101787567,
"beginner": 0.37518081068992615,
"expert": 0.36248987913131714
}
|
46,824
|
File "C:\Users\Administrator.DESKTOP-3HB1DA0\AppData\Local\Programs\Python\Python312\Lib\site-packages\langchain_community\tools\yahoo_finance_news.py", line 9, in <module>
from langchain_community.document_loaders.web_base import WebBaseLoader
File "C:\Users\Administrator.DESKTOP-3HB1DA0\AppData\Local\Programs\Python\Python312\Lib\site-packages\langchain_community\document_loaders\__init__.py", line 163, in <module>
from langchain_community.document_loaders.pebblo import PebbloSafeLoader
File "C:\Users\Administrator.DESKTOP-3HB1DA0\AppData\Local\Programs\Python\Python312\Lib\site-packages\langchain_community\document_loaders\pebblo.py", line 5, in <module>
import pwd
ModuleNotFoundError: No module named 'pwd'
|
99a361cfcad6757ea99ec0c338a83922
|
{
"intermediate": 0.4206558167934418,
"beginner": 0.3226172924041748,
"expert": 0.25672686100006104
}
|
46,825
|
can i attach multiple textures framebuffer
|
fdb0f8c5ae2214ff9fcdee00ab7655fb
|
{
"intermediate": 0.4652262330055237,
"beginner": 0.23398855328559875,
"expert": 0.30078527331352234
}
|
46,826
|
i have a 3070 gpu
im training a NN model using tensorflow gpu, but only 10% of the gpu is engaged during training
how can i use full capacity of my gpu?
here is my code:
# %%
from sklearn.preprocessing import StandardScaler
import pandas as pd
import numpy as np
from tensorflow import keras
def calculate_features_scaling_params(file_path, features_to_drop):
scaler = StandardScaler()
for chunk in pd.read_csv(file_path, chunksize=10000): # Adjust chunksize based on your memory capacity
filtered_chunk = chunk.drop(features_to_drop, axis=1)
scaler.partial_fit(filtered_chunk) # Accumulate means and variances
return scaler.mean_, scaler.var_
def calculate_targets_scaling_params(file_path):
scaler = StandardScaler()
for chunk in pd.read_csv(file_path, chunksize=10000): # Adjust chunksize based on your memory capacity
filtered_chunk = chunk[['y_High_1d'
, 'y_Low_1d', 'y_Priority_1d',
'y_High_2d', 'y_Low_2d', 'y_Priority_2d',
'y_High_3d', 'y_Low_3d', 'y_Priority_3d',
'y_High_5d', 'y_Low_5d', 'y_Priority_5d'
]]
scaler.partial_fit(filtered_chunk) # Accumulate means and variances
return scaler.mean_, scaler.var_
# %%
import tensorflow as tf
if tf.test.gpu_device_name():
print('Default GPU Device: {}'.format(tf.test.gpu_device_name()))
else:
print('Please install GPU version of TF')
# %%
file_path = r"C:\Users\arisa\Desktop\combined_day.csv"
batch_size = 128
# %%
features_to_drop = ['Date', 'Symbol',
'y_High_1d', 'y_Low_1d', 'y_Priority_1d',
'y_High_2d', 'y_Low_2d', 'y_Priority_2d',
'y_High_3d', 'y_Low_3d', 'y_Priority_3d',
'y_High_5d', 'y_Low_5d', 'y_Priority_5d']
f_mean_, f_var_ = calculate_features_scaling_params(file_path, features_to_drop)
t_mean_, t_var_ = calculate_targets_scaling_params(file_path)
# %%
# Suppose mean_ and var_ have been obtained as above
x_scaler = StandardScaler()
x_scaler.mean_ = f_mean_
x_scaler.var_ = f_var_
x_scaler.scale_ = np.sqrt(f_var_)
y_scaler = StandardScaler()
y_scaler.mean_ = t_mean_
y_scaler.var_ = t_var_
y_scaler.scale_ = np.sqrt(t_var_)
# %%
def data_generator(file_path, batch_size, data_type, x_scaler, y_scaler):
global row_counter
chunksize = batch_size
total_rows = 301617 # Adjust with your dataset's actual number of rows
split_ratio = 0.92 # Assuming 80% for train, 20% for validation
train_rows = int(total_rows * split_ratio)
while True: # Loop forever, so the generator never terminates
for chunk in pd.read_csv(file_path, chunksize=chunksize):
if data_type == 'train' and row_counter >= train_rows:
continue # Skip the rest if we are fetching training data but have reached the end of the train set
elif data_type == 'val' and row_counter < train_rows:
row_counter += len(chunk)
continue # Skip this chunk if we are fetching validation data but are still in the train range
# Assuming your CSV has headers that match features/targets
# Normalizing the features
filtered_c = chunk.drop(['Date', 'Symbol'], axis=1)
feature_data = filtered_c.drop([
'y_High_1d', 'y_Low_1d', 'y_Priority_1d',
'y_High_2d', 'y_Low_2d', 'y_Priority_2d',
'y_High_3d', 'y_Low_3d', 'y_Priority_3d',
'y_High_5d', 'y_Low_5d', 'y_Priority_5d'], axis=1)
target_data = filtered_c[['y_High_1d'
, 'y_Low_1d', 'y_Priority_1d',
'y_High_2d', 'y_Low_2d', 'y_Priority_2d',
'y_High_3d', 'y_Low_3d', 'y_Priority_3d',
'y_High_5d', 'y_Low_5d', 'y_Priority_5d'
]]
feature_data_scaled = pd.DataFrame(x_scaler.transform(feature_data), columns=feature_data.columns)
# Assuming target_data also needs to be scaled, apply scaler separately
target_data_scaled = pd.DataFrame(y_scaler.transform(target_data), columns=target_data.columns)
# Now, feature_data_scaled and target_data_scaled are both DataFrames, scaled and ready to use
yield feature_data_scaled.values, target_data_scaled.values
row_counter += len(chunk)
# %%
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout, Input
import tensorflow as tf
def build_model():
input_shape = (6427,)
model = Sequential([
Dense(6427, activation='relu', input_shape = input_shape),
Dropout(0.25),
Dense(3200, activation='relu'),
Dropout(0.20),
Dense(1800, activation='relu'),
Dropout(0.15),
Dense(1024, activation='relu'),
Dropout(0.10),
Dense(512, activation='relu'),
Dropout(0.05),
Dense(256, activation='relu'),
Dense(128, activation='relu'),
Dense(64, activation='relu'),
Dense(32, activation='relu'),
Dense(12),
])
model.compile(optimizer='adam',
loss='mse', # Use Mean Squared Error for regression
metrics=['mae']) # Mean Absolute Error as an additional metric
return model
# %%
# Instantiate the model
model = build_model()
model.summary()
# %%
train_generator = data_generator(file_path, batch_size, 'train',x_scaler=x_scaler,y_scaler=y_scaler)
val_generator = data_generator(file_path, batch_size, 'val',x_scaler=x_scaler,y_scaler=y_scaler)
total_samples = 301617 # Assuming same example size
train_samples = int(total_samples * 0.92)
val_samples = total_samples - train_samples
steps_per_epoch = train_samples // batch_size
validation_steps = val_samples // batch_size
# Modify the model fitting call to include validation data
model.fit(
train_generator,
steps_per_epoch=steps_per_epoch,
epochs=100,
validation_data=val_generator,
validation_steps=validation_steps
)
|
d3fbe09a22b848cbf4bab2f1cb782199
|
{
"intermediate": 0.25347429513931274,
"beginner": 0.4146735370159149,
"expert": 0.3318521976470947
}
|
46,827
|
i have following code to train a NN model on my data :
# %%
from sklearn.preprocessing import StandardScaler
import pandas as pd
import numpy as np
from tensorflow import keras
def calculate_features_scaling_params(file_path, features_to_drop):
scaler = StandardScaler()
for chunk in pd.read_csv(file_path, chunksize=10000): # Adjust chunksize based on your memory capacity
filtered_chunk = chunk.drop(features_to_drop, axis=1)
scaler.partial_fit(filtered_chunk) # Accumulate means and variances
return scaler.mean_, scaler.var_
def calculate_targets_scaling_params(file_path):
scaler = StandardScaler()
for chunk in pd.read_csv(file_path, chunksize=10000): # Adjust chunksize based on your memory capacity
filtered_chunk = chunk[['y_High_1d'
, 'y_Low_1d', 'y_Priority_1d',
'y_High_2d', 'y_Low_2d', 'y_Priority_2d',
'y_High_3d', 'y_Low_3d', 'y_Priority_3d',
'y_High_5d', 'y_Low_5d', 'y_Priority_5d'
]]
scaler.partial_fit(filtered_chunk) # Accumulate means and variances
return scaler.mean_, scaler.var_
# %%
import tensorflow as tf
if tf.test.gpu_device_name():
print('Default GPU Device: {}'.format(tf.test.gpu_device_name()))
else:
print('Please install GPU version of TF')
# %%
file_path = r"C:\Users\arisa\Desktop\combined_day.csv"
batch_size = 128
# %%
features_to_drop = ['Date', 'Symbol',
'y_High_1d', 'y_Low_1d', 'y_Priority_1d',
'y_High_2d', 'y_Low_2d', 'y_Priority_2d',
'y_High_3d', 'y_Low_3d', 'y_Priority_3d',
'y_High_5d', 'y_Low_5d', 'y_Priority_5d']
f_mean_, f_var_ = calculate_features_scaling_params(file_path, features_to_drop)
t_mean_, t_var_ = calculate_targets_scaling_params(file_path)
# %%
# Suppose mean_ and var_ have been obtained as above
x_scaler = StandardScaler()
x_scaler.mean_ = f_mean_
x_scaler.var_ = f_var_
x_scaler.scale_ = np.sqrt(f_var_)
y_scaler = StandardScaler()
y_scaler.mean_ = t_mean_
y_scaler.var_ = t_var_
y_scaler.scale_ = np.sqrt(t_var_)
# %%
def data_generator(file_path, batch_size, data_type, x_scaler, y_scaler):
global row_counter
chunksize = batch_size
total_rows = 301617 # Adjust with your dataset's actual number of rows
split_ratio = 0.92 # Assuming 80% for train, 20% for validation
train_rows = int(total_rows * split_ratio)
while True: # Loop forever, so the generator never terminates
for chunk in pd.read_csv(file_path, chunksize=chunksize):
if data_type == 'train' and row_counter >= train_rows:
continue # Skip the rest if we are fetching training data but have reached the end of the train set
elif data_type == 'val' and row_counter < train_rows:
row_counter += len(chunk)
continue # Skip this chunk if we are fetching validation data but are still in the train range
# Assuming your CSV has headers that match features/targets
# Normalizing the features
filtered_c = chunk.drop(['Date', 'Symbol'], axis=1)
feature_data = filtered_c.drop([
'y_High_1d', 'y_Low_1d', 'y_Priority_1d',
'y_High_2d', 'y_Low_2d', 'y_Priority_2d',
'y_High_3d', 'y_Low_3d', 'y_Priority_3d',
'y_High_5d', 'y_Low_5d', 'y_Priority_5d'], axis=1)
target_data = filtered_c[['y_High_1d'
, 'y_Low_1d', 'y_Priority_1d',
'y_High_2d', 'y_Low_2d', 'y_Priority_2d',
'y_High_3d', 'y_Low_3d', 'y_Priority_3d',
'y_High_5d', 'y_Low_5d', 'y_Priority_5d'
]]
feature_data_scaled = pd.DataFrame(x_scaler.transform(feature_data), columns=feature_data.columns)
# Assuming target_data also needs to be scaled, apply scaler separately
target_data_scaled = pd.DataFrame(y_scaler.transform(target_data), columns=target_data.columns)
# Now, feature_data_scaled and target_data_scaled are both DataFrames, scaled and ready to use
yield feature_data_scaled.values, target_data_scaled.values
row_counter += len(chunk)
# %%
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout, Input
import tensorflow as tf
def build_model():
input_shape = (6427,)
model = Sequential([
Dense(6427, activation='relu', input_shape = input_shape),
Dropout(0.25),
Dense(3200, activation='relu'),
Dropout(0.20),
Dense(1800, activation='relu'),
Dropout(0.15),
Dense(1024, activation='relu'),
Dropout(0.10),
Dense(512, activation='relu'),
Dropout(0.05),
Dense(256, activation='relu'),
Dense(128, activation='relu'),
Dense(64, activation='relu'),
Dense(32, activation='relu'),
Dense(12),
])
model.compile(optimizer='adam',
loss='mse', # Use Mean Squared Error for regression
metrics=['mae']) # Mean Absolute Error as an additional metric
return model
# %%
# Instantiate the model
model = build_model()
model.summary()
# %%
train_generator = data_generator(file_path, batch_size, 'train',x_scaler=x_scaler,y_scaler=y_scaler)
val_generator = data_generator(file_path, batch_size, 'val',x_scaler=x_scaler,y_scaler=y_scaler)
total_samples = 301617 # Assuming same example size
train_samples = int(total_samples * 0.92)
val_samples = total_samples - train_samples
steps_per_epoch = train_samples // batch_size
validation_steps = val_samples // batch_size
# Modify the model fitting call to include validation data
model.fit(
train_generator,
steps_per_epoch=steps_per_epoch,
epochs=100,
validation_data=val_generator,
validation_steps=validation_steps
)
change the code so i can train a XGBoost ensmble decision tree instead of current NN model
|
2256aa604db13c6798716c04e1a218e7
|
{
"intermediate": 0.23331570625305176,
"beginner": 0.44527754187583923,
"expert": 0.3214067220687866
}
|
46,828
|
My code is not working, the canvas is not plotting the analyser data, help me out:
analyser.fftSize = 2048;
const bufferLength = analyser.frequencyBinCount;
const dataArray = new Uint8Array(bufferLength);
const canvas = document.getElementById("canvas");
const canvasCtx = canvas.getContext("2d");
canvasCtx.clearRect(0, 0, 300, 300);
function draw(){
const drawVisual = requestAnimationFrame(draw);
analyser.getByteTimeDomainData(dataArray);
canvasCtx.fillStyle = "rgb(200 200 200)";
canvasCtx.fillRect(0, 0, 300, 300);
canvasCtx.lineWidth = 2;
canvasCtx.strokeStyle = "rgb(0 0 0)";
canvasCtx.beginPath();
const sliceWidth = 300 / bufferLength;
let x = 0;
for (let i = 0; i < bufferLength; i++) {
const v = dataArray[i] / 128.0;
const y = v * (300 / 2);
if (i === 0) {
canvasCtx.moveTo(x, y);
} else {
canvasCtx.lineTo(x, y);
}
x += sliceWidth;
}
}
draw();
|
9373bcb8bf6ce25542c716dfa538373f
|
{
"intermediate": 0.6800503134727478,
"beginner": 0.17430134117603302,
"expert": 0.14564840495586395
}
|
46,829
|
i have following code to train a NN model on my data :
# %%
from sklearn.preprocessing import StandardScaler
import pandas as pd
import numpy as np
from tensorflow import keras
def calculate_features_scaling_params(file_path, features_to_drop):
scaler = StandardScaler()
for chunk in pd.read_csv(file_path, chunksize=10000): # Adjust chunksize based on your memory capacity
filtered_chunk = chunk.drop(features_to_drop, axis=1)
scaler.partial_fit(filtered_chunk) # Accumulate means and variances
return scaler.mean_, scaler.var_
def calculate_targets_scaling_params(file_path):
scaler = StandardScaler()
for chunk in pd.read_csv(file_path, chunksize=10000): # Adjust chunksize based on your memory capacity
filtered_chunk = chunk[[‘y_High_1d’
, ‘y_Low_1d’, ‘y_Priority_1d’,
‘y_High_2d’, ‘y_Low_2d’, ‘y_Priority_2d’,
‘y_High_3d’, ‘y_Low_3d’, ‘y_Priority_3d’,
‘y_High_5d’, ‘y_Low_5d’, ‘y_Priority_5d’
]]
scaler.partial_fit(filtered_chunk) # Accumulate means and variances
return scaler.mean_, scaler.var_
# %%
import tensorflow as tf
if tf.test.gpu_device_name():
print(‘Default GPU Device: {}’.format(tf.test.gpu_device_name()))
else:
print(‘Please install GPU version of TF’)
# %%
file_path = r"C:\Users\arisa\Desktop\combined_day.csv"
batch_size = 128
# %%
features_to_drop = [‘Date’, ‘Symbol’,
‘y_High_1d’, ‘y_Low_1d’, ‘y_Priority_1d’,
‘y_High_2d’, ‘y_Low_2d’, ‘y_Priority_2d’,
‘y_High_3d’, ‘y_Low_3d’, ‘y_Priority_3d’,
‘y_High_5d’, ‘y_Low_5d’, ‘y_Priority_5d’]
f_mean_, f_var_ = calculate_features_scaling_params(file_path, features_to_drop)
t_mean_, t_var_ = calculate_targets_scaling_params(file_path)
# %%
# Suppose mean_ and var_ have been obtained as above
x_scaler = StandardScaler()
x_scaler.mean_ = f_mean_
x_scaler.var_ = f_var_
x_scaler.scale_ = np.sqrt(f_var_)
y_scaler = StandardScaler()
y_scaler.mean_ = t_mean_
y_scaler.var_ = t_var_
y_scaler.scale_ = np.sqrt(t_var_)
# %%
def data_generator(file_path, batch_size, data_type, x_scaler, y_scaler):
global row_counter
chunksize = batch_size
total_rows = 301617 # Adjust with your dataset’s actual number of rows
split_ratio = 0.92 # Assuming 80% for train, 20% for validation
train_rows = int(total_rows * split_ratio)
while True: # Loop forever, so the generator never terminates
for chunk in pd.read_csv(file_path, chunksize=chunksize):
if data_type == ‘train’ and row_counter >= train_rows:
continue # Skip the rest if we are fetching training data but have reached the end of the train set
elif data_type == ‘val’ and row_counter < train_rows:
row_counter += len(chunk)
continue # Skip this chunk if we are fetching validation data but are still in the train range
# Assuming your CSV has headers that match features/targets
# Normalizing the features
filtered_c = chunk.drop([‘Date’, ‘Symbol’], axis=1)
feature_data = filtered_c.drop([
‘y_High_1d’, ‘y_Low_1d’, ‘y_Priority_1d’,
‘y_High_2d’, ‘y_Low_2d’, ‘y_Priority_2d’,
‘y_High_3d’, ‘y_Low_3d’, ‘y_Priority_3d’,
‘y_High_5d’, ‘y_Low_5d’, ‘y_Priority_5d’], axis=1)
target_data = filtered_c[[‘y_High_1d’
, ‘y_Low_1d’, ‘y_Priority_1d’,
‘y_High_2d’, ‘y_Low_2d’, ‘y_Priority_2d’,
‘y_High_3d’, ‘y_Low_3d’, ‘y_Priority_3d’,
‘y_High_5d’, ‘y_Low_5d’, ‘y_Priority_5d’
]]
feature_data_scaled = pd.DataFrame(x_scaler.transform(feature_data), columns=feature_data.columns)
# Assuming target_data also needs to be scaled, apply scaler separately
target_data_scaled = pd.DataFrame(y_scaler.transform(target_data), columns=target_data.columns)
# Now, feature_data_scaled and target_data_scaled are both DataFrames, scaled and ready to use
yield feature_data_scaled.values, target_data_scaled.values
row_counter += len(chunk)
# %%
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout, Input
import tensorflow as tf
def build_model():
input_shape = (6427,)
model = Sequential([
Dense(6427, activation=‘relu’, input_shape = input_shape),
Dropout(0.25),
Dense(3200, activation=‘relu’),
Dropout(0.20),
Dense(1800, activation=‘relu’),
Dropout(0.15),
Dense(1024, activation=‘relu’),
Dropout(0.10),
Dense(512, activation=‘relu’),
Dropout(0.05),
Dense(256, activation=‘relu’),
Dense(128, activation=‘relu’),
Dense(64, activation=‘relu’),
Dense(32, activation=‘relu’),
Dense(12),
])
model.compile(optimizer=‘adam’,
loss=‘mse’, # Use Mean Squared Error for regression
metrics=[‘mae’]) # Mean Absolute Error as an additional metric
return model
# %%
# Instantiate the model
model = build_model()
model.summary()
# %%
train_generator = data_generator(file_path, batch_size, ‘train’,x_scaler=x_scaler,y_scaler=y_scaler)
val_generator = data_generator(file_path, batch_size, ‘val’,x_scaler=x_scaler,y_scaler=y_scaler)
total_samples = 301617 # Assuming same example size
train_samples = int(total_samples * 0.92)
val_samples = total_samples - train_samples
steps_per_epoch = train_samples // batch_size
validation_steps = val_samples // batch_size
# Modify the model fitting call to include validation data
model.fit(
train_generator,
steps_per_epoch=steps_per_epoch,
epochs=100,
validation_data=val_generator,
validation_steps=validation_steps
)
change the code to train a XGBoost ensmble decision tree instead of current NN model, keep other prats the same
just make necessary changes to train xgboost instead of NN
|
a037aad3b841a4a5590b4b3c6619b1eb
|
{
"intermediate": 0.40946394205093384,
"beginner": 0.36811649799346924,
"expert": 0.22241954505443573
}
|
46,830
|
why is this changing my self.list variable?
list_a = [{'a':'z',},{'a':'zz',},{'a':'zzz',}]
class Test:
def __init__(self, list):
self.list = list
def update(self):
self.element = random.choice(self.list)
for element in self.list:
print(element['a'])
list_b = ['1','2','3']
self.element['a'] = self.element['a'] + random.choice(list_b)
test = Test(list_a)
for i in range(10):
test.update()
time.sleep(5)
|
1d0407ed7d2408ca248af4942a11e836
|
{
"intermediate": 0.2580011487007141,
"beginner": 0.5805413126945496,
"expert": 0.16145753860473633
}
|
46,831
|
# Assuming ‘price_data’ is a data frame with the first column as ‘Date’ and the rest as asset prices.
# Converting price data to log returns for individual assets
prices <- as.matrix(price_data[,-1]) # Exclude ‘Date’ column and convert to a matrix
log_returns <- diff(log(prices)) # Calculate log returns
# Handling missing values if present
returns_data <- na.omit(log_returns) # Remove NAs from returns data
# Calculate the index returns as the average of all asset log returns per time period
# This assumes an equally weighted index
index_log_returns <- rowMeans(returns_data, na.rm = TRUE) # Mean log return of all assets
# Adding index returns to the matrix of returns for analysis
# Ensure that ‘index_log_returns’ is properly formatted to match dimensions
# It should be a single column matrix to bind with ‘returns_data’
index_log_returns_matrix <- matrix(index_log_returns, ncol = 1)
# Adding the index returns as a new column in ‘returns_data’
all_returns_data <- cbind(returns_data, index_log_returns_matrix)
# Providing appropriate column names, especially for the newly added index
colnames(all_returns_data) <- c(colnames(returns_data), "Index Returns")
# Ensure all_returns_data is in a format acceptable by fPortfolio::basicStats
library(fPortfolio)
if (!is.timeSeries(all_returns_data)) {
all_returns_data <- as.timeSeries(all_returns_data)
}
# Estimate Summary Statistics for the Returns (Both Individual Assets and the “Market Index”)
summary_stats <- basicStats(all_returns_data)
print(summary_stats)
Above code i have done is for Estimate the Summary Statistics for the Returns (Index and Asset) and comment on it. Note, the answer must state clearly which return measure you are using and the reason for opting that measure as an estimate for return.
Insert the code chunk below. this question
And the case was Case Begins:
You have recently joined as a Portfolio Manager at Morgan Stanley. The first task assigned to you is to create a portfolio for a client who is interested in investing 1 million Euro in secondary markets. He wants the money to be "fully-invested", but he is not aware of weight allocation in a scientific manner. Your employer has given you the responsibility to not only select the bunch of asset class for investment, but also allocate weight so as to garner more returns with limited risk.
After analyzing the market trends, you are bullish in your approach and have narrowed down to the three asset classes for selection of Portfolio Universe.
1. Stocks - Google, Tesla, Pfizer, Shell, AT&T
2. Forex - USDINR, EURUSD, USDCAD, USDCHF, NZDUSD
3. Commodities - Crude, Natural Gas, Gold, Wheat, Ethanol
Asset Tickr
Google GOOGL
Tesla TSLA
Pfizer PFE
Shell SHEL
AT&T T
USDINR USDINR
EURUSD EURUSD
USDCAD USDCAD
USDCHF USDCHF
NZDUSD NZDUSD
Crude WTI
Natural Gas NG
Gold XAU
Wheat W1
Ethanol EH
Note: Portfolio constraints from your supervisor
1. Portfolio should consist of 5 assets
2. Atleast one from Commodity and one from Forex
Now my second question is Portfolio Universe Selection
Since, you have to make a Portfolio constituting of five assets only, you select the assets based on the basis of two strategies as shared below.
Strategy 1:
Step 1: Calculate the Reward to Risk - (Median Return/Standard Deviation)
Step 2: Rank the Reward to Risk for Assets, then choose the top five asset while maintaining the constraint (Atleast one Commodity and one Forex)
Note: In case of tie, you choose the asset with higher mean return.
Strategy 2:
Based on Price/Earning Ratio while maintaining the constraint (Atleast one Commodity and one Forex)
Note: After filtering the 5 assets, export the file either as .csv or .xlsx.............................Can you please give me codes for second question
|
8c58480c332a6c5c81c50844bc4e900e
|
{
"intermediate": 0.3548296093940735,
"beginner": 0.38910555839538574,
"expert": 0.25606486201286316
}
|
46,832
|
I have log_returns calculated but for all 15 assets
If you already have log_returns calculated for all 15 assets but need to apply optimizations for subsets defined by strategy1_selection and strategy2_selection, your task is now to filter log_returns to match these subsets and then proceed with the optimization processes for each.
### Step 1: Filter Log Returns for Each Strategy
Assuming log_returns is a data frame (or a matrix) where rows represent dates/time periods, and columns represent assets, and you have a column naming convention that matches the tickers used in your strategy selections, you can do the following:
# Assuming ‘log_returns’ is your data frame and ‘strategy1_selectionTicker' and 'strategy2_selection
Ticker’
# contain the asset tickers for each strategy’s selected assets
# Filter log_returns for strategy1 assets
log_returns_strategy1 <- log_returns[, colnames(log_returns) %in% strategy1_selection$Ticker]
# Filter log_returns for strategy2 assets
log_returns_strategy2 <- log_returns[, colnames(log_returns) %in% strategy2_selection.................Can you continue this
|
dd910c30212418fabfad00ec1f525169
|
{
"intermediate": 0.2869262993335724,
"beginner": 0.18534153699874878,
"expert": 0.5277321934700012
}
|
46,833
|
I have log_returns calculated but for all 15 assets
If you already have log_returns calculated for all 15 assets but need to apply optimizations for subsets defined by strategy1_selection and strategy2_selection, your task is now to filter log_returns to match these subsets and then proceed with the optimization processes for each.
### Step 1: Filter Log Returns for Each Strategy
Assuming log_returns is a data frame (or a matrix) where rows represent dates/time periods, and columns represent assets, and you have a column naming convention that matches the tickers used in your strategy selections, you can do the following:
# Assuming ‘log_returns’ is your data frame and ‘strategy1_selectionTicker’ and 'strategy2_selection
Ticker’
# contain the asset tickers for each strategy’s selected assets
# Filter log_returns for strategy1 assets
log_returns_strategy1 <- log_returns[, colnames(log_returns) %in% strategy1_selection$Ticker]
# Filter log_returns for strategy2 assets
log_returns_strategy2 <- log_returns[, colnames(log_returns) %in% strategy2_selection…Can you continue this
|
e1a2c273645b79bc9997033c35c0ce83
|
{
"intermediate": 0.2793777585029602,
"beginner": 0.17567184567451477,
"expert": 0.5449503660202026
}
|
46,834
|
I have log_returns calculated but for all 15 assets
If you already have log_returns calculated for all 15 assets but need to apply optimizations for subsets defined by strategy1_selection and strategy2_selection, your task is now to filter log_returns to match these subsets and then proceed with the optimization processes for each.
### Step 1: Filter Log Returns for Each Strategy
Assuming log_returns is a data frame (or a matrix) where rows represent dates/time periods, and columns represent assets, and you have a column naming convention that matches the tickers used in your strategy selections, you can do the following:
# Assuming ‘log_returns’ is your data frame and ‘strategy1_selectionTicker’ and 'strategy2_selection
Ticker’
# contain the asset tickers for each strategy’s selected assets
# Filter log_returns for strategy1 assets
log_returns_strategy1 <- log_returns[, colnames(log_returns) %in% strategy1_selection$Ticker]
# Filter log_returns for strategy2 assets
log_returns_strategy2 <- log_returns[, colnames(log_returns) %in% strategy2_selection…Can you continue this
|
504c76075ae0661903afceab16cf3fe0
|
{
"intermediate": 0.2793777585029602,
"beginner": 0.17567184567451477,
"expert": 0.5449503660202026
}
|
46,835
|
log_returns_strategy1 <- log_returns[, colnames(log_returns) %in% strategy1_selection$Ticker]
I ran the above for strategy 1 which has 5 assets but after the above table got created their is only 4 assets ....What to do?
|
7fd67e1d3630aeccd3f2cedec9098fc8
|
{
"intermediate": 0.35487499833106995,
"beginner": 0.30430811643600464,
"expert": 0.340816855430603
}
|
46,836
|
From the beginning to the end, there should be an English sentence and a Persian sentence on the right side, and put the Persian and English text side by side in the table.
.The English legal system also applies to Wales and is often closely followed in Northern Ireland. It has two types of law:
"(a) Common law: the basic principles of law as applied in past cases (case law) where Parliament has not made any special rulings or Acts. The existence of common law means that a judge does not have to refer to Parliament when an unfamiliar case comes up. Instead, be looks for precedents (past cases of a similar kind) and for general principles;
(b) Statute law: specific laws passed as acts of Parliament. Statute law takes priority aver common law so far as it covers the case being heard. If it does not give specific guidance (e.g. in a new type of case which was not envisaged when the law was passed), then judges apply the common law, trying as far as possible to do what they think Parliament would have wanted them to, if it had known about the case in hand."
Extracts from an Article published in The International Business Topics", collected by D. Cotton, 1984, Bell and Hyman Publications, p. 133.
7
This system gives a lot of power to judges. They do not, however, have an entirely free hand. They are always bound by the decisions of more important courts than their own- and the most powerful court in the country is Parliament. Judges need not give the same verdict as a higher court; that would be too vague a rule and would lead to bad decisions. They must, however, apply the law in the same way and adopt the higher court's reasoning in their own consideration of the case.نظام حقوقی انگلستان
نظام حقوقی انگلستان در ولز نیز اعمال شده و اغلب به دقت در ایرلند شمالی متابعت می شود. این نظام دارای دو نوع حقوق است:
۲
(الف) كامن لا: " كامن لا عبارت از اصول اساسی حقوق است که در دعاوی گذشته رویه قضایی که مجلس قواعد یا قوانین ۱۰ خاصی مقرر نکرده، بکار رفته است وجود کامن لا بدین معنی است که به هنگام آمدن یک پرونده متفاوت با پرونده های قبلی قاضی مجبور نیست به [مصوبات مجلس رجوع کند؛ بلکه به جای آن در سابقه قضایی (پرونده های قبلی از نوع مشابه و اصول کلی ۱۱ کنکاش به عمل می آورد؛
پیش(ب) حقوق موضوعه حقوق موضوعه عبارت از قوانین خاصی است که تحت عنوان قوانین مجلس به تصویب رسیده اند. حقوق موضوعه بركا من لا اولویت مییابد تا جایی که پرونده های تحت بررسی راهم در بر میگیرد. در صورتی که حقوق موضوعه راه حل معینی ارائه ندهد برای مثال در نوع جدیدی از دعوی که در زمان تصویب قانون پیش بینی نشده است ،قضات کامن لا را اعمال میکنند | و در این راستا تا جای ممکن تلاش میکنند تا آنچه را که تصور میکنند مجلس در صورت آگاهی از پرونده تحت رسیدگی از آنان طلب میکرد انجام
دهند.
این نظام حقوقی قدرت زیادی به قضات ۷ اعطا می.کند با اینحال دست آنها کاملاً باز نیست. آنها همواره مقید به آرای دادگاههای مهمتر از خود و قدرتمندترین دادگاه کشور که مجلس است میباشند. قضات لازم نیست همان حکمی ۱۰ را که دادگاه بالاتر داده است صادر کنند ۱۱ این ،آمر ،قاعده بسیار مبهمی ۱۲ بوده و منجر به آرای نامطلوبی خواهد شد. [اما] به هر حال قضات باید قانون را به همان طریق اعمال نموده و استدلال ۱۳ دادگاه بالاتر ۱۴ را در بررسی خود نسبت به پرونده، اتخاذ نمایند.
|
83b63ce35511bd602ee337ce258b8af8
|
{
"intermediate": 0.22441741824150085,
"beginner": 0.699193000793457,
"expert": 0.07638955861330032
}
|
46,837
|
From the beginning to the end, there should be an English sentence and a Persian sentence on the right side, and put the Persian and English text side by side in the table.
.The English legal system also applies to Wales and is often closely followed in Northern Ireland. It has two types of law:
"(a) Common law: the basic principles of law as applied in past cases (case law) where Parliament has not made any special rulings or Acts. The existence of common law means that a judge does not have to refer to Parliament when an unfamiliar case comes up. Instead, be looks for precedents (past cases of a similar kind) and for general principles;
(b) Statute law: specific laws passed as acts of Parliament. Statute law takes priority aver common law so far as it covers the case being heard. If it does not give specific guidance (e.g. in a new type of case which was not envisaged when the law was passed), then judges apply the common law, trying as far as possible to do what they think Parliament would have wanted them to, if it had known about the case in hand."
Extracts from an Article published in The International Business Topics", collected by D. Cotton, 1984, Bell and Hyman Publications, p. 133.
7
This system gives a lot of power to judges. They do not, however, have an entirely free hand. They are always bound by the decisions of more important courts than their own- and the most powerful court in the country is Parliament. Judges need not give the same verdict as a higher court; that would be too vague a rule and would lead to bad decisions. They must, however, apply the law in the same way and adopt the higher court's reasoning in their own consideration of the case.نظام حقوقی انگلستان
نظام حقوقی انگلستان در ولز نیز اعمال شده و اغلب به دقت در ایرلند شمالی متابعت می شود. این نظام دارای دو نوع حقوق است:
۲
(الف) كامن لا: " كامن لا عبارت از اصول اساسی حقوق است که در دعاوی گذشته رویه قضایی که مجلس قواعد یا قوانین ۱۰ خاصی مقرر نکرده، بکار رفته است وجود کامن لا بدین معنی است که به هنگام آمدن یک پرونده متفاوت با پرونده های قبلی قاضی مجبور نیست به [مصوبات مجلس رجوع کند؛ بلکه به جای آن در سابقه قضایی (پرونده های قبلی از نوع مشابه و اصول کلی ۱۱ کنکاش به عمل می آورد؛
پیش(ب) حقوق موضوعه حقوق موضوعه عبارت از قوانین خاصی است که تحت عنوان قوانین مجلس به تصویب رسیده اند. حقوق موضوعه بركا من لا اولویت مییابد تا جایی که پرونده های تحت بررسی راهم در بر میگیرد. در صورتی که حقوق موضوعه راه حل معینی ارائه ندهد برای مثال در نوع جدیدی از دعوی که در زمان تصویب قانون پیش بینی نشده است ،قضات کامن لا را اعمال میکنند | و در این راستا تا جای ممکن تلاش میکنند تا آنچه را که تصور میکنند مجلس در صورت آگاهی از پرونده تحت رسیدگی از آنان طلب میکرد انجام
دهند.
این نظام حقوقی قدرت زیادی به قضات ۷ اعطا می.کند با اینحال دست آنها کاملاً باز نیست. آنها همواره مقید به آرای دادگاههای مهمتر از خود و قدرتمندترین دادگاه کشور که مجلس است میباشند. قضات لازم نیست همان حکمی ۱۰ را که دادگاه بالاتر داده است صادر کنند ۱۱ این ،آمر ،قاعده بسیار مبهمی ۱۲ بوده و منجر به آرای نامطلوبی خواهد شد. [اما] به هر حال قضات باید قانون را به همان طریق اعمال نموده و استدلال ۱۳ دادگاه بالاتر ۱۴ را در بررسی خود نسبت به پرونده، اتخاذ نمایند.
|
e4e46d238b5c5da2ee7cc721ef2604c7
|
{
"intermediate": 0.22441741824150085,
"beginner": 0.699193000793457,
"expert": 0.07638955861330032
}
|
46,838
|
Create a ROBLOX game called “Super Why!: Super Readers to the Rescue!” where the game takes place in the land of Storybrook Village, a place where all Fairytale characters reside together in a loving, peaceful community. Whyatt will begin the show by greeting the viewers when he suddenly gets a call from someone who needs help. After investigating the problem at hand, Whyatt then summons the other super readers by sending out a call.
Whyatt, Pig, Red Riding Hood, and Princess Pea then each appear at the Book Club before going inside as Whyatt then uploads the problem into the Super Duper Computer by plugging his PDA-like device into it. The character with the problem then stands behind a small lectern made of dominos and explains their situation. After discussion, Princess Pea then casts a spell to summon the appropriate book.
Whyatt then reads the title of the book as they then transform and fly inside of it.
Upon landing, Super Why will begin to read a few sentences of the book with the viewer, asking them to read along. It's then a parallel is drawn in which the character with a problem notices how the book characters face the same problem as they are right now.
The goal of the Super Readers is to follow the storyline to solve the problem. As they progress through the events of the story, they encounter obstacles, which can be solved by applying their literacy skills to change the story. As they overcome these obstacles, they are rewarded with red glittery "Super Letters" that are then eventually uploaded back onto the Super Duper Computer to spell out the word that makes up the solution to the characters problem. After collecting the Super Letters and uploading them onto the Super Duper Computer in order to get the Super Story answer, Whyatt will then ask why that word in particular is the right word. Someone else will then explain it before everybody then goes on their own to do what they had been doing previously. Except for Whyatt, who then goes to the problem directly in order to see how it went.
|
5eda68c32491cd9c45bbce56299edee0
|
{
"intermediate": 0.4363112151622772,
"beginner": 0.3709159195423126,
"expert": 0.19277285039424896
}
|
46,839
|
Heres how:
Enable asahi-krun copr: https://copr.fedorainfracloud.org/coprs/slp/asahi-krun/
Install krunvm
Enter these commands: sudo dnf remove libkrunfw libkrun, sudo dnf install libkrunfw-4.0.0~asahikrun-1.fc39 libkrun-1.6.0~asahikrun-3.fc39
Clone virglrenderer repo with asahi-native context: git clone https://gitlab.freedesktop.org/slp/virglrenderer.git cd virglrenderer git checkout asahi-native-context-wip
Run: meson setup build -Ddrm-asahi-experimental=true
Then do cd build then run sudo ninja install
now do buildah unshare, and run: krunvm create --cpus 6 --mem 6144 --name asahi-krun quay.io/slopezpa/asahi-krun
then do krunvm start asahi-krun /bin/bash
now run dhclient, then dnf install 'dnf-command(copr)'
then add the asahi-mesa copr: dnf copr enable slp/asahi-mesa
then install the packages necessary: dnf install mesa-libGL-devel mesa-vulkan-drivers mesa-libOSMesa mesa-libEGL mesa-libglapi mesa-va-drivers mesa-libGL mesa-dri-drivers mesa-vdpau-drivers mesa-libgbm-devel mesa-libgbm mesa-omx-drivers mesa-libd3d mesa-libxatracker-devel
then exit then enter the vm: krunvm start asahi-krun how do I do this on aarch64 fedora linux? I dont have any build tools installed either so i need those too
|
14d1b39bfb590bd9a4e0b613d7ea1d3e
|
{
"intermediate": 0.6258096694946289,
"beginner": 0.1468244045972824,
"expert": 0.22736597061157227
}
|
46,840
|
How do I setup a gl framebuffer so I can visualize the stencil buffer?
|
62d9973fa532e34c742233c5b903abff
|
{
"intermediate": 0.49672335386276245,
"beginner": 0.1842087209224701,
"expert": 0.31906798481941223
}
|
46,841
|
void main()
{
FragColor = vec4(vec3(gl_FragCoord.z), 1.0);
}
You can do this to visualize the depth buffer, but how do I visualize a stencil buffer?
|
9b206bc25958359d62e5f6df64f1099f
|
{
"intermediate": 0.6474024057388306,
"beginner": 0.10054154694080353,
"expert": 0.2520561218261719
}
|
46,842
|
void main()
{
FragColor = vec4(vec3(gl_FragCoord.z), 1.0);
}
You can do this to visualize a depth buffer, but how do you visualize a stencil buffer/
|
8242c53096ff4de0eb938e92fde6b9cd
|
{
"intermediate": 0.5959963202476501,
"beginner": 0.14033736288547516,
"expert": 0.2636662423610687
}
|
46,843
|
[root@asahi-krun /]# curl -OL https://mirrors.cloud.tencent.com/rpmfusion/nonfree/fedora/steam/38/x86_64/s/steam-1.0.0.78-1.fc38.i686.rpm
% Total % Received % Xferd Average Speed Time Time Time Current
Dload Upload Total Spent Left Speed
100 159 100 159 0 0 162 0 --:--:-- --:--:-- --:--:-- 162
[root@asahi-krun /]# rpm2cpio steam-1.0.0.78-1.fc38.i686.rpm | cpio -idvmargument is not an RPM package
cpio: premature end of archive
[root@asahi-krun /]#
|
c953b148bf44537ee1be972e7e6e1544
|
{
"intermediate": 0.3498469889163971,
"beginner": 0.3491653501987457,
"expert": 0.3009876608848572
}
|
46,844
|
[root@asahi-krun /]# curl -OL https://mirrors.cloud.tencent.com/rpmfusion/nonfree/fedora/steam/38/x86_64/s/steam-1.0.0.78-1.fc38.i686.rpm
% Total % Received % Xferd Average Speed Time Time Time Current
Dload Upload Total Spent Left Speed
100 159 100 159 0 0 162 0 --:--:-- --:--:-- --:--:-- 162
[root@asahi-krun /]# rpm2cpio steam-1.0.0.78-1.fc38.i686.rpm | cpio -idvmargument is not an RPM package
cpio: premature end of archive
[root@asahi-krun /]#
|
1ab7eb70f635baaabef47f3b46aefd45
|
{
"intermediate": 0.3498469889163971,
"beginner": 0.3491653501987457,
"expert": 0.3009876608848572
}
|
46,845
|
[root@asahi-krun /]# curl -OL https://mirrors.cloud.tencent.com/rpmfusion/nonfree/fedora/steam/38/x86_64/s/steam-1.0.0.78-1.fc38.i686.rpm
% Total % Received % Xferd Average Speed Time Time Time Current
Dload Upload Total Spent Left Speed
100 159 100 159 0 0 162 0 --:--:-- --:--:-- --:--:-- 162
[root@asahi-krun /]# rpm2cpio steam-1.0.0.78-1.fc38.i686.rpm | cpio -idvmargument is not an RPM package
cpio: premature end of archive
[root@asahi-krun /]#
|
189778af2b5417243cba8b411fe2790d
|
{
"intermediate": 0.3498469889163971,
"beginner": 0.3491653501987457,
"expert": 0.3009876608848572
}
|
46,846
|
Im storing stencil here:
glRenderbufferStorage(GL_RENDERBUFFER, GL_DEPTH24_STENCIL8, getWidth(), getHeight());
glFramebufferRenderbuffer(GL_FRAMEBUFFER, GL_DEPTH_STENCIL_ATTACHMENT, GL_RENDERBUFFER, getRenderbuffer());
How do I draw thestencil values to the screen for debugging?
|
dde0474db56165bae420d88771302e40
|
{
"intermediate": 0.8379887342453003,
"beginner": 0.06071774661540985,
"expert": 0.10129353404045105
}
|
46,847
|
Is it possible to establish peer to peer tcp connection if both these peers are behind NAT?
|
11a80a31ad66dc5574c4cb36a7ac7c4e
|
{
"intermediate": 0.42646482586860657,
"beginner": 0.23952484130859375,
"expert": 0.33401036262512207
}
|
46,848
|
Can I have a depth buffer with no stencil ina renderbuffer opengl
|
68753f84e853ae7818e5523d070d4fc7
|
{
"intermediate": 0.4190022051334381,
"beginner": 0.17926809191703796,
"expert": 0.4017297625541687
}
|
46,849
|
does liner layer really linear?
|
7702e60beaa2bdf77ad1dee955ebd55b
|
{
"intermediate": 0.24359922111034393,
"beginner": 0.18169622123241425,
"expert": 0.5747045278549194
}
|
46,850
|
Let's say I have the output from a GPT2LMHeadModel, where I have a tensor of shape (2x2x50257). The issue I now need to sample this tensor using TopPLogitsWarper which expects size (batch_size, 50257). How Do I perform top p sampling?
|
97262df0728843a27a1ccced00bd1e23
|
{
"intermediate": 0.399284303188324,
"beginner": 0.055464256554841995,
"expert": 0.5452514290809631
}
|
46,851
|
Hey chatgpt, I'm having an issue where I need to apply top p sampling to the logits of GPT2LMHeadModel. How would I go about this, if it is even possible?
|
cd4a9ae1565d80b22b2530380750e5c1
|
{
"intermediate": 0.4458897113800049,
"beginner": 0.09450651705265045,
"expert": 0.45960381627082825
}
|
46,852
|
what is the best cad software for linux for aircraft and mechanical design?
|
db5eeb9d9e67035bb7fc3219c15ba4b9
|
{
"intermediate": 0.2313954383134842,
"beginner": 0.25406309962272644,
"expert": 0.5145414471626282
}
|
46,853
|
Hello Chatgpt, I have a problem. I am attempting to manually apply a TopPLogitsWarper to the output of a GPT2LMHeadModel. My code is as below for the external part of my code which extracts the LM's logits.
with torch.no_grad():
out_logits = model(encoded_batch, attention_mask=attn_mask).logits
# torch.save(out_logits, "outlogits.pt")
from transformers import TopPLogitsWarper
logits_warp = TopPLogitsWarper(0.9)
shift_logits = out_logits[..., :-1, :].contiguous()
out_logits = logits_warp.__call__(input_ids=None, scores = shift_logits)
....
However, when attempting to use the TopPLogitsWarper, with the code below, it gives the following error at the line 'indices_to_remove = sorted_indices_to_remove.scatter(1, sorted_indices, sorted_indices_to_remove)' where it states ' [86,0,0] Assertion `idx_dim >= 0 && idx_dim < index_size && "index out of bounds"` failed.
Below is the code for __call__:
def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor:
sorted_logits, sorted_indices = torch.sort(scores, descending=False)
cumulative_probs = sorted_logits.softmax(dim=-1).cumsum(dim=-1)
# Remove tokens with cumulative top_p above the threshold (token with 0 are kept)
sorted_indices_to_remove = cumulative_probs <= (1 - self.top_p)
# Keep at least min_tokens_to_keep
sorted_indices_to_remove[..., -self.min_tokens_to_keep :] = 0
# scatter sorted tensors to original indexing
indices_to_remove = sorted_indices_to_remove.scatter(1, sorted_indices, sorted_indices_to_remove)
scores = scores.masked_fill(indices_to_remove, self.filter_value)
return scores
can you please help me?
|
9a73c842975a50e12ac2c2c563a129fd
|
{
"intermediate": 0.5247091054916382,
"beginner": 0.10414484888315201,
"expert": 0.371146023273468
}
|
46,854
|
# Form implementation generated from reading ui file '.\linkage\home_page_ui.ui'
#
# Created by: PyQt6 UI code generator 6.6.1
#
# WARNING: Any manual changes made to this file will be lost when pyuic6 is
# run again. Do not edit this file unless you know what you are doing.
from PyQt6 import QtCore, QtGui, QtWidgets
class Ui_HomePageWindow(object):
def setupUi(self, HomePageWindow):
HomePageWindow.setObjectName("HomePageWindow")
HomePageWindow.resize(800, 600)
self.centralwidget = QtWidgets.QWidget(parent=HomePageWindow)
self.centralwidget.setObjectName("centralwidget")
self.frame_page_home = QtWidgets.QFrame(parent=self.centralwidget)
self.frame_page_home.setGeometry(QtCore.QRect(0, 0, 801, 571))
self.frame_page_home.setFrameShape(QtWidgets.QFrame.Shape.StyledPanel)
self.frame_page_home.setFrameShadow(QtWidgets.QFrame.Shadow.Raised)
self.frame_page_home.setObjectName("frame_page_home")
HomePageWindow.setCentralWidget(self.centralwidget)
self.menubar = QtWidgets.QMenuBar(parent=HomePageWindow)
self.menubar.setGeometry(QtCore.QRect(0, 0, 800, 25))
self.menubar.setObjectName("menubar")
self.document = QtWidgets.QMenu(parent=self.menubar)
self.document.setObjectName("document")
self.web_html = QtWidgets.QMenu(parent=self.menubar)
self.web_html.setObjectName("web_html")
HomePageWindow.setMenuBar(self.menubar)
self.statusBar = QtWidgets.QStatusBar(parent=HomePageWindow)
self.statusBar.setObjectName("statusBar")
HomePageWindow.setStatusBar(self.statusBar)
self.doc_manager = QtGui.QAction(parent=HomePageWindow)
self.doc_manager.setObjectName("doc_manager")
self.doc_qa = QtGui.QAction(parent=HomePageWindow)
self.doc_qa.setObjectName("doc_qa")
self.web_manager = QtGui.QAction(parent=HomePageWindow)
self.web_manager.setObjectName("web_manager")
self.web_qa = QtGui.QAction(parent=HomePageWindow)
self.web_qa.setObjectName("web_qa")
self.document.addSeparator()
self.document.addAction(self.doc_manager)
self.document.addSeparator()
self.document.addAction(self.doc_qa)
self.web_html.addSeparator()
self.web_html.addAction(self.web_manager)
self.web_html.addSeparator()
self.web_html.addAction(self.web_qa)
self.menubar.addAction(self.document.menuAction())
self.menubar.addAction(self.web_html.menuAction())
self.retranslateUi(HomePageWindow)
QtCore.QMetaObject.connectSlotsByName(HomePageWindow)
def retranslateUi(self, HomePageWindow):
_translate = QtCore.QCoreApplication.translate
HomePageWindow.setWindowTitle(_translate("HomePageWindow", "MainWindow"))
self.document.setTitle(_translate("HomePageWindow", "文档"))
self.web_html.setTitle(_translate("HomePageWindow", "网页"))
self.doc_manager.setText(_translate("HomePageWindow", "文档管理"))
self.doc_qa.setText(_translate("HomePageWindow", "文档问答"))
self.web_manager.setText(_translate("HomePageWindow", "网页管理"))
self.web_qa.setText(_translate("HomePageWindow", "网页问答"))
这是一个主界面UI, 其中点击菜单栏(如点击文档管理)self.frame_page_home 怎么嵌入文档管理界面,这样做是否合理
|
719ad60747452b5d4b8a36c3695b0000
|
{
"intermediate": 0.24063339829444885,
"beginner": 0.5938680171966553,
"expert": 0.1654985547065735
}
|
46,855
|
i have calculated two scalers as :
# Suppose mean_ and var_ have been obtained as above
x_scaler = StandardScaler()
x_scaler.mean_ = f_mean_
x_scaler.var_ = f_var_
x_scaler.scale_ = np.sqrt(f_var_)
y_scaler = StandardScaler()
y_scaler.mean_ = t_mean_
y_scaler.var_ = t_var_
y_scaler.scale_ = np.sqrt(t_var_)
how can i save them?
|
da5002d5ca63f29ed45e7e0a9a001f7b
|
{
"intermediate": 0.3348543643951416,
"beginner": 0.3518921434879303,
"expert": 0.3132534623146057
}
|
46,856
|
#INCLUDE <sys/ioctl.h>// find the terminal width of the laptop being used
int getTerminalWidth()
{
struct winsize size;
// Get the size of the terminal window using ioctl system call
ioctl(STDOUT_FILENO, TIOCGWINSZ, &size);
int terminalWidth = size.ws_col;
return terminalWidth;
} make this compatible for windows in CPP
|
3c599fbc2fc83f5ff5884fa2419ba9e6
|
{
"intermediate": 0.39456185698509216,
"beginner": 0.3972199857234955,
"expert": 0.2082180380821228
}
|
46,857
|
lua script fps counter defold engine
|
fec99d8d1845ff7bc16cb2fdb810cadf
|
{
"intermediate": 0.2869197726249695,
"beginner": 0.3992342948913574,
"expert": 0.3138459026813507
}
|
46,858
|
lua script that show FPS (defold engine)
|
8d2ff135e009333fd9c383b23690d9fc
|
{
"intermediate": 0.363330215215683,
"beginner": 0.2944985628128052,
"expert": 0.34217119216918945
}
|
46,859
|
i am training a model on my dataset
my code for training the model is :
# %%
from sklearn.preprocessing import StandardScaler
import pandas as pd
import numpy as np
from tensorflow import keras
def calculate_features_scaling_params(file_path, features_to_drop):
scaler = StandardScaler()
for chunk in pd.read_csv(file_path, chunksize=10000): # Adjust chunksize based on your memory capacity
filtered_chunk = chunk.drop(features_to_drop, axis=1)
scaler.partial_fit(filtered_chunk) # Accumulate means and variances
return scaler.mean_, scaler.var_
def calculate_targets_scaling_params(file_path):
scaler = StandardScaler()
for chunk in pd.read_csv(file_path, chunksize=10000): # Adjust chunksize based on your memory capacity
filtered_chunk = chunk[['y_High_1d'
, 'y_Low_1d', 'y_Priority_1d',
'y_High_2d', 'y_Low_2d', 'y_Priority_2d',
'y_High_3d', 'y_Low_3d', 'y_Priority_3d',
'y_High_5d', 'y_Low_5d', 'y_Priority_5d'
]]
scaler.partial_fit(filtered_chunk) # Accumulate means and variances
return scaler.mean_, scaler.var_
# %%
import tensorflow as tf
if tf.test.gpu_device_name():
print('Default GPU Device: {}'.format(tf.test.gpu_device_name()))
else:
print('Please install GPU version of TF')
# %%
file_path = r"C:\Users\arisa\Desktop\combined_day.csv"
batch_size = 128
# %%
features_to_drop = ['Date', 'Symbol',
'y_High_1d', 'y_Low_1d', 'y_Priority_1d',
'y_High_2d', 'y_Low_2d', 'y_Priority_2d',
'y_High_3d', 'y_Low_3d', 'y_Priority_3d',
'y_High_5d', 'y_Low_5d', 'y_Priority_5d']
f_mean_, f_var_ = calculate_features_scaling_params(file_path, features_to_drop)
t_mean_, t_var_ = calculate_targets_scaling_params(file_path)
# %%
# Suppose mean_ and var_ have been obtained as above
x_scaler = StandardScaler()
x_scaler.mean_ = f_mean_
x_scaler.var_ = f_var_
x_scaler.scale_ = np.sqrt(f_var_)
y_scaler = StandardScaler()
y_scaler.mean_ = t_mean_
y_scaler.var_ = t_var_
y_scaler.scale_ = np.sqrt(t_var_)
# %%
row_counter = 0
# %%
def data_generator(file_path, batch_size, data_type, x_scaler, y_scaler):
global row_counter
chunksize = batch_size
total_rows = 301617 # Adjust with your dataset's actual number of rows
split_ratio = 0.92 # Assuming 80% for train, 20% for validation
train_rows = int(total_rows * split_ratio)
while True: # Loop forever, so the generator never terminates
for chunk in pd.read_csv(file_path, chunksize=chunksize):
if data_type == 'train' and row_counter >= train_rows:
continue # Skip the rest if we are fetching training data but have reached the end of the train set
elif data_type == 'val' and row_counter < train_rows:
row_counter += len(chunk)
continue # Skip this chunk if we are fetching validation data but are still in the train range
# Assuming your CSV has headers that match features/targets
# Normalizing the features
filtered_c = chunk.drop(['Date', 'Symbol'], axis=1)
feature_data = filtered_c.drop([
'y_High_1d', 'y_Low_1d', 'y_Priority_1d',
'y_High_2d', 'y_Low_2d', 'y_Priority_2d',
'y_High_3d', 'y_Low_3d', 'y_Priority_3d',
'y_High_5d', 'y_Low_5d', 'y_Priority_5d'], axis=1)
target_data = filtered_c[['y_High_1d'
, 'y_Low_1d', 'y_Priority_1d',
'y_High_2d', 'y_Low_2d', 'y_Priority_2d',
'y_High_3d', 'y_Low_3d', 'y_Priority_3d',
'y_High_5d', 'y_Low_5d', 'y_Priority_5d'
]]
feature_data_scaled = pd.DataFrame(x_scaler.transform(feature_data), columns=feature_data.columns)
# Assuming target_data also needs to be scaled, apply scaler separately
target_data_scaled = pd.DataFrame(y_scaler.transform(target_data), columns=target_data.columns)
# Now, feature_data_scaled and target_data_scaled are both DataFrames, scaled and ready to use
yield feature_data_scaled.values, target_data_scaled.values
row_counter += len(chunk)
# %%
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout, Input
import tensorflow as tf
def build_model():
input_shape = (6427,)
model = Sequential([
Dense(6427, activation='relu', input_shape = input_shape),
Dropout(0.25),
Dense(3200, activation='relu'),
Dropout(0.20),
Dense(1800, activation='relu'),
Dropout(0.15),
Dense(1024, activation='relu'),
Dropout(0.10),
Dense(512, activation='relu'),
Dropout(0.05),
Dense(256, activation='relu'),
Dense(128, activation='relu'),
Dense(64, activation='relu'),
Dense(32, activation='relu'),
Dense(12),
])
model.compile(optimizer='adam',
loss='mse', # Use Mean Squared Error for regression
metrics=['mae']) # Mean Absolute Error as an additional metric
return model
# %%
# Instantiate the model
model = build_model()
model.summary()
# %%
import warnings
warnings.filterwarnings(action='ignore', message='X has feature names, but StandardScaler was fitted without feature names')
train_generator = data_generator(file_path, batch_size, 'train',x_scaler=x_scaler,y_scaler=y_scaler)
val_generator = data_generator(file_path, batch_size, 'val',x_scaler=x_scaler,y_scaler=y_scaler)
total_samples = 301617 # Assuming same example size
train_samples = int(total_samples * 0.92)
val_samples = total_samples - train_samples
steps_per_epoch = train_samples // batch_size
validation_steps = val_samples // batch_size
# Modify the model fitting call to include validation data
model.fit(
train_generator,
steps_per_epoch=steps_per_epoch,
epochs=75,
validation_data=val_generator,
validation_steps=validation_steps
)
the first epoch runs ok but when second epoch starts the process is not procced and the step not updates
the code not throws any error or anything but the step and epoch not changes :
like after 10 hours of running this is the output :
Epoch 1/75
2167/2167 [==============================] - 932s 429ms/step - loss: 0.9483 - mae: 0.6670 - val_loss: 0.8434 - val_mae: 0.6839
Epoch 2/75
1/2167 [..............................] - ETA: 9:27 - loss: 1.2611 - mae: 0.7980
first epoch completed in 10 minutes but after that(for almost 10 hours) not procceding on epoch 2/75
|
68260904b5fb3fa0ffaea3d5f2575467
|
{
"intermediate": 0.19747671484947205,
"beginner": 0.45991817116737366,
"expert": 0.3426051139831543
}
|
46,860
|
def f(n):
k = to_5(n)
k == k[::-1]
k = int(k, 5)
res = abs(k - n)
return res
print(min(n for n in range(0,5000) if f(n) == 100))
почему мне выдает ошибку
|
e905872550352d5ea5cd5431ae7f1266
|
{
"intermediate": 0.1534869223833084,
"beginner": 0.7177849411964417,
"expert": 0.12872816622257233
}
|
46,861
|
Hi
|
aa120b89ada7344cb3d3c8d5b9931cb1
|
{
"intermediate": 0.33010533452033997,
"beginner": 0.26984941959381104,
"expert": 0.400045245885849
}
|
46,862
|
I would like to know the best method to write fields to a custom table, using a run script within workflow editor.
Here is an example of my run script
var dr = new GlideRecord('u_direct_enterprise_software');
dr.addQuery('u_record_number', workflow.scratchpad.newRecID);
dr.query();
while (dr.next()) {
var cvar = current.variables;
dr.u_is_the_vendor_on_a_sactioned_list = cvar.is_the_vendor_on_a_sactioned_list;
dr.u_has_the_vendor_or_software_been_breached_in_the_past_3_years = cvar._has_the_vendor_or_software_been_breached_in_the_past_3_years;
dr.u_vendor_website = cvar.vendor_website;
dr.u_vendor_headquarters = cvar.vendor_headquarters;
dr.u_vendor_locations = cvar.vendor_locations;
dr.u_company_officers = cvar.company_officers;
dr.u_vendor_status_from_securityscorecard = cvar.vendor_status_from_securityscorecard;
dr.u_fedramp_level_if_applicable = cvar.fedramp_level_if_applicable;
dr.u_cve_mitre_vulnerabilities_noted = cvar.cve_mitre_vulnerabilities_noted;
dr.u_virus_total_results = cvar.virus_total_results;
dr.u_data_stored_in_the_software = cvar.data_stored_in_the_software;
dr.u_us_developed = cvar.us_developed;
dr.update();
}
Essentially,
The catalog task is created -> it gets completed (the new fields are filled out on the catalog task by the user) -> runs through the approval stage -> after approval, the fields that are filled out on the catalog task get written to a record on the custom table u_direct_enterprise_software
At the moment, I currently have all functionality working besides the fields writing to the record on the table.
|
815fd23d1c3ceca31f8979200fb08759
|
{
"intermediate": 0.4899115562438965,
"beginner": 0.2657182514667511,
"expert": 0.24437017738819122
}
|
46,863
|
Test Key & IV value details –
Algorithm = AES-256-CBC
Enc_key= X5mUl3J1jneCd0adISoHWDTj7U8Rnhvd
iv= 1111111245683783
create a js function to do this
we will pass a json object to function and that function should encrypt using above details and return the encrypted base64 data
{
"tid":"0011111A",
"amount":"10",
"organization_code":"Retail",
"additional_attribute1":"attribute1",
"additional_attribute2":"attribute2",
"additional_attribute3":"attribute3",
"invoiceNumber":"",
"rrn":"",
"type":"SALE",
"cb_amt":"",
"app_code":"",
"tokenisedValue":"35454564",
"actionId":"1"
}
|
b54f8973e429005bbc2a6c3198c8c318
|
{
"intermediate": 0.43849971890449524,
"beginner": 0.2409685105085373,
"expert": 0.3205317556858063
}
|
46,864
|
grid background design html css login page and grids have neon property
|
90e1d905151834c6fc37c5ba5bd82f88
|
{
"intermediate": 0.4210916757583618,
"beginner": 0.2684181332588196,
"expert": 0.3104901909828186
}
|
46,865
|
Public Shared Function IsAscending(values As ReadOnlySpan(Of Integer)) As Boolean
For i = 1 To values.Length - 1
If values(i) < values(i - 1) Then
Return False
End If
Next
Return True
End Function
Public Shared Function TransformIndexByStrides(index As Integer, sourceStrides As Integer(), sourceReverseStride As Boolean, transformStrides As Integer()) As Integer
Debug.Assert(index >= 0)
Debug.Assert(If(sourceReverseStride, IsAscending(sourceStrides), IsDescending(sourceStrides)), "Index decomposition requires ordered strides")
Debug.Assert(sourceStrides.Length = transformStrides.Length)
' scalar tensor
If sourceStrides.Length = 0 Then
Debug.Assert(index = 0, "Index has to be zero for a scalar tensor")
Return 0
End If
Dim transformIndex = 0
Dim remainder = index
For i = 0 To sourceStrides.Length - 1
' reverse the index for reverseStride so that we divide by largest stride first
Dim nIndex = If(sourceReverseStride, sourceStrides.Length - 1 - i, i)
Dim sourceStride = sourceStrides(nIndex)
Dim transformStride = transformStrides(nIndex)
transformIndex += transformStride * (remainder / sourceStride)
remainder = remainder Mod sourceStride
Next
Return transformIndex
End Function
请根据上面代码上下文解决下面这段代码中“Debug.Assert(If(sourceReverseStride, IsAscending(sourceStrides), IsDescending(sourceStrides)), "Index decomposition requires ordered strides")”这一行代码出现的问题:类型“Integer()”的值无法转换为“ML.OnnxRuntime.Tensors.ReadOnlySpan(Of Integer)”。
Public Shared Function TransformIndexByStrides(index As Integer, sourceStrides As Integer(), sourceReverseStride As Boolean, transformStrides As Integer()) As Integer
Debug.Assert(index >= 0)
Debug.Assert(If(sourceReverseStride, IsAscending(sourceStrides), IsDescending(sourceStrides)), "Index decomposition requires ordered strides")
Debug.Assert(sourceStrides.Length = transformStrides.Length)
' scalar tensor
If sourceStrides.Length = 0 Then
Debug.Assert(index = 0, "Index has to be zero for a scalar tensor")
Return 0
End If
Dim transformIndex = 0
Dim remainder = index
For i = 0 To sourceStrides.Length - 1
' reverse the index for reverseStride so that we divide by largest stride first
Dim nIndex = If(sourceReverseStride, sourceStrides.Length - 1 - i, i)
Dim sourceStride = sourceStrides(nIndex)
Dim transformStride = transformStrides(nIndex)
transformIndex += transformStride * (remainder / sourceStride)
remainder = remainder Mod sourceStride
Next
Return transformIndex
End Function
|
5375648966191a47f9b7127ff8adf9f8
|
{
"intermediate": 0.323280930519104,
"beginner": 0.4456549286842346,
"expert": 0.23106414079666138
}
|
46,866
|
the following is my code:
import { Box, Typography, styled } from "@mui/material";
import CustomerProfile from "./components/CustomerProfile";
import InfoCard from "./components/InfoCard";
import FeedIcon from "@mui/icons-material/Feed";
import LocationOnIcon from "@mui/icons-material/LocationOn";
import AccountCircleIcon from "@mui/icons-material/AccountCircle";
const Container = styled(Box)(({ theme }) => ({
display: "flex",
flexDirection: "column",
padding: theme.spacing(4),
gap: theme.spacing(4),
}));
const InfoCardContainer = styled(Box)(({ theme }) => ({
display: "flex",
width: "100%",
gap: theme.spacing(4),
}));
const ContentContainer = styled(Box)(({ theme }) => ({
display: "flex",
justifyContent: "space-around",
gap: theme.spacing(2),
padding: theme.spacing(4),
}));
const ContentColumn = styled(Box)(({ theme }) => ({
flex: 1,
display: "flex",
flexDirection: "column",
gap: theme.spacing(1),
}));
const ContentTitle = styled(Typography)({
color: "#8f9daf",
});
const ContentSubtitle = styled(Typography)(
({ theme, green = false, red = false }) => ({
color: green ? "#41beb0" : red ? "#ff7a7d" : theme.palette.primary.main,
fontWeight: "bold",
})
);
const EmploymentInformation = () => {
return (
<ContentContainer>
<ContentColumn>
<ContentTitle>Occupation:</ContentTitle>
<ContentSubtitle>Office Administration</ContentSubtitle>
<ContentTitle>Approved Employer:</ContentTitle>
<ContentSubtitle green>Yes</ContentSubtitle>
<ContentTitle>Salary Transferred:</ContentTitle>
<ContentSubtitle red>No</ContentSubtitle>
</ContentColumn>
<ContentColumn>
<ContentTitle>Organization:</ContentTitle>
<ContentSubtitle>KFMC</ContentSubtitle>
<ContentTitle>Place:</ContentTitle>
<ContentSubtitle>Riyadh</ContentSubtitle>
</ContentColumn>
</ContentContainer>
);
};
const AddressInformation = () => {
return (
<ContentContainer>
<ContentColumn>
<ContentTitle>Street:</ContentTitle>
<ContentSubtitle>Ahmed bin Khaled St.</ContentSubtitle>
<ContentTitle>Building No.</ContentTitle>
<ContentSubtitle>1234</ContentSubtitle>
<ContentTitle>City:</ContentTitle>
<ContentSubtitle>Riyadh</ContentSubtitle>
</ContentColumn>
<ContentColumn>
<ContentTitle>District:</ContentTitle>
<ContentSubtitle>Al-Olaya</ContentSubtitle>
<ContentTitle>Secondary No.</ContentTitle>
<ContentSubtitle>Riyadh</ContentSubtitle>
<ContentTitle>Postal Code:</ContentTitle>
<ContentSubtitle>11452</ContentSubtitle>
</ContentColumn>
</ContentContainer>
);
};
const IdInformation = () => {
return (
<ContentContainer>
<ContentColumn>
<ContentTitle>ID Type:</ContentTitle>
<ContentSubtitle>Saudi ID</ContentSubtitle>
<ContentTitle>Date of Birth:</ContentTitle>
<ContentSubtitle>15, June 1996</ContentSubtitle>
</ContentColumn>
<ContentColumn>
<ContentTitle>ID Number:</ContentTitle>
<ContentSubtitle>1091182301</ContentSubtitle>
<ContentTitle>Date of Expiry:</ContentTitle>
<ContentSubtitle>30, Dec 2025</ContentSubtitle>
</ContentColumn>
</ContentContainer>
);
};
const employmentInfo = {
title: "Employment Information",
icon: <FeedIcon color="white" />,
content: <EmploymentInformation />,
};
const addressInfo = {
title: "Address Information",
icon: <LocationOnIcon color="white" />,
content: <AddressInformation />,
};
const idInfo = {
title: "ID Information",
icon: <AccountCircleIcon color="white" />,
content: <IdInformation />,
};
const Home = () => {
return (
<Container>
<CustomerProfile />
<InfoCardContainer>
<InfoCard info={employmentInfo} />
<InfoCard info={addressInfo} />
<InfoCard info={idInfo} />
</InfoCardContainer>
</Container>
);
};
export default Home;
The following is the error: Warning: Received `true` for a non-boolean attribute `green`.
Can you please edit the code to solve the error?
|
a31d564dca8acb4037dc2b784fc235f4
|
{
"intermediate": 0.35369664430618286,
"beginner": 0.44285476207733154,
"expert": 0.2034486085176468
}
|
46,867
|
根据代码上下文编写dimensions变量
vb
Public Class DenseTensor(Of T)
Friend Sub New(ByVal fromArray As Array, Optional ByVal reverseStride As Boolean = False)
MyBase.New()
' Copy initial array
Dim backingArray(fromArray.Length - 1) As T
Dim index As Integer = 0
If reverseStride Then
' Array is always row-major
Dim sourceStrides() As Integer = ArrayUtilities.GetStrides(dimensions)
For Each item In fromArray
Dim destIndex As Integer = ArrayUtilities.TransformIndexByStrides(index, sourceStrides, False, strides)
backingArray(destIndex) = DirectCast(item, T)
index += 1
Next
Else
For Each item In fromArray
backingArray(index) = DirectCast(item, T)
index += 1
Next
End If
memory = backingArray
End Sub
End Class
Public Class ArrayUtilities
Public Shared Function GetStrides(dimensions As ReadOnlySpan(Of Integer), Optional reverseStride As Boolean = False) As Integer()
Dim strides = New Integer(dimensions.Length - 1) {}
If dimensions.Length = 0 Then
Return strides
End If
Dim stride = 1
If reverseStride Then
For i = 0 To strides.Length - 1
strides(i) = stride
stride *= dimensions(i)
Next
Else
For i = strides.Length - 1 To 0 Step -1
strides(i) = stride
stride *= dimensions(i)
Next
End If
Return strides
End Function
End Class
|
abb68d7d4bf3669fdf575836b6531d77
|
{
"intermediate": 0.23892399668693542,
"beginner": 0.5390737652778625,
"expert": 0.22200226783752441
}
|
46,868
|
根据代码上下文编写dimensions变量并赋值
vb
Public Class DenseTensor(Of T)
Friend Sub New(ByVal fromArray As Array, Optional ByVal reverseStride As Boolean = False)
MyBase.New()
' Copy initial array
Dim backingArray(fromArray.Length - 1) As T
Dim index As Integer = 0
If reverseStride Then
' Array is always row-major
Dim sourceStrides() As Integer = ArrayUtilities.GetStrides(dimensions)
For Each item In fromArray
Dim destIndex As Integer = ArrayUtilities.TransformIndexByStrides(index, sourceStrides, False, strides)
backingArray(destIndex) = DirectCast(item, T)
index += 1
Next
Else
For Each item In fromArray
backingArray(index) = DirectCast(item, T)
index += 1
Next
End If
memory = backingArray
End Sub
End Class
Public Class ArrayUtilities
Public Shared Function GetStrides(dimensions As ReadOnlySpan(Of Integer), Optional reverseStride As Boolean = False) As Integer()
Dim strides = New Integer(dimensions.Length - 1) {}
If dimensions.Length = 0 Then
Return strides
End If
Dim stride = 1
If reverseStride Then
For i = 0 To strides.Length - 1
strides(i) = stride
stride *= dimensions(i)
Next
Else
For i = strides.Length - 1 To 0 Step -1
strides(i) = stride
stride *= dimensions(i)
Next
End If
Return strides
End Function
End Class
|
c3e9f49742282ed69f8c3a3c19593000
|
{
"intermediate": 0.2505733072757721,
"beginner": 0.5157487392425537,
"expert": 0.2336779534816742
}
|
46,869
|
根据代码上下文编写dimensions变量并赋值
vb
Public Class DenseTensor(Of T)
Friend Sub New(ByVal fromArray As Array, Optional ByVal reverseStride As Boolean = False)
MyBase.New()
' Copy initial array
Dim backingArray(fromArray.Length - 1) As T
Dim index As Integer = 0
If reverseStride Then
' Array is always row-major
Dim sourceStrides() As Integer = ArrayUtilities.GetStrides(dimensions)
For Each item In fromArray
Dim destIndex As Integer = ArrayUtilities.TransformIndexByStrides(index, sourceStrides, False, strides)
backingArray(destIndex) = DirectCast(item, T)
index += 1
Next
Else
For Each item In fromArray
backingArray(index) = DirectCast(item, T)
index += 1
Next
End If
memory = backingArray
End Sub
End Class
Public Class ArrayUtilities
Public Shared Function GetStrides(dimensions As ReadOnlySpan(Of Integer), Optional reverseStride As Boolean = False) As Integer()
Dim strides = New Integer(dimensions.Length - 1) {}
If dimensions.Length = 0 Then
Return strides
End If
Dim stride = 1
If reverseStride Then
For i = 0 To strides.Length - 1
strides(i) = stride
stride *= dimensions(i)
Next
Else
For i = strides.Length - 1 To 0 Step -1
strides(i) = stride
stride *= dimensions(i)
Next
End If
Return strides
End Function
End Class
|
91674567405a1ceb35e2f4a76d1e1d05
|
{
"intermediate": 0.2505733072757721,
"beginner": 0.5157487392425537,
"expert": 0.2336779534816742
}
|
46,870
|
How to setup zebra printer that way that before each print job a certain zpl command is executed on printer
|
2340fb6c9a3ee9b2972fb0cc6d92deb6
|
{
"intermediate": 0.3588191568851471,
"beginner": 0.17328469455242157,
"expert": 0.46789613366127014
}
|
46,871
|
How to detect if printer was connected with python
|
14fdafb6dc5a30853782ccb401b6d4d5
|
{
"intermediate": 0.26209643483161926,
"beginner": 0.16485187411308289,
"expert": 0.5730516314506531
}
|
46,872
|
i have a csv file that contains many columns and has a large size(27GB)
i want to remove all the columns except the ones that:
A.The columns which their names that contains: Open, High, Low, Close,macd,sma,wma,ema,rsi,cci,stoch,_short,_ichimoku,_dr
B.The columns which their names that ends whith: volume_base, volume_crypto, _tradecount
C.The columns after y_High_1d(including y_High_1d itself)
|
95ad505219d32c986206f0f61bf69e15
|
{
"intermediate": 0.3243207037448883,
"beginner": 0.34848347306251526,
"expert": 0.32719582319259644
}
|
46,873
|
Запустил docker контейнер с freepbx а он при запуске пишет это:
2024-04-16.05:51:29 [NOTICE] ** [monitoring] Container configured for monitoring with 'zabbix modern'
2024-04-16.05:51:30 [NOTICE] ** [scheduling] Container configured for scheduled tasks with 'cron'
2024-04-16.05:51:30 [INFO] ** [fail2ban] Starting Fail2ban
2024-04-16.05:52:00 [NOTICE] ** [messaging] Container configured to route mail via SMTP to 'postfix-relay'
2024-04-16.05:52:00 [INFO] ** [mariadb] No embedded database detected, skip..
2024-04-16.05:52:06 [WARN] ** [freepbx] MySQL/MariaDB Server 'mariadb' is not accessible, retrying.. (5 seconds so far)
2024-04-16.05:52:11 [WARN] ** [freepbx] MySQL/MariaDB Server 'mariadb' is not accessible, retrying.. (10 seconds so far)
2024-04-16.05:52:16 [WARN] ** [freepbx] MySQL/MariaDB Server 'mariadb' is not accessible, retrying.. (15 seconds so far)
2024-04-16.05:52:21 [WARN] ** [freepbx] MySQL/MariaDB Server 'mariadb' is not accessible, retrying.. (20 seconds so far)
2024-04-16.05:52:26 [WARN] ** [freepbx] MySQL/MariaDB Server 'mariadb' is not accessible, retrying.. (25 seconds so far)
2024-04-16.05:52:31 [WARN] ** [freepbx] MySQL/MariaDB Server 'mariadb' is not accessible, retrying.. (30 seconds so far)
2024-04-16.05:52:36 [WARN] ** [freepbx] MySQL/MariaDB Server 'mariadb' is not accessible, retrying.. (35 seconds so far)
2024-04-16.05:52:41 [WARN] ** [freepbx] MySQL/MariaDB Server 'mariadb' is not accessible, retrying.. (40 seconds so far)
2024-04-16.05:52:46 [WARN] ** [freepbx] MySQL/MariaDB Server 'mariadb' is not accessible, retrying.. (45 seconds so far)
2024-04-16.05:52:51 [WARN] ** [freepbx] MySQL/MariaDB Server 'mariadb' is not accessible, retrying.. (50 seconds so far)
|
86983fa8fa53fa47de59c74f7cd3ba28
|
{
"intermediate": 0.34504425525665283,
"beginner": 0.32307398319244385,
"expert": 0.33188170194625854
}
|
46,874
|
Entity Framework core 6 many to many with join table
|
6e1c17dfe58b88f06fa7d3db78596703
|
{
"intermediate": 0.6050905585289001,
"beginner": 0.2542107105255127,
"expert": 0.14069868624210358
}
|
46,875
|
i have bunch of csv files
i have following code to merge them:
import pandas as pd
import os
# Directory where all your CSV files are located
csv_directory = r"C:\Users\arisa\Desktop\day_spot"
# List all CSV files in the given directory
csv_files = [file for file in os.listdir(csv_directory) if file.endswith('.csv')]
# Placeholder for storing the data frames
data_frames = []
# Loop over the list of csv files
for csv_file in csv_files:
# Read the CSV file and append it to the list of data frames
file_path = os.path.join(csv_directory, csv_file)
df = pd.read_csv(file_path)
data_frames.append(df)
# Concatenate all data frames to create a single data frame
combined_df = pd.concat(data_frames, ignore_index=True)
# Save the combined data frame to a new CSV file
combined_csv_path = r'C:\Users\arisa\Desktop\combined_day.csv'
combined_df.to_csv(combined_csv_path, index=False)
print(f'Combined CSV created at: {combined_csv_path}')
i want to update the code so that in final file:
i want to remove all the columns except the ones that:
A.The columns which their names that contains: Open, High, Low, Close,macd,sma,wma,ema,rsi,cci,stoch,_short,_ichimoku,_dr
B.The columns which their names that ends whith: volume_base, volume_crypto, tradecount
C.The columns after y_High_1d(including y_High_1d itself)
|
cdeb1b9b752be31c85fb350d2a849f62
|
{
"intermediate": 0.3468990623950958,
"beginner": 0.4228151738643646,
"expert": 0.23028579354286194
}
|
46,876
|
how to write Gtest for the following function static void sdl_egl_destroy(struct graphics_opengl_platform *egl) {
if (egl->eglwindow) {
SDL_GL_DeleteContext(egl->eglcontext);
SDL_DestroyWindow(egl->eglwindow);
}
g_free(egl);
SDL_Quit();
}
|
c85b7637f5383b542b88d6a1fef13240
|
{
"intermediate": 0.5509567856788635,
"beginner": 0.2970137298107147,
"expert": 0.15202954411506653
}
|
46,877
|
i have a large dataset as csv file (27GB size)
i want to train an ensmble decision tree on it
for each row i have 3 y value columns named as yq1 yq2 yq3 that need to be predicted and model should predict them
give me proper python code to implement it
|
3b33f263636c24f78df7555f781f8f70
|
{
"intermediate": 0.4408280849456787,
"beginner": 0.09490528702735901,
"expert": 0.4642665982246399
}
|
46,878
|
hi
|
f5efa459e0d9beef8fe9326448e77577
|
{
"intermediate": 0.3246487081050873,
"beginner": 0.27135494351387024,
"expert": 0.40399640798568726
}
|
46,879
|
I would like to know the best method to write fields to a custom table, using a run script within workflow editor in servicenow.
Essentially,
The catalog task is created -> it gets completed (the new fields are filled out on the catalog task by the user) -> runs through the approval stage -> after approval, the fields that are filled out on the catalog task get written to a record on the custom table u_direct_enterprise_software.
At the moment, I currently have all functionality working besides the fields writing to the record on the table. correct he workflow script if possible
They simply do not populate to the table and I do not know why.
Here is an example of my run script
var dr = new GlideRecord('u_direct_enterprise_software');
dr.addQuery('u_record_number', workflow.scratchpad.newRecID);
dr.query();
while (dr.next()) {
var cvar = current.variables;
dr.u_is_the_vendor_on_a_sactioned_list = cvar.is_the_vendor_on_a_sactioned_list;
dr.u_has_the_vendor_or_software_been_breached_in_the_past_3_years = cvar._has_the_vendor_or_software_been_breached_in_the_past_3_years;
dr.u_vendor_website = cvar.vendor_website;
dr.u_vendor_headquarters = cvar.vendor_headquarters;
dr.u_vendor_locations = cvar.vendor_locations;
dr.u_company_officers = cvar.company_officers;
dr.u_vendor_status_from_securityscorecard = cvar.vendor_status_from_securityscorecard;
dr.u_fedramp_level_if_applicable = cvar.fedramp_level_if_applicable;
dr.u_cve_mitre_vulnerabilities_noted = cvar.cve_mitre_vulnerabilities_noted;
dr.u_virus_total_results = cvar.virus_total_results;
dr.u_data_stored_in_the_software = cvar.data_stored_in_the_software;
dr.u_us_developed = cvar.us_developed;
dr.update();
}
|
0fb99c7dcc6094ad43582d2f77886f9a
|
{
"intermediate": 0.5438546538352966,
"beginner": 0.28026512265205383,
"expert": 0.17588016390800476
}
|
46,880
|
Hi, I want to work with Powershell ISE, please give me short answers, as best only the source code i want with an comment. pls do not answer on this message, this is just an info for you
|
5531d2f27a5fc1f87d0dbe4346e8f25c
|
{
"intermediate": 0.36739861965179443,
"beginner": 0.29752984642982483,
"expert": 0.3350715935230255
}
|
46,881
|
Hi, I want to work with Powershell ISE, please give me short answers, as best only the source code i want with an comment. pls do not answer on this message, this is just an info for you
|
45dabe87ee4c8e10207468cc74892a39
|
{
"intermediate": 0.36739861965179443,
"beginner": 0.29752984642982483,
"expert": 0.3350715935230255
}
|
46,882
|
Is MVC a design pattern?
|
a7defc7a1df61ad390e9b95d9ad9bea8
|
{
"intermediate": 0.2757970094680786,
"beginner": 0.26499542593955994,
"expert": 0.45920753479003906
}
|
46,883
|
Hi how to create an text file in powershell, pls show only code with an comment
|
766efe76d936972cfcec8d91cbe8ebf7
|
{
"intermediate": 0.3191577196121216,
"beginner": 0.4297395348548889,
"expert": 0.2511027157306671
}
|
46,884
|
Hi, pls give me short answers, with only source code for powershell and an comment.
Show an prompt with "OK" after that pressed, the code should continue
|
0682b7f3b4815f7519dfe0171ac20311
|
{
"intermediate": 0.29088959097862244,
"beginner": 0.3673631250858307,
"expert": 0.34174731373786926
}
|
46,885
|
Configure many to many relationships with same table entity framework core 6
|
020278a74111c04afc6efcfbde5c4b2b
|
{
"intermediate": 0.499287486076355,
"beginner": 0.17046476900577545,
"expert": 0.33024775981903076
}
|
46,886
|
i a django application we created a class named "db_validator" that on initialisation is going to load all the django model and their data ( rows ) in self.tables which is a dictionary of pd dataframes where the key is the table / model name and the values are the dataframes , write the class
|
5149d93da2366eb612997321a55e330a
|
{
"intermediate": 0.5017939805984497,
"beginner": 0.23618288338184357,
"expert": 0.26202306151390076
}
|
46,887
|
in this javascript why is the command 'instructionsElement.appendChild(newStation);' not working - '
script.js
Remix
Share
new-train-game
Settings
Assets
Files
script.js
PRETTIER
});
//24 hour clock display
const TIME_MULTIPLIER = 60 * 10; // 10 minutes = 600 seconds
// Function to format time in 24-hour format with leading zeros
function formatTime(hours, minutes) {
// Handle the case where minutes reach 60 (should display the next hour)
if (minutes === 60) {
hours++;
minutes = 0;
}
return `${hours.toString().padStart(2, "0")}:${minutes
.toString()
.padStart(2, "0")}`;
}
// Function to update the clock display and handle daily bonus
function updateClock() {
const currentTime = new Date();
// Simulate game time by multiplying actual time with multiplier
const gameTime = new Date(currentTime.getTime() * TIME_MULTIPLIER);
// Get hours and minutes in 24-hour format
let hours = gameTime.getHours();
// Get minutes and force them to the nearest multiple of 10 (ending in 0)
let minutes = Math.floor(gameTime.getMinutes() / 10) * 10;
// Format the time string with fixed minute handling
const formattedTime = formatTime(hours, minutes);
// Update the content of the div with the formatted time
document.getElementById("timedisplay").textContent = formattedTime;
// Check if it's midnight (00:00)
if (hours === 0 && minutes === 0) {
// Generate random daily bonus (modify as needed)
money += cafeOneBonus + cafeTwoBonus + hotelOneBonus;
const moneyDisplay = document.getElementById("moneydisplay");
const moneyString = `£${money}`;
moneyDisplay.textContent = moneyString;
console.log(
`Daily bonus of ${
cafeOneBonus + cafeTwoBonus + hotelOneBonus + hotelTwoBonus
} added! Total money: ${money}`
); // You can replace console.log with your desired action
}
}
// Call the updateClock function initially
updateClock();
// Update the clock every second to simulate smooth time progression
setInterval(updateClock, 1000);
//add a new station
const newStation = document.createElement("button");
newStation.id = "trainbutton";
newStation.textContent = "Place New Station";
newStation.addEventListener("click", () => {
if (money > 150000) {
} else {
console.log(`You need £150,000 to add a new stations`);
}
});
STATUS
LOGS
TERMINAL
TOOLS
PREVIEW
'
|
39da4405eab5f1077306da4d30c0a894
|
{
"intermediate": 0.4191504716873169,
"beginner": 0.2957295775413513,
"expert": 0.2851199209690094
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.