| row_id (int64, 0-48.4k) | init_message (string, length 1-342k) | conversation_hash (string, 32 chars) | scores (dict) |
|---|---|---|---|
47,590
|
@echo off
SETLOCAL
for %%i in (*.pdf) do "C:\Program Files (x86)\gs\gs10.03.0\bin\gswin32c.exe" -q -dNOPAUSE -sDEVICE=txtwrite -sOutputFile="%%~ni.txt" -dFirstPage=1 -dLastPage=1 "%%i" -c quit
echo Conversion Complete!
ENDLOCAL
For this, I want VBA code.
|
b0cec9d45fd609dd0d412fd8b2e618eb
|
{
"intermediate": 0.3801597058773041,
"beginner": 0.406890869140625,
"expert": 0.21294943988323212
}
|
47,591
|
HI!
|
ed662c7a1b053ccb17fdb000cbce4b36
|
{
"intermediate": 0.3374777138233185,
"beginner": 0.2601830065250397,
"expert": 0.40233927965164185
}
|
47,592
|
function addMessage(question, answer, answer_source, callback) {
var chatBox = document.getElementById("chatBox");
var current_index = answer_index++;
var answer_id = "answer_" + current_index;
var source_id = "source_" + current_index;
alert(callback)
callback({"answer_id": answer_id, "source_id": source_id})
var html_ = get_qa_template(question, answer, answer_id, source_id);
chatBox.innerHTML += html_;
if (answer_source && answer_source !== "None" && answer_source !== "") {
extendSource(source_id, answer_source);
}
chatBox.lastElementChild.scrollIntoView(); // scroll to the last child element
return {"answer_id": answer_id, "source_id": source_id};
}
How do I call the above JS from PyQt6?
For example:
@pyqtSlot(str, str, str, ?)
def py_add_message(self, question, answer, answer_source=None):
# self.current_html_answer_id = None
# self.current_html_source_id = None
js_code = f"addMessage('{question}', '{answer}', '{answer_source}', {self.get_answer_id})"
# self.page().runJavaScript(js_code, self.get_answer_id)
self.page().runJavaScript(js_code)
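A minimal sketch of one way to wire this up, assuming a QWebEngineView subclass (the class name and attribute names below are illustrative): a Python method cannot be interpolated into the JS string as a callback, so pass a no-op JS function and receive addMessage's return value through runJavaScript's result callback instead.

```python
import json

from PyQt6.QtCore import pyqtSlot
from PyQt6.QtWebEngineWidgets import QWebEngineView


class ChatView(QWebEngineView):  # hypothetical subclass
    @pyqtSlot(str, str, str)
    def py_add_message(self, question, answer, answer_source=""):
        # json.dumps quotes and escapes the strings safely for embedding in JS.
        js = (f"addMessage({json.dumps(question)}, {json.dumps(answer)}, "
              f"{json.dumps(answer_source)}, function(ids) {{}})")
        # addMessage's return value arrives in Python as a dict;
        # 0 selects the main JavaScript world.
        self.page().runJavaScript(js, 0, self._on_ids)

    def _on_ids(self, result):
        if result:
            self.current_html_answer_id = result.get("answer_id")
            self.current_html_source_id = result.get("source_id")
```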
|
4142e452294d8d76ee77de501ba402d1
|
{
"intermediate": 0.37245362997055054,
"beginner": 0.43919965624809265,
"expert": 0.188346728682518
}
|
47,593
|
What is an escape character in SQL and how does it work?
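An escape character lets you treat a special character literally. A minimal sketch using Python's standard-library sqlite3 (exact syntax varies slightly between SQL dialects): doubling '' escapes a quote inside a string literal, and ESCAPE declares an escape character for LIKE wildcards.

```python
import sqlite3

conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE users (name TEXT)")
conn.execute("INSERT INTO users VALUES ('O''Brien')")  # '' -> a literal '
conn.execute("INSERT INTO users VALUES ('a_b')")
# '!' is declared as the escape character, so '!_' matches a literal
# underscore rather than the any-single-character wildcard.
rows = conn.execute(
    "SELECT name FROM users WHERE name LIKE '%!_%' ESCAPE '!'"
).fetchall()
print(rows)  # [('a_b',)]
```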
|
4428dff33e8b0fa36c87e9c37bec6e10
|
{
"intermediate": 0.32783687114715576,
"beginner": 0.37830647826194763,
"expert": 0.2938566505908966
}
|
47,594
|
psycopg2.errors.SyntaxError: syntax error at or near "AS"
LINE 8: AS SELECT
^
conn.execute("""
CREATE TABLE preprive (
id BIGSERIAL primary key
)
AS SELECT
S.academie AS ACADEMIE,
COALESCE (L.codeaca, S.academie) AS codeaca,
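A hedged sketch of one workaround: PostgreSQL rejects a column-definition list combined with AS SELECT, which is what the parser flags at "AS". Create the table from the query first, then add the serial key; the FROM/JOIN tables below are placeholders, since the original query is truncated.

```python
conn.execute("""
    CREATE TABLE preprive AS
    SELECT
        S.academie AS academie,
        COALESCE(L.codeaca, S.academie) AS codeaca
    FROM structures S                    -- placeholder table names
    LEFT JOIN lookup L ON L.academie = S.academie
""")
conn.execute("ALTER TABLE preprive ADD COLUMN id BIGSERIAL PRIMARY KEY")
```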
|
769fb5fa2a954c5f6ad11e10f1ce8183
|
{
"intermediate": 0.2074250876903534,
"beginner": 0.5696244239807129,
"expert": 0.2229505181312561
}
|
47,595
|
class Actor(torch.nn.Module):
def __init__(self, gnn_model):
super(Actor, self).__init__()
self.gnn = gnn_model
def forward(self, state):
# State contains node_features_tensor, edge_feature_tensor, edge_index
node_features_tensor, edge_feature_tensor, edge_index = state
action_probs = self.gnn(node_features_tensor, edge_index)
return action_probs
class Critic(torch.nn.Module):
def __init__(self, state_dim):
super(Critic, self).__init__()
self.network = torch.nn.Sequential(
torch.nn.Linear(state_dim, 128),
torch.nn.ReLU(),
torch.nn.Linear(128, 1)
)
def forward(self, state):
return self.network(state)
class PPOAgent:
def init(self, gnn_model, state_dim, action_space, lr_actor, lr_critic, gamma, gae_lambda, epsilon, policy_clip, epochs):
self.gamma = gamma
self.gae_lambda = gae_lambda
self.epsilon = epsilon
self.policy_clip = policy_clip
self.epochs = epochs
self.actor = Actor(gnn_model)
self.critic = Critic(state_dim)
self.optimizer_actor = optim.Adam(self.actor.parameters(), lr=lr_actor)
self.optimizer_critic = optim.Adam(self.critic.parameters(), lr=lr_critic)
self.action_space = action_space # Assume continuous
def select_action(self, state):
state_tensor = torch.FloatTensor(state).unsqueeze(0) # Adjust dimensions as necessary
action_probs = self.actor(state_tensor)
cov_mat = torch.diag(action_probs.var()).unsqueeze(0) # Ensure variances are positive and form a covariance matrix
dist = MultivariateNormal(action_probs, cov_mat)
action = dist.sample()
log_prob = dist.log_prob(action)
return action.numpy().squeeze(), log_prob.item()
def compute_gae(self, next_value, rewards, masks, values):
values = values + [next_value]
gae = 0
returns = []
for step in reversed(range(len(rewards))):
delta = rewards[step] + self.gamma * values[step + 1] * masks[step] - values[step]
gae = delta + self.gamma * self.gae_lambda * masks[step] * gae
returns.insert(0, gae + values[step])
return returns
def update_policy(self, prev_states, prev_actions, prev_log_probs, returns, advantages):
advantages = torch.tensor(advantages)
returns = torch.tensor(returns)
prev_log_probs = torch.tensor(prev_log_probs)
for _ in range(self.epochs):
log_probs, state_values, entropy = self.evaluate(prev_states, prev_actions)
ratios = torch.exp(log_probs - prev_log_probs.detach())
advantages = returns - state_values.detach()
surr1 = ratios * advantages
surr2 = torch.clamp(ratios, 1-self.policy_clip, 1+self.policy_clip) * advantages
actor_loss = - torch.min(surr1, surr2).mean()
critic_loss = F.mse_loss(state_values, returns)
self.optimizer_actor.zero_grad()
actor_loss.backward()
self.optimizer_actor.step()
self.optimizer_critic.zero_grad()
critic_loss.backward()
self.optimizer_critic.step()
def evaluate(self, states, actions):
# Replace with actual evaluation logic based on your training loop requirements
pass
Check that the training loop below is properly synchronized with the 'class PPOAgent' given above and all of its functions ('select_action', 'compute_gae', 'update_policy'):
# Training loop
def train(env, agent, num_episodes, max_timesteps, batch_size, epsilon):
for episode in range(num_episodes):
node_features_tensor, edge_feature_tensor, edge_index, performance_metrics = env.reset()
state = (node_features_tensor, edge_index) # Adjust based on your actual state representation needs
episode_rewards = []
states = []
actions = []
log_probs = []
values = []
masks = []
for t in range(max_timesteps):
action, log_prob = agent.select_action(state)
next_state, next_edge_feature_tensor, next_edge_index, reward, done, previous_metrics = env.step(action.numpy())
next_state = torch.tensor(next_state, dtype=torch.float32) # Convert to tensor if not already
episode_rewards.append(reward)
states.append(state)
actions.append(action)
log_probs.append(log_prob)
values.append(agent.critic(state).item()) # Assuming this is how you get value estimation
masks.append(1 - float(done))
state = next_state
edge_feature_tensor = next_edge_feature_tensor
edge_index = next_edge_index
if done:
next_value = agent.critic(next_state).item() # Fetch next state value for GAE
break
# Outside the loop, we need to handle the case when we haven’t reached done
if not done:
next_value = agent.critic(next_state).item()
# Compute returns and advantages
returns = agent.compute_gae(next_value, episode_rewards, dones, values)
# Normalizing advantages
advantages = torch.tensor(returns) - torch.tensor(values)
# Update policy and value network
agent.update_policy(states, actions, log_probs, returns, advantages)
# Log episode information
total_reward = sum(episode_rewards)
print(f"Episode {episode + 1}/{num_episodes}, Total Reward: {total_reward}")
|
63c9cdc3a73c5879d98c12d64dc3f00e
|
{
"intermediate": 0.21817174553871155,
"beginner": 0.43283581733703613,
"expert": 0.3489924669265747
}
|
47,596
|
I have folders of files.
I want Python code to iterate over the files of folder A and, for each filename in folder A, move the file to folder C if that filename doesn't exist in folder B.
Give me proper Python code.
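A minimal sketch with pathlib and shutil; the folder paths are placeholders for the real locations.

```python
import shutil
from pathlib import Path

folder_a = Path("A")
folder_b = Path("B")
folder_c = Path("C")
folder_c.mkdir(exist_ok=True)

for file in folder_a.iterdir():
    # Move only regular files whose name is absent from folder B.
    if file.is_file() and not (folder_b / file.name).exists():
        shutil.move(str(file), str(folder_c / file.name))
```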
|
6a51ef48550b96d636d5ec8c44c5dc9c
|
{
"intermediate": 0.46212100982666016,
"beginner": 0.19826190173625946,
"expert": 0.3396170437335968
}
|
47,597
|
* conn.execute("""
CREATE TABLE prepublic (
id BIGSERIAL primary key,
ANT VARCHAR( 3 ) NULL DEFAULT NULL,
CCP VARCHAR( 10 ) NULL DEFAULT NULL
)
AS SELECT
S.academie AS ACADEMIE,
COALESCE (L.codeaca, S.academie) AS codeaca,
sqlalchemy.exc.ProgrammingError: (psycopg2.errors.SyntaxError) syntax error at or near "AS"
LINE 8: AS SELECT
|
4cc67485b177fed49372d2835e18aa1e
|
{
"intermediate": 0.26746925711631775,
"beginner": 0.41839179396629333,
"expert": 0.3141389489173889
}
|
47,598
|
from sqlalchemy import create_engine
from sqlalchemy.engine import make_url
# Database configuration
username = 'postgres'
password = 'Password123'
hostname = '107.110.152.126'
port = '4432'
database_name = 'new'
# PostgreSQL URL. Format:
# postgresql://<username>:<password>@<host>:<port>/<database_name>
database_url = make_url(
f"postgresql://{username}:{password}@{hostname}:{port}/{database_name}"
)
# Create an engine
engine = create_engine(database_url)
# Test the connection
try:
print("Connected to the database!")
except Exception as e:
print(f"Error connecting to the database: {e}")
I am using this code; please let me know how I can run a query and retrieve results from it.
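A minimal sketch in SQLAlchemy 1.4+/2.0 style; the table and column names are illustrative.

```python
from sqlalchemy import text

with engine.connect() as conn:
    result = conn.execute(text("SELECT id, name FROM some_table LIMIT 10"))
    for row in result:
        print(row.id, row.name)
```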
|
1347642a31c7f56c1e30f1347d714785
|
{
"intermediate": 0.5059235095977783,
"beginner": 0.2857251465320587,
"expert": 0.2083512842655182
}
|
47,599
|
System.MissingMethodException: Default constructor not found for type System.String
|
14ea119a1c9d6a9db2276c15dbd71421
|
{
"intermediate": 0.42807263135910034,
"beginner": 0.32196691632270813,
"expert": 0.24996048212051392
}
|
47,600
|
How to set default printer via powershell in windows 10
|
9f758eb996e3d556e9039af7ffdeff59
|
{
"intermediate": 0.3574337959289551,
"beginner": 0.3103908598423004,
"expert": 0.3321753144264221
}
|
47,601
|
Set user location via a background script in ServiceNow
|
1b7a9a42c58d11eb4ae7b26a6195c639
|
{
"intermediate": 0.454812616109848,
"beginner": 0.2539636194705963,
"expert": 0.2912237346172333
}
|
47,602
|
data = {
"1": {
"root": [
{
"item name": "sdf",
"item count": "asd"
},
{
"item name": "df",
"item count": "dcvb"
}
]
},
"2": {
"root": [
{
"item name": "sdf2",
"item count": "asd2"
},
{
"item name": "df2",
"item count": "dcvb2"
}
]
}
}
for key, value in data.items():
for item in value["root"]:
fullItemname=item["item name"]
print("Item Name:", fullitemname)
print()
fix the error
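A hedged fix: the loop assigns fullItemname but prints fullitemname, and Python names are case-sensitive, so the print raises NameError. A corrected sketch, reusing the data dict above:

```python
for key, value in data.items():
    for item in value["root"]:
        full_item_name = item["item name"]
        print("Item Name:", full_item_name)
    print()
```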
|
f66ed0641ff435366f9f390cd235f5b5
|
{
"intermediate": 0.3155561685562134,
"beginner": 0.4071009159088135,
"expert": 0.27734288573265076
}
|
47,603
|
Write a function to compute the average of a Python list.
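A minimal sketch; it returns 0.0 for an empty list rather than raising ZeroDivisionError.

```python
def average(values):
    return sum(values) / len(values) if values else 0.0

print(average([1, 2, 3, 4]))  # 2.5
```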
|
55fbd79d1d4852e9bdbae96c84bab88c
|
{
"intermediate": 0.356499582529068,
"beginner": 0.30134645104408264,
"expert": 0.34215399622917175
}
|
47,604
|
Although I have Bootstrap in package.json and also in angular.json, it isn't applied.
|
af29d9518c3850b8a001728e5ae1df71
|
{
"intermediate": 0.45650604367256165,
"beginner": 0.2799166142940521,
"expert": 0.26357731223106384
}
|
47,605
|
from flask import Flask, Response, request, jsonify
from pymongo import MongoClient
from bson import ObjectId
import bcrypt
import jwt
from flask_cors import CORS
import re
app = Flask(__name__)
CORS(app)
client = MongoClient("mongodb://localhost:27017/")
db = client['publication']
if 'publication' not in client.list_database_names():
db = client['publication']
user_collection = db['user']
if 'user' not in db.list_collection_names():
db.create_collection('user')
counter_collection = db['counter']
if 'counter' not in db.list_collection_names():
db.create_collection('counter')
counter_collection.insert_one({'_id': 'userid', 'seq': 0})
@app.route("/")
def hello_world():
res = user_collection.find()
return jsonify(list(res)),200
@app.route("/signup", methods=["POST"])
def register_user():
data = request.json
username = data.get("username")
email = data.get("email")
password = data.get("password")
if not (username and email and password):
return jsonify({"message": "Missing required fields"}), 400
user_exists = user_collection.find_one({"email": email})
if user_exists:
return jsonify({"message": "Cet utilisateur existe déjà"}), 400
hashed_password = bcrypt.hashpw(password.encode("utf-8"), bcrypt.gensalt(9))
# Increment the counter and retrieve the new value
counter_doc = counter_collection.find_one_and_update({'_id': 'userid'}, {'$inc': {'seq': 1}}, return_document=True)
user_id = counter_doc['seq']
new_user = {
"_id": user_id,
"username": username,
"email": email,
"password": hashed_password.decode("utf-8")
}
user_collection.insert_one(new_user)
return jsonify(new_user), 201
@app.route("/login", methods=["POST"])
def login():
data = request.json
email = data.get("email")
password = data.get("password")
if not (email and password):
return jsonify({"message": "Email and password are required"}), 400
utilisateur = user_collection.find_one({"email": email})
if not utilisateur:
return jsonify({"message": "Utilisateur introuvable"}), 404
# Password comparison
if bcrypt.checkpw(password.encode("utf-8"), utilisateur["password"].encode("utf-8")):
# JWT token generation
payload = {"email": email, "nom": utilisateur.get("nom")}
token = jwt.encode(payload, "1234567890", algorithm="HS256")
return jsonify({"token": token, "message": f"Welcome {utilisateur.get('username')}"})
else:
return jsonify({"message": "Mot de passe incorrect"}), 401
# ! create the Annonces collection
if 'Annonces' not in db.list_collection_names():
db.create_collection('Annonces')
Annonce_collection = db['Annonces']
@app.route("/add", methods=["POST"])
def add_annonce():
data = request.json
Annonce_collection.insert_one(data)
return jsonify(list(data)), 201
@app.route("/search/<word>", methods=["GET"])
def search_annonce(word):
regex = re.compile(re.escape(word), re.IGNORECASE)
annonces = Annonce_collection.find({"titre": {"$regex": regex}})
results = []
for annonce in annonces:
annonce['_id'] = str(annonce['_id'])
results.append(annonce)
if not results:
return jsonify({"message": f"No announcements found containing the word '{word}'"}), 404
return jsonify(results), 200
@app.route("/annonce/delete/<annonce_id>", methods=["DELETE"])
def delete_annonce(annonce_id):
if not ObjectId.is_valid(annonce_id):
return jsonify({"message": "Invalid ID format"}), 400
result = Annonce_collection.delete_one({"_id": ObjectId(annonce_id)})
if result.deleted_count == 0:
return jsonify({"message": "Announcement not found"}), 404
return jsonify({"message": "Announcement deleted successfully"}), 200
# Update by ID
@app.route("/annonce/update/<annonce_id>", methods=["PUT"])
def update_annonce(annonce_id):
if not ObjectId.is_valid(annonce_id):
return jsonify({"message": "Invalid ID format"}), 400
data = request.json
result = Annonce_collection.update_one({"_id": ObjectId(annonce_id)}, {"$set": data})
if result.matched_count == 0:
return jsonify({"message": "Announcement not found"}), 404
return jsonify({"message": "Announcement updated successfully"}), 200
@app.route("/annonce")
def get_annonces():
res = Annonce_collection.find()
annonces = []
for annonce in res:
annonce['_id'] = str(annonce['_id'])
annonces.append(annonce)
return jsonify(annonces), 200
if __name__ == "__main__":
app.run(debug=True)
"in the route /add i want to add imaes to my react app "
{
"categorie": "Categorie",
"plusCategorie": "plusCategorie",
"ville": "Ville",
"secteur": "Secteur",
"NumeroTele": "Telephone",
"Etat": "Etat",
"Prix": "Prix",
"titre": "Titre",
"TexteAnnonce": "TexteAnnonce",
"images": [
"C:\\Users\\LENOVO\\Desktop\\selenium\\imges\\AdobeStock_73502611_Preview.jpeg",
"C:\\Users\\LENOVO\\Desktop\\selenium\\imges\\AdobeStock_238105207_Preview.jpeg",
"C:\\Users\\LENOVO\\Desktop\\selenium\\imges\\AdobeStock_686756529_Preview.jpeg"
]
}
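A hedged sketch of a revised /add route (replacing the one above): accept multipart/form-data, save the uploaded files server-side, and store their paths in the document. The form field names and the upload folder are assumptions.

```python
import os
from werkzeug.utils import secure_filename

UPLOAD_FOLDER = "uploads"
os.makedirs(UPLOAD_FOLDER, exist_ok=True)

@app.route("/add", methods=["POST"])
def add_annonce():
    data = request.form.to_dict()  # the text fields of the annonce
    image_paths = []
    for file in request.files.getlist("images"):
        filename = secure_filename(file.filename)
        path = os.path.join(UPLOAD_FOLDER, filename)
        file.save(path)
        image_paths.append(path)
    data["images"] = image_paths
    Annonce_collection.insert_one(data)
    data["_id"] = str(data["_id"])  # make the ObjectId JSON-serializable
    return jsonify(data), 201
```

On the React side, the request would then send a FormData object (with the images appended under the same field name) instead of a JSON body.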
|
2905ac083b38ebbf4242af2020f762b2
|
{
"intermediate": 0.3699720799922943,
"beginner": 0.3549700975418091,
"expert": 0.2750578224658966
}
|
47,606
|
In which book can I find this?
"A classical inventory problem concerns the purchase and sale of newspapers. The paper seller buys the papers for … each and sells them for … LE each. Newspapers not sold at the end of the day are sold as scrap for 0.… LE each. There are three types of newspapers (good, fair, and poor) with probabilities of 0.35, 0.43, and 0.22 respectively."
|
1febfd0002f57370aef6f8a87ff8263d
|
{
"intermediate": 0.3701304793357849,
"beginner": 0.3783997893333435,
"expert": 0.2514696419239044
}
|
47,607
|
Write a function to convert Celsius to Fahrenheit: write a function that converts Celsius temperatures to Fahrenheit.
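A minimal sketch of the conversion F = C × 9/5 + 32:

```python
def celsius_to_fahrenheit(celsius):
    return celsius * 9 / 5 + 32

print(celsius_to_fahrenheit(100))  # 212.0
```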
|
b310f9653a61eff58a2eeac068d93e77
|
{
"intermediate": 0.33214113116264343,
"beginner": 0.30232927203178406,
"expert": 0.3655295968055725
}
|
47,608
|
We have a Python object of tuples: {('1', 'REDUCTION'), ('0', 'NOT CONCERNED'), ..} and a tuple instance (1, 'X'). Write a test to check whether the key of the instance (i.e. 1) exists among the keys of the object of tuples.
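A minimal sketch; since the set stores its keys as strings while the instance key is an int, the comparison coerces with str():

```python
pairs = {('1', 'REDUCTION'), ('0', 'NOT CONCERNED')}
instance = (1, 'X')

keys = {key for key, _ in pairs}
assert str(instance[0]) in keys, f"key {instance[0]} not found"
```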
|
c9b4d62652866fab64f6d272ceffa332
|
{
"intermediate": 0.5658815503120422,
"beginner": 0.12044215947389603,
"expert": 0.31367629766464233
}
|
47,610
|
Write 10 Python class examples for each of easy, medium, and hard, so that I can ask questions in a coding test exam.
|
a05f1a7be8179dd1a2b75fbd66bac0eb
|
{
"intermediate": 0.1279284805059433,
"beginner": 0.7493155002593994,
"expert": 0.12275595963001251
}
|
47,611
|
model.eval() # set model to evaluation mode
### Step 2: Define a Hook Function
# Next, you define a function that will act as a hook. This function will be called every time the specified layer(s) has a forward pass. For this example, the function simply prints the output shape of each layer, but you can adjust it to log or store these activations as needed.
timestamps = []
def print_hook(name):
def hook(module, input, output):
now = datetime.now()
timestamps.append((name, now))
# print(f"Layer Name: {name}, Timestamp: {now}")
return hook
for name, module in model.named_modules():
module.register_forward_hook(print_hook(name))
Rewrite the code to find the GPU usage of every layer in an AutoModelForCausalLM LLM model.
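A hedged sketch: per-layer GPU memory can be approximated by reading torch.cuda.memory_allocated() inside each forward hook. This records allocator state rather than exact per-layer usage, and assumes model is an AutoModelForCausalLM already moved to a CUDA device.

```python
import torch

gpu_log = []

def gpu_hook(name):
    def hook(module, input, output):
        torch.cuda.synchronize()  # make sure the layer's kernels finished
        gpu_log.append((name, torch.cuda.memory_allocated()))
    return hook

handles = [m.register_forward_hook(gpu_hook(n)) for n, m in model.named_modules()]
# ... run a forward pass, inspect gpu_log, then detach the hooks:
# for h in handles:
#     h.remove()
```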
|
fe51db41c23080baab50fd3521020fe1
|
{
"intermediate": 0.32702991366386414,
"beginner": 0.5164485573768616,
"expert": 0.15652155876159668
}
|
47,612
|
I tried to log this:
console.log(CryptoJS.AES.decrypt(
JSON.parse(localStorage.getItem('OMS-Auth')!),
'OMS-admin'
).toString(CryptoJS.enc.Utf8))
It gives me "malformed UTF-8", but when I remove .toString(CryptoJS.enc.Utf8) it works well. How do I stringify it without getting this error?
|
651c617699e13c857156742b5f96d55a
|
{
"intermediate": 0.4643315076828003,
"beginner": 0.36916348338127136,
"expert": 0.16650499403476715
}
|
47,613
|
import pandas as pd
import re
import os
# Set the directory path
dir_path = "/home/ryans/Documents/Project/Stance-Analysis-of-Tweets/Data/tweeteval/datasets/stance/feminist"
# Define a function to clean the text
def clean_text(text):
# Remove URLs
text = re.sub(r'http\S+', '', text)
# Remove mentions (@username)
text = re.sub(r'@\w+', '', text)
# Remove hashtags (#hashtag)
text = re.sub(r'#\w+', '', text)
# Remove punctuation
text = re.sub(r'[^\w\s]', '', text)
# Convert to lowercase
text = text.lower()
# Remove whitespace at the beginning and end of the text
text = text.strip()
return text
# Create lists to store the cleaned data and labels
cleaned_text_data = []
labels = []
# Loop through the files
for filename in os.listdir(dir_path):
if filename.endswith("_text.txt"):
file_path = os.path.join(dir_path, filename)
# Read the text file
with open(file_path, "r", encoding="utf-8") as f:
text_data = f.readlines()
# Clean the text data
cleaned_text = [clean_text(line) for line in text_data]
cleaned_text_data.extend(cleaned_text)
elif filename.endswith("_labels.txt"):
file_path = os.path.join(dir_path, filename)
# Read the labels file
with open(file_path, "r", encoding="utf-8") as f:
label_data = f.readlines()
# Convert labels to integers
label_data = [int(label) for label in label_data]
labels.extend(label_data)
# Create a DataFrame with the cleaned text data and labels
data = pd.DataFrame({"text": cleaned_text_data, "label": labels})
# Save the cleaned data to a CSV file
data.to_csv("cleaned_data.csv", index=False)
|
cce9b35bee558165301ed75dd715ce63
|
{
"intermediate": 0.5107725262641907,
"beginner": 0.3141845166683197,
"expert": 0.1750430017709732
}
|
47,614
|
I've got this file. How do I play it?
#EXTM3U
#EXT-X-VERSION:5
#EXT-X-INDEPENDENT-SEGMENTS
#EXT-X-MEDIA:TYPE=SUBTITLES,GROUP-ID="sub1",CHARACTERISTICS="public.accessibility.transcribes-spoken-dialog,public.accessibility.describes-music-and-sound",NAME="English CC",AUTOSELECT=YES,DEFAULT=NO,FORCED=NO,LANGUAGE="en",URI="https://manifest-gcp-us-east1-vop1.cfcdn.mux.com/kcSIyA2ZPnToXjeAi8sdRH00GNvsRrxyjrhNfIEC01HMMDEXVoj9tUxLBhnY9l5pGZV6yiEF5F8to/subtitles.m3u8?cdn=cloudflare&expires=1712955600&signature=NjYxOWEwZDBfOWFiMGFmNTFmMTc1ODY1M2NhOWI0MTc5NzA2MmUyYjU1YTU0NjRjMTk1YjM0OTQ3Y2RjMzVlNDdiOTkyMTllNw=="
#EXT-X-STREAM-INF:BANDWIDTH=2340800,AVERAGE-BANDWIDTH=2340800,CODECS="mp4a.40.2,avc1.640020",RESOLUTION=1280x720,CLOSED-CAPTIONS=NONE,SUBTITLES="sub1"
https://manifest-gcp-us-east1-vop1.cfcdn.mux.com/4BO02RNe5BLhqK01NhdVXoXlceDukyTI9bzLD2FCEuAb35lNvHyIHy01mqCNy4Sa44i00lmyCZuykGJ00kFeG7tC4KAP017RZSfn23yBM6EmvB6kpXt592gqtOgA/rendition.m3u8?cdn=cloudflare&expires=1712955600&rid=Ir02FmqsqUnMIpEqH89MdlvWa15QwOo6dCnelCtSI9YI&skid=default&signature=NjYxOWEwZDBfZDI1ZjBiYWE1Y2Y1NTZhNWI2YTllYWRjYzBlYjU1ODE0ZTBiMGFjMTgwNTg5YmJmYzgyNDc0MWMzYzlhOWY4ZQ==
#EXT-X-STREAM-INF:BANDWIDTH=4259200,AVERAGE-BANDWIDTH=4259200,CODECS="mp4a.40.2,avc1.64002a",RESOLUTION=1920x1080,CLOSED-CAPTIONS=NONE,SUBTITLES="sub1"
https://manifest-gcp-us-east1-vop1.cfcdn.mux.com/VslozvwxQFwyJCBH024Q3o6zHeCfkpLw2lEHyNR9LLbig7wzdaVb02lm01X9S1rokrnQX26S3rBarGySH8ZWQ01M8XO6jah01oI7wNQkiu4j8gNk/rendition.m3u8?cdn=cloudflare&expires=1712955600&rid=Ir02FmqsqUnMIpEqH89MdlvWa15QwOo6dCnelCtSI9YI&skid=default&signature=NjYxOWEwZDBfN2Y3MTI0MjNjMTM1NzJhMTNiNTk4ZDcwMDljYmY3MzllM2E4ODQ4NmEzYWYxOGVhOWI2ZjFjOGU4MGZhYjc5MA==
#EXT-X-STREAM-INF:BANDWIDTH=1152800,AVERAGE-BANDWIDTH=1152800,CODECS="mp4a.40.2,avc1.64001f",RESOLUTION=854x480,CLOSED-CAPTIONS=NONE,SUBTITLES="sub1"
https://manifest-gcp-us-east1-vop1.cfcdn.mux.com/XseblA01o01Ln77DoUQJgyTItNeu02f01oE01DrxltrP8BzzYI02xCIfRKl2WSMaDroVMKnCHwPt01clGtVdXLB7kIvjBOb72INbwQy9DJThDEQCR2uGfvarHeKmg/rendition.m3u8?cdn=cloudflare&expires=1712955600&rid=Ir02FmqsqUnMIpEqH89MdlvWa15QwOo6dCnelCtSI9YI&skid=default&signature=NjYxOWEwZDBfODhkNjdjMDM1MzU5N2RlMmY4ODUyYzM1ZTRlOWY3N2FhNDQyMmJiZjEzZWFlMjY2MjgwYWQ1MzgyNTFiYjhkNQ==
#EXT-X-STREAM-INF:BANDWIDTH=595100,AVERAGE-BANDWIDTH=595100,CODECS="mp4a.40.2,avc1.64001e",RESOLUTION=480x270,CLOSED-CAPTIONS=NONE,SUBTITLES="sub1"
https://manifest-gcp-us-east1-vop1.cfcdn.mux.com/A600THyu8pMUFwRrJN6K44PIhuNDbEQnTd5Dl01ch1XVt023paAiZschixZZy02jky27ENN5LEgZjdHE1dXUw01A3dUS9tKNuuS3N6erNn8B009LQ/rendition.m3u8?cdn=cloudflare&expires=1712955600&rid=Ir02FmqsqUnMIpEqH89MdlvWa15QwOo6dCnelCtSI9YI&skid=default&signature=NjYxOWEwZDBfNWQ5OGY3NzNiNGQwYjc1YTdkMzRiZjllMzkxNmE2MTViZDhiYjdiYTBiMTAwNjg0ODg3YjQxNDQ2ZjdmMDExZA==
|
22759fabc76cb8370f8fe271a99a476c
|
{
"intermediate": 0.3528953194618225,
"beginner": 0.3565620481967926,
"expert": 0.2905426025390625
}
|
47,615
|
Add a key named signals to each dictionary below and set its value to '0':
{'open': '65880.2', 'close': '65872.1', 'high': '65880.2', 'low': '65872.0', 'volume': '2.72', 'time': 1713785400000}, {'open': '65874.3', 'close': '65880.3', 'high': '65900.7', 'low': '65827.0', 'volume': '89.40', 'time': 1713785100000}, {'open': '65980.9', 'close': '65875.4', 'high': '65992.6', 'low': '65870.0', 'volume': '140.70', 'time': 1713784800000}, {'open': '65896.7', 'close': '65981.0', 'high': '65981.0', 'low': '65885.8', 'volume': '121.20', 'time': 1713784500000}, {'open': '65843.4', 'close': '65896.5', 'high': '65943.4', 'low': '65815.5', 'volume': '112.06', 'time': 1713784200000}, {'open': '66017.2', 'close': '65844.6', 'high': '66026.3', 'low': '65843.0', 'volume': '123.48', 'time': 1713783900000}, {'open': '66017.9', 'close': '66017.2', 'high': '66082.4', 'low': '65992.4', 'volume': '73.56', 'time': 1713783600000}, {'open': '66018.9', 'close': '66017.9', 'high': '66065.0', 'low': '66000.0', 'volume': '88.81', 'time': 1713783300000}, {'open': '65984.8', 'close': '66019.5', 'high': '66061.3', 'low': '65984.2', 'volume': '69.34', 'time': 1713783000000}, {'open': '65995.3', 'close': '65985.1', 'high': '66025.8', 'low': '65965.4', 'volume': '91.20', 'time': 1713782700000}, {'open': '65915.3', 'close': '65995.0', 'high': '66014.2', 'low': '65914.2', 'volume': '116.15', 'time': 1713782400000}, {'open': '65960.3', 'close': '65913.9', 'high': '65999.0', 'low': '65903.8', 'volume': '120.83', 'time': 1713782100000}, {'open': '65986.4', 'close': '65960.5', 'high': '65986.5', 'low': '65941.2', 'volume': '84.64', 'time': 1713781800000}, {'open': '66059.9', 'close': '65986.5', 'high': '66112.0', 'low': '65965.1', 'volume': '135.67', 'time': 1713781500000}, {'open': '66008.4', 'close': '66059.7', 'high': '66091.8', 'low': '65984.9', 'volume': '162.73', 'time': 1713781200000}, {'open': '65983.2', 'close': '66008.9', 'high': '66017.4', 'low': '65950.0', 'volume': '127.98', 'time': 1713780900000}, {'open': '65988.3', 'close': '65983.6', 'high': '66040.0', 'low': '65919.2', 'volume': '199.54', 'time': 1713780600000},
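A minimal sketch, assuming the dictionaries are collected in a list (only the first entry is repeated here):

```python
candles = [
    {'open': '65880.2', 'close': '65872.1', 'high': '65880.2',
     'low': '65872.0', 'volume': '2.72', 'time': 1713785400000},
    # ... the remaining dictionaries from above
]
for candle in candles:
    candle['signals'] = '0'
```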
|
12722106d51588bf14bc51acb7f01983
|
{
"intermediate": 0.3017561137676239,
"beginner": 0.4861222803592682,
"expert": 0.21212156116962433
}
|
47,616
|
Give me a query in Teradata to extract specific information, such as an address, from a specific column holding unstructured text (e.g. text from a document), using a text-analytics function.
|
edd60a378f76a8bb5c8a56983dde0417
|
{
"intermediate": 0.5128965377807617,
"beginner": 0.3031691312789917,
"expert": 0.1839343011379242
}
|
47,617
|
What is this error? com.fasterxml.jackson.databind.exc.InvalidDefinitionException: Cannot construct instance of
`org.springframework.security.core.Authentication` (no Creators, like default construct, exist): abstract types either
need to be mapped to concrete types, have custom deserializer, or contain additional type information
at [Source: (io.undertow.servlet.spec.ServletInputStreamImpl); line: 1, column: 1]
|
ea13869dff22d73de1d538ea3cec8883
|
{
"intermediate": 0.7271513938903809,
"beginner": 0.16780772805213928,
"expert": 0.10504087060689926
}
|
47,618
|
import asyncio
import time
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.by import By
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
import re
import MetaTrader5 as mt5
import pandas as pd
from telegram import Bot
# telegram
TOKEN = '6015612448:AAFGB5C35wkCItukxTEJrWY3gyqZy-iK5r4'
CHAT_ID = '882283026'
URL = 'https://id.tradingview.com/chart/2f71DPzH/?symbol=OANDA%3AXAUUSD'
login = 124385496
server = "Exness-MT5Trial7"
password = "748798lokaldeN#"
symbol = "XAUUSDm"
volume = 0.01
timeframe = mt5.TIMEFRAME_M5
time_candle = 30 #second
time_check = 900 #second
# chrome_options = Options()
# chrome_options.add_argument('--headless')
# driver = webdriver.Chrome(options=chrome_options)
bot = Bot(TOKEN)
driver = webdriver.Chrome()
driver.get(URL)
async def send_message_async(message):
await bot.send_message(chat_id=CHAT_ID, text=message)
async def wait_dot(driver):
try:
while True:
# Wait until the element appears
element = WebDriverWait(driver, 5).until(
lambda driver: re.search(r'color: rgb\((\d+), (\d+), (\d+)\)', driver.find_element(By.XPATH, "/html/body/div[2]/div[5]/div[1]/div[1]/div/div[2]/div[3]/div[2]/div/div[2]/div/div[2]/div[2]/div[2]/div/div[4]/div").get_attribute("style"))
)
if element:
red, green, blue = map(int, element.groups())
if red == 255 and green == 255 and blue == 255:
print("Sell")
await send_message_async("Dot Sell")
return "Sell"
# Check if color is black
elif red == 0 and green == 0 and blue == 0:
print("Buy")
await send_message_async("Dot Buy")
return "Buy"
else:
print("Unknown color")
await send_message_async("Dot Unknown")
await asyncio.sleep(1)
else:
print("Warna tidak ditemukan")
await send_message_async("Dot Color Unknown")
except Exception as e:
print("Gagal menemukan elemen:", e)
await send_message_async("Dot Still Running")
async def check_trend(driver):
try:
trend = driver.find_element(By.XPATH, "/html/body/div[2]/div[5]/div[1]/div[1]/div/div[2]/div[1]/div[2]/div/div[2]/div[2]/div[2]/div[2]/div[2]/div/div[3]/div")
price = driver.find_element(By.XPATH, "/html/body/div[2]/div[5]/div[1]/div[1]/div/div[2]/div[1]/div[2]/div/div[2]/div[1]/div[1]/div[2]/div/div[5]/div[2]")
trend_value = float(trend.text)
price_value = float(price.text)
if trend_value < price_value:
print("Trend: Buy")
await send_message_async("Trend Buy")
return "Buy"
elif trend_value > price_value:
print("Trend: Sell")
await send_message_async("Trend Sell")
return "Sell"
else:
print("Trend: Neutral")
await send_message_async("Trend Neutral")
except Exception as e:
print("Gagal menemukan Trend:", e)
await send_message_async("Trend Not Found")
prev_candle_color = "merah"
async def wait_candle(symbol, timeframe, initial_color, target_color):
global prev_candle_color
if not mt5.initialize():
print("initialize() failed, error code =", mt5.last_error())
return None
while True:
candles = mt5.copy_rates_from_pos(symbol, timeframe, 0, 2)
df = pd.DataFrame(candles)
current_candle_color = 'hijau' if df['close'][1] > df['close'][0] else 'merah'
print("Warna candle saat ini:", current_candle_color)
await send_message_async(f"Candle Color now: {current_candle_color}")
# If the trade direction is buy, wait for a change from red to green
if prev_candle_color == 'merah' and current_candle_color == 'hijau' and target_color == 'hijau':
print("Red-to-green candle change detected!")
await send_message_async("Red Candle to Green Detected!!")
prev_candle_color = current_candle_color
mt5.shutdown()
return True
# If the trade direction is sell, wait for a change from green to red
elif prev_candle_color == 'hijau' and current_candle_color == 'merah' and target_color == 'merah':
print("Green-to-red candle change detected!")
await send_message_async("Green Candle to Red Detected!!")
prev_candle_color = current_candle_color
mt5.shutdown()
return True
else:
prev_candle_color = current_candle_color
await asyncio.sleep(2)
async def wait_candle2(symbol, timeframe, initial_color, target_color, order_type):
prev_candle_color = initial_color
if not mt5.initialize():
print("initialize() failed, error code =", mt5.last_error())
return None
candles = mt5.copy_rates_from_pos(symbol, timeframe, 0, 2)
df = pd.DataFrame(candles)
current_candle_color = 'hijau' if df['close'][1] > df['close'][0] else 'merah'
# print("Warna candle sebelumnya:", prev_candle_color)
# print("Warna candle saat ini:", current_candle_color)
if order_type == 'buy':
return prev_candle_color == 'merah' and current_candle_color == 'hijau'
elif order_type == 'sell':
return prev_candle_color == 'hijau' and current_candle_color == 'merah'
else:
result = False
# After the check, reset prev_candle_color
prev_candle_color = current_candle_color
return result
async def execute_trade(symbol, timeframe, initial_color, target_color, order_type):
global prev_candle_color
prev_candle_color = initial_color
result = await wait_candle(symbol, timeframe, initial_color, target_color)
await asyncio.sleep(time_candle)
result2 = await wait_candle2(symbol, timeframe, initial_color, target_color, order_type)
if result and result2:
await send_order(order_type=order_type)
else:
await execute_trade(symbol, timeframe, initial_color, target_color, order_type)
async def check_candle_after_order(symbol, timeframe, duration, order_type):
global prev_candle_color
if not mt5.initialize():
print("initialize() failed, error code =", mt5.last_error())
return None
start_time = time.time()
end_time = start_time + duration
while time.time() < end_time:
candles = mt5.copy_rates_from_pos(symbol, timeframe, 0, 2)
df = pd.DataFrame(candles)
current_candle_color = 'hijau' if df['close'][1] > df['open'][1] else 'merah'
print("Warna candle saat ini setelah open order:", current_candle_color)
await send_message_async(f"Candle Color After Order: {current_candle_color}")
if order_type == "buy" and current_candle_color == 'merah':
print("Candle close nya merah, kembali ke wait_candle")
await send_message_async("Red Candle, Back to Wait Candle")
prev_candle_color = current_candle_color
return False
elif order_type == "sell" and current_candle_color == 'hijau':
print("Candle close nya hijau, kembali ke wait_candle")
await send_message_async("Green Candle, Back to Wait Candle")
prev_candle_color = current_candle_color
return False
await asyncio.sleep(time_candle)
print("Waktu pengecekan candle setelah open order sudah habis, lanjut ke langkah berikutnya")
await send_message_async("Candle Check Timeout")
return True
async def send_order(login=login, server=server, password=password, symbol=symbol, volume=volume, order_type=None):
if not mt5.initialize(login=login, server=server, password=password):
print("initialize() failed, error code =", mt5.last_error())
return
action = mt5.TRADE_ACTION_DEAL
order_type = mt5.ORDER_TYPE_BUY if order_type == "buy" else mt5.ORDER_TYPE_SELL
result = mt5.order_send({
"action": action,
"symbol": symbol,
"volume": volume,
"type": order_type,
"price": mt5.symbol_info_tick(symbol).ask if order_type == mt5.ORDER_TYPE_BUY else mt5.symbol_info_tick(symbol).bid,
"deviation": 20,
"magic": 234000,
"type_time": mt5.ORDER_TIME_GTC,
"type_filling": mt5.ORDER_FILLING_FOK,
})
if result.retcode == mt5.TRADE_RETCODE_DONE:
print("Order successful")
await send_message_async("Order Position Open")
else:
print("Order failed")
await send_message_async("Order Failed")
mt5.shutdown()
async def main():
while True:
try:
await send_message_async("Waiting.....")
driver.refresh()
dot_result = await wait_dot(driver)
trend_result = await check_trend(driver)
if dot_result == "Buy" and trend_result == "Sell":
print("Wait Candle for Sell")
await send_message_async("Wait Candle for Sell")
await execute_trade(symbol, timeframe, "hijau", "merah", order_type="sell")
result = await check_candle_after_order(symbol, timeframe, time_check, order_type="sell")
if not result:
await execute_trade(symbol, timeframe, "hijau", "merah", order_type="sell")
elif dot_result == "Sell" and trend_result == "Buy":
print("Wait Candle for Buy")
await send_message_async("Wait Candle for Buy")
await execute_trade(symbol, timeframe, "merah", "hijau", order_type="buy")
result = await check_candle_after_order(symbol, timeframe, time_check, order_type="buy")
if not result:
await execute_trade(symbol, timeframe, "merah", "hijau", order_type="buy")
else:
print("No suitable conditions found for trade execution.")
await send_message_async("Wait next Dot, No Trade Open")
except Exception as e:
# Handle errors here
print("An error occurred:", str(e))
await send_message_async("An error occurred: " + str(e))
await asyncio.sleep(3) # Pause briefly before continuing the loop
continue
finally:
# Optional: Pause execution before continuing the loop
await asyncio.sleep(5)
asyncio.run(main())
|
7e305c231c3f97a40caf260367bd8358
|
{
"intermediate": 0.34997230768203735,
"beginner": 0.46118414402008057,
"expert": 0.1888435184955597
}
|
47,619
|
package com.mns.oms.batch.config;
import java.util.Date;
import java.util.HashMap;
import java.util.Map;
import org.springframework.batch.core.Job;
import org.springframework.batch.core.JobParameters;
import org.springframework.batch.core.JobParametersBuilder;
import org.springframework.batch.core.Step;
import org.springframework.batch.core.configuration.annotation.EnableBatchProcessing;
import org.springframework.batch.core.configuration.annotation.StepScope;
import org.springframework.batch.core.job.builder.JobBuilder;
import org.springframework.batch.core.repository.JobRepository;
import org.springframework.batch.core.repository.support.JobRepositoryFactoryBean;
import org.springframework.batch.core.step.builder.StepBuilder;
import org.springframework.batch.core.launch.JobLauncher;
import org.springframework.batch.core.launch.support.RunIdIncrementer;
import org.springframework.batch.item.data.MongoItemReader;
import org.springframework.batch.support.transaction.ResourcelessTransactionManager;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.beans.factory.annotation.Qualifier;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.core.task.TaskExecutor;
import org.springframework.data.domain.Sort.Direction;
import org.springframework.data.mongodb.core.MongoTemplate;
import org.springframework.scheduling.annotation.EnableScheduling;
import org.springframework.scheduling.annotation.Scheduled;
import com.mns.oms.batch.domain.CarrierData;
import com.mns.oms.batch.listener.CarrierStepListener;
import com.mns.oms.batch.listener.JobStatusNotificationListener;
import com.mns.oms.batch.model.BeamDataDTO;
import com.mns.oms.batch.processor.BeamDataProcessor;
import com.mns.oms.batch.writer.KafkaBatchWriter;
import org.springframework.transaction.PlatformTransactionManager;
/**
* @author Mrinmoy Mandal
*
* Module: WISMR
*
*
*/
@Configuration
@EnableBatchProcessing
@EnableScheduling
@ConditionalOnProperty(value = "beam.batchjob.enabled", matchIfMissing = true, havingValue = "true")
public class BeamDataBatchConfiguration {
@Autowired
private JobStatusNotificationListener jobListener;
@Value("${beam.data.write.chunk.size}")
private String chunkSize;
@Autowired
@Qualifier("beamTaskExecutor")
private TaskExecutor beamTaskExecutor;
@Value("${beam.batchjob.step.partitioner.each.range}")
private int range;
@Autowired
private MongoTemplate mongoTemplate;
// @Autowired
private JobRepository jobRepository;
@Autowired
private PlatformTransactionManager transactionManager;
@Autowired
private JobLauncher jobLauncher;
@Scheduled(cron = "${beam.spring.batch.job.cron.expression}")
public void ffiSchedule() {
try {
JobParameters jobParameters = new JobParametersBuilder().addDate("launchDate", new Date())
.toJobParameters();
jobLauncher.run(exportDataToBeam(), jobParameters);
} catch (Exception e) {
e.printStackTrace();
}
}
@Bean
public JobRepository jobRepository() throws Exception {
MongoJobRepositoryFactoryBean factory = new MongoJobRepositoryFactoryBean();
factory.setMongoTemplate(mongoTemplate);
factory.afterPropertiesSet();
return factory.getObject();
}
@Bean
@StepScope
public MongoItemReader<CarrierData> mongoItemReader(@Value("#{stepExecutionContext['minValue']}") Long minValue,
@Value("#{stepExecutionContext['maxValue']}") Long maxValue) {
MongoItemReader<CarrierData> reader = new MongoItemReader<>();
reader.setTemplate(mongoTemplate);
Map<String, Direction> sortMap = new HashMap<>();
sortMap.put("_id", Direction.DESC);
reader.setSort(sortMap);
reader.setTargetType(CarrierData.class);
reader.setPageSize(range);
reader.setQuery("{isProcessed: {$eq: false} }");
return reader;
}
@Bean
public BeamDataProcessor beamDataProcessor() {
return new BeamDataProcessor();
}
@Autowired
private KafkaBatchWriter kafkaItemWriter;
@Bean
public Job exportDataToBeam() throws Exception {
return new JobBuilder("exportDataToBeam", jobRepository)
.incrementer(new RunIdIncrementer())
.listener(jobListener)
.start(beamMasterStep())
.build();
}
@Bean
public Step beamMasterStep() throws Exception {
return new StepBuilder("beamStep", jobRepository)
.<CarrierData, BeamDataDTO>chunk(Integer.valueOf(chunkSize), transactionManager)
.reader(mongoItemReader(null, null))
.processor(beamDataProcessor())
.writer(kafkaItemWriter)
.taskExecutor(beamTaskExecutor)
.listener(new CarrierStepListener())
.build();
}
fix it
}
Cannot resolve symbol 'MongoJobRepositoryFactoryBean'
|
397cc5b6f7c2da7f3da411b7f4e5c062
|
{
"intermediate": 0.3138139247894287,
"beginner": 0.44402381777763367,
"expert": 0.24216228723526
}
|
47,620
|
in this javascript for leaflet.js I am adding an image overlay when grid squares are clicked. How can I detect when four squares have been clicked in a 2x2 pattern - 'var map = L.tileLayer('', {
maxZoom: 20,
subdomains: ['mt0', 'mt1', 'mt2', 'mt3']
});
// initialize the map on the "map" div with a given center and zoom
var map = L.map('map', {
layers: [map]
}).setView([-5.0750, 19.4250], 13);
// Flag to track grid click event state (combined for roads and parks)
var gridClickEnabled = false;
// Function to handle square click and add an image overlay at the center for houses
function houseSquareClick(e) {
if (gridClickEnabled) {
var clickedSquare = e.target.feature;
// Get the center of the clicked square
var centerCoords = turf.centroid(clickedSquare);
// Get the bounding box of the clicked square
var bbox = e.target.getBounds();
var imageUrl = 'https://cdn.glitch.global/12fb2e80-41df-442d-8bf7-be84a3d85f59/_5bf487a3-e022-43b0-bbbb-29c7d2337032.jpeg?v=1713694179855';
var latLngBounds = L.latLngBounds([[bbox.getSouth(), bbox.getWest()], [bbox.getNorth(), bbox.getEast()]]);
var imageOverlay = L.imageOverlay(imageUrl, latLngBounds, {
opacity: 0.8,
interactive: true
}).addTo(map);
}
}
// Function to handle square click and update color for parks
// Function to handle square click and add an image overlay at the center for houses
function parkSquareClick(e) {
if (gridClickEnabled) {
var clickedSquare = e.target.feature;
// Get the center of the clicked square
var centerCoords = turf.centroid(clickedSquare);
// Get the bounding box of the clicked square
var bbox = e.target.getBounds();
var imageUrl = 'https://cdn.glitch.global/12fb2e80-41df-442d-8bf7-be84a3d85f59/_a771ce0e-61e1-44e5-860f-716e495098e7.jpeg?v=1713694447500';
var latLngBounds = L.latLngBounds([[bbox.getSouth(), bbox.getWest()], [bbox.getNorth(), bbox.getEast()]]);
var imageOverlay = L.imageOverlay(imageUrl, latLngBounds, {
opacity: 0.8,
interactive: true
}).addTo(map);
}
}
// Function to handle square click and update color for roads (optional)
function squareClick(e) {
if (gridClickEnabled) {
var clickedSquare = e.target.feature;
clickedSquare.properties = {fillColor: 'gray', fillOpacity: 1 }; // Change color to black
e.target.setStyle(clickedSquare.properties); // Update style on map
}
}
// Get references to the button elements
var parksButton = document.getElementById("parksButton");
var roadsButton = document.getElementById("roadsButton");
var housesButton = document.getElementById("housesButton");
// Function to toggle grid click event based on button
function toggleGridClick(featureType) { // Renamed for clarity
// Update gridClickEnabled based on button click, but only if different from current state
if (featureType === "parks") {
gridClickEnabled = !gridClickEnabled || featureType !== "roads" || featureType !== "houses"; // Handle all three features
} else if (featureType === "roads") {
gridClickEnabled = !gridClickEnabled || featureType !== "parks" || featureType !== "houses"; // Handle all three features
} else if (featureType === "houses") { // New feature type for houses
gridClickEnabled = !gridClickEnabled || featureType !== "parks" || featureType !== "roads"; // Handle all three features
}
map.eachLayer(function(layer) { // Check for existing square grid layer
if (layer.feature && layer.feature.geometry.type === 'Polygon') {
layer.off('click'); // Remove all click listeners before adding a new one
if (gridClickEnabled) {
if (featureType === "parks") {
layer.on('click', parkSquareClick); // Add click listener for parks
parksButton.innerText = "Parks On";
roadsButton.innerText = "Roads Off";
housesButton.innerText = "Houses Off";
// Update button text
} else if (featureType === "roads") { // Optional for roads button
layer.on('click', squareClick); // Add click listener for roads
roadsButton.innerText = "Roads On";
parksButton.innerText = "Parks Off";
housesButton.innerText = "Houses Off";
// Update button text (optional)
}else if (featureType === "houses") { // New click listener for houses
layer.on('click', houseSquareClick); // Add click listener for houses
housesButton.innerText = "Houses On";
parksButton.innerText = "Parks Off";
roadsButton.innerText = "Roads Off"; // Update button text for houses
}
} else {
parksButton.innerText = "Parks Off"; // Update button text
roadsButton.innerText = "Roads Off"; // Update button text (optional)
housesButton.innerText = "Houses Off"; // Update button text (optional)
}
}
});
}
// Add click event listeners to the buttons
parksButton.addEventListener("click", function() {
toggleGridClick("parks");
});
roadsButton.addEventListener("click", function() {
toggleGridClick("roads"); // Optional for roads button
});
housesButton.addEventListener("click", function() {
toggleGridClick("houses");
});
// Square Grid
var bbox = [19.35, -5, 19.5, -5.15];
var cellSide = 1;
var options = {units: 'kilometers'};
var squareGrid = turf.squareGrid(bbox, cellSide, options);
// Add GeoJSON layer with click event handler (optional, can be removed)
L.geoJSON(squareGrid, {
style: function (feature) {
return {weight: 0.5, fillOpacity: 0 }; // Initial style for squares
}
}).addTo(map);
'
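The check itself is language-agnostic, so here is the idea sketched in Python: record the grid row/column of each clicked square (derivable from the square's bounding box and the cell side), then test whether the four recorded cells form exactly {(r, c), (r, c+1), (r+1, c), (r+1, c+1)}.

```python
def is_2x2(cells):
    """cells: four (row, col) grid coordinates of clicked squares."""
    if len(set(cells)) != 4:
        return False
    rows = {r for r, _ in cells}
    cols = {c for _, c in cells}
    return (len(rows) == 2 and len(cols) == 2 and
            max(rows) - min(rows) == 1 and max(cols) - min(cols) == 1)

print(is_2x2([(3, 5), (3, 6), (4, 5), (4, 6)]))  # True
print(is_2x2([(3, 5), (3, 7), (4, 5), (4, 6)]))  # False
```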
|
b5d446c8cb9cf0aa1fc3d6257b20f3ab
|
{
"intermediate": 0.3524804711341858,
"beginner": 0.4055292308330536,
"expert": 0.241990327835083
}
|
47,621
|
@Bean
public JobRepository jobRepository() throws Exception {
MongoJobRepositoryFactoryBean factory = new MongoJobRepositoryFactoryBean();
factory.setMongoTemplate(mongoTemplate);
factory.afterPropertiesSet();
return factory.getObject();
}
Cannot resolve symbol 'MongoJobRepositoryFactoryBean'. Fix it.
|
ed7152f3b6d7832363cb74612c06c753
|
{
"intermediate": 0.48198357224464417,
"beginner": 0.3477025628089905,
"expert": 0.17031390964984894
}
|
47,622
|
In the code below, my intention is to tune only selected features in the selected nodes, keeping the unselected nodes (and their features), as well as the non-selected features of the selected nodes, unchanged.
x = self.gat2(x, edge_index)
print("x.size():", x.size())
total size of 'x' is
x.size(): torch.Size([20, 24])
20 nodes in total, each with 24 features
Among the 20 nodes, only 11 are selected, identified by a value of 1.0000 at the index numbers [7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17] (the first selected node has the value 1.0000 at index number 7, and similarly for the selected nodes at 8 through 17).
The features to tune differ across the selected nodes:
for the first eight selected nodes [7, 8, 9, 10, 11, 12, 13, 14], the features to tune are at index numbers [18, 19];
for the selected node [15], the feature to tune is at index number [20];
for the selected node [16], the feature to tune is at index number [21];
for the selected node [17], the feature to tune is at index number [22].
After the tuning step, the values at indices [18, 19] must stay identical for the paired selected nodes [(7, 8), (9, 10), (11, 14)], so synchronization is implemented after tuning by averaging the values within each pair. All 20 nodes (static and dynamic) and their 24 features must stay in the GNN model, with masking applied where needed for selective feature tuning.
Now please identify where this logic is not followed in the custom GNN code below:
class CustomGNN(torch.nn.Module):
def __init__(self, in_channels, out_channels):
super(CustomGNN, self).__init__()
self.gat1 = GATConv(in_channels, 8, heads=8, dropout=0.6)
self.gat2 = GATConv(8 * 8, out_channels, heads=1, concat=False, dropout=0.6)
self.component_nodes_indices = torch.tensor([7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17], dtype=torch.long)
# Define masks for tuning selective features
self.m_features_mask = torch.zeros(24, dtype=torch.bool)
self.m_features_mask[[18, 19]] = True
self.c_features_mask = torch.zeros(24, dtype=torch.bool)
self.c_features_mask[20] = True
self.i_features_mask = torch.zeros(24, dtype=torch.bool)
self.i_features_mask[21] = True
self.v_features_mask = torch.zeros(24, dtype=torch.bool)
self.v_features_mask[22] = True
def forward(self, x, edge_index):
x = F.dropout(x, p=0.6, training=self.training)
x = F.elu(self.gat1(x, edge_index))
x = F.dropout(x, p=0.6, training=self.training)
x = self.gat2(x, edge_index)
print("x.size():", x.size())
#print("x", x)
# Synchronize updates for defined node pairs before updating dynamic features
# Averaging the values for the synchronous pairs for features at indices [18] and [19]
# Ensure the original_features tensor is prepared for this operation to not alter unrelated features.
original_features = x.clone()
# Define synchronous node pairs and their associated feature indices
sync_pairs = [(7, 8), (9, 10), (11, 14)] # Indices in self.component_nodes_indices
features_to_sync = [18, 19]
# Perform synchronization
for pair in sync_pairs:
indices = [self.component_nodes_indices[pair[0]], self.component_nodes_indices[pair[1]]]
assert max(indices) < x.size(0), f"One of the indices {max(indices)} is out of bounds for tensor with {x.size(0)} nodes."
# Calculate the mean of the paired node features for selected features
avg_features = original_features[indices, :][:, features_to_sync].mean(dim=0)
# Update the original tensor with averaged features for both nodes in the pair for specified features
original_features[indices, features_to_sync] = avg_features
# Apply mask and update dynamic features (if there’s any additional logic for individual component node updates)
dynamic_updates = torch.zeros_like(x)
# Update logic as previous, but now considering synchronization is already handled
# Note: With the current use-case, dynamic updates remain as initially set.
# This placeholder exists for cases where further dynamic processing is applied after synchronization.
# Ensuring static features are kept as is from original_features and only dynamic are updated
return original_features * (1 - dynamic_updates) + x * dynamic_updates
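One likely mismatch, on a hedged reading: the prose defines the pairs as node indices [(7, 8), (9, 10), (11, 14)], but the loop treats them as positions into component_nodes_indices, so (7, 8) resolves to nodes 14 and 15, and positions 11 and 14 fall outside the 11-element tensor entirely (an IndexError). A corrected sketch indexes the nodes directly; note also that dynamic_updates stays all-zero, so the forward pass currently returns original_features unchanged unless a real per-node/per-feature mask is filled in.

```python
import torch

def sync_pairs_inplace(features: torch.Tensor) -> None:
    """Average features [18, 19] across each synchronized node pair."""
    sync_pairs = [(7, 8), (9, 10), (11, 14)]  # actual node indices
    features_to_sync = [18, 19]
    for a, b in sync_pairs:
        avg = features[[a, b]][:, features_to_sync].mean(dim=0)
        features[a, features_to_sync] = avg
        features[b, features_to_sync] = avg

x = torch.randn(20, 24)
sync_pairs_inplace(x)
assert torch.equal(x[7, 18:20], x[8, 18:20])  # pair now synchronized
```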
|
28c4c01bff9ecd92bc66666d9b039197
|
{
"intermediate": 0.28568920493125916,
"beginner": 0.5249229669570923,
"expert": 0.18938779830932617
}
|
47,623
|
In this javascript for leaflet.js I wish to create an array for 'houseSquareClick' and add a console log when four house image overlays have been added to the map - 'var map = L.tileLayer('', {
maxZoom: 20,
subdomains: ['mt0', 'mt1', 'mt2', 'mt3']
});
// initialize the map on the "map" div with a given center and zoom
var map = L.map('map', {
layers: [map]
}).setView([-5.0750, 19.4250], 13);
// Flag to track grid click event state (combined for roads and parks)
var gridClickEnabled = false;
// Function to handle square click and add an image overlay at the center for houses
function houseSquareClick(e) {
if (gridClickEnabled) {
var clickedSquare = e.target.feature;
// Get the center of the clicked square
var centerCoords = turf.centroid(clickedSquare);
// Get the bounding box of the clicked square
var bbox = e.target.getBounds();
var imageUrl = 'https://cdn.glitch.global/12fb2e80-41df-442d-8bf7-be84a3d85f59/_5bf487a3-e022-43b0-bbbb-29c7d2337032.jpeg?v=1713694179855';
var latLngBounds = L.latLngBounds([[bbox.getSouth(), bbox.getWest()], [bbox.getNorth(), bbox.getEast()]]);
var imageOverlay = L.imageOverlay(imageUrl, latLngBounds, {
opacity: 0.8,
interactive: true
}).addTo(map);
}
}
// Function to handle square click and update color for parks
// Function to handle square click and add an image overlay at the center for houses
function parkSquareClick(e) {
if (gridClickEnabled) {
var clickedSquare = e.target.feature;
// Get the center of the clicked square
var centerCoords = turf.centroid(clickedSquare);
// Get the bounding box of the clicked square
var bbox = e.target.getBounds();
var imageUrl = 'https://cdn.glitch.global/12fb2e80-41df-442d-8bf7-be84a3d85f59/_a771ce0e-61e1-44e5-860f-716e495098e7.jpeg?v=1713694447500';
var latLngBounds = L.latLngBounds([[bbox.getSouth(), bbox.getWest()], [bbox.getNorth(), bbox.getEast()]]);
var imageOverlay = L.imageOverlay(imageUrl, latLngBounds, {
opacity: 0.8,
interactive: true
}).addTo(map);
}
}
// Function to handle square click and update color for roads (optional)
function squareClick(e) {
if (gridClickEnabled) {
var clickedSquare = e.target.feature;
clickedSquare.properties = {fillColor: 'gray', fillOpacity: 1 }; // Change color to black
e.target.setStyle(clickedSquare.properties); // Update style on map
}
}
// Get references to the button elements
var parksButton = document.getElementById("parksButton");
var roadsButton = document.getElementById("roadsButton");
var housesButton = document.getElementById("housesButton");
// Function to toggle grid click event based on button
function toggleGridClick(featureType) { // Renamed for clarity
// Update gridClickEnabled based on button click, but only if different from current state
if (featureType === "parks") {
gridClickEnabled = !gridClickEnabled || featureType !== "roads" || featureType !== "houses"; // Handle all three features
} else if (featureType === "roads") {
gridClickEnabled = !gridClickEnabled || featureType !== "parks" || featureType !== "houses"; // Handle all three features
} else if (featureType === "houses") { // New feature type for houses
gridClickEnabled = !gridClickEnabled || featureType !== "parks" || featureType !== "roads"; // Handle all three features
}
map.eachLayer(function(layer) { // Check for existing square grid layer
if (layer.feature && layer.feature.geometry.type === 'Polygon') {
layer.off('click'); // Remove all click listeners before adding a new one
if (gridClickEnabled) {
if (featureType === "parks") {
layer.on('click', parkSquareClick); // Add click listener for parks
parksButton.innerText = "Parks On";
roadsButton.innerText = "Roads Off";
housesButton.innerText = "Houses Off";
// Update button text
} else if (featureType === "roads") { // Optional for roads button
layer.on('click', squareClick); // Add click listener for roads
roadsButton.innerText = "Roads On";
parksButton.innerText = "Parks Off";
housesButton.innerText = "Houses Off";
// Update button text (optional)
}else if (featureType === "houses") { // New click listener for houses
layer.on('click', houseSquareClick); // Add click listener for houses
housesButton.innerText = "Houses On";
parksButton.innerText = "Parks Off";
roadsButton.innerText = "Roads Off"; // Update button text for houses
}
} else {
parksButton.innerText = "Parks Off"; // Update button text
roadsButton.innerText = "Roads Off"; // Update button text (optional)
housesButton.innerText = "Houses Off"; // Update button text (optional)
}
}
});
}
// Add click event listeners to the buttons
parksButton.addEventListener("click", function() {
toggleGridClick("parks");
});
roadsButton.addEventListener("click", function() {
toggleGridClick("roads"); // Optional for roads button
});
housesButton.addEventListener("click", function() {
toggleGridClick("houses");
});
// Square Grid
var bbox = [19.35, -5, 19.5, -5.15];
var cellSide = 1;
var options = {units: 'kilometers'};
var squareGrid = turf.squareGrid(bbox, cellSide, options);
// Add GeoJSON layer with click event handler (optional, can be removed)
L.geoJSON(squareGrid, {
style: function (feature) {
return {weight: 0.5, fillOpacity: 0 }; // Initial style for squares
}
}).addTo(map);
'
|
c0e28ff5f0d4453d1542ba1148099976
|
{
"intermediate": 0.3660309314727783,
"beginner": 0.43441686034202576,
"expert": 0.1995522528886795
}
|
47,624
|
How do I initialize this bean? public class NcsAuthenticationProvider implements AuthenticationProvider {
|
7f8dc5cf8324960128e3904afff1f184
|
{
"intermediate": 0.32841044664382935,
"beginner": 0.3255918622016907,
"expert": 0.3459976613521576
}
|
47,625
|
How do I mock a function's return value with gtest?
|
0d0e3c20526658509ad6629c30ec2d46
|
{
"intermediate": 0.36684486269950867,
"beginner": 0.3485441207885742,
"expert": 0.2846110165119171
}
|
47,626
|
here is the pom …
@Bean
public JobRepository jobRepository() throws Exception {
    // give an implementation for this method using Mongo
}
|
7772a32e9fb4889fb000b748129a0cba
|
{
"intermediate": 0.5466774702072144,
"beginner": 0.26007890701293945,
"expert": 0.19324372708797455
}
|
47,627
|
In this javascript for leaflet.js where I have commented '// check for 2x2 pattern here' how can I determine if the four added house imageOverlays are in a 2x2 pattern on the square grid. - 'var map = L.tileLayer('', {
maxZoom: 20,
subdomains: ['mt0', 'mt1', 'mt2', 'mt3']
});
// initialize the map on the "map" div with a given center and zoom
var map = L.map('map', {
layers: [map]
}).setView([-5.0750, 19.4250], 13);
// Flag to track grid click event state (combined for roads and parks)
var gridClickEnabled = false;
// Array to keep track of house image overlays
var houseImageOverlays = [];
// Function to handle square click and add an image overlay at the center for houses
function houseSquareClick(e) {
if (gridClickEnabled) {
var clickedSquare = e.target.feature;
// Get the center of the clicked square
var centerCoords = turf.centroid(clickedSquare);
// Get the bounding box of the clicked square
var bbox = e.target.getBounds();
var imageUrl = 'https://cdn.glitch.global/12fb2e80-41df-442d-8bf7-be84a3d85f59/_5bf487a3-e022-43b0-bbbb-29c7d2337032.jpeg?v=1713694179855';
var latLngBounds = L.latLngBounds([[bbox.getSouth(), bbox.getWest()], [bbox.getNorth(), bbox.getEast()]]);
var imageOverlay = L.imageOverlay(imageUrl, latLngBounds, {
opacity: 0.8,
interactive: true
}).addTo(map);
// Add the image overlay to the array
houseImageOverlays.push(imageOverlay);
if (houseImageOverlays.length === 4) {
// Check for 2x2 pattern here
console.log('Four house image overlays have been added to the map');
}
}
}
// Function to handle square click and update color for parks
// Function to handle square click and add an image overlay at the center for houses
function parkSquareClick(e) {
if (gridClickEnabled) {
var clickedSquare = e.target.feature;
// Get the center of the clicked square
var centerCoords = turf.centroid(clickedSquare);
// Get the bounding box of the clicked square
var bbox = e.target.getBounds();
var imageUrl = 'https://cdn.glitch.global/12fb2e80-41df-442d-8bf7-be84a3d85f59/_a771ce0e-61e1-44e5-860f-716e495098e7.jpeg?v=1713694447500';
var latLngBounds = L.latLngBounds([[bbox.getSouth(), bbox.getWest()], [bbox.getNorth(), bbox.getEast()]]);
var imageOverlay = L.imageOverlay(imageUrl, latLngBounds, {
opacity: 0.8,
interactive: true
}).addTo(map);
}
}
// Function to handle square click and update color for roads (optional)
function squareClick(e) {
if (gridClickEnabled) {
var clickedSquare = e.target.feature;
clickedSquare.properties = {fillColor: 'gray', fillOpacity: 1 }; // Change color to black
e.target.setStyle(clickedSquare.properties); // Update style on map
}
}
// Get references to the button elements
var parksButton = document.getElementById("parksButton");
var roadsButton = document.getElementById("roadsButton");
var housesButton = document.getElementById("housesButton");
// Function to toggle grid click event based on button
function toggleGridClick(featureType) { // Renamed for clarity
// Update gridClickEnabled based on button click, but only if different from current state
if (featureType === "parks") {
gridClickEnabled = !gridClickEnabled || featureType !== "roads" || featureType !== "houses"; // Handle all three features
} else if (featureType === "roads") {
gridClickEnabled = !gridClickEnabled || featureType !== "parks" || featureType !== "houses"; // Handle all three features
} else if (featureType === "houses") { // New feature type for houses
gridClickEnabled = !gridClickEnabled || featureType !== "parks" || featureType !== "roads"; // Handle all three features
}
map.eachLayer(function(layer) { // Check for existing square grid layer
if (layer.feature && layer.feature.geometry.type === 'Polygon') {
layer.off('click'); // Remove all click listeners before adding a new one
if (gridClickEnabled) {
if (featureType === "parks") {
layer.on('click', parkSquareClick); // Add click listener for parks
parksButton.innerText = "Parks On";
roadsButton.innerText = "Roads Off";
housesButton.innerText = "Houses Off";
// Update button text
} else if (featureType === "roads") { // Optional for roads button
layer.on('click', squareClick); // Add click listener for roads
roadsButton.innerText = "Roads On";
parksButton.innerText = "Parks Off";
housesButton.innerText = "Houses Off";
// Update button text (optional)
}else if (featureType === "houses") { // New click listener for houses
layer.on('click', houseSquareClick); // Add click listener for houses
housesButton.innerText = "Houses On";
parksButton.innerText = "Parks Off";
roadsButton.innerText = "Roads Off"; // Update button text for houses
}
} else {
parksButton.innerText = "Parks Off"; // Update button text
roadsButton.innerText = "Roads Off"; // Update button text (optional)
housesButton.innerText = "Houses Off"; // Update button text (optional)
}
}
});
}
// Add click event listeners to the buttons
parksButton.addEventListener("click", function() {
toggleGridClick("parks");
});
roadsButton.addEventListener("click", function() {
toggleGridClick("roads"); // Optional for roads button
});
housesButton.addEventListener("click", function() {
toggleGridClick("houses");
});
// Square Grid
var bbox = [19.35, -5, 19.5, -5.15];
var cellSide = 1;
var options = {units: 'kilometers'};
var squareGrid = turf.squareGrid(bbox, cellSide, options);
// Add GeoJSON layer with click event handler (optional, can be removed)
L.geoJSON(squareGrid, {
style: function (feature) {
return {weight: 0.5, fillOpacity: 0 }; // Initial style for squares
}
}).addTo(map);
'
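One hedged way to fill in that check, assuming each overlay covers exactly one grid cell, so the four south-west corners should form two distinct latitudes times two distinct longitudes (for strict adjacency you could additionally verify the two latitudes/longitudes are one cell side apart):
function isTwoByTwo(overlays) {
  var eps = 1e-9;
  var uniq = function (vals) {
    var s = vals.slice().sort(function (a, b) { return a - b; });
    return s.filter(function (v, i) { return i === 0 || Math.abs(v - s[i - 1]) > eps; });
  };
  var lats = uniq(overlays.map(function (o) { return o.getBounds().getSouth(); }));
  var lngs = uniq(overlays.map(function (o) { return o.getBounds().getWest(); }));
  if (lats.length !== 2 || lngs.length !== 2) return false;
  // all four (lat, lng) combinations must be occupied
  var seen = {};
  overlays.forEach(function (o) {
    var b = o.getBounds();
    seen[(Math.abs(b.getSouth() - lats[0]) < eps ? 0 : 1) + ',' +
         (Math.abs(b.getWest() - lngs[0]) < eps ? 0 : 1)] = true;
  });
  return Object.keys(seen).length === 4;
}
// usage inside houseSquareClick: if (isTwoByTwo(houseImageOverlays)) { ... }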
|
5b27ffb1fca514038db18ea618bde708
|
{
"intermediate": 0.383200466632843,
"beginner": 0.4028944671154022,
"expert": 0.21390511095523834
}
|
47,628
|
Okay, so I have Notion; give me an in-depth and catchy comparison of ChatGPT 3.5, Turbo, ChatGPT 4, Gemini, the latest Llama and the latest Claude, all in a table.
|
af96f139c25445d398fa500c43a5dd7b
|
{
"intermediate": 0.27308163046836853,
"beginner": 0.30509501695632935,
"expert": 0.4218233823776245
}
|
47,629
|
rowIndex = Application.Match(dateValue, wsDestination.Columns(1), 0) + 1 raises a type mismatch bug in:
Option Explicit
Sub CreatePivotTablevvvvvvvvvvvvvvvvvvvvvvv()
Dim wsSource As Worksheet
Dim wsDestination As Worksheet
Dim lastRow As Long
Dim i As Long, j As Long
Dim dateValue As Date, hourValue As Date
Dim dict As Object
Dim key As Variant
' Définir les feuilles source et destination
Set wsSource = ActiveSheet
Set wsDestination = Sheets.Add(After:=ActiveSheet)
wsDestination.Name = "Tableau Croisé Dynamique"
' Trouver la dernière ligne des données source
lastRow = wsSource.Cells(wsSource.Rows.Count, 1).End(xlUp).row
' Créer un dictionnaire pour stocker les valeurs uniques de date
Set dict = CreateObject("Scripting.Dictionary")
' Parcourir les données source et ajouter les dates uniques au dictionnaire
For i = 2 To lastRow
dateValue = CDate(wsSource.Cells(i, 2).value)
If dateValue >= DateSerial(2024, 1, 1) And dateValue <= DateSerial(2024, 1, 31) Then
If Not dict.Exists(dateValue) Then
dict.Add dateValue, 0
End If
End If
Next i
' Créer les en-têtes de ligne (dates)
wsDestination.Cells(1, 1).value = "Date"
i = 2
For Each key In dict.Keys
wsDestination.Cells(i, 1).value = key
wsDestination.Cells(i, 1).NumberFormat = "dd/mm/yyyy"
i = i + 1
Next key
' Créer les en-têtes de colonne (heures)
For j = 2 To 25
wsDestination.Cells(1, j).value = Format(TimeSerial(j - 2, 0, 0), "hh:mm")
Next j
' Remplir les valeurs dans le tableau croisé dynamique
Application.ScreenUpdating = False
For i = 2 To lastRow
dateValue = CDate(wsSource.Cells(i, 2).value)
hourValue = CDate(wsSource.Cells(i, 1).value)
If dateValue >= DateSerial(2024, 1, 1) And dateValue <= DateSerial(2024, 1, 31) Then
Dim rowIndex As Long
rowIndex = Application.Match(dateValue, wsDestination.Columns(1), 0) + 1
Dim colIndex As Long
colIndex = Application.Match(Format(hourValue, "hh:mm"), wsDestination.Rows(1), 0) + 1
If rowIndex > 1 And colIndex > 1 Then
wsDestination.Cells(rowIndex, colIndex).value = wsSource.Cells(i, 3).value
End If
End If
Next i
Application.ScreenUpdating = True
' Formater le tableau
With wsDestination.Range(wsDestination.Cells(1, 1), wsDestination.Cells(dict.Count + 1, 25))
.Borders.LineStyle = xlContinuous
.Borders.Weight = xlThin
.Interior.Color = RGB(220, 220, 220)
.Font.Bold = True
.HorizontalAlignment = xlCenter
End With
' Ajuster la largeur des colonnes
wsDestination.Columns.AutoFit
End Sub
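A hedged sketch of the usual fix for that line: Application.Match returns an Error variant when nothing matches, which a Long cannot hold, so capture it in a Variant and test with IsError first (matching on the date's numeric serial via CDbl is often needed when the column holds true dates; adjust if it holds text):
Dim matchResult As Variant
matchResult = Application.Match(CDbl(dateValue), wsDestination.Columns(1), 0)
If Not IsError(matchResult) Then
    rowIndex = CLng(matchResult) + 1
End If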
|
6101e32b7bcb71871e021f5730df59fb
|
{
"intermediate": 0.29538825154304504,
"beginner": 0.5127493143081665,
"expert": 0.19186235964298248
}
|
47,630
|
how to set up a maximum line length of 120 characters for all lines in a program in Visual Studio Code
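A hedged sketch for settings.json: editor.rulers only draws a visual guide at column 120; hard enforcement needs a formatter or linter (for example clang-format's ColumnLimit or ESLint's max-len):
// .vscode/settings.json
{
  "editor.rulers": [120]
}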
|
ac60d017743f4d3fa691df29a8fcad5c
|
{
"intermediate": 0.3124714493751526,
"beginner": 0.3181840777397156,
"expert": 0.3693445324897766
}
|
47,631
|
Hi, can you give me an ffmpeg 7.0 lossless option for grabbing an xcomposite window, using these parts of the arg: the gdigrab device to grab a window using hwnd=HANDLER
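Note that gdigrab is the Windows GDI grabber (xcomposite capture would be a different, X11-side mechanism), but taking the hwnd=HANDLER form at face value, a hedged lossless sketch is:
ffmpeg -f gdigrab -framerate 30 -i hwnd=HANDLER -c:v libx264 -qp 0 -preset ultrafast output.mkv
(-qp 0 makes libx264 mathematically lossless; ffv1 is an alternative lossless codec.)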
|
1cbbc8b17b88ad2acbbdb515c33331f5
|
{
"intermediate": 0.5377809405326843,
"beginner": 0.18081502616405487,
"expert": 0.281404048204422
}
|
47,632
|
python lexer implemented in rust lang
|
8bb29bab778b8d0e60633e630d474301
|
{
"intermediate": 0.33007603883743286,
"beginner": 0.4120189845561981,
"expert": 0.2579050362110138
}
|
47,633
|
I have a series of noisy data in Python, which from 0 to about 60-90% of its length is having fairly straight trend, then it starts to rise and it ends with sharp rise from negative values to 0. How can I cut this part, so I have only the close to linear part?
Provide just complete code with the dataset in variable
|
86a4cb6079d86eaa80a0bbcaab46cacc
|
{
"intermediate": 0.41531485319137573,
"beginner": 0.1985393613576889,
"expert": 0.38614580035209656
}
|
47,634
|
learning python on my ubuntu pc
|
bb9a83111b9e261724e3ee0d2a63341b
|
{
"intermediate": 0.3850010335445404,
"beginner": 0.21921418607234955,
"expert": 0.39578479528427124
}
|
47,635
|
python lexer implemented in rust with support for python 'type comment'
|
8eee192c9874a0c8511cf6488d52a9b1
|
{
"intermediate": 0.3477514684200287,
"beginner": 0.3424210548400879,
"expert": 0.3098275065422058
}
|
47,636
|
write a website for blogging, make it more professional, add dark mode and light mode, make it fully responsive
|
fcbc2dd3e41819779844631fd520d675
|
{
"intermediate": 0.34426289796829224,
"beginner": 0.28276753425598145,
"expert": 0.3729695677757263
}
|
47,637
|
explain to me step by step what is going on here and also explain to me the mathematical intricacies and methods used
// This source code is subject to the terms of the Mozilla Public License 2.0 at https://mozilla.org/MPL/2.0/
// © DonovanWall
//██████╗ ██╗ ██╗
//██╔══██╗██║ ██║
//██║ ██║██║ █╗ ██║
//██║ ██║██║███╗██║
//██████╔╝╚███╔███╔╝
//╚═════╝ ╚══╝╚══╝
//@version=4
study(title="Gaussian Channel [DW]", shorttitle="GC [DW]", overlay=true)
// This study is an experiment utilizing the Ehlers Gaussian Filter technique combined with lag reduction techniques and true range to analyze trend activity.
// Gaussian filters, as Ehlers explains it, are simply exponential moving averages applied multiple times.
// First, beta and alpha are calculated based on the sampling period and number of poles specified. The maximum number of poles available in this script is 9.
// Next, the data being analyzed is given a truncation option for reduced lag, which can be enabled with “Reduced Lag Mode”.
// Then the alpha and source values are used to calculate the filter and filtered true range of the dataset.
// Filtered true range with a specified multiplier is then added to and subtracted from the filter, generating a channel.
// Lastly, a one pole filter with a N pole alpha is averaged with the filter to generate a faster filter, which can be enabled with “Fast Response Mode”.
//Custom bar colors are included.
//Note: Both the sampling period and number of poles directly affect how much lag the indicator has, and how smooth the output is.
// Larger inputs will result in smoother outputs with increased lag, and smaller inputs will have noisier outputs with reduced lag.
// For the best results, I recommend not setting the sampling period any lower than the number of poles + 1. Going lower truncates the equation.
//-----------------------------------------------------------------------------------------------------------------------------------------------------------------
//Updates:
// Huge shoutout to @e2e4mfck for taking the time to improve the calculation method!
// -> migrated to v4
// -> pi is now calculated using trig identities rather than being explicitly defined.
// -> The filter calculations are now organized into functions rather than being individually defined.
// -> Revamped color scheme.
//-----------------------------------------------------------------------------------------------------------------------------------------------------------------
//Functions - courtesy of @e2e4mfck
//-----------------------------------------------------------------------------------------------------------------------------------------------------------------
//Filter function
f_filt9x (_a, _s, _i) =>
int _m2 = 0, int _m3 = 0, int _m4 = 0, int _m5 = 0, int _m6 = 0,
int _m7 = 0, int _m8 = 0, int _m9 = 0, float _f = .0, _x = (1 - _a)
// Weights.
// Initial weight _m1 is a pole number and equal to _i
_m2 := _i == 9 ? 36 : _i == 8 ? 28 : _i == 7 ? 21 : _i == 6 ? 15 : _i == 5 ? 10 : _i == 4 ? 6 : _i == 3 ? 3 : _i == 2 ? 1 : 0
_m3 := _i == 9 ? 84 : _i == 8 ? 56 : _i == 7 ? 35 : _i == 6 ? 20 : _i == 5 ? 10 : _i == 4 ? 4 : _i == 3 ? 1 : 0
_m4 := _i == 9 ? 126 : _i == 8 ? 70 : _i == 7 ? 35 : _i == 6 ? 15 : _i == 5 ? 5 : _i == 4 ? 1 : 0
_m5 := _i == 9 ? 126 : _i == 8 ? 56 : _i == 7 ? 21 : _i == 6 ? 6 : _i == 5 ? 1 : 0
_m6 := _i == 9 ? 84 : _i == 8 ? 28 : _i == 7 ? 7 : _i == 6 ? 1 : 0
_m7 := _i == 9 ? 36 : _i == 8 ? 8 : _i == 7 ? 1 : 0
_m8 := _i == 9 ? 9 : _i == 8 ? 1 : 0
_m9 := _i == 9 ? 1 : 0
// filter
_f := pow(_a, _i) * nz(_s) +
_i * _x * nz(_f[1]) - (_i >= 2 ?
_m2 * pow(_x, 2) * nz(_f[2]) : 0) + (_i >= 3 ?
_m3 * pow(_x, 3) * nz(_f[3]) : 0) - (_i >= 4 ?
_m4 * pow(_x, 4) * nz(_f[4]) : 0) + (_i >= 5 ?
_m5 * pow(_x, 5) * nz(_f[5]) : 0) - (_i >= 6 ?
_m6 * pow(_x, 6) * nz(_f[6]) : 0) + (_i >= 7 ?
_m7 * pow(_x, 7) * nz(_f[7]) : 0) - (_i >= 8 ?
_m8 * pow(_x, 8) * nz(_f[8]) : 0) + (_i == 9 ?
_m9 * pow(_x, 9) * nz(_f[9]) : 0)
//9 var declaration fun
f_pole (_a, _s, _i) =>
_f1 = f_filt9x(_a, _s, 1), _f2 = (_i >= 2 ? f_filt9x(_a, _s, 2) : 0), _f3 = (_i >= 3 ? f_filt9x(_a, _s, 3) : 0)
_f4 = (_i >= 4 ? f_filt9x(_a, _s, 4) : 0), _f5 = (_i >= 5 ? f_filt9x(_a, _s, 5) : 0), _f6 = (_i >= 6 ? f_filt9x(_a, _s, 6) : 0)
_f7 = (_i >= 2 ? f_filt9x(_a, _s, 7) : 0), _f8 = (_i >= 8 ? f_filt9x(_a, _s, 8) : 0), _f9 = (_i == 9 ? f_filt9x(_a, _s, 9) : 0)
_fn = _i == 1 ? _f1 : _i == 2 ? _f2 : _i == 3 ? _f3 :
_i == 4 ? _f4 : _i == 5 ? _f5 : _i == 6 ? _f6 :
_i == 7 ? _f7 : _i == 8 ? _f8 : _i == 9 ? _f9 : na
[_fn, _f1]
//-----------------------------------------------------------------------------------------------------------------------------------------------------------------
//Inputs
//-----------------------------------------------------------------------------------------------------------------------------------------------------------------
//Source
src = input(defval=hlc3, title="Source")
//Poles
int N = input(defval=4, title="Poles", minval=1, maxval=9)
//Period
int per = input(defval=144, title="Sampling Period", minval=2)
//True Range Multiplier
float mult = input(defval=1.414, title="Filtered True Range Multiplier", minval=0)
//Lag Reduction
bool modeLag = input(defval=false, title="Reduced Lag Mode")
bool modeFast = input(defval=false, title="Fast Response Mode")
//-----------------------------------------------------------------------------------------------------------------------------------------------------------------
//Definitions
//-----------------------------------------------------------------------------------------------------------------------------------------------------------------
//Beta and Alpha Components
beta = (1 - cos(4*asin(1)/per)) / (pow(1.414, 2/N) - 1)
alpha = -beta + sqrt(pow(beta, 2) + 2*beta)
//Lag
lag = (per - 1)/(2*N)
//Data
srcdata = modeLag ? src + (src - src[lag]) : src
trdata = modeLag ? tr(true) + (tr(true) - tr(true)[lag]) : tr(true)
//Filtered Values
[filtn, filt1] = f_pole(alpha, srcdata, N)
[filtntr, filt1tr] = f_pole(alpha, trdata, N)
//Lag Reduction
filt = modeFast ? (filtn + filt1)/2 : filtn
filttr = modeFast ? (filtntr + filt1tr)/2 : filtntr
//Bands
hband = filt + filttr*mult
lband = filt - filttr*mult
// Colors
color1 = #0aff68
color2 = #00752d
color3 = #ff0a5a
color4 = #990032
fcolor = filt > filt[1] ? #0aff68 : filt < filt[1] ? #ff0a5a : #cccccc
barcolor = (src > src[1]) and (src > filt) and (src < hband) ? #0aff68 : (src > src[1]) and (src >= hband) ? #0aff1b : (src <= src[1]) and (src > filt) ? #00752d :
(src < src[1]) and (src < filt) and (src > lband) ? #ff0a5a : (src < src[1]) and (src <= lband) ? #ff0a11 : (src >= src[1]) and (src < filt) ? #990032 : #cccccc
//-----------------------------------------------------------------------------------------------------------------------------------------------------------------
//Outputs
//-----------------------------------------------------------------------------------------------------------------------------------------------------------------
//Filter Plot
filtplot = plot(filt, title="Filter", color=fcolor, linewidth=3)
//Band Plots
hbandplot = plot(hband, title="Filtered True Range High Band", color=fcolor)
lbandplot = plot(lband, title="Filtered True Range Low Band", color=fcolor)
//Channel Fill
fill(hbandplot, lbandplot, title="Channel Fill", color=fcolor, transp=80)
//Bar Color
barcolor(barcolor)
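For reference, a LaTeX transcription of the coefficient math above (my reading of the script; P is the sampling period, N the pole count, and 4*asin(1) is simply 2*pi):
$$\beta=\frac{1-\cos(2\pi/P)}{1.414^{2/N}-1},\qquad \alpha=-\beta+\sqrt{\beta^{2}+2\beta},\qquad \mathrm{lag}=\frac{P-1}{2N}$$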
|
e208ebdaa933fa3813178f39e89ac544
|
{
"intermediate": 0.39621350169181824,
"beginner": 0.2619469165802002,
"expert": 0.34183958172798157
}
|
47,638
|
@app.route("/add", methods=["POST"])
def add_annonce():
data = request.json
Annonce_collection.insert_one(data)
return jsonify(list(data)), 201
I want to add images to the public folder in my React app
{
"categorie": "Categorie",
"plusCategorie": "plusCategorie",
"ville": "Ville",
"secteur": "Secteur",
"NumeroTele": "Telephone",
"Etat": "Etat",
"Prix": "Prix",
"titre": "Titre",
"TexteAnnonce": "TexteAnnonce",
"images": [
"C:\\Users\\LENOVO\\Desktop\\selenium\\imges\\AdobeStock_73502611_Preview.jpeg",
"C:\\Users\\LENOVO\\Desktop\\selenium\\imges\\AdobeStock_238105207_Preview.jpeg",
"C:\\Users\\LENOVO\\Desktop\\selenium\\imges\\AdobeStock_686756529_Preview.jpeg"
]
}
|
a6d170d1190053f49abeb4d70da98220
|
{
"intermediate": 0.37346377968788147,
"beginner": 0.3727576732635498,
"expert": 0.2537785768508911
}
|
47,639
|
#include <bits/stdc++.h>
using namespace std;
void add_edge(vector<vector<int>> &adj, int u, int v, int l)
{
if (u >= 1 && u <= adj.size() - 1 && v >= 1 && v <= adj.size() - 1)
{
adj[u][v] = l;
adj[v][u] = l;
}
}
void func(vector<int> &dist1, vector<int> &adj2, int n)
{
int cnt = 0;
for (int i = 1; i <= n; i++)
{ if (dist1[i] <= adj2[i])
cnt++;}
cout << cnt;
}
void dijkstra(vector<vector<int>> &adj, int src, int n, vector<int> &dist)
{
vector<int>dijkstra (adj,src);
priority_queue<pair<int,int>,vector<pair<int,int>>,greater<pair<int,int>>>pq;
vector<int>dist(n);
for(int i=0;i<n;i++)
{
dist[i]=100000;
}
dist[src]=0;
pq.push({0,src});
while(!pq.empty())
{
int dist=pq.front.first;
int node=pq.front..second;
pq.pop();
for(auto it:adj[node])
{
int d=it.first;
int n=it.second;
if(dist[node]+d<dist[n])
{
dist[n]=dist[node]+d;
}
pq.push({dist[node],n});
}
}
}
int main()
{
int n, m, k;
cin >> n >> m >> k;
vector<vector<int>> adj1(n + 1, vector<int>(n + 1, 100000));
// vector<vector<int>> adj2(n + 1, vector<int>(n + 1, 100000));
vector<int> adj2(n+1, 100000);
vector<int> dist1(n + 1, 100000);
for (int i = 0; i < m; i++)
{
int u, v, l;
cin >> u >> v >> l;
add_edge(adj1, u, v, l);
}
for (int i = 0; i < k; i++)
{
int v, l;
cin >> v >> l;
adj2[v] = l;
// add_edge(adj2, 0, v, l);
}
dijkstra(adj1, 1, n, dist1);
// dijkstra(adj2, 1, n, dist2);
func(dist1, adj2, n);
return 0;
}
Lukarp has started his own tech company. He received a lot of funding from Igen with which he opened many offices around the world. Each office needs to communicate with one another, for which they're using high speed connections between the offices. Office number 1 is Lukarp's HQ. Some offices are important and hence need faster connections to the HQ for which Lukarp has used special fiber connections. Lukarp has already planned the connections but feels some fiber connections are redundant. You have been hired by Lukarp to remove the fiber connections which don't cause faster connections.
Statement
The offices and (bi-directional) connections (both normal and fiber) are given to you. The ith normal connection connects any two offices ai and bi. Normal connections have latency li. The ith fiber connection connects the HQ with the office ci. Fiber connections also come with a latency pi. The total latency of a path is the sum of latencies on the connections. You are to output the maximum number of fiber connections that can be removed, such that the latency of the smallest latency path between the HQ and any other node remains the same as before.
There are n offices with m normal connections and k high-speed fiber connections.
The ith normal connection connects offices ai and bi (bi-directionally) with latency li.
The ith fiber connection connects offices 1 and ci (bi-directionally) with latency pi.
Input Format
The first line of the input file will contain three space-separated integers n, m and k, the number of offices, the number of normal connections and the number of fiber connections.
There will be m lines after this, the ith line signifying the ith normal connection, each containing three space-separated integers ai, bi and li, the two offices that are connected and the latency of the connection respectively.
There will be k lines after this, the ith line signifying the ith fiber connection, each containing two space-separated integers ci and pi, the office connected to the HQ and the latency of the fiber connection respectively.
Output Format
Output only one integer - the maximum number of fiber connections that can be removed without changing the latency of the smallest latency path from office 1 to any other office.
Constraints
• 2 ≤ n ≤ 10^5
• 1 ≤ m ≤ 2 · 10^5
• 1 ≤ k ≤ 10^5
• 1 ≤ ai, bi, ci ≤ n
• 1 ≤ li, pi ≤ 10^9
Note: There may be multiple connections of either type between two offices, and an office might be connected to itself as well.
Samples
Input
4 5 2
1 2 2
1 4 9
1 3 3
2 4 4
3 4 5
3 4
4 5
Output
1
|
5bdfd36dd2b7f90de0e7d159a1715369
|
{
"intermediate": 0.3015320897102356,
"beginner": 0.43683984875679016,
"expert": 0.26162809133529663
}
|
47,640
|
if I have a circle on a grid, how do I calculate how many complete squares are in that circle using its radius?
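A brute-force sketch under one assumed convention (unit cells, circle centred on a grid intersection): a cell is complete exactly when all four of its corners lie inside the circle.
import math

def full_cells(radius, cell=1.0):
    # count cells whose four corners all lie inside the circle
    count = 0
    steps = math.ceil(radius / cell)
    for i in range(-steps, steps):
        for j in range(-steps, steps):
            corners = [(i, j), (i + 1, j), (i, j + 1), (i + 1, j + 1)]
            if all((x * cell) ** 2 + (y * cell) ** 2 <= radius ** 2 for x, y in corners):
                count += 1
    return count

print(full_cells(5.0))  # circle of radius 5 on a unit grid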
|
458735a31b7d8ccb918a77d5fb116888
|
{
"intermediate": 0.35246196389198303,
"beginner": 0.2300504893064499,
"expert": 0.4174875020980835
}
|
47,641
|
4 Question 2: Need For Speed
4.1 Introduction
Lukarp has started his own tech company. He received a lot of funding from Igen with which he opened many offices around the world. Each office needs to communicate with one another, for which they're using high speed connections between the offices. Office number 1 is Lukarp's HQ. Some offices are important and hence need faster connections to the HQ, for which Lukarp has used special fiber connections. Lukarp has already planned the connections but feels some fiber connections are redundant. You have been hired by Lukarp to remove those fiber connections which don't cause faster connections.
4.2 Problem Statement
4.2.1 The Problem
The offices and (bi-directional) connections (both normal and fiber) are given to you. HQ is numbered as 1. The ith normal connection connects any two offices ai and bi. Normal connections have latency li. The ith fiber connection connects the HQ with the office ci. Fiber connections also come with a latency pi. The total latency of a path is the sum of latencies on the connections. You are to output the maximum number of fiber connections that can be removed, such that the latency of the smallest latency path between the HQ and any other node remains the same as before.
• There are n offices with m normal connections and k high-speed fiber connections.
• The ith normal connection connects offices ai and bi (bi-directionally) with latency li.
• The ith fiber connection connects offices 1 and ci (bi-directionally) with latency pi.
4.2.2 Input Format
The first line of the input file will contain three space-separated integers n, m and k, the number of offices, the number of normal connections and the number of fiber connections.
There will be m lines after this, the ith line signifying the ith normal connection, each containing three space-separated integers ai, bi and li, the two offices that are connected and the latency of the connection respectively.
There will be k lines after this, the ith line signifying the ith fiber connection, each containing two space-separated integers ci and pi, the office connected to the HQ and the latency of the fiber connection respectively.
4.2.3 Output Format
Output only one integer - the maximum number of fiber connections that can be removed without changing the latency of the smallest latency path from office 1 to any other office.
4.2.4 Constraints
• 2 ≤ n ≤ 10^5
• 1 ≤ m ≤ 2 · 10^5
• 1 ≤ k ≤ 10^5
• 1 ≤ ai, bi, ci ≤ n
• 1 ≤ li, pi ≤ 10^9
4.2.5 Example
Input:
4 5 2
1 2 2
1 4 9
1 3 3
2 4 4
3 4 5
3 4
4 5
Output:
1
Explanation:
In this example, there are five normal connections as shown in the figure below. The fiber connection going from 1 to 3 can be removed because the normal connection (3) is faster than the fiber connection (4). However, the fiber connection with 4 cannot be removed. Hence the maximum number of fiber connections that can be removed is 1.
code in c++
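One hedged solution sketch (my own approach, not an official answer): run Dijkstra from the HQ over normal plus fiber edges; a fiber to office v must then be kept only when its best latency attains dist[v] and no normal edge also attains it. Every other fiber is removable.
#include <bits/stdc++.h>
using namespace std;
using ll = long long;

int main() {
    int n, m, k;
    scanf("%d %d %d", &n, &m, &k);
    vector<vector<pair<int, ll>>> adj(n + 1);   // normal connections only
    for (int i = 0; i < m; ++i) {
        int a, b; ll l;
        scanf("%d %d %lld", &a, &b, &l);
        adj[a].push_back({b, l});
        adj[b].push_back({a, l});
    }
    const ll INF = (ll)4e18;
    vector<ll> fiber(n + 1, INF);               // cheapest fiber per office
    for (int i = 0; i < k; ++i) {
        int c; ll p;
        scanf("%d %lld", &c, &p);
        fiber[c] = min(fiber[c], p);
    }
    vector<ll> dist(n + 1, INF);
    priority_queue<pair<ll, int>, vector<pair<ll, int>>, greater<>> pq;
    dist[1] = 0;
    pq.push({0, 1});
    for (int v = 2; v <= n; ++v)                // fibers act as edges from the HQ
        if (fiber[v] < dist[v]) { dist[v] = fiber[v]; pq.push({dist[v], v}); }
    while (!pq.empty()) {
        auto [d, u] = pq.top(); pq.pop();
        if (d != dist[u]) continue;             // stale queue entry
        for (auto [v, w] : adj[u])
            if (d + w < dist[v]) { dist[v] = d + w; pq.push({dist[v], v}); }
    }
    int required = 0;                           // offices where one fiber must stay
    for (int v = 2; v <= n; ++v) {
        if (fiber[v] == INF || fiber[v] > dist[v]) continue;
        bool normalToo = false;
        for (auto [u, w] : adj[v])
            if (dist[u] + w <= dist[v]) { normalToo = true; break; }
        if (!normalToo) ++required;
    }
    printf("%d\n", k - required);
    return 0;
}
On the sample this keeps only the fiber to office 4 (dist = {0, 2, 3, 5}), so it prints 2 - 1 = 1.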
|
510b727213cd6c9aeb387ae8bf40a3d0
|
{
"intermediate": 0.32388344407081604,
"beginner": 0.3446963429450989,
"expert": 0.3314202129840851
}
|
47,642
|
write Python code to implement a neural network to recognize hand signs from images. The training data is split into folders corresponding to their given letter/number. There is also a test data set (also labelled and divided) to see how well it is predicting.
I am running this on colab, use cuda
|
9ebc6435ca36de512382944a0b499242
|
{
"intermediate": 0.20876654982566833,
"beginner": 0.08296400308609009,
"expert": 0.708269476890564
}
|
47,643
|
arduino predeclare function
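In Arduino (C++), predeclaring a function just means writing its prototype before first use; the IDE auto-generates prototypes for simple sketches, but explicit ones are needed for things like default arguments. A minimal hedged sketch:
// prototype (predeclaration) above setup()/loop()
void blink(int pin, int ms);

void setup() {
  pinMode(13, OUTPUT);
}

void loop() {
  blink(13, 500);
}

// the definition may live below its first use thanks to the prototype
void blink(int pin, int ms) {
  digitalWrite(pin, HIGH);
  delay(ms);
  digitalWrite(pin, LOW);
  delay(ms);
}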
|
65d6a0a3a2d5f4c8bc12324685f60066
|
{
"intermediate": 0.3009408414363861,
"beginner": 0.4454346001148224,
"expert": 0.2536245584487915
}
|
47,644
|
I am using Java 21 and noticed that if I add an int to a string, the result will then be a new string that is the concatenation of the original string with the number. Since when can you do this in Java?
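This has been legal since Java 1.0: the + operator applies string conversion to a non-String operand (JLS 15.18.1), so Java 21 changed nothing here. For example:
String s = "value: " + 42;  // "value: 42" - the int is converted as if by String.valueOf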
|
0c0078b2c922e1ef9543bd1e93d8bb7f
|
{
"intermediate": 0.5967209935188293,
"beginner": 0.16441577672958374,
"expert": 0.23886322975158691
}
|
47,645
|
fix my code
from google.colab import drive
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader, Dataset
from torchvision import transforms, datasets
import os
# Assuming Google Drive is already mounted
# If not, uncomment the next line and execute it.
# drive.mount('/content/drive')
# Device configuration
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
print(f'Using {device} device')
class HandSignsDataset(Dataset):
def __init__(self, root_dir, transform=None):
self.transform = transform
self.images = datasets.ImageFolder(root=root_dir, transform=transform)
self.classes = self.images.classes
self.num_classes = len(self.classes)
def __len__(self):
return len(self.images)
def __getitem__(self, idx):
if torch.is_tensor(idx):
idx = idx.tolist()
img, label = self.images[idx]
return img, label
transform = transforms.Compose([
transforms.Resize((224, 224)), # Resize images to a fixed size
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
])
# Specify your paths here
train_data_path = '/content/drive/MyDrive/Output_ISL/train'
test_data_path = '/content/drive/MyDrive/Output_ISL/test'
# Loading the datasets
train_dataset = HandSignsDataset(root_dir=train_data_path, transform=transform)
test_dataset = HandSignsDataset(root_dir=test_data_path, transform=transform)
train_loader = DataLoader(train_dataset, batch_size=10, shuffle=True)
test_loader = DataLoader(test_dataset, batch_size=10, shuffle=False)
# Determine the number of classes dynamically
num_classes = train_dataset.num_classes
# Defining the Neural Network
class Net(nn.Module):
def __init__(self, num_classes):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(3, 32, kernel_size=3, stride=1, padding=1)
self.pool = nn.MaxPool2d(kernel_size=2, stride=2, padding=0)
self.conv2 = nn.Conv2d(32, 64, kernel_size=3, stride=1, padding=1)
self.fc1 = nn.Linear(64 * 56 * 56, 600) # Adjusted for the size after pooling
self.fc2 = nn.Linear(600, num_classes)
def forward(self, x):
x = self.pool(nn.functional.relu(self.conv1(x)))
x = self.pool(nn.functional.relu(self.conv2(x)))
x = x.view(-1, 64 * 56 * 56) # Adjusted for the size after pooling
x = nn.functional.relu(self.fc1(x))
x = self.fc2(x)
return x
model = Net(num_classes).to(device)
# Loss and optimizer
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(), lr=0.001)
# Training the model
def train_model(model, criterion, optimizer, num_epochs=10): # Adjust num_epochs as required
model.train()
for epoch in range(num_epochs):
for i, (images, labels) in enumerate(train_loader):
images, labels = images.to(device), labels.to(device)
# Forward pass
outputs = model(images)
loss = criterion(outputs, labels)
# Backward and optimize
optimizer.zero_grad()
loss.backward()
optimizer.step()
print(f'Epoch [{epoch+1}/{num_epochs}], Loss: {loss.item():.4f}')
# Function to test the model
def test_model(model):
model.eval()
with torch.no_grad():
correct = 0
total = 0
for images, labels in test_loader:
images, labels = images.to(device), labels.to(device)
outputs = model(images)
_, predicted = torch.max(outputs.data, 1)
total += labels.size(0)
correct += (predicted == labels).sum().item()
print(f'Accuracy of the model on the test images: {100 * correct / total}%')
train_model(model, criterion, optimizer)
test_model(model)
|
a19a807a6af7fffac90080c3fe1be6fd
|
{
"intermediate": 0.20523381233215332,
"beginner": 0.5289955735206604,
"expert": 0.26577067375183105
}
|
47,646
|
How do I install PCem v17 for Linux on Arch Linux
|
9a07343f4d52f811e8a3f1dd9b4328f0
|
{
"intermediate": 0.5281184315681458,
"beginner": 0.18817883729934692,
"expert": 0.28370270133018494
}
|
47,647
|
I have a series of noisy data in Python, which from 0 to about 60-90% of its length is having fairly straight trend, then it starts to rise and it ends with sharp rise from negative values to 0. How can I cut this part, so I have only the close to linear part?
Provide just complete code with the dataset in variable
|
86697df9e026b60e6412fa7dc968142e
|
{
"intermediate": 0.41531485319137573,
"beginner": 0.1985393613576889,
"expert": 0.38614580035209656
}
|
47,648
|
You have this dataframe:
t_gene helper transcripts relation class pred q_gene chain
0 ENSG00000117013 ENST00000347132.KCNQ4 ENST00000347132.KCNQ4.5 o2o I 0.996369 reg_663 0
1 ENSG00000117013 ENST00000509682.KCNQ4 ENST00000509682.KCNQ4.5 o2o I 0.996369 reg_663 0
2 ENSG00000170369 ENST00000304725.CST2 NaN o2z NaN NaN None 0
3 ENSG00000112494 ENST00000366829.UNC93A NaN o2z NaN NaN None 0
4 ENSG00000112494 ENST00000230256.UNC93A NaN o2z NaN NaN None 0
... ... ... ... ... ... ... ... ...
325366 ENSG00000177212 ENST00000641220.OR2T33 ENST00000641220.OR2T33.267831 NaN NaN -2.000000 NaN 0
325367 ENSG00000204572 ENST00000398531.KRTAP5-10 ENST00000398531.KRTAP5-10.355706 NaN NaN 0.003860 NaN 0
325368 ENSG00000196156 ENST00000391356.KRTAP4-3 ENST00000391356.KRTAP4-3.266097 NaN NaN 0.005833 NaN 0
325369 ENSG00000280204 ENST00000641544.OR1S1 ENST00000641544.OR1S1.114894 NaN NaN 0.017002 NaN 0
325370 ENSG00000176024 ENST00000391794.ZNF613 ENST00000391794.ZNF613.29503 NaN NaN 0.019844 NaN 0
the last column called "chain" is a helper column with 1's and 0's. We are interested in the rows with 1's. So,
df[df["chain"] == 1]
t_gene helper transcripts relation class pred q_gene chain
1589 ENSG00000223572 ENST00000434505.CKMT1A ENST00000434505.CKMT1A.-1 m2o PI NaN reg_5556 1
1636 ENSG00000227488 ENST00000405679.GAGE12D ENST00000405679.GAGE12D.-1 m2m I NaN reg_8861 1
1638 ENSG00000216649 ENST00000381698.GAGE12E ENST00000381698.GAGE12E.-1 m2m I NaN reg_8941 1
...
for each one of the values in the "helper" column here, I want to group the values from the initial dataframe to end up with something like this (take this example for only 1 row):
df[df["helper"] == "ENST00000434505.CKMT1A"]
t_gene helper transcripts relation class pred q_gene chain
1589 ENSG00000223572 ENST00000434505.CKMT1A ENST00000434505.CKMT1A.-1 m2o PI NaN reg_5556 1
95321 ENSG00000223572 ENST00000434505.CKMT1A ENST00000434505.CKMT1A.57 NaN M -1.000000 NaN 0
125650 ENSG00000223572 ENST00000434505.CKMT1A ENST00000434505.CKMT1A.119651 NaN NaN 0.004655 NaN 0
152750 ENSG00000223572 ENST00000434505.CKMT1A ENST00000434505.CKMT1A.285792 NaN NaN 0.004157 NaN 0
188865 ENSG00000223572 ENST00000434505.CKMT1A ENST00000434505.CKMT1A.2013 NaN NaN 0.994052 NaN 0
225580 ENSG00000223572 ENST00000434505.CKMT1A ENST00000434505.CKMT1A.306590 NaN NaN -2.000000 NaN 0
226621 ENSG00000223572 ENST00000434505.CKMT1A ENST00000434505.CKMT1A.52021 NaN NaN 0.004832 NaN 0
256004 ENSG00000223572 ENST00000434505.CKMT1A ENST00000434505.CKMT1A.5066 NaN NaN 0.964385 NaN 0
291688 ENSG00000223572 ENST00000434505.CKMT1A ENST00000434505.CKMT1A.22 NaN NaN -1.000000 NaN 0
once you have this, we need to find the median of the values in the "pred" column. We only want to consider the values from rows that have NaN in the q_gene. We also want to consider only positive values in the median, so if the row has -2 or -1 in the "pred" column that should not be considered.
This needs to be the most efficient, fastest and most elegant solution. Please provide the code.
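A hedged vectorised sketch (column names as in the frames above; pred > 0 already drops the -1/-2 sentinels and NaNs, and isna() covers both NaN and None in q_gene):
import pandas as pd

# medians of usable predictions, keyed by helper
mask = df["q_gene"].isna() & (df["pred"] > 0)
medians = df.loc[mask].groupby("helper")["pred"].median()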
|
9411fecb7ffabd19981a7a848547b4ae
|
{
"intermediate": 0.27008652687072754,
"beginner": 0.3472314178943634,
"expert": 0.38268208503723145
}
|
47,649
|
You have this dataframe:
t_gene helper transcripts relation class pred q_gene chain
0 ENSG00000117013 ENST00000347132.KCNQ4 ENST00000347132.KCNQ4.5 o2o I 0.996369 reg_663 0
1 ENSG00000117013 ENST00000509682.KCNQ4 ENST00000509682.KCNQ4.5 o2o I 0.996369 reg_663 0
2 ENSG00000170369 ENST00000304725.CST2 NaN o2z NaN NaN None 0
3 ENSG00000112494 ENST00000366829.UNC93A NaN o2z NaN NaN None 0
4 ENSG00000112494 ENST00000230256.UNC93A NaN o2z NaN NaN None 0
... ... ... ... ... ... ... ... ...
325366 ENSG00000177212 ENST00000641220.OR2T33 ENST00000641220.OR2T33.267831 NaN NaN -2.000000 NaN 0
325367 ENSG00000204572 ENST00000398531.KRTAP5-10 ENST00000398531.KRTAP5-10.355706 NaN NaN 0.003860 NaN 0
325368 ENSG00000196156 ENST00000391356.KRTAP4-3 ENST00000391356.KRTAP4-3.266097 NaN NaN 0.005833 NaN 0
325369 ENSG00000280204 ENST00000641544.OR1S1 ENST00000641544.OR1S1.114894 NaN NaN 0.017002 NaN 0
325370 ENSG00000176024 ENST00000391794.ZNF613 ENST00000391794.ZNF613.29503 NaN NaN 0.019844 NaN 0
the last column called "chain" is a helper column with 1's and 0's. We are interested in the rows with 1's. So,
df[df["chain"] == 1]
t_gene helper transcripts relation class pred q_gene chain
1589 ENSG00000223572 ENST00000434505.CKMT1A ENST00000434505.CKMT1A.-1 m2o PI NaN reg_5556 1
1636 ENSG00000227488 ENST00000405679.GAGE12D ENST00000405679.GAGE12D.-1 m2m I NaN reg_8861 1
1638 ENSG00000216649 ENST00000381698.GAGE12E ENST00000381698.GAGE12E.-1 m2m I NaN reg_8941 1
...
for each one of the values in the "helper" column here, I want to group the values from the initial dataframe to end up with something like this (take this example for only 1 row):
df[df["helper"] == "ENST00000434505.CKMT1A"]
t_gene helper transcripts relation class pred q_gene chain
1589 ENSG00000223572 ENST00000434505.CKMT1A ENST00000434505.CKMT1A.-1 m2o PI NaN reg_5556 1
95321 ENSG00000223572 ENST00000434505.CKMT1A ENST00000434505.CKMT1A.57 NaN M -1.000000 NaN 0
125650 ENSG00000223572 ENST00000434505.CKMT1A ENST00000434505.CKMT1A.119651 NaN NaN 0.004655 NaN 0
152750 ENSG00000223572 ENST00000434505.CKMT1A ENST00000434505.CKMT1A.285792 NaN NaN 0.004157 NaN 0
188865 ENSG00000223572 ENST00000434505.CKMT1A ENST00000434505.CKMT1A.2013 NaN NaN 0.994052 NaN 0
225580 ENSG00000223572 ENST00000434505.CKMT1A ENST00000434505.CKMT1A.306590 NaN NaN -2.000000 NaN 0
226621 ENSG00000223572 ENST00000434505.CKMT1A ENST00000434505.CKMT1A.52021 NaN NaN 0.004832 NaN 0
256004 ENSG00000223572 ENST00000434505.CKMT1A ENST00000434505.CKMT1A.5066 NaN NaN 0.964385 NaN 0
291688 ENSG00000223572 ENST00000434505.CKMT1A ENST00000434505.CKMT1A.22 NaN NaN -1.000000 NaN 0
once you have this, we need to find the median of the values in the "pred" column. We only want to consider the values from rows that have NaN in the q_gene. We also want to consider only positive values in the median, so if the row has -2 or -1 in the "pred" column that should not be considered. This median value should be remapped to the rows that have chain == "1" in the initial unfiltered dataframe.
This needs to be the most efficient, fastest and most elegant solution. Please provide the code.
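A hedged sketch including the remap onto the chain == 1 rows (same column assumptions as above):
import pandas as pd

mask = df["q_gene"].isna() & (df["pred"] > 0)
medians = df.loc[mask].groupby("helper")["pred"].median()

rows = df["chain"] == 1
df.loc[rows, "pred"] = df.loc[rows, "helper"].map(medians)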
|
12444bab8f2742525e5674e1e1426fde
|
{
"intermediate": 0.27008652687072754,
"beginner": 0.3472314178943634,
"expert": 0.38268208503723145
}
|
47,650
|
struct UserNotLoggedInError : public std::runtime_error
{
UserNotLoggedInError(const char* what) :
std::runtime_error(what)
{}
};
what is the meaning of this type and how do I use it
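It is an exception type rather than a function: the struct derives from std::runtime_error and its constructor forwards the message. Typical use is to throw and catch it, as in this hedged sketch:
#include <iostream>
#include <stdexcept>

struct UserNotLoggedInError : public std::runtime_error {
    UserNotLoggedInError(const char* what) : std::runtime_error(what) {}
};

int main() {
    try {
        throw UserNotLoggedInError("no active session");   // "calling" it means throwing it
    } catch (const UserNotLoggedInError& e) {
        std::cout << e.what() << '\n';                     // prints the stored message
    }
}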
|
66e22edb6a70adb1d0db72f81a744269
|
{
"intermediate": 0.2951014041900635,
"beginner": 0.6457094550132751,
"expert": 0.05918911099433899
}
|
47,651
|
sudo apt-get install cabextract wimtools chntpw genisoimage - Fedora equivalent
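A hedged Fedora counterpart (package names can differ by release/repo; wimtools is typically packaged as wimlib-utils):
sudo dnf install cabextract wimlib-utils chntpw genisoimage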
|
2d63c404a241a4130aa8259a13a5562b
|
{
"intermediate": 0.38534024357795715,
"beginner": 0.20293749868869781,
"expert": 0.41172224283218384
}
|
47,652
|
can you rewrite the script below to get rid of the sklearn library?
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression
# Provided data
data = np.array([-69.33755367944337, -69.57485691061791, -69.95485623221921, -70.35670585767394, -70.02034273911067, -70.5129384876611, -71.02125953391158, -70.80903376668984, -71.20983071308493, -71.35402174914674, -70.68650350925711, -71.242193402648, -71.08632752865438, -71.44354140743718, -72.06282391252182, -71.97604039772658, -71.20173397084388, -71.51796204122806, -71.7182022204926, -71.67954492912169, -72.56859341935751, -72.59068281168845, -73.39285922814696, -73.51717530707887, -73.55699885780066, -72.73437843149856, -72.49911393827797, -72.64907769615752, -73.24531686949209, -73.40296710128197, -73.52570059974023, -74.27575795265385, -74.5812303885853, -74.0760713833962, -73.95234251421066, -74.33767084494107, -73.93464078707383, -74.36604346993707, -74.66625255632445, -74.153920495273, -74.29434768888893, -74.62799625459768, -74.93859466223553, -75.44631321413202, -75.18206954054764, -75.40276907672386, -74.78340495259873, -74.86896778852987, -74.97593861051185, -75.14257986714031, -74.45960068089896, -74.61419986123104, -74.38591313592757, -74.71377051426681, -74.76700477212658, -74.51336664778708, -75.01540934749838, -74.8834473254391, -75.30352461038053, -74.89978493421569, -75.18863746653184, -75.52971974212473, -75.52950026970822, -75.64569137794243, -74.89492768476644, -74.66599724768287, -75.02164146569116, -74.91410776089221, -74.6755346495755, -74.92443419084003, -74.34716841404688, -74.18481520014302, -74.33609231615057, -74.43157744509017, -74.2182811573172, -73.07700329486033, -72.62022334222704, -72.50411374534245, -72.33112651186798, -71.57335321661401, -71.6213659570347, -71.11238154463315, -69.64247515171974, -68.97851911837131, -68.52461645325822, -68.23371281045979, -67.06884368158687, -66.5462781782542, -65.11474752094495, -63.83500025114583, -62.93596159734818, -61.081490096558305, -60.10550542951689, -58.18974229959314, -56.57869259024329, -55.1192648931368, -53.01809895193117, -49.79361366355593, -46.56353633689577, -43.651282894251274, -40.423205368844236, -36.84372546445569, -33.72828912175518, -29.47846649064585, -26.017840943162362, -21.43248786683416, -16.797827786556912, -14.200943987198356, -8.888342860036111, -3.541854552327645])
x = np.arange(0, len(data)).reshape(-1, 1)
y = data
# Function to calculate a rolling average
def rolling_average(data, window_size):
return np.convolve(data, np.ones(window_size) / window_size, mode='valid')
# Calculate residuals’ standard deviation in a rolling window
model = LinearRegression()
std_devs = []
for end in range(2, len(x) + 1): # Incrementally increase the size of the dataset
model.fit(x[:end], y[:end])
predictions = model.predict(x[:end])
residuals = y[:end] - predictions
std_devs.append(np.std(residuals))
# Calculate rolling average of standard deviations with a defined window size
window_size = 10 # Adjust based on your dataset’s characteristics
rolling_std_devs = rolling_average(np.array(std_devs), window_size)
# Detect increase in rolling standard deviation as cut-off
cut_off_index = np.argwhere(rolling_std_devs > np.median(rolling_std_devs) * 1.5)[0][0] if len(np.argwhere(rolling_std_devs > np.median(rolling_std_devs) * 1.5)) > 0 else len(data)
# Trim the data
trimmed_data = data[:cut_off_index]
# Visualization
plt.figure(figsize=(14, 7))
plt.plot(data, label='Original Data', alpha=0.7)
plt.plot(trimmed_data, 'r', label='Identified Linear Part', linestyle='-', linewidth=2)
plt.axvline(x=cut_off_index, color='green', linestyle='-', label='Cut-off Point')
plt.legend()
plt.title('Refined Trimming Approach')
plt.xlabel('Index')
plt.ylabel('Data Value')
plt.show()
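One hedged way to drop the dependency: np.polyfit with degree 1 computes the same least-squares line, so the model inside the loop can be replaced like this:
# replaces model.fit(...)/model.predict(...) inside the loop
slope, intercept = np.polyfit(x[:end].ravel(), y[:end], 1)
predictions = slope * x[:end].ravel() + intercept
residuals = y[:end] - predictions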
|
02823f5c295f1fae09a8b1934df5c0af
|
{
"intermediate": 0.52858567237854,
"beginner": 0.24914014339447021,
"expert": 0.22227421402931213
}
|
47,653
|
this is my code:
result = subprocess.run(cmd, shell=True, capture_output=True, text=True, check=True)
right now displays the error like this:
Traceback (most recent call last):
File "/home/alejandro/Documents/projects/forks/postoga/./postoga.py", line
309, in <module>
main()
File "/home/alejandro/Documents/projects/forks/postoga/./postoga.py", line
305, in main
master.run()
File "/home/alejandro/Documents/projects/forks/postoga/./postoga.py", line
109, in run
self.gmodel = bed_to_gtf(self.outdir, self.bed, self.isoforms)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/alejandro/Documents/projects/forks/postoga/modules/convert_fro
m_bed.py", line 35, in bed_to_gtf
sh = shell(cmd)
^^^^^^^^^^
File "/home/alejandro/Documents/projects/forks/postoga/modules/utils.py",
line 22, in shell
result = subprocess.run(cmd, shell=True, capture_output=True, text=True,
check=True)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
^^^^^^^^^^^^
File "/usr/lib64/python3.11/subprocess.py", line 571, in run
raise CalledProcessError(retcode, process.args,
subprocess.CalledProcessError: Command 'bed2gtf --bed /home/alejandro/Docume
nts/projects/hiller/bat/query_annotation.bed --isoforms /home/alejandro/Docu
ments/projects/forks/postoga/test/postoga_isoforms.txt --output /home/alejan
dro/Documents/projects/forks/postoga/test/query_annotation.gtf' returned non
-zero exit status 1.
but I also want to print the error output from that tool
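One hedged way to surface the tool's own message: with capture_output=True, CalledProcessError carries the captured streams.
import subprocess

try:
    result = subprocess.run(cmd, shell=True, capture_output=True, text=True, check=True)
except subprocess.CalledProcessError as e:
    # the failed command's captured output lives on the exception
    print(e.stderr)
    print(e.stdout)
    raise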
|
0ff3ba36373013e67417d4e0dec15e8e
|
{
"intermediate": 0.31254976987838745,
"beginner": 0.522612452507019,
"expert": 0.1648378223180771
}
|
47,654
|
In this exercise, you are going to use the Person and Student classes to create two objects, then print out all of the available information from each object.
Your tasks
Create a Person object with the following information:
Name: Wolfgang Amadeus Mozart
Birthday: January 27, 1756
Create a Student object with the following information:
Name: Johann Nepomuk Hummel
Birthday: November 14, 1778
Grade: 10
GPA: 4.0
You do not need to modify the Person or Student class. public class Person {
private String name;
private String birthday;
public Person (String name, String birthday)
{
this.name = name;
this.birthday = birthday;
}
public String getBirthday(){
return birthday;
}
public String getName(){
return name;
}
}
public class PersonRunner
{
public static void main(String[] args)
{
// Start here!
}
}
public class Student extends Person {
private int grade;
private double gpa;
public Student(String name, String birthday, int grade, double gpa){
super(name, birthday);
this.grade = grade;
this.gpa = gpa;
}
public int getGrade(){
return grade;
}
public double getGpa(){
return gpa;
}
}
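A straightforward PersonRunner.main that completes the tasks using only the accessors defined above:
public class PersonRunner
{
    public static void main(String[] args)
    {
        Person mozart = new Person("Wolfgang Amadeus Mozart", "January 27, 1756");
        Student hummel = new Student("Johann Nepomuk Hummel", "November 14, 1778", 10, 4.0);

        System.out.println(mozart.getName() + " - " + mozart.getBirthday());
        System.out.println(hummel.getName() + " - " + hummel.getBirthday()
                + " - grade " + hummel.getGrade() + " - GPA " + hummel.getGpa());
    }
}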
|
a984f6d13d811ad803e4f7e87a1b46c7
|
{
"intermediate": 0.22065459191799164,
"beginner": 0.6233177185058594,
"expert": 0.1560276448726654
}
|
47,655
|
fix my code, it is running on Colab and on CPU so I do not want any recommendations to change it to a GPU
code
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader, Dataset
from torchvision import transforms, datasets
# Define device as CPU
device = torch.device('cpu')
print(f'Using {device} device')
# Define your dataset class
class HandSignsDataset(Dataset):
def __init__(self, root_dir, transform=None):
self.transform = transform
self.images = datasets.ImageFolder(root=root_dir, transform=transform)
self.classes = self.images.classes
self.num_classes = len(self.classes)
def __len__(self):
return len(self.images)
def __getitem__(self, idx):
if torch.is_tensor(idx):
idx = idx.tolist()
img, label = self.images[idx]
return img, label
# Define transformation pipeline
transform = transforms.Compose([
transforms.Resize((64, 64)), # Resize images to a smaller resolution
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
])
# Specify your paths here
train_data_path = '/content/drive/MyDrive/Output_ISL/train'
test_data_path = '/content/drive/MyDrive/Output_ISL/test'
# Loading the datasets
train_dataset = HandSignsDataset(root_dir=train_data_path, transform=transform)
test_dataset = HandSignsDataset(root_dir=test_data_path, transform=transform)
# Specify batch_size for DataLoader
batch_size = 64
train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
test_loader = DataLoader(test_dataset, batch_size=batch_size, shuffle=False)
# Determine the number of classes dynamically
num_classes = train_dataset.num_classes
# Define a simple Neural Network
class Net(nn.Module):
def __init__(self, num_classes):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(3, 8, kernel_size=3, stride=1, padding=1)
self.pool = nn.MaxPool2d(kernel_size=2, stride=2, padding=0)
# Adjust the input size calculation to account for max pooling
self.fc1 = nn.Linear(8 * 16 * 16 // 4, 64) # Divide by 4 to account for max pooling (2x2)
self.fc2 = nn.Linear(64, num_classes)
def forward(self, x):
x = self.pool(nn.functional.relu(self.conv1(x)))
# Update the view size to match the adjusted input size
x = x.view(-1, 8 * 16 * 16 // 4)
x = nn.functional.relu(self.fc1(x))
x = self.fc2(x)
return x
# Instantiate the model
model = Net(num_classes).to(device)
# Loss and optimizer
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(), lr=0.001)
# Training the model
def train_model(model, criterion, optimizer, train_loader, num_epochs=10):
model.train()
for epoch in range(num_epochs):
running_loss = 0.0
for i, (images, labels) in enumerate(train_loader):
images, labels = images.to(device), labels.to(device)
# Forward pass
outputs = model(images)
loss = criterion(outputs, labels)
# Backward and optimize
optimizer.zero_grad()
loss.backward()
optimizer.step()
running_loss += loss.item()
# Print statistics every 100 mini-batches
if i % 100 == 99:
print(f'Epoch [{epoch+1}/{num_epochs}], Batch [{i+1}/{len(train_loader)}], Loss: {running_loss / 100:.4f}')
running_loss = 0.0
# Function to test the model
def test_model(model, test_loader):
model.eval()
correct = 0
total = 0
with torch.no_grad():
for images, labels in test_loader:
images, labels = images.to(device), labels.to(device)
outputs = model(images)
_, predicted = torch.max(outputs.data, 1)
total += labels.size(0)
correct += (predicted == labels).sum().item()
print(f'Accuracy of the model on the test images: {100 * correct / total:.2f}%')
# Train the model
train_model(model, criterion, optimizer, train_loader)
# Test the model
test_model(model, test_loader)
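The likely failure is a shape mismatch rather than anything CPU-related: one 2x2 max-pool on a 64x64 input leaves 8 channels of 32x32 (8 * 32 * 32 = 8192 features), while fc1 expects 8 * 16 * 16 // 4 = 512. A hedged patch:
# in Net.__init__: flattened size after one 2x2 max-pool on a 64x64 input
self.fc1 = nn.Linear(8 * 32 * 32, 64)

# in Net.forward: keep the view consistent with fc1
x = x.view(-1, 8 * 32 * 32)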
|
90b2c3b0627db3ddde07ad1c3eea6d5b
|
{
"intermediate": 0.2196556031703949,
"beginner": 0.38075023889541626,
"expert": 0.39959412813186646
}
|
47,656
|
a data file is in the form:
user1 3
1
user2 4
0 1 2
user3 4
0 2
in which user2 is "name" and the number after that is the limit
where there can be any amount of whitespace between the user and the limit; then, after the name and limit are taken in as input, the line right after that will contain a series of numbers on the same line that you will take in. Create a C++ Profile that will successfully take in the data in the exact form.
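A hedged sketch of one possible Profile reader for that exact format ("name limit" with any run of whitespace between them, then the id list on the next line; the file name is an assumption):
#include <fstream>
#include <iostream>
#include <iterator>
#include <limits>
#include <sstream>
#include <string>
#include <vector>

struct Profile {
    std::string name;
    int limit = 0;
    std::vector<int> ids;
};

std::istream& operator>>(std::istream& in, Profile& p) {
    if (!(in >> p.name >> p.limit)) return in;      // operator>> skips any whitespace
    in.ignore(std::numeric_limits<std::streamsize>::max(), '\n');
    std::string line;
    if (!std::getline(in, line)) return in;         // the line of numbers
    std::istringstream iss(line);
    p.ids.assign(std::istream_iterator<int>(iss), std::istream_iterator<int>());
    return in;
}

int main() {
    std::ifstream file("data.txt");                 // assumed file name
    Profile p;
    while (file >> p)
        std::cout << p.name << " limit=" << p.limit << " ids=" << p.ids.size() << '\n';
}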
|
c898cd13e41b5cb55f989d53d9e5b221
|
{
"intermediate": 0.47783440351486206,
"beginner": 0.10432061553001404,
"expert": 0.4178449511528015
}
|
47,657
|
You are an expert Rust programmer. You are given the following function:
#[derive(Debug, PartialEq, Eq, Hash)]
pub enum FivePrimeStatus {
// 5' mapping
Complete,
CompleteWithIR,
TruncatedInExon,
TruncatedInExonIR,
}
pub fn map_5_prime(
c_fivends: &[(u64, u64)],
c_exons: &[(u64, u64)],
c_introns: &[(u64, u64)],
tx_exons: &[&(u64, u64)],
id: &Arc<str>,
// flags -> (skip_exon [0: true, 1: false], nt_5_end)
flags: &HashSet<(u64, u64)>,
line: String,
) -> Result<(String, String, Status)> {
let mut status = FivePrimeStatus::Complete;
let mut idx = String::new();
let exon = tx_exons[0];
let (skip, _) = flags.iter().next().unwrap();
// println!("{:?} - {:?} - {:?}", exon, c_exons, id);
for (i, fend) in c_fivends.iter().enumerate() {
if exon.0 > fend.1 || exon.1 < fend.0 {
if i == c_fivends.len() - 1 {
// exon is after the last fivend
for (j, c_exon) in c_exons.iter().enumerate() {
if exon.0 > c_exon.0 && exon.0 < c_exon.1 {
if c_fivends.contains(&c_exon) && *skip < 1 {
status = FivePrimeStatus::Complete;
idx = i.to_string();
break;
} else {
status = FivePrimeStatus::TruncatedInExon;
idx = i.to_string();
break;
}
} else {
status = FivePrimeStatus::Complete;
continue;
}
}
break;
} else {
continue;
}
} else {
// most likely inside the exon
// starts are equal -> Complete
// starts differ -> check if intron is preserved
if exon.0 == fend.0 {
// starts are equal
// check if intron is preserved -> ask Michael
status = FivePrimeStatus::Complete;
idx = i.to_string();
break;
} else if exon.0 > fend.0 {
// starts differ, eval skip flag + truncation
// to eval truncation -> check if intron is preserved
if *skip > 0 {
// not ignore this exon
// should evaluate nt_5_end here
status = FivePrimeStatus::TruncatedInExon;
idx = i.to_string();
break;
} else {
// even if --ignore-exon is set, we need to check
// if the 5'end truncates any other consensus exon
for (j, c_exon) in c_exons.iter().enumerate() {
if exon.0 > c_exon.0 && exon.0 < c_exon.1 {
if c_fivends.contains(&c_exon) && *skip < 1 {
status = FivePrimeStatus::Complete;
idx = i.to_string();
break;
} else {
status = FivePrimeStatus::TruncatedInExon;
idx = i.to_string();
break;
}
} else {
status = FivePrimeStatus::Complete;
continue;
}
}
// ignore this exon -> still check intron
// loop through introns and check if this
// exon overlaps anyone
if status == FivePrimeStatus::Complete {
for c_intron in c_introns.iter() {
if exon.1 < c_intron.0 {
status = FivePrimeStatus::Complete;
break;
} else if exon.0 <= c_intron.0 && exon.1 >= c_intron.1 {
// retains an intron
status = FivePrimeStatus::CompleteWithIR;
idx = i.to_string();
break;
} else {
status = FivePrimeStatus::Complete;
idx = i.to_string();
continue;
}
}
break;
}
}
} else {
for (j, c_exon) in c_exons.iter().enumerate() {
if exon.0 > c_exon.0 && exon.0 < c_exon.1 {
if c_fivends.contains(&c_exon) && *skip < 1 {
status = FivePrimeStatus::Complete;
idx = i.to_string();
break;
} else {
status = FivePrimeStatus::TruncatedInExon;
idx = i.to_string();
break;
}
} else {
status = FivePrimeStatus::Complete;
continue;
}
}
}
}
}
let info = match status {
FivePrimeStatus::Complete => (line, "".to_string(), Status::Complete),
FivePrimeStatus::CompleteWithIR => {
let new_id = format!("{}_5COMP_IR", id);
let mut fields: Vec<&str> = line.split('\t').collect();
fields[3] = &new_id;
let line = fields.join("\t");
(line, "".to_string(), Status::Complete)
}
FivePrimeStatus::TruncatedInExon => {
let new_id = format!("{}_5TRUNC", id);
let mut fields: Vec<&str> = line.split('\t').collect();
fields[3] = &new_id;
let line = fields.join("\t");
(line, "".to_string(), Status::Truncated)
}
FivePrimeStatus::TruncatedInExonIR => {
let new_id = format!("{}_5TRUNC_IR", id);
let mut fields: Vec<&str> = line.split('\t').collect();
fields[3] = &new_id;
let line = fields.join("\t");
(line, "".to_string(), Status::Truncated)
}
};
Ok(info)
}
Your task is to improve its efficiency, making it faster, more elegant and more efficient. Be careful not to change its current functionality. If this function produces different results than those achieved with this implementation you will die. Provide the code.
|
64f8327bb07dfe0a09d708a0ab46acff
|
{
"intermediate": 0.40584900975227356,
"beginner": 0.3957996666431427,
"expert": 0.19835135340690613
}
|
47,658
|
qemu-system-aarch64 -M virt -cpu host -accel kvm -m 2G -smp 2 -device ramfb -bios /usr/share/qemu/qemu-uefi-aarch64.bin -device qemu-xhci -device usb-kbd -device usb-tablet -drive file=/home/jayden/Downloads/Windows11_InsiderPreview_Client_ARM64_en-us_22598.VHDX,format=vhdx,if=none,id=boot -device usb-storage,drive=boot,serial=boot -drive file=/home/jayden/Downloads/virtio-win-0.1.248.iso,media=cdrom,if=none,id=iso -device usb-storage,drive=iso -nic user,model=virtio-net-pci,mac=52:54:98:76:54:32 -vnc :0
qemu-system-aarch64: Could not find ROM image '/usr/share/qemu/qemu-uefi-aarch64.bin' fedora
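On Fedora the AArch64 UEFI firmware ships in the edk2-aarch64 package rather than at that Debian-style path; a hedged correction (the pflash qcow2 variant in the same directory also works via -drive if=pflash):
sudo dnf install edk2-aarch64
qemu-system-aarch64 ... -bios /usr/share/edk2/aarch64/QEMU_EFI.fd ...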
|
66175cba48ac5bf8dbf7a267260983a3
|
{
"intermediate": 0.3689511716365814,
"beginner": 0.31550344824790955,
"expert": 0.31554532051086426
}
|
47,659
|
I have this <input style="text-align:center" class="professionInput" type="text" [(ngModel)]="profession"
/> and a print button; I want the button to get a dimmed class when this input is empty (Angular)
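A hedged sketch using Angular's class and disabled bindings (assuming a .dimmed CSS class already exists):
<button [class.dimmed]="!profession" [disabled]="!profession">Print</button>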
|
8f4d5227a6a3475c98c297a72da80feb
|
{
"intermediate": 0.37098705768585205,
"beginner": 0.2645126283168793,
"expert": 0.36450034379959106
}
|
47,660
|
Correct the code: the product cards don't line up in a row, they stack vertically.
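No markup was included, so this is only a guess at the usual cause: the card container lacks a row layout. A common fix sketch (selector names are assumptions):
.products { display: flex; flex-wrap: wrap; gap: 16px; }
.products .card { flex: 0 0 250px; }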
|
b58e2768fd35c9324c45b762821a29f6
|
{
"intermediate": 0.29560402035713196,
"beginner": 0.3960094451904297,
"expert": 0.30838659405708313
}
|
47,661
|
I am trying to boot ARM Windows on an aarch64 Fedora install using qemu. However, when booting I get "Imge type x64 cant be loaded on aarch64" even though I know for a fact it is a Windows 11 ARM VHDX that I got from Microsoft. Here is my xml <domain type="kvm">
<name>win11</name>
<uuid>ac9517de-618f-4e75-b40c-3444d17064eb</uuid>
<metadata>
<libosinfo:libosinfo xmlns:libosinfo="http://libosinfo.org/xmlns/libvirt/domain/1.0">
<libosinfo:os id="http://microsoft.com/win/11"/>
</libosinfo:libosinfo>
</metadata>
<memory unit="KiB">4194304</memory>
<currentMemory unit="KiB">4194304</currentMemory>
<vcpu placement="static">4</vcpu>
<os firmware="efi">
<type arch="aarch64" machine="virt-8.1">hvm</type>
<firmware>
<feature enabled="no" name="enrolled-keys"/>
<feature enabled="no" name="secure-boot"/>
</firmware>
<loader readonly="yes" type="pflash" format="qcow2">/usr/share/edk2/aarch64/QEMU_EFI-pflash.qcow2</loader>
<nvram template="/usr/share/edk2/aarch64/vars-template-pflash.qcow2" format="qcow2">/var/lib/libvirt/qemu/nvram/win11_VARS.qcow2</nvram>
<boot dev="hd"/>
</os>
<features>
<acpi/>
<hyperv mode="custom">
<relaxed state="off"/>
<vapic state="off"/>
<spinlocks state="off"/>
</hyperv>
<gic version="3"/>
</features>
<cpu mode="host-passthrough" check="none"/>
<clock offset="localtime"/>
<on_poweroff>destroy</on_poweroff>
<on_reboot>restart</on_reboot>
<on_crash>destroy</on_crash>
<devices>
<emulator>/usr/bin/qemu-system-aarch64</emulator>
<disk type="file" device="disk">
<driver name="qemu" type="raw"/>
<source file="/home/jayden/Downloads/Windows11_InsiderPreview_Client_ARM64_en-us_22598.VHDX"/>
<target dev="sda" bus="usb"/>
<address type="usb" bus="0" port="1"/>
</disk>
<disk type="file" device="cdrom">
<driver name="qemu" type="raw"/>
<source file="/home/jayden/Downloads/virtio-win-0.1.248.iso"/>
<target dev="sdb" bus="usb"/>
<readonly/>
<address type="usb" bus="0" port="2"/>
</disk>
<controller type="usb" index="0" model="qemu-xhci" ports="15">
<address type="pci" domain="0x0000" bus="0x02" slot="0x00" function="0x0"/>
</controller>
<controller type="pci" index="0" model="pcie-root"/>
<controller type="pci" index="1" model="pcie-root-port">
<model name="pcie-root-port"/>
<target chassis="1" port="0x8"/>
<address type="pci" domain="0x0000" bus="0x00" slot="0x01" function="0x0" multifunction="on"/>
</controller>
<controller type="pci" index="2" model="pcie-root-port">
<model name="pcie-root-port"/>
<target chassis="2" port="0x9"/>
<address type="pci" domain="0x0000" bus="0x00" slot="0x01" function="0x1"/>
</controller>
<controller type="pci" index="3" model="pcie-root-port">
<model name="pcie-root-port"/>
<target chassis="3" port="0xa"/>
<address type="pci" domain="0x0000" bus="0x00" slot="0x01" function="0x2"/>
</controller>
<controller type="pci" index="4" model="pcie-root-port">
<model name="pcie-root-port"/>
<target chassis="4" port="0xb"/>
<address type="pci" domain="0x0000" bus="0x00" slot="0x01" function="0x3"/>
</controller>
<controller type="pci" index="5" model="pcie-root-port">
<model name="pcie-root-port"/>
<target chassis="5" port="0xc"/>
<address type="pci" domain="0x0000" bus="0x00" slot="0x01" function="0x4"/>
</controller>
<controller type="pci" index="6" model="pcie-root-port">
<model name="pcie-root-port"/>
<target chassis="6" port="0xd"/>
<address type="pci" domain="0x0000" bus="0x00" slot="0x01" function="0x5"/>
</controller>
<controller type="pci" index="7" model="pcie-root-port">
<model name="pcie-root-port"/>
<target chassis="7" port="0xe"/>
<address type="pci" domain="0x0000" bus="0x00" slot="0x01" function="0x6"/>
</controller>
<controller type="pci" index="8" model="pcie-root-port">
<model name="pcie-root-port"/>
<target chassis="8" port="0xf"/>
<address type="pci" domain="0x0000" bus="0x00" slot="0x01" function="0x7"/>
</controller>
<controller type="pci" index="9" model="pcie-root-port">
<model name="pcie-root-port"/>
<target chassis="9" port="0x10"/>
<address type="pci" domain="0x0000" bus="0x00" slot="0x02" function="0x0" multifunction="on"/>
</controller>
<controller type="pci" index="10" model="pcie-root-port">
<model name="pcie-root-port"/>
<target chassis="10" port="0x11"/>
<address type="pci" domain="0x0000" bus="0x00" slot="0x02" function="0x1"/>
</controller>
<controller type="pci" index="11" model="pcie-root-port">
<model name="pcie-root-port"/>
<target chassis="11" port="0x12"/>
<address type="pci" domain="0x0000" bus="0x00" slot="0x02" function="0x2"/>
</controller>
<controller type="pci" index="12" model="pcie-root-port">
<model name="pcie-root-port"/>
<target chassis="12" port="0x13"/>
<address type="pci" domain="0x0000" bus="0x00" slot="0x02" function="0x3"/>
</controller>
<controller type="pci" index="13" model="pcie-root-port">
<model name="pcie-root-port"/>
<target chassis="13" port="0x14"/>
<address type="pci" domain="0x0000" bus="0x00" slot="0x02" function="0x4"/>
</controller>
<controller type="pci" index="14" model="pcie-root-port">
<model name="pcie-root-port"/>
<target chassis="14" port="0x15"/>
<address type="pci" domain="0x0000" bus="0x00" slot="0x02" function="0x5"/>
</controller>
<controller type="scsi" index="0" model="virtio-scsi">
<address type="pci" domain="0x0000" bus="0x03" slot="0x00" function="0x0"/>
</controller>
<controller type="virtio-serial" index="0">
<address type="pci" domain="0x0000" bus="0x04" slot="0x00" function="0x0"/>
</controller>
<interface type="network">
<mac address="52:54:00:5f:9e:3b"/>
<source network="default"/>
<model type="virtio"/>
<address type="pci" domain="0x0000" bus="0x01" slot="0x00" function="0x0"/>
</interface>
<serial type="pty">
<target type="system-serial" port="0">
<model name="pl011"/>
</target>
</serial>
<console type="pty">
<target type="serial" port="0"/>
</console>
<channel type="unix">
<target type="virtio" name="org.qemu.guest_agent.0"/>
<address type="virtio-serial" controller="0" bus="0" port="1"/>
</channel>
<tpm model="tpm-tis">
<backend type="emulator" version="2.0"/>
</tpm>
<audio id="1" type="none"/>
<video>
<model type="ramfb" heads="1" primary="yes"/>
</video>
</devices>
</domain>
|
24d9151f990728fee89f12e01837292d
|
{
"intermediate": 0.3812265396118164,
"beginner": 0.39969655871391296,
"expert": 0.21907688677310944
}
|
47,662
|
Please fix this code and return the full fixed code.
|
150587b50c7b5777bd3307cac6f60d69
|
{
"intermediate": 0.2912351191043854,
"beginner": 0.4324002265930176,
"expert": 0.2763647139072418
}
|
47,663
|
How to send email with SMTP on Mailgun
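A minimal Python sketch of one way to do this over Mailgun's SMTP relay (smtp.mailgun.org, port 587 with STARTTLS); the login, sending domain, and recipient below are placeholders, not real credentials:

import smtplib
from email.message import EmailMessage

SMTP_HOST = "smtp.mailgun.org"
SMTP_PORT = 587  # STARTTLS port
SMTP_USER = "postmaster@example.mailgun.org"  # placeholder Mailgun SMTP login
SMTP_PASS = "your-smtp-password"              # placeholder credential

msg = EmailMessage()
msg["Subject"] = "Hello from Mailgun"
msg["From"] = SMTP_USER
msg["To"] = "recipient@example.com"  # placeholder recipient
msg.set_content("Sent through Mailgun's SMTP relay.")

with smtplib.SMTP(SMTP_HOST, SMTP_PORT) as server:
    server.starttls()                  # upgrade the connection to TLS
    server.login(SMTP_USER, SMTP_PASS)
    server.send_message(msg)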
|
a18fab687bc3756940f45c7d50b72939
|
{
"intermediate": 0.40914008021354675,
"beginner": 0.28128528594970703,
"expert": 0.30957460403442383
}
|
47,664
|
import { useParams } from 'react-router-dom';
const Update = () => {
const { annonceId } = useParams();...
but when I navigate to the page, the CSS styling is gone
|
52e05e3e2c94731c2f3b1607cf4a958d
|
{
"intermediate": 0.4872738718986511,
"beginner": 0.34378471970558167,
"expert": 0.168941468000412
}
|
47,665
|
Here are my new components:
(ns blueridge.ui.new
(:require
[ui.components :as components]
[cljs.pprint :refer [cl-format]]
[clojure.string :as string]
[reagent.core :as ra]
["reactstrap" :as rs]))
(defn dropdown-item [config]
(let [label (:label config)]
[components/dropdown-item config label]))
(def default-styles-dropdown-button
{:title "Actions"
:dropdown-button {:key (str "dropdown-" (rand-int 100))
;; If there are multiple dropdowns with the same title this leads to problems.
;; You should set this manually, but this is a small failsafe.
:direction "down"
:class {}
:style {:height "3rem"}}
:dropdown-toggle {:key "dropdown-toggle"
:style {:width "8rem"}
:color "primary"
:caret true}
:dropdown-menu {:key "dropdown-menu"
:class "dropdown-menu-right"}
:items [[dropdown-item {:key "item-1"
:disabled false
:label "Clickable item"}]
[dropdown-item {:key "item-2"
:disabled true
:label [:div.d-flex
"Disabled item "
[:i.fas.fa-question-circle.fa-md.ml-2]]}]]})
(defn dropdown-button
"Standard dropdown menu button for Gojee. Takes a config map.
See default-styles-dropdown-menu-button for an example list of things that can be changed if desired.
Required:
`:items` [list of dropdown items]
Recommended:
`:title` string
`:dropdown-button` {:key string} (unique, prevents duplicate menus)
Optional:
`:dropdown-toggle` {}
`:dropdown-menu` {}"
[config]
(ra/with-let [is-open (ra/atom false)
merged-config
button-options (:dropdown-button merged-config)
toggle-options (:dropdown-toggle merged-config)
menu-options (:dropdown-menu merged-config)
items (:items merged-config)]
[components/dropdown-button
(merge button-options
{:is-open @is-open
:toggle #(reset! is-open (not @is-open))})
[components/dropdown-toggle
toggle-options
(:title merged-config)]
[components/dropdown-menu
menu-options
(map dropdown-item items)]]))
I want the options and items to update on hotreload, so I don't think I can use with-let. How should I structure the code instead?
|
997121e7a2e9718e713bdde8ba465a4f
|
{
"intermediate": 0.3478584885597229,
"beginner": 0.553428053855896,
"expert": 0.09871342778205872
}
|
47,666
|
I want to combine the best parts of these two similar functions:
(defn dropdown-button-list
"Returns a dropdown button component
Requires options of:
{:id …
:label …
:items [{:label …
:dispatch …}
…]}
Optionally can take options of:
{:color …
:class …
:direction …
:disabled …
:items [{:disabled …}
…]}"
[{:keys [id label color direction disabled class items] :as options}]
(ra/with-let [is-open (ra/atom false)]
[dropdown-button {:key (str id "-dropdown-button")
:is-open @is-open
:direction (or direction "down")
:toggle #(reset! is-open (not @is-open))
:class class}
[dropdown-toggle {:key (str id "-dropdown-toggle")
:caret true
:color (or color "warning")
:disabled disabled}
label]
(let [dropdown-items (->> items
(remove nil?)
(map-indexed append-dropdown-idx))]
;; README: There is an issue where if the dropdown menu is long enough, it will hug the screen / browsers
;; right side, for now just put a mr-3 to move it left a bit and make it look better.
[dropdown-menu {:key (str id "-dropdown-menu")}
(for [{:keys [label dispatch disabled dropdown-idx]} dropdown-items]
[dropdown-item {:key (str id "-dropdown-item-" dropdown-idx)
:on-click #(rf/dispatch dispatch)
:disabled disabled}
label])])]))
(defn dropdown-button-component
"Standard dropdown menu button. Takes a config map.
See default-styles-dropdown-menu-button for an example list of things that can be changed if desired.
Required:
:items [list of dropdown items preformatted]
Recommended:
:title string
:dropdown-button {:key string} (unique, prevents duplicate menus)
Optional:
:dropdown-toggle {}
:dropdown-menu {}"
[{:keys [title dropdown-button dropdown-toggle dropdown-menu hiccup-items items]
:or {title (:title default-styles-dropdown-button)}}]
(def foo dropdown-button)
(ra/with-let [is-open (ra/atom false)
default default-styles-dropdown-button]
(fn [] [components/dropdown-button
(merge (:dropdown-button default)
dropdown-button
{:is-open @is-open
:toggle #(reset! is-open (not @is-open))})
[components/dropdown-toggle
(merge (:dropdown-toggle default) dropdown-toggle)
title]
[components/dropdown-menu
(merge (:dropdown-menu default)
dropdown-menu)
(map dropdown-item items)]])))
They are supported by the following code:
(defn dropdown-item [config]
(let [label (:label config)]
[components/dropdown-item config label]))
(def default-styles-dropdown-button
{:title "Actions"
:dropdown-button {:key (str "dropdown-" (rand-int 100))
;; If there are multiple dropdowns with the same title this leads to problems.
;; You should set this manually, but this is a small failsafe.
:direction "down"
:class {}
:style {:height ""}}
:dropdown-toggle {:key "dropdown-toggle"
:style {:width ""}
:color "primary"
:caret true}
:dropdown-menu {:key "dropdown-menu"
:class "dropdown-menu-right"}
;; These items are here just to show examples of the structure, they are not read as default options by the menu generation code
:items [{:key "item-1"
:disabled false
:label "Clickable item"}
{:key "item-2"
:disabled true
:label [:div.d-flex
"Disabled item "
[:i.fas.fa-question-circle.fa-md.ml-2]]}]})
(defn- append-dropdown-idx [idx item]
(assoc item :dropdown-idx idx))
|
f7ee423d543e08d377ecef83f7b07a21
|
{
"intermediate": 0.3551373779773712,
"beginner": 0.3726988732814789,
"expert": 0.27216383814811707
}
|
47,667
|
PS C:\Users\hp\Desktop\botTelegram> & C:/Users/hp/AppData/Local/Programs/Python/Python312/python.exe c:/Users/hp/Desktop/botTelegram/test.py
Traceback (most recent call last):
File "c:\Users\hp\Desktop\botTelegram\test.py", line 14, in <module>
from PIL import Image
File "C:\Users\hp\AppData\Roaming\Python\Python312\site-packages\PIL\Image.py", line 103, in <module>
from . import _imaging as core
ImportError: cannot import name '_imaging' from 'PIL' (C:\Users\hp\AppData\Roaming\Python\Python312\site-packages\PIL\__init__.py)
PS C:\Users\hp\Desktop\botTelegram>
|
f2ab12adadd28a40fd78e9a2ba6a1437
|
{
"intermediate": 0.4110018014907837,
"beginner": 0.3127533197402954,
"expert": 0.2762449085712433
}
|
47,668
|
If I have a file with 2 inputs like below, a name and an integer after it that can be separated by any number of whitespace characters, how do I read in the 2 inputs?
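A minimal Python sketch under the stated assumptions (one name and one integer per line; "input.txt" is a hypothetical filename); str.rsplit with maxsplit=1 splits on the last run of whitespace, so it also tolerates names that contain spaces:

# Read "name  integer" pairs separated by arbitrary whitespace.
with open("input.txt") as fh:
    for line in fh:
        if not line.strip():
            continue                        # skip blank lines
        name, number = line.rsplit(maxsplit=1)
        print(name, int(number))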
|
b38f179639544f0b18e9dea6b27bee9e
|
{
"intermediate": 0.46779558062553406,
"beginner": 0.14908887445926666,
"expert": 0.3831155300140381
}
|
47,669
|
<select onChange={(e)=>setEtat(e.target.value)} defaultValue={Etat} className='w-75'>
<option >-État-</option>
<option value="Neuf">Neuf</option>
<option value="Très bon">Très bon</option>
<option value="Bon">Bon</option>
<option value="Correct">Correct</option>
<option value="Endommagé">Endommagé</option>
<option value="Pour Pièces">Pour Pièces</option>
</select>
How do I select the value so that it shows as the current selection?
|
0da05729e4476734b96b2decb86b2005
|
{
"intermediate": 0.31402158737182617,
"beginner": 0.4977681338787079,
"expert": 0.18821024894714355
}
|
47,670
|
A code snippet to send files through curl in a loop that goes through files that match the pattern
use curl -X POST 'https://store1.gofile.io/contents/uploadfile' -H "Authorization: Bearer your_token" -F "file=@file.txt"
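A minimal Python sketch of such a loop, shelling out to the exact curl command above; the glob pattern and token are placeholders:

import glob
import subprocess

TOKEN = "your_token"  # placeholder bearer token
URL = "https://store1.gofile.io/contents/uploadfile"

# Upload every file matching the pattern, one curl call per file.
for path in glob.glob("*.txt"):  # hypothetical pattern
    subprocess.run(
        ["curl", "-X", "POST", URL,
         "-H", f"Authorization: Bearer {TOKEN}",
         "-F", f"file=@{path}"],
        check=True,  # raise if curl exits non-zero
    )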
|
5950f1d512fbe6b8df26776c7827bbb9
|
{
"intermediate": 0.31614285707473755,
"beginner": 0.37785980105400085,
"expert": 0.3059973418712616
}
|
47,671
|
Traceback (most recent call last):
File "C:\python\material_management\src\application\controllers\manage_all_main_roots.py", line 90, in execute
task.execute()
File "C:\python\material_management\src\domain\interactors\distribute_for_root.py", line 119, in execute
self._add_related_materials_from_replacement_data()
File "C:\python\material_management\src\domain\interactors\distribute_for_root.py", line 52, in _add_related_materials_from_replacement_data
).execute()
^^^^^^^^^
File "C:\python\material_management\src\domain\use_cases\normalize_related_materials.py", line 55, in execute
unique_codes_sets = self._get_unique_codes_sets()
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "C:\python\material_management\src\domain\use_cases\normalize_related_materials.py", line 20, in _get_unique_codes_sets
requirements = self._repository_source.get()
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "C:\python\material_management\src\domain\repositories\repository.py", line 21, in get
self._get_from_data_source()
File "C:\python\material_management\src\domain\repositories\replaced_nomenclatures_repository.py", line 12, in _get_from_data_source
replaced_nomenclatures = self._get_data_adapter.get_replaced_nomenclatures()
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "C:\python\material_management\src\data_sources\get_data_adapter_facade.py", line 63, in get_replaced_nomenclatures
return excel.GetReplacedNomenclatureAdapter(self._codes_replacement_file_path).execute()
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "C:\python\material_management\src\data_sources\adapters\excel\get_replaced_nomenclatures_adapter.py", line 49, in execute
replaced_data = self._get_data_from_excel()
^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "C:\python\material_management\src\data_sources\adapters\excel\get_replaced_nomenclatures_adapter.py", line 15, in _get_data_from_excel
workbook = load_workbook(self._codes_replacement_file_path)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "C:\Users\1CIntegrationSystem\AppData\Local\Programs\Python\Python312\Lib\site-packages\openpyxl\reader\excel.py", line 346, in load_workbook
reader.read()
File "C:\Users\1CIntegrationSystem\AppData\Local\Programs\Python\Python312\Lib\site-packages\openpyxl\reader\excel.py", line 301, in read
self.read_worksheets()
File "C:\Users\1CIntegrationSystem\AppData\Local\Programs\Python\Python312\Lib\site-packages\openpyxl\reader\excel.py", line 237, in read_worksheets
ws_parser.bind_all()
File "C:\Users\1CIntegrationSystem\AppData\Local\Programs\Python\Python312\Lib\site-packages\openpyxl\worksheet\_reader.py", line 465, in bind_all
self.bind_cells()
File "C:\Users\1CIntegrationSystem\AppData\Local\Programs\Python\Python312\Lib\site-packages\openpyxl\worksheet\_reader.py", line 374, in bind_cells
self.ws._cells[(cell['row'], cell['column'])] = c
~~~~~~~~~~~~~~^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
MemoryError
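The usual workaround for this MemoryError is openpyxl's read-only mode, which streams rows instead of materializing every cell; a minimal sketch, with a hypothetical file path standing in for the real one:

from openpyxl import load_workbook

path = r"C:\data\codes_replacement.xlsx"  # hypothetical path

# read_only=True avoids building the full in-memory cell map that
# overflows in bind_cells above.
workbook = load_workbook(path, read_only=True)
sheet = workbook.active
for row in sheet.iter_rows(values_only=True):
    print(row)       # stand-in for real per-row processing
workbook.close()     # read-only workbooks keep the file handle open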
|
f0eacf17152d59d349da55f340a975a8
|
{
"intermediate": 0.40772467851638794,
"beginner": 0.36959871649742126,
"expert": 0.2226766049861908
}
|
47,672
|
I want you to help me create a JavaScript or jQuery snippet to scroll up when a button is clicked:
Button in question : <div id="payment-confirmation" style="visibility: initial;">
<div class="ps-shown-by-js">
<button type="submit" class="btn btn-primary center-block">
COMMANDER
</button>
<article class="alert alert-danger mt-2 js-alert-payment-conditions" role="alert" data-alert="danger" style="display: none;">
Merci de sélectionner un <a href="#checkout-payment-step">moyen de paiement</a>.
</article>
</div>
<div class="ps-hidden-by-js" style="display: none;">
</div>
</div>
Scroll up to: <section id="checkout-payment-step" class="checkout-step -reachable -clickable -current js-current-step">
<h1 class="step-title h3">
<i class="material-icons rtl-no-flip done"></i>
<span class="step-number">4</span>
Paiement
<span class="step-edit text-muted"><i class="material-icons edit">mode_edit</i> Modifier</span>
</h1>
<div class="content">
<div class="payment-options ">
<div>
<div id="payment-option-1-container" class="payment-option clearfix">
|
f816b6f4852e00d52393a167ec149f61
|
{
"intermediate": 0.7023252844810486,
"beginner": 0.1957998424768448,
"expert": 0.1018749326467514
}
|
47,673
|
#ifndef CONTENT_H
#define CONTENT_H
#include <vector>
#include <string>
#include <iostream>
class Content {
public:
/**
* @brief Construct a new Content object
*
* @param id Content ID
* @param name Content name (spaces are fine)
* @param nr Number of reviews
* @param ts Total stars of all reviews
* @param rating Parental control rating
*/
Content(int id, std::string name, int nr, int ts, int rating);
/**
* @brief Destroy the Content object
*
*/
virtual ~Content();
/**
* Accessors
*/
int id() const;
std::string name() const;
int rating() const;
const std::vector<std::string>& getViewers() const;
/**
* @brief Displays the Content item information (except for the viewers)
* in a nice visual form
*
* @param ostr Stream to output to
*/
virtual void display(
std::ostream& ostr) const;
/**
* @brief Gives a review for this Content item with the given number
* of stars
*
* @param numStars Number of stars for the review
* @throw std::invalid_argument if numStars is not between 0-5
*/
void review(int numStars);
/**
* @brief Get the average number of stars over all the reviews
*
* @return double review average
*/
double getStarAverage() const;
/**
* @brief Adds the given username to the list of those who have viewed
* this content
*
* @param username
*/
void addViewer(const std::string& username);
/**
* @brief Convenience function to check if a user has viewed this content
*
* @param uname Username to find
* @return true If the user HAS viewed this content
* @return false otherwise
*/
bool hasViewed(const std::string& uname) const;
protected:
/**
* @brief Returns an array of C-Strings that correspond to ratings
* to be displayed for a particular type of Content
*
* @return const char*
*/
virtual const char** ratingStrings() const = 0;
private:
int id_;
std::string name_;
int numReviews_;
int totalStars_;
int rating_;
std::vector<std::string> usersWhoViewed_;
};
// Update the Movie and Series classes below to use inheritance
// Then you may also add the appropriate data members (if necessary),
// and override the behavior
class Movie : public Content
{
public:
/**
* @brief Construct a new Movie object
*
* @param id Content ID
* @param name Content name (spaces are fine)
* @param nr Number of reviews
* @param ts Total stars of all reviews
* @param rating Parental control rating
*/
Movie(int id, std::string name, int nr, int ts, int rating);
/**
* @brief Destroy the Movie object
*
*/
~Movie();
/// Add an override of the Content::display function,
/// if you deem it necessary
protected:
// Do not alter this line
const char** ratingStrings() const;
private:
// Add more data members if you deem it necessary
};
class Series : public Content {
public:
/**
* @brief Construct a new Series object
*
* @param id Content ID
* @param name Content name (spaces are fine)
* @param nr Number of reviews
* @param ts Total stars of all reviews
* @param rating Parental control rating
* @param numEpisode Numer of episodes in the series
*/
Series(int id, std::string name, int nr, int ts, int rating, int numEpisodes);
/**
* @brief Destroy the Series object
*
*/
~Series();
/**
* @brief Accessor / getter to return the number of episodes in the series
*
* @return int
*/
int numEpisodes() const;
/// Add an override of the Content::display function,
/// if you deem it necessary
protected:
// Do not alter this line
const char** ratingStrings() const;
private:
// Add data members as needed
};
#endif
This is the header file for my Content class. Help me add appropriate data members to the Movie and Series classes, as necessary.
|
0ade7d82169637dd981ec9be27711c72
|
{
"intermediate": 0.4027208089828491,
"beginner": 0.385143518447876,
"expert": 0.2121356874704361
}
|
47,674
|
I heard Spanish DJs usually say something like "como i se" but I don't understand it. What does it mean?
|
5ef7e50a1df12150cdf21d2bfc844032
|
{
"intermediate": 0.40109673142433167,
"beginner": 0.33511239290237427,
"expert": 0.26379087567329407
}
|
47,675
|
SELECT Date(o."updatedAt"), c.name -> c."defaultLang" as name, SUM(o.amount) as amount, SUM(o.price) as total_price, o.currency
FROM "Order" o
LEFT JOIN "Pack" p ON o."packId" = p.id
LEFT JOIN "Collection" c ON p."collectionId" = c.id
WHERE o.status IS TRUE GROUP BY Date(o."updatedAt"), p.id, c.id, o.currency ORDER BY Date(o."updatedAt") DESC
This is the query I'm currently using in Postgres.
order_amounts AS (
SELECT
DATE("createdAt") AS "orderDate",
"currency",
SUM("price") AS "totalPrice"
FROM "Order"
WHERE "status" = TRUE
GROUP BY DATE("createdAt"), "currency"
),
Using this, weave JSON_OBJECT_AGG("currency", "totalPrice") into the select in place of total_price. Then the existing SQL select no longer needs to show currency.
|
54e1e67e2bce69e6f6f3dd91c2b92a5a
|
{
"intermediate": 0.3599373996257782,
"beginner": 0.34047746658325195,
"expert": 0.29958513379096985
}
|
47,676
|
DNN(B)-frs(1000)-[16(relu)-16(relu)-16(relu)-8(relu)-1(sigmoid)]-[es(??)-bs(10)]
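One plausible reading of this shorthand, sketched in Python with Keras; the mapping is an assumption since the notation is not defined here: 1000 input features for "frs(1000)", dense layers 16-16-16-8 with ReLU and a 1-unit sigmoid output for binary classification, early stopping for "es(??)" (patience left unspecified), and batch size 10:

from tensorflow import keras

model = keras.Sequential([
    keras.layers.Input(shape=(1000,)),            # frs(1000): 1000 input features (assumed)
    keras.layers.Dense(16, activation="relu"),
    keras.layers.Dense(16, activation="relu"),
    keras.layers.Dense(16, activation="relu"),
    keras.layers.Dense(8, activation="relu"),
    keras.layers.Dense(1, activation="sigmoid"),  # binary output
])
model.compile(optimizer="adam", loss="binary_crossentropy", metrics=["accuracy"])

early_stop = keras.callbacks.EarlyStopping(       # es(??): patience unspecified
    monitor="val_loss", restore_best_weights=True)
# model.fit(X, y, validation_split=0.2, batch_size=10, callbacks=[early_stop])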
|
158961270b46f827311e71c6e6648b3b
|
{
"intermediate": 0.11745919287204742,
"beginner": 0.13027805089950562,
"expert": 0.7522627115249634
}
|
47,677
|
SELECT Date(o."updatedAt"), c.name -> c."defaultLang" as name, SUM(o.amount) as amount, SUM(o.price) as total_price, o.currency
FROM "Order" o
LEFT JOIN "Pack" p ON o."packId" = p.id
LEFT JOIN "Collection" c ON p."collectionId" = c.id
WHERE o.status IS TRUE GROUP BY Date(o."updatedAt"), p.id, c.id, o.currency ORDER BY Date(o."updatedAt") DESC
This is the query I'm currently using in Postgres.
order_amounts AS (
SELECT
DATE("createdAt") AS "orderDate",
"currency",
SUM("price") AS "totalPrice"
FROM "Order"
WHERE "status" = TRUE
GROUP BY DATE("createdAt"), "currency"
),
Using this, weave JSON_OBJECT_AGG("currency", "totalPrice") into the select in place of total_price. Then the currency in the existing select no longer needs to be included.
|
607361476756aa3f0b4f1ee0be2bd65b
|
{
"intermediate": 0.3446052074432373,
"beginner": 0.3468841314315796,
"expert": 0.3085106611251831
}
|
47,678
|
Consider a book with many pages. Each page can hold at most 100 names. Each name is assigned an ID sequentially. The names are ordered in reverse order of their IDs, from the largest ID to the smallest.
Write a python script that does the following
1. Ask for the old and new number of names in the book and then ask for a name which is specified by its page number and its position on that page when the total number of names is the old number.
2. Show the specified name's location when the total number of names is changed to the new number
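A minimal Python sketch of the arithmetic this calls for, assuming 1-based page and position numbers and that the chosen name's ID still exists under the new total:

import math

PAGE_SIZE = 100  # each page holds at most 100 names

old_total = int(input("Old number of names: "))
new_total = int(input("New number of names: "))
page = int(input("Page number (under the old total): "))
pos = int(input("Position on that page (under the old total): "))

# Names run from the largest ID down to the smallest, so the k-th
# name overall (1-based) has ID total - k + 1.
rank_old = (page - 1) * PAGE_SIZE + pos
name_id = old_total - rank_old + 1

# The same ID occupies a new overall rank once the total changes.
rank_new = new_total - name_id + 1
new_page = math.ceil(rank_new / PAGE_SIZE)
new_pos = (rank_new - 1) % PAGE_SIZE + 1
print(f"ID {name_id} is now on page {new_page}, position {new_pos}")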
|
bc03c6072f7f957c7cf6c6f572cd6b92
|
{
"intermediate": 0.3891048729419708,
"beginner": 0.24457092583179474,
"expert": 0.36632421612739563
}
|
47,679
|
SELECT Date(o."updatedAt"), c.name -> c."defaultLang" as name, SUM(o.amount) as amount, SUM(o.price) as total_price, o.currency
FROM "Order" o
LEFT JOIN "Pack" p ON o."packId" = p.id
LEFT JOIN "Collection" c ON p."collectionId" = c.id
WHERE o.status IS TRUE GROUP BY Date(o."updatedAt"), p.id, c.id, o.currency ORDER BY Date(o."updatedAt") DESC
This is the query I'm currently using in Postgres.
order_amounts AS (
SELECT
DATE(“createdAt”) AS “orderDate”,
“currency”,
SUM(“price”) AS “totalPrice”
FROM “Order”
WHERE “status” = TRUE
GROUP BY DATE(“createdAt”), “currency”
),
Using this, weave JSON_OBJECT_AGG(“currency”, “totalPrice”) into the select in place of total_price. Then the currency in the existing select no longer needs to be included. For the double quotes, use the " character.
|
420fddb8e6cee7a5aa928137de740580
|
{
"intermediate": 0.3376230001449585,
"beginner": 0.37914836406707764,
"expert": 0.28322863578796387
}
|
47,680
|
SELECT Date(o."updatedAt"), c.name -> c."defaultLang" as name, SUM(o.amount) as amount, SUM(o.price) as total_price, o.currency
FROM "Order" o
LEFT JOIN "Pack" p ON o."packId" = p.id
LEFT JOIN "Collection" c ON p."collectionId" = c.id
WHERE o.status IS TRUE GROUP BY Date(o."updatedAt"), p.id, c.id, o.currency ORDER BY Date(o."updatedAt") DESC
Instead of grouping by currency, I want total_price to hold {currency: SUM(price), ...}. Rewrite the SQL for Postgres 16 accordingly, and optimize it too.
Pull the JSONB_OBJECT_AGG into a separate WITH query.
|
06c7674b1b3d940b1fcbaa3ba440fca6
|
{
"intermediate": 0.3827131688594818,
"beginner": 0.2733345925807953,
"expert": 0.3439522981643677
}
|
47,681
|
WITH CurrencyPrices AS (
SELECT
Date(o."updatedAt") as updated_date,
o.currency,
o."packId",
SUM(amount),
SUM(o.price) as total_price
FROM "Order" o
WHERE o.status IS TRUE
GROUP BY updated_date, o."packId", o.currency
)
Starting from this, JOIN the "Pack" model as p on p.id = "CurrencyPrices"."packId", then JOIN the "Collection" model as c on c.id = p."collectionId". In the SELECT I want:
updated_date,
c.name -> c."defaultLang" as name,
amount,
total_price
That is the result I want to fetch. This is Postgres 15.
|
57b8064d4c8d78c71543fef690400613
|
{
"intermediate": 0.31694602966308594,
"beginner": 0.26484113931655884,
"expert": 0.41821274161338806
}
|
47,682
|
WITH CurrencyPrices AS (
SELECT
Date(o."updatedAt") as updated_date,
o.currency,
o."packId",
SUM(amount),
SUM(o.price) as total_price
FROM "Order" o
WHERE o.status IS TRUE
GROUP BY updated_date, o."packId", o.currency
)
SELECT
cp.updated_date,
c.name -> c."defaultLang" as name,
cp.amount,
JSONB_OBJECT_AGG(cp.currency, cp.total_price) as total_price
FROM "CurrencyPrices" cp
JOIN "Pack" p ON cp."packId" = p.id
JOIN "Collection" c ON p."collectionId" = c.id
ORDER BY cp.updated_date DESC;
ERROR: relation "CurrencyPrices" does not exist
LINE 17: FROM "CurrencyPrices" cp
^
Fix this SQL code for Postgres 15.
|
b484872c750928c18d276f5130efcdd9
|
{
"intermediate": 0.35827577114105225,
"beginner": 0.303589403629303,
"expert": 0.33813488483428955
}
|
47,683
|
WITH CurrencyPrices AS (
SELECT
Date(o."updatedAt") as updated_date,
c.name -> c."defaultLang" as name,
o.currency,
SUM(o.amount) as amount,
SUM(o.price) as total_price
FROM "Order" o
JOIN "Pack" p ON p.id = o."packId"
JOIN "Collection" c ON c.id = p."collectionId"
WHERE o.status IS TRUE
GROUP BY updated_date, o.currency, c.name, c."defaultLang"
ORDER BY updated_date DESC;
)
SELECT
cp.updated_date,
cp.name,
cp.amount,
JSONB_OBJECT_AGG(cp.currency, cp.total_price) as total_price
FROM CurrencyPrices cp
ORDER BY cp.updated_date DESC;
ERROR: syntax error at end of input
LINE 13: ORDER BY updated_date DESC
^
|
f5246b1c94ee6b85ec9dc5c11e772df9
|
{
"intermediate": 0.3003212511539459,
"beginner": 0.4680252969264984,
"expert": 0.23165342211723328
}
|
47,684
|
Write a C# class that describes a person as precisely as possible.
|
d643e2ee8f17af5c1a85a6071fb29e5f
|
{
"intermediate": 0.4078296720981598,
"beginner": 0.2470356971025467,
"expert": 0.3451346755027771
}
|
47,685
|
in servicenow, Is it possible to link the Change Request to Change tasks that is created using data source?
and if yes, is there any steps or documentation that can help me to achieved this.
|
c7aa175c6dfa20c6b910f4e561ef2462
|
{
"intermediate": 0.5532147884368896,
"beginner": 0.21807485818862915,
"expert": 0.2287103235721588
}
|
47,686
|
Write a long story for children and include a reference to the importance of values and morals.
|
41280f52a6e4be4411bc572c75f95140
|
{
"intermediate": 0.36026135087013245,
"beginner": 0.33697816729545593,
"expert": 0.3027605414390564
}
|
47,687
|
I am facing a challenge with a simple read ACL and am not sure what I'm missing. I need your help resolving the issue I have explained below.
For the table "sn_hr_core_case_cabin_crew", I have written 2 read ACL.
ACL1: If the user has the below role, he will be allowed to read the record.
ACL2: If the case is assigned to the user, then allow read access.
answer = caseAssignee();
function caseAssignee() {
var userID = gs.getUserID();
var assignee = current.assigned_to;
gs.info("ACL | Cabin Crew Read Access " + userID + ' | ' + assignee);
if (userID == assignee) {
return true;
} else {
return false;
}
}
When I impersonate the assigned to user, I'm able to view the list of records.
However, on click of any of the records, I'm getting "No records found" message only.
Can you help me understand what I'm missing here?
|
9459224fcf65c287179175548e118a9e
|
{
"intermediate": 0.39041855931282043,
"beginner": 0.3841135501861572,
"expert": 0.2254679650068283
}
|
47,688
|
check this code:
fn main() {
let c_fivends = vec![(5,10),(25,30),(40,45),(60,65)];
let c_exons = vec![(32,37),(47,55),(70,80)];
let tx_5end = (43,45);
if let Ok(k) = c_fivends.binary_search_by(|&(start, _)| start.cmp(&tx_5end.0)) {
println!("we are inside")
} else {
println!("we are not inside")
};
}
The current result is "we are not inside". In this case, the expected behavior should be "we are inside" because 43 is between 40 and 45.
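The underlying fix is an interval-containment lookup rather than an exact-key search; a minimal sketch of that technique in Python using bisect (the Rust equivalent would be slice::partition_point, or a binary_search_by whose miss position is then checked against the end bound):

import bisect

intervals = [(5, 10), (25, 30), (40, 45), (60, 65)]  # sorted, non-overlapping
point = 43

# An exact search on the start values misses 43; instead, find the
# rightmost interval whose start <= point, then test its end.
starts = [s for s, _ in intervals]
i = bisect.bisect_right(starts, point) - 1
inside = i >= 0 and intervals[i][0] <= point <= intervals[i][1]
print("we are inside" if inside else "we are not inside")  # -> we are inside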
|
e28001807b3b33c006415b33556c1e37
|
{
"intermediate": 0.2949373424053192,
"beginner": 0.5163787007331848,
"expert": 0.18868398666381836
}
|
47,689
|
check this code:
fn main() {
let c_fivends = vec![(5,10),(25,30),(40,45),(60,65)];
let c_exons = vec![(32,37),(47,55),(70,80)];
let tx_5end = (43,45);
if let Ok(k) = c_fivends.binary_search_by(|&(start, _)| start.cmp(&tx_5end.0)) {
println!("we are inside")
} else {
println!("we are not inside")
};
}
The current result is "we are not inside". In this case, the expected behavior should be "we are inside" because 43 is between 40 and 45. Try to use a built-in function if one exists; otherwise, write a function to use instead of binary_search_by.
|
dc93343e48d92169af94e71dbae030be
|
{
"intermediate": 0.2426660656929016,
"beginner": 0.6202725768089294,
"expert": 0.13706132769584656
}
|
47,690
|
check this code:
fn main() {
let c_fivends = vec![(5,10),(25,30),(40,45),(60,65)];
let c_exons = vec![(32,37),(47,55),(70,80)];
let tx_5end = (43,45);
if let Ok(k) = c_fivends.binary_search_by(|&(start, _)| start.cmp(&tx_5end.0)) {
println!("we are inside")
} else {
println!("we are not inside")
};
}
The current result is "we are not inside". In this case, the expected behavior should be "we are inside" because 43 is between 40 and 45. The solution should be as efficient, fast, and elegant as possible.
|
4991b6193ae235e0de0e597215a012bf
|
{
"intermediate": 0.2524358332157135,
"beginner": 0.4372480809688568,
"expert": 0.3103160560131073
}
|