row_id
int64 0
48.4k
| init_message
stringlengths 1
342k
| conversation_hash
stringlengths 32
32
| scores
dict |
|---|---|---|---|
44,177
|
I'm using a AVR dude in Linux terminal; I want to use AVR ATmega 328 to program with it; I have this comand as below please change it to ATmega328 compatible
avrdude -c usbasp -p m32 -U flash:w:main.hex:i
|
c340af1f8cd05589ab8d9b1c4166f5dd
|
{
"intermediate": 0.4051102101802826,
"beginner": 0.27847322821617126,
"expert": 0.31641656160354614
}
|
44,178
|
this is my function:
pub fn to_bed<'a>(
s: &'a str,
parent: String,
child: String,
feature: String,
) -> Result<HashMap<String, HashMap<&str, String>>, &'static str> {
s.par_lines()
.map(|line| {
if !line.starts_with("#") {
Some(GxfRecord::parse(line, &feature))
} else {
None
}
})
.filter_map(|x| x)
.try_fold_with(HashMap::new(), |mut acc, record| {
let record = record?;
let tx_id = if !record.attr.feature().is_empty() {
record.attr.feature().to_owned()
} else {
// continue with the next record
return Ok(acc);
};
let entry = acc.entry(tx_id).or_insert(HashMap::new());
if !parent.is_empty() {
if record.feat == parent {
// args.parent
entry.insert("chr", record.chr.to_owned());
entry.insert("start", record.start.to_string());
entry.insert("end", record.end.to_string());
entry.insert("strand", record.strand.to_string());
} else if record.feat == child {
// args.child
entry.entry("exons").or_default().push('.');
let exon_starts = entry.entry("exon_starts").or_insert(String::from(""));
exon_starts.push_str(&record.start.to_string());
exon_starts.push_str(",");
let exon_sizes = entry.entry("exon_sizes").or_insert(String::from(""));
exon_sizes.push_str(&(record.end - record.start).to_string());
exon_sizes.push_str(",");
} else if record.feat == "start_codon" {
entry.insert("start_codon", record.start.to_string());
} else if record.feat == "stop_codon" {
entry.insert("stop_codon", record.start.to_string());
}
} else {
entry.insert("chr", record.chr.to_owned());
entry.insert("start", record.start.to_string());
entry.insert("end", record.end.to_string());
entry.insert("strand", record.strand.to_string());
entry.entry("exons").or_default().push('.');
let exon_starts = entry.entry("exon_starts").or_insert(String::from(""));
exon_starts.push_str(&record.start.to_string());
exon_starts.push_str(",");
let exon_sizes = entry.entry("exon_sizes").or_insert(String::from(""));
exon_sizes.push_str(&(record.end - record.start).to_string());
exon_sizes.push_str(",");
}
Ok(acc)
}) // end fold
.try_reduce_with(|mut map1, map2| {
for (k, v) in map2 {
let entry = map1.entry(k).or_insert(HashMap::new());
for (k2, v2) in v {
entry.insert(k2, v2);
}
}
Ok(map1)
}) // end reduce
.unwrap_or(Err("Error converting GTF/GFF3 to BED"))
}
could you help me to make this part of the code to look more elegant?
if !parent.is_empty() {
if record.feat == parent {
// args.parent
entry.insert("chr", record.chr.to_owned());
entry.insert("start", record.start.to_string());
entry.insert("end", record.end.to_string());
entry.insert("strand", record.strand.to_string());
} else if record.feat == child {
// args.child
entry.entry("exons").or_default().push('.');
let exon_starts = entry.entry("exon_starts").or_insert(String::from(""));
exon_starts.push_str(&record.start.to_string());
exon_starts.push_str(",");
let exon_sizes = entry.entry("exon_sizes").or_insert(String::from(""));
exon_sizes.push_str(&(record.end - record.start).to_string());
exon_sizes.push_str(",");
} else if record.feat == "start_codon" {
entry.insert("start_codon", record.start.to_string());
} else if record.feat == "stop_codon" {
entry.insert("stop_codon", record.start.to_string());
}
} else {
entry.insert("chr", record.chr.to_owned());
entry.insert("start", record.start.to_string());
entry.insert("end", record.end.to_string());
entry.insert("strand", record.strand.to_string());
entry.entry("exons").or_default().push('.');
let exon_starts = entry.entry("exon_starts").or_insert(String::from(""));
exon_starts.push_str(&record.start.to_string());
exon_starts.push_str(",");
let exon_sizes = entry.entry("exon_sizes").or_insert(String::from(""));
exon_sizes.push_str(&(record.end - record.start).to_string());
exon_sizes.push_str(",");
}
|
131a88f430609f27bf32d4c9e8051afb
|
{
"intermediate": 0.3396970331668854,
"beginner": 0.43731337785720825,
"expert": 0.22298958897590637
}
|
44,179
|
loader.py:
# load symbol from:
# https://stackoverflow.com/questions/22029562/python-how-to-make-simple-animated-loading-while-process-is-running
# imports
from itertools import cycle
from shutil import get_terminal_size
from threading import Thread
from time import sleep
from zotify.termoutput import Printer
class Loader:
"""Busy symbol.
Can be called inside a context:
with Loader("This take some Time..."):
# do something
pass
"""
def __init__(self, chan, desc="Loading...", end='', timeout=0.1, mode='prog'):
"""
A loader-like context manager
Args:
desc (str, optional): The loader's description. Defaults to "Loading...".
end (str, optional): Final print. Defaults to "".
timeout (float, optional): Sleep time between prints. Defaults to 0.1.
"""
self.desc = desc
self.end = end
self.timeout = timeout
self.channel = chan
self._thread = Thread(target=self._animate, daemon=True)
if mode == 'std1':
self.steps = ["⢿", "⣻", "⣽", "⣾", "⣷", "⣯", "⣟", "⡿"]
elif mode == 'std2':
self.steps = ["◜","◝","◞","◟"]
elif mode == 'std3':
self.steps = ["😐 ","😐 ","😮 ","😮 ","😦 ","😦 ","😧 ","😧 ","🤯 ","💥 ","✨ ","\u3000 ","\u3000 ","\u3000 "]
elif mode == 'prog':
self.steps = ["[∙∙∙]","[●∙∙]","[∙●∙]","[∙∙●]","[∙∙∙]"]
self.done = False
def start(self):
self._thread.start()
return self
def _animate(self):
for c in cycle(self.steps):
if self.done:
break
Printer.print_loader(self.channel, f"\r\t{c} {self.desc} ")
sleep(self.timeout)
def __enter__(self):
self.start()
def stop(self):
self.done = True
cols = get_terminal_size((80, 20)).columns
Printer.print_loader(self.channel, "\r" + " " * cols)
if self.end != "":
Printer.print_loader(self.channel, f"\r{self.end}")
def __exit__(self, exc_type, exc_value, tb):
# handle exceptions with those variables ^
self.stop()
mycode.py:
import base64, os, eyed3, requests, json, re, numpy as np, sounddevice as sd, scipy.io.wavfile, acrcloud, eyed3.id3.frames
from eyed3.id3.frames import UserTextFrame
from bs4 import BeautifulSoup
from genius_api import GeniusApi
from itertools import cycle
from shutil import get_terminal_size
from threading import Thread
from time import sleep
from my_shazam_utility import shazam_recognize_song
from applemusic_api import AppleMusicApi
from Acrcloudretrieve import recognize_song, set_id3_tags_mp3
from Retrieve_lyrics import get_lyrics
from erhalten_alb_covers import save_and_embed_album_cover
def load_config():
with open('D:/Eurydice/Encompassing Data by discerning/config/config.json', 'r') as config_file:
config_data = json.load(config_file)
return config_data
config = load_config()
CLIENT_ID = config['Spotify']['CLIENT_ID']
CLIENT_SECRET = config['Spotify']['CLIENT_SECRET']
genius_api = GeniusApi()
def get_audio_source_choice(duration=10):
border = "=" * 50
title = "AUDIO SOURCE SELECTION"
padded_title = title.center(len(border))
print(f"\n{border}")
print(padded_title)
print(border)
box_width = max(len(s) for s in ["Microphone - Live audio capture",
"Internal Sound - Detect sounds playing internally on the device",
"File - Detect through an internally saved file"]) + 6
print("\nPlease select the audio source you'd like to use:\n")
print(f"+{'-' * (box_width - 2)}+")
print(f"| 1: Microphone - Live audio capture{' ' * (box_width - len(' 1: Microphone - Live audio capture') - 3)}|")
print(f"| 2: Internal Sound - Detect sounds playing internally on the device{' ' * (box_width - len(' 2: Internal Sound - Detect sounds playing internally on the device') - 3)}|")
print(f"| 3: File - Detect through an internally saved file{' ' * (box_width - len(' 3: File - Detect through an internally saved file') - 3)}|")
print(f"+{'-' * (box_width - 2)}+")
choice = input("Enter your choice (1, 2, or 3) and press Enter: ")
print(f"{border}\n")
return choice
def capture_internal_audio(device, duration=10, sample_rate=44100, filename="internal_audio.wav"):
device_info = sd.query_devices(device, 'input')
max_input_channels = device_info.get('max_input_channels', 1)
channels = min(2, max_input_channels)
print(f"Capturing internal audio using {channels} channel(s).\n Please play the audio you'd like to identify…")
recording = sd.rec(int(duration * sample_rate), samplerate=sample_rate, channels=2, dtype='float64', device=device)
sd.wait()
scipy.io.wavfile.write(filename, sample_rate, (recording * 32767).astype(np.int16))
print("Capture complete.")
print(f"Recording shape (samples, channels): {recording.shape}")
print(recording, sample_rate)
print(filename)
return filename
def capture_and_save_audio_from_mic(duration=10, sample_rate=44100, filename="temp_captured_audio_file.wav"):
print("Recording…")
recording = sd.rec(int(duration * sample_rate), samplerate=sample_rate, channels=2, dtype='int16')
animation = AudioCaptureAnimation(mode='std2')
animation_thread = threading.Thread(target=animation.start_animation, daemon=True)
animation_thread.start()
time.sleep(duration)
animation.stop_animation()
animation_thread.join()
sd.wait()
print("Recording stopped.")
scipy.io.wavfile.write(filename, sample_rate, recording)
print(f"Recorded (samples, channels): {recording.shape}")
print(recording, sample_rate)
print(filename)
return filename
def get_user_choice(duration=10):
print("=" * 50)
print("Welcome to the Song Recognition Service!")
print("=" * 50)
print("\nPlease select the recognition service you'd like to use:\n")
print(" 1: YoutubeACR - Fast and accurate music recognition")
print(" 2: Shazam - Discover music, artists, and lyrics in seconds")
print("-" * 50)
choice = input("Enter your choice (1 or 2) and press Enter: ")
print("\n" + "." * 25 + " Processing " + "." * 25 + "\n")
return choice
def add_or_update_txxx_frame(audiofile, description, value):
found = False
frames = audiofile.tag.frame_set.get(eyed3.id3.frames.USERTEXT_FID, [])
for frame in frames:
if frame.description == description:
frame.text = value
found = True
break
if not found:
new_frame = eyed3.id3.frames.UserTextFrame(description=description, text=value)
if not frames: # If it's the first frame of this type
audiofile.tag.frame_set[eyed3.id3.frames.USERTEXT_FID] = [new_frame]
else:
frames.append(new_frame)
def authenticate_spotify(client_id, client_secret):
auth_url = 'https://accounts.spotify.com/api/token'
client_creds = f"{client_id}:{client_secret}"
client_creds_b64 = base64.b64encode(client_creds.encode())
headers = {'Authorization': f'Basic {client_creds_b64.decode()}'}
data = {'grant_type': 'client_credentials'}
response = requests.post(auth_url, headers=headers, data=data)
access_token = response.json().get('access_token')
return access_token
def search_spotify_for_song(access_token, artist_name, title):
base_url = "https://api.spotify.com/v1/search"
query = f"{title} artist:{artist_name}"
headers = {"Authorization": f"Bearer {access_token}"}
params = {"q": query, "type": "track", "limit": 1}
response = requests.get(base_url, headers=headers, params=params)
results = response.json()
try:
track_info = results['tracks']['items'][0]
return track_info
except IndexError:
print("Song not found on Spotify.")
return None
def get_lyrics_from_genius(artist_name, title):
results = genius_api.get_search_by_songs(f"{artist_name} {title}")
if results:
song_info = results[0]['result'] # Take the most relevant result
song_id = str(song_info['id'])
song_details = genius_api.get_song_by_id(song_id, text_format='plain')
return song_details.get('lyrics', "Lyrics not available.")
return "Song not found on Genius."
def save_lyrics_to_file(audio_file_path, track_number, title, artist_name, album_name, isrc, lyrics):
base_directory = os.path.dirname(audio_file_path)
file_name_format = f"{track_number:02d}. {title} - {artist_name} - {album_name} - {isrc}.lrc"
safe_file_name = re.sub(r'[/:*?"<>|]', '', file_name_format)
lyrics_file_path = os.path.join(base_directory, safe_file_name)
with open(lyrics_file_path, 'w', encoding='utf-8') as lrc_file:
lrc_file.write(lyrics)
print(f"Lyrics saved as: {safe_file_name}")
def get_high_quality_album_art_url(song_info):
images = song_info['album']['images']
if not images:
return None
highest_quality_image = max(images, key=lambda x: x['width']*x['height'])
return highest_quality_image['url']
def save_high_quality_album_art(image_url, file_path):
try:
response = requests.get(image_url, stream=True)
if response.status_code == 200:
with open(file_path, 'wb') as out_file:
for chunk in response.iter_content(1024):
out_file.write(chunk)
print(f"High quality album art saved: {file_path}")
return True
else:
print("Could not download the album art.")
except Exception as e:
print(f"Error saving high-quality album art: {e}")
return False
def embed_album_art_to_song(file_path, image_path):
try:
audiofile = eyed3.load(file_path)
if audiofile.tag is None:
audiofile.initTag()
with open(image_path, 'rb') as img_file:
audiofile.tag.images.set(3, img_file.read(), 'image/jpeg')
audiofile.tag.save()
print("High quality album art embedded into song.")
except FileNotFoundError:
print(f"Failed to embed album art - No such file: {image_path}")
def process_audio_file_with_spotify_search(audio_file_path):
shazam_data = shazam_recognize_song(audio_file_path)
if shazam_data:
artist_name = shazam_data['track']['subtitle']
title = shazam_data['track']['title']
print(f"Identified Song: {artist_name} - {title}")
access_token = authenticate_spotify(CLIENT_ID, CLIENT_SECRET)
song_info = search_spotify_for_song(access_token, artist_name, title)
if song_info:
print(json.dumps(song_info, indent=4))
print("\n///////////////////////////////\n")
album_name = song_info['album']['name']
album_url = song_info['album']['external_urls']['spotify']
track_number = song_info['track_number']
release_date = song_info['album']['release_date']
isrc = song_info.get('external_ids', {}).get('isrc', "Not Available")
label = song_info['label'] if 'label' in song_info else "Not Available"
explicit = str(song_info['explicit']) if 'explicit' in song_info else "Not Available" # Convert to string
genres = ", ".join(song_info['genres']) if 'genres' in song_info else "Not Available"
author_url = song_info['artists'][0]['external_urls']['spotify'] if 'artists' in song_info else "Not Available"
spotify_url = song_info['external_urls']['spotify']
print(f"Track Number on Spotify: {track_number}")
audiofile = eyed3.load(audio_file_path)
if audiofile.tag is None:
audiofile.initTag(version=eyed3.id3.ID3_V2_3)
audiofile.tag.artist = artist_name
audiofile.tag.album = album_name
audiofile.tag.album_artist = artist_name
audiofile.tag.title = title
audiofile.tag.recording_date = release_date
add_or_update_txxx_frame(audiofile, "Album URL", album_url)
add_or_update_txxx_frame(audiofile, "Eurydice", "True")
add_or_update_txxx_frame(audiofile, "Compilation", "KK")
add_or_update_txxx_frame(audiofile, "Genre", genres)
add_or_update_txxx_frame(audiofile, "Author URL", author_url)
add_or_update_txxx_frame(audiofile, "Label", label)
add_or_update_txxx_frame(audiofile, "Explicit", explicit)
add_or_update_txxx_frame(audiofile, "ISRC", isrc)
add_or_update_txxx_frame(audiofile, "Spotify URL", spotify_url)
audiofile.tag.comments.set(f"ISRC: {isrc}, Label: {label}, Explicit: {explicit}")
audiofile.tag.save()
print(f"Metadata embedded into the file: {audio_file_path}")
high_res_image_url = get_high_quality_album_art_url(song_info)
if high_res_image_url:
image_file_path = os.path.splitext(audio_file_path)[0] + ".jpg"
if save_high_quality_album_art(high_res_image_url, image_file_path):
embed_album_art_to_song(audio_file_path, image_file_path)
else:
print("Skipping album art embed due to download failure.")
else:
print("No album art available.")
new_file_name = f"{track_number:02d}. {title} - {artist_name} - {album_name} - {isrc}.mp3"
new_file_name = re.sub(r'[/:*?"<>|]', '', new_file_name)
new_file_path = os.path.join(os.path.dirname(audio_file_path), new_file_name)
os.rename(audio_file_path, new_file_path) # Rename file
print(f"File has been renamed to: {new_file_name}")
new_image_file_path = os.path.splitext(new_file_path)[0] + ".jpg"
os.rename(image_file_path, new_image_file_path)
print(f"Album art file has been renamed to: {os.path.basename(new_image_file_path)}")
lyrics = get_lyrics_from_genius(artist_name, title)
if 'plain' in lyrics:
lyrics_plain_text = lyrics['plain']
print("Printing Lyrics:\n", lyrics_plain_text)
save_lyrics_to_file(audio_file_path, track_number, title, artist_name, album_name, isrc, lyrics_plain_text)
print("Lyrics file saved")
else:
print("No lyrics available to save.")
else:
print("Song not found on Spotify.")
else:
print("Song could not be identified.")
if __name__ == "__main__":
audio_source_choice = get_audio_source_choice(duration=10)
if audio_source_choice == '3':
user_choice = get_user_choice(duration=10)
audio_file_path = 'D:/Eurydice/Encompassing Data by discerning/Test_file/Unknown_file.mp3'
if user_choice == '1':
print("\n" + "." * 15 + " ᴜsɪɴɢ YᴏᴜᴛᴜʙᴇACR " + "." * 15 + "\n")
song_tags = recognize_song(audio_file_path)
if song_tags:
print(f'Song identified: {song_tags}')
set_id3_tags_mp3(audio_file_path, song_tags)
artist_name = song_tags.get('artists')[0].get('name')
song_title = song_tags.get('title')
safe_artist_name = re.sub(r'[/\:?"<>|]', '', artist_name)
safe_song_title = re.sub(r'[/\:?"<>|]', '', song_title)
new_file_name = f"{safe_artist_name} - {safe_song_title}.mp3"
new_file_path = os.path.join(os.path.dirname(audio_file_path), new_file_name)
os.rename(audio_file_path, new_file_path)
print(f"File has been renamed to: {new_file_name}")
else:
print('Could not identify the song in YᴏᴜᴛᴜʙᴇACR.')
apple_music_api = AppleMusicApi(Exception)
apple_music_api.get_access_token()
track_results = apple_music_api.search('songs', f"{artist_name} - {song_title}")
if track_results:
track_id = track_results[0]['id']
album_artwork_url_template = track_results[0]['attributes']['artwork']['url']
save_and_embed_album_cover(new_file_path, artist_name, song_title, album_artwork_url_template)
else:
print("Song not found on Apple Music.")
lrc_lyrics = get_lyrics(safe_artist_name, safe_song_title)
if lrc_lyrics:
lrc_file_path = os.path.join(os.path.dirname(audio_file_path), f"{safe_artist_name} - {safe_song_title}.lrc")
with open(lrc_file_path, 'w', encoding='utf-8') as lrc_file:
lrc_file.write(lrc_lyrics)
print(f"Saved LRC file to: {lrc_file_path}")
else:
print("Could not get the lyrics.")
elif user_choice == '2':
print("\n" + "." * 15 + " ᴜsɪɴɢ Sʜᴀᴢᴀᴍ " + "." * 15 + "\n")
song_tags = shazam_recognize_song(audio_file_path)
print(song_tags)
process_audio_file_with_spotify_search(audio_file_path)
else:
print("Invalid choice. Exiting....")
exit()
elif audio_source_choice == '1':
audio_file_path = capture_and_save_audio_from_mic(duration=10, sample_rate=44100)
print("Attempting to recognize using YᴏᴜᴛᴜʙᴇACR first…\n")
song_tags = recognize_song(audio_file_path)
use_acrcloud = True
if song_tags is None:
print("YᴏᴜᴛᴜʙᴇACR couldn't identify the song. Attempting recognition with Sʜᴀᴢᴀᴍ…\n")
song_tags = shazam_recognize_song(audio_file_path)
use_acrcloud = False
if song_tags:
if use_acrcloud:
artist_name = song_tags.get('artists')[0].get('name')
song_title = song_tags.get('title')
print(f"Song recognized successfully from youtubeACR!\n Artist: {artist_name}, Song: {song_title}\n")
else:
artist_name = song_tags['track']['subtitle']
title = song_tags['track']['title']
access_token = authenticate_spotify(CLIENT_ID, CLIENT_SECRET)
song_info = search_spotify_for_song(access_token, artist_name, title)
if song_info:
album_name = song_info['album']['name']
track_number = song_info['track_number']
isrc = song_info.get('external_ids', {}).get('isrc', "Not Available")
print(f"Song recognized successfully by sha-spo!\n Artist: {artist_name}, Song: {track_number:02d}. {title}, Album: {album_name}, ISRC tag: {isrc}\n")
else:
print(f"Song recognized successfully by Shazam!\n Artist: {artist_name}, Song: {title}\n")
else:
print("Failed to recognize the song from the service.\n")
elif audio_source_choice == '2':
print("\nAvailable audio devices for capture:\n")
devices = sd.query_devices()
for index, device in enumerate(devices):
print(f"{index}: {device['name']} - {'(Default)' if device['default_samplerate'] == device['default_low_output_latency'] else ''}")
device_selection = input("Please enter the device index or name you wish to use for the capture: ").strip()
try:
device_selection = int(device_selection)
except ValueError:
pass
audio_file_path = capture_internal_audio(device=device_selection, duration=10, sample_rate=44100)
print("waiting....\n")
print("Attempting to recognize using YᴏᴜᴛᴜʙᴇACR first…\n")
song_tags = recognize_song(audio_file_path)
use_acrcloud = True
if song_tags is None:
print("YᴏᴜᴛᴜʙᴇACR couldn't identify the song. Attempting recognition with Sʜᴀᴢᴀᴍ…\n")
song_tags = shazam_recognize_song(audio_file_path)
use_acrcloud = False
if song_tags:
if use_acrcloud:
artist_name = song_tags.get('artists')[0].get('name')
song_title = song_tags.get('title')
print(f"Song recognized successfully from youtubeACR!\n Artist: {artist_name}, Song: {song_title}\n")
else:
artist_name = song_tags['track']['subtitle']
title = song_tags['track']['title']
access_token = authenticate_spotify(CLIENT_ID, CLIENT_SECRET)
song_info = search_spotify_for_song(access_token, artist_name, title)
if song_info:
album_name = song_info['album']['name']
track_number = song_info['track_number']
isrc = song_info.get('external_ids', {}).get('isrc', "Not Available")
print(f"Song recognized successfully by sha-spo!\n Artist: {artist_name}, Song: {track_number:02d}. {title}, Album: {album_name}, ISRC tag: {isrc}\n")
else:
print(f"Song recognized successfully by Shazam!\n Artist: {artist_name}, Song: {title}\n")
else:
print("Failed to recognize the song from the service.\n")
else:
exit();
import lpader.py in maincode
and fix maincode , to write loadre
|
4b80d1f168bd752aa5de04bc72b3e17e
|
{
"intermediate": 0.2920333743095398,
"beginner": 0.5443325042724609,
"expert": 0.16363412141799927
}
|
44,180
|
I have this code as below; I want to use an ATmega328 ; please check and change code to be compatible with ATmega328 :
#define F_CPU 16000000UL // 16 MHz clock
#include <avr/io.h>
#include <util/delay.h>
// Define potentiometer ADC channel (PA0 = ADC0)
#define potPin 0
// Define digital I/O pins for LEDs - changed to use PD1 to PD6
#define led1 PD1
#define led2 PD2
#define led3 PD3
#define led4 PD4
#define led5 PD5
#define led6 PD6
#define SAMPLES_TO_AVERAGE 10 // Number of samples to average for ADC
void adc_init()
{
// Initialize ADC
ADMUX = (1<<REFS0); // Select AVcc as reference voltage and ADC0 as input channel
ADCSRA = (1<<ADEN) | (1<<ADPS2) | (1<<ADPS1) | (1<<ADPS0); // Enable ADC and set prescaler to 128
}
uint16_t adc_read(uint8_t ch)
{
// Select ADC channel with safety mask and without changing the reference voltage selection
ADMUX = (ADMUX & 0xF0) | (ch & 0x0F);
// Start single conversion
ADCSRA |= (1<<ADSC);
// Wait until conversion is complete
while (ADCSRA & (1<<ADSC));
return ADC;
}
uint16_t adc_read_average(uint8_t ch)
{
uint32_t sum = 0;
for (int i = 0; i < SAMPLES_TO_AVERAGE; ++i) {
sum += adc_read(ch);
}
return (uint16_t)(sum / SAMPLES_TO_AVERAGE);
}
int main(void)
{
// Set up the LED pins as output - updated for PD1 to PD6
DDRD |= (1<<led1) | (1<<led2) | (1<<led3) | (1<<led4) | (1<<led5) | (1<<led6);
// Initialize ADC
adc_init();
while(1)
{
// Read the value from the potentiometer and average it
uint16_t potValue = adc_read_average(potPin);
// Map the potentiometer value from the given range (65 - 337) to (0 - 1023)
uint16_t mappedValue = (uint32_t)(potValue - 65) * 1023 / (337 - 65);
// Define thresholds based on the number of LEDs
uint16_t threshold1 = 170; // First threshold
uint16_t threshold2 = 341; // Second threshold
uint16_t threshold3 = 512; // Third threshold
uint16_t threshold4 = 683; // Fourth threshold
uint16_t threshold5 = 854; // Fifth threshold
// Turn off all LEDs to start with - updated for PD1 to PD6
PORTD &= ~((1<<led1) | (1<<led2) | (1<<led3) | (1<<led4) | (1<<led5) | (1<<led6));
// Determine which LEDs to light up based on the mappedValue - updated for PD1 to PD6
if (mappedValue >= threshold5) {
PORTD |= (1<<led1) | (1<<led2) | (1<<led3) | (1<<led4) | (1<<led5) | (1<<led6);
} else if (mappedValue >= threshold4) {
PORTD |= (1<<led1) | (1<<led2) | (1<<led3) | (1<<led4) | (1<<led5);
} else if (mappedValue >= threshold3) {
PORTD |= (1<<led1) | (1<<led2) | (1<<led3) | (1<<led4);
} else if (mappedValue >= threshold2) {
PORTD |= (1<<led1) | (1<<led2) | (1<<led3);
} else if (mappedValue >= threshold1) {
PORTD |= (1<<led1) | (1<<led2);
} else {
// Ensure led1 is always turned on regardless of the value
PORTD |= (1<<led1);
}
// Small delay to reduce flickering
_delay_ms(30);
}
}
|
61588b1e2e68092a9d4a5605ade25a92
|
{
"intermediate": 0.5121175050735474,
"beginner": 0.2978494167327881,
"expert": 0.19003306329250336
}
|
44,181
|
Write a program to print below String. [Result should be in same format]
Row, row, row your boat
Gently down the stream!!!
Here, Both lines combined together represents single string
Tips: use \n to make new line (and use \t to give tab between the words
correct it on this java program
public class Exercise {
public static void main(String[] args) {
System.out.println(printResult());
}
public static String printResult(){
// Write your logic here
return "Replace this text with your text"; // Replace the text here with what you are expecting to print.
/*
If you want to print "Hello", just write return "Hello";
You will leran about return statement later in this course.
For now put the text (which you want to print ) in return statement
*/
}
}
|
230d485f16b818a16a974de6def6500c
|
{
"intermediate": 0.2390594184398651,
"beginner": 0.56965172290802,
"expert": 0.19128884375095367
}
|
44,182
|
I have this code as below; I want to use an ATmega328 ; please check and change code to be compatible with ATmega328; and also I use in this code PA0 for analoge input change it to PC0 :
#define F_CPU 16000000UL // 16 MHz clock
#include <avr/io.h>
#include <util/delay.h>
// Define potentiometer ADC channel (PA0 = ADC0)
#define potPin 0
// Define digital I/O pins for LEDs - changed to use PD1 to PD6
#define led1 PD1
#define led2 PD2
#define led3 PD3
#define led4 PD4
#define led5 PD5
#define led6 PD6
#define SAMPLES_TO_AVERAGE 10 // Number of samples to average for ADC
void adc_init()
{
// Initialize ADC
ADMUX = (1<<REFS0); // Select AVcc as reference voltage and ADC0 as input channel
ADCSRA = (1<<ADEN) | (1<<ADPS2) | (1<<ADPS1) | (1<<ADPS0); // Enable ADC and set prescaler to 128
}
uint16_t adc_read(uint8_t ch)
{
// Select ADC channel with safety mask and without changing the reference voltage selection
ADMUX = (ADMUX & 0xF0) | (ch & 0x0F);
// Start single conversion
ADCSRA |= (1<<ADSC);
// Wait until conversion is complete
while (ADCSRA & (1<<ADSC));
return ADC;
}
uint16_t adc_read_average(uint8_t ch)
{
uint32_t sum = 0;
for (int i = 0; i < SAMPLES_TO_AVERAGE; ++i) {
sum += adc_read(ch);
}
return (uint16_t)(sum / SAMPLES_TO_AVERAGE);
}
int main(void)
{
// Set up the LED pins as output - updated for PD1 to PD6
DDRD |= (1<<led1) | (1<<led2) | (1<<led3) | (1<<led4) | (1<<led5) | (1<<led6);
// Initialize ADC
adc_init();
while(1)
{
// Read the value from the potentiometer and average it
uint16_t potValue = adc_read_average(potPin);
// Map the potentiometer value from the given range (65 - 337) to (0 - 1023)
uint16_t mappedValue = (uint32_t)(potValue - 65) * 1023 / (337 - 65);
// Define thresholds based on the number of LEDs
uint16_t threshold1 = 170; // First threshold
uint16_t threshold2 = 341; // Second threshold
uint16_t threshold3 = 512; // Third threshold
uint16_t threshold4 = 683; // Fourth threshold
uint16_t threshold5 = 854; // Fifth threshold
// Turn off all LEDs to start with - updated for PD1 to PD6
PORTD &= ~((1<<led1) | (1<<led2) | (1<<led3) | (1<<led4) | (1<<led5) | (1<<led6));
// Determine which LEDs to light up based on the mappedValue - updated for PD1 to PD6
if (mappedValue >= threshold5) {
PORTD |= (1<<led1) | (1<<led2) | (1<<led3) | (1<<led4) | (1<<led5) | (1<<led6);
} else if (mappedValue >= threshold4) {
PORTD |= (1<<led1) | (1<<led2) | (1<<led3) | (1<<led4) | (1<<led5);
} else if (mappedValue >= threshold3) {
PORTD |= (1<<led1) | (1<<led2) | (1<<led3) | (1<<led4);
} else if (mappedValue >= threshold2) {
PORTD |= (1<<led1) | (1<<led2) | (1<<led3);
} else if (mappedValue >= threshold1) {
PORTD |= (1<<led1) | (1<<led2);
} else {
// Ensure led1 is always turned on regardless of the value
PORTD |= (1<<led1);
}
// Small delay to reduce flickering
_delay_ms(30);
}
}
|
16b783b0a3b9576f80a8fba3ebf58bd9
|
{
"intermediate": 0.4602013826370239,
"beginner": 0.30686959624290466,
"expert": 0.23292897641658783
}
|
44,183
|
ast transformations and type inference using prolog in code editors
|
cfcd0d90b54d24a0da328fb9edae5fd3
|
{
"intermediate": 0.28207018971443176,
"beginner": 0.27540555596351624,
"expert": 0.4425242841243744
}
|
44,184
|
select t.applicant_name,t.father,t.residential_address,t.street,t.postoffice,t.district,t.state,t.pincode,t.mobile
from tbl_nloan_other_applicants t,
tbl_nloan_mhf_rl_number s,
tbl_nloan_loan_mst o
where s.loan_id = o.loan_id
and t.application_id=o.application_id
and t.cust_id=o.cust_id
and o.loan_id='0100001320000003'
and s.rl_status_updated_on between 01/03/2024 and 27/03/2024);
|
fa0890e81f1f23a383f6720ff8056353
|
{
"intermediate": 0.3153579533100128,
"beginner": 0.4447072446346283,
"expert": 0.2399347573518753
}
|
44,185
|
I have a private member variable int mem of a class Hari. I also have getters and setters for the int mem, I need to access the int mem from another cpp file via getters, How to achieve this. code is cpp, generate code for this.
|
760b1e8116910bc1e811fb8c57471d5c
|
{
"intermediate": 0.26476263999938965,
"beginner": 0.534017026424408,
"expert": 0.2012202888727188
}
|
44,186
|
iOS OC pickerview中使用图片和文字
|
2c56de644a91eb51bcdfe8c8b13d0877
|
{
"intermediate": 0.37326058745384216,
"beginner": 0.23045474290847778,
"expert": 0.39628466963768005
}
|
44,187
|
write me a kotlin function that gzip decompresses base64 strings
|
8b4b9626a7c40b789d271897b84d8996
|
{
"intermediate": 0.5274313688278198,
"beginner": 0.19838744401931763,
"expert": 0.27418121695518494
}
|
44,188
|
prompt_tuning微调
|
aeaf0682099965db5242d36f685f4be8
|
{
"intermediate": 0.21989916265010834,
"beginner": 0.17974483966827393,
"expert": 0.6003559827804565
}
|
44,189
|
In servicenow i wrote a script include and a catalog client script "On change". But i want to run it at on Submit. below i paste my both codes please use the same functionality do changes according to the on submit catalog client script
var Checkopstatus = Class.create();
CheckOpStatus.prototype= Object.extendsobject(AbstractAjaxProcessor, {
getoperationalstatus: function(ip_address) {
var result =[];
var relGr = new GlideRecord('cmdb_rel_ci');
var queryString =
"'type=d93304fb0a0a0b78006081a72ef08444^child.ip_address=" + ip_ address;
relGr.addEncodedQuery(queryString);
relGr.query();
while (relGr.next()) {
var parent = relGr.parent.getRefRecord();
if (parent && parent.operational_status == '1' ) {
result.push(parent.name.toString());
}
}
return result;
},
ajaxFunction_getParentNames: function() {
var ip_address = this.getParameter('sysparm_ip_address'):
var result2 = this.getOperationalStatus(ip_address);
return JSON.stringify(result2);
},
type: 'CheckOpStatus'
});
function onChange(control, oldValue, newValue, isLoading) {
if (isLoading || newValue == '') {
return;
}
g_form.getReference('select_server', function(ref) {
var ip_address = ref.ip_address;
var ga = new GlideAjax('CheckOpStatus');
ga.addParam('sysparm _name', 'getParentNames' );
ga.addParam('sysparm_ip_address', ip_address);
ga.getXMLAnswer (function(answer) {
var result = JSON. parse(answer);
if (result.length > 0 ) {
var names = result.join(', ');
alert("servers tagged to the Physical server " + result.join(', '));
g_form.clearvalue('select_server');
}
});
});
}
|
bfee50f5782d5c35161b91dee3b3d9ab
|
{
"intermediate": 0.3639771342277527,
"beginner": 0.4514457583427429,
"expert": 0.1845770627260208
}
|
44,190
|
i have a reference variable on my catalog item named as select_server and refer to cmdb_ci_server table records and the records of cmdb_ci_server table further linked to another table records cmdb_rel_ci table via ip address relationship. i want that when i select any server and try to submit the form then it will check all records related to the server on cmdb_rel_ci table and check all records operational status field value. if the field is operational on cmdb_rel_ci table records then show a popup show that you cannot select because parent record operatioal status is operational and clear the field value from catalog selected field and restrict the catalog form to be submitted. i provide the workflow script below for your understanding
var relGlide = new GlideRecord("cmdb_rel_ci);
var queryString = "type=d93304fb0a0a0b78006081a72ef08444^child.ip_address=" +current.variables.ip_address;
relGlide.addEncodedQuery(queryString);
relGlide.query();
while (relGlide.next()){
//if any CI is operational
if(relGlide.parent.operational_status ==1){
}
}
|
bfc89ee95f502e38c59705d6f7f9ae2d
|
{
"intermediate": 0.5564049482345581,
"beginner": 0.255805104970932,
"expert": 0.18778999149799347
}
|
44,191
|
i have a reference variable on my catalog item named as select_server and refer to cmdb_ci_server table records and the records of cmdb_ci_server table further linked to another table records cmdb_rel_ci table via ip address relationship. i want that when i select any server and try to submit the form then it will check all records related to the server on cmdb_rel_ci table and check all records operational status field value. if the field is operational on cmdb_rel_ci table records then show a popup show that you cannot select because parent record operatioal status is operational and clear the field value from catalog selected field and restrict the catalog form to be submitted. i provide the workflow script below for your understanding
var relGlide = new GlideRecord("cmdb_rel_ci);
var queryString = “type=d93304fb0a0a0b78006081a72ef08444^child.ip_address=” +current.variables.ip_address;
relGlide.addEncodedQuery(queryString);
relGlide.query();
|
901a1e858410944c1ded00a8eea2f064
|
{
"intermediate": 0.5586718916893005,
"beginner": 0.1898665577173233,
"expert": 0.25146156549453735
}
|
44,192
|
I'm studing for an exam in my discrete math for computer science course, which heavily focus on the 3 topics below. I want you to give me a comprehensive cheat sheet including everything i need to know about the 3 topics below.
Runtime (big-O,Ω,Θ)
Sorting
Recursion Runtime
|
75517e92176ecccfda7965a86f9a007a
|
{
"intermediate": 0.20069792866706848,
"beginner": 0.607799232006073,
"expert": 0.1915029138326645
}
|
44,193
|
i have a reference variable on my catalog item named as select_server and refer to cmdb_ci_server table records and the records of cmdb_ci_server table further linked to another table records cmdb_rel_ci table via ip address relationship. i want that when i select any server and try to submit the form then it will check all records related to the server on cmdb_rel_ci table and check all records operational status field value. if the field is operational on cmdb_rel_ci table records then show a popup show that you cannot select because parent record operatioal status is operational and clear the field value and restrict the catalog form from submitting. i provide the script below for your understanding
var relGlide = new GlideRecord("cmdb_rel_ci);
var queryString = “type=d93304fb0a0a0b78006081a72ef08444^child.ip_address=” +current.variables.ip_address;
relGlide.addEncodedQuery(queryString);
relGlide.query();
|
98eca516e067a88e9374daae081cd6ed
|
{
"intermediate": 0.4408663511276245,
"beginner": 0.24704095721244812,
"expert": 0.312092661857605
}
|
44,194
|
i need to run on submit catalog client script beofre submmiting the form and abort the submittion
|
1e03a7caca95eaf1221f77defcac40af
|
{
"intermediate": 0.46324604749679565,
"beginner": 0.1891031712293625,
"expert": 0.34765079617500305
}
|
44,195
|
◞ Waiting…Waiting…
with Loader(chan="Main", desc="Waiting…", mode='std2') as loader:
choice = input("Enter your choice (1, 2, or 3) and press Enter: ")
loader.stop()
print(f"Your choice was: {choice}")
print(f"{border}\n")
return choice
loader.py:
from itertools import cycle
from shutil import get_terminal_size
from threading import Thread
from time import sleep
from printer import Printer
class Loader:
def __init__(self, chan, desc="Loading...", end='', timeout=0.1, mode='prog'):
self.desc = desc
self.end = end
self.timeout = timeout
self.channel = chan
self._thread = Thread(target=self._animate, daemon=True)
if mode == 'std1':
self.steps = ["⢿", "⣻", "⣽", "⣾", "⣷", "⣯", "⣟", "⡿"]
elif mode == 'std2':
self.steps = ["◜","◝","◞","◟"]
elif mode == 'std3':
self.steps = ["😐 ","😐 ","😮 ","😮 ","😦 ","😦 ","😧 ","😧 ","🤯 ","💥 ","✨ ","\u3000 ","\u3000 ","\u3000 "]
elif mode == 'prog':
self.steps = ["[∙∙∙]","[●∙∙]","[∙●∙]","[∙∙●]","[∙∙∙]"]
self.done = False
def start(self):
self._thread = Thread(target=self._animate, daemon=True)
self._thread.start()
return self
def _animate(self):
for c in cycle(self.steps):
if self.done:
break
Printer.print_loader(self.channel, f"\r\t{c} {self.desc} ")
print(f"\r{c} {self.desc}", end='', flush=True)
sleep(self.timeout)
def __enter__(self):
self.start()
return self
def stop(self):
self.done = True
cols = get_terminal_size((80, 20)).columns
Printer.print_loader(self.channel, "\r" + " " * cols)
if self.end != "":
Printer.print_loader(self.channel, f"\r{self.end}")
def __exit__(self, exc_type, exc_value, tb):
# handle exceptions with those variables ^
self.stop()
printer.py:
from shutil import get_terminal_size
class Printer:
@staticmethod
def print_loader(channel, message):
# Considering 'cols' has been defined globally or passed appropriately
clear_line = "\r" + " " * get_terminal_size().columns + "\r"
print(f"{clear_line}{message}", end='')
mycode.py:
# -- coding: utf-8 --
import base64, os, eyed3, requests, json, re, numpy as np, sounddevice as sd, scipy.io.wavfile, acrcloud, eyed3.id3.frames
from eyed3.id3.frames import UserTextFrame
from bs4 import BeautifulSoup
from genius_api import GeniusApi
from itertools import cycle
from shutil import get_terminal_size
from threading import Thread
from time import sleep
from my_shazam_utility import shazam_recognize_song
from applemusic_api import AppleMusicApi
from Acrcloudretrieve import recognize_song, set_id3_tags_mp3
from Retrieve_lyrics import get_lyrics
from erhalten_alb_covers import save_and_embed_album_cover
from loader import Loader
def load_config():
with open('D:/Eurydice/Encompassing Data by discerning/config/config.json', 'r') as config_file:
config_data = json.load(config_file)
return config_data
config = load_config()
CLIENT_ID = config['Spotify']['CLIENT_ID']
CLIENT_SECRET = config['Spotify']['CLIENT_SECRET']
genius_api = GeniusApi()
def get_audio_source_choice():
border = "=" * 50
title = "AUDIO SOURCE SELECTION"
padded_title = title.center(len(border))
print(f"\n{border}")
print(padded_title)
print(border)
box_width = max(len(s) for s in ["Microphone - Live audio capture",
"Internal Sound - Detect sounds playing internally on the device",
"File - Detect through an internally saved file"]) + 6
print("\nPlease select the audio source you'd like to use:\n")
print(f"+{'-' * (box_width - 2)}+")
print(f"| 1: Microphone - Live audio capture{' ' * (box_width - len(' 1: Microphone - Live audio capture') - 3)}|")
print(f"| 2: Internal Sound - Detect sounds playing internally on the device{' ' * (box_width - len(' 2: Internal Sound - Detect sounds playing internally on the device') - 3)}|")
print(f"| 3: File - Detect through an internally saved file{' ' * (box_width - len(' 3: File - Detect through an internally saved file') - 3)}|")
print(f"+{'-' * (box_width - 2)}+")
with Loader(chan="Main", desc="Waiting…", mode='std2') as loader:
choice = input("Enter your choice (1, 2, or 3) and press Enter: ")
loader.stop()
print(f"Your choice was: {choice}")
print(f"{border}\n")
return choice
def capture_internal_audio(device, duration=10, sample_rate=44100, filename="internal_audio.wav"):
device_info = sd.query_devices(device, 'input')
max_input_channels = device_info.get('max_input_channels', 1)
channels = min(2, max_input_channels)
print(f"Capturing internal audio using {channels} channel(s).\n Please play the audio you'd like to identify…")
with Loader(chan="Main", desc="Recording…", mode='std1') as loader:
recording = sd.rec(int(duration * sample_rate), samplerate=sample_rate, channels=2, dtype='float64', device=device)
sd.wait()
loader.stop()
scipy.io.wavfile.write(filename, sample_rate, (recording * 32767).astype(np.int16))
print("Capture complete.")
print(f"Recording shape (samples, channels): {recording.shape}")
print(recording, sample_rate)
print(filename)
return filename
def capture_and_save_audio_from_mic(duration=10, sample_rate=44100, filename="temp_captured_audio_file.wav"):
with Loader(chan="Main", desc="Recording…", mode='std1') as loader:
recording = sd.rec(int(duration * sample_rate), samplerate=sample_rate, channels=2, dtype='int16')
sd.wait()
loader.stop()
print("Recording stopped.")
scipy.io.wavfile.write(filename, sample_rate, recording)
print(f"Recorded (samples, channels): {recording.shape}")
print(recording, sample_rate)
print(filename)
return filename
def get_user_choice():
print("=" * 50)
print("Welcome to the Song Recognition Service!")
print("=" * 50)
print("\nPlease select the recognition service you'd like to use:\n")
print(" 1: YoutubeACR - Fast and accurate music recognition")
print(" 2: Shazam - Discover music, artists, and lyrics in seconds")
print("-" * 50)
choice = input("Enter your choice (1 or 2) and press Enter: ")
with Loader(chan="Main", desc="Waiting…", mode='prog') as loader:
print("\n" + "." * 25 + " Processing " + "." * 25 + "\n")
loader.stop()
return choice
def add_or_update_txxx_frame(audiofile, description, value):
found = False
frames = audiofile.tag.frame_set.get(eyed3.id3.frames.USERTEXT_FID, [])
for frame in frames:
if frame.description == description:
frame.text = value
found = True
break
if not found:
new_frame = eyed3.id3.frames.UserTextFrame(description=description, text=value)
if not frames:
audiofile.tag.frame_set[eyed3.id3.frames.USERTEXT_FID] = [new_frame]
else:
frames.append(new_frame)
def authenticate_spotify(client_id, client_secret):
auth_url = 'https://accounts.spotify.com/api/token'
client_creds = f"{client_id}:{client_secret}"
client_creds_b64 = base64.b64encode(client_creds.encode())
headers = {'Authorization': f'Basic {client_creds_b64.decode()}'}
data = {'grant_type': 'client_credentials'}
response = requests.post(auth_url, headers=headers, data=data)
access_token = response.json().get('access_token')
return access_token
def search_spotify_for_song(access_token, artist_name, title):
base_url = "https://api.spotify.com/v1/search"
query = f"{title} artist:{artist_name}"
headers = {"Authorization": f"Bearer {access_token}"}
params = {"q": query, "type": "track", "limit": 1}
response = requests.get(base_url, headers=headers, params=params)
results = response.json()
try:
track_info = results['tracks']['items'][0]
return track_info
except IndexError:
print("Song not found on Spotify.")
return None
def get_lyrics_from_genius(artist_name, title):
results = genius_api.get_search_by_songs(f"{artist_name} {title}")
if results:
song_info = results[0]['result'] # Take the most relevant result
song_id = str(song_info['id'])
song_details = genius_api.get_song_by_id(song_id, text_format='plain')
return song_details.get('lyrics', "Lyrics not available.")
return "Song not found on Genius."
def save_lyrics_to_file(audio_file_path, track_number, title, artist_name, album_name, isrc, lyrics):
base_directory = os.path.dirname(audio_file_path)
file_name_format = f"{track_number:02d}. {title} - {artist_name} - {album_name} - {isrc}.lrc"
safe_file_name = re.sub(r'[/:*?"<>|]', '', file_name_format)
lyrics_file_path = os.path.join(base_directory, safe_file_name)
with open(lyrics_file_path, 'w', encoding='utf-8') as lrc_file:
lrc_file.write(lyrics)
print(f"Lyrics saved as: {safe_file_name}")
def get_high_quality_album_art_url(song_info):
images = song_info['album']['images']
if not images:
return None
highest_quality_image = max(images, key=lambda x: x['width']*x['height'])
return highest_quality_image['url']
def save_high_quality_album_art(image_url, file_path):
try:
response = requests.get(image_url, stream=True)
if response.status_code == 200:
with open(file_path, 'wb') as out_file:
for chunk in response.iter_content(1024):
out_file.write(chunk)
print(f"High quality album art saved: {file_path}")
return True
else:
print("Could not download the album art.")
except Exception as e:
print(f"Error saving high-quality album art: {e}")
return False
def embed_album_art_to_song(file_path, image_path):
try:
audiofile = eyed3.load(file_path)
if audiofile.tag is None:
audiofile.initTag()
with open(image_path, 'rb') as img_file:
audiofile.tag.images.set(3, img_file.read(), 'image/jpeg')
audiofile.tag.save()
print("High quality album art embedded into song.")
except FileNotFoundError:
print(f"Failed to embed album art - No such file: {image_path}")
def process_audio_file_with_spotify_search(audio_file_path):
shazam_data = shazam_recognize_song(audio_file_path)
if shazam_data:
artist_name = shazam_data['track']['subtitle']
title = shazam_data['track']['title']
print(f"Identified Song: {artist_name} - {title}")
access_token = authenticate_spotify(CLIENT_ID, CLIENT_SECRET)
song_info = search_spotify_for_song(access_token, artist_name, title)
if song_info:
print(json.dumps(song_info, indent=4))
print("\n///////////////////////////////\n")
album_name = song_info['album']['name']
album_url = song_info['album']['external_urls']['spotify']
track_number = song_info['track_number']
release_date = song_info['album']['release_date']
isrc = song_info.get('external_ids', {}).get('isrc', "Not Available")
label = song_info['label'] if 'label' in song_info else "Not Available"
explicit = str(song_info['explicit']) if 'explicit' in song_info else "Not Available"
genres = ", ".join(song_info['genres']) if 'genres' in song_info else "Not Available"
author_url = song_info['artists'][0]['external_urls']['spotify'] if 'artists' in song_info else "Not Available"
spotify_url = song_info['external_urls']['spotify']
print(f"Track Number on Spotify: {track_number}")
audiofile = eyed3.load(audio_file_path)
if audiofile.tag is None:
audiofile.initTag(version=eyed3.id3.ID3_V2_3)
audiofile.tag.artist = artist_name
audiofile.tag.album = album_name
audiofile.tag.album_artist = artist_name
audiofile.tag.title = title
audiofile.tag.recording_date = release_date
add_or_update_txxx_frame(audiofile, "Album URL", album_url)
add_or_update_txxx_frame(audiofile, "Eurydice", "True")
add_or_update_txxx_frame(audiofile, "Compilation", "KK")
add_or_update_txxx_frame(audiofile, "Genre", genres)
add_or_update_txxx_frame(audiofile, "Author URL", author_url)
add_or_update_txxx_frame(audiofile, "Label", label)
add_or_update_txxx_frame(audiofile, "Explicit", explicit)
add_or_update_txxx_frame(audiofile, "ISRC", isrc)
add_or_update_txxx_frame(audiofile, "Spotify URL", spotify_url)
audiofile.tag.comments.set(f"ISRC: {isrc}, Label: {label}, Explicit: {explicit}")
audiofile.tag.save()
print(f"Metadata embedded into the file: {audio_file_path}")
high_res_image_url = get_high_quality_album_art_url(song_info)
if high_res_image_url:
image_file_path = os.path.splitext(audio_file_path)[0] + ".jpg"
if save_high_quality_album_art(high_res_image_url, image_file_path):
embed_album_art_to_song(audio_file_path, image_file_path)
else:
print("Skipping album art embed due to download failure.")
else:
print("No album art available.")
new_file_name = f"{track_number:02d}. {title} - {artist_name} - {album_name} - {isrc}.mp3"
new_file_name = re.sub(r'[/:*?"<>|]', '', new_file_name)
new_file_path = os.path.join(os.path.dirname(audio_file_path), new_file_name)
os.rename(audio_file_path, new_file_path)
print(f"File has been renamed to: {new_file_name}")
new_image_file_path = os.path.splitext(new_file_path)[0] + ".jpg"
os.rename(image_file_path, new_image_file_path)
print(f"Album art file has been renamed to: {os.path.basename(new_image_file_path)}")
lyrics = get_lyrics_from_genius(artist_name, title)
if 'plain' in lyrics:
lyrics_plain_text = lyrics['plain']
print("Printing Lyrics:\n", lyrics_plain_text)
save_lyrics_to_file(audio_file_path, track_number, title, artist_name, album_name, isrc, lyrics_plain_text)
print("Lyrics file saved")
else:
print("No lyrics available to save.")
else:
print("Song not found on Spotify.")
else:
print("Song could not be identified.")
if __name__ == "__main__":
audio_source_choice = get_audio_source_choice()
if audio_source_choice == '3':
user_choice = get_user_choice()
audio_file_path = 'D:/Eurydice/Encompassing Data by discerning/Test_file/Unknown_file.mp3'
if user_choice == '1':
print("\n" + "." * 15 + " ᴜsɪɴɢ YᴏᴜᴛᴜʙᴇACR " + "." * 15 + "\n")
song_tags = recognize_song(audio_file_path)
if song_tags:
print(f'Song identified: {song_tags}')
set_id3_tags_mp3(audio_file_path, song_tags)
artist_name = song_tags.get('artists')[0].get('name')
song_title = song_tags.get('title')
safe_artist_name = re.sub(r'[/\:?"<>|]', '', artist_name)
safe_song_title = re.sub(r'[/\:?"<>|]', '', song_title)
new_file_name = f"{safe_artist_name} - {safe_song_title}.mp3"
new_file_path = os.path.join(os.path.dirname(audio_file_path), new_file_name)
os.rename(audio_file_path, new_file_path)
print(f"File has been renamed to: {new_file_name}")
else:
print('Could not identify the song in YᴏᴜᴛᴜʙᴇACR.')
apple_music_api = AppleMusicApi(Exception)
apple_music_api.get_access_token()
track_results = apple_music_api.search('songs', f"{artist_name} - {song_title}")
if track_results:
track_id = track_results[0]['id']
album_artwork_url_template = track_results[0]['attributes']['artwork']['url']
save_and_embed_album_cover(new_file_path, artist_name, song_title, album_artwork_url_template)
else:
print("Song not found on Apple Music.")
lrc_lyrics = get_lyrics(safe_artist_name, safe_song_title)
if lrc_lyrics:
lrc_file_path = os.path.join(os.path.dirname(audio_file_path), f"{safe_artist_name} - {safe_song_title}.lrc")
with open(lrc_file_path, 'w', encoding='utf-8') as lrc_file:
lrc_file.write(lrc_lyrics)
print(f"Saved LRC file to: {lrc_file_path}")
else:
print("Could not get the lyrics.")
elif user_choice == '2':
print("\n" + "." * 15 + " ᴜsɪɴɢ Sʜᴀᴢᴀᴍ " + "." * 15 + "\n")
song_tags = shazam_recognize_song(audio_file_path)
print(song_tags)
process_audio_file_with_spotify_search(audio_file_path)
else:
print("Invalid choice. Exiting....")
exit()
elif audio_source_choice == '1':
audio_file_path = capture_and_save_audio_from_mic(duration=10, sample_rate=44100)
print("Attempting to recognize using YᴏᴜᴛᴜʙᴇACR first…\n")
song_tags = recognize_song(audio_file_path)
use_acrcloud = True
if song_tags is None:
print("YᴏᴜᴛᴜʙᴇACR couldn't identify the song. Attempting recognition with Sʜᴀᴢᴀᴍ…\n")
song_tags = shazam_recognize_song(audio_file_path)
use_acrcloud = False
if song_tags:
if use_acrcloud:
artist_name = song_tags.get('artists')[0].get('name')
song_title = song_tags.get('title')
print(f"Song recognized successfully from youtubeACR!\n Artist: {artist_name}, Song: {song_title}\n")
else:
artist_name = song_tags['track']['subtitle']
title = song_tags['track']['title']
access_token = authenticate_spotify(CLIENT_ID, CLIENT_SECRET)
song_info = search_spotify_for_song(access_token, artist_name, title)
if song_info:
album_name = song_info['album']['name']
track_number = song_info['track_number']
isrc = song_info.get('external_ids', {}).get('isrc', "Not Available")
print(f"Song recognized successfully by sha-spo!\n Artist: {artist_name}, Song: {track_number:02d}. {title}, Album: {album_name}, ISRC tag: {isrc}\n")
else:
print(f"Song recognized successfully by Shazam!\n Artist: {artist_name}, Song: {title}\n")
else:
print("Failed to recognize the song from the service.\n")
elif audio_source_choice == '2':
print("\nAvailable audio devices for capture:\n")
devices = sd.query_devices()
for index, device in enumerate(devices):
print(f"{index}: {device['name']} - {'(Default)' if device['default_samplerate'] == device['default_low_output_latency'] else ''}")
device_selection = input("Please enter the device index or name you wish to use for the capture: ").strip()
try:
device_selection = int(device_selection)
except ValueError:
pass
audio_file_path = capture_internal_audio(device=device_selection, duration=10, sample_rate=44100)
print("waiting....\n")
print("Attempting to recognize using YᴏᴜᴛᴜʙᴇACR first…\n")
song_tags = recognize_song(audio_file_path)
use_acrcloud = True
if song_tags is None:
print("YᴏᴜᴛᴜʙᴇACR couldn't identify the song. Attempting recognition with Sʜᴀᴢᴀᴍ…\n")
song_tags = shazam_recognize_song(audio_file_path)
use_acrcloud = False
if song_tags:
if use_acrcloud:
artist_name = song_tags.get('artists')[0].get('name')
song_title = song_tags.get('title')
print(f"Song recognized successfully from youtubeACR!\n Artist: {artist_name}, Song: {song_title}\n")
else:
artist_name = song_tags['track']['subtitle']
title = song_tags['track']['title']
access_token = authenticate_spotify(CLIENT_ID, CLIENT_SECRET)
song_info = search_spotify_for_song(access_token, artist_name, title)
if song_info:
album_name = song_info['album']['name']
track_number = song_info['track_number']
isrc = song_info.get('external_ids', {}).get('isrc', "Not Available")
print(f"Song recognized successfully by sha-spo!\n Artist: {artist_name}, Song: {track_number:02d}. {title}, Album: {album_name}, ISRC tag: {isrc}\n")
else:
print(f"Song recognized successfully by Shazam!\n Artist: {artist_name}, Song: {title}\n")
else:
print("Failed to recognize the song from the service.\n")
else:
exit();
|
2b52faabb8971731811f95c1e989a3b5
|
{
"intermediate": 0.2765008807182312,
"beginner": 0.5614528059959412,
"expert": 0.16204631328582764
}
|
44,196
|
i have a reference variable on my catalog item select_incident refer to incident table. my requirement is when i select any incident from there and try to submit the catalog request then it will check the priority of that incident, if the priority of incident is 1 then show a popup on screen that priority is High not able to submit the request and prevent me to submit the request
|
cf491e866a1fe2dd64de15bf0b4c3b9b
|
{
"intermediate": 0.40118300914764404,
"beginner": 0.27414312958717346,
"expert": 0.3246738612651825
}
|
44,197
|
under the datatable library in python write a code to remove duplicates from the dataframe
|
3fc13cc7f44293dd28140588f1f93a36
|
{
"intermediate": 0.7998887896537781,
"beginner": 0.04876116290688515,
"expert": 0.15135003626346588
}
|
44,198
|
if i enter 3 here , 3 disappears and press enter it goes to user_choice 3
its fine
but when i press 3 or 2 or 1
the entered number disapperas
printer.py:
from itertools import cycle
from shutil import get_terminal_size
from threading import Thread
from time import sleep
from printer import Printer
class Loader:
def init(self, chan, desc=“Loading…”, end=‘’, timeout=0.1, mode=‘prog’):
self.desc = desc
self.end = end
self.timeout = timeout
self.channel = chan
self._thread = Thread(target=self._animate, daemon=True)
if mode == ‘std1’:
self.steps = [“⢿”, “⣻”, “⣽”, “⣾”, “⣷”, “⣯”, “⣟”, “⡿”]
elif mode == ‘std2’:
self.steps = [“◜”,“◝”,“◞”,“◟”]
elif mode == ‘std3’:
self.steps = ["😐 ","😐 ","😮 ","😮 ",“😦 “,“😦 “,“😧 “,“😧 “,“🤯 “,“💥 “,“✨ “,”\u3000 “,”\u3000 “,”\u3000 “]
elif mode == ‘prog’:
self.steps = [”[∙∙∙]”,”[●∙∙]”,”[∙●∙]”,”[∙∙●]”,”[∙∙∙]”]
self.done = False
def start(self):
self._thread = Thread(target=self._animate, daemon=True)
self._thread.start()
return self
def _animate(self):
for c in cycle(self.steps):
if self.done:
break
Printer.print_loader(self.channel, f”\r\t{c} {self.desc} “)
# print(f”\r{c} {self.desc}”, end=‘’, flush=True)
sleep(self.timeout)
def enter(self):
self.start()
return self
def stop(self):
self.done = True
cols = get_terminal_size((80, 20)).columns
Printer.print_loader(self.channel, “\r” + " " * cols)
if self.end != “”:
Printer.print_loader(self.channel, f”\r{self.end}”)
def exit(self, exc_type, exc_value, tb):
# handle exceptions with those variables ^
self.stop()
if i enter 3 here , 3 disappears and press enter it goes to user_choice 3
its fine
but when i press 3 or 2 or 1
the entered number disapperas
give condition like after pressing like 1 or 2 or 3 then start animation upto 10 seeconds
code:
def get_audio_source_choice():
border = "=" * 50
title = "AUDIO SOURCE SELECTION"
padded_title = title.center(len(border))
print(f"\n{border}")
print(padded_title)
print(border)
box_width = max(len(s) for s in ["Microphone - Live audio capture",
"Internal Sound - Detect sounds playing internally on the device",
"File - Detect through an internally saved file"]) + 6
print("\nPlease select the audio source you'd like to use:\n")
print(f"+{'-' * (box_width - 2)}+")
print(f"| 1: Microphone - Live audio capture{' ' * (box_width - len(' 1: Microphone - Live audio capture') - 3)}|")
print(f"| 2: Internal Sound - Detect sounds playing internally on the device{' ' * (box_width - len(' 2: Internal Sound - Detect sounds playing internally on the device') - 3)}|")
print(f"| 3: File - Detect through an internally saved file{' ' * (box_width - len(' 3: File - Detect through an internally saved file') - 3)}|")
print(f"+{'-' * (box_width - 2)}+")
with Loader(chan="Main", desc="Waiting…", mode='std2') as loader:
choice = input("Enter your choice (1, 2, or 3) and press Enter: ")
loader.stop()
print(f"Your choice was: {choice}")
print(f"{border}\n")
return choice
|
78d27e6d4023f4a23106b97893638098
|
{
"intermediate": 0.327720046043396,
"beginner": 0.4301060736179352,
"expert": 0.24217386543750763
}
|
44,199
|
Fix this code:
<!DOCTYPE html>
<html lang="ru">
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>Yaoi</title>
</head>
<body>
<div>
<img src="me.png" style="display: none;" id="scream"/>
<img src="cloud.png" style="display: none;" id="cloud"/>
<img src="restart.png" style="display: none;" id="restart"/>
</div>
<h3 style="text-align: center;">Save Me</h3>
<div id="container" style="text-align: center; padding-top: 25px;">
<canvas id="gameCanvas" width="800" height="500" style="border: 1px solid #d3d3d3; background: url(bac.jpg);" onclick="playAgain()"></canvas>
</div>
<audio style="display: none;" autoplay="autoplay">
<source src="3D_Worldrunner_Main.mid" type="audio/mpeg">
Your browser does not support the audio element.
</audio>
<div style="text-align: center;"><input type="button" value="FullScreen" onclick="fullScreen()"/></div>
<script>
let startTime = 0;
const elem = document.getElementById("gameCanvas");
elem.addEventListener('webkitfullscreenchange', function(e) {
config.isFullScreen = !config.isFullScreen;
if (config.isFullScreen) {
const elem = document.getElementById("gameCanvas");
elem.setAttribute("width", screen.width);
elem.setAttribute("height", screen.height);
container.width = screen.width;
container.height = screen.height;
config.font="30px arial";
} else {
const elem = document.getElementById("gameCanvas");
elem.setAttribute("width", 800);
elem.setAttribute("height", 500);
container.width = 800;
container.height = 500;
config.font="25px arial";
}
}, false);
const config = {isFullScreen: false, font:"25px arial", fontColor:"rgba(0,0,0,0.9)"};
function fullScreen() {
const elem = document.getElementById("gameCanvas");
//config.isFullScreen = true;
elem.requestFullscreen();
}
function playAgain() {
startTime = new Date();
gameLoop = setInterval(moveBalls, 20);
enemyIncrementLoop = setInterval(incrementEnemy, 5000);
init();
$("#gameCanvas").off("click");
}
const container = {id: "container", width: 800, height: 500};
const player = {id: 'player', x: container.width / 2, y: container.height - 50, width: 50, height: 80, speedX: 20, speedY: 5};
const cloud = {id: "cloud", x: -150, y: 50, width: 150, height: 100, speedX: 3, speedY: 5};
let objects;
function init() {
objects = [];
incrementEnemy();
}
function incrementEnemy() {
if (objects.length < 10) {
const r = Math.floor(Math.random() * 256);
const g = Math.floor(Math.random() * 256);
const b = Math.floor(Math.random() * 256);
const color = `rgba(${r},${g},${b},0.5)`;
const xPosition = container.width * Math.random();
const xRandius = Math.floor(Math.random() * 40) + 10;
const xSpeed = Math.floor(Math.random() * 6) + 2;
const ySpeed = Math.floor(Math.random() * 6) + 2;
if (xPosition <= 2 * xRandius || xPosition + 2 * xRandius >= container.width) {
xPosition = container.width / 2;
}
objects.push({id: "#circle", radius: xRandius, speedX: xSpeed, speedY: ySpeed, color: color, centerX: xPosition, centerY: 80});
}
}
const c = document.getElementById('gameCanvas');
const ctx = c.getContext('2d');
ctx.font = config.font;
ctx.fillText('Click Here To Start Game', container.width / 2 - 5 * 25, container.height / 2);
window.addEventListener('keydown', gamekeyboard);
let mouseOffset = 1;
let currentLoopCountOffset = 0;
function gamemousemove() {
if (currentLoopCountOffset == mouseOffset) {
currentLoopCountOffset = 0
if (config.isFullScreen)
{
if (event.pageX - this.offsetLeft > player.width / 2 && event.pageX - this.offsetLeft <= container.width - player.width / 2) {
player.x = event.pageX - player.width / 2;
}
if (event.pageY > player.height / 2 && event.pageY <= container.height - player.height / 2) {
player.y = event.pageY - player.height / 2;
}
else {
if (event.pageX - this.offsetLeft > player.width / 2 && event.pageX - this.offsetLeft <= container.width - player.width / 2) {
player.x = event.pageX - player.width / 2 - this.offsetLeft;
}
if (event.pageY - this.offsetTop > player.height / 2 && event.pageY - this.offsetTop <= container.height - player.height / 2) {
player.y = event.pageY - player.height / 2 - this.offsetTop;
}
}
}
}
currentLoopCountOffset++;
}
function gamekeyboardlistener() {
if (event.keyCode == 37) {
e('Left');
if (player.x > 0)
player.x -= player.speedX;
}
if (event.keyCode == 38) {
e('Top');
}
if (event.keyCode == 39) {
e('Right');
if (player.x + player.width + player.speedX < container.width)
player.x += player.speedX;
}
if (event.keyCode == 40) {
e('Bottom');
}
}
// Main render/update tick: clears the canvas, advances and redraws every
// ball, ends the game when a ball overlaps the player sprite, then draws
// the player, the scrolling cloud and the elapsed-time readout.
// Relies on shared game state declared elsewhere in this file:
// container, objects, player, cloud, startTime, config, gameLoop,
// enemyIncrementLoop.
function moveBalls() {
    const c = document.getElementById('gameCanvas');
    const ctx = c.getContext('2d');
    ctx.clearRect(0, 0, container.width, container.height);
    for (let i = 0; i < objects.length; i++) {
        const object = objects[i];
        // Collision test: a probe point at 75% of the ball's radius (above
        // or below / left or right of its center) falls inside the player's
        // bounding box on BOTH axes.
        const overlapsVertically =
            (object.centerY + object.radius * 0.75 >= player.y && object.centerY + object.radius * 0.75 <= player.y + player.height)
            || (object.centerY - object.radius * 0.75 >= player.y && object.centerY - object.radius * 0.75 <= player.y + player.height);
        const overlapsHorizontally =
            (object.centerX + object.radius * 0.75 > player.x && object.centerX + object.radius * 0.75 < player.x + player.width)
            || (object.centerX - object.radius * 0.75 > player.x && object.centerX - object.radius * 0.75 < player.x + player.width);
        if (overlapsVertically && overlapsHorizontally) {
            // Survived time in whole seconds. Math.abs guards against clock
            // skew making the difference negative; the previous code did
            // `score *= -1` on a `const`, which throws a TypeError.
            const score = Math.abs(Math.ceil((new Date() - startTime) / 1000));
            ctx.fillStyle = config.fontColor;
            ctx.fillText('Game Over', container.width / 2 - 3 * 25, container.height / 2);
            ctx.fillText('Score : '+ score +' seconds', container.width / 2 - 4 * 25, container.height / 2 + 35);
            // Offer a restart button centered in the lower half of the canvas.
            const imgRestart = document.getElementById('restart');
            const restart = {x:container.width * 0.5 - 50, y:container.height * 0.75 - 50, width:100, height: 100};
            ctx.drawImage(imgRestart, restart.x, restart.y, restart.width, restart.height);
            // Stop both timers and detach the click-to-move handler.
            clearInterval(gameLoop);
            clearInterval(enemyIncrementLoop);
            $('#gameCanvas').off("click");
        }
        // Bounce off the side and top/bottom walls. This (and the draw/move
        // below) must run for EVERY ball every frame; previously it was
        // nested inside the vertical-overlap check, so balls not aligned
        // with the player's row were never drawn or moved.
        if (object.centerX + object.radius > container.width || object.centerX - object.radius < 0) {
            object.speedX *= -1;
        }
        if (object.centerY + object.radius > container.height || object.centerY - object.radius < 0) {
            object.speedY *= -1;
        }
        ctx.beginPath();
        ctx.fillStyle = object.color;
        ctx.arc(object.centerX, object.centerY, object.radius, 0, 2 * Math.PI);
        ctx.fill();
        object.centerX += object.speedX;
        object.centerY += object.speedY;
    }
    // Scene finishing: drawn once per frame, after all balls (previously
    // this ran inside the loop, once per ball).
    const img = document.getElementById('scream');
    const imgCloud = document.getElementById('cloud');
    ctx.drawImage(img, player.x, player.y, player.width, player.height);
    ctx.drawImage(imgCloud, cloud.x, cloud.y, cloud.width, cloud.height);
    cloud.x = cloud.x + cloud.speedX;
    if (cloud.x > container.width) {
        // Re-enter from the left once fully off the right edge.
        cloud.x = (cloud.width * -1) - 20;
    }
    ctx.font = config.font;
    ctx.fillStyle = config.fontColor;
    ctx.fillText("Sec : " + (new Date()-startTime) + "", container.width *0.8,40);
}
</script>
</body>
</html>
|
23569b26bbf17df1bc81eed9b4b5d807
|
{
"intermediate": 0.2660142779350281,
"beginner": 0.5979357361793518,
"expert": 0.13604997098445892
}
|
44,200
|
simulate a computer terminal with imaginary access to the secret files of the internet
|
57837e94eb9cb26c7476a2960a8e7a59
|
{
"intermediate": 0.314062237739563,
"beginner": 0.3193788230419159,
"expert": 0.3665589392185211
}
|
44,201
|
simulate a computer terminal with imaginary access to the secret files of the internet
|
c745e0fad9f4c1ffc4dbf7a9abafec6c
|
{
"intermediate": 0.314062237739563,
"beginner": 0.3193788230419159,
"expert": 0.3665589392185211
}
|
44,202
|
Riassumi il seguente testo. La struttura del riassunto sarà composta da un elenco di paragrafi di circa 100 parole ognuno. Per ogni paragrafo aggiungi un titolo di massimo 4 o 5 parole che riassuma il contenuto del paragrafo.
Restituisci la risposta in formato JSON. La sintassi del JSON deve essere valida, quindi se nel titolo o nel testo devi inserire dei caratteri speciali, devi usare i caratteri escape, ad esempio \" oppure \'. La risposta deve includere soltanto il codice JSON per poterlo elaborare da un software. Non aggiungere nient'altro
Testo:
Nei giorni scorsi, Google ha annunciato un rebrand per la sua AI Bard che ora si chiama Google Gemini, riprendendo il nome del modello AI svelato lo scorso dicembre. Il rebrand riguarda tutte le versioni di Bard ed ora Gemini è disponibile anche con un’app per smartphone e tablet Android.
L’app non è ancora disponibile in Italia, ma arriverà certamente perché Google lo ha già annunciato. Non sarà più necessario passare dal browser per accedere all’assistente AI di Google. L’utilizzo dell’app di Gemini, però, ha una conseguenza: la disattivazione di Google Assistant. Gli utenti saranno, quindi, chiamati a una scelta.
Gemini disattiva Assistant
Come riportato da Android Authority, l’installazione dell’app di Gemini comporta una rinuncia a Google Assistant. Una volta installata l’applicazione dell’assistente AI, infatti, tutte le gesture utilizzate prima per Assistant saranno assegnate a Gemini. Gli utenti hanno la possibilità di ripristinare l’assistente vocale di Google, ma rinunciando a Gemini.
Da notare, però, riaprendo Gemini manualmente sarà necessario ripetere la procedura di configurazione. Parte di questa configurazione è proprio la sostituzione di Assistant. Di fatto, quindi, si crea un loop che obbliga l’utente disattivare l’assistente di Google ogni volta che intende utilizzare Gemini.
L’unica opzione alternativa per usare Gemini su Android, oltre all’app e alla conseguente disattivazione di Google Assistant, è rappresentata dal browser web, collegandosi all’indirizzo gemini.google.com. Il browser, almeno per il momento, è la soluzione migliore per usare l’AI della casa americana sul proprio smartphone Android senza dover rinunciare ad Assistant. L’accesso via browser è, inoltre, disponibile anche da altri dispositivi.
Come usare Gemini Advanced
Gemini Advanced è la versione più completa e potente di Gemini ed è accessibile tramite l’apposito piano in abbonamento, denominato Google One AI Premium. Questo piano ha un costo di 21,99 euro al mese (con i primi due mesi di prova gratuita) e offre anche 2 TB di spazio di archiviazione in cloud.
Come tutti i piani a pagamento di Google One, anche Google One AI Premium permette a chi lo sottoscrive di attivare un gruppo famiglia, con al massimo 5 membri.
La condivisione, però, non riguarda Gemini Advanced. La nuova tecnologia di Google, infatti, è disponibile solo per l’utente principale, quello che ha attiva l’abbonamento Google One AI Premium. Gli utenti invitati a condividere i vantaggi della sottoscrizione, invece, non potranno beneficiare della versione più potente di Gemini ma dovranno accontentarsi della versione standard.
Gli altri piani a pagamento di Google One non consentono l’accesso a Gemini Advanced, quindi se due o più persone all’interno dello stesso nucleo familiare vorranno sfruttare l’AI avanzata di Google, allora dovranno pagare tutti 21,99 euro al mese a testa.
JSON:
{
"paragrafi": [
{
"titolo": "Rebrand di Bard in Gemini",
"testo": "Google ha rinominato la sua AI Bard in Gemini, un potente assistente AI virtuale che può generare vari tipi di contenuti, disponibile ora anche tramite app per Android."
},
{
"titolo": "App Gemini e disattivazione di Assistant",
"testo": "L'app Gemini disattiva Google Assistant, questo costringe gli utenti a dover scegliere fra Gemini o Assistant."
},
{
"titolo": "Utilizzo di Gemini su Android",
"testo": "Gemini può essere utilizzato su Android tramite l'app (ancora non disponibile in italia, ma presto arriverà in base a quanto annunciato da Google) o dal browser web, se viene usata l'app Google Assistant verrà disabilitato."
},
{
"titolo": "Gemini Advanced",
"testo": "Gemini Advanced è la versione premium e più potente di Gemini, accessibile tramite l'abbonamento Google One AI Premium."
},
{
"titolo": "Condivisione di Gemini Advanced",
"testo": "Gemini Advanced è disponibile solo per l'utente principale dell'abbonamento Google One AI Premium e non può essere condiviso con i membri della famiglia. In tal caso sarà necessario pagare l'abbonamento per ogni membro della famiglia."
}
]
}
Testo:
Questo è il codice ufficiale di Stable Cascade. Forniamo script per l'addestramento e l'inferenza, oltre a una serie di modelli diversi che è possibile utilizzare.
Questo modello è costruito sull'architettura di Würstchen e la sua principale differenza rispetto ad altri modelli, come Stable Diffusion, è che lavora su uno spazio latente molto più piccolo. Perché è importante? Più piccolo è lo spazio latente, più veloce è l'inferenza e più economico è l'addestramento. Quanto è piccolo lo spazio latente? Stable Diffusion utilizza un fattore di compressione di 8, per cui un'immagine di 1024x1024 viene codificata a 128x128. Stable Cascade raggiunge un fattore di compressione di 42, il che significa che è possibile codificare un'immagine di 1024x1024 in 24x24, mantenendo ricostruzioni nitide. Il modello condizionale del testo viene quindi addestrato nello spazio latente altamente compresso. Le versioni precedenti di questa architettura hanno ottenuto una riduzione dei costi di 16 volte rispetto a Stable Diffusion 1.5.
Pertanto, questo tipo di modello è adatto per gli usi in cui l'efficienza è importante. Inoltre, con questo metodo sono possibili tutte le estensioni note come finetuning, LoRA, ControlNet, IP-Adapter, LCM ecc. Alcune di queste sono già state fornite (finetuning, ControlNet, LoRA) nelle sezioni dedicate all'addestramento e all'inferenza.
Inoltre, Stable Cascade ottiene risultati impressionanti, sia dal punto di vista visivo che da quello della valutazione. Secondo la nostra valutazione, Stable Cascade ottiene le migliori prestazioni sia nell'allineamento dei prompt che nella qualità estetica in quasi tutti i confronti. L'immagine qui sopra mostra i risultati di una valutazione umana che utilizza un mix di prompt parziali (link) ed estetici. In particolare, Stable Cascade (30 passi di inferenza) è stato confrontato con Playground v2 (50 passi di inferenza), SDXL (50 passi di inferenza), SDXL Turbo (1 passo di inferenza) e Würstchen v2 (30 passi di inferenza).
JSON:
{
"paragrafi": [
{
"titolo": "Stable Cascade",
"testo": "Stable Cascade è un modello di generazione di immagini che utilizza uno spazio latente più piccolo rispetto ad altri modelli, come Stable Diffusion, per un'inferenza più veloce e un addestramento più economico."
},
{
"titolo": "Spazio latente compresso",
"testo": "Stable Cascade utilizza un fattore di compressione di 42, che consente di codificare un'immagine di 1024x1024 in 24x24, mantenendo ricostruzioni nitide."
},
{
"titolo": "Efficienza e versatilità",
"testo": "Stable Cascade è adatto per gli usi in cui l'efficienza è importante e supporta estensioni come finetuning, LoRA, ControlNet e IP-Adapter."
},
{
"titolo": "Prestazioni impressionanti",
"testo": "Stable Cascade ottiene risultati eccellenti sia dal punto di vista visivo che della valutazione, superando altri modelli in termini di allineamento dei prompt e qualità estetica."
}
]
}
Testo:
ll between p(x) and f (x) except at the nodes. Example 2.1. Consider these five data points: (0, 8), (1, 12), (3, 2), (4, 6), (8, 0). Construct and plot the interpolation polynomial using the two outermost points. Repeat this process by adding one additional point at a time until all the points are included. What conclusions can you draw? Solution: The first interpolation polynomial is the line between the outermost points (0, 8) and (8, 0). Then we added the points (3, 2), (4, 5), and (1, 12) in that order and plotted a curve for each additional point. All of these polynomials are shown in Figure 5. We were hoping for a smooth curve going through these points without wide fluctuations, but this did not happen. (Why?) It may seem counterintuitive, but as we added more points, the situation became worse instead of better! The reason for this comes from the nature of high-degree polynomials. A polynomial of degree n has n zeros. If all of these zero points are real, then the curve crosses the x-axis n times. The resulting curve must make many turns for this to happen, resulting in wild oscillations. 2.1. Dirichlet Functions. As a pathological example, consider the so-called Dirich- let function f , defined to be 1 at each irrational point and 0 at each rational point. If we choose nodes that are rational numbers, then p(x) ≡ 0 and f (x) − p(x) = 0 for all rational values of x, but f (x) − p(x) = 1 for all irrational values of x. However, if the function f is well-behaved, can we not assume that the differences |f (x) − p(x)| will be small when the number of interpolating nodes is large? The answer is still no, even for functions that possess continuous derivatives of all orders on the interval! 2.2. Runge Functions. A specific example of this remarkable phenomenon is pro- vided by the Runge function: (2.2.1) f (x) = (1 + x2)−1 2. ERRORS IN POLYNOMIAL INTERPOLATION Figure 6. Polynomial interpolant with nine equally spaced nodes. on the interval [−5, 5]. 
Let pn be the polynomial that interpolates this function at n + 1 equally spaced points on the interval [−5, 5], including the endpoints. Then lim n→∞ max −5≤x≤5 |f (x) − pn(x)| = ∞ Thus, the effect of requiring the agreement of f and pn at more and more points is to increase the error at nonnodal points, and the error actually increases beyond all bounds! The moral of this example, then, is that polynomial interpolation of high degree with many nodes is a risky operation; the resulting polynomials may be very unsatisfactory as representations of functions unless the set of nodes is chosen with great care. The reader can easily observe the phenomenon just described by using the pseu- docodes already developed in this chapter. In a more advanced study of this topic, it would be shown that the divergence of the polynomials can often be ascribed to the fact that the nodes are equally spaced. Again, contrary to intuition, equally distributed nodes are usually a very poor choice in interpolation. A much better choice for n + 1 nodes in [−1, 1] is the set of Chebyshev nodes: ã ïÅ 2i + 1 2n + 2 ò xi = cos π 0 ≤ i ≤ n. The corresponding set of nodes on an arbitrary interval [a, b] would be derived from a linear mapping to obtain xi = 1 2 (a + b) + 1 2 (b − a) cos ïÅ 2i + 1 2n + 2 ã π ò 0 ≤ i ≤ n. Notice that these nodes are numbered from right to left. Since the theory does not depend on any particular ordering of the nodes, this is not troublesome. A simple graph illustrates this phenomenon best. Again, consider Equation (2.2.1) on the interval [−5, 5]. First, we select nine equally spaced nodes and use routines Coef and Eval with an automatic plotter to graph p8. As shown in Figure 6, the resulting curve assumes negative values, which, of course, f (x) does not have! Adding more equally spaced nodes – and thereby obtaining a higher-degree polynomial?only makes matters worse with wilder oscillations. 
In Figure 7, nine Chebyshev nodes are used, and the resulting polynomial curve is smoother. However, cubic splines produce an even better curve fit. The Chebyshev nodes are obtained by taking equally-spaced points on a semicircle and projecting them down onto the horizontal axis, as in Figure 8. 157 158 CHAPTER 5. INTERPOLATION Figure 7. Polynomial interpolant with nine Chebyshev nodes. Figure 8. Interpolation with Chebyshev points. 2.3. Theorems on Interpolation Errors. It is possible to assess the errors of interpolation by means of a formula that involves the (n + 1)st derivative of the function being interpolated. Here is the formal statement: Theorem 2.2 (Interpolation errors I). If p is the polynomial of degree at most n that interpolates f at the n+1 distinct nodes x0, x1, . . . , xn belonging to an interval [a, b] and if f (n+1) is continuous, then for each x in [a, b], there is a ξ in (a, b) for which (2.3.1) f (x) − p(x) = 1 (n + 1)! f (n+1)(ξ) n (cid:89) (x − xi) i=0 Proof. Observe first that Equation (2.3.1) is obviously valid if x is one of the nodes xi because then both sides of the equation reduce to zero. If x is not a node, let it be fixed in the remainder of the discussion, and define w(t) = n (cid:89) (t − xi) (polynomial in the variable t) (2.3.2) i=0 f (x) − p(x) w(x) φ(t) = f (t) − p(t) − cw(t) c = (constant) (function in the variable t) 2. ERRORS IN POLYNOMIAL INTERPOLATION Observe that c is well defined because w(x) ̸= 0 (x is not a node). Note also that φ takes the value 0 at the n + 2 points x0, x1, . . ., xn, and x. Now invoke Rolle’s Theorem1, which states that between any two roots of φ, there must occur a root of φ′. Thus, φ′ has at least n + 1 roots. By similar reasoning, φ′′ has at least n roots, φ′′′ has at least n − 1 roots, and so on. Finally, it can be inferred that φ(n+1) must have at least one root. Let ξ be a root of φ(n+1). All the roots being counted in this argument are in (a, b). 
Thus, 0 = φ(n+1)(ξ) = f (n+1)(ξ) − p(n+1)(ξ) − cw(n+1)(ξ) In this equation, p(n+1)(ξ) = 0 because p is a polynomial of degree ≤ n. Also, w(n+1)(ξ) = (n + 1)! because w(t) = tn+1 + (lower-order terms in t). Thus, we have 0 = f (n+1)(ξ) − c(n + 1)! = f (n+1)(ξ) − (n + 1)! w(x) [f (x) − p(x)] This equation is a rearrangement of Equation (2.3.1). A special case that often arises is the one in which the interpolation nodes are equally spaced. Lemma 2.3 (Upper Bound Lemma). Suppose that xi = a + ih for i = 0, 1, . . . , n and that h = (b − a)/n. Then for any x ∈ [a, b] (2.3.3) n (cid:89) i=0 |x − xi| ≤ 1 4 hn+1n! Proof. To establish this inequality, fix x and select j so that xj ≤ x ≤ xj+1. It is an exercise in calculus to show that (2.3.4) |x − xj||x − xj+1| ≤ h2 4 Using Equation (2.3.4), we have n (cid:89) i=0 |x − xi| ≤ h2 4 j−1 (cid:89) i=0 (x − xi) n (cid:89) i=j+2 (xi − x) The sketch in Figure 9, showing a typical case of equally spaced nodes, may be helpful. Since xj ≤ x ≤ xj+1, we have further n (cid:89) i=0 |x − xi| ≤ h2 4 j−1 (cid:89) i=0 (xj+1 − xi) n (cid:89) i=j+2 (xi − xj) 1Rolle’s Theorem: Let f be a function that is continuous on [a, b] and differentiable on (a, b). If f (a) = f (b) = 0, then f ′(c) = 0 for some point c in (a, b). 159 □ 160 CHAPTER 5. INTERPOLATION Figure 9. Typical location of x in equally spaced nodes. Now use the fact that xi = a + ih. Then we have xj+1 − xi = (j − i + 1)h and xi − xj = (i − j)h. Therefore, n (cid:89) i=0 |x − xi| ≤ h2 4 hjhn−(j+2)+1 j−1 (cid:89) i=0 (j − i + 1) n (cid:89) i=j+2 (i − j) ≤ 1 4 hn+1(j + 1)!(n − j)! ≤ 1 4 hn+1n! In the last step, we use the fact that if 0 ≤ j ≤ n − 1, then (j + 1)!(n − j)! ≤ n!. This, □ too, is left as an exercise. Hence, Inequality (2.3.3) is established. We can now find a bound on the interpolation error. Theorem 2.4 (Interpolation errors II). Let f be a function such that f (n+1) is continuous on [a, b] and satisfies |f (n+1)(x)| ≤ M . 
Let p be the polynomial of degree ≤ n that interpolates f at n + 1 equally spaced nodes in [a, b], including the endpoints. Then on [a, b], (2.3.5) |f (x) − p(x)| ≤ 1 4(n + 1) M hn+1 where h = (b − a)/n is the spacing between nodes. 2.3. Proof. Use Theorem 2.2 on interpolation errors and Inequality (2.3.3) in Lemma □ This theorem gives loose upper bounds on the interpolation error for different values of n. By other means, one can find tighter upper bounds for small values of n. If the nodes are not uniformly spaced then a better bound can be found by use of the Chebyshev nodes. The error expression in polynomial interpolation can also be given in terms of divided differences: Theorem 2.5 (Interpolation errors III). If p is the polynomial of degree n that interpolates the function f at nodes x0, x1, . . . , xn, then for any x that is not a node, f (x) − p(x) = f [x0, x1, . . . , xn, x] n (cid:89) (x − xi) i=0 Proof. Let t be any point, other than a node, where f (t) is defined. Let q be the polynomial of degree ≤ n + 1 that interpolates f at x0, x1, . . . , xn, t. By the Newton form of the interpolation formula [Equation (1.7.4)], we have q(x) = p(x) + f [x0, x1, . . . , xn, t] n (cid:89) (x − xi) i=0 2. PROBLEMS Since q(t) = f (t), this yields at once f (t) = p(t) + f [x0, x1, . . . , xn, t] n (cid:89) (t − xi) i=0 The following theorem shows that there is a relationship between divided differences and derivatives. Theorem 2.6 (Divided differences and derivatives). If f (n) is continuous on [a, b] and if x0, x1, . . . , xn are any n + 1 distinct points in [a, b], then for some ξ in (a, b), f [x0, x1, . . . , xn] = 1 n! f (n)(ξ) Proof. Let p be the polynomial of degree ≤ n−1 that interpolates f at x0, x1, . . . , xn−1. By Theorem 2.2 on interpolation errors, there is a point ξ such that f (xn) − p(xn) = 1 n! f (n)(ξ) n−1 (cid:89) i=0 (xn − xi) By Theorem (2.5) on interpolation errors, we obtain f (xn) − p(xn) = f [x0, x1, . . . 
, xn−1, xn] n−1 (cid:89) (xn − xi) i=0 As an immediate consequence of this theorem, we observe that all high-order divided differences are zero for a polynomial. Corollary 2.7 (Divided Differences Corollary). If f is a polynomial of degree n, then all of the divided differences f [x0, x1, . . . , xi] are zero for i ≥ n + 1. Problems (1) Use a divided-difference table to show that the following data can be repre- sented by a polynomial of degree 3: x −2 −1 4 1 y 3 0 11 16 13 −4 1 2 (2) For nonuniformly distributed nodes a = x0 < x1 < · · · < xn = b, where h = max1≤i≤n (xi − xi−1), show that Inequality (2.3.3) is true. (3) How accurately can we determine sin x by linear interpolation, given a table of sin x to ten decimal places, for x in [0, 2] with h = 0.01? (4) Let the function f (x) = ln x be approximated by an interpolation polynomial of degree 9 with ten nodes uniformly distributed in the interval [1, 2]. What bound can be placed on the error? (5) Suppose cos x is to be approximated by an interpolating polynomial of degree n, using n + 1 equally spaced nodes in the interval [0, 1]. How accurate is the approximation? (Express your answer in terms of n.) How accurate is the approximation when n = 9? For what values of n is the error less than 10−7? 161 □ □ 162 CHAPTER 5. INTERPOLATION (6) In interpolating with n + 1 equally spaced nodes on an interval, we could use xi = a + (2i + 1)h/2, where 0 ≤ i ≤ n − 1 and h = (b − a)/n. What bound can be given now for (cid:81)n i=0 |x − xi| when a ≤ x ≤ b? Note: We are not requiring the endpoints to be nodes. (7) Does every polynomial p of degree at most n obey the following equation? Explain why or why not. p(x) = n (cid:88) p[x0, x1, . . . , xi] i−1 (cid:89) (x − xj) i=0 j=0 Hint: Use the uniqueness of the interpolating polynomial. Computer Problems (1) Using 21 equally spaced nodes on the interval [−5, 5], find the interpolating polynomial p of degree 20 for the function f (x) = (x2 + 1)−1. 
Print the values of f (x) and p(x) at 41 equally spaced points, including the nodes. Observe the large discrepancy between f (x) and p(x). (2) (Continuation) Perform the experiment in the preceding computer problem, using Chebyshev nodes xi = 5 cos(iπ/20), where 0 ≤ i ≤ 20, and nodes xi = 5 cos[(2i + 1)π/42], where 0 ≤ i ≤ 20. Record your conclusions. (3) Let f (x) = max{0, 1 − x}. Sketch the function f . Then find interpolating polynomials p of degrees 2, 4, 8, 16, and 32 to f on the interval [−4, 4], using equally spaced nodes. Print out the discrepancy f (x) − p(x) at 128 equally spaced points. Then redo the problem using Chebyshev nodes. (4) Why are the Chebyshev nodes generally better than equally spaced nodes in i=0(x−xi) that occurs polynomial interpolation? The answer lies in the term (cid:81)n in the error formula. If xi = cos[(2i + 1)π/(2n + 2)], then (cid:12) n (cid:12) (cid:89) (cid:12) (cid:12) (cid:12) i=0 (x − xi) (cid:12) (cid:12) (cid:12) (cid:12) (cid:12) ≤ 2−n for all x ∈ [−1, 1]. Carry out a numerical experiment to test the given inequality for n = 3, 7, 15. 3. Approximations by Spline Functions 3.1. First-Degree and Second-Degree Splines. The history of spline functions is rooted in the work of draftsmen, who often needed to draw a gently turning curve between points on a drawing. This process is called fairing and can be accomplished with a number of ad hoc devices, such as the French curve, made of plastic and presenting a number of curves of different curvature for the draftsman to select. Long strips of wood were also used, being made to pass through the control points by weights laid on the draftsman’s table and attached to the strips. The weights were called ducks and the strips of wood were called splines, even as early as 1891. The elastic nature of the wooden strips allowed them to bend only a little while still passing through the prescribed points. 
The wood was, in effect, solving a differential equation and minimizing the strain energy. The latter is known to be a simple function of the curvature. The mathematical theory of these curves owes much to the early investigators, particularly Isaac Schoenberg in 3. APPROXIMATIONS BY SPLINE FUNCTIONS Figure 10. First-degree spline function. the 1940s and 1950s. Other important names associated with the early development of the subject (i.e., prior to 1964) are Garrett Birkhoff, C. de Boor, J. H. Ahlberg, E. N. Nilson, H. Garabedian, R. S. Johnson, F. Landis, A. Whitney, J. L. Walsh, and J. C. Holladay. The first book giving a systematic exposition of spline theory was the book by Ahlberg, Nilson, and Walsh [1967]. 3.2. First-Degree Spline. A spline function is a function that consists of poly- nomial pieces joined together with certain smoothness conditions. A simple example is the polygonal function (or spline of degree 1), whose pieces are linear polynomials joined together to achieve continuity, as in Figure 9.2. The points t0, t1, . . . , tn at which the function changes its character are termed knots in the theory of splines. Thus, the spline function shown in Figure 10 has eight knots. Such a function appears somewhat complicated when defined in explicit terms. We are forced to write (3.2.1) S(x) = S0(x) S1(x) ... x ∈ [t0, t1] x ∈ [t1, t2] ... Sn−1(x) x ∈ [tn−1, tn] where (3.2.2) Si(x) = aix + bi because each piece of S(x) is a linear polynomial. Such a function S(x) is piecewise lin- ear. If the knots t0, t1, . . . , tn were given and if the coefficients a0, b0, a1, b1, . . . , an−1, bn−1 were all known, then the evaluation of S(x) at a specific x would proceed by first deter- mining the interval that contains x and then using the appropriate linear function for that interval. If the function S defined by Equation (3.2.1) is continuous, we call it a first-degree spline. It is characterized by the following three properties. 
Definition 3.1 (Spline of Degree 1). A function S is called a spline of degree 1 if: (1) The domain of S is an interval [a, b]. (2) S is continuous on [a, b]. (3) There is a partitioning of the interval a = t0 < t1 < · · · < tn = b such that S is a linear polynomial on each subinterval [ti, ti+1]. 163 164 CHAPTER 5. INTERPOLATION Outside the interval [a, b], S(x) is usually defined to be the same function on the left of a as it is on the leftmost subinterval [t0, t1] and the same on the right of b as it is on the rightmost subinterval [tn−1, tn], namely, S(x) = S0(x) when x < a and S(x) = Sn−1(x) when x > b. Continuity of a function f at a point s can be defined by the condition f (x) = lim x→s− Here, limx→s+ means that the limit is taken over x values that converge to s from above s; that is, (x − s) is positive for all x values. Similarly, limx→s− means that the x values converge to s from below. lim x→s+ f (x) = f (s) Example 3.2. Determine whether this function is a first-degree spline function: S(x) = x ∈ [−1, 0] x 1 − x x ∈ (0, 1) 2x − 2 x ∈ [1, 2] Solution: The function is obviously piecewise linear but is not a spline of degree 1 because it is discontinuous at x = 0. Notice that limx→0+ S(x) = limx→0(1 − x) = 1, whereas limx→0− S(x) = limx→0 x = 0. The spline functions of degree 1 can be used for interpolation. Suppose the following table of function values is given: t1 x t0 y y0 y1 · · · · · tn tn There is no loss of generality in supposing that t0 < t1 < . . . < tn because this is only a matter of labeling the knots. The table can be represented by a set of n+1 points in the plane, (t0, y0), (t1, y1),. . ., (tn, yn), and these points have distinct abscissas. Therefore, we can draw a polygonal line through the points without ever drawing a vertical segment. This polygonal line is the graph of a function, and this function is obviously a spline of degree 1. What are the equations of the individual line segments that make up this graph? 
By referring to Figure 11 and using the point-slope form of a line, we obtain (3.2.3) Si(x) = yi + mi(x − ti) on the interval [ti, ti+1], where mi is the slope of the line and is therefore given by the formula mi = yi+1 − yi ti+1 − ti Notice that the function S that we are creating has 2n parameters in it: the n coef- ficients ai and the n constants bi in Equation (3.2.2). On the other hand, exactly 2n conditions are being imposed, since each constituent function Si must interpolate the data at the ends of its subinterval. Thus, the number of parameters equals the number of conditions. For the higher-degree splines, we shall encounter a mismatch in these two numbers; the spline of degree k will have k − 1 free parameters for us to use as we wish in the problem of interpolating at the knots. 3. APPROXIMATIONS BY SPLINE FUNCTIONS Figure 11. First-degree spline: linear Si(x). The form of Equation (3.2.3) is better than that of Equation (3.2.2) for the practical evaluation of S(x) because some of the quantitiesx − ti must be computed in any case simply to determine which subinterval contains x. If t0 ≤ x ≤ tn then the interval [ti, ti+1] containing x is characterized by the fact that x − ti is the first of the quantities x − tn−1, x − tn−2, . . . , x − t0 that is nonnegative. Procedure 35 is a function procedure that utilizes n + 1 table values (ti, yi) in linear arrays (ti) and (yi), assuming that a = t0 < t1 < . . . < tn = b. Given an x value, the routine returns S(x) using Equations (3.2.1) and (3.2.3). If x < t0, then S(x) = y0 + m0(x − t0); if x > tn, then S(x) = yn−1 + mn−1(x − tn−1) . Algorithm 35 function Spline1(n, (ti), (yi), x) for i = n − 1 : −1 : 0 do if x − ti ≥ 0 then break end if end for Spline1 ← yi + (x − ti)[(yi+1 − yi)/(ti+1 − ti)] end function 3.3. Modulus of Continuity. To assess the goodness of fit when we interpolate a function with a first-degree spline, it is useful to have something called the modulus of continuity of a function f . 
Suppose f is defined on an interval [a, b]. The modulus of continuity of f is ω(f ; h) = sup{|f (u) − f (v)| : a ≤ u ≤ v ≤ b, |u − v| ≤ h} Here, sup is the supremum, which is the least upper bound of the given set of real numbers. The quantity ω(f ; h) measures how much f can change over a small interval 165 166 CHAPTER 5. INTERPOLATION of width h. If f is continuous on [a, b], then it is uniformly continuous, and ω(f ; h) will tend to zero as h tends to zero. If f is not continuous, ω(f ; h) will not tend to zero. If f is differentiable on (a, b) (in addition to being continuous on [a, b]) and if f ′(x) is bounded on (a, b), then the Mean Value Theorem can be used to get an estimate of the modulus of continuity: If u and v are as described in the definition of ω(f ; h), then |f (u) − f (v)| = |f ′(c)(u − v)| ≤ M1|u − v| ≤ M1h Here, M1 denotes the maximum of |f ′(x)| as x runs over (a, b). For example, if f (x) = x3 and [a, b] = [1, 4], then we find that ω(f ; h) ≤ 48h. Theorem 3.3 (First-degree Polynomial Accuracy Theorem). If p is the first-degree polynomial that interpolates a function f at the endpoints of an interval [a, b], then with h = b − a, we have |f (x) − p(x)| ≤ ω(f ; h) a ≤ x ≤ b Proof. The linear function p is given explicitly by the formula Å x − a b − a ã Å b − x b − a ã p(x) = f (b) + f (a) Hence, f (x) − p(x) = Å x − a b − a ã [f (x) − f (b)] + Å b − x b − a ã [f (x) − f (a)] Then we have Å x − a b − a Å x − a b − a ïÅ x − a b − a = ω(f ; h) |f (x) − p(x)| ≤ ≤ = ã Å b − x b − a ã ã |f (x) − f (b)| + |f (x) − f (a)| ã Å b − x b − a ãò ω(f ; h) + ω(f ; h) ã Å b − x b − a + ω(f ; h) From this basic result, one can easily prove the following one, simply by applying the basic inequality to each subinterval. Theorem 3.4 (First-degree Spline Accuracy Theorem). Let p be a first-degree spline having knots a = x0 < x1 < · · · < xn = b. 
If p interpolates a function f at these knots, then with h = maxi(xi − xi−1), we have |f (x) − p(x)| ≤ ω(f ; h) a ≤ x ≤ b If f ′ or f ′′ exist and are continuous, then more can be said, namely, |f (x) − p(x)| ≤ M1 |f (x) − p(x)| ≤ M2 h 2 h2 8 a ≤ x ≤ b a ≤ x ≤ b In these estimates, M1 is the maximum value of |f ′(x)| on the interval, and M2 is the maximum of |f ′′(x)|. □ 3. APPROXIMATIONS BY SPLINE FUNCTIONS The first theorem tells us that if more knots are inserted in such a way that the maximum spacing h goes to zero, then the corresponding first-degree spline will converge uniformly to f . Recall that this type of result is conspicuously lacking in the polynomial interpolation theory. In that situation, raising the degree and making the nodes fill up the interval will not necessarily ensure that convergence takes place for an arbitrary continuous function. 3.4. Second-Degree Splines. Splines of degree higher than 1 are more compli- cated. We now take up the quadratic splines. Let’s use the letter Q to remind ourselves that we are considering piecewise quadratic functions. A function Q is a second-degree spline if it has the following properties. Definition 3.5 (Spline of Degree 2). A function Q is called a spline of degree 2 if: (1) The domain of Q is an interval [a, b]. (2) Q and Q′ are continuous on [a, b]. (3) There are points ti (called knots) such that a = t0 < t1 < . . . < tn = b and Q is a polynomial of degree at most 2 on each subinterval [ti, ti+1]. In brief, a quadratic spline is a continuously differentiable piecewise quadratic func- tion, where quadratic includes all linear combinations of the basic functions x (cid:55)→ 1, x, x2. Example 3.6. Determine whether the following function is a quadratic spline: Q(x) = x2 −10 ≤ x ≤ 0 −x2 0 ≤ x ≤ 1 1 − 2x 1 ≤ x ≤ 20 Solution: The function is obviously piecewise quadratic. 
Whether Q and Q′ are contin- uous at the interior knots can be determined as follows: lim x→0− lim x→1− lim x→0− lim x→1− Q(x) = lim x→0− Q(x) = lim x→1− Q′(x) = lim x→0− Q′(x) = lim x→1− x2 = 0 (−x2) = −1 2x = 0 (−2x) = −2 lim x→0+ lim x→1+ lim x→0+ lim x→1+ Q(x) = lim x→0+ Q(x) = lim x→1+ Q′(x) = lim x→0+ Q′(x) = lim x→1+ (−x2) = 0 (1 − 2x) = −1 (−2x) = 0 (−2) = −2 Consequently, Q(x) is a quadratic spline. 3.5. Interpolating Quadratic Spline Q(x). Quadratic splines are not used in applications as often as are natural cubic splines, which are developed in the next sec- tion. However, the derivations of interpolating quadratic and cubic splines are similar enough that an understanding of the simpler second-degree spline theory will allow one to grasp easily the more complicated third-degree spline theory. We want to emphasize that quadratic splines are rarely used for interpolation, and the discussion here is pro- vided only as preparation for the study of higher-order splines, which are used in many applications. 167 168 CHAPTER 5. INTERPOLATION Proceeding now to the interpolation problem, suppose that a table of values has been given: x t0 t2 y y0 y1 y2 t1 · · · · · tn tn We shall assume that the points t0, t1, . . . , tn, which we think of as the nodes for the interpolation problem, are also the knots for the spline function to be constructed. Later, another quadratic spline interpolant is discussed in which the nodes for interpolation are different from the knots. A quadratic spline, as just described, consists of n separate quadratic functions x (cid:55)→ aix2 + bix + ci, one for each subinterval created by the n + 1 knots. Thus, we start with 3n coefficients. On each subinterval [ti, ti+1], the quadratic spline function Qi must satisfy the interpolation conditions Qi(ti) = yi and Qi(ti+1) = yi+1. Since there are n such subintervals, this imposes 2n conditions. The continuity of Q does not add any additional conditions. (Why?) 
However, the continuity of Q′ at each of the interior knots gives n − 1 more conditions. Thus, we have 2n + n − 1 = 3n − 1 conditions, or one condition short of the 3n conditions required. There are a variety of ways to impose this additional condition; for example, Q′(t₀) = 0 or Q′′(t₀) = 0.

We now derive the equations for the interpolating quadratic spline, Q(x). The value of Q′(t₀) is prescribed as the additional condition. We seek a piecewise quadratic function

$$\text{(3.5.1)} \qquad Q(x) = \begin{cases} Q_0(x) & t_0 \le x \le t_1 \\ Q_1(x) & t_1 \le x \le t_2 \\ \ \vdots & \\ Q_{n-1}(x) & t_{n-1} \le x \le t_n \end{cases}$$

which is continuously differentiable on the entire interval [t₀, tₙ] and which interpolates the table; that is, Q(tᵢ) = yᵢ for 0 ≤ i ≤ n. Since Q′ is continuous, we can put zᵢ := Q′(tᵢ). At present, we do not know the correct values of zᵢ; nevertheless, the following must be the formula for Qᵢ:

$$\text{(3.5.2)} \qquad Q_i(x) = \frac{z_{i+1} - z_i}{2(t_{i+1} - t_i)}(x - t_i)^2 + z_i(x - t_i) + y_i$$

To see that this is correct, verify that $Q_i(t_i) = y_i$, $Q_i'(t_i) = z_i$, and $Q_i'(t_{i+1}) = z_{i+1}$. These three conditions define the function Qᵢ uniquely on [tᵢ, tᵢ₊₁] as given in Equation (3.5.2).

Now, for the quadratic spline function Q to be continuous and to interpolate the table of data, it is necessary and sufficient that Qᵢ(tᵢ₊₁) = yᵢ₊₁ for i = 0, 1, . . . , n − 1 in Equation (3.5.2). When this equation is written out in detail and simplified, the result is

$$\text{(3.5.3)} \qquad z_{i+1} = -z_i + 2\left(\frac{y_{i+1} - y_i}{t_{i+1} - t_i}\right) \qquad 0 \le i \le n - 1$$

This equation can be used to obtain the vector $[z_0, z_1, \ldots, z_n]^\top$, starting with an arbitrary value for z₀. We summarize with an algorithm:

Algorithm: Quadratic Spline Interpolation at the Knots
(1) Determine $[z_0, z_1, \ldots, z_n]^\top$ by selecting z₀ arbitrarily and computing z₁, z₂, . . . , zₙ recursively by Formula (3.5.3).
(2) The quadratic spline interpolating function Q is given by Formulas (3.5.1) and (3.5.2).

[3. APPROXIMATIONS BY SPLINE FUNCTIONS]

Figure 12. Subbotin quadratic splines (t₀ = τ₀, t₃ = τ₄).

3.6. Subbotin Quadratic Spline.
A useful approximation process, first proposed by Subbotin [1967], consists of interpolation with quadratic splines, where the nodes for interpolation are chosen to be the first and last knots and the midpoints between the knots. Remember that knots are defined as the points where the spline function is permitted to change in form from one polynomial to another. The nodes are the points where values of the spline are specified. In the Subbotin quadratic spline function, there are n + 2 interpolation conditions and 2(n − 1) conditions from the continuity of Q and Q′. Hence, we have the exact number of conditions needed, 3n, to define the quadratic spline function completely. We outline the theory here, leaving details for the reader to fill in. Suppose that knots a = t0 < t1 < . . . < tn = b have been specified; let the nodes be the points ® τ0 = t0 τi = 1 2 (ti + ti−1) τn+1 = tn 1 ≤ i ≤ n We seek a quadratic spline function Q that has the given knots and takes prescribed values at the nodes: Q(τi) = yi, 0 ≤ i ≤ n + 1 as in Figure 12. The knots create n subintervals, and in each of them, Q can be a different quadratic polynomial. Let us say that on [ti, ti+1], Q is equal to the quadratic polynomial Qi. Since Q is a quadratic spline, it and its first derivative should be continuous. Thus, zi ≡ Q′(ti) is well defined, although as yet we do not know its values. It is easy to see that on [ti, ti+1], our quadratic polynomial can be represented in the form (3.6.1) Qi(x) = yi+1 + 1 2 (zi+1 + zi)(x − τi+1) + 1 2hi (zi+1 − zi)(x − τi+1)2 in which hi = ti+1 − ti. To verify the correctness of Equation (3.6.1), we must check that Qi(τi+1) = yi+1, Q′ i(ti+1) = zi+1. When the polynomial pieces i(ti) = zi, and Q′ 169 170 CHAPTER 5. INTERPOLATION Q0, Q1, . . . , Qn−1 are joined together to form Q, the result may be discontinuous. Hence, we impose continuity conditions at the interior knots: lim x→t− i Qi−1(x) = lim x→t+ i Qi(x) 1 ≤ i ≤ n − 1. 
The reader should carry out this analysis, which leads to (3.6.2) hi−1zi−1 + 3(hi−1 + hi)zi + hizi+1 = 8(yi+1 − yi) 1 ≤ i ≤ n − 1 The first and last interpolation conditions must also be imposed: Q(τ0) = y0 Q(τn+1) = yn+1 These two equations lead to 3h0z0 + h0z1 = 8(y1 − y0) hn−1zn−1 + 3hn−1zn = 8(yn+1 − yn) The system of equations governing the vector z = [z0, z1, . . . , zn]⊤ then can be written in the matrix form 3h0 h0 h0 3(h0 + h1) h1 h1 3(h1 + h2) . . . h2 . . . hn−2 . . . 3(hn−2 + hn−1) hn−1 hn−1 3hn−1 z0 z1 z2 ... zn−1 zn = 8 y1 − y0 y2 − y1 y3 − y2 ... yn − yn−1 yn+1 − yn This system of n + 1 equations in n + 1 unknowns can be conveniently solved by procedure T ri. After the z vector has been obtained, values of Q(x) can be computed from Equation (3.6.1). Problems (1) Determine whether this function is a first-degree spline: S(x) = x 0.5 + 2(x − 0.5) x + 1.5 −1 ≤ x ≤ 0.5 0.5 ≤ x ≤ 2 2 ≤ x ≤ 4 (2) Let t0 < t1 < . . . < tn. Construct first-degree spline functions G0, G1, . . . , Gn by requiring that Gi vanish at t0, t1, . . . , ti−1, ti+1, . . . , tn but that Gi(ti) = 1. Show that the first-degree spline function that interpolates f at t0, t1, . . . , tn is (cid:80)n i=0 f (ti)Gi(x). (3) Prove that the derivative of a quadratic spline is a first-degree spline. (4) Define f (x) = 0 if x < 0 and f (x) = x2 if x ≥ 0. Show that f and f ′ are continuous. Show that any quadratic spline with knots t0, t1, . . . , tn is of the form ax2 + bx + c + n−1 (cid:88) dif (x − ti) i=1 (5) What equations must be solved if a quadratic spline function Q that has knots 2 (ti + ti+1) for t0, t1, . . . , tn is required to take prescribed values at points 1 0 ≤ i ≤ n − 1? 3. COMPUTER PROBLEMS (6) Are these functions quadratic splines? Explain why or why not. ® 0.1x2 0 ≤ x ≤ 1 1 ≤ x ≤ 1.3 (a) Q(x) = 9.3x2 − 18.4x + 9.2 ® −x2 −100 ≤ x ≤ 0 (b) Q(x) = x 0 ≤ x ≤ 100 x −50 ≤ x ≤ 1 x2 1 ≤ x ≤ 2 2 ≤ x ≤ 50 4 (7) Is S(x) = |x| a first-degree spline? Why or why not? 
(c) Q(x) = Computer Problems (1) Rewrite procedure Spline1 so that a binary search is used to find the desired interval. Test the revised code. What are the advantages and/or disadvantages of a binary search compared to the procedure in the notes? A binary search is similar to the bisection method in that we choose tk with k = (i + j)/2 or k = (i + j + 1)/2 and determine whether is in [ti, tk] or [tk, tj]. (2) A piecewise bilinear polynomial that interpolates points (x, y) specified in a rectangular grid is given by p(x, y) = (lijzi+1,j+1 + li+1,j+1zij) − (li+1,jzi,j+1 + li,j+1zi+1,j) (xi+1 − xi)(yj+1 − yj) where lij = (xi − x)(yj − y). Here xi ≤ x ≤ xi+1 and yj ≤ y ≤ yj+1. The given grid (xi, yj) is specified by strictly increasing arrays (xi) and (yj) of length n and m, respectively. The given values zij at the grid points (xi, yj) are contained in the n × m array (zij), shown in Figure 13. Write real function Bi Linear((xi), n, (yj), m, (zij), x, y) to compute the value of p(x, y). Test this routine on a set of 5 × 10 unequally spaced data points. Evaluate Bi Linear at four grid points and five nongrid points. Figure 13 171 172 CHAPTER 5. INTERPOLATION (3) Write an adaptive spline interpolation procedure. The input should be a func- tion f , an interval [a, b], and a tolerance ε. The output should be a set of knots a = t0 < t1 < . . . < tn = b and a set of function values yi = f (ti) such that the first-degree spline interpolating function S satisfies |S(x) − f (x)| ≤ ε whenever x is any point xij = ti + j(ti+1 − tj)/10 for 0 ≤ i ≤ n − 1 and 0 ≤ j ≤ 9. 4. Natural Cubic Splines 4.1. Introduction. The first- and second-degree splines discussed in the preceding section, though useful in certain applications, suffer an obvious imperfection: Their low- order derivatives are discontinuous. 
In the case of the first-degree spline (or polygonal line), this lack of smoothness is immediately evident because the slope of the spline may change abruptly from one value to another at each knot. For the quadratic spline, the discontinuity is in the second derivative and is therefore not so evident. But the curvature of the quadratic spline changes abruptly at each knot, and the curve may not be pleasing to the eye. The general definition of spline functions of arbitrary degree is as follows. Definition 4.1 (Spline of Degree k). A function S is called a spline of degree k if: (1) The domain of S is an interval [a, b]. (2) S, S′, S′′, . . . , S(k−1) are all continuous functions on [a, b]. (3) There are points ti (the knots of S) such that a = t0 < t1 < . . . < tn = b and such that S is a polynomial of degree at most k on each subinterval [ti, ti+1]. Observe that no mention has been made of interpolation in the definition of a spline function. Indeed, splines are such versatile functions that they have many applications other than interpolation. Higher-degree splines are used whenever more smoothness is needed in the approx- imating function. From the definition of a spline function of degree k, we see that such a function will be continuous and have continuous derivatives S′, S′′, . . ., S(k−1). If we want the approximating spline to have a continuous mth derivative, a spline of degree at least m+1 is selected. To see why, consider a situation in which knots t0 < t1 < . . . < tn have been prescribed. Suppose that a piecewise polynomial of degree m is to be defined, with its pieces joined at the knots in such a way that the resulting spline S has m contin- uous derivatives. At a typical interior knot t, we have the following circumstances: To the left of t, S(x) = p(x); to the right of t, S(x) = q(x), where p and q are mth-degree polynomials. The continuity of the mth derivative S(m) implies the continuity of the lower-order derivatives S(m−1), S(m−2), . . 
., S′, S. Therefore, at the knot t, lim x→t− from which we conclude that S(k)(x) = lim x→t+ S(k)(x) 0 ≤ k ≤ m (4.1.1) lim x→t− p(k)(x) = lim x→t+ q(k)(x) 0 ≤ k ≤ m Since p and q are polynomials, their derivatives of all orders are continuous, and so Equation (4.1.1) is the same as p(k)(t) = q(k)(t) 0 ≤ k ≤ m 4. NATURAL CUBIC SPLINES This condition forces p and q to be the same polynomial because by Taylor’s Theorem, p(x) = m (cid:88) k=0 1 k! p(k)(t)(x − t)k = m (cid:88) k=0 1 k! q(k)(t)(x − t)k = q(x) This argument can be applied at each of the interior knots t1, t2, . . . , tn−1, and we see that S is simply one polynomial throughout the entire interval from t0 to tn. Thus, we need a piecewise polynomial of degree m + 1 with at most m continuous derivatives to have a spline function that is not just a single polynomial throughout the entire interval. (We already know that ordinary polynomials usually do not serve well in curve fitting.) The choice of degree most frequently made for a spline function is 3. The resulting splines are termed cubic splines. In this case, we join cubic polynomials together in such a way that the resulting spline function has two continuous derivatives everywhere. At each knot, three continuity conditions will be imposed. Since S, S′, and S′′ are continuous, the graph of the function will appear smooth to the eye. Discontinuities, of course, will occur in the third derivative but cannot be easily detected visually, which is one reason for choosing degree 3. Experience has shown, moreover, that using splines of degree greater than 3 seldom yields any advantage. For technical reasons, odd-degree splines behave better than even-degree splines (when interpolating at the knots). Finally, a very elegant theorem, to be proved later, shows that in a certain precise sense, the cubic interpolating spline function is the best interpolating function available. Thus, our emphasis on the cubic splines is well justified. 4.2. Natural Cubic Spline. 
We turn next to interpolating a given table of func- tion values by a cubic spline whose knots coincide with the values of the independent variable in the table. As earlier, we start with the table: t1 x t0 y y0 y1 tn . . . . . . yn The ti’s are the knots and are assumed to be arranged in ascending order. The function S that we wish to construct consists of n cubic polynomial pieces: S(x) = S0(x) S1(x) ... Sn−1(x) t0 ≤ x ≤ t1 t1 ≤ x ≤ t2 ... tn−1 ≤ x ≤ tn In this formula, Si denotes the cubic polynomial that will be used on the subinterval [ti, ti+1]. The interpolation conditions are S(ti) = yi 0 ≤ i ≤ n The continuity conditions are imposed only at the interior knots t1, t2, . . . , tn−1. (Why?) These conditions are written as lim x→t− i S(k)(ti) = lim x→t+ i S(k)(ti) k = 0, 1, 2 173 174 CHAPTER 5. INTERPOLATION It turns out that two more conditions must be imposed to use all the degrees of freedom available. The choice that we make for these two extra conditions is S′′(t0) = S′′(tn) = 0 The resulting spline function is then termed a natural cubic spline. Additional ways to close the system of equations for the spline coefficients are periodic cubic splines and clamped cubic splines. A clamped spline is a spline curve whose slope is fixed at both end points: S′(t0) = d0 and S′(tn) = dn. A periodic cubic spline has S(t0) = S(tn), S′(t0) = S′(tn), and S′′(t0) = S′′(tn). For all continuous differential functions, clamped and natural cubic splines yield the least oscillations about the function f that it interpolates. (4.2.1) We now verify that the number of conditions imposed equals the number of coef- ficients available. There are n + 1 knots and hence n subintervals. On each of these subintervals, we shall have a different cubic polynomial. Since a cubic polynomial has four coefficients, a total of 4n coefficients are available. 
As for the conditions imposed, we have specified that within each interval the interpolating polynomial must go through two points, which gives 2n conditions. The continuity adds no additional conditions. The first and second derivatives must be continuous at the n − 1 interior points, for 2(n − 1) more conditions. The second derivatives must vanish at the two endpoints for a total of 2n + 2(n − 1) + 2 = 4n conditions. Example 4.2. Derive the equations of the natural cubic interpolating spline for the following table: x −1 0 1 y 1 2 −1 Solution: Our approach is to determine the parameters a, b, c, d, e, f, g, and h so that S(x) is a natural cubic spline, where S(x) = ® S0(s) = ax3 + bx2 + cx + d x ∈ [−1, 0] S1(s) = ex3 + f x2 + gx + h x ∈ [0, 1] where the two cubic polynomials are S0(x) and S1(x). From these interpolation con- ditions, we have interpolation conditions S(−1) = S0(−1) = −a + b − c + d = 1, S(0) = S0(0) = d = 2, S(0) = S1(0) = h = 2, and S(1) = S1(1) = e + f + g + h = −1. Taking the first derivatives, we obtain ® S′ 0(x) = 3ax2 + 2bx + c 1(x) = 3ex2 + 2f x + g S′ 0(0) = S′ S′(x) = From the continuity condition of S′, we have S′ taking the second derivatives, we obtain ® S′′ S′′ 1(0), and we set c = g. Next 0 (x) = 6ax + 2b 1 (x) = 6ex + 2f S′′(x) = From the continuity condition of S′′, we have S′′ 1 (0), and we let b = f . For S to be a natural cubic spline, we must have S′′ 1 (1) = 0, and we obtain 3a = b and 3e = −f . From all of these equations, we obtain a = −1, b = −3, c = −1, d = 2, e = 1, f = −3, g = −1, and h = 2. 0 (0) = S′′ 0 (−1) = 0 and S′′ 4. NATURAL CUBIC SPLINES 4.3. Algorithm for Natural Cubic Spline. From the previous example, it is evident that we need to develop a systematic procedure for determining the formula for a natural cubic spline, given a table of interpolation values. This is our objective in the material on the next several pages. Since S′′ is continuous, the numbers zi := S′′(ti) 0 ≤ i ≤ n are unambiguously defined. 
We do not yet know the values z1, z2, . . . , zn−1, but, of course, z0 = zn = 0 by Equation (4.2.1). If the zi’s were known, we could construct S as now described. On the interval [ti, ti+1], S′′ is a linear polynomial that takes the values zi and zi+1 at the endpoints. Thus, (4.3.1) S′′ i (x) = zi+1 hi (x − ti) + zi hi (ti+1 − x) with hi = ti+1 − ti for 0 ≤ i ≤ n − 1. To verify that Equation (4.3.1) is correct, notice that S′′ is linear in x. If this is integrated twice, we obtain Si itself: i (ti) = zi, S′′ i (ti+1) = zi+1, and S′′ i Si(x) = zi+1 6hi (x − ti)3 + zi 6hi (ti+1 − x)3 + cx + d where c and d are constants of integration. By adjusting the integration constants, we obtain a form for Si that is easier to work with, namely, (4.3.2) Si(x) = zi+1 6hi (x − ti)3 + zi 6hi (ti+1 − x)3 + Ci(x − ti) + Di(ti+1 − x) where Ci and Di are constants. If we differentiate Equation (4.3.2) twice, we obtain Equation (4.3.1). The interpolation conditions Si(ti) = yi and Si(ti+1) = yi+1 can be imposed now to determine the appropriate values of Ci and Di. The reader should do so and verify that the result is (4.3.3) Si(x) = zi+1 6hi Å yi+1 hi zi 6hi ã (x − ti)3 + hi 6 + − zi+1 (ti+1 − x)3 (x − ti) + Å yi hi − hi 6 zi ã (ti+1 − x) When the values z0, z1, . . . , zn have been determined, the spline function S(x) is ob- tained from equations of this form for S0(x), S1(x), . . . , Sn−1(x). We now show how to determine the zi’s. One condition remains to be imposed – namely, the continuity of S′. At the interior knots ti for 1 ≤ i ≤ n − 1, we must have S′ i(ti), as can be seen in Figure 14. i−1(ti) = S′ We have, from Equation (4.3.3), S′ i(x) = zi+1 2hi (x − ti)2 − zi 2hi (ti+1 − x)2 + yi+1 hi − hi 6 zi+1 − yi hi + hi 6 zi This gives (4.3.4) S′ i(ti) = − hi 6 zi+1 − hi 3 zi + bi 175 176 CHAPTER 5. INTERPOLATION Figure 14. Cubic spline: adjacent pieces Si−1 and Si. 
where (4.3.5) bi = 1 hi (yi+1 − yi) Analogously, we have S′ i−1(ti) = hi−1 6 zi−1 + hi−1 3 zi + bi−1 When these are set equal to each other, the resulting equation can be rearranged as hi−1zi−1 + 2(hi−1 + hi)zi + hizi+1 = 6(bi − bi−1) for 1 ≤ i ≤ n − 1. By letting (4.3.6) ui = 2(hi−1 + hi) vi = 6(bi − bi−1) we obtain a tridiagonal system of equations: (4.3.7) z0 = 0 hi−1zi−1 + uizi + hizi+1 = vi zn = 0 1 ≤ i ≤ n − 1 to be solved for the zi’s. The simplicity of the first and last equations is a result of the natural cubic spline conditions S′′(t0) = S′′(tn) = 0. Now consider System (4.3.7) in matrix form: 1 0 h0 u1 h1 h1 u2 . . . h2 . . . . . . hn−2 un−1 hn−1 0 1 z0 z1 z2 ... zn−1 zn = 0 v1 v2 ... vn−1 0 4. NATURAL CUBIC SPLINES On eliminating the first and last equations, we have u1 h1 h1 u2 . . . h2 . . . . . . hn−3 un−2 hn−2 hn−2 un−1 (4.3.8) z1 z2 ... zn−2 zn−1 = v1 v2 ... vn−2 vn−1 which is a symmetric tridiagonal system of order n − 1. We could use procedure T ri to solve this system. However, we can design an algorithm specifically for it. In Gaussian elimination without pivoting, the forward elimination phase would modify the ui’s and vi’s as follows: (cid:40) h2 i−1 ui−1 ui ← ui − vi ← vi − hi−1vi−1 ui−1 i = 2, 3, . . . , n − 1 The back substitution phase yields ® zn−1 ← vn−1 un−1 zi ← vi−hizi+1 ui i = n − 2, n − 3, . . . , 1 Putting all this together leads to the following algorithm, designed especially for the tridiagonal System (4.3.8). Algorithm: Solving the Natural Cubic Spline Tridiagonal System Directly Given the interpolation points (ti, yi) for i = 0, 1, . . . , n: (1) Compute for i = 0, 1, . . . , n − 1: ® hi = ti+1 − ti bi = 1 hi (yi+1 − yi) (2) Set ® u1 = 2(h0 + h1) v1 = 6(b1 − b0) and compute inductively for i = 2, 3, . . . , n − 1: (cid:40) h2 i−1 ui−1 ui = 2(hi + hi−1) − vi = 6(bi − bi−1) − hi−1vi−1 ui−1 (3) Set ® zn = 0 z0 = 0 and compute inductively for i = n − 1, n − 2, . . . 
, 1: zi = vi − hizi+1 ui This algorithm conceivably could fail because of divisions by zero in steps 2 and 3. 177 178 CHAPTER 5. INTERPOLATION Therefore, let us prove that ui ̸= 0 for all i. It is clear that u1 > h1 > 0. ui−1 > hi−1, then ui > hi because ui = 2(hi + hi−1) − h2 i−1 ui−1 > 2(hi + hi−1) − hi−1 > hi Then by induction, ui > 0 for i = 1, 2, . . . , n − 1. Equation (4.3.3) is not the best computational form for evaluating the cubic poly- nomial Si(x). We would prefer to have it in the form (4.3.9) Si(x) = Ai + Bi(x − ti) + Ci(x − ti)2 + Di(x − ti)3 because nested multiplication can then be utilized. Notice that Equation (4.3.9) is the Taylor expansion of Si about the point ti. Hence, Ai = Si(ti), Bi = S′ i(ti), Ci = 1 2 S′′ i (ti), Di = 1 6 S′′′ i (ti) Therefore, Ai = yi and Ci = zi/2. The coefficient of x3 in Equation (4.3.9) is Di, whereas the coefficient of x3 in Equation (4.3.3) is (zi+1 − zi)/6hi. Therefore, Di = 1 6hi (zi+1 − zi) Finally, Equation (4.3.4) provides the value of S′ i(ti), which is Bi = − hi 6 zi+1 − hi 3 zi + 1 hi (yi+1 − yi) Thus, the nested form of Si(x) is (4.3.10) Si(x) = yi + (x − ti) Å Bi + (x − ti) Å zi 2 + 1 6hi (x − ti)(zi+1 − zi) ãã 4.4. Pseudocode for Natural Cubic Spline. We now write routines for deter- mining a natural cubic spline based on a table of values and for evaluating this function at a given value. First, we use Algorithm 36 for directly solving the tridiagonal System (4.3.8). This procedure, called Spline3 Coef , takes n + 1 table values (ti, yi) in arrays (ti) and (yi) and computes the zi’s, storing them in array (zi). Intermediate (working) arrays (hi), (bi), (ui), and (vi) are needed. Now a procedure called Spline3 Eval is written for evaluating Equation (4.3.10), the natural cubic spline function S(x), for x a given value. 
The procedure Spline3 Eval first determines the interval [ti, ti+1] that contains x and then evaluates Si(x) using the nested form of this cubic polynomial: The function Spline3 Eval can be used repeatedly with different values of x after one call to procedure Spline3 Coef . For example, this would be the procedure when plotting a natural cubic spline curve. Since procedure Spline3 Coef stores the solution of the tridiagonal system corresponding to a particular spline function in the array (zi), the arguments n, (ti), (yi), and (zi) must not be altered between repeated uses of Spline3 Eval. If 4. NATURAL CUBIC SPLINES Algorithm 36 function Spline3 Coef(n, (ti), (yi), (zi)) for i = 0 : n − 1 do hi ← ti+1 − ti bi ← (yi+1 − yi)/hi end for u1 ← 2(h0 + h1) v1 ← 6(b1 − b0) for i = 2 : n − 1 do ui ← 2(hi + hi−1) − h2 vi ← 6(bi − bi−1) − hi−1vi−1/ui−1 i−1/ui−1 end for zn ← 0 for i = n − 1 : −1 : 1 do zi ← (vi − hizi+1)/ui end for z0 ← 0 end function Algorithm 37 function Spline3 Eval(n, (ti), (yi), (zi), x) for i = n − 1 : −1 : 0 do if x − ti ≥ 0 then break end if end for h ← ti+1 − ti tmp ← (zi/2) + (x − ti)(zi+1 − zi)/(6h) tmp ← −(h/6)(zi+1 + 2zi) + (yi+1 − yi)/h + (x − ti)(tmp) Spline3 Eval ← yi + (x − ti)(tmp) end function 4.5. Smoothness Property. Why do spline functions serve the needs of data fit- ting better than ordinary polynomials? To answer this, one should understand that interpolation by polynomials of high degree is often unsatisfactory because polynomials may exhibit wild oscillations. Polynomials are smooth in the technical sense of possess- ing continuous derivatives of all orders, whereas in this sense, spline functions are not smooth. Wild oscillations in a function can be attributed to its derivatives being very large. Consider the function whose graph is shown in Figure 15. The slope of the chord that joins the points p and q is very large in magnitude. 
By the Mean-Value Theorem, the slope of that chord is the value of the derivative at some point between p and q. Thus, the derivative must attain large values. Indeed, somewhere on the curve between p and q, there is a point where f ′(x) is large and negative. Similarly, between q and r, there is a point where f ′(x) is large and positive. Hence, there is a point on the curve between 179 180 CHAPTER 5. INTERPOLATION Figure 15. Wildly oscillating function. p and r where f ′′(x) is large. This reasoning can be continued to higher derivatives if there are more oscillations. This is the behavior that spline functions do not exhibit. In fact, the following result shows that from a certain point of view, natural cubic splines are the best functions to use for curve fitting. Theorem 4.3 (Cubic Spline Smoothness Theorem). If S is the natural cubic spline function that interpolates a twice-continuously differentiable function f at knots a = t0 < t1 < . . . < tn = b, then (cid:90) b [S′′(x)]2 dx ≤ (cid:90) b [f ′′(x)]2 dx a Proof. To verify the assertion about [S′′(x)]2, we let a g(x) = f (x) − S(x) so that g(ti) = 0 for 0 ≤ i ≤ n, and f ′′ = S′′ + g′′ Now (cid:90) b (f ′′)2 dx = (cid:90) b (S′′)2 dx + (cid:90) b (g′′)2 dx + 2 (cid:90) b S′′g′′ dx a If the last integral were 0, we would be finished because then a a a (cid:90) b (f ′′)2 dx = (cid:90) b (S′′)2 dx + (cid:90) b (g′′)2 dx′ ≥ (cid:90) b (S′′)2 dx a a a a We apply the technique of integration by parts to the integral in question to show that it is 0. We have (cid:90) b a S′′g′′ dx = S′′g′(cid:12) (cid:12) (cid:12) b a − (cid:90) b a S′′′g′ dx = − (cid:90) b a S′′′g′ dx 4. PROBLEMS Here, use has been made of the fact that S is a natural cubic spline; that is, S′′(a) = 0 and S′′(b) = 0. Continuing, we have (cid:90) b S′′′g′ dx = n−1 (cid:88) (cid:90) ti+1 S′′′g′ dx a i=0 ti Since S is a cubic polynomial in each interval [ti, ti+1], its third derivative there is a constant, say ci. 
So (cid:90) b S′′′g′ dx = n−1 (cid:88) ci (cid:90) ti+1 g′ dx = n−1 (cid:88) ci[g(ti+1) − g(ti)] = 0 a i=0 ti i=0 because g vanishes at every knot. The interpretation of the integral inequality in the theorem is that the average value of [S′′(x)]2 on the interval [a, b] is never larger than the average value of this expression with any twice-continuous function f that agrees with S at the knots. The quantity [f ′′(x)]2 is closely related to the curvature of the function f . Problems (1) Determine the parameters a, b, c, d, and e such that S is a natural cubic spline: ® a + b(x − 1) + c(x − 1)2 + d(x − 1)3 x ∈ [0, 1] x ∈ [1, 2] S(x) = (x − 1)3 + ex2 − 1 (2) Determine the values of a, b, c, and d such that f is a cubic spline and such that (cid:82) 2 0 [f ′′(x)]2 dx is a minimum: ® 3 + x − 9x3 f (x) = a + b(x − 1) + c(x − 1)2 + d(x − 1)3 0 ≤ x ≤ 1 1 ≤ x ≤ 2 (3) Suppose S(x) is an mth-degree interpolating spline function over the interval [a, b] with n + 1 knots a = t0 < t1 < . . . < tn = b. (a) How many conditions are needed to define S(x) uniquely over [a, b]? (b) How many conditions are defined by the interpolation conditions at the knots? (c) How many conditions are defined by the continuity of the derivatives? (d) How many additional conditions are needed so that the total equals the number in part (a)? (4) Determine the coefficients in the function S(x) = ® x3 − 1 −9 ≤ x ≤ 0 ax3 + bx2 + cx + d 0 ≤ x ≤ 5 such that it is a cubic spline that takes the value 2 when x = 1. (5) Assume that a = x0 < x1 < . . . < xm = b. Describe the function f that interpolates a table of values (xi, yi), where 0 ≤ i ≤ m, and that minimizes the expression (cid:82) b a |f ′(x)| dx. 181 □ 182 CHAPTER 5. INTERPOLATION (6) Let knots t0 < t1 < . . . < tn, and let numbers yi and zi be given. Determine formulas for a piecewise cubic function f that has the given knots such that f ′′(x) = f (ti) = yi (0 ≤ i ≤ n), limx→t+ zi (1 ≤ i ≤ n). Why is f not generally a cubic spline? 
f ′′(x) = zi (0 ≤ i ≤ n−1), and limx→t− i i (7) A periodic cubic spline having knots t0, t1, . . . , tn is defined as a cubic spline function S(x) such that S(t0) = S(tn), S′(t0) = S′(tn), and S′′(t0) = S′′(tn). It would be used to fit data that are known to be periodic. Carry out the analysis necessary to obtain a periodic cubic spline interpolant for the table x t0 t1 y y0 y1 · · · · · tn yn assuming that yn = y0. (8) Given a differentiable function f and knots t0 < t1 < · · · < tn, show how to obtain a cubic spline S that interpolates f at the knots and satisfies the end conditions S′(t0) = f ′(t0) and S′(tn) = f ′(tn). Note: This procedure produces a better fit to f when applicable. If f ′ is not known, finite-difference approximations to f ′(t0) and f ′(tn) can be used. Computer Problems (1) Rewrite and test procedure Spline3 Coef using procedure T ri. Use the sym- metry of the (n − 1) × (n − 1) tridiagonal system. (2) Let S be the cubic spline function that interpolates f (x) = (x2 + 1)−1 at 41 equally spaced knots in the interval [−5, 5]. Evaluate S(x)−f (x) at 101 equally spaced points on the interval [0, 5]. (3) Draw a free-form curve on graph paper, making certain that the curve is the graph of a function. Then read values of your function at a reasonable number of points, say, 10 − 50, and compute the cubic spline function that takes those values. Compare the freely drawn curve to the graph of the cubic spline. (4) Write a program to estimate (cid:82) b a f (x) dx, assuming that we know the values of f at only certain prescribed knots a = t0 < t1 < · · · < tn = b. Approximate f first by an interpolating cubic spline, and then compute the integral of it using Equation (4.3.3). (5) Write a procedure to estimate f ′(x) for any x in [a, b], assuming that we know only the values of f at knots a = t0 < t1 < · · · < tn = b. CHAPTER 6 Introduction to Numerical Optimization 1. 
Linear Least Squares Problems[2, Chap.6] This chapter is concerned with methods for solving the algebraic problem min x ∥b − Ax∥2, where the dimensions of the real matrix and vectors are A ∈ Rm×n, x ∈ Rn, b ∈ Rm, m ≥ n. We assume that A has full column rank. Notice that in the overdetermined case, m > n, there is typically no x satisfying Ax = b exactly, even in the absence of roundoff error. The least squares problem arises often in many diverse application fields, especially where data fitting is required. Instances arise in machine learning, computer vision, and computer graphics applications, to name but a few. In computer vision people may want to match local invariant features of cluttered images under arbitrary rotations, scalings, change of brightness and contrast, and so on. Such actions are parametrized and the best values for the parameters are then sought to match the data. In computer graphics a parametrization of a surface mesh in three dimensions may be sought that yields a morphing of one animal into another, say. In such applications the question of just how to parametrize, or how to generate an efficient predictive model A in the present terminology, is often the crux of the matter and is far from trivial. But once the method of parametrization is determined, what follows is an instance of the problem considered here. Other applications seek to find an approximating function v(t, x) depending on a continuous variable t that fits data pairs (ti, bi), i = 1, . . . , m. 2. Least Squares and the Normal Equations The space spanned by the columns of our m × n matrix A (i.e., all vectors z of the form z = Ay) is generally of dimension at most n, and we further assume that the dimension equals n, so that A has full column rank. In other words, we assume that its columns are linearly independent. Notice also that in the overdetermined case, m > n, b generally does not lie in the range space of A. 2.1. Deriving the normal equations. 
Let us rewrite the problem that we aim to solve as min x 1 2 ∥b − Ax∥2. We have squared the normed expression, thus getting rid of the square root sign, and multiplied it by 1/2: this will not change the minimizer, in the sense that the same solution coefficients xj will be obtained. Notice that we have dropped the subscript 2 183 184 CHAPTER 6. INTRODUCTION TO NUMERICAL OPTIMIZATION Figure 1. Matrices and vectors and their dimensions in l2 data fitting. in the norm notation: there is no other norm here to get confused by. Finally, we define the residual vector as usual by r = b − Ax. Writing these matrix and vectors explicitly we have x = x1 ... xn , A = a11 ... am1 · · · · a1n ... amn , b = b1 ... bm , r = r1 ... rm . Note that the matrix A is m × n, with m > n and perhaps m ≫ n, so it is “long and skinny”, and correspondingly we do not expect r to vanish at the optimum. See Figure 1. We have a minimization problem for a smooth scalar function in several variables, given by min x ψ(x), where ψ(x) = 1 2 ∥r∥2. 2. LEAST SQUARES AND THE NORMAL EQUATIONS The necessary conditions for a minimum are obtained by setting the derivatives of ψ with respect to each unknown xk to zero, yielding ∂ ∂xk ψ(x) = 0, k = 1, . . . , n. Since ψ(x) = 1 2 ∥r∥2 = 1 2 m (cid:88) i=1 Ñ bi − n (cid:88) j=1 aijxj é2 , the conditions for a minimum yield ∂ ∂xk ψ(x) = m (cid:88) i=1 Ñ bi − n (cid:88) j=1 aijxj é (−aik) = 0 for k = 1, 2, . . . , n. The latter expression can be rewritten as m (cid:88) aik n (cid:88) aijxj = m (cid:88) aikbi k = 1, . . . , n. i=1 j=1 i=1 In matrix-vector form this expression looks much simpler; it reads A⊤Ax = A⊤b. This system of n linear equations in n unknowns is called the normal equations. Note that B = A⊤A can be much smaller in size than A; see Figure 1. The matrix B is symmetric positive definite given that A has full column rank. 2.2. Least Squares Solution Uniqueness. 
Is the solution of the normal equa- tions really a minimizer (and not, say, a maximizer) of the least squares norm? and if yes, is it the global minimum? The Least Squares Theorem given on the next page says it is. The answer is affirmative because the matrix B is positive definite. To see this (completing the proof of the theorem), note first that our objective function is a quadratic in n variables. Indeed, we can write ψ(x) = 1 2 (b − Ax)⊤(b − Ax) = 1 2 x⊤Bx − b⊤Ax + 1 2 ∥b∥2. Theorem 2.1 (Least Squares). The least squares problem min x ∥Ax − b∥2, where A has full column rank, has a unique solution that satisfies the normal equations Ä A⊤A ä x = A⊤b. 185 186 CHAPTER 6. INTRODUCTION TO NUMERICAL OPTIMIZATION Figure 2. Discrete least squares approximation. 2.3. Solving via the Normal Equations. It is important to realize that x, which has been our vector argument above, is now specified as the solution for the normal equations, which is indeed the solution of the least squares minimization problem. Furthermore, if A has full column rank, then B = A⊤A is symmetric positive definite. The least squares problem has therefore been reduced, at least in principle, to that of solving an n × n system of linear equations. The beauty does not end here, geometrically speaking. Notice that for the corre- sponding residual at optimum we have A⊤(b − Ax) = A⊤r = 0. Hence we seek a solution satisfying that the residual is orthogonal to the column space of A. Since a picture is better than a thousand words (or at least a thousand bytes), Figure 2 is provided to illustrate the projection. Our solution is given by x = (A⊤A)−1A⊤b. The matrix multiplying b is important it is called the pseudo-inverse of A, enough to have a name and special notation: denoted by A† := (A⊤A)−1A⊤. The solution via the normal equations using direct solvers amounts to the four steps laid out in the algorithm below. Algorithm: Least Squares via Normal Equations (1) Form B = A⊤A and y = A⊤b. 
(2) Compute the Cholesky Factor, i.e., the lower triangular matrix G satisfying B = GG⊤. (3) Solve the lower triangular system Gz = y for z. (4) Solve the upper triangular system G⊤x = z for x. The overall computational work for the first step of the algorithm is approximately mn2 floating point operations (flops); for step 2 it is n3/3 + O(n2) flops, while steps 3 and 4 cost O(n2) flops. This is another case where although operation counts are in general unsatisfactory for measuring true performance, they do deliver the essential result that the main cost here, especially when m ≫ n, is in forming the matrix B = A⊤A. 2. LEAST SQUARES AND THE NORMAL EQUATIONS 2.4. Data Fitting. Generally, data fitting problems arise as follows. We have observed data b and a model function that for any candidate model x provides predicted data. The task is to find x such that the predicted data match the observed data to the extent possible, by minimizing their difference in the least squares sense. In the linear case which we study here, the predicted data are given by Ax. In this context the assumption that A has full column rank does not impose a serious restriction: it just implies that there is no redundancy in the representation of the predicted data, so that for any vector ˆx ∈ Rn there is no other vector ˜x ∈ Rn such that A˜x = Aˆx. Example 2.2. Consider fitting a given data set of m pairs (ti, bi) by a straight line. Thus, we want to find the coefficients x1 and x2 of v(t) = x1 + x2t, such that v(ti) ≈ bi, i = 1, . . . , m. So n = 2 here, and A = t1 1 t2 1 ... ... 1 tm . The components of the normal equations are B11 = m (cid:88) 1 = m, B12 = m (cid:88) 1 · ti = m (cid:88) ti, i=1 i=1 i=1 B21 = B12, B22 = m (cid:88) ti · ti = m (cid:88) t2 i , y1 = m (cid:88) bi, y2 = i=1 m (cid:88) tibi. 
i=1 i=1 i=1 This leads to a system of just two equations given by mx1 + (cid:32) m (cid:88) ti (cid:33) x2 = m (cid:88) bi, (cid:32) m (cid:88) ti (cid:33) x1 + i=1 (cid:32) m (cid:88) t2 i (cid:33) x2 = i=1 m (cid:88) tibi. i=1 i=1 i=1 The solution is written explicitly as the famous formula i=1 bi − (cid:80)m i − ((cid:80)m i=1 t2 i=1 ti i − ((cid:80)m While in a typical course on statistics for the social sciences the regression formulas of Example 2.2 appear as if by magic, here they are a simple by-product of a general treatment – albeit, without the statistical significance. (cid:80)m (cid:80)m i=1 t2 i m (cid:80)m i=1 tibi − (cid:80)m m (cid:80)m i=1 t2 (cid:80)m i=1 tibi i=1 ti)2 (cid:80)m i=1 bi i=1 ti)2 i=1 ti x1 = , m (cid:80)m x2 = , 187 188 CHAPTER 6. INTRODUCTION TO NUMERICAL OPTIMIZATION 2.5. Polynomial Data Fitting. Extending the linear regression formulas to a higher degree polynomial fit, v(t) ≡ pn−1(t) = x1 + x2t + . . . + xntn−1, is straightforward too. Writing for each data point v(ti) = x1 + x2ti + . . . + xntn−1 )x, the matrix A is the extension of the previously encountered Vandermonde matrix given by i = (1, ti, . . . , tn−1 i 1 t1 1 t2 ... ... 1 tm−1 tm 1 t2 1 t2 2 tn−1 1 tn−1 2 ... tn−1 m−1 tn−1 m · · · · · ... · · · · · · A = t2 m−1 t2 m Note that the structure of the matrix A depends on our choice of the basis functions used to describe polynomials. Here is a MATLAB function for best approximation by low order polynomials, using the normal equations: function coefs = lsfit (t , b , n ) % % function coefs = lsfit (t , b , n ) % % Construct coefficients of the polynomial of % degree at most n -1 that best fits data (t , b ) t = t (:) ; b = b (:) ; % make sure t and b are column vectors m = length ( t ) ; % long and skinny A A = ones (m , n ) ; for j =1: n -1 A (: , j +1) = A (: , j ) .* t ; end % normal equations and solution B = A '* A ; y = A '* b ; coefs = B \ y ; Example 2.3. 
Sample the function f (t) = cos(2πt) at 21 equidistant points on the interval [0, 1] and construct best fits by polynomials of degree at most n − 1 for each n = 1, 2, 3, 4, 5. Following is an appropriate MATLAB script: % data m = 21; tt = 0:1/( m -1) :1; bb = cos (2* pi * tt ) ; % find polynomial coefficients for n =1:5 coefs { n } = lsfit ( tt , bb , n ) ; 2. LEAST SQUARES AND THE NORMAL EQUATIONS Figure 3. The first 5 best polynomial approximations to f (t) = cos(2πt) sampled at 0 : .05 : 1. The data values appear as red cir- cles. Clearly, p4 fits the data better than p2, which in turn is a better approximation than p0. Note p2j+1 = p2j. end % Evaluate and plot t = 0:.01:1; z = ones (5 ,101) ; for n =1:5 z (n ,:) = z (n ,:) * coefs { n }( n ) ; for j =n -1: -1:1 z (n ,:) = z (n ,:) .* t + coefs { n }( j ) ; end end plot (t ,z , tt , bb , ' ro ') xlabel ( 't ') ylabel ( ' p_ {n -1} ') The resulting approximants pn−1(t) are plotted in Figure 3. Note that here, due to symmetry, p1(t) = p0(t) and p3(t) = p2(t). So, the degree “at most n − 1” turns out to be equal to n − 2 rather than to n − 1 for odd values of n − 1. 189 190 CHAPTER 6. INTRODUCTION TO NUMERICAL OPTIMIZATION 2.6. Data Fitting vs. Interpolation. Apparently, there is a seeming paradox hidden in our arguments, namely, that we attempt to minimize the residual for a fixed n, n < m, but refuse to simply increase n until n = m. Indeed, this would drive the residual to zero, the resulting scheme being a polynomial that interpolates the data. Why not just interpolate?! The simple answer is that choosing n is part of our modeling efforts, and the ensuing least squares minimization problem is part of the solution process with n already fixed. But there are reasons for choosing n small in the first place. One is hinted at in Example 2.2, namely, that we are trying to find the trend in the data on a long time scale. 
An additional reason for not interpolating the data values is that they may contain measurement errors. Moreover, we may want a model function that depends only on a few parameters xj for ease of manipulation, although we determine them based on all the given data. The solution of the least squares data fitting problem through solving the normal equations has the advantage of being straightforward and efficient. A linear least squares solver is implemented in the MATLAB backslash operator as well as in the MATLAB command polyfit. Replacing the last three lines in our function lsfit by the line coefs = A \ b ; would implement for the same purpose, albeit more enigmatically, an algorithm which in terms of roundoff error accumulation is at least as good. The routine polyfit is even easier to use, as it does not require forming the matrix and the right-hand-side vector; the input consists of the data points and the degree of the required polynomial. 2.7. Data Fitting in other norms. Before moving on let us also mention data fitting in other norms. (1) Using l1 we consider min x ∥b − Ax∥1. Here we would be looking at finding coefficients x such that the sum of absolute values of the deviations is minimized. This norm is particularly useful if we need to automatically get rid of an undue influence of outliers in the data, which are data values that conspicuously deviate from the rest due to measurement error. (2) Using l∞ we consider min x ∥b − Ax∥∞. This is a min-max problem of finding the minimum over x1, x2, . . . , xn of the maximum data deviation. This norm is useful if the worst-case error in the approximation is important and must be kept in check. Both l1 and l∞ best approximations lead to linear programming problems. They are significantly more complex than the problem that we are faced with using least squares yet are very important in many modern areas of application. 
Note that if n = m, then we have one and the same solution regardless of the norm used, because with all three norms the minimum is obtained with x = A−1b, which yields a zero residual. 2. PROBLEMS But when n < m these different norms usually yield significantly different best approximations. The least squares approximation (which is the approximation associated with the l2- norm) is not only the simplest to calculate, it also has a variety of beautiful mathematical properties. Problems (1) The following data were obtained in connection with the values of a certain fictitious material property: t 0.0 1.0 0.5 b 0.9 1.01 1.05 0.97 0.98 0.95 0.01 −0.1 0.02 −0.1 0.0 0.1 0.2 0.3 0.4 0.6 0.7 0.8 0.9 It was then hypothesized that the underlying material property is a piecewise constant function with one break point (i.e., two constant pieces). (a) Plot the data points and decide where (approximately) the break point should be. (b) Find the piecewise constant function which best fits the data by least squares, and plot it, too. (2) (a) Evaluate the function f (t) = .05 sin(1000t) + .5 cos(πt) − .4 sin(10t) at the 101 points given by 0 : .01 : 1. Plot the resulting broken line interpolant. (b) In order to study the slow scale trend of this function, we wish to find a low degree polynomial (degree at most 6) that best approximates f in the least squares norm at the above 101 data points. By studying the figure from part (a) find out the smallest n that would offer a good fit in this sense. (Try to do this without further computing.) (c) Find the best approximating polynomial v of degree n and plot it together with f . What are your observations? (3) Let us synthesize data in the following way. We start with the cubic polynomial q(t) = −11 + 55 3 t − 17 2 t2 + 7 6 t3. Thus, n = 4. This is sampled at 33 equidistant points between 0.9 and 4.1. 
Then we add to these values 30% noise using the random number generator randn in MATLAB, to obtain “the data” which the approximations that you will construct “see”. From here on, no knowledge of q(t), its properties, or its values anywhere is assumed: we pretend we don’t know where the data came from! Your programming task is to pass through these data three approximations: (a) An interpolating polynomial of degree 32. This can be done using the MATLAB function polyfit. You don’t need to know how such interpo- lation works for this exercise. (b) An interpolating cubic spline using the MATLAB function spline. (c) A cubic polynomial which best fits the data in the l2 sense, obtained by our function lsfit. 191 192 CHAPTER 6. INTRODUCTION TO NUMERICAL OPTIMIZATION Plot the data and the obtained approximations. Which of these approximations make sense? Discuss. (4) Often in practice, an approximation of the form u(t) = γ1eγ2t is sought for a data fitting problem, where γ1 and γ2 are constants. Assume given data (t1, z1), (t2, z2), . . ., (tm, zm), where zi > 0, i = 1, 2, . . . , m, and m > 0. (a) Explain in one brief sentence why the techniques introduced in the present chapter cannot be directly applied to find this u(t). (b) Considering instead v(t) = ln u(t) = (ln γ1) + γ2t, it makes sense to define bi = ln zi, i = 1, 2, . . . , m, and then find coefficients x1 and x2 such that v(t) = x1 + x2t is the best least squares fit for the data (t1, b1), (t2, b2), . . ., (tm, bm). Using this method, find u(t) for the data i ti zi 1 0.0 e0.1 2 1.0 e0.9 3 2.0 e2 Bibliography [1] E.W. Cheney, D. Kincaid, Numerical Mathematics and Computing. Sixth Edition. Brooks/Cole, 2008. [2] U. M. Ascher, C. Greif, A First Course in Numerical Methods. SIAM Society for Industrial and Applied Mathematics, Philadelphia. [3] J. H. Mathews, K. D. Fink, Numerical Methods Using MATLAB. Pearson Prentice Hall. 193
JSON:
|
1b5921fdafa4f69a36575f02ca12b831
|
{
"intermediate": 0.2755182683467865,
"beginner": 0.5827831029891968,
"expert": 0.14169862866401672
}
|
44,203
|
reaper function lua to check if an item take's envelope is visible or not
|
2e44c88f3f056a9f12266b9b34d5b4cf
|
{
"intermediate": 0.3886989653110504,
"beginner": 0.2526208460330963,
"expert": 0.35868024826049805
}
|
44,204
|
reaper lua function to check if an item take's envelope is visible or not. brief answer please
|
0077fee7917d3019a47a300029458047
|
{
"intermediate": 0.43299436569213867,
"beginner": 0.2530573904514313,
"expert": 0.31394827365875244
}
|
44,205
|
reaper lua function to check an item's pitch
|
f7f02d536e6328f8cb35b1bf4f19f9b0
|
{
"intermediate": 0.3622778058052063,
"beginner": 0.26175054907798767,
"expert": 0.37597164511680603
}
|
44,206
|
Reaper lua function to check if an items take isn on
|
137712e92281bb1fd30aae9e01fc730f
|
{
"intermediate": 0.35601845383644104,
"beginner": 0.26323360204696655,
"expert": 0.38074788451194763
}
|
44,207
|
how do i add the edge features in to the model do i need to convert it into one hot representation or any other normalization is needed, because the features are string. what are the ways to represent the edge features into the GNN model, because i already having normalized node features with proper normalized scalar values and one hot representation of nodes for the input to GNN model
|
323977340c401e387f94b2bb5b5c9da5
|
{
"intermediate": 0.17387655377388,
"beginner": 0.09134683012962341,
"expert": 0.7347766757011414
}
|
44,208
|
I am using this react effector library and here is my code:
const getAppealsWithFilters = async (
params: Partial<IFilteringAndPaginationParamsOfAppeals>,
): Promise<TFilteredAppeals> => {
const expands: TExpandListForDashboardsTable[] = [
'client',
'current-skill',
'users',
'csi',
];
if (window.omnichatConfig.REACT_APP_ENABLE_RETENTION_VIEW === '1') {
expands.push('retention-end-time');
}
console.log('FETCHING....')
const res = await getListAppealsWithFilters({ ...params, expand: expands });
return res.data;
};
Is there a way for me to add lodash debounce to this request so it would wait 1000 milliseconds before the next request?
|
3cfe635292448d254a5fb690eebfc003
|
{
"intermediate": 0.6795322895050049,
"beginner": 0.21093860268592834,
"expert": 0.10952913761138916
}
|
44,209
|
why doesn't this work? : ""PS C:\Users\bower> conda create -n sd python=3.10 -y && conda activate sd
At line:1 char:35
+ conda create -n sd python=3.10 -y && conda activate sd
+ ~~
The token '&&' is not a valid statement separator in this version.
+ CategoryInfo : ParserError: (:) [], ParentContainsErrorRecordException
+ FullyQualifiedErrorId : InvalidEndOfLine""
|
e22abf280a30f7c9f5c188b8f179ef30
|
{
"intermediate": 0.5089328289031982,
"beginner": 0.36662518978118896,
"expert": 0.12444202601909637
}
|
44,210
|
Write a python code to decompress all tar files in one directory to another directory. Include a progress bar.
|
c4e29817d68e24fe6deec6dd6929eb39
|
{
"intermediate": 0.38191598653793335,
"beginner": 0.23390941321849823,
"expert": 0.38417455554008484
}
|
44,211
|
你作为报关员,如下是出口发票内容,请你提取,始发国、始发港、目的国、目的港、发票号、开票日期、发票开出的公司名称及地址、发票接收公司的名称、发票中的商品名称、商品数量,包装数量、商品单价、总重量、总金额。
提取的内容为发票原文内容。回答请用json格式。
发票内容如下:“CANG ZHOU LIDE MECHANICAL&ELECTRONIC CO.,LTD
NO.12 JIANG JUN ROAD, CHENG DONG INDUSTRIAL PARK
ECONOMIC DEVELOPMENT ZONE ,
NAN PI COUNTY,CANG ZHOU CITY , HE BEI PROVINCE 061500 CHINA TEL:86-317--8560686 FAX:86-317-8664629 Invoice
Delivery Address: Invoice NO:WLS2019-08-02CZ
WILO SALMSON FRANCE S.A.S
PLATEFORME LOGISTIQUE ZONE AUTOROUTIERE NORD Date(FOB):AUG.15, 2019
1005 BD DE LA COMMUNICATION 53950 LOUVERNE Delivery Term: Tianjin to LAVAL CEDEX FRANCE
ATTN:Miguel.douard /33-243595467
PART NO.
Description
P/O NO.
QUANTITY
UNIT PRICE
AMOUNT
( FOB )
4087281
Box cover
40003811
3,840
$0.7531
$2,891.90
Total:
3840
US$2,891.90
Customs Tariff Code: 8413910000
Description:Parts of PUMP
NOTE:
1.Payment Term:Despatch-due after 30 days the 10th next month
2.Packing:In 2 pallets
3. Mode of Shipment: By Sea.
BUYER: SELLER:
WILO SALMSON FRANCE S.A.S CANG ZHOU LIDE MECHANICAL&ELECTRONIC CO.,LTD
Miguel.douard Lin Huaide“
|
cfeb0912916c3e14e81c72dbda396312
|
{
"intermediate": 0.28385427594184875,
"beginner": 0.3540419638156891,
"expert": 0.36210376024246216
}
|
44,212
|
你作为报关员,如下是出口发票内容,请你提取,始发国、始发港、目的国、目的港、发票号、开票日期、发票开出的公司名称及地址、发票接收公司的名称、发票中的商品名称、商品数量,包装数量、商品单价、总重量、总金额。
提取的内容为发票原文内容。回答请用json格式。
发票内容如下:“CANG ZHOU LIDE MECHANICAL&ELECTRONIC CO.,LTD
NO.12 JIANG JUN ROAD, CHENG DONG INDUSTRIAL PARK
ECONOMIC DEVELOPMENT ZONE ,
NAN PI COUNTY,CANG ZHOU CITY , HE BEI PROVINCE 061500 CHINA TEL:86-317--8560686 FAX:86-317-8664629 Invoice
Delivery Address: Invoice NO:WLS2019-08-02CZ
WILO SALMSON FRANCE S.A.S
PLATEFORME LOGISTIQUE ZONE AUTOROUTIERE NORD Date(FOB):AUG.15, 2019
1005 BD DE LA COMMUNICATION 53950 LOUVERNE Delivery Term: Tianjin to
LAVAL CEDEX FRANCE
ATTN:Miguel.douard /33-243595467
PART NO.
Description
P/O NO.
QUANTITY
UNIT PRICE
AMOUNT
( FOB )
4087281
Box cover
40003811
3,840
$0.7531
$2,891.90
Total:
3840
US$2,891.90
Customs Tariff Code: 8413910000
Description:Parts of PUMP
NOTE:
1.Payment Term:Despatch-due after 30 days the 10th next month
2.Packing:In 2 pallets
3. Mode of Shipment: By Sea.
BUYER: SELLER:
WILO SALMSON FRANCE S.A.S CANG ZHOU LIDE MECHANICAL&ELECTRONIC CO.,LTD
Miguel.douard Lin Huaide“
|
4dcf5668116011625d7208f7974054d5
|
{
"intermediate": 0.24545545876026154,
"beginner": 0.4383564591407776,
"expert": 0.31618809700012207
}
|
44,213
|
tras este error <type object 'Slider' has no attribute 'update'>, devuélveme corregido, para evitar el error, el siguiente código:
import os
import shutil
import pathlib
import gradio as gr
import roop.utilities as util
import roop.globals
import ui.globals
from roop.face_util import extract_face_images
from roop.capturer import get_video_frame, get_video_frame_total, get_image_frame
from roop.ProcessEntry import ProcessEntry
from roop.FaceSet import FaceSet
last_image = None
IS_INPUT = True
SELECTED_FACE_INDEX = 0
SELECTED_INPUT_FACE_INDEX = 0
SELECTED_TARGET_FACE_INDEX = 0
input_faces = None
target_faces = None
face_selection = None
selected_preview_index = 0
is_processing = False
list_files_process : list[ProcessEntry] = []
no_face_choices = ["Use untouched original frame","Retry rotated", "Skip Frame"]
def faceswap_tab():
global no_face_choices
with gr.Tab("🎭 Face Swap"):
with gr.Row(variant='panel'):
with gr.Column(scale=2):
with gr.Row():
with gr.Column(min_width=160):
input_faces = gr.Gallery(label="Input faces", allow_preview=True, preview=True, height=128, object_fit="scale-down")
with gr.Accordion(label="Advanced Settings", open=False):
mask_top = gr.Slider(0, 256, value=0, label="Offset Face Top", step=1.0, interactive=True)
mask_bottom = gr.Slider(0, 256, value=0, label="Offset Face Bottom", step=1.0, interactive=True)
bt_remove_selected_input_face = gr.Button("❌ Remove selected", size='sm')
bt_clear_input_faces = gr.Button("💥 Clear all", variant='stop', size='sm')
with gr.Column(min_width=160):
target_faces = gr.Gallery(label="Target faces", allow_preview=True, preview=True, height=128, object_fit="scale-down")
bt_remove_selected_target_face = gr.Button("❌ Remove selected", size='sm')
bt_add_local = gr.Button('Add local files from', size='sm')
local_folder = gr.Textbox(show_label=False, placeholder="/content/", interactive=True)
with gr.Row(variant='panel'):
bt_srcfiles = gr.Files(label='Source File(s)', file_count="multiple", file_types=["image", ".fsz"], elem_id='filelist', height=233)
bt_destfiles = gr.Files(label='Target File(s)', file_count="multiple", file_types=["image", "video"], elem_id='filelist', height=233)
with gr.Row(variant='panel'):
gr.Markdown('')
forced_fps = gr.Slider(minimum=0, maximum=120, value=0, label="Video FPS", info='Overrides detected fps if not 0', step=1.0, interactive=True, container=True)
with gr.Column(scale=2):
previewimage = gr.Image(label="Preview Image", height=576, interactive=False)
with gr.Row(variant='panel'):
fake_preview = gr.Checkbox(label="Face swap frames", value=False)
bt_refresh_preview = gr.Button("🔄 Refresh", variant='secondary', size='sm')
bt_use_face_from_preview = gr.Button("Use Face from this Frame", variant='primary', size='sm')
with gr.Row():
preview_frame_num = gr.Slider(0, 0, value=0, label="Frame Number", step=1.0, interactive=True)
with gr.Row():
text_frame_clip = gr.Markdown('Processing frame range [0 - 0]')
set_frame_start = gr.Button("⬅ Set as Start", size='sm')
set_frame_end = gr.Button("➡ Set as End", size='sm')
with gr.Row(visible=False) as dynamic_face_selection:
with gr.Column(scale=2):
face_selection = gr.Gallery(label="Detected faces", allow_preview=True, preview=True, height=256, object_fit="scale-down")
with gr.Column():
bt_faceselect = gr.Button("☑ Use selected face", size='sm')
bt_cancelfaceselect = gr.Button("Done", size='sm')
with gr.Column():
gr.Markdown(' ')
with gr.Row(variant='panel'):
with gr.Column(scale=1):
selected_face_detection = gr.Dropdown(["First found", "All faces", "Selected face", "All female", "All male"], value="First found", label="Select face selection for swapping")
max_face_distance = gr.Slider(0.01, 1.0, value=0.65, label="Max Face Similarity Threshold")
video_swapping_method = gr.Dropdown(["Extract Frames to media","In-Memory processing"], value="In-Memory processing", label="Select video processing method", interactive=True)
no_face_action = gr.Dropdown(choices=no_face_choices, value=no_face_choices[0], label="Action on no face detected", interactive=True)
vr_mode = gr.Checkbox(label="VR Mode", value=False)
with gr.Column(scale=1):
ui.globals.ui_selected_enhancer = gr.Dropdown(["None", "Codeformer", "DMDNet", "GFPGAN", "GPEN", "Restoreformer"], value="None", label="Select post-processing")
ui.globals.ui_blend_ratio = gr.Slider(0.0, 1.0, value=0.65, label="Original/Enhanced image blend ratio")
with gr.Group():
autorotate = gr.Checkbox(label="Auto rotate horizontal Faces", value=True)
roop.globals.skip_audio = gr.Checkbox(label="Skip audio", value=False)
roop.globals.keep_frames = gr.Checkbox(label="Keep Frames (relevant only when extracting frames)", value=False)
roop.globals.wait_after_extraction = gr.Checkbox(label="Wait for user key press before creating video ", value=False)
with gr.Column(scale=1):
chk_useclip = gr.Checkbox(label="Use Text Masking", value=False)
clip_text = gr.Textbox(label="List of objects to mask and restore back on fake image", value="cup,hands,hair,banana" ,elem_id='tooltip')
gr.Dropdown(["Clip2Seg"], value="Clip2Seg", label="Engine")
bt_preview_mask = gr.Button("👥 Show Mask Preview", variant='secondary')
with gr.Row(variant='panel'):
with gr.Column():
bt_start = gr.Button("▶ Start", variant='primary')
gr.Button("👀 Open Output Folder", size='sm').click(fn=lambda: util.open_folder(roop.globals.output_path))
with gr.Column():
bt_stop = gr.Button("⏹ Stop", variant='secondary')
with gr.Column(scale=2):
gr.Markdown(' ')
with gr.Row(variant='panel'):
with gr.Column():
resultfiles = gr.Files(label='Processed File(s)', interactive=False)
with gr.Column():
resultimage = gr.Image(type='filepath', label='Final Image', interactive=False )
resultvideo = gr.Video(label='Final Video', interactive=False, visible=False)
previewinputs = [preview_frame_num, bt_destfiles, fake_preview, ui.globals.ui_selected_enhancer, selected_face_detection,
max_face_distance, ui.globals.ui_blend_ratio, chk_useclip, clip_text, no_face_action, vr_mode, autorotate]
input_faces.select(on_select_input_face, None, None).then(fn=on_preview_frame_changed, inputs=previewinputs, outputs=[previewimage, mask_top, mask_bottom])
bt_remove_selected_input_face.click(fn=remove_selected_input_face, outputs=[input_faces])
bt_srcfiles.change(fn=on_srcfile_changed, show_progress='full', inputs=bt_srcfiles, outputs=[dynamic_face_selection, face_selection, input_faces])
mask_top.input(fn=on_mask_top_changed, inputs=[mask_top], show_progress='hidden')
mask_bottom.input(fn=on_mask_bottom_changed, inputs=[mask_bottom], show_progress='hidden')
target_faces.select(on_select_target_face, None, None)
bt_remove_selected_target_face.click(fn=remove_selected_target_face, outputs=[target_faces])
forced_fps.change(fn=on_fps_changed, inputs=[forced_fps], show_progress='hidden')
bt_destfiles.change(fn=on_destfiles_changed, inputs=[bt_destfiles], outputs=[preview_frame_num, text_frame_clip], show_progress='hidden').then(fn=on_preview_frame_changed, inputs=previewinputs, outputs=[previewimage, mask_top, mask_bottom], show_progress='full')
bt_destfiles.select(fn=on_destfiles_selected, outputs=[preview_frame_num, text_frame_clip, forced_fps], show_progress='hidden').then(fn=on_preview_frame_changed, inputs=previewinputs, outputs=[previewimage, mask_top, mask_bottom], show_progress='hidden')
bt_destfiles.clear(fn=on_clear_destfiles, outputs=[target_faces])
resultfiles.select(fn=on_resultfiles_selected, inputs=[resultfiles], outputs=[resultimage, resultvideo])
face_selection.select(on_select_face, None, None)
bt_faceselect.click(fn=on_selected_face, outputs=[input_faces, target_faces, selected_face_detection])
bt_cancelfaceselect.click(fn=on_end_face_selection, outputs=[dynamic_face_selection, face_selection])
bt_clear_input_faces.click(fn=on_clear_input_faces, outputs=[input_faces])
bt_add_local.click(fn=on_add_local_folder, inputs=[local_folder], outputs=[bt_destfiles])
bt_preview_mask.click(fn=on_preview_mask, inputs=[preview_frame_num, bt_destfiles, clip_text], outputs=[previewimage])
start_event = bt_start.click(fn=start_swap,
inputs=[ui.globals.ui_selected_enhancer, selected_face_detection, roop.globals.keep_frames, roop.globals.wait_after_extraction,
roop.globals.skip_audio, max_face_distance, ui.globals.ui_blend_ratio, chk_useclip, clip_text,video_swapping_method, no_face_action, vr_mode, autorotate],
outputs=[bt_start, resultfiles])
after_swap_event = start_event.then(fn=on_resultfiles_finished, inputs=[resultfiles], outputs=[resultimage, resultvideo])
bt_stop.click(fn=stop_swap, cancels=[start_event, after_swap_event], queue=False)
bt_refresh_preview.click(fn=on_preview_frame_changed, inputs=previewinputs, outputs=[previewimage, mask_top, mask_bottom])
fake_preview.change(fn=on_preview_frame_changed, inputs=previewinputs, outputs=[previewimage, mask_top, mask_bottom])
preview_frame_num.change(fn=on_preview_frame_changed, inputs=previewinputs, outputs=[previewimage, mask_top, mask_bottom], show_progress='hidden')
bt_use_face_from_preview.click(fn=on_use_face_from_selected, show_progress='full', inputs=[bt_destfiles, preview_frame_num], outputs=[dynamic_face_selection, face_selection, target_faces, selected_face_detection])
set_frame_start.click(fn=on_set_frame, inputs=[set_frame_start, preview_frame_num], outputs=[text_frame_clip])
set_frame_end.click(fn=on_set_frame, inputs=[set_frame_end, preview_frame_num], outputs=[text_frame_clip])
def on_mask_top_changed(mask_offset):
global SELECTED_INPUT_FACE_INDEX
if len(roop.globals.INPUT_FACESETS) > SELECTED_INPUT_FACE_INDEX:
roop.globals.INPUT_FACESETS[SELECTED_INPUT_FACE_INDEX].faces[0].mask_offsets[0] = mask_offset
def on_mask_bottom_changed(mask_offset):
global SELECTED_INPUT_FACE_INDEX
if len(roop.globals.INPUT_FACESETS) > SELECTED_INPUT_FACE_INDEX:
roop.globals.INPUT_FACESETS[SELECTED_INPUT_FACE_INDEX].faces[0].mask_offsets[1] = mask_offset
def on_add_local_folder(folder):
files = util.get_local_files_from_folder(folder)
if files is None:
gr.Warning("Empty folder or folder not found!")
return files
def on_srcfile_changed(srcfiles, progress=gr.Progress()):
from roop.face_util import norm_crop2
global SELECTION_FACES_DATA, IS_INPUT, input_faces, face_selection, last_image
IS_INPUT = True
if srcfiles is None or len(srcfiles) < 1:
return gr.Column.update(visible=False), None, ui.globals.ui_input_thumbs
thumbs = []
for f in srcfiles:
source_path = f.name
if source_path.lower().endswith('fsz'):
progress(0, desc="Retrieving faces from Faceset File", )
unzipfolder = os.path.join(os.environ["TEMP"], 'faceset')
if os.path.isdir(unzipfolder):
files = os.listdir(unzipfolder)
for file in files:
os.remove(os.path.join(unzipfolder, file))
else:
os.makedirs(unzipfolder)
util.mkdir_with_umask(unzipfolder)
util.unzip(source_path, unzipfolder)
is_first = True
face_set = FaceSet()
for file in os.listdir(unzipfolder):
if file.endswith(".png"):
filename = os.path.join(unzipfolder,file)
progress.update()
SELECTION_FACES_DATA = extract_face_images(filename, (False, 0))
for f in SELECTION_FACES_DATA:
face = f[0]
face.mask_offsets = (0,0)
face_set.faces.append(face)
if is_first:
image = util.convert_to_gradio(f[1])
ui.globals.ui_input_thumbs.append(image)
is_first = False
face_set.ref_images.append(get_image_frame(filename))
if len(face_set.faces) > 0:
if len(face_set.faces) > 1:
face_set.AverageEmbeddings()
roop.globals.INPUT_FACESETS.append(face_set)
elif util.has_image_extension(source_path):
progress(0, desc="Retrieving faces from image", )
roop.globals.source_path = source_path
SELECTION_FACES_DATA = extract_face_images(roop.globals.source_path, (False, 0))
progress(0.5, desc="Retrieving faces from image")
for f in SELECTION_FACES_DATA:
face_set = FaceSet()
face = f[0]
face.mask_offsets = (0,0)
face_set.faces.append(face)
image = util.convert_to_gradio(f[1])
ui.globals.ui_input_thumbs.append(image)
roop.globals.INPUT_FACESETS.append(face_set)
progress(1.0)
# old style with selecting input faces commented out
# if len(thumbs) < 1:
# return gr.Column.update(visible=False), None, ui.globals.ui_input_thumbs
# return gr.Column.update(visible=True), thumbs, gr.Gallery.update(visible=True)
return gr.Column.update(visible=False), None, ui.globals.ui_input_thumbs
def on_select_input_face(evt: gr.SelectData):
global SELECTED_INPUT_FACE_INDEX
SELECTED_INPUT_FACE_INDEX = evt.index
def remove_selected_input_face():
global SELECTED_INPUT_FACE_INDEX
if len(roop.globals.INPUT_FACESETS) > SELECTED_INPUT_FACE_INDEX:
f = roop.globals.INPUT_FACESETS.pop(SELECTED_INPUT_FACE_INDEX)
del f
if len(ui.globals.ui_input_thumbs) > SELECTED_INPUT_FACE_INDEX:
f = ui.globals.ui_input_thumbs.pop(SELECTED_INPUT_FACE_INDEX)
del f
return ui.globals.ui_input_thumbs
def on_select_target_face(evt: gr.SelectData):
global SELECTED_TARGET_FACE_INDEX
SELECTED_TARGET_FACE_INDEX = evt.index
def remove_selected_target_face():
if len(roop.globals.TARGET_FACES) > SELECTED_TARGET_FACE_INDEX:
f = roop.globals.TARGET_FACES.pop(SELECTED_TARGET_FACE_INDEX)
del f
if len(ui.globals.ui_target_thumbs) > SELECTED_TARGET_FACE_INDEX:
f = ui.globals.ui_target_thumbs.pop(SELECTED_TARGET_FACE_INDEX)
del f
return ui.globals.ui_target_thumbs
def on_use_face_from_selected(files, frame_num):
global IS_INPUT, SELECTION_FACES_DATA
IS_INPUT = False
thumbs = []
roop.globals.target_path = files[selected_preview_index].name
if util.is_image(roop.globals.target_path) and not roop.globals.target_path.lower().endswith(('gif')):
SELECTION_FACES_DATA = extract_face_images(roop.globals.target_path, (False, 0))
if len(SELECTION_FACES_DATA) > 0:
for f in SELECTION_FACES_DATA:
image = util.convert_to_gradio(f[1])
thumbs.append(image)
else:
gr.Info('No faces detected!')
roop.globals.target_path = None
elif util.is_video(roop.globals.target_path) or roop.globals.target_path.lower().endswith(('gif')):
selected_frame = frame_num
SELECTION_FACES_DATA = extract_face_images(roop.globals.target_path, (True, selected_frame))
if len(SELECTION_FACES_DATA) > 0:
for f in SELECTION_FACES_DATA:
image = util.convert_to_gradio(f[1])
thumbs.append(image)
else:
gr.Info('No faces detected!')
roop.globals.target_path = None
if len(thumbs) == 1:
roop.globals.TARGET_FACES.append(SELECTION_FACES_DATA[0][0])
ui.globals.ui_target_thumbs.append(thumbs[0])
return gr.Row.update(visible=False), None, ui.globals.ui_target_thumbs, gr.Dropdown.update(value='Selected face')
return gr.Row.update(visible=True), thumbs, gr.Gallery.update(visible=True), gr.Dropdown.update(visible=True)
def on_select_face(evt: gr.SelectData): # SelectData is a subclass of EventData
global SELECTED_FACE_INDEX
SELECTED_FACE_INDEX = evt.index
def on_selected_face():
global IS_INPUT, SELECTED_FACE_INDEX, SELECTION_FACES_DATA
fd = SELECTION_FACES_DATA[SELECTED_FACE_INDEX]
image = util.convert_to_gradio(fd[1])
if IS_INPUT:
face_set = FaceSet()
fd[0].mask_offsets = (0,0)
face_set.faces.append(fd[0])
roop.globals.INPUT_FACESETS.append(face_set)
ui.globals.ui_input_thumbs.append(image)
return ui.globals.ui_input_thumbs, gr.Gallery.update(visible=True), gr.Dropdown.update(visible=True)
else:
roop.globals.TARGET_FACES.append(fd[0])
ui.globals.ui_target_thumbs.append(image)
return gr.Gallery.update(visible=True), ui.globals.ui_target_thumbs, gr.Dropdown.update(value='Selected face')
# bt_faceselect.click(fn=on_selected_face, outputs=[dynamic_face_selection, face_selection, input_faces, target_faces])
def on_end_face_selection():
return gr.Column.update(visible=False), None
def on_preview_frame_changed(frame_num, files, fake_preview, enhancer, detection, face_distance, blend_ratio, use_clip, clip_text, no_face_action, vr_mode, auto_rotate):
global SELECTED_INPUT_FACE_INDEX, is_processing
from roop.core import live_swap
mask_offsets = (0,0)
if len(roop.globals.INPUT_FACESETS) > SELECTED_INPUT_FACE_INDEX:
if not hasattr(roop.globals.INPUT_FACESETS[SELECTED_INPUT_FACE_INDEX].faces[0], 'mask_offsets'):
roop.globals.INPUT_FACESETS[SELECTED_INPUT_FACE_INDEX].faces[0].mask_offsets = mask_offsets
mask_offsets = roop.globals.INPUT_FACESETS[SELECTED_INPUT_FACE_INDEX].faces[0].mask_offsets
if is_processing or files is None or selected_preview_index >= len(files) or frame_num is None:
return None, mask_offsets[0], mask_offsets[1]
filename = files[selected_preview_index].name
# time.sleep(0.3)
if util.is_video(filename) or filename.lower().endswith('gif'):
current_frame = get_video_frame(filename, frame_num)
else:
current_frame = get_image_frame(filename)
if current_frame is None:
return None, mask_offsets[0], mask_offsets[1]
if not fake_preview or len(roop.globals.INPUT_FACESETS) < 1:
return util.convert_to_gradio(current_frame), mask_offsets[0], mask_offsets[1]
roop.globals.face_swap_mode = translate_swap_mode(detection)
roop.globals.selected_enhancer = enhancer
roop.globals.distance_threshold = face_distance
roop.globals.blend_ratio = blend_ratio
roop.globals.no_face_action = index_of_no_face_action(no_face_action)
roop.globals.vr_mode = vr_mode
roop.globals.autorotate_faces = auto_rotate
if use_clip and clip_text is None or len(clip_text) < 1:
use_clip = False
roop.globals.execution_threads = roop.globals.CFG.max_threads
current_frame = live_swap(current_frame, roop.globals.face_swap_mode, use_clip, clip_text, SELECTED_INPUT_FACE_INDEX)
if current_frame is None:
return None, mask_offsets[0], mask_offsets[1]
return util.convert_to_gradio(current_frame), mask_offsets[0], mask_offsets[1]
def gen_processing_text(start, end):
return f'Processing frame range [{start} - {end}]'
def on_set_frame(sender:str, frame_num):
global selected_preview_index, list_files_process
idx = selected_preview_index
if list_files_process[idx].endframe == 0:
return gen_processing_text(0,0)
start = list_files_process[idx].startframe
end = list_files_process[idx].endframe
if sender.lower().endswith('start'):
list_files_process[idx].startframe = min(frame_num, end)
else:
list_files_process[idx].endframe = max(frame_num, start)
return gen_processing_text(list_files_process[idx].startframe,list_files_process[idx].endframe)
def on_preview_mask(frame_num, files, clip_text):
from roop.core import preview_mask
global is_processing
if is_processing:
return None
filename = files[selected_preview_index].name
if util.is_video(filename) or filename.lower().endswith('gif'):
current_frame = get_video_frame(filename, frame_num)
else:
current_frame = get_image_frame(filename)
if current_frame is None:
return None
current_frame = preview_mask(current_frame, clip_text)
return util.convert_to_gradio(current_frame)
def on_clear_input_faces():
ui.globals.ui_input_thumbs.clear()
roop.globals.INPUT_FACESETS.clear()
return ui.globals.ui_input_thumbs
def on_clear_destfiles():
roop.globals.TARGET_FACES.clear()
ui.globals.ui_target_thumbs.clear()
return ui.globals.ui_target_thumbs
def index_of_no_face_action(dropdown_text):
global no_face_choices
return no_face_choices.index(dropdown_text)
def translate_swap_mode(dropdown_text):
if dropdown_text == "Selected face":
return "selected"
elif dropdown_text == "First found":
return "first"
elif dropdown_text == "Single face frames only [auto-rotate]":
return "single_face_frames_only"
elif dropdown_text == "All female":
return "all_female"
elif dropdown_text == "All male":
return "all_male"
return "all"
def start_swap( enhancer, detection, keep_frames, wait_after_extraction, skip_audio, face_distance, blend_ratio,
use_clip, clip_text, processing_method, no_face_action, vr_mode, autorotate, progress=gr.Progress(track_tqdm=False)):
from ui.main import prepare_environment
from roop.core import batch_process
global is_processing, list_files_process
if list_files_process is None or len(list_files_process) <= 0:
return gr.Button.update(variant="primary"), None
if roop.globals.CFG.clear_output:
shutil.rmtree(roop.globals.output_path)
prepare_environment()
roop.globals.selected_enhancer = enhancer
roop.globals.target_path = None
roop.globals.distance_threshold = face_distance
roop.globals.blend_ratio = blend_ratio
roop.globals.keep_frames = keep_frames
roop.globals.wait_after_extraction = wait_after_extraction
roop.globals.skip_audio = skip_audio
roop.globals.face_swap_mode = translate_swap_mode(detection)
roop.globals.no_face_action = index_of_no_face_action(no_face_action)
roop.globals.vr_mode = vr_mode
roop.globals.autorotate_faces = autorotate
if use_clip and clip_text is None or len(clip_text) < 1:
use_clip = False
if roop.globals.face_swap_mode == 'selected':
if len(roop.globals.TARGET_FACES) < 1:
gr.Error('No Target Face selected!')
return gr.Button.update(variant="primary"), None
is_processing = True
yield gr.Button.update(variant="secondary"), None
roop.globals.execution_threads = roop.globals.CFG.max_threads
roop.globals.video_encoder = roop.globals.CFG.output_video_codec
roop.globals.video_quality = roop.globals.CFG.video_quality
roop.globals.max_memory = roop.globals.CFG.memory_limit if roop.globals.CFG.memory_limit > 0 else None
batch_process(list_files_process, use_clip, clip_text, processing_method == "In-Memory processing", progress)
is_processing = False
outdir = pathlib.Path(roop.globals.output_path)
outfiles = [item for item in outdir.rglob("*") if item.is_file()]
if len(outfiles) > 0:
yield gr.Button.update(variant="primary"),gr.Files.update(value=outfiles)
else:
yield gr.Button.update(variant="primary"),None
def stop_swap():
roop.globals.processing = False
gr.Info('Aborting processing - please wait for the remaining threads to be stopped')
def on_fps_changed(fps):
global selected_preview_index, list_files_process
if len(list_files_process) < 1 or list_files_process[selected_preview_index].endframe < 1:
return
list_files_process[selected_preview_index].fps = fps
def on_destfiles_changed(destfiles):
global selected_preview_index, list_files_process
if destfiles is None or len(destfiles) < 1:
list_files_process.clear()
return gr.Slider.update(value=0, maximum=0), ''
for f in destfiles:
list_files_process.append(ProcessEntry(f.name, 0,0, 0))
selected_preview_index = 0
idx = selected_preview_index
filename = list_files_process[idx].filename
if util.is_video(filename) or filename.lower().endswith('gif'):
total_frames = get_video_frame_total(filename)
else:
total_frames = 0
list_files_process[idx].endframe = total_frames
if total_frames > 0:
return gr.Slider.update(value=0, maximum=total_frames), gen_processing_text(list_files_process[idx].startframe,list_files_process[idx].endframe)
return gr.Slider.update(value=0, maximum=total_frames), ''
def on_destfiles_selected(evt: gr.SelectData):
global selected_preview_index, list_files_process
if evt is not None:
selected_preview_index = evt.index
idx = selected_preview_index
filename = list_files_process[idx].filename
fps = list_files_process[idx].fps
if util.is_video(filename) or filename.lower().endswith('gif'):
total_frames = get_video_frame_total(filename)
if list_files_process[idx].endframe == 0:
list_files_process[idx].endframe = total_frames
else:
total_frames = 0
if total_frames > 0:
return gr.Slider.update(value=list_files_process[idx].startframe, maximum=total_frames), gen_processing_text(list_files_process[idx].startframe,list_files_process[idx].endframe), fps
return gr.Slider.update(value=0, maximum=total_frames), gen_processing_text(0,0), fps
def on_resultfiles_selected(evt: gr.SelectData, files):
selected_index = evt.index
filename = files[selected_index].name
if util.is_video(filename):
return gr.update(visible=False), gr.update(visible=True, value=filename)
else:
if filename.lower().endswith('gif'):
current_frame = get_video_frame(filename)
else:
current_frame = get_image_frame(filename)
return gr.update(visible=True, value=util.convert_to_gradio(current_frame)), gr.update(visible=False)
def on_resultfiles_finished(files):
selected_index = 0
if files is None or len(files) < 1:
return None, None
filename = files[selected_index].name
if util.is_video(filename):
return gr.update(visible=False), gr.update(visible=True, value=filename)
else:
if filename.lower().endswith('gif'):
current_frame = get_video_frame(filename)
else:
current_frame = get_image_frame(filename)
return gr.update(visible=True, value=util.convert_to_gradio(current_frame)), gr.update(visible=False)
|
5144a8327dd64ee0050c53cdbc1b5867
|
{
"intermediate": 0.3706584572792053,
"beginner": 0.34649017453193665,
"expert": 0.2828514873981476
}
|
44,214
|
error from anaconda: ""(base) C:\Users\bower>conda create -n sd python=3.10 -y; conda activate sd
usage: conda-script.py create [-h] [--clone ENV] (-n ENVIRONMENT | -p PATH) [-c CHANNEL] [--use-local]
[--override-channels] [--repodata-fn REPODATA_FNS] [--experimental {jlap,lock}]
[--no-lock] [--repodata-use-zst | --no-repodata-use-zst] [--strict-channel-priority]
[--no-channel-priority] [--no-deps | --only-deps] [--no-pin] [--copy] [--no-shortcuts]
[--shortcuts-only SHORTCUTS_ONLY] [-C] [-k] [--offline] [--json] [-v] [-q] [-d] [-y]
[--download-only] [--show-channel-urls] [--file FILE] [--no-default-packages]
[--subdir SUBDIR] [--solver {classic,libmamba}] [--dev]
[package_spec ...]
conda-script.py create: error: argument -y/--yes: ignored explicit argument ';'
(base) C:\Users\bower>""
|
c1c2d315fc40f26752cc9002ea9012e5
|
{
"intermediate": 0.3301311433315277,
"beginner": 0.3993262052536011,
"expert": 0.2705426514148712
}
|
44,215
|
//+------------------------------------------------------------------+
//| ProjectName |
//| Copyright 2020, CompanyName |
//| http://www.companyname.net |
//+------------------------------------------------------------------+
#include <Controls\Dialog.mqh>
#include <Controls\Button.mqh>
#include <Trade\PositionInfo.mqh>
#include <Trade\Trade.mqh>
#include <Trade\SymbolInfo.mqh>
#include <Controls\Label.mqh>
#include <Controls\Edit.mqh>
const string ID = "-1002113042792";
const string token = "7152618530:AAGJJC3zdkmCce3B7i11Dn2JDMh7GqpamyM";
const string IDToLicense = "-1002100526472";
#define INDENT_LEFT (11)
#define INDENT_TOP (11)
#define CONTROLS_GAP_X (5)
#define BUTTON_WIDTH (100)
#define BUTTON_HEIGHT (20)
CPositionInfo m_position;
CTrade m_trade;
CSymbolInfo m_symbol;
CLabel m_labelPipsToChange; // Метка для значения PipsToChange
CEdit m_editPipsToChange;
CLabel m_labelPipsToDownChange; // Метка для значения PipsToChange
CEdit m_editPipsToDownChange;
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
class CAppWindowTwoButtons : public CAppDialog
{
private:
CButton m_button1; // the button object
CButton m_button2; // the button object
CLabel m_labelProfit;
CLabel m_labelLots;
CLabel m_labelProfitSell;
CLabel m_labelProfitBuy;
CLabel m_labelProfitPerc;
public:
CAppWindowTwoButtons(void);
~CAppWindowTwoButtons(void);
//--- create
virtual bool Create(const long chart,const string name,const int subwin,const int x1,const int y1,const int x2,const int y2);
//--- chart event handler
virtual bool OnEvent(const int id,const long &lparam,const double &dparam,const string &sparam);
void UpdateProfitLabel(void);
void UpdateLabelLots(void);
void UpdateProfitSell(void);
void UpdateProfitBuy(void);
void OnPipsToChangeEdit(void);
bool CreatePipsToChangeControls(void);
void OnPipsToDownChangeEdit(void);
bool CreatePipsToDownChangeControls(void);
void UpdateProfitLabelPerc(void);
//--- create dependent controls
bool CreateButton1(void);
bool CreateButton2(void);
bool CreateProfitLabel(void);
bool CreateLabelLots(void);
bool CreateProfitSell(void);
bool CreateProfitBuy(void);
bool CreateProfitLabelPerc(void);
//--- handlers of the dependent controls events
void OnClickButton1(void);
void OnClickButton2(void);
};
//+------------------------------------------------------------------+
//| Event Handling |
//+------------------------------------------------------------------+
EVENT_MAP_BEGIN(CAppWindowTwoButtons)
ON_EVENT(ON_CLICK,m_button1,OnClickButton1)
ON_EVENT(ON_CLICK,m_button2,OnClickButton2)
ON_EVENT(ON_CHANGE,m_editPipsToChange,OnPipsToChangeEdit)
ON_EVENT(ON_CHANGE,m_editPipsToDownChange,OnPipsToDownChangeEdit)
ON_EVENT(ON_CHANGE,m_editPipsToChange,OnPipsToChangeEdit)
EVENT_MAP_END(CAppDialog)
//+------------------------------------------------------------------+
//| Constructor |
//+------------------------------------------------------------------+
CAppWindowTwoButtons::CAppWindowTwoButtons(void)
{
}
//+------------------------------------------------------------------+
//| Destructor |
//+------------------------------------------------------------------+
CAppWindowTwoButtons::~CAppWindowTwoButtons(void)
{
}
//+------------------------------------------------------------------+
//| Create |
//+------------------------------------------------------------------+
bool CAppWindowTwoButtons::Create(const long chart,const string name,const int subwin,const int x1,const int y1,const int x2,const int y2)
{
if(!CAppDialog::Create(chart,name,subwin,x1,y1,x2,y2))
return(false);
//--- create dependent controls
if(!CreateButton1() || !CreateButton2() || !CreateProfitLabel() || !CreatePipsToChangeControls() || !CreatePipsToDownChangeControls() || !CreateLabelLots() || !CreateProfitSell() || !CreateProfitBuy() || !CreateProfitLabelPerc())
return(false);
//--- succeed
return(true);
}
//+------------------------------------------------------------------+
//| Global Variable |
//+------------------------------------------------------------------+
CAppWindowTwoButtons ExtDialog;
//+------------------------------------------------------------------+
//| Expert initialization function |
//+------------------------------------------------------------------+
bool CAppWindowTwoButtons::CreateProfitLabelPerc(void)
{
int x1=INDENT_LEFT+150;
int y1=INDENT_TOP+160;
int x2=x1+INDENT_TOP+BUTTON_HEIGHT+CONTROLS_GAP_X; // длина метки может быть больше, чтобы вместить текст
int y2=y1+BUTTON_HEIGHT;
if(!m_labelProfitPerc.Create(0, "LabelProfitPerc", 0, x1, y1, x2, y2))
return(false);
double profit = CalculateTotalProfit();
double TrueProfit = profit - g_initialProfit + profitCloseSell + profitCloseBuy;
TrueProfit = NormalizeDouble(((TrueProfit * 100) / AccountInfoDouble(ACCOUNT_BALANCE)),_Digits);
// Обновляем текст метки с прибылью
string profitText = StringFormat("Прибыль в процентах: %.2f", TrueProfit);
m_labelProfitPerc.Text(profitText);
if(!Add(m_labelProfitPerc))
return(false);
return(true);
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
void CAppWindowTwoButtons::UpdateProfitLabelPerc(void)
{
// Вычисляем текущую прибыль со всех открытых позиций
double profit = CalculateTotalProfit();
double TrueProfit = profit - g_initialProfit + profitCloseSell + profitCloseBuy;
TrueProfit = NormalizeDouble(((TrueProfit * 100) / AccountInfoDouble(ACCOUNT_BALANCE)),_Digits);
// Обновляем текст метки с прибылью
string profitText = StringFormat("Прибыль в процентах: %.2f", TrueProfit);
m_labelProfitPerc.Text(profitText);
Print(AccountInfoDouble(ACCOUNT_BALANCE));
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
bool CAppWindowTwoButtons::CreateProfitSell(void)
{
int x1=INDENT_LEFT;
int y1=INDENT_TOP+BUTTON_HEIGHT+CONTROLS_GAP_X+5;
int x2=x1+INDENT_TOP+BUTTON_HEIGHT+CONTROLS_GAP_X; // длина метки может быть больше, чтобы вместить текст
int y2=y1+BUTTON_HEIGHT+10;
if(!m_labelProfitSell.Create(0, "Label lots Sell", 0, x1, y1, x2, y2))
return(false);
double profitSell = 0;
for(int i=PositionsTotal()-1; i>=0; i--)
{
string symbol = PositionGetSymbol(i);
if(!PositionSelect(symbol))
continue;
if(m_position.SelectByIndex(i))
if(PositionGetInteger(POSITION_TYPE) != ORDER_TYPE_SELL)
continue;
profitSell += m_position.Profit();
}
string ProfitSellText = StringFormat("Прибыль по sell: %.2f", profitSell);
m_labelProfitSell.Text(ProfitSellText);
if(!Add(m_labelProfitSell))
return(false);
return(true);
}
////+------------------------------------------------------------------+
////| |
//
////+------------------------------------------------------------------+
void CAppWindowTwoButtons::UpdateProfitSell(void)
{
double profitSell = 0;
for(int i=PositionsTotal()-1; i>=0; i--)
{
string symbol = PositionGetSymbol(i);
if(!PositionSelect(symbol))
continue;
if(m_position.SelectByIndex(i))
if(PositionGetInteger(POSITION_TYPE) != ORDER_TYPE_SELL)
continue;
profitSell += m_position.Profit();
}
string ProfitSellText = StringFormat("Прибыль по sell: %.2f", profitSell);
m_labelProfitSell.Text(ProfitSellText);
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
bool CAppWindowTwoButtons::CreateProfitBuy(void)
{
int x1=INDENT_LEFT+150;
int y1=INDENT_TOP+BUTTON_HEIGHT+CONTROLS_GAP_X+5;
int x2=x1+INDENT_TOP+BUTTON_HEIGHT+CONTROLS_GAP_X; // длина метки может быть больше, чтобы вместить текст
int y2=y1+BUTTON_HEIGHT+10;
if(!m_labelProfitBuy.Create(0, "Label lots Buy", 0, x1, y1, x2, y2))
return(false);
double profitBuy = 0;
for(int i=PositionsTotal()-1; i>=0; i--)
{
string symbol = PositionGetSymbol(i);
if(!PositionSelect(symbol))
continue;
if(m_position.SelectByIndex(i))
if(PositionGetInteger(POSITION_TYPE) != ORDER_TYPE_BUY)
continue;
profitBuy += m_position.Profit();
}
string ProfitBuyText = StringFormat("Прибыль по buy: %.2f", profitBuy);
m_labelProfitBuy.Text(ProfitBuyText);
if(!Add(m_labelProfitBuy))
return(false);
return(true);
}
////+------------------------------------------------------------------+
////| |
//
////+------------------------------------------------------------------+
void CAppWindowTwoButtons::UpdateProfitBuy(void)
{
double profitBuy = 0;
for(int i=PositionsTotal()-1; i>=0; i--)
{
string symbol = PositionGetSymbol(i);
if(!PositionSelect(symbol))
continue;
if(m_position.SelectByIndex(i))
if(PositionGetInteger(POSITION_TYPE) != ORDER_TYPE_BUY)
continue;
profitBuy += m_position.Profit();
}
string ProfitBuyText = StringFormat("Прибыль по buy: %.2f", profitBuy);
m_labelProfitBuy.Text(ProfitBuyText);
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
bool CAppWindowTwoButtons::CreateLabelLots(void)
{
int x1=INDENT_LEFT;
int y1=INDENT_TOP+BUTTON_HEIGHT+CONTROLS_GAP_X+150;
int x2=x1+INDENT_TOP+BUTTON_HEIGHT+CONTROLS_GAP_X; // длина метки может быть больше, чтобы вместить текст
int y2=y1+BUTTON_HEIGHT+10;
if(!m_labelLots.Create(0, "Label lots", 0, x1, y1, x2, y2))
return(false);
double totalLots = 0.0;
for(int i = PositionsTotal() - 1; i >= 0; i--)
{
totalLots += 1;
}
string LotsText = StringFormat("Кол-во лотов: %.2f", totalLots);
m_labelLots.Text(LotsText);
if(!Add(m_labelLots))
return(false);
return(true);
}
//
////+------------------------------------------------------------------+
////| |
//
////+------------------------------------------------------------------+
void CAppWindowTwoButtons::UpdateLabelLots(void)
{
double totalLots = 0.0;
for(int i = PositionsTotal() - 1; i >= 0; i--)
{
totalLots += 1;
}
string LotsText = StringFormat("Кол-во лотов: %.2f", totalLots);
m_labelLots.Text(LotsText);
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
bool CAppWindowTwoButtons::CreatePipsToDownChangeControls(void)
{
// Создание метки для PipsToChange
if(!m_labelPipsToDownChange.Create(0,"LabelPipsToDownChange",0,10,125,130,130))
return false;
m_labelPipsToDownChange.Text("Пункты на которые изменится цена после безубытка:");
if(!Add(m_labelPipsToDownChange))
return(false);
// Создание поля для ввода PipsToChange
if(!m_editPipsToDownChange.Create(0,"EditPipsToDownChange",0,10,150,60,175))
return false;
if(!m_editPipsToDownChange.ReadOnly(false))
return(false);
m_editPipsToDownChange.Text(IntegerToString(PercentToDown));
if(!Add(m_editPipsToDownChange))
return(false);
return true;
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
void CAppWindowTwoButtons::OnPipsToDownChangeEdit(void)
{
PercentToDown = StringToInteger(m_editPipsToDownChange.Text());
// Дополнительная валидация может потребоваться здесь
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
bool CAppWindowTwoButtons::CreatePipsToChangeControls(void)
{
// Создание метки для PipsToChange
if(!m_labelPipsToChange.Create(0,"LabelPipsToChange",0,10,65,100,10))
return false;
m_labelPipsToChange.Text("Количество пунктов изменения цены:");
if(!Add(m_labelPipsToChange))
return(false);
// Создание поля для ввода PipsToChange
if(!m_editPipsToChange.Create(0,"EditPipsToChange",0,10,85,60,110))
return false;
if(!m_editPipsToChange.ReadOnly(false))
return(false);
m_editPipsToChange.Text(IntegerToString(PipsToChange));
if(!Add(m_editPipsToChange))
return(false);
return true;
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
void CAppWindowTwoButtons::OnPipsToChangeEdit(void)
{
PipsToChange = StringToInteger(m_editPipsToChange.Text());
// Дополнительная валидация может потребоваться здесь
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
bool CAppWindowTwoButtons::CreateProfitLabel(void)
{
int x1=INDENT_LEFT+180;
int y1=INDENT_TOP+180;
int x2=x1+INDENT_TOP+BUTTON_HEIGHT+CONTROLS_GAP_X; // длина метки может быть больше, чтобы вместить текст
int y2=y1+BUTTON_HEIGHT;
if(!m_labelProfit.Create(0, "LabelProfit", 0, x1, y1, x2, y2))
return(false);
m_labelProfit.FontSize(10);
double profit = CalculateTotalProfit();
double TrueProfit = profit - g_initialProfit + profitCloseSell + profitCloseBuy;
// Обновляем текст метки с прибылью
string profitText = StringFormat("Прибыль: %.2f", TrueProfit);
m_labelProfit.Text(profitText);
if(!Add(m_labelProfit))
return(false);
return(true);
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
void CAppWindowTwoButtons::UpdateProfitLabel(void)
{
// Вычисляем текущую прибыль со всех открытых позиций
double profit = CalculateTotalProfit();
double TrueProfit = profit - g_initialProfit + profitCloseSell + profitCloseBuy;
// Обновляем текст метки с прибылью
string profitText = StringFormat("Прибыль: %.2f", TrueProfit);
m_labelProfit.Text(profitText);
profitToSend = StringFormat("Прибыль: %.2f", TrueProfit);
profitToLine = TrueProfit;
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
bool CAppWindowTwoButtons::CreateButton1(void)
{
//--- coordinates
int x1=INDENT_LEFT+15; // x1 = 11 pixels
int y1=INDENT_TOP; // y1 = 11 pixels
int x2=x1+BUTTON_WIDTH; // x2 = 11 + 100 = 111 pixels
int y2=y1+BUTTON_HEIGHT; // y2 = 11 + 20 = 32 pixels
//--- create
if(!m_button1.Create(0,"Button1",0,x1,y1,x2,y2))
return(false);
if(!m_button1.Text("Закрыть sell"))
return(false);
if(!Add(m_button1))
return(false);
//--- succeed
return(true);
}
//+------------------------------------------------------------------+
//| Create the "Button2" |
//+------------------------------------------------------------------+
bool CAppWindowTwoButtons::CreateButton2(void)
{
//--- coordinates
int x1=INDENT_LEFT+160; // x1 = 11 + 2 * (100 + 5) = 221 pixels
int y1=INDENT_TOP; // y1 = 11 pixels
int x2=x1+BUTTON_WIDTH; // x2 = 221 + 100 = 321 pixels
int y2=y1+BUTTON_HEIGHT; // y2 = 11 + 20 = 31 pixels
//--- create
if(!m_button2.Create(0,"Button2",0,x1,y1,x2,y2))
return(false);
if(!m_button2.Text("Закрыть buy"))
return(false);
if(!Add(m_button2))
return(false);
//--- succeed
return(true);
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
input long PipsToChangeint = 5; // Количество пунктов изменения цены
input double InitialLots = 0.1; // Количество лотов для начальной сделки
input double LotChangePercent = 10.0; // Процент изменения количества лотов
ENUM_TIMEFRAMES TimeFrame = Period();
input long PercentToDownInt = 3; //Пункты изменения цены после безубытка
ENUM_TIMEFRAMES TimeLast;
long PipsToChange = PipsToChangeint;
long PercentToDown = PercentToDownInt;
double PriceDown = 0;
double PriceUp = 10000000000;
double LineOfBreak;
double maxOpenPriceLast = 0;
double minOpenPriceLast = 0;
double profitCloseSell = 0.0;
double profitCloseBuy = 0.0;
double g_initialProfit = 0.0;
double currentLots = InitialLots;
double lastPrice = SymbolInfoDouble(_Symbol, SYMBOL_BID);
bool close_s = false;
bool close_b = false;
bool start_b = false;
bool start_s = false;
bool send_b;
bool send_s;
string profitToSend;
double profitToLine;
double priceLine;
bool close_all = false;
bool start = false;
datetime lastBarTime = 0;
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
int sendMessage(string text, string chatID, string botToken)
{
string baseUrl = "https://api.telegram.org";
string headers = "";
string requestURL = "";
string requestHeaders = "";
char resultData[];
char posData[];
int timeout = 200;
requestURL = StringFormat("%s/bot%s/sendmessage?chat_id=%s&text=%s",baseUrl,botToken,chatID,text);
int response = WebRequest("POST",requestURL,headers,timeout,posData,resultData,requestHeaders);
string resultMessage = CharArrayToString(resultData);
return response;
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
int getMessage(string chatID, string botToken)
{
string baseUrl = "https://api.telegram.org";
string headers = "";
string requestURL = "";
string requestHeaders = "";
char resultData[];
char posData[];
int timeout = 200;
string result[];
string sep = ",";
ushort sep_u = StringGetCharacter(sep,0);
string searchPattern = ("text");
string sep2 = "n";
ushort sep2_u = StringGetCharacter(sep2,0);
string result2[];
long accountNumber = AccountInfoInteger(ACCOUNT_LOGIN);
requestURL = StringFormat("%s/bot%s/getChat?chat_id=%s",baseUrl,botToken,chatID);
int response = WebRequest("GET",requestURL,headers,timeout,posData,resultData,requestHeaders);
string resultMessage = CharArrayToString(resultData);
int k = (StringSplit(resultMessage,sep_u,result));
if(k>0)
{
for(int i=0; i<k; i++)
{
if(StringFind(result[i], searchPattern) >= 0)
{
string res = StringSubstr(result[i],8,StringLen(result[i])-10);
int z = StringSplit(res,sep2_u,result2);
if(z>0)
{
for(int j=0; j<z; j++)
{
string finalResult;
int g = StringFind(result2[j],"\\",0);
if(g != -1)
{
finalResult = StringSubstr(result2[j],0,StringLen(result2[j])-1);
}
else
{
finalResult = result2[j];
}
if(finalResult == (string)accountNumber)
{
return true;
}
}
}
}
}
}
string wrongAccess = "Пытались торговать с счёта " + (string)accountNumber;
sendMessage(wrongAccess,ID,token);
return false;
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
double CalculateTotalProfit()
{
double totalProfit = 0.0;
for(int i = PositionsTotal() - 1; i >= 0; i--)
{
if(m_position.SelectByIndex(i))
{
totalProfit += m_position.Profit();
}
}
return totalProfit;
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
int OnInit()
  {
// EA start-up: snapshot the initial basket profit, verify the Telegram-based
// licence, build and run the control dialog, seed the bar timer, open the
// initial BUY/SELL pair and report the input settings to Telegram.
   if(start != true)
     {
      g_initialProfit = CalculateTotalProfit();
     }
// Licence gate: getMessage() scans the Telegram chat for this account number
// and refuses initialisation when it is not listed.
   if(!getMessage(IDToLicense,token))
      return(INIT_FAILED);
   if(!ExtDialog.Create(0,"Закрытие позиций и изменение вводных",0,15,100,380,350))
      return(INIT_FAILED);
//--- run application
   ExtDialog.Run();
// Seed lastBarTime from the most recent bar of the working timeframe.
   MqlRates rates[];
   if(CopyRates(_Symbol, TimeFrame, 0, 1, rates) > 0)
     {
      lastBarTime = rates[0].time;
     }
   else
     {
      Print("Ошибка при получении информации о барах: ", GetLastError());
      return (INIT_FAILED);
     }
// Open the initial hedged pair. OpenOrder() sets send_b/send_s on success,
// so these loops retry until the order is accepted.
// NOTE(review): if the server keeps rejecting the order this while-loop never
// exits and blocks the terminal thread — confirm a retry limit is not needed.
   if(start_b == false)
     {
      while(send_b != true)
        {
         OpenOrder(ORDER_TYPE_BUY, currentLots, "B");
         start_b = true;
        }
     }
   if(start_s == false)
     {
      while(send_s != true)
        {
         OpenOrder(ORDER_TYPE_SELL, currentLots, "S");
         start_s = true;
        }
     }
// First run only: report the effective inputs to Telegram.
   if(start != true)
     {
      ENUM_TIMEFRAMES TimeInt = TimeFrame;
// Values above 16000 appear to be the hour/day ENUM_TIMEFRAMES encoding
// (16384 + hours); convert to minutes — TODO confirm for all timeframes.
      if(TimeInt > 16000)
        {
         TimeInt = (ENUM_TIMEFRAMES)((TimeInt - 16384) * 60);
        }
      string message = StringFormat(
                          "PipsToChange: %ld "
                          "InitialLots: %f "
                          "LotChangePercent: %f "
                          "TimeFrame: %d "
                          "PipsToBreak: %ld "
                          "Symbol: %s "
                          "Номер счета: %lld",
                          PipsToChangeint,
                          InitialLots,
                          LotChangePercent,
                          TimeInt,
                          PercentToDownInt,
                          _Symbol,
                          AccountInfoInteger(ACCOUNT_LOGIN));
      sendMessage(message,ID,token);
      start = true;
      TimeLast = TimeInt;
     }
   return(INIT_SUCCEEDED);
  }
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
void OnDeinit(const int reason)
  {
// EA teardown: clear the chart comment, destroy the control dialog and
// remove the break-even line object drawn by getLine().
//---
   Comment("");
//--- destroy dialog
   ExtDialog.Destroy(reason);
// "Линия безубытка" is the break-even horizontal line created in getLine().
   ObjectDelete(0,"Линия безубытка");
  }
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
// Forward every chart event to the dialog so its buttons and edit fields
// receive user input.
void OnChartEvent(const int id,         // event ID
                  const long& lparam,   // event parameter of the long type
                  const double& dparam, // event parameter of the double type
                  const string& sparam) // event parameter of the string type
  {
   ExtDialog.ChartEvent(id,lparam,dparam,sparam);
  }
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
double NormalizeLot(double lot, double min_lot, double max_lot, double lot_step)
  {
// Snap a requested volume to the broker's allowed lot grid: clamp into
// [min_lot, max_lot] and round DOWN to the nearest lot_step so the resulting
// volume never exceeds the requested size.
//
// BUG FIX: the final rounding previously used _Digits, which is the symbol's
// PRICE precision (e.g. 5 for EURUSD). Volume precision is defined by
// lot_step (e.g. 0.01 -> 2 digits), so derive the digit count from lot_step;
// otherwise residual floating-point error can survive and the trade server
// may reject the volume.
   lot = MathMax(min_lot, lot);
   lot -= fmod(lot - min_lot, lot_step);
   lot = MathMin(max_lot, lot);
// Derive the decimal precision of the volume grid from the step size
// (0.01 -> 2, 0.1 -> 1, 1.0 -> 0); capped at 8 to guard against a zero step.
   int lot_digits = 0;
   double step = lot_step;
   while(step < 1.0 && lot_digits < 8)
     {
      step *= 10.0;
      lot_digits++;
     }
   return NormalizeDouble(lot, lot_digits);
  }
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
void CalculateAndSetLotSize()
  {
// Grow the global lot size by LotChangePercent and snap the result to the
// broker's volume constraints for the current symbol.
   double growth = 1.0 + LotChangePercent / 100.0;
   currentLots *= growth;
   double volMin  = SymbolInfoDouble(_Symbol, SYMBOL_VOLUME_MIN);
   double volMax  = SymbolInfoDouble(_Symbol, SYMBOL_VOLUME_MAX);
   double volStep = SymbolInfoDouble(_Symbol, SYMBOL_VOLUME_STEP);
// Round to the nearest permitted volume.
   currentLots = NormalizeLot(currentLots, volMin, volMax, volStep);
  }
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
double getLine(double profit, string cl_pos)
  {
// Compute the break-even price line for the remaining one-sided basket.
// 'profit' is the amount (in deposit currency) the open positions must
// offset; 'cl_pos' ("buy"/"sell") selects which side was just closed and
// therefore whether the offset is applied to the Ask or the Bid price.
// Side effects: updates the global 'priceLine' and (re)draws the
// "Линия безубытка" horizontal line. Returns the line price (3 digits).
//
// Cleanup: removed dead locals (summ_lots, summ_price, lots, VolLots) that
// were computed but never read.
   double offset = 0;
   double summ_rasst = 0; // signed sum of volume * contract size over open positions
   for(int i = PositionsTotal() - 1; i >= 0; i--)
     {
      if(!m_position.SelectByIndex(i))
         continue;
      double weight = m_position.Volume() * SymbolInfoDouble(Symbol(), SYMBOL_TRADE_CONTRACT_SIZE);
      // BUY positions contribute negatively, SELL positions positively.
      if(PositionGetInteger(POSITION_TYPE) == POSITION_TYPE_BUY)
         summ_rasst -= weight;
      else
         if(PositionGetInteger(POSITION_TYPE) == POSITION_TYPE_SELL)
            summ_rasst += weight;
     }
// BUG FIX: guard the division — with no open positions (or perfectly
// offsetting volumes) summ_rasst is 0 and profit/summ_rasst raised a
// "zero divide" critical error that halted the EA. Fall back to a zero
// offset so the line is drawn at the current market price instead.
   if(summ_rasst != 0.0)
      offset = NormalizeDouble(profit / summ_rasst, 2);
   if(cl_pos == "buy")
     {
      priceLine = SymbolInfoDouble(Symbol(),SYMBOL_ASK) + offset;
     }
   if(cl_pos == "sell")
     {
      priceLine = SymbolInfoDouble(Symbol(),SYMBOL_BID) + offset;
     }
   ObjectCreate(0,"Линия безубытка",OBJ_HLINE,0,0,priceLine);
   ObjectSetInteger(0, "Линия безубытка", OBJPROP_COLOR, clrLime);
   return NormalizeDouble(priceLine, 3);
  }
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
double SubtractPointsDown(double price, long points)
  {
// Shift 'price' DOWN by 'points' symbol points and round the result to the
// symbol's price precision.
   double shifted = price - points * SymbolInfoDouble(_Symbol, SYMBOL_POINT);
   return NormalizeDouble(shifted, (int)SymbolInfoInteger(_Symbol, SYMBOL_DIGITS));
  }
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
double SubtractPointsUp(double price, long points)
  {
// Shift 'price' UP by 'points' symbol points (despite the "Subtract" name,
// this function ADDS the offset) and round to the symbol's price precision.
   double shifted = price + points * SymbolInfoDouble(_Symbol, SYMBOL_POINT);
   return NormalizeDouble(shifted, (int)SymbolInfoInteger(_Symbol, SYMBOL_DIGITS));
  }
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
void CAppWindowTwoButtons::OnClickButton1(void)
  {
// Button 1: close every SELL position, report the action to Telegram, then
// redraw the break-even line and upper guard level for the remaining side.
   for(int i = PositionsTotal() - 1; i >= 0; i--)
     {
      // BUG FIX: the original fell through and closed the position even when
      // SelectByIndex() failed — the 'continue' was nested under both ifs, so
      // a failed selection skipped only the type check, not the close. Also
      // dropped the redundant PositionSelect(symbol) call, which conflicted
      // with the index-based selection, and compare against the position-type
      // enum (POSITION_TYPE_SELL) rather than the order-type enum.
      if(!m_position.SelectByIndex(i))
         continue;
      if(PositionGetInteger(POSITION_TYPE) != POSITION_TYPE_SELL)
         continue;
      profitCloseSell += m_position.Profit();
      m_trade.PositionClose(PositionGetInteger(POSITION_TICKET));
      close_s = true;
     }
   string messButt1 = "sell закрыты " + (string)AccountInfoInteger(ACCOUNT_LOGIN);
   sendMessage(messButt1,ID,token);
// If the BUY side was already closed earlier, the basket is now flat:
// forward the accumulated profit report as well.
   if(close_b == true)
     {
      sendMessage(profitToSend,ID,token);
     }
   LineOfBreak = getLine(profitToLine,"sell");
   PriceUp = SubtractPointsUp(LineOfBreak,PercentToDown);
  }
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
void CAppWindowTwoButtons::OnClickButton2(void)
  {
// Button 2: close every BUY position, report the action to Telegram, then
// redraw the break-even line and lower guard level for the remaining side.
   int totalPositions = PositionsTotal();
   for(int i = totalPositions - 1; i >= 0; i--)
     {
      // BUG FIX: mirror of OnClickButton1 — the 'continue' was nested under
      // both ifs, so a failed SelectByIndex() still fell through and closed
      // the position. Skip explicitly on selection failure, drop the
      // conflicting PositionSelect(symbol) call, and compare against the
      // position-type enum (POSITION_TYPE_BUY) rather than the order-type enum.
      if(!m_position.SelectByIndex(i))
         continue;
      if(PositionGetInteger(POSITION_TYPE) != POSITION_TYPE_BUY)
         continue;
      profitCloseBuy += m_position.Profit();
      m_trade.PositionClose(PositionGetInteger(POSITION_TICKET));
      close_b = true;
     }
   string messButt2 = "buy закрыты " + (string)AccountInfoInteger(ACCOUNT_LOGIN);
   sendMessage(messButt2,ID,token);
// If the SELL side was already closed earlier, the basket is now flat:
// forward the accumulated profit report as well.
   if(close_s == true)
     {
      sendMessage(profitToSend,ID,token);
     }
   LineOfBreak = getLine(profitToLine,"buy");
   PriceDown = SubtractPointsDown(LineOfBreak,PercentToDown);
  }
// NOTE(review): these #property lines declare INDICATOR attributes inside what
// is otherwise an Expert Advisor (OnInit/OnTick/OnDeinit) — they look like
// leftovers from an indicator template; confirm they are intentional.
#property indicator_chart_window
#property indicator_color1 Pink
//+-----------------------------------------------------------------+
//| Expert tick function |
//+-----------------------------------------------------------------+
// Snapshots of the user inputs, used by OnTick() to detect when the operator
// changes PipsToChange / PercentToDown at runtime via the dialog.
long PipsToChangelast = PipsToChange;
long PercentToDownlast = PercentToDown;
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
double GetMaxOpenPrice()
  {
// Highest open price among all current positions; 0.0 when none are open.
   double best = 0.0;
   int total = PositionsTotal();
   for(int i = 0; i < total; i++)
     {
      if(!m_position.SelectByIndex(i))
         continue;
      double p = m_position.PriceOpen();
      if(p > best)
         best = p;
     }
   return best;
  }
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
double GetMinOpenPrice()
  {
// Lowest open price among all current positions; DBL_MAX when none are open.
   double best = DBL_MAX;
   int total = PositionsTotal();
   for(int i = 0; i < total; i++)
     {
      if(!m_position.SelectByIndex(i))
         continue;
      double p = m_position.PriceOpen();
      if(p < best)
         best = p;
     }
   return best;
  }
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
void OnTick()
  {
// Per-tick driver: refreshes the dialog, reports runtime input changes to
// Telegram, closes the whole basket once price crosses the guard levels
// (PriceUp/PriceDown), and — once per bar — pyramids new BUY/SELL orders
// after a sufficiently large price move. State lives in file-level globals
// (close_b/close_s/close_all, send_b/send_s, LineOfBreak, currentLots, ...).
   send_b = false;
   send_s = false;
// Keep the dialog controls and labels up to date.
   ExtDialog.OnPipsToChangeEdit();
   ExtDialog.UpdateProfitLabel();
   ExtDialog.UpdateProfitLabelPerc();
   ExtDialog.OnPipsToDownChangeEdit();
   ExtDialog.UpdateLabelLots();
   if(close_b != true)
     {
      ExtDialog.UpdateProfitBuy();
     }
   if(close_s != true)
     {
      ExtDialog.UpdateProfitSell();
     }
// The operator changed PipsToChange or PercentToDown through the dialog:
// report the new values and move the guard level of the already-closed side.
   if(PercentToDownlast != PercentToDown || PipsToChangelast != PipsToChange)
     {
      string messChanges = StringFormat(
                              "PipsToChange: %ld "
                              "PipsToBreak: %ld "
                              "Номер счета: %lld",
                              PipsToChange,
                              PercentToDown,
                              AccountInfoInteger(ACCOUNT_LOGIN)
                           );
      sendMessage(messChanges,ID,token);
      PipsToChangelast = PipsToChange;
      PercentToDownlast = PercentToDown;
      if(close_b == true)
        {
         PriceDown = SubtractPointsDown(LineOfBreak,PercentToDown);
        }
      else
         if(close_s == true)
           {
            PriceUp = SubtractPointsUp(LineOfBreak,PercentToDown);
           }
     }
   double askPrice = SymbolInfoDouble(_Symbol, SYMBOL_ASK);
   double bidPrice = SymbolInfoDouble(_Symbol, SYMBOL_BID);
   datetime currentTime = TimeCurrent();
// Normalise the chart period to minutes. Values above 16000 appear to be the
// hour/day ENUM_TIMEFRAMES encoding (16384 + hours) — TODO confirm.
   ENUM_TIMEFRAMES Time = Period();
   if(Time > 16000)
     {
      Time = (ENUM_TIMEFRAMES)((Time - 16384) * 60);
     }
// NOTE(review): TimeLast is only assigned in OnInit(), so while Time differs
// from it this message is re-sent on EVERY tick — confirm that is intended.
   if(Time != TimeLast)
     {
      string messTimeframe = StringFormat(
                                "TimeFrame: %d "
                                "Номер счета: %lld",
                                Time,
                                AccountInfoInteger(ACCOUNT_LOGIN)
                             );
      sendMessage(messTimeframe,ID,token);
     }
// Guard-level breach: close every remaining position once and notify Telegram.
// NOTE(review): this loop accumulates ALL closed profits into profitCloseBuy,
// including SELL positions — confirm profitCloseSell should not be updated too.
   if((SymbolInfoDouble(_Symbol, SYMBOL_BID) > PriceUp || SymbolInfoDouble(_Symbol, SYMBOL_ASK) < PriceDown) && close_all != true)
     {
      int totalPositions = PositionsTotal();
      for(int i = totalPositions - 1; i >= 0; i--)
        {
         string symbol = PositionGetSymbol(i);
         if(!PositionSelect(symbol))
            continue;
         profitCloseBuy += m_position.Profit();
         m_trade.PositionClose(PositionGetInteger(POSITION_TICKET));
        }
      close_b = true;
      close_s = true;
      string closeMessage = "Бот закрыл все сделки " + (string)AccountInfoInteger(ACCOUNT_LOGIN);
      sendMessage(closeMessage,ID,token);
      sendMessage(profitToSend,ID,token);
      close_all = true;
     }
// Track the extreme open prices of the basket across ticks.
   double maxOpenPrice = GetMaxOpenPrice();
   double minOpenPrice = GetMinOpenPrice();
   if(maxOpenPrice > maxOpenPriceLast)
     {
      maxOpenPriceLast = maxOpenPrice;
     }
// NOTE(review): this mirrors the max-tracking comparison, so the MINIMUM is
// updated when the new value is GREATER than the stored one — for true
// minimum tracking this should presumably be '<'; confirm intended behaviour.
   if(minOpenPrice > minOpenPriceLast)
     {
      minOpenPriceLast = minOpenPrice;
     }
// Once per bar of the working timeframe (Time is in minutes at this point):
   if(currentTime >= lastBarTime+Time*60)
     {
// Price moved beyond PipsToChange points past the basket extremes — pyramid.
      if(bidPrice - maxOpenPriceLast > PipsToChange * _Point || minOpenPriceLast - askPrice > PipsToChange * _Point)
        {
         // Recalculate the new lot size
         CalculateAndSetLotSize();
         lastPrice = bidPrice; // update the last seen price
         // Open new orders with the new lot size
         if(close_b == false)
           {
// NOTE(review): retries until OpenOrder() sets send_b; a persistent server
// rejection makes this loop spin without exit — confirm a retry cap is not needed.
            while(send_b != true)
              {
               OpenOrder(ORDER_TYPE_BUY, currentLots, "B");
               if(close_s == true)
                 {
                  ObjectDelete(0,"Линия безубытка");
                  LineOfBreak = getLine(profitToLine, "sell");
                  PriceUp = SubtractPointsUp(LineOfBreak,PercentToDown);
                 }
              }
           }
         if(close_s == false)
           {
            while(send_s != true)
              {
               OpenOrder(ORDER_TYPE_SELL, currentLots, "S");
               if(close_b == true)
                 {
                  ObjectDelete(0,"Линия безубытка");
                  LineOfBreak = getLine(profitToLine,"buy");
                  PriceDown = SubtractPointsDown(LineOfBreak,PercentToDown);
                 }
              }
           }
        }
      lastBarTime = lastBarTime+Time*60;
     }
  }
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
void OpenOrder(ENUM_ORDER_TYPE type, double lots, string orderMagic)
  {
// Send a market BUY/SELL order of 'lots' volume at the current Ask/Bid and
// record the outcome in the global send_b/send_s flags so the callers'
// retry loops know whether to try again. 'orderMagic' ("B"/"S") is mixed
// into the request magic number.
   MqlTradeRequest request = {};
   MqlTradeResult result = {};
// Fill the trade-request structure.
   request.action = TRADE_ACTION_DEAL;
   request.symbol = Symbol();
   request.volume = lots;
   request.type = type;
   request.deviation = 1;
// NOTE(review): the concatenation starts with a letter ("B"/"S"), and
// StringToInteger() of a non-numeric prefix presumably yields 0, so the magic
// number is likely always 0 — confirm the intended magic scheme.
   request.magic = StringToInteger(orderMagic + IntegerToString(GetTickCount()));
//request.comment = "Auto trade order";
// Market orders: BUY executes at Ask, SELL at Bid.
   if(type == ORDER_TYPE_BUY)
      request.price = SymbolInfoDouble(_Symbol, SYMBOL_ASK);
   else
      if(type == ORDER_TYPE_SELL)
         request.price = SymbolInfoDouble(_Symbol, SYMBOL_BID);
   if(!OrderSend(request, result))
     {
      Print("OrderSend failed with error #", GetLastError());
      Print("Details: symbol=", request.symbol, ", volume=", request.volume, ", type=", (request.type==ORDER_TYPE_BUY?"BUY":"SELL"), ", price=", request.price);
// Failure: clear the matching send flag so the caller's retry loop runs again.
      if(request.type == ORDER_TYPE_BUY)
        {
         send_b = false;
        }
      else
         if(request.type == ORDER_TYPE_SELL)
           {
            send_s = false;
           }
     }
   else
// Success: mark the corresponding side as sent.
      if(request.type == ORDER_TYPE_BUY)
        {
         send_b = true;
        }
      else
         if(request.type == ORDER_TYPE_SELL)
           {
            send_s = true;
           }
   PrintFormat("retcode=%u deal=%I64u order=%I64u",result.retcode,result.deal,result.order);
  }
//+------------------------------------------------------------------+
//+------------------------------------------------------------------+
у меня есть такая программа, она посылает сообщения в телеграм; как сделать так, чтобы на другом ПК они сразу же обрабатывались и запускали советник
|
19220d2c9ad5505a58fc22ea9e99fa0f
|
{
"intermediate": 0.30215704441070557,
"beginner": 0.4280596971511841,
"expert": 0.2697831690311432
}
|
44,216
|
curl -k -H "Connection: A" show user
|
4c1be2e9f18c4069f0cee4efd0f2ed84
|
{
"intermediate": 0.32343965768814087,
"beginner": 0.3013923168182373,
"expert": 0.37516799569129944
}
|
44,217
|
Pleaaase help :(
Using the datatable library
I have two DF, DF1 (col A, B, C) and DF2, (col, A, B, D)
I want to left join DF1 with DF2 in order to have ALL THE ROWS of DF1, and in front of each, the equivalent in DF2.
The issue is that DF2 contains duplicated by A and B and I need to duplicate them in the resulting DF.
Can anyone help please?
thanks a lot
|
00f64e6f8f96942e8d64eb0329bb3200
|
{
"intermediate": 0.6446285843849182,
"beginner": 0.16077731549739838,
"expert": 0.1945941001176834
}
|
44,218
|
python ping calculation function
|
da391b7cb631e6770df7f31231298d5d
|
{
"intermediate": 0.28824684023857117,
"beginner": 0.3125298321247101,
"expert": 0.39922335743904114
}
|
44,219
|
As I know, most realistic way to colonize stars is to build interstellar highways - chains of (unmanned) space stations for data and energy retranslation and propulsion (e.g. laser beam, or something else) for crewed vessels, possible even with existing technologies, positioned in line toward destination star system, with distance between them measured in light hours. For example, with 12 lhrs on average, it will take about 7300 such stations for 10 light years route. Deployment of such interstellar highway will take significant time at lower sublight speeds (e.g. <=1%), but when it is done, vessels can be sent by such retranslation chain (for both acceleration at department and deceleration at arrival) at much higher speed (in tens of percents of light speed). What do you think about it?
|
646acc0e60fb85a6c9e447f57123340b
|
{
"intermediate": 0.3810722231864929,
"beginner": 0.3085753619670868,
"expert": 0.3103523850440979
}
|
44,220
|
solución para este error:
ValueError: File /private/var/folders/6k/zk46_fz90rs2xmmczhwr7m7r0000gn/T/97e14257638c62a1884cbca12ccef5a85a34c569/FV14enh.png is not in the upload folder and cannot be accessed.
|
52d50e2f1c733154c3791075a37e978d
|
{
"intermediate": 0.3669678568840027,
"beginner": 0.24700209498405457,
"expert": 0.386029988527298
}
|
44,221
|
у меня есть код на пайтон сделай похожий но чтоб он кликал левую кнопку мыши 30 кпс from pynput import keyboard
from pynput.keyboard import Key, Controller
import time
keyboard_controller = Controller()
running = False
def on_press(key):
global running
# Проверяем, нажата ли клавиша Caps Lock
if key == Key.caps_lock:
running = not running # Переключаем состояние
if running:
print("Started pressing 'f'")
else:
print("Stopped pressing 'f'")
def press_f():
while True:
if running:
keyboard_controller.press('f')
keyboard_controller.release('f')
time.sleep(0.0002) # Задержка в 0.2 мс
else:
time.sleep(0.1) # Короткая задержка, чтобы избежать чрезмерной загрузки процессора
# Создаем слушателя клавиатуры
listener = keyboard.Listener(on_press=on_press)
listener.start()
# Запускаем бесконечный цикл нажатия клавиши F
press_f()
|
47bd37703ef20db8d04cf0d17f83b06a
|
{
"intermediate": 0.4227842688560486,
"beginner": 0.3482291102409363,
"expert": 0.22898662090301514
}
|
44,222
|
solucion para error en aplicacion gradio:
ValueError: File /private/var/folders/6k/zk46_fz90rs2xmmczhwr7m7r0000gn/T/97e14257638c62a1884cbca12ccef5a85a34c569/FV14enh.png is not in the upload folder and cannot be accessed.
|
9883aeca42320ab9891a1d3c88195313
|
{
"intermediate": 0.3752252161502838,
"beginner": 0.22122038900852203,
"expert": 0.4035543203353882
}
|
44,223
|
What does this do?
hibernate:
ddl-auto: update
|
6cc699b7f5dc1e3abf0edaefea62c835
|
{
"intermediate": 0.379976749420166,
"beginner": 0.2872591018676758,
"expert": 0.3327641785144806
}
|
44,224
|
⬜⬜⬜⬜⬜⬜⬜⬜⬜⬜
⬜⬜⬜⬜⬜⬜⬜⬜⬜⬜
⬜⬜⬜⬜⬜⬜⬜⬜⬜⬜
⬜⬜⬜⬜⬜⬜⬜⬜⬜⬜
⬜⬜⬜⬜⬜⬜⬜⬜⬜⬜
⬜⬜⬜⬜⬜⬜⬜⬜⬜⬜
⬜⬜⬜⬜⬜⬜⬜⬜⬜⬜
⬜⬜⬜⬜⬜⬜⬜⬜⬜⬜
⬜⬜⬜⬜⬜⬜⬜⬜⬜⬜
⬜⬜⬜⬜⬜🔴⬜⬜⬜⬜
|
9e74d2abc566e702e7e4b3db4fecfa54
|
{
"intermediate": 0.31214767694473267,
"beginner": 0.27883216738700867,
"expert": 0.4090201258659363
}
|
44,225
|
podría solucionarse con algún cambio de codigo en el archivo 'processing_utils.py'?
File "/Users/raul/Downloads/roop-OLD/fast_roop/lib/python3.11/site-packages/gradio/processing_utils.py", line 291, in _move_to_cache
raise ValueError(
ValueError: File /private/var/folders/6k/zk46_fz90rs2xmmczhwr7m7r0000gn/T/97e14257638c62a1884cbca12ccef5a85a34c569/FV14enh.png is not in the upload folder and cannot be accessed
|
491b2a7a41191d820f14746dad3138aa
|
{
"intermediate": 0.4465181827545166,
"beginner": 0.2309294492006302,
"expert": 0.322552353143692
}
|
44,226
|
I use Supabase with my flutter application. To store the image of a user, I use the bytea type. However, when I fetch this image from the database, I get a base 64 string. How can I convert it to bytea, or just any type suitable for displaying an image with N
|
941dfc1c3b25da856c4e7f42553441bd
|
{
"intermediate": 0.5834037661552429,
"beginner": 0.18877512216567993,
"expert": 0.22782106697559357
}
|
44,227
|
how can i run gradlew bootRun with environment variables?
|
1107e4733a1076e6d73a67d973219c4f
|
{
"intermediate": 0.4579634368419647,
"beginner": 0.216903954744339,
"expert": 0.3251326084136963
}
|
44,228
|
is it posssible to use GoogleTest for writing unit tests OF A
|
18754b3e7a1a7fc161680133e08493f5
|
{
"intermediate": 0.40973612666130066,
"beginner": 0.2553926408290863,
"expert": 0.3348712623119354
}
|
44,229
|
How hugging face works? Where does the AI model run?
|
8db3f7ceaabc72da66f650fb6d36f31b
|
{
"intermediate": 0.0653320699930191,
"beginner": 0.05313031002879143,
"expert": 0.881537675857544
}
|
44,230
|
Can you give me a good example of using std::map of how to add, access, delete, items, how to access the first item in the tree efficiently and how to delete it?
|
a58763d304cbe5e79fad3ec04769c46a
|
{
"intermediate": 0.7239110469818115,
"beginner": 0.06351010501384735,
"expert": 0.21257883310317993
}
|
44,231
|
In servicenow, In a catalog item,i have a reference variable named as select_server and referring to cmdb_ci_server table. my requirement is that if i select any server from select_server variable then it will take the ip_address from the selected record and check for all the parent virtual server on cmdb_rel_ci table through the ip_address with a encoded query child.ip_address = ip_address of the selected server and then checks for the operational status of all parent records in cmdb_rel_ci table come after encoded query and note the names of all servers whose operational status is 1 and show the names of all operational server in a popup or alert box before submitting the form. i want this requirement to be check before submmiting the catalog request and prevent the user to submit the request until the alert box and all servers was not shoing in the alert. i want to restrict the user to submit the request
|
02725d23c03d626bd63e77b2db06274a
|
{
"intermediate": 0.32087114453315735,
"beginner": 0.373388409614563,
"expert": 0.30574044585227966
}
|
44,232
|
hi
|
985d6930d6a2abde9162a6581ed432f9
|
{
"intermediate": 0.3246487081050873,
"beginner": 0.27135494351387024,
"expert": 0.40399640798568726
}
|
44,233
|
In React app using effector library with Axios requests is it possible to check if after clicking the button there is another instance of this request going on already so forbid this same request to be called again until its actual instance will actually be completed?
|
a28374341dbf58bfe236f9c460decad2
|
{
"intermediate": 0.8762316703796387,
"beginner": 0.05056262016296387,
"expert": 0.07320564240217209
}
|
44,234
|
I am not getting the embedding tensor result with in the 0 and 1, and one more doubt which i am having that, if we reduce the dimensions of each edge features will it reduce the original attribute features, or the original feature was not change?.
# Mapping dictionaries for categorical variables
device_type_mapping = {'NMOS': 0, 'PMOS': 1, 'R': 2, 'L': 3, 'C': 4, 'I': 5, 'V': 6}
device_mapping = {'M0': 0, 'M1': 1, 'M2': 2, 'M3': 3, 'M4': 4, 'M5': 5, 'M6': 6, 'M7': 7,
'C0': 8, 'C1': 9, 'R0': 10, 'L0': 11, 'I0': 12, 'V0': 13, 'V1': 14}
terminal_mapping = {'D0': 0, 'G0': 1, 'S0': 2, 'B0': 3, 'D1': 4, 'G1': 5, 'S1': 6, 'B1': 7,
'D2': 8, 'G2': 9, 'S2': 10, 'B2': 11, 'D3': 12, 'G3': 13, 'S3': 14, 'B3': 15,
'D4': 16, 'G4': 17, 'S4': 18, 'B4': 19, 'D5': 20, 'G5': 21, 'S5': 22, 'B5': 23,
'D6': 24, 'G6': 25, 'S6': 26, 'B6': 27, 'D7': 28, 'G7': 29, 'S7': 30, 'B7': 31,
'C0': 32, 'C1': 33, 'R0': 34, 'L0': 35, 'I0': 36, 'V0': 37, 'V1': 38}
edge_colors_mapping = {'blue': 0, 'red': 1, 'green': 2, 'grey': 3, 'yellow': 4, 'black': 5}
parallel_edges_mapping = {'T': 0, 'F': 1}
net_mapping = {'net1': 0, 'net2': 1, 'net3': 2, 'IN1': 3, 'IN2': 4, 'VOUT': 5, 'Vbias': 6, '0': 7, 'vdd!': 8}
# Define the embedding dimensions for each edge feature
device_type_embedding_dim = 4
device_embedding_dim = 6
net_embedding_dim = 4
terminal_name_embedding_dim = 10
edge_pairs_embedding_dim = 15
edge_colors_embedding_dim = 4
parallel_edges_embedding_dim = 1
# Create embedding layers for each edge feature
device_type_embedding = nn.Embedding(num_embeddings=7, embedding_dim=device_type_embedding_dim)
device_embedding = nn.Embedding(num_embeddings=15, embedding_dim=device_embedding_dim)
net_embedding = nn.Embedding(num_embeddings=9, embedding_dim=net_embedding_dim)
terminal_name_embedding = nn.Embedding(num_embeddings=39, embedding_dim=terminal_name_embedding_dim)
edge_pairs_embedding = nn.Embedding(num_embeddings=40, embedding_dim=edge_pairs_embedding_dim)
edge_colors_embedding = nn.Embedding(num_embeddings=6, embedding_dim=edge_colors_embedding_dim)
parallel_edges_embedding = nn.Embedding(num_embeddings=2, embedding_dim=parallel_edges_embedding_dim)
# Function to map categorical values to numerical indices
def map_categorical_to_index(feature_value, mapping_dict):
if feature_value in mapping_dict:
return mapping_dict[feature_value]
else:
return -1 # Handle unknown values
# Function to create embeddings for edge features
def get_edge_embeddings(edge_features):
embeddings = []
for edge in edge_features:
device_type_index = map_categorical_to_index(edge['device_type'], device_type_mapping)
device_index = map_categorical_to_index(edge['device'], device_mapping)
terminal_index = map_categorical_to_index(edge['terminal_name'], terminal_mapping)
net_index = map_categorical_to_index(edge['nets'], net_mapping)
net_embed = net_embedding(torch.tensor(net_index))
edge_colors_index = map_categorical_to_index(edge['edge_colors'], edge_colors_mapping)
parallel_edges_index = map_categorical_to_index(edge['Parallel edges present'], parallel_edges_mapping)
device_type_embed = device_type_embedding(torch.tensor(device_type_index))
device_embed = device_embedding(torch.tensor(device_index))
terminal_name_embed = terminal_name_embedding(torch.tensor(terminal_index))
edge_colors_embed = edge_colors_embedding(torch.tensor(edge_colors_index))
parallel_edges_embed = parallel_edges_embedding(torch.tensor(parallel_edges_index))
edge_pair_embed = torch.cat([device_embed, net_embed], dim=0)
edge_embed = torch.cat([device_type_embed, device_embed, terminal_name_embed, edge_colors_embed, parallel_edges_embed, edge_pair_embed], dim=0)
embeddings.append(edge_embed)
return embeddings
|
e8ab80ea42971035a1db2741ecb1ca6b
|
{
"intermediate": 0.274149626493454,
"beginner": 0.43767228722572327,
"expert": 0.28817814588546753
}
|
44,235
|
If I run a Spring Boot application through IntelliJ, why does it use application.yaml instead of application-local.yaml?
|
780ab98092a465d98778d2005b677778
|
{
"intermediate": 0.6568315625190735,
"beginner": 0.20873820781707764,
"expert": 0.13443021476268768
}
|
44,236
|
I have this code which is reading a 10K potentiometer value through PC0 and then lights up 6 LEDs; I'm using a AVR atmega 328p and also 2n2222; those this code is right ?
#define F_CPU 16000000UL // 16 MHz clock speed
#include <avr/io.h>
#include <util/delay.h>
#define potPin 0 // Define potentiometer ADC channel (PC0 = ADC0)
// Define digital I/O pins for LEDs - on PD1 to PD6
#define led1 PD1
#define led2 PD2
#define led3 PD3
#define led4 PD4
#define led5 PD5
#define led6 PD6
#define SAMPLES_TO_AVERAGE 10 // Number of samples to average for ADC
void adc_init()
{
ADMUX = (1<<REFS0); // Select AVcc as the reference voltage and ADC0 as input channel
ADCSRA = (1<<ADEN) | (1<<ADPS2) | (1<<ADPS1) | (1<<ADPS0); // Enable ADC and set prescaler to 128
}
uint16_t adc_read(uint8_t ch)
{
// Select ADC channel with a safety mask and without changing the reference voltage selection
ADMUX = (ADMUX & 0xF0) | (ch & 0x0F);
// Start single conversion
ADCSRA |= (1<<ADSC);
// Wait until conversion is complete
while (ADCSRA & (1<<ADSC));
return ADC;
}
uint16_t adc_read_average(uint8_t ch)
{
uint32_t sum = 0;
for (int i = 0; i < SAMPLES_TO_AVERAGE; ++i) {
sum += adc_read(ch);
}
return (uint16_t)(sum / SAMPLES_TO_AVERAGE);
}
int main(void)
{
// Set up the LED pins as output - updated for PD1 to PD6
DDRD |= (1<<led1) | (1<<led2) | (1<<led3) | (1<<led4) | (1<<led5) | (1<<led6);
// Initialize ADC
adc_init();
while(1)
{
// Read the value from the potentiometer and average it
uint16_t potValue = adc_read_average(potPin);
// Map the potentiometer value from the given range (65 - 337) to (0 - 1023)
uint16_t mappedValue = (uint32_t)(potValue - 65) * 1023 / (337 - 65);
// Define thresholds based on the number of LEDs
uint16_t threshold1 = 170; // First threshold
uint16_t threshold2 = 341; // Second threshold
uint16_t threshold3 = 512; // Third threshold
uint16_t threshold4 = 683; // Fourth threshold
uint16_t threshold5 = 854; // Fifth threshold
// Turn off all LEDs to start with - updated for PD1 to PD6
PORTD &= ~((1<<led1) | (1<<led2) | (1<<led3) | (1<<led4) | (1<<led5) | (1<<led6));
// Determine which LEDs to light up based on the mappedValue - updated for PD1 to PD6
if (mappedValue >= threshold5) {
PORTD |= (1<<led1) | (1<<led2) | (1<<led3) | (1<<led4) | (1<<led5) | (1<<led6);
} else if (mappedValue >= threshold4) {
PORTD |= (1<<led1) | (1<<led2) | (1<<led3) | (1<<led4) | (1<<led5);
} else if (mappedValue >= threshold3) {
PORTD |= (1<<led1) | (1<<led2) | (1<<led3) | (1<<led4);
} else if (mappedValue >= threshold2) {
PORTD |= (1<<led1) | (1<<led2) | (1<<led3);
} else if (mappedValue >= threshold1) {
PORTD |= (1<<led1) | (1<<led2);
} else {
// Ensure led1 is always turned on regardless of the value
PORTD |= (1<<led1);
}
// Small delay to reduce flickering
_delay_ms(30);
}
}
|
f61089556428f69b43b79f40e62db26b
|
{
"intermediate": 0.3775271475315094,
"beginner": 0.30837538838386536,
"expert": 0.31409746408462524
}
|
44,237
|
In servicenow, Can we add qualifier condition for document id field. For example , if we have document filed 'XYZ' and it is dependent on table 'Account'.
XYZ field is showing all records from 'Account' table. We don't want to list all the details.Can we make any default filter or qualifier condition on this field
|
8d2dc6a6e9b5ee283784b3f3cecebc69
|
{
"intermediate": 0.5159698128700256,
"beginner": 0.19736088812351227,
"expert": 0.28666922450065613
}
|
44,238
|
i want to use a ATmega328P U to light up 6 leds based on a 10K ohm potentiometer value, which pins is better to use ?
|
c7744a4e0c138853b72124504d910047
|
{
"intermediate": 0.3781959116458893,
"beginner": 0.25265857577323914,
"expert": 0.3691454231739044
}
|
44,239
|
how to write gtests for a c code and run tests
|
5c28e38754ef2d6bcb7b6a48bd269bc1
|
{
"intermediate": 0.3368339240550995,
"beginner": 0.22883985936641693,
"expert": 0.4343262314796448
}
|
44,240
|
public class Main
{
public static void main(String[] args)
{
System.out.printf("Hello and welcome!");
for (var i = 1; i <= 5; i++) {
System.out.printf("%i", i);
}
}
}
|
f960dff94fab472d6081db0d7b1e37ef
|
{
"intermediate": 0.29982396960258484,
"beginner": 0.5000360012054443,
"expert": 0.2001400589942932
}
|
44,241
|
CUDA_VISIBLE_DEVICES=0,1,2,3 python -m torch.distributed.launch image_train_h5.py
/home/amax/anaconda3/lib/python3.11/site-packages/torch/distributed/launch.py:183: FutureWarning: The module torch.distributed.launch is deprecated
and will be removed in future. Use torchrun.
Note that --use-env is set by default in torchrun.
If your script expects `--local-rank` argument to be set, please
change it to read from `os.environ['LOCAL_RANK']` instead. See
https://pytorch.org/docs/stable/distributed.html#launch-utility for
further instructions
warnings.warn(
usage: image_train_h5.py [-h] [--workers WORKERS]
[--train_test_h5_path TRAIN_TEST_H5_PATH]
[--val_test_h5_path VAL_TEST_H5_PATH]
[--model_outpath MODEL_OUTPATH] [--lr LR] [--model MODEL]
[--seed SEED] [--batch_size BATCH_SIZE] [--epochs EPOCHS]
[--decay DECAY] [--milestone MILESTONE]
[--local_rank LOCAL_RANK]
[--nproc_per_node NPROC_PER_NODE]
image_train_h5.py: error: unrecognized arguments: --local-rank=0
[2024-03-27 21:38:22,700] torch.distributed.elastic.multiprocessing.api: [ERROR] failed (exitcode: 2) local_rank: 0 (pid: 443088) of binary: /home/amax/anaconda3/bin/python
Traceback (most recent call last):
File "<frozen runpy>", line 198, in _run_module_as_main
File "<frozen runpy>", line 88, in _run_code
File "/home/amax/anaconda3/lib/python3.11/site-packages/torch/distributed/launch.py", line 198, in <module>
main()
File "/home/amax/anaconda3/lib/python3.11/site-packages/torch/distributed/launch.py", line 194, in main
launch(args)
File "/home/amax/anaconda3/lib/python3.11/site-packages/torch/distributed/launch.py", line 179, in launch
run(args)
File "/home/amax/anaconda3/lib/python3.11/site-packages/torch/distributed/run.py", line 803, in run
elastic_launch(
File "/home/amax/anaconda3/lib/python3.11/site-packages/torch/distributed/launcher/api.py", line 135, in __call__
return launch_agent(self._config, self._entrypoint, list(args))
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/amax/anaconda3/lib/python3.11/site-packages/torch/distributed/launcher/api.py", line 268, in launch_agent
raise ChildFailedError(
torch.distributed.elastic.multiprocessing.errors.ChildFailedError:
============================================================
image_train_h5.py FAILED
------------------------------------------------------------
Failures:
<NO_OTHER_FAILURES>
------------------------------------------------------------
Root Cause (first observed failure):
[0]:
time : 2024-03-27_21:38:22
host : amax
rank : 0 (local_rank: 0)
exitcode : 2 (pid: 443088)
error_file: <N/A>
traceback : To enable traceback see: https://pytorch.org/docs/stable/elastic/errors.html
============================================================
这是什么错误,应该怎么解决
|
638f452961db61dd2b612fba6fd126e1
|
{
"intermediate": 0.33703485131263733,
"beginner": 0.4431624412536621,
"expert": 0.21980275213718414
}
|
44,242
|
I have react app and when I enter the page by clicking on navigation icon it makes a request for getting a list of appeals. The problem however is that if I would click fast on the icon it will create multiple same requests that would take a lot of time to be finished. What can I do to resolve this?
|
d5a296c660bead7d130edcc915f0235b
|
{
"intermediate": 0.491860032081604,
"beginner": 0.21041277050971985,
"expert": 0.29772719740867615
}
|
44,243
|
aiokafka consume message
|
2f79f6c46a51e6c659aa7ac0bc87797d
|
{
"intermediate": 0.3629162013530731,
"beginner": 0.2726406455039978,
"expert": 0.3644431233406067
}
|
44,244
|
So what to replace your_model_identifier
|
c49a6a2b2afb5cd2b956021ef3b5f5b1
|
{
"intermediate": 0.30205273628234863,
"beginner": 0.2595478594303131,
"expert": 0.43839940428733826
}
|
44,245
|
I have a translation api. i have another way to translate with openai. i have code for both. so intergrate the openai way as a choice to the existing one. Improve it using great design patterns or necessary techniques. give me the all imprved codes as output
import fnmatch
import logging
import math
import os
import shutil
import threading
import uuid
import zipfile
from os import path
from flask import Flask, request, render_template, send_file, session
from flask_mongoengine import MongoEngine
from flask_restful import Api
from translator.auth import login_required
from util import OUTPUT_FOLDER
from . import android, ios, json, slack, text, upload_translation, translation, auth, csv
ENVIRONMENT = os.environ.get("ENVIRONMENT", default="DEFAULT")
logging.basicConfig(level=logging.ERROR)
LOGGER = logging.getLogger(__file__)
app = Flask(__name__)
if ENVIRONMENT == "PROD":
app.config.from_object('config.ProductionConfig')
else:
app.config.from_object('config.Config')
LOGGER.info(f'ENVIRONMENT for app {ENVIRONMENT}')
app.register_blueprint(upload_translation.bp)
app.register_blueprint(translation.bp)
app.register_blueprint(auth.bp)
api = Api(app)
db = MongoEngine()
db.init_app(app)
def do_async(file_path, key, source, target, output_folder, channel):
slack.msg(channel, "translate started, I will upload the translated documents to SLACK")
file = None
errors = []
try:
filename = os.path.basename(file_path)
ext = os.path.splitext(os.path.basename(file_path))[1][1:]
file = open(file_path)
retfile = None
zipoutput = False
if ext == 'xml':
retfile, errors = android.dojob(source, output_folder, file, filename, target)
zipoutput = True
elif ext == 'txt':
retfile = text.dojob(source, output_folder, file, filename, target)
elif ext == 'json':
retfile = json.dojob(source, output_folder, file, filename, target)
elif ext == 'strings':
retfile = ios.dojob(source, output_folder, file, filename, target)
zipoutput = True
elif ext == 'csv':
retfile = csv.dojob(source, output_folder, file, filename, target)
zipoutput = True
if zipoutput:
retfile = os.path.join(OUTPUT_FOLDER, "{}.zip".format(key))
zip(output_folder, retfile)
shutil.rmtree(output_folder)
else:
newretfile = os.path.join(OUTPUT_FOLDER, "{}.{}".format(key, ext))
shutil.copyfile(retfile, newretfile)
shutil.rmtree(output_folder)
retfile = newretfile
except Exception as e:
LOGGER.error("error occured while translation: {} {}".format(type(e).__name__, str(e)))
slack.msg(channel, "TRANSLATE ERROR:" + str(e))
print("error : {}".format(str(e)))
else:
print("return file : {}".format(retfile))
slack.upload(channel, retfile)
if len(errors) > 0:
slack.msg(channel, "errors: " + str(errors))
return
finally:
file.close()
shutil.rmtree(os.path.dirname(file_path))
shutil.rmtree(output_folder)
@app.route('/handle_form', methods=['POST'])
@login_required
def handle_form():
form = request.form.to_dict(flat=False)
target = form['target']
source = request.form['source']
channel = request.form['channel']
file = request.files['file']
key = str(uuid.uuid4())
length = len(file.read())
file.seek(0)
if length > 20000 and session['user_id'] != "admin":
return "File is larger than average, please ask one of the manager", 200
output_folder = os.path.join(OUTPUT_FOLDER, "{}".format(key))
try:
shutil.rmtree(output_folder)
except:
pass
try:
os.makedirs(output_folder)
except:
pass
print("target: {}".format(target))
copyinputfile = os.path.join(OUTPUT_FOLDER, "uploads", key)
try:
os.makedirs(copyinputfile)
copyinputfile = os.path.join(copyinputfile, file.filename)
except:
pass
file.save(copyinputfile)
# 1000KB * 10Lang = 60 sec
total_lang = len(target)
estimated_time = math.ceil(total_lang * length / 2 / 10000)
download_thread = threading.Thread(target=do_async,
args=[copyinputfile, key, source, target, output_folder, channel])
download_thread.start()
return "Key: {} <br/> " \
"File will be ready in about {} seconds. " \
"You can download the file by clicking " \
"<a href=/download?key={}>here</a>" \
.format(key, estimated_time, key)
@app.route('/download', methods=['GET'])
@login_required
def download():
key = request.args.get('key')
if len(str(uuid.uuid4())) != len(key):
return "wrong key", 400
output_folder = os.path.join(OUTPUT_FOLDER, "{}".format(key))
try:
if path.exists(output_folder):
return "Still running.. Wait a bit more", 200
else:
for file in os.listdir('output'):
if fnmatch.fnmatch(file, '{}.*'.format(key)):
return send_file(os.path.join(OUTPUT_FOLDER, file), as_attachment=True)
return "Timeout, translate it again!", 400
except Exception as e:
return str(e)
@app.route("/")
@login_required
def index():
return render_template("index.html")
@app.route("/health")
def health():
return "OK"
@app.route("/faq")
@login_required
def faq():
return render_template("faq.html")
def zip(src, dst):
zf = zipfile.ZipFile(dst, "w", zipfile.ZIP_DEFLATED)
abs_src = os.path.abspath(src)
for dirname, subdirs, files in os.walk(src):
for filename in files:
absname = os.path.abspath(os.path.join(dirname, filename))
arcname = absname[len(abs_src) + 1:]
# print('zipping %s as %s' % (os.path.join(dirname, filename),
# arcname))
zf.write(absname, arcname)
zf.close()
if __name__ == "__main__":
try:
shutil.rmtree(OUTPUT_FOLDER)
except:
pass
app.run(host='0.0.0.0', port='8081')
import html
import os
import re
import lxml.etree as ET
# This subroutine extracts the string including html tags
# and may replace "elem.text".
# It cannot digest arbitrary encodings, so use it only if necessary.
import six
from .translate import handle_containers
replace = {"\%([0-9])\$s": "1111_", "\%([0-9])\$d": "1000215400", "\%s": "1111_", "\%d": "1000215400"}
def unmask_parameters(t):
# fix parameter strings
parsed3 = re.sub('% ([ds])', r' %\1', t)
parsed4 = re.sub('% ([\d]) \$ ([ds])', r' %\1$\2', parsed3).strip()
t = html.unescape(parsed4)
t = t.replace('\\ n', '\\n')
t = t.replace('\n', ' \n')
t = t.replace("'", "\\'")
for i in reversed(range(1, 10)):
t = t.replace("1111_{}".format(i), "%{}$s".format(i))
t = t.replace("{}".format(1000215400 + i), "%{}$d".format(i))
t = t.replace("1111_", "%s")
t = t.replace("1111", "%s")
t = t.replace("1000215400", "%d")
t = t.replace("&", "&")
return t
def mask_parameters(t):
# t = t.replace("\\n", " ")
if isinstance(t, six.binary_type):
t = t.decode('utf-8')
t = t.replace("&", "&")
t = t.replace("<b>", " ")
t = t.replace("</b>", " ")
t = t.replace("\n", " ")
t = t.replace("\\n", " ")
t = t.replace("\\'", "'")
t = re.sub(" +", " ", t)
for rr in replace:
while len(re.findall(rr, t)) > 0:
count = re.findall(rr, t)[0]
new_replace = re.sub("\((.*)\)", "{}".format(count), rr)
try:
int(count)
try:
int(replace[rr])
replace_with = "{}".format(int(replace[rr]) + int(count))
except:
replace_with = "{}{}".format(replace[rr], int(count))
pass
except:
replace_with = replace[rr]
pass
t = re.sub(new_replace, replace_with, t)
return t
def dojob(src, output_folder, input_file, name, target):
errors = []
for target_lang in target:
# create outfile name by appending the language code to the infile name
file = os.path.join(output_folder, "values-{}".format(target_lang), name)
# read xml structure
input_file.seek(0)
input_as_text = input_file.read()
# clear inner tags
input_as_text = re.sub("<font.*?>", '', input_as_text)
input_as_text = re.sub("</font.*?>", '', input_as_text)
input_as_text = re.sub("<b.*?>", '', input_as_text)
input_as_text = re.sub("</b.*?>", '', input_as_text)
input_as_text = re.sub("<u.*?>", '', input_as_text)
input_as_text = re.sub("</u.*?>", '', input_as_text)
root = ET.fromstring(input_as_text.encode('utf-8'))
translates = []
replace_index = 0
# cycle through elements
for elem in root:
if elem.get('translatable') == 'false':
elem.getparent().remove(elem)
continue
if elem.tag == 'string':
text = elem.text
if text is None:
continue
replace_text = "{." + str(replace_index) + ".}"
translates.append({
"src": text,
"masked": mask_parameters(text),
"replace_text": replace_text,
"char_count": len(text)
})
elem.text = replace_text
replace_index = replace_index + 1
if elem.tag == 'string-array' or elem.tag == 'plurals':
for j in elem:
if j.get('translatable') == 'false':
elem.getparent().remove(elem)
continue
if j.tag == 'item':
replace_text = "{." + str(replace_index) + ".}"
translates.append({
"src": j.text,
"masked": mask_parameters(j.text),
"replace_text": replace_text,
"char_count": len(j.text)
})
j.text = replace_text
replace_index = replace_index + 1
# write new xml file
input_as_text = ET.tostring(root, method='xml', encoding="utf-8", xml_declaration=True).decode('utf-8')
results = handle_containers(translates, target_lang, src, unmask_parameters)
for result in results:
input_as_text = input_as_text.replace(result['replace_text'], result['target'])
save_to_file(file, input_as_text)
if target_lang == "iw":
save_to_file(file.replace("-iw", "-he"), input_as_text)
if target_lang == "in":
save_to_file(file.replace("-in", "-id"), input_as_text)
return output_folder, errors
def save_to_file(file, input_as_text):
try:
os.makedirs(os.path.split(file)[0])
except:
pass
# open text file
text_file = open(file, "w", encoding="utf-8")
# write string to file
text_file.write(input_as_text)
# close file
text_file.close()
import functools
from datetime import datetime
from flask import Blueprint, render_template, request, send_file
from flask_mongoengine import Pagination
from mongoengine import Document, StringField, LongField, DateTimeField
from translator import translation_reader
from translator.auth import login_required
bp = Blueprint('translations', __name__)
class Translation(Document):
text = StringField()
translated_text = StringField()
source_lang = StringField()
target_lang = StringField()
count = LongField()
last_used = DateTimeField()
translate_source = StringField()
meta = {
"index_background": True,
"indexes": [
('text', 'source_lang', 'target_lang'),
]
}
def handle_translations(text_arr, source_lang, target_lang):
db_result = Translation.objects(text__in=text_arr, source_lang=source_lang, target_lang=target_lang).order_by(
'-last_used')
db_result_map = {}
for r in db_result:
in_map = db_result_map.get(r['text'])
if in_map is None:
db_result_map[r['text']] = r
else:
if in_map['last_used'] > r['last_used']:
db_result_map[r['text']] = r
db_result_ids = list(map(lambda result: result['id'], db_result_map.values()))
Translation.objects(id__in=db_result_ids, source_lang=source_lang, target_lang=target_lang).update(inc__count=1)
Translation.objects(id__in=db_result_ids, source_lang=source_lang, target_lang=target_lang).update(
set__last_used=datetime.now())
return db_result_map
def handle_container_chunk(container_chunk, source_lang, target_lang):
text_list = list(map(lambda container: container['src'], container_chunk))
result_chunk = handle_translations(text_list, source_lang, target_lang)
return list(result_chunk.values())
def save_api_results(api_results, source_lang, target_lang):
db_results = Translation.objects(text__in=list(map(lambda x: x['input'], api_results)), source_lang=source_lang,
target_lang=target_lang)
db_results_dict = {result['text']: result for result in db_results}
non_existed_api_results = list(filter(lambda r: not bool(db_results_dict.get(r['input'])), api_results))
if len(non_existed_api_results) > 0:
Translation.objects.insert(list(map(lambda api_result: Translation(
text=api_result['input'],
translated_text=api_result['translatedText'],
source_lang=source_lang,
target_lang=target_lang,
count=1,
last_used=datetime.now(),
translate_source='google'
), non_existed_api_results)))
def handle_translation(text, source_lang, target_lang):
result = Translation.objects(text=text, source_lang=source_lang, target_lang=target_lang)
if result.count() == 0:
return None
if result.count() == 1:
translation = result[0]
if result.count() > 1:
translation = result.order_by('-last_used')[0]
translation.count = translation.count + 1
translation.last_used = datetime.now()
translation = translation.save()
return translation
def create_translation(result, source_lang, target_lang, text, translate_source):
return Translation(
text=text,
translated_text=result,
source_lang=source_lang,
target_lang=target_lang,
count=1,
last_used=datetime.now(),
translate_source=translate_source
).save()
def get_translation(text, translated_text, source_lang, target_lang, translate_source):
return Translation.objects(text__in=text, translated_text=translated_text, source_lang=source_lang,
target_lang=target_lang,
translate_source=translate_source)
def cacheable(func):
@functools.wraps(func)
def wrapper(text, target_lang, source_lang):
res = handle_translation(text=text, target_lang=target_lang, source_lang=source_lang)
if res:
return res.translated_text
else:
result = func(text, target_lang, source_lang)
create_translation(result, source_lang, target_lang, text, 'google')
return result
return wrapper
ROWS_PER_PAGE = 20
@bp.route("/export", methods=['GET'])
@login_required
def export_translations():
source_lang = request.args.get('source_lang')
target_lang = request.args.get('target_lang')
filename = translation_reader.export(source_lang, target_lang)
return send_file(filename, as_attachment=True)
@bp.route("/translations", methods=['GET'])
@login_required
def translations():
page = request.args.get('page', 1, type=int)
paginator = Pagination(Translation.objects.order_by("-count"), page, ROWS_PER_PAGE)
return render_template("translations.html", paginator=paginator)
the new code is like that
from langchain.chat_models import ChatOpenAI
from langchain.prompts import ChatPromptTemplate
from langchain.chains import LLMChain
from langchain.chains import SequentialChain
from langchain.output_parsers import ResponseSchema
from langchain.output_parsers import StructuredOutputParser
from openai import OpenAI
import os
# from dotenv import load_dotenv, find_dotenv
with open('strings1.xml', 'r') as file:
xml = file.read()
# _ = load_dotenv(find_dotenv()) # read local .env file
#client = OpenAI(api_key=os.environ['OPENAI_API_KEY'])
client = OpenAI(api_key="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")
chat = ChatOpenAI(temperature=0.0, model="gpt-3.5-turbo")
prompt1 = ChatPromptTemplate.from_template(
"""\
Translate given xml string to {language}, use desc attribute as hint.
Remember only translate xml element text contents.
Remember convert numerals to {language} numerals, for example in arabic: 0 (٠) 1 (١) 2 (٢) 3 (٣).
Remember do not translate any attributes.
{cleaned_xml}
""")
chain = LLMChain(llm=chat, prompt=prompt1, output_key="translated_xml")
prompt2 = ChatPromptTemplate.from_template(
"""
Apply below rules to given xml:
remove all elements <a> tags and contents.
remove xml element when translatable attribute set to "false".
replace \\n string to " ".
{xml}
"""
)
# chain 2
chain_two = LLMChain(llm=chat, prompt=prompt2, output_key="cleaned_xml")
xml_schema = ResponseSchema(name="result",
description="string result")
response_schemas = [xml_schema]
output_parser = StructuredOutputParser.from_response_schemas(response_schemas)
format_instructions = output_parser.get_format_instructions()
language = "Turkish"
overall_simple_chain = SequentialChain(chains=[chain_two, chain], input_variables=["xml", "language"],
output_variables=["cleaned_xml", "translated_xml"],
verbose=True)
response = overall_simple_chain({ "xml": xml, "language": language})
print(response["translated_xml"])
|
57e0d8adacf7a24f0a4b744a6aad4ddd
|
{
"intermediate": 0.403359055519104,
"beginner": 0.3789440393447876,
"expert": 0.2176968902349472
}
|
44,246
|
Runtime error
File "/home/user/app/app.py", line 12
mood = st.selectbox('What’s the mood?'', ['Happy', 'Sad', 'Energetic', 'calm']
^
SyntaxError: unterminated string literal (detected at line 12)
|
9bc8e11ab3bca755528d51b412aeb73a
|
{
"intermediate": 0.31565070152282715,
"beginner": 0.6010679602622986,
"expert": 0.08328133076429367
}
|
44,247
|
i have a list of csv files ,
each file contains a "Date" column
i wand to remove files that dont have the data with "Date" after 3/8/2024
give me proper python code
|
b77985a5ec8c6aacae8e21500bd561be
|
{
"intermediate": 0.5058901309967041,
"beginner": 0.2659159302711487,
"expert": 0.2281939685344696
}
|
44,248
|
Im making an battleship game in processing 4 can you check my code and maybe include more features to help me improve it?
1st tab:
Player2 win;
int [] b = {0, 25, 50, 75, 100, 125, 150, 175, 200, 225, 250, 275, 300, 325, 350, 375, 400, 425, 450, 475, 500};
/* values:
-2 player2's ship shot.
-1 means player2's ships
0 you don't know what is in that cell.
1 means player1's ship,
2 player1's ship shot.
3 is ??
*/
int count=0;
int [][] grid = new int [20][20];
int lcx, lcy;
boolean gameWin, placedships;
int ships1 = 0, totalship1=10;
int ships2 = 0, totalship2=10;
int turn = 1;
void setup() {
background(250);
size(500, 500);
drawgrid();
win = new Player2();
}
void draw() {
if (key == ENTER && turn==1) {
turn=2;
}
if (key == 'p' && keyPressed) {
printt();
}
}
void mousePressed() {
if (mouseButton == LEFT && turn==1) {
for (int r=0; r<20; r++) {
for (int c=0; c<20; c++) {
if (mouseX>b[r] && mouseX<b[r+1] && mouseY>b[c] && mouseY<b[c+1] && ships1<10 && grid[r][c]!=1 && grid[r][c]!=2) {
fill(50, 50, 100);
ships1++;
square(b[r], b[c], 25);
grid[r][c]=1;
}
}
}
}
if (mouseButton == RIGHT && turn==1) {
for (int r=0; r<20; r++) {
for (int c=0; c<20; c++) {
if (mouseX>b[r] && mouseX<b[r+1] && mouseY>b[c] && mouseY<b[c+1] && ships2<10 && grid[r][c]!=1 && grid[r][c]!=2) {
if (grid[r][c]==-1) {
grid[r][c]=-2;
ships2--;
fill(50, 100, 50);
square(b[r]+7.5, b[c]+7.5, 10);
}
}
}
}
}
}
void grabshots() {
for (int r=0; r<20; r++) {
for (int c=0; c<20; c++) {
if (grid[r][c]==2) {
fill(200, 0, 0);
square(b[r]+7.5, b[c]+7.5, 10);
fill(100);
}
}
}
}
void drawgrid() {
for (int r=0; r<525; r+=25) {
for (int c=0; c<500; c+=25) {
fill(100);
square(r, c, 25);
}
}
fill(25);
}
void printt() {
println();
print("player"+ turn + " board");
println();
for (int r=0; r<20; r++) {
for (int c=0; c<20; c++) {
print(grid[c][r] + ", ");
}
println();
}
}
/*
treat the second tab as a separate processing sketch.
Declare variables in the main tab and
you can use them in both the main and second window.
*/
2nd Tab:
class Player2 extends PApplet {
Player2() {
super();
PApplet.runSketch(new String[] {this.getClass().getSimpleName()}, this);
}
void settings() {
size(500, 500);
}
void setup() {
drawgrid();
botdrawship();
}
void draw() {
if (turn==2) {
grablocation();
// shoot();
turn=1;
}
}
void shoot() {
int r = round(random(1, 18));
int c = round(random(1, 18));
if (grid[r][c]==1) {
grid[r][c]=2;
totalship1--;
fill(200, 0, 0);
square(b[r]+7.5, b[c]+7.5, 10);
fill(100);
}
turn=1;
}
void botdrawship() {
count=0;
while (count<10) {
int r = round(random(1, 18));
int c = round(random(1, 18));
grid[r][c]=-1;
fill(100, 50, 50);
square(b[r], b[c], 25);
count++;
}
}
void grablocation() {
for (int r=0; r<20; r++) {
for (int c=0; c<20; c++) {
if (grid[r][c]==1) {
fill(50, 50, 100);
square(b[r], b[c], 25);
}
}
}
}
void drawgrid() {
for (int r=0; r<525; r+=25) {
for (int c=0; c<500; c+=25) {
fill(100);
square(r, c, 25);
}
}
fill(25);
}
}
|
97a8d2d669bb5122042fad86babc9ffc
|
{
"intermediate": 0.36080384254455566,
"beginner": 0.4405513107776642,
"expert": 0.19864481687545776
}
|
44,249
|
else if(event==6)
{
//SEND PLAYERS POSITION
//Input:none
//Output:arg1 - number of players arg2 - local player id arg3 - x arg4- y arg5 - rot arg6 -local player id ....
number_of_players=0;
to_send[0]=6;
c=2;
for(i=1;i<=10;i++)
{
u_id=get_unique_id_by_local(i);
if(u_id!=0 and u_id!=unique_id)
{
number_of_players++;
to_send[c]=i;
c++;
position=get_position(u_id);
x=position[0];
y=position[1];
rot=position[2];
to_send[c]=x;
c++;
to_send[c]=y;
c++;
to_send[c]=rot;
c++;
}
}
c--;
to_send[1]=number_of_players;
send_data_to_player(unique_id,to_send);
}
Convert from PHP to Python
|
08b441c163dcf5eb48a4415e716d8a62
|
{
"intermediate": 0.43668872117996216,
"beginner": 0.3010527491569519,
"expert": 0.26225852966308594
}
|
44,250
|
Runtime error
Traceback (most recent call last):
File "/home/user/app/app.py", line 1, in <module>
import streamlit as st
ModuleNotFoundError: No module named 'streamlit'
|
2bbf3f92b29bc31dbe972e750bdd37e2
|
{
"intermediate": 0.5480515360832214,
"beginner": 0.26124635338783264,
"expert": 0.1907021850347519
}
|
44,251
|
I want a code to copy openart website to make it a app with all the featers in it
|
d692699efe3d4487f8b0cb7a62e03f7b
|
{
"intermediate": 0.37597551941871643,
"beginner": 0.23083148896694183,
"expert": 0.39319294691085815
}
|
44,252
|
IN A WORDPRESS SITE:
SUPPOSE I SEARCH IN THE SITE FOR "WORD-001" AND IT RETURNS A RESULT.
BUT IF I SEARCH "WORD001" IT DOESN'T RETURN A RESULT.
HOW COULD I EDIT MY WORDPRESS SO THE - WONT MATTER IN THE SEARCH.
|
fc55ef24be9cf669fab5a7b5561d9ed5
|
{
"intermediate": 0.47260597348213196,
"beginner": 0.2252919226884842,
"expert": 0.30210211873054504
}
|
44,253
|
IN A WORDPRESS SITE:
SUPPOSE I SEARCH IN THE SITE FOR “WORD-001” AND IT RETURNS A RESULT.
BUT IF I SEARCH “WORD001” IT DOESN’T RETURN A RESULT.
HOW COULD I EDIT MY WORDPRESS SO THE - WONT MATTER IN THE SEARCH.
|
d43b124279d46e2f869a5a49a23818b5
|
{
"intermediate": 0.45155054330825806,
"beginner": 0.2272043079137802,
"expert": 0.3212451636791229
}
|
44,254
|
IN A WORDPRESS SITE:
SUPPOSE I SEARCH IN THE SITE FOR “WORD-001” AND IT RETURNS A RESULT.
BUT IF I SEARCH “WORD001” IT DOESN’T RETURN A RESULT.
HOW COULD I EDIT MY WORDPRESS SO THE - WONT MATTER IN THE SEARCH.
|
40598ac78559e5b0ce968ad2393192b0
|
{
"intermediate": 0.45155054330825806,
"beginner": 0.2272043079137802,
"expert": 0.3212451636791229
}
|
44,255
|
Bro can make glycosis cycle diagram with all details in plantuml format code
|
0eb9ec6a5a2069dcd6efc3510c7c3268
|
{
"intermediate": 0.43871375918388367,
"beginner": 0.2724204659461975,
"expert": 0.2888657748699188
}
|
44,256
|
GIVE ME A FUNCTION FOR A WORDPRESS SITE.
SO WHEN I SEARCH FOR WORD001, THE RESULTS WILL ALSO INCLUDE WORD-001. REGARDLESS OF "-"
|
4dd2e7715f30e7afa8c3b6cd522973b5
|
{
"intermediate": 0.34554731845855713,
"beginner": 0.30562010407447815,
"expert": 0.34883254766464233
}
|
44,257
|
Bro what's this rapaport leubering cycle in rbc is I'm medical student
|
8057fa00f2f375256082a8fe4ebf3a07
|
{
"intermediate": 0.352729469537735,
"beginner": 0.41347432136535645,
"expert": 0.23379619419574738
}
|
44,258
|
rules_version = '2';
service cloud.firestore {
match /databases/{database}/documents {
match /users/{userId} {
allow read, write: if request.auth != null && request.auth.uid == userId;
allow update: if request.auth != null && (request.auth.uid == userId || request.resource.data.keys().hasAny(['following', 'followers']));
}
match /users/{document} {
allow read: if true;
allow write: if false;
}
match /posts/{userId}/{post} {
// Allow anyone to read and write to the posts collection
allow read, write: if true;
}
match /chats/{chatId}/messages/{messageId} {
allow read, write: if request.auth != null;
}
}
}
const handleFinalisePost = async () => {
try {
// 1. Check if user is authenticated
if (!firebaseAuth.currentUser) {
// Log authentication failure
console.log('User not authenticated. Unable to finalize post.');
// Handle unauthenticated user (e.g., redirect to login)
return;
} else {
// Log authentication success
console.log('User authenticated successfully.');
}
// 2. Upload Image to Firebase Storage
const storageRef = ref(storage, `images/${Date.now()}`); // Unique filename
// Handle both string and string[] cases for images
let imageUri;
if (typeof images === 'string') {
imageUri = images;
} else if (Array.isArray(images) && images.length > 0) {
imageUri = images[0]; // Use the first image if it's an array
} else {
// Handle the case where images is not valid
return;
}
const response = await fetch(imageUri);
const blob = await response.blob();
await uploadBytes(storageRef, blob);
// 3. Get Download URL for the uploaded image
const downloadURL = await getDownloadURL(storageRef);
// 4. Create post data with timestamp
const postData = {
caption: caption,
imageUrl: downloadURL,
timestamp: Date.now(), // Add timestamp
userId: firebaseAuth.currentUser.uid,
};
// 5. Add post to Firestore
const userPostsRef = collection(db, `posts/${firebaseAuth.currentUser.uid}/userPosts`);
const postDocRef = doc(userPostsRef); // Firestore will automatically generate a unique ID for the document
await setDoc(postDocRef, postData);
// 6. Navigate back or show success message
navigation.goBack(); // Or show a success message
} catch (error) {
console.error('Error uploading post:', error);
// Handle error appropriately (e.g., show an error message to the user)
}
};
ERROR Error uploading post: [FirebaseError: Missing or insufficient permissions.]
|
1690c7454ceba8aff0297d2dfd8fe0f2
|
{
"intermediate": 0.31639769673347473,
"beginner": 0.5516526699066162,
"expert": 0.13194964826107025
}
|
44,259
|
Fibonacci sequence using Python
|
fc6653bb300d85830ae1e9d23928ca3a
|
{
"intermediate": 0.35561567544937134,
"beginner": 0.29944586753845215,
"expert": 0.3449384868144989
}
|
44,260
|
s.sendall('fibo 0 10') s.sendall('fibo 0 10')
TypeError: a bytes-like object is required, not 'str'
czemu mam taki blad
|
6fb376812b2f8686e8cab2fd499f67ed
|
{
"intermediate": 0.338661789894104,
"beginner": 0.35600921511650085,
"expert": 0.30532899498939514
}
|
44,261
|
i have some csv files
in some of them, in some columns , there are some cells empty after eahh other
i want to detect csv files that have some empty cell in their columns and move them to another folder
give me proper python code
|
d2371250c1d2613c8ba179f0f2f2302d
|
{
"intermediate": 0.43052440881729126,
"beginner": 0.3204101026058197,
"expert": 0.24906553328037262
}
|
44,262
|
Fix my json and just give me fixed version.
{
“name”: “Towny Multiplayer”,
“description”: “A game where multiple players live a life in a small town, engage in various activities, and live their lives.”,
“personality”: “”,
“scenario”: “Your objective is to live a life in a small town engaging in activities, following the laws of realism, physics, and society.”,
“first_mes”: “Welcome to Towny Multiplayer!”,
“mes_example”: “[player_name: action]”,
“creatorcomment”: “This character is designed to facilitate a multiplayer game session in a fictional small town setting, adhering to realism and player-driven narratives.”,
“avatar”: “none”,
“chat”: “1 - 2024-3-27 @20h 43m 27s 490ms”,
“talkativeness”: “0.5”,
“fav”: false,
“tags”: [“multiplayer”, “towny”, “roleplay”, “realism”],
“spec”: “chara_card_v2”,
“spec_version”: “2.0”,
“data”: {
“name”: “Towny Multiplayer AI”,
“description”: “A game where multiple players live a life in a small town. Players can engage in various activities and must adhere to game rules.”,
“personality”: “Neutral, informative, immersive”,
“scenario”: “Live in a small town, interact with others, and partake in realistic activities.”,
“first_mes”: “You have arrived in the small town. Your journey starts in the Greenwood district.”,
“mes_example”: “[john_doe: Walk to the 24/7 shop on Compstone street to buy bread]”,
“creator_notes”: “This AI model is tasked with maintaining game integrity, facilitating realistic interactions, and ensuring player actions are within the rules.”,
“system_prompt”: “In Towny Multiplayer, players live out lives in a town, divided into several districts. Each player controls a character. The AI informs players about the consequences of their actions, maintaining realism and the rules of the game.”,
“post_history_instructions”: “”,
“tags”: [“gameplay”, “realism”, “multiplayer”, “roleplay”],
“creator”: “Game Master”,
“character_version”: “1.0”,
“alternate_greetings”: [“Welcome to a day in Towny! What’s your first move?”],
“extensions”: {
“talkativeness”: “0.5”,
“fav”: false,
“world”: “Towny Multiplayer”,
“depth_prompt”: {
“prompt”: “You are part of the Towny Multiplayer world. Players interact with each other within the confines of a realistic small town. Activities must be described realistically, and actions follow a logical sequence within the game world. You, as the AI, manage the consequences of actions, private player messages, and overall story progression, ensuring the main rule and other specific gameplay rules are always adhered to. Remember, realism is key.”,
“depth”: 4
}
}
},
“create_date”: “2024-3-27 @20h 43m 27s 516ms”
}
|
e06411ad7b6214312695a4b9bb362a14
|
{
"intermediate": 0.3471803367137909,
"beginner": 0.45787468552589417,
"expert": 0.19494491815567017
}
|
44,263
|
You probably know a lot about storytelling, narrative, verbal role-playing games and board games? Both from more artistic, literature angle, and more technical, game design ones, right?
|
57c3bfea47f07a0659815f3dfb4ff330
|
{
"intermediate": 0.34948787093162537,
"beginner": 0.3366125822067261,
"expert": 0.31389957666397095
}
|
44,264
|
[HttpGet]
public HttpResponseMessage Admin(int id)
{
var admin = repository.Pa_Admin.Get(a => a.PAA_ID == id).FirstOrDefault();
if (admin == null)
{
return Request.CreateResponse(HttpStatusCode.NotFound);
}
var result = new Admin(admin, repository);
return Request.CreateResponse(HttpStatusCode.OK, result);
} it's my api endpoint in my controller
|
9ebc194c627959f81d4ba762cd028a10
|
{
"intermediate": 0.2939070761203766,
"beginner": 0.4723915457725525,
"expert": 0.23370134830474854
}
|
44,265
|
Please port my prompt request for gamemaster neural network to the given json file. This file acts as something of a personality for the ii. You can tweak it a bit, but I need it to perform the required tasks in the form of correct game play.
This is my prompt:
Towny Multiplayer - a game where multiple players live a life in a small town, engage in various activities, and live their lives.
## Game lore
1. The town is divided into several districts including different buildings and structures:
- Greenwood district: It contains a two-story house with five apartments, where players can live in one of these apartments. Each apartment has three rooms - a bedroom, a bathroom combined with a toilet, and a kitchen. In the bedroom, there is a bed, a table, and a wardrobe where players can store items. Apart from the residential house, the Greenwood district also has a small park, which includes a couple of benches and a fountain.
- Compstone street: This street is connected by road to the Greenwood district. Here, there is a 24/7 shop where you can buy many different goods.
- Pine forest: Right after Compstone street, there is a forest. In this forest flows a small river, and there is also a forester’s house that has not been entered for a long time.
Locations on the game world map are situated as follows:
1. Greenwood district
2. Compstone street
3. Pine forest
Upon arrival, each player has a simple pager that allows players to write to each other. In the forest, the connection may not always work.
## Game Rules
0. “THE MAIN RULE OF THE GAME” (This rule should NEVER be broken)
At the beginning of the game, AI receives a message with the names and descriptions of each character. Players write actions that their characters should perform. Also, a player can write ‘Nothing’, in which case their character does absolutely nothing and just stands still. After this, AI forms a response. Before the response, AI uses the [thinking] template, in which it contemplates. The following are PERSONAL conclusions for the players, these are text messages intended only for a specific player, describing what is happening with his character at that moment. In each PERSONAL conclusion, there is a [[stats]] field in which the status of the player’s character is described in very brief form - his health, inventory.
# Example of a typical prompt
Input:
[john_veller: A young man named John Veller, of average height, lives in apartment number four, married]
[jane_veller: A woman named Jane Veller, tall, lives in apartment number four, married]
Output:
[thinking_start]
There are two people in apartment number four, they are in the bedroom
[thinking_end]
[john_veller: You are in the room where there is a bed and next to you stands a woman [[health: ok, pager]] ]
[john_veller: Standing in the room, your gaze is directed towards the wall, next to you also stands a young man [[health: ok, pager]] ]
Input:
[john_veller: Nothing]
[jane_veller: Throw the pager at the wall, then leave the room]
Output:
[thinking_start]
Being in the apartment, the woman threw her pager against the wall with force, after which it got damaged in the form of a broken screen and casing
[thinking_end]
[john_veller: You see how the woman who just stood with you, suddenly threw her pager against the wall, then left the room leaving you alone with yourself [[health: ok, pager]] ]
[john_veller: You forcefully threw your pager at the wall, leading to its destruction. After that, you left the room for the corridor. In the corridor, there is a mirror and a chest of drawers under it, and also several doors [[health: ok]] ]
AI MUST follow this promt FOREVER.
1. A player can die or receive various kinds of damage after different situations. In case of death, a player is knocked out of the game and can no longer interact with the world, all their subsequent commands are simply ignored.
2. AI remembers the player’s condition and applies it during the game. If a player has any damage, it will affect the course of their game. For example, the absence of an eye will not allow the player to navigate visually.
3. The player controls their own character, AI cannot interfere with the control of the character by the player themselves.
4. Under no circumstances does AI allow players to cheat and deceive, a player cannot gain any magical abilities, or just create any item out of nowhere.
5. A player cannot just move from one location to another, they need to travel through routes passing through districts.
6. One player cannot act for another player.
7. AI cannot change the words that one player says to another. If a player is sending a message from a pager while in the forest, AI may add interference to the transmitted message.
8. This game is hyper-realistic. The AI has to respect the realism in the game and not let the player get "lazy" in terms of describing actions. That is, the player can't "go to the store", he has to get up from a chair, open a door with a key, go down the stairs, and so on.
This is my JSON character template:
{
"name": "1",
"description": "",
"personality": "",
"scenario": "",
"first_mes": "",
"mes_example": "",
"creatorcomment": "",
"avatar": "none",
"chat": "1 - 2024-3-27 @20h 43m 27s 490ms",
"talkativeness": "0.5",
"fav": false,
"tags": [],
"spec": "chara_card_v2",
"spec_version": "2.0",
"data": {
"name": "1",
"description": "",
"personality": "",
"scenario": "",
"first_mes": "",
"mes_example": "",
"creator_notes": "",
"system_prompt": "",
"post_history_instructions": "",
"tags": [],
"creator": "",
"character_version": "",
"alternate_greetings": [],
"extensions": {
"talkativeness": "0.5",
"fav": false,
"world": "",
"depth_prompt": {
"prompt": "",
"depth": 4
}
}
},
"create_date": "2024-3-27 @20h 43m 27s 516ms"
}
|
dac7594fcafc66c9ed621608bb38a7fc
|
{
"intermediate": 0.3741821050643921,
"beginner": 0.42236435413360596,
"expert": 0.20345352590084076
}
|
44,266
|
# GBNF Guide
GBNF (GGML BNF) is a format for defining [formal grammars](https://en.wikipedia.org/wiki/Formal_grammar) to constrain model outputs in `llama.cpp`. For example, you can use it to force the model to generate valid JSON, or speak only in emojis. GBNF grammars are supported in various ways in `examples/main` and `examples/server`.
## Background
[Bakus-Naur Form (BNF)](https://en.wikipedia.org/wiki/Backus%E2%80%93Naur_form) is a notation for describing the syntax of formal languages like programming languages, file formats, and protocols. GBNF is an extension of BNF that primarily adds a few modern regex-like features.
## Basics
In GBNF, we define *production rules* that specify how a *non-terminal* (rule name) can be replaced with sequences of *terminals* (characters, specifically Unicode [code points](https://en.wikipedia.org/wiki/Code_point)) and other non-terminals. The basic format of a production rule is `nonterminal ::= sequence...`.
## Example
Before going deeper, let's look at some of the features demonstrated in `grammars/chess.gbnf`, a small chess notation grammar:
|
7c9105a038ab9d62c5ce66572cf089ce
|
{
"intermediate": 0.29538941383361816,
"beginner": 0.43748989701271057,
"expert": 0.2671206593513489
}
|
44,267
|
in reaper lua, how do i change the take's pitch shifter to elastique pro, high formants
|
7e2afa15dabffbf0e9686b989bac6657
|
{
"intermediate": 0.30759748816490173,
"beginner": 0.2982232868671417,
"expert": 0.39417919516563416
}
|
44,268
|
Hello
|
5a23f452a43591f1d0bfc53dabcb1485
|
{
"intermediate": 0.3123404085636139,
"beginner": 0.2729349136352539,
"expert": 0.4147246778011322
}
|
44,269
|
in python grab the first word of a pandas series
|
4840ac40009454c477106d27cf266b34
|
{
"intermediate": 0.42509040236473083,
"beginner": 0.33125582337379456,
"expert": 0.24365375936031342
}
|
44,270
|
i using following code to convert timestamp to date:
# Convert the timestamp column to datetime format
df['Date'] = pd.to_datetime(df['Date'], unit='ms')
df['Date'] = df['Date'].dt.strftime('%m/%d/%Y')
but it returns same date for :
1711500000000 and 1710550000000
|
3cb147a1af05608822b3254064625df4
|
{
"intermediate": 0.4812385141849518,
"beginner": 0.19966794550418854,
"expert": 0.31909358501434326
}
|
44,271
|
grep: Непарная ( или \(
awk: line 1: regular expression compile failed (missing operand)
^\*{{ent|
|
66194d3ea97ee854aef72533d42ad05e
|
{
"intermediate": 0.3444088399410248,
"beginner": 0.321240097284317,
"expert": 0.3343510925769806
}
|
44,272
|
i using following code to convert timestamp to date:
# Convert the timestamp column to datetime format
df[‘Date’] = pd.to_datetime(df[‘Date’], unit=‘ms’)
df[‘Date’] = df[‘Date’].dt.strftime(‘%m/%d/%Y’)
but it returns same date for :
1711500000000 and 1710550000000
|
b02ad196037515e994eb4693c0199efe
|
{
"intermediate": 0.4311164915561676,
"beginner": 0.21615347266197205,
"expert": 0.3527299761772156
}
|
44,273
|
. Control Task Progression
To ensure that a change task moves from “Open” to “Work in Progress” only after the preceding task is closed, consider following a two-step approach using scripting:
1. UI Action: For Manual Status Change
- Create a UI Action on the Change Task table for users to manually initiate the status change.
- Name: Begin Work
- Table: Change Task (change_task)
- Action name: begin_work
- Script:
if (gs.hasRole('change_manager')) { // Check if user has appropriate role
var gr = new GlideRecord('change_task');
gr.addQuery('change_request', current.change_request);
gr.addQuery('order', '<', current.order);
gr.addQuery('state', '!=', 'Closed'); // Assuming ‘Closed’ is the state to check, adjust based on your instance
gr.query();
if (!gr.hasNext()) {
current.state = 'Work in Progress'; // Adjust the field and value based on your requirements
current.update();
action.setRedirectURL(current);
} else {
gs.addErrorMessage('Previous tasks must be completed before starting this task.');
}
} else {
gs.addErrorMessage('You do not have permissions to perform this action.');
}
|
98284c65b500cca2f21525382e03e9dd
|
{
"intermediate": 0.3784995377063751,
"beginner": 0.3685522973537445,
"expert": 0.25294819474220276
}
|
44,274
|
. Control Task Progression
To ensure that a change task moves from “Open” to “Work in Progress” only after the preceding task is closed, consider following a two-step approach using scripting:
1. UI Action: For Manual Status Change
- Create a UI Action on the Change Task table for users to manually initiate the status change.
- Name: Begin Work
- Table: Change Task (change_task)
- Action name: begin_work
- Script:
if (gs.hasRole('change_manager')) { // Check if user has appropriate role
var gr = new GlideRecord('change_task');
gr.addQuery('change_request', current.change_request);
gr.addQuery('order', '<', current.order);
gr.addQuery('state', '!=', 'Closed'); // Assuming ‘Closed’ is the state to check, adjust based on your instance
gr.query();
if (!gr.hasNext()) {
current.state = 'Work in Progress'; // Adjust the field and value based on your requirements
current.update();
action.setRedirectURL(current);
} else {
gs.addErrorMessage('Previous tasks must be completed before starting this task.');
}
} else {
gs.addErrorMessage('You do not have permissions to perform this action.');
}
I tried to close the second task and it will closed , i want that only task is closed when prior is closed
|
4a1d4e9ddf1e80ff1da35b65e5df869a
|
{
"intermediate": 0.4189005494117737,
"beginner": 0.2820107042789459,
"expert": 0.2990887761116028
}
|
44,275
|
what is the meaning of life
|
535ff79df848a2be147405bee3111c37
|
{
"intermediate": 0.4129807651042938,
"beginner": 0.40981653332710266,
"expert": 0.17720270156860352
}
|
44,276
|
Can you generate a python code that take data from an excel file and printing in another excel file
|
5e794ac63a0ae6b314dd0e7b1195a65a
|
{
"intermediate": 0.5470831990242004,
"beginner": 0.12917809188365936,
"expert": 0.3237387239933014
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.