row_id (int64, 0-48.4k) | init_message (string, length 1-342k) | conversation_hash (string, length 32) | scores (dict)
|---|---|---|---|
46,888
|
# Recalculate with explicit column creation for RewardToRisk
assets <- assets %>%
mutate(RewardToRisk = MedianReturn / StandardDeviation)
# Now proceed with Strategy 1 selection, ensuring the RewardToRisk column exists.
strategy1_selection <- assets %>%
arrange(desc(RewardToRisk)) %>%
group_by(Category) %>%
mutate(rank = row_number()) %>%
ungroup() %>%
filter((Category == "Forex" & rank == 1) | (Category == "Commodities" & rank == 1) | Category == "Stocks") %>%
arrange(desc(RewardToRisk)) %>%
head(5)
if (nrow(filter(strategy1_selection, Category == "Forex")) < 1 || nrow(filter(strategy1_selection, Category == "Commodities")) < 1) {
print("Adjust selection to meet criteria")
} else {
print("Strategy 1 Selection:")
print(strategy1_selection)
}
# Attempt Strategy 2 selection
strategy2_selection <- assets %>%
arrange(PERatio) %>%
group_by(Category) %>%
mutate(rank = row_number()) %>%
ungroup() %>%
filter((Category == "Forex" & rank == 1) | (Category == "Commodities" & rank == 1) | Category == "Stocks") %>%
arrange(PERatio) %>%
head(5)
if (nrow(filter(strategy2_selection, Category == "Forex")) < 1 || nrow(filter(strategy2_selection, Category == "Commodities")) < 1) {
print("Adjust selection to meet criteria")
} else {
print("Strategy 2 Selection:")
print(strategy2_selection)
}
# Export (Choose accordingly based on your selected strategy)
write.csv(strategy1_selection, "Strategy1Selection.csv")
write.csv(strategy2_selection, "Strategy2Selection.csv") # Use write.xlsx for .xlsx files, but that requires the openxlsx or writexl package.
Error in h(simpleError(msg, call)) :
error in evaluating the argument 'x' in selecting a method for function 'head': object 'Category' not found
|
978f59f1895f9a5942b19cfbee81dddb
|
{
"intermediate": 0.45928165316581726,
"beginner": 0.32478970289230347,
"expert": 0.21592862904071808
}
|
46,889
|
{r}
# Define the data for Strategy 1
strategy1_selection <- data.frame(
Asset = c("T", "PFE", "XAU", "GOOGL", "USDINR"),
Median_Return = c(0.009617963, 0.002619454, 0.001102195, -0.002813010, -0.003632318),
Standard_Deviation = c(0.04303437, 0.01576457, 0.03418468, 0.04782845, 0.04273717)
)
# Calculate reward-to-risk ratios for Strategy 1
strategy1_selection$Reward_to_Risk <- strategy1_selection$Median_Return / strategy1_selection$Standard_Deviation
# Define the data for Strategy 2
strategy2_selection <- data.frame(
Asset = c("EURUSD", "NG", "PFE", "GOOGL", "T"),
Median_Return = c(-0.006003852, -0.005380718, 0.002619454, -0.002813010, 0.009617963),
Standard_Deviation = c(0.03204665, 0.04763956, 0.01576457, 0.04782845, 0.04303437),
Reward_to_Risk = c(-0.18734726, -0.11294642, -0.05881458, 0.16616080, 0.22349493)
)
# Calculate reward-to-risk ratios for Strategy 2
strategy2_selection$Reward_to_Risk <- strategy2_selection$Median_Return / strategy2_selection$Standard_Deviation
# Total reward-to-risk ratio for each strategy
total_ratio_strategy1 <- sum(strategy1_selection$Reward_to_Risk)
total_ratio_strategy2 <- sum(strategy2_selection$Reward_to_Risk)
# Calculate weights for each strategy
weights_strategy1 <- (strategy1_selection$Reward_to_Risk / total_ratio_strategy1) * 100
weights_strategy2 <- (strategy2_selection$Reward_to_Risk / total_ratio_strategy2) * 100
# Print weights for Strategy 1
cat("Weights for Strategy 1:\n")
for (i in 1:nrow(strategy1_selection)) {
cat(strategy1_selection$Asset[i], ":", round(weights_strategy1[i], 2), "%\n")
}
cat("\n")
# Print weights for Strategy 2
cat("Weights for Strategy 2:\n")
for (i in 1:nrow(strategy2_selection)) {
cat(strategy2_selection$Asset[i], ":", round(weights_strategy2[i], 2), "%\n")
}
Add to the above code the weight allocation for both strategies according to the GMVP without short selling and the tangency portfolio with short selling. Additionally, comment on the portfolio return and portfolio risk measures estimated while allocating weights using the objective functions mentioned below.
Global Minimum Variance Portfolio - without short selling
Tangency Portfolio - with short selling
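For reference, the textbook closed-form weights for these two objectives, under the assumption of a covariance matrix Σ, mean-return vector μ, and risk-free rate r_f (none of which are computed in the code above), are:

```latex
w_{\mathrm{GMVP}} = \frac{\Sigma^{-1}\mathbf{1}}{\mathbf{1}^{\top}\Sigma^{-1}\mathbf{1}},
\qquad
w_{\mathrm{tan}} = \frac{\Sigma^{-1}(\mu - r_f\mathbf{1})}{\mathbf{1}^{\top}\Sigma^{-1}(\mu - r_f\mathbf{1})},
\qquad
\mu_p = w^{\top}\mu, \quad \sigma_p = \sqrt{w^{\top}\Sigma w}
```

Without short selling the GMVP additionally imposes w_i >= 0, which has no closed form and is normally handled by a quadratic-programming solver (for example quadprog or PortfolioAnalytics in R).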
|
8562f636cd62e4233de51c929b494552
|
{
"intermediate": 0.36140134930610657,
"beginner": 0.332828164100647,
"expert": 0.30577051639556885
}
|
46,890
|
Sub CreateHourlySummary()
Dim rawDataSheet As Worksheet
Dim summarySheet As Worksheet
Dim lastRow As Long
Dim lastCol As Long
Dim i As Long
Dim j As Long
' Add a new sheet for the raw data
Set rawDataSheet = Worksheets.Add(After:=ActiveSheet)
rawDataSheet.Name = "Raw Data"
' Import the CSV file into the raw data sheet
With rawDataSheet.QueryTables.Add(Connection:="TEXT;" & Application.GetOpenFilename(FileFilter:="CSV Files (*.csv),*.csv"), Destination:=rawDataSheet.Range("A1"))
.TextFileParseType = xlDelimited
.TextFileCommaDelimiter = True
.Refresh
End With
' Add a new sheet for the summary table
Set summarySheet = Worksheets.Add(After:=rawDataSheet)
summarySheet.Name = "Hourly Summary"
' Add column headers for each hour
For i = 0 To 23
summarySheet.Cells(1, i + 2).Value = Format(i, "00") & ":00"
Next i
summarySheet.Cells(1, 1).Value = "Date"
' Find the last row and last column of the raw data
lastRow = rawDataSheet.Cells(rawDataSheet.Rows.Count, 1).End(xlUp).Row
lastCol = rawDataSheet.Cells(1, rawDataSheet.Columns.Count).End(xlToLeft).Column
' Loop through the raw data and count the hits for each hour
For i = 2 To lastRow
Dim dateVal As Date
dateVal = CDate(Left(rawDataSheet.Cells(i, 1).Value, 10))
If Application.CountIf(summarySheet.Columns(1), dateVal) = 0 Then
summarySheet.Cells(summarySheet.Rows.Count, 1).End(xlUp).Offset(1).Value = dateVal
End If
Dim hour As Integer
hour = Hour(rawDataSheet.Cells(i, 1).Value)
Dim summaryRow As Long
summaryRow = Application.Match(dateVal, summarySheet.Columns(1), 0)
summarySheet.Cells(summaryRow, hour + 2).Value = summarySheet.Cells(summaryRow, hour + 2).Value + 1
Next i
' Format the summary table
With summarySheet
.Rows(1).Font.Bold = True
.Columns.AutoFit
End With
summarySheet.Activate
End Sub
@timestamp,"_id","_index","_score","agent.ephemeral_id","agent.id","agent.name","agent.type","agent.version","client.domain","client.geo.city_name","client.geo.continent_name","client.geo.country_iso_code","client.geo.country_name","client.geo.location","client.geo.region_iso_code","client.geo.region_name","client.ip","client.user.group.name","client.user.name","cloud.availability_zone","cloud.instance.id","cloud.provider","cloud.region","cloud.service.name","cs(User-Agent)","data_stream.dataset","data_stream.namespace","data_stream.type","destination.ip","destination.ipm.block","destination.ipm.class","destination.ipm.name","destination.ipm.site","destination.ipm.subnet","ecs.version","elastic_agent.id","elastic_agent.snapshot","elastic_agent.version","event.action","event.agent_id_status","event.category","event.dataset","event.id","event.ingested","event.kind","event.outcome","event.reason","event.type","host.architecture","host.containerized","host.hostname","host.id","host.ip","host.mac","host.name","host.os.codename","host.os.family","host.os.kernel","host.os.name","host.os.platform","host.os.type","host.os.version","http.request.body.icap","http.request.bytes","http.request.method","http.request.referer","http.response.body.icap","http.response.bytes","http.response.mime_type","http.response.status_code","input.type","log.source.address",message,"observer.domain","observer.domain.text","observer.ip","related.hosts","related.ip","server.ip","server.ipm.block","server.ipm.class","server.ipm.name","server.ipm.site","server.ipm.subnet","sgos.time-taken","source.domain","source.geo.city_name","source.geo.continent_name","source.geo.country_iso_code","source.geo.country_name","source.geo.location","source.geo.region_iso_code","source.geo.region_name","source.ip","source.user.group.name","source.user.name","url.domain","url.extension","url.full","url.path","url.port","url.query","url.scheme","user_agent.device.name","user_agent.name","user_agent.original","user_agent.original.text","user_agent.os.full","user_agent.os.full.text","user_agent.os.name","user_agent.os.version","user_agent.version","x-virus-id"
Mar 31, 2024 @ 22:16:54.000,xBcqlo4ByVELpIfqFh7m,".ds-logs-sgos.access-prod-2024.03.17-000021","-","7c738332-528d-46bf-bf68-c08c3fe18866","f27bdb1c-61eb-4e5b-9334-e824c2af2713",lx05061p,filebeat,"8.8.1","-","-",Europe,GB,"United Kingdom","POINT (-0.1224 51.4964)","-","-","185.191.171.1","-","-","eu-west-0c","c46444d9-7427-439e-acca-81ef60e2fcef",huawei,"(empty)",ECS,"Mozilla/5.0 (compatible SemrushBot/7~bl +http://www.semrush.com/bot.html)","sgos.access",prod,logs,"10.238.73.75","DC VdR - Val-de-Reuil - OBS-IT - HQ2E",Server,"vs300-farm-domino-75.dc.uro.equant.com","Equant Intranet","OBSIT VDR - Bigip PELAGOS Farms","8.0.0","f27bdb1c-61eb-4e5b-9334-e824c2af2713",false,"8.8.1","TCP_NC_MISS",verified,web,"sgos.access","1afd0fd740e90a7d-000000002c0bef03-000000006609c4b1","Mar 31, 2024 @ 22:18:44.000",event,success,"-",access,"x86_64",false,lx05061p,836606b8bdf347b4819a98eff14bae42,"10.235.82.8, fe80::f816:3eff:fe4d:b46a","FA-16-3E-4D-B4-6A",lx05061p,"Green Obsidian",redhat,"4.18.0-372.13.1.el8_6.x86_64","Rocky Linux",rocky,linux,"8.6 (Green Obsidian)","-",228,GET,"-","-","12,188","text/html %20charset=UTF-8",200,tcp,"100.125.22.51:11964","2024-03-31 20:16:54 201 185.191.171.1 - - - PROXIED ""none"" - 200 TCP_NC_MISS GET text/html %20charset=UTF-8 https clstx0304-i.equant.com 443 /nuar/nuarweb_cp.nsf - nsf ""Mozilla/5.0 (compatible SemrushBot/7~bl +http://www.semrush.com/bot.html)"" 194.2.70.248 12188 228 - ""unavailable"" ""unavailable"" - ""stork1"" 10.238.73.75 1afd0fd740e90a7d-000000002c0bef03-000000006609c4b1 - -",stork1,stork1,"194.2.70.248","clstx0304-i.equant.com, stork1","10.235.82.8, fe80::f816:3eff:fe4d:b46a, 185.191.171.1, 194.2.70.248, 10.238.73.75","10.238.73.75","DC VdR - Val-de-Reuil - OBS-IT - HQ2E",Server,"vs300-farm-domino-75.dc.uro.equant.com","Equant Intranet","OBSIT VDR - Bigip PELAGOS Farms",201,"-","-",Europe,GB,"United Kingdom","POINT (-0.1224 51.4964)","-","-","185.191.171.1","-","-","clstx0304-i.equant.com",nsf,"https://clstx0304-i.equant.com:443/nuar/nuarweb_cp.nsf","/nuar/nuarweb_cp.nsf",443,"-",https,Spider,SemrushBot,"Mozilla/5.0 (compatible SemrushBot/7~bl +http://www.semrush.com/bot.html)","Mozilla/5.0 (compatible SemrushBot/7~bl +http://www.semrush.com/bot.html)","-","-","-","-",7,"-"
Mar 31, 2024 @ 21:53:41.000,"VBYUlo4ByVELpIfqo_C7",".ds-logs-sgos.access-prod-2024.03.17-000021","-","5d1c9eb4-ade7-4544-bc9e-489db019280f","f27bdb1c-61eb-4e5b-9334-e824c2af2713",lx05061p,filebeat,"8.8.1","-","-","North America",US,"United States","POINT (-97.822 37.751)","-","-","66.249.75.199","-","-","eu-west-0c","c46444d9-7427-439e-acca-81ef60e2fcef",huawei,"(empty)",ECS,"Mozilla/5.0 AppleWebKit/537.36 (KHTML, like Gecko compatible Googlebot/2.1 +http://www.google.com/bot.html) Chrome/122.0.6261.94 Safari/537.36","sgos.access",prod,logs,"10.238.73.75","DC VdR - Val-de-Reuil - OBS-IT - HQ2E",Server,"vs300-farm-domino-75.dc.uro.equant.com","Equant Intranet","OBSIT VDR - Bigip PELAGOS Farms","8.0.0","f27bdb1c-61eb-4e5b-9334-e824c2af2713",false,"8.8.1","TCP_NC_MISS",verified,web,"sgos.access","1afd0fd740e90a7d-000000002c0be9a2-000000006609bf45","Mar 31, 2024 @ 21:55:18.000",event,success,"-",access,"x86_64",false,lx05061p,836606b8bdf347b4819a98eff14bae42,"10.235.82.8, fe80::f816:3eff:fe4d:b46a","FA-16-3E-4D-B4-6A",lx05061p,"Green Obsidian",redhat,"4.18.0-372.13.1.el8_6.x86_64","Rocky Linux",rocky,linux,"8.6 (Green Obsidian)","-",733,GET,"https://www.nuar.fr.orange-business.com/nuar/nuarweb_cp.nsf","-","2,443","text/html %20charset=UTF-8",200,tcp,"100.125.22.58:11964","2024-03-31 19:53:41 17 66.249.75.199 - - - PROXIED ""none"" https://www.nuar.fr.orange-business.com/nuar/nuarweb_cp.nsf 200 TCP_NC_MISS GET text/html %20charset=UTF-8 https clstx0304-i.equant.com 443 /portaleq.nsf/mLockout ?OpenForm - ""Mozilla/5.0 AppleWebKit/537.36 (KHTML, like Gecko compatible Googlebot/2.1 +http://www.google.com/bot.html) Chrome/122.0.6261.94 Safari/537.36"" 194.2.70.248 2443 733 - ""unavailable"" ""unavailable"" - ""stork1"" 10.238.73.75 1afd0fd740e90a7d-000000002c0be9a2-000000006609bf45 - -",stork1,stork1,"194.2.70.248","clstx0304-i.equant.com, stork1","10.235.82.8, fe80::f816:3eff:fe4d:b46a, 66.249.75.199, 194.2.70.248, 10.238.73.75","10.238.73.75","DC VdR - Val-de-Reuil - OBS-IT - HQ2E",Server,"vs300-farm-domino-75.dc.uro.equant.com","Equant Intranet","OBSIT VDR - Bigip PELAGOS Farms",17,"-","-","North America",US,"United States","POINT (-97.822 37.751)","-","-","66.249.75.199","-","-","clstx0304-i.equant.com","-","https://clstx0304-i.equant.com:443/portaleq.nsf/mLockout?OpenForm","/portaleq.nsf/mLockout",443,"?OpenForm",https,Spider,Googlebot,"Mozilla/5.0 AppleWebKit/537.36 (KHTML, like Gecko compatible Googlebot/2.1 +http://www.google.com/bot.html) Chrome/122.0.6261.94 Safari/537.36","Mozilla/5.0 AppleWebKit/537.36 (KHTML, like Gecko compatible Googlebot/2.1 +http://www.google.com/bot.html) Chrome/122.0.6261.94 Safari/537.36","-","-","-","-","2.1","-"
Mar 31, 2024 @ 21:53:36.000,"UhYUlo4ByVELpIfqo_C7",".ds-logs-sgos.access-prod-2024.03.17-000021","-","5d1c9eb4-ade7-4544-bc9e-489db019280f","f27bdb1c-61eb-4e5b-9334-e824c2af2713",lx05061p,filebeat,"8.8.1","-","-","North America",US,"United States","POINT (-97.822 37.751)","-","-","66.249.75.199","-","-","eu-west-0c","c46444d9-7427-439e-acca-81ef60e2fcef",huawei,"(empty)",ECS,"Mozilla/5.0 (compatible Googlebot/2.1 +http://www.google.com/bot.html)","sgos.access",prod,logs,"10.238.73.75","DC VdR - Val-de-Reuil - OBS-IT - HQ2E",Server,"vs300-farm-domino-75.dc.uro.equant.com","Equant Intranet","OBSIT VDR - Bigip PELAGOS Farms","8.0.0","f27bdb1c-61eb-4e5b-9334-e824c2af2713",false,"8.8.1","TCP_NC_MISS",verified,web,"sgos.access","1afd0fd740e90a7d-000000002c0be99e-000000006609bf3b","Mar 31, 2024 @ 21:55:18.000",event,success,"-",access,"x86_64",false,lx05061p,836606b8bdf347b4819a98eff14bae42,"10.235.82.8, fe80::f816:3eff:fe4d:b46a","FA-16-3E-4D-B4-6A",lx05061p,"Green Obsidian",redhat,"4.18.0-372.13.1.el8_6.x86_64","Rocky Linux",rocky,linux,"8.6 (Green Obsidian)","-",319,GET,"-","-","12,193","text/html %20charset=UTF-8",200,tcp,"100.125.22.58:11964","2024-03-31 19:53:36 244 66.249.75.199 - - - PROXIED ""none"" - 200 TCP_NC_MISS GET text/html %20charset=UTF-8 https clstx0304-i.equant.com 443 /nuar/nuarweb_cp.nsf - nsf ""Mozilla/5.0 (compatible Googlebot/2.1 +http://www.google.com/bot.html)"" 194.2.70.248 12193 319 - ""unavailable"" ""unavailable"" - ""stork1"" 10.238.73.75 1afd0fd740e90a7d-000000002c0be99e-000000006609bf3b - -",stork1,stork1,"194.2.70.248","clstx0304-i.equant.com, stork1","10.235.82.8, fe80::f816:3eff:fe4d:b46a, 66.249.75.199, 194.2.70.248, 10.238.73.75","10.238.73.75","DC VdR - Val-de-Reuil - OBS-IT - HQ2E",Server,"vs300-farm-domino-75.dc.uro.equant.com","Equant Intranet","OBSIT VDR - Bigip PELAGOS Farms",244,"-","-","North America",US,"United States","POINT (-97.822 37.751)","-","-","66.249.75.199","-","-","clstx0304-i.equant.com",nsf,"https://clstx0304-i.equant.com:443/nuar/nuarweb_cp.nsf","/nuar/nuarweb_cp.nsf",443,"-",https,Spider,Googlebot,"Mozilla/5.0 (compatible Googlebot/2.1 +http://www.google.com/bot.html)","Mozilla/5.0 (compatible Googlebot/2.1 +http://www.google.com/bot.html)","-","-","-","-","2.1","-"
Mar 31, 2024 @ 21:45:12.000,"TRYNlo4ByVELpIfqCd_J",".ds-logs-sgos.access-prod-2024.03.17-000021","-","3ace888b-3805-4a32-8302-e31b680a7aac","f27bdb1c-61eb-4e5b-9334-e824c2af2713",lx05061p,filebeat,"8.8.1","-","Culver City","North America",US,"United States","POINT (-118.3983 34.0141)","US-CA",California,"165.85.174.156","-","-","eu-west-0c","c46444d9-7427-439e-acca-81ef60e2fcef",huawei,"(empty)",ECS,"Mozilla/5.0 (Windows NT 10.0 Win64 x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/123.0.0.0 Safari/537.36","sgos.access",prod,logs,"10.238.73.75","DC VdR - Val-de-Reuil - OBS-IT - HQ2E",Server,"vs300-farm-domino-75.dc.uro.equant.com","Equant Intranet","OBSIT VDR - Bigip PELAGOS Farms","8.0.0","f27bdb1c-61eb-4e5b-9334-e824c2af2713",false,"8.8.1","TCP_NC_MISS",verified,web,"sgos.access","1afd0fd740e90a7d-000000002c0be76a-000000006609bd48","Mar 31, 2024 @ 21:47:00.000",event,success,"-",access,"x86_64",false,lx05061p,836606b8bdf347b4819a98eff14bae42,"10.235.82.8, fe80::f816:3eff:fe4d:b46a","FA-16-3E-4D-B4-6A",lx05061p,"Green Obsidian",redhat,"4.18.0-372.13.1.el8_6.x86_64","Rocky Linux",rocky,linux,"8.6 (Green Obsidian)","-","1,189",GET,"https://www.nuar.fr.orange-business.com/nuar/nuarweb_cp.nsf","-",618,"image/gif",200,tcp,"100.125.22.60:11964","2024-03-31 19:45:12 15 165.85.174.156 - - - PROXIED ""none"" https://www.nuar.fr.orange-business.com/nuar/nuarweb_cp.nsf 200 TCP_NC_MISS GET image/gif https clstx0304-i.equant.com 443 /portaleq.nsf/tableLeft.gif - gif ""Mozilla/5.0 (Windows NT 10.0 Win64 x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/123.0.0.0 Safari/537.36"" 194.2.70.248 618 1189 - ""unavailable"" ""unavailable"" - ""stork1"" 10.238.73.75 1afd0fd740e90a7d-000000002c0be76a-000000006609bd48 - -",stork1,stork1,"194.2.70.248","clstx0304-i.equant.com, stork1","10.235.82.8, fe80::f816:3eff:fe4d:b46a, 165.85.174.156, 194.2.70.248, 10.238.73.75","10.238.73.75","DC VdR - Val-de-Reuil - OBS-IT - HQ2E",Server,"vs300-farm-domino-75.dc.uro.equant.com","Equant Intranet","OBSIT VDR - Bigip PELAGOS Farms",15,"-","Culver City","North America",US,"United States","POINT (-118.3983 34.0141)","US-CA",California,"165.85.174.156","-","-","clstx0304-i.equant.com",gif,"https://clstx0304-i.equant.com:443/portaleq.nsf/tableLeft.gif","/portaleq.nsf/tableLeft.gif",443,"-",https,Other,Chrome,"Mozilla/5.0 (Windows NT 10.0 Win64 x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/123.0.0.0 Safari/537.36","Mozilla/5.0 (Windows NT 10.0 Win64 x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/123.0.0.0 Safari/537.36","Windows 10","Windows 10",Windows,10,"123.0.0.0","-"
Dim Hour As Integer
Hour = Hour(rawDataSheet.Cells(i, 1).Value): error "Expected array"
|
68ee653a3888e8b02f217b7ef5ec693a
|
{
"intermediate": 0.2841053605079651,
"beginner": 0.41719067096710205,
"expert": 0.29870396852493286
}
|
46,891
|
Implement an at-least-once Kafka consumer in Python.
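A minimal sketch with the kafka-python package, assuming a broker at localhost:9092 and a topic named events (both placeholders): auto-commit is disabled and offsets are committed only after the records have been processed, so a crash before the commit causes reprocessing rather than data loss.

```python
from kafka import KafkaConsumer  # pip install kafka-python

def process(record):
    # Placeholder for real work. It should be idempotent (or tolerate duplicates),
    # because at-least-once delivery can replay records after a crash.
    print(record.topic, record.partition, record.offset, record.value)

consumer = KafkaConsumer(
    "events",                            # hypothetical topic name
    bootstrap_servers="localhost:9092",  # hypothetical broker address
    group_id="my-consumer-group",
    enable_auto_commit=False,            # commit manually, after processing
    auto_offset_reset="earliest",
)

try:
    while True:
        batch = consumer.poll(timeout_ms=1000)  # dict: TopicPartition -> [records]
        for records in batch.values():
            for record in records:
                process(record)
        if batch:
            consumer.commit()  # commit offsets only once the batch is fully processed
finally:
    consumer.close()
```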
|
6eca82bb723a89159983c56ae10b0541
|
{
"intermediate": 0.4418178200721741,
"beginner": 0.30813145637512207,
"expert": 0.25005069375038147
}
|
46,892
|
log_returns <- get("log_returns") # Replace with the actual object name
Error in log_returns$Asset : $ operator is invalid for atomic vectors
|
1d12d48f495ca2949ba440a3e22caf4f
|
{
"intermediate": 0.3307780623435974,
"beginner": 0.41633355617523193,
"expert": 0.25288838148117065
}
|
46,893
|
log_returns <- get("log_returns") # Replace with the actual object name
# Select assets for Strategy 1
strategy1_assets <- c("T", "PFE", "XAU", "GOOGL", "USDINR")
strategy1_selection <- log_returns[log_returns$Asset %in% strategy1_assets, ]
# Select assets for Strategy 2
strategy2_assets <- c("EURUSD", "NG", "PFE", "GOOGL", "T")
strategy2_selection <- log_returns[log_returns$Asset %in% strategy2_assets, ]
# ... rest of your code for portfolio optimization using PortfolioAnalytics
Error in log_returns$Asset : $ operator is invalid for atomic vectors. Please correct the error and give me the fixed code.
|
d212166814aecaf933c9250dd4ca6e2e
|
{
"intermediate": 0.39389586448669434,
"beginner": 0.18726059794425964,
"expert": 0.4188435673713684
}
|
46,894
|
log_returns <- get("log_returns") # Replace with the actual object name
# Select assets for Strategy 1
strategy1_assets <- c("T", "PFE", "XAU", "GOOGL", "USDINR")
strategy1_selection <- log_returns[log_returns$Asset %in% strategy1_assets, ]
# Select assets for Strategy 2
strategy2_assets <- c("EURUSD", "NG", "PFE", "GOOGL", "T")
strategy2_selection <- log_returns[log_returns$Asset %in% strategy2_assets, ]
# ... rest of your code for portfolio optimization using PortfolioAnalytics
Error in log_returns$Asset : $ operator is invalid for atomic vectors. Please correct the error and give me the fixed code.
|
28f4b7b1f0f2d004d53e61912b4b7086
|
{
"intermediate": 0.39389586448669434,
"beginner": 0.18726059794425964,
"expert": 0.4188435673713684
}
|
46,895
|
log_returns <- get("log_returns") # Replace with the actual object name
# Select assets for Strategy 1
strategy1_assets <- c("T", "PFE", "XAU", "GOOGL", "USDINR")
strategy1_selection <- log_returns[log_returns$Asset %in% strategy1_assets, ]
# Select assets for Strategy 2
strategy2_assets <- c("EURUSD", "NG", "PFE", "GOOGL", "T")
strategy2_selection <- log_returns[log_returns$Asset %in% strategy2_assets, ]
# ... rest of your code for portfolio optimization using PortfolioAnalytics
Error in log_returns$Asset : $ operator is invalid for atomic vectors. Please correct the error and give me the fixed code.
|
38596eb448661e7f04573af016580ee4
|
{
"intermediate": 0.39389586448669434,
"beginner": 0.18726059794425964,
"expert": 0.4188435673713684
}
|
46,896
|
Sub CreateHourlySummary2()
Dim rawDataSheet As Worksheet
Dim summarySheet As Worksheet
Dim lastRow As Long
Dim i As Long
Dim dateVal As Variant
Dim hourVal As Integer
Dim summaryRow As Variant
' Add a new sheet for raw data
Set rawDataSheet = Worksheets.Add(After:=ActiveSheet)
rawDataSheet.Name = "Raw Data"
' Import the CSV file into the raw data sheet
With rawDataSheet.QueryTables.Add(Connection:="TEXT;" & Application.GetOpenFilename(FileFilter:="CSV Files (.csv),.csv"), Destination:=rawDataSheet.Range("A1"))
.TextFileParseType = xlDelimited
.TextFileCommaDelimiter = True
.Refresh
End With
' Add a new sheet for the summary
Set summarySheet = Worksheets.Add(After:=rawDataSheet)
summarySheet.Name = "Hourly Summary"
' Add column headers for each hour and "Date"
summarySheet.Cells(1, 1).Value = "Date"
For i = 0 To 23
summarySheet.Cells(1, i + 2).Value = Format(i, "00") & ":00"
Next i
' Find the last row with data in the raw data sheet
lastRow = rawDataSheet.Cells(rawDataSheet.Rows.Count, 1).End(xlUp).Row
' Loop through raw data rows starting from the second row
For i = 2 To lastRow
' Extract date and hour from "@timestamp"
dateVal = DateValue(Left(rawDataSheet.Cells(i, 1).Value, InStr(1, rawDataSheet.Cells(i, 1).Value, ",") - 1))
hourVal = Hour(TimeValue(Mid(rawDataSheet.Cells(i, 1).Value, InStr(1, rawDataSheet.Cells(i, 1).Value, "@") + 2)))
' Ensure the date exists in the summary sheet, add if not
If Application.CountIf(summarySheet.Columns(1), dateVal) = 0 Then
summarySheet.Cells(summarySheet.Rows.Count, 1).End(xlUp).Offset(1).Value = dateVal
End If
' Find the row for the current date in the summary sheet
summaryRow = Application.Match(dateVal, summarySheet.Columns(1), 0)
' Increment the counter for the specific hour
If Not IsError(summaryRow) Then
summarySheet.Cells(summaryRow, hourVal + 1).Value = summarySheet.Cells(summaryRow, hourVal + 1).Value + 1
End If
Next i
' Format the summary sheet
With summarySheet
.Rows(1).Font.Bold = True
.Columns.AutoFit
End With
summarySheet.Activate
End Sub
error here:
hourVal = Hour(TimeValue(Mid(rawDataSheet.Cells(i, 1).Value, InStr(1, rawDataSheet.Cells(i, 1).Value, "@") + 2)))
|
ea966aac69158a0221f48936f7ea49bf
|
{
"intermediate": 0.2614644169807434,
"beginner": 0.548253059387207,
"expert": 0.19028250873088837
}
|
46,897
|
log_returns <- get("log_returns") # Replace with the actual object name
# Select assets for Strategy 1
strategy1_assets <- c("T", "PFE", "XAU", "GOOGL", "USDINR")
strategy1_selection <- log_returns[log_returns$Asset %in% strategy1_assets, ]
# Select assets for Strategy 2
strategy2_assets <- c("EURUSD", "NG", "PFE", "GOOGL", "T")
strategy2_selection <- log_returns[log_returns$Asset %in% strategy2_assets, ]
# ... rest of your code for portfolio optimization using PortfolioAnalytics
Error in log_returns$Asset : $ operator is invalid for atomic vectors. Please correct the error and give me the fixed code.
|
375a2b0869bc0fcbf268545d1c712894
|
{
"intermediate": 0.39389586448669434,
"beginner": 0.18726059794425964,
"expert": 0.4188435673713684
}
|
46,898
|
log_returns <- get("log_returns") # Replace with the actual object name
# Select assets for Strategy 1
strategy1_assets <- c("T", "PFE", "XAU", "GOOGL", "USDINR")
strategy1_selection <- log_returns[log_returns$Asset %in% strategy1_assets, ]
# Select assets for Strategy 2
strategy2_assets <- c("EURUSD", "NG", "PFE", "GOOGL", "T")
strategy2_selection <- log_returns[log_returns$Asset %in% strategy2_assets, ]
# ... rest of your code for portfolio optimization using PortfolioAnalytics
Error in log_returns$Asset : $ operator is invalid for atomic vectors. Please correct the error and give me the fixed code.
|
13705b19fedeab3b77db5e16c5f0d347
|
{
"intermediate": 0.39389586448669434,
"beginner": 0.18726059794425964,
"expert": 0.4188435673713684
}
|
46,899
|
log_returns <- get("log_returns") # Replace with the actual object name
# Select assets for Strategy 1
strategy1_assets <- c("T", "PFE", "XAU", "GOOGL", "USDINR")
strategy1_selection <- log_returns[log_returns$Asset %in% strategy1_assets, ]
# Select assets for Strategy 2
strategy2_assets <- c("EURUSD", "NG", "PFE", "GOOGL", "T")
strategy2_selection <- log_returns[log_returns$Asset %in% strategy2_assets, ]
# ... rest of your code for portfolio optimization using PortfolioAnalytics
Error in log_returns$Asset : $ operator is invalid for atomic vectors. Please correct the error and give me the fixed code.
|
6a4bca2c4a3d000d560986c1efa2e164
|
{
"intermediate": 0.39389586448669434,
"beginner": 0.18726059794425964,
"expert": 0.4188435673713684
}
|
46,900
|
How to load a Groovy script in a Jenkins pipeline and use a method from the loaded script to initialize the environment?
|
77ce98117eec46d7f870f8cbd01a4d24
|
{
"intermediate": 0.5166947245597839,
"beginner": 0.1969502866268158,
"expert": 0.28635498881340027
}
|
46,901
|
Delete the lobby after the game completes in Colyseus.
|
357291bc19ad191caeb79a5cded9da81
|
{
"intermediate": 0.2955457866191864,
"beginner": 0.3458685576915741,
"expert": 0.3585856556892395
}
|
46,902
|
Scratch code:
`when 🏳️ clicked`
`🖊️ erase all`
`set a to 149597870.7`
`set e to 0.7`
`set ν to 92`
`set ω to 76`
`set Zoom to 2000000`
`set Mass to 1.98847e30`
`set t to 1`
`set RO to 0`
`🖊️ set pen color to #FFFFFF`
`🖊️ set pen size to 1`
`🖊️ pen up`
`forever`
`{`
`_set r to {a * (1 - e^2) / (1 + (e * cos of (ν)}`
`_set X to {r * cos of (ν + ω)}`
`_set Y to {r * sin of (ν + ω)}`
`_go to x: {X / Zoom} y: {Y / Zoom}`
`_🖊️ pen down`
`}`
`change RO by {(360 * (t / 8766)) / ((T * AS) / 8766)}`
`set RO to (RO module 360)`
Angular speed (AS): a balancing speed for the eccentric orbit. Because the body moves at a different rate at perihelion than at aphelion, AS adjusts the speed using the orbital-simulation formula.
Orbital period (T): the motion repeats with (orbital period * angular speed).
RO is orbital rotation
T = 8766 hours
AS (Angular speed) = ?
I'm trying to work out a formula for the angular speed. Please rewrite the Scratch code.
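For reference, the standard two-body result (an assumption about what the simulation is after, since GM appears only implicitly through the Mass variable above) relates the rate of change of the true anomaly ν to the orbit geometry through conservation of angular momentum:

```latex
h = \sqrt{G M \, a (1 - e^{2})}, \qquad
\frac{d\nu}{dt} = \frac{h}{r^{2}}, \qquad
r = \frac{a(1 - e^{2})}{1 + e\cos\nu}
```

With consistent units (a and r in metres, GM in m^3/s^2) this gives dν/dt in radians per second; multiplying by 3600 * t * 180/π converts it to degrees per simulated step of t hours, and dividing by the mean motion 360/T gives the kind of per-step correction factor the script calls AS.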
|
e74610b9abbaafa6eeb47f4daa38f2a7
|
{
"intermediate": 0.3375415503978729,
"beginner": 0.2698330283164978,
"expert": 0.3926254212856293
}
|
46,903
|
Write code for a segment tree.
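A minimal sketch, assuming the common case of range-sum queries with point updates (the request does not say which operation is needed):

```python
class SegmentTree:
    """Iterative segment tree for range-sum queries and point updates."""

    def __init__(self, data):
        self.n = len(data)
        self.tree = [0] * (2 * self.n)
        # Leaves hold the original values; internal nodes hold the sum of their children.
        for i, v in enumerate(data):
            self.tree[self.n + i] = v
        for i in range(self.n - 1, 0, -1):
            self.tree[i] = self.tree[2 * i] + self.tree[2 * i + 1]

    def update(self, i, value):
        """Set data[i] = value and refresh all ancestors."""
        i += self.n
        self.tree[i] = value
        while i > 1:
            i //= 2
            self.tree[i] = self.tree[2 * i] + self.tree[2 * i + 1]

    def query(self, left, right):
        """Sum of data[left:right] (half-open interval)."""
        res = 0
        left += self.n
        right += self.n
        while left < right:
            if left & 1:
                res += self.tree[left]
                left += 1
            if right & 1:
                right -= 1
                res += self.tree[right]
            left //= 2
            right //= 2
        return res


st = SegmentTree([2, 1, 5, 3, 4])
print(st.query(1, 4))   # 1 + 5 + 3 = 9
st.update(2, 10)
print(st.query(1, 4))   # 1 + 10 + 3 = 14
```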
|
6abb206c84765632ae42a3e722e2c874
|
{
"intermediate": 0.2204861342906952,
"beginner": 0.16194169223308563,
"expert": 0.6175721883773804
}
|
46,904
|
Scratch code:
`when 🏳️ clicked`
`🖊️ erase all`
`set a to 149597870.7`
`set e to 0.7`
`set ν to 92`
`set ω to 76`
`set Zoom to 2000000`
`set Mass to 1.98847e30`
`set HoursTime to 1`
`set RO to 0`
`🖊️ set pen color to #FFFFFF`
`🖊️ set pen size to 1`
`🖊️ pen up`
`forever`
`{`
`_set r to {a * (1 - e^2) / (1 + (e * cos of (ν)}`
`_set X to {r * cos of (ν + ω)}`
`_set Y to {r * sin of (ν + ω)}`
`_change RO by {(360 * (HoursTime / 8766)) / ((T * BS) / 8766)}`
`_set RO to (RO module 360)`
`_go to x: {X / Zoom} y: {Y / Zoom}`
`_🖊️ pen down`
`}`
Balance speed (BS): a balancing speed for the eccentric orbit. Because the body moves at a different rate at perihelion than at aphelion, BS adjusts the speed using the orbital-simulation formula.
Orbital period (T): the motion repeats with (orbital period * balance speed).
RO is orbital rotation
T = 8766 hours
BS (Balance speed) = ?
I'm trying to work out a formula for the balance speed. Please rewrite the Scratch code.
|
901a2e8461b07b5fa57eb0650fbd9c6e
|
{
"intermediate": 0.29776787757873535,
"beginner": 0.27292850613594055,
"expert": 0.4293036460876465
}
|
46,905
|
Scratch code:
`when 🏳️ clicked`
`🖊️ erase all`
`set a to 149597870.7`
`set e to 0.7`
`set ν to 92`
`set ω to 76`
`set Zoom to 2000000`
`set Mass to 1.98847e30`
`set HoursTime to 1`
`set RO to 0`
`🖊️ set pen color to #FFFFFF`
`🖊️ set pen size to 1`
`🖊️ pen up`
`forever`
`{`
`_set r to {a * (1 - e^2) / (1 + (e * cos of (ν + RO)}`
`_set X to {r * cos of (ν + RO + ω)}`
`_set Y to {r * sin of (ν + RO + ω)}`
`_set BS to (?) (make the formula: with T * BS it moves faster at perihelion and slower at aphelion)`
`_set T to {(2pi * sqrt of ((a * 1000)^3 / (6.6743e-11 * Mass))) / 3600}`
`_change RO by {(360 * (HoursTime / 8766)) / ((T * BS) / 8766)}`
`_set RO to (RO module 360)`
`_go to x: {X / Zoom} y: {Y / Zoom}`
`_🖊️ pen down`
`}`
Balance speed (BS): a balancing speed for the eccentric orbit. Because the body moves at a different rate at perihelion than at aphelion, BS adjusts the speed using the orbital-simulation formula.
Orbital period (T): the motion repeats with (orbital period * balance speed).
RO is orbital rotation
T = 8766 hours
BS (Balance speed) = ?
I'm trying to work out a formula for the balance speed. Please rewrite the Scratch code.
|
1c069fbb9b47a778a9bf908dc9f6bf5c
|
{
"intermediate": 0.352138489484787,
"beginner": 0.28431567549705505,
"expert": 0.36354583501815796
}
|
46,906
|
I want to connect to a PostgreSQL database from Python. I have 102.32.11.22 as the host, 3232 as the port, a username, a password, and a database named db. How do I do that?
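A minimal sketch using psycopg2 with the details from the question (host 102.32.11.22, port 3232, database db); the username and password strings are placeholders.

```python
import psycopg2  # pip install psycopg2-binary

conn = psycopg2.connect(
    host="102.32.11.22",
    port=3232,
    dbname="db",
    user="your_username",      # placeholder
    password="your_password",  # placeholder
)

try:
    with conn.cursor() as cur:
        # Simple round-trip to confirm the connection works.
        cur.execute("SELECT version();")
        print(cur.fetchone())
finally:
    conn.close()
```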
|
c5faa7f3410e8d88c62c5230b01758a9
|
{
"intermediate": 0.6056522727012634,
"beginner": 0.1531578004360199,
"expert": 0.24118998646736145
}
|
46,907
|
Can you create a flow diagram for any code?
|
57406e907ae1d983b4163a5abf4e3049
|
{
"intermediate": 0.46227163076400757,
"beginner": 0.22486576437950134,
"expert": 0.3128626346588135
}
|
46,908
|
In this JavaScript else statement I want to create a button with the id newStation and the text 'Add a Station' and append it to the instructions element: 'else {
console.log(
"Maximum number of carriages reached! You can't buy more."
);
instructionsElement.removeChild(carriageButton);
instructionsElement.removeChild(carriageMessage);
}'
|
f1c1cec8569117b755f6917655ce72f1
|
{
"intermediate": 0.3718118965625763,
"beginner": 0.4684402346611023,
"expert": 0.15974785387516022
}
|
46,909
|
Weights for Strategy 1:
T : 80.37 %
PFE : 59.75 %
XAU : 11.59 %
GOOGL : -21.15 %
USDINR : -30.56 %
Weights for Strategy 2:
EURUSD : -613.3 %
NG : -369.74 %
PFE : 543.94 %
GOOGL : -192.53 %
T : 731.63 %
Global Minimum Variance Portfolio Weights (without short selling):
T : 14.5 %
PFE : 39.59 %
XAU : 18.26 %
GOOGL : 13.05 %
USDINR : 14.6 %
Global Minimum Variance Portfolio Weights (without short selling) for Strategy 2:
EURUSD : 19.53 %
NG : 13.14 %
PFE : 39.7 %
GOOGL : 13.09 %
T : 14.54 %
Tangency Portfolio Weights (with short selling) for Strategy 1:
T : 80.37 %
PFE : 59.75 %
XAU : 11.59 %
GOOGL : -21.15 %
USDINR : -30.56 %
Tangency Portfolio Weights (with short selling) for Strategy 2:
EURUSD : -613.3 %
NG : -369.74 %
PFE : 543.94 %
GOOGL : -192.53 %
T : 731.63 %
Portfolio Return and Risk Measures:
Strategy 1 Portfolio Return: 1.11277
Strategy 1 Portfolio Risk: 0.039668
Strategy 2 Portfolio Return: 14.67486
Strategy 2 Portfolio Risk: 0.429675
GMVP Portfolio Return (Strategy 1): 0.173557
GMVP Portfolio Risk (Strategy 1): 0.013955
GMVP Portfolio Return (Strategy 2): 0.019119
GMVP Portfolio Risk (Strategy 2): 0.013995
Tangency Portfolio Return (Strategy 1): 1.11277
Tangency Portfolio Risk (Strategy 1): 0.039668
Tangency Portfolio Return (Strategy 2): 14.67486
Tangency Portfolio Risk (Strategy 2): 0.429675
Comment on the portfolio return and portfolio risk measures estimated while allocating the weights.
|
76cfb8ccb15cf0320abce836274b8c6e
|
{
"intermediate": 0.33539068698883057,
"beginner": 0.3395051658153534,
"expert": 0.3251041769981384
}
|
46,910
|
# Define the data for Strategy 1
strategy1_selection <- data.frame(
Asset = c("T", "PFE", "XAU", "GOOGL", "USDINR"),
Median_Return = c(0.009617963, 0.002619454, 0.001102195, -0.002813010, -0.003632318),
Standard_Deviation = c(0.04303437, 0.01576457, 0.03418468, 0.04782845, 0.04273717)
)
# Calculate reward-to-risk ratios for Strategy 1
strategy1_selection$Reward_to_Risk <- strategy1_selection$Median_Return / strategy1_selection$Standard_Deviation
# Define the data for Strategy 2
strategy2_selection <- data.frame(
Asset = c("EURUSD", "NG", "PFE", "GOOGL", "T"),
Median_Return = c(-0.006003852, -0.005380718, 0.002619454, -0.002813010, 0.009617963),
Standard_Deviation = c(0.03204665, 0.04763956, 0.01576457, 0.04782845, 0.04303437),
Reward_to_Risk = c(-0.18734726, -0.11294642, 0.16616080, -0.05881458, 0.22349493)
)
# Calculate reward-to-risk ratios for Strategy 2
strategy2_selection$Reward_to_Risk <- strategy2_selection$Median_Return / strategy2_selection$Standard_Deviation
# Total reward-to-risk ratio for each strategy
total_ratio_strategy1 <- sum(strategy1_selection$Reward_to_Risk)
total_ratio_strategy2 <- sum(strategy2_selection$Reward_to_Risk)
# Calculate weights for each strategy
weights_strategy1 <- (strategy1_selection$Reward_to_Risk / total_ratio_strategy1) * 100
weights_strategy2 <- (strategy2_selection$Reward_to_Risk / total_ratio_strategy2) * 100
# Print weights for Strategy 1
cat("Weights for Strategy 1:\n")
for (i in 1:nrow(strategy1_selection)) {
cat(strategy1_selection$Asset[i], ":", round(weights_strategy1[i], 2), "%\n")
}
cat("\n")
# Print weights for Strategy 2
cat("Weights for Strategy 2:\n")
for (i in 1:nrow(strategy2_selection)) {
cat(strategy2_selection$Asset[i], ":", round(weights_strategy2[i], 2), "%\n")
}
# Global Minimum Variance Portfolio - without short selling
# For GMVP, the weights are calculated based on minimizing portfolio variance
# The weights are inversely proportional to the asset's risk (standard deviation)
gmvp_weights <- 1 / strategy1_selection$Standard_Deviation
gmvp_weights <- gmvp_weights / sum(gmvp_weights) * 100
cat("\nGlobal Minimum Variance Portfolio Weights (without short selling):\n")
for (i in 1:nrow(strategy1_selection)) {
cat(strategy1_selection$Asset[i], ":", round(gmvp_weights[i], 2), "%\n")
}
# GMVP without short selling for Strategy 2
gmvp_weights_strategy2 <- 1 / strategy2_selection$Standard_Deviation
gmvp_weights_strategy2 <- gmvp_weights_strategy2 / sum(gmvp_weights_strategy2) * 100
cat("\nGlobal Minimum Variance Portfolio Weights (without short selling) for Strategy 2:\n")
for (i in 1:nrow(strategy2_selection)) {
cat(strategy2_selection$Asset[i], ":", round(gmvp_weights_strategy2[i], 2), "%\n")
}
# Tangency Portfolio - with short selling
# For the Tangency Portfolio, weights are calculated based on maximizing the Sharpe ratio
# This involves short selling to achieve the optimal risk-return trade-off
# Tangency Portfolio for Strategy 1
strategy1_tangency_weights <- strategy1_selection$Reward_to_Risk
strategy1_tangency_weights <- strategy1_tangency_weights / sum(strategy1_tangency_weights) * 100
cat("\nTangency Portfolio Weights (with short selling) for Strategy 1:\n")
for (i in 1:nrow(strategy1_selection)) {
cat(strategy1_selection$Asset[i], ":", round(strategy1_tangency_weights[i], 2), "%\n")
}
# Tangency Portfolio for Strategy 2
strategy2_tangency_weights <- strategy2_selection$Reward_to_Risk
strategy2_tangency_weights <- strategy2_tangency_weights / sum(strategy2_tangency_weights) * 100
cat("\nTangency Portfolio Weights (with short selling) for Strategy 2:\n")
for (i in 1:nrow(strategy2_selection)) {
cat(strategy2_selection$Asset[i], ":", round(strategy2_tangency_weights[i], 2), "%\n")
}
# Portfolio Return and Risk Measures
# Portfolio Return is the weighted sum of individual asset returns
portfolio_return_strategy1 <- sum(weights_strategy1 * strategy1_selection$Median_Return)
portfolio_return_strategy2 <- sum(weights_strategy2 * strategy2_selection$Median_Return)
# Portfolio Risk is calculated using the weighted standard deviation of assets
portfolio_risk_strategy1 <- sqrt(sum((weights_strategy1/100)^2 * strategy1_selection$Standard_Deviation^2))
portfolio_risk_strategy2 <- sqrt(sum((weights_strategy2/100)^2 * strategy2_selection$Standard_Deviation^2))
cat("\nPortfolio Return and Risk Measures:\n")
cat("Strategy 1 Portfolio Return: ", round(portfolio_return_strategy1, 6), "\n")
cat("Strategy 1 Portfolio Risk: ", round(portfolio_risk_strategy1, 6), "\n")
cat("Strategy 2 Portfolio Return: ", round(portfolio_return_strategy2, 6), "\n")
cat("Strategy 2 Portfolio Risk: ", round(portfolio_risk_strategy2, 6), "\n")
# Portfolio Return and Risk Measures for GMVP
gmvp_portfolio_return <- sum(gmvp_weights * strategy1_selection$Median_Return)
gmvp_portfolio_risk <- sqrt(sum((gmvp_weights/100)^2 * strategy1_selection$Standard_Deviation^2))
cat("GMVP Portfolio Return: ", round(gmvp_portfolio_return, 6), "\n")
cat("GMVP Portfolio Risk: ", round(gmvp_portfolio_risk, 6), "\n")
gmvp_portfolio_return_strategy2 <- sum(gmvp_weights_strategy2 * strategy2_selection$Median_Return)
gmvp_portfolio_risk_strategy2 <- sqrt(sum((gmvp_weights_strategy2/100)^2 * strategy2_selection$Standard_Deviation^2))
cat("Strategy 2 Portfolio Return: ", round(gmvp_portfolio_return_strategy2, 6), "\n")
cat("Strategy 2 Portfolio Risk: ", round(gmvp_portfolio_risk_strategy2, 6), "\n")
# Portfolio Return and Risk Measures for Tangency Portfolio
tangency_portfolio_return_strategy1 <- sum(strategy1_tangency_weights * strategy1_selection$Median_Return)
tangency_portfolio_risk_strategy1 <- sqrt(sum((strategy1_tangency_weights/100)^2 * strategy1_selection$Standard_Deviation^2))
cat("Tangency Portfolio Return (Strategy 1): ", round(tangency_portfolio_return_strategy1, 6), "\n")
cat("Tangency Portfolio Risk (Strategy 1): ", round(tangency_portfolio_risk_strategy1, 6), "\n")
tangency_portfolio_return_strategy2 <- sum(strategy2_tangency_weights * strategy2_selection$Median_Return)
tangency_portfolio_risk_strategy2 <- sqrt(sum((strategy2_tangency_weights/100)^2 * strategy2_selection$Standard_Deviation^2))
cat("Tangency Portfolio Return (Strategy 2): ", round(tangency_portfolio_return_strategy2, 6), "\n")
cat("Tangency Portfolio Risk (Strategy 2): ", round(tangency_portfolio_risk_strategy2, 6), "\n")
The above is the final code I used. Now, for the constituted portfolios, can you please give me code to compare and contrast the estimates of Value at Risk in euros using the parametric method, the historical method, and the Monte Carlo simulation method?
Insert the code chunk below.
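For reference, the three estimators can be written as follows, with V the portfolio value in euros, r_1, ..., r_T the portfolio returns, μ_p and σ_p their mean and standard deviation, and α the tail level (for example 5%); these symbols are assumptions, since none of them are defined in the code above:

```latex
\text{Parametric: } \mathrm{VaR}_{\alpha} = -\left(\mu_p + z_{\alpha}\sigma_p\right)V,
\quad z_{\alpha} = \Phi^{-1}(\alpha) \approx -1.645 \text{ for } \alpha = 5\%
```

```latex
\text{Historical: } \mathrm{VaR}_{\alpha} = -\,q_{\alpha}(r_1,\dots,r_T)\,V
\qquad
\text{Monte Carlo: } \mathrm{VaR}_{\alpha} = -\,q_{\alpha}(\tilde r_1,\dots,\tilde r_N)\,V,
\quad \tilde r_i \sim \mathcal{N}(\mu_p,\sigma_p^{2})
```

where q_α is the empirical α-quantile. The Monte Carlo estimate converges to the parametric one as N grows when the simulated distribution is the same normal, so the interesting contrast is with the historical quantile, which also reflects skewness and fat tails.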
|
d2fd498d33df115c018c8a7a25853936
|
{
"intermediate": 0.29864710569381714,
"beginner": 0.4009788930416107,
"expert": 0.30037403106689453
}
|
46,911
|
3. Show the year and subject that won 'Albert Einstein' his prize. Write the SQL query.
|
466691cc8b103fe8debdfd0c4c3e9781
|
{
"intermediate": 0.30257946252822876,
"beginner": 0.3748353123664856,
"expert": 0.32258525490760803
}
|
46,912
|
Here the unmasked card number is passed in payInfo in the request; how can I use clone to pass this object with the card number masked?
2024-04-16T10:29:29.076+04:00 INFO 9048 --- [ntContainer#3-1] acq.micro.tkb.impl.TkbRestClientImpl : [requestPostTkb] paymentInfo = {"orderId":14378,"processId":8319,"amount":100.00,"currency":"RUR","terminal":"T3885103237ID","terminalType":"AFT","merchant":"","description":"Income account 40701810100015001178","merchantName":"FFIN Bank","merchantUrl":"https://bankffin.ru","accountNumber":"40701810100015001178","clientName":"Sidorov","needToHold":false,"pan":"<PRESIDIO_ANONYMIZED_CREDIT_CARD>","cardHolder":"D D","cvc":"193","month":"10","year":"26","md":"RUM5MTVFMUQyMjhGOTAxMEQ2REEyQjU5QTcwMkQ3QjBENUFDMEQyNkU5QkZENUE0RjQ3QkNGMTc4N0Y1QTg4MA==","clientIp":"127.0.0.1","payService":"CARD"}
|
450a696f7a3180ad652d0a7dbdae661a
|
{
"intermediate": 0.4235384464263916,
"beginner": 0.27956849336624146,
"expert": 0.2968931198120117
}
|
46,913
|
How can I check whether a printer is online or offline via PowerShell?
Get-Printer | Select Name, PrinterStatus does not show the actual state: I disconnected the printer but it still reports Normal.
|
498b958e44b9092a0959e8a307e9fdaa
|
{
"intermediate": 0.5105958580970764,
"beginner": 0.17271266877651215,
"expert": 0.3166915476322174
}
|
46,914
|
How to check whether a Zebra printer is connected, via cmd or PowerShell?
|
4867ee5ceb1ac1f38cdf3bbdb1ddfabd
|
{
"intermediate": 0.4216356575489044,
"beginner": 0.25122934579849243,
"expert": 0.32713496685028076
}
|
46,915
|
How to write a Google Test for the function static struct graphics_gc_priv *gc_new(struct graphics_priv *gr, struct graphics_gc_methods *meth) {
struct graphics_gc_priv *gc = g_new0(struct graphics_gc_priv, 1);
*meth = gc_methods;
gc->gr = gr;
gc->linewidth = 1;
return gc;
}
|
7ac2b3a3b9b88a3545b04d08d3763295
|
{
"intermediate": 0.31760910153388977,
"beginner": 0.46022966504096985,
"expert": 0.2221612185239792
}
|
46,916
|
correct this:
model = NBEATSx(h=1, input_size=24,
#loss=MQLoss(level=[80, 90]),
loss=DistributionLoss(distribution='Normal', level=[90]),
#scaler_type='robust',
stack_types=['identity'],
dropout_prob_theta=0.1,
hist_exog_list=['Var1','Var2','Var3','Var4','Var5','Var6','Var7','Var8','Var9','Var10','Var11','Var12','Var13',
'Var14','Var15','Var16',
'Var17','Var18','Var19','Var20','Var21','Var22','Var23','Var24'
],
max_steps=200,
val_check_steps=10,
early_stop_patience_steps=2, mlp_units=list([10]))
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-41-5bfafd767202> in <cell line: 15>()
13 Y_test_df['unique_id']=np.zeros(Y_test_df['ds'].shape)
14
---> 15 model = NBEATSx(h=1, input_size=24,
16 #loss=MQLoss(level=[80, 90]),
17 loss=DistributionLoss(distribution='Normal', level=[90]),
2 frames
/usr/local/lib/python3.10/dist-packages/neuralforecast/models/nbeatsx.py in __init__(self, input_size, h, futr_input_size, hist_input_size, stat_input_size, n_theta, mlp_units, basis, dropout_prob, activation)
207
208 hidden_layers = [
--> 209 nn.Linear(in_features=input_size, out_features=mlp_units[0][0])
210 ]
211 for layer in mlp_units:
TypeError: 'int' object is not subscriptable
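Judging only from the traceback, which indexes mlp_units[0][0], the model appears to expect mlp_units to be a list of lists (one inner list of hidden-layer widths per block) rather than the flat list([10]) passed above. A sketch of the same call with only that argument changed; every other argument, and the import paths, are taken from or assumed to match the original setup:

```python
from neuralforecast.models import NBEATSx
from neuralforecast.losses.pytorch import DistributionLoss

model = NBEATSx(h=1, input_size=24,
                loss=DistributionLoss(distribution='Normal', level=[90]),
                stack_types=['identity'],
                dropout_prob_theta=0.1,
                hist_exog_list=['Var1', 'Var2', 'Var3', 'Var4', 'Var5', 'Var6',
                                'Var7', 'Var8', 'Var9', 'Var10', 'Var11', 'Var12',
                                'Var13', 'Var14', 'Var15', 'Var16', 'Var17', 'Var18',
                                'Var19', 'Var20', 'Var21', 'Var22', 'Var23', 'Var24'],
                max_steps=200,
                val_check_steps=10,
                early_stop_patience_steps=2,
                mlp_units=[[10, 10]])  # list of lists of hidden-layer widths, not a flat list
```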
|
a690d7ff75fe628dcec4e6a1e4f9bced
|
{
"intermediate": 0.24604497849941254,
"beginner": 0.5811511278152466,
"expert": 0.17280392348766327
}
|
46,917
|
Gtest UT for static int CntrlRunner(void)
{
char user_in[64] = "";
struct timeval tv;
int ret = 0;
fd_set fds;
while (!is_thread_exit)
{
FD_ZERO(&fds);
FD_SET(STDIN_FILENO, &fds);
tv.tv_sec = 1;
tv.tv_usec = 0;
ret = select((STDIN_FILENO + 1), &fds, NULL, NULL, &tv);
if ((ret) && (fgets(user_in, sizeof(user_in), stdin) != NULL))
{
if (strncmp(user_in, "x", 1) == 0) /* user input is "x" : exit */
{
is_thread_exit = true;
if (!is_thread_exit)
{
break;
}
}
else
{
/* Do Nothing */
}
}
}
return 0;
}
|
71dfc4f09741df787a25326b5d02dc8a
|
{
"intermediate": 0.274549275636673,
"beginner": 0.5708104372024536,
"expert": 0.1546403020620346
}
|
46,918
|
Rewrite this code in Java (JDK 1.8.0_181):
#include <bits/stdc++.h>
// tiom4eg's precompiler options
// POGGERS POGGERS POGGERS POGGERS POGGERS POGGERS POGGERS
// IO settings
#define fastIO ios_base::sync_with_stdio(false); cin.tie(0)
// Quick types
#define ll long long
#define ld long double
#define ull unsigned long long
#define pii pair <int, int>
#define vi vector <int>
#define mi vector <vector <int>>
// Quick functions
//#define endl "\n"
#define F first
#define S second
#define all(a) a.begin(), a.end()
#define sz(a) (int)(a.size())
#define pb push_back
#define mp make_pair
// Quick fors
#define FOR(i, a, b) for (int i = a; i < b; ++i)
#define FORS(i, a, b, c) for (int i = a; i < b; i += c)
#define RFOR(i, a, b) for (int i = a; i >= b; --i)
#define EACH(e, a) for (auto& e : a)
// Pragmas
#ifndef TIOM4EG
#pragma GCC optimize("O3,unroll-loops") // let the chaos begin!
#pragma GCC target("avx,avx2,bmi,bmi2,popcnt,lzcnt,tune=native")
#pragma GCC comment(linker, "/stack:200000000")
#endif
// PBDS
#include <ext/pb_ds/assoc_container.hpp>
#include <ext/pb_ds/tree_policy.hpp>
#define ordered_set tree <int, null_type, less <int>, rb_tree_tag, tree_order_statistics_node_update>
#define ook order_of_key
#define fbo find_by_order
using namespace __gnu_pbds;
// POGGERS POGGERS POGGERS POGGERS POGGERS POGGERS POGGERS
using namespace std;
mt19937 rng(chrono::duration_cast<chrono::milliseconds>(chrono::system_clock::now().time_since_epoch()).count());
#define int long long
const int INF = 1e9 + 7, INFLL = 1e13 + 7, MD = 998244353, MAX = 1 << 19, MOD = 1e9 + 7, LG = 30, B = 64000000;
signed main() {
//fastIO;
int t; cin >> t;
while (t--) {
vector <pii> r, b;
FOR(i, 0, 7) {
cout << "? 2 " << i + 1 << endl;
char c; cin >> c;
(c == 'R' ? r : b).pb({2, i + 1});
}
int v = sz(r), w = sz(b);
FOR(i, 0, 4) {
cout << "? 1 " << (v > w ? r : b)[i].S << endl;
char c; cin >> c;
(c == 'R' ? r : b).pb({1, (v > w ? r : b)[i].S});
cout << "? 3 " << (v > w ? r : b)[i].S << endl;
cin >> c;
(c == 'R' ? r : b).pb({3, (v > w ? r : b)[i].S});
}
FOR(i, 0, sz(r)) FOR(j, i + 1, sz(r)) FOR(k, j + 1, sz(r)) FOR(l, k + 1, sz(r)) {
unordered_map <int, int> x, y;
++x[r[i].F], ++y[r[i].S];
++x[r[j].F], ++y[r[j].S];
++x[r[k].F], ++y[r[k].S];
++x[r[l].F], ++y[r[l].S];
EACH(e, x) if (e.S != 2) goto skipr;
EACH(e, y) if (e.S != 2) goto skipr;
cout << "! " << min({r[i].F, r[j].F, r[k].F, r[l].F}) << ' ' << min({r[i].S, r[j].S, r[k].S, r[l].S}) << ' ' << max({r[i].F, r[j].F, r[k].F, r[l].F}) << ' ' << max({r[i].S, r[j].S, r[k].S, r[l].S}) << endl;
goto pass;
skipr:;
}
FOR(i, 0, sz(b)) FOR(j, i + 1, sz(b)) FOR(k, j + 1, sz(b)) FOR(l, k + 1, sz(b)) {
unordered_map <int, int> x, y;
++x[b[i].F], ++y[b[i].S];
++x[b[j].F], ++y[b[j].S];
++x[b[k].F], ++y[b[k].S];
++x[b[l].F], ++y[b[l].S];
EACH(e, x) if (e.S != 2) goto skipb;
EACH(e, y) if (e.S != 2) goto skipb;
cout << "! " << min({b[i].F, b[j].F, b[k].F, b[l].F}) << ' ' << min({b[i].S, b[j].S, b[k].S, b[l].S}) << ' ' << max({b[i].F, b[j].F, b[k].F, b[l].F}) << ' ' << max({b[i].S, b[j].S, b[k].S, b[l].S}) << endl;
goto pass;
skipb:;
}
pass: string s; cin >> s;
}
}
|
d5bacf1db183e0c41e7640b4753087ac
|
{
"intermediate": 0.3494819104671478,
"beginner": 0.4789440333843231,
"expert": 0.17157405614852905
}
|
46,919
|
Rewrite this code in Java (JDK 1.8.0_181):
#include <bits/stdc++.h>
using namespace std;
vector<pair<int, int>> buildPath(int n, int m) {
vector<pair<int, int>> result;
result.reserve(n * m);
int x = 0;
int y = 0;
do {
result.push_back({x, y});
if (y == 0) {
if (x == 0) {
++y;
} else {
--x;
}
} else if (x < n - 2 || n % 2 == 0) {
if (x % 2 == 0) {
if (y == m - 1) {
++x;
} else {
++y;
}
} else {
if (y == 1) {
if (x == n - 1) {
--y;
} else {
++x;
}
} else {
--y;
}
}
} else {
int color = ((n + m) % 2) ^ ((n * m) % 2);
int curColor = (x + y) % 2;
if (curColor == color) {
--y;
} else if (x == n - 1) {
--x;
} else {
++x;
}
}
} while (x != 0 || y != 0);
return result;
}
bool occupied(int r, int c, int ar, int ac, int br, int bc) {
return (r == ar && c == ac) || (r == br && c == bc);
}
void flipHorizontal(vector<pair<int, int>> &path, int sz) {
for (auto &it : path) {
it.second = sz - 1 - it.second;
}
}
void flipVertical(vector<pair<int, int>> &path, int sz) {
for (auto &it : path) {
it.first = sz - 1 - it.first;
}
}
void solve(int n, int m, int ar, int ac, int br, int bc, const vector<vector<int>> &w) {
vector<pair<int, int>> path = buildPath(n, m);
if (n * m % 2 == 1) {
if (occupied(n - 1, m - 1, ar, ac, br, bc)) {
if (occupied(0, m - 1, ar, ac, br, bc)) {
flipHorizontal(path, m);
} else {
flipVertical(path, n);
}
}
}
long long totalW = 0;
for (int i = 0; i < (int) w.size(); ++i) {
for (auto it : w[i]) {
totalW += it;
}
}
int posStart = find(path.begin(), path.end(), make_pair(ar, ac)) - path.begin();
int posEnd = find(path.begin(), path.end(), make_pair(br, bc)) - path.begin();
long long cost = w[path[posEnd].first][path[posEnd].second];
for (int i = posStart; i != posEnd; i = (i + 1) % path.size()) {
cost += w[path[i].first][path[i].second];
}
int dx = 2 * cost > totalW ? 1 : -1;
for (int i = posStart; i != posEnd; i = (i + dx + path.size()) % path.size()) {
int j = (i + dx + path.size()) % path.size();
int dirx = path[j].first - path[i].first;
int diry = path[j].second - path[i].second;
if (dirx == 1) cout << 'D';
if (dirx == -1) cout << 'U';
if (diry == 1) cout << 'R';
if (diry == -1) cout << 'L';
}
cout << endl;
}
int main() {
ios_base::sync_with_stdio(false);
cin.tie(0);
int tests;
cin >> tests;
for (int test = 1; test <= tests; ++test) {
int n, m, ar, ac, br, bc;
cin >> n >> m >> ar >> ac >> br >> bc;
vector<vector<int>> w(n, vector<int>(m));
for (int i = 0; i < n; ++i) {
for (int j = 0; j < m; ++j) {
cin >> w[i][j];
}
}
solve(n, m, ar - 1, ac - 1, br - 1, bc - 1, w);
}
return 0;
}
|
52e1e378b67c2451c38e075b9bf78ee6
|
{
"intermediate": 0.2998574376106262,
"beginner": 0.5099368095397949,
"expert": 0.19020572304725647
}
|
46,920
|
I have a Django model named Profile containing: name, surname, address. I want to add two auto-populated fields, created_at and last_modified: created_at should record the date and time when a new profile is created and keep it, and last_modified should keep the date and time when a record was last touched or changed.
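A minimal sketch, assuming the model and field names implied by the description (Profile with name, surname, address): auto_now_add=True stamps the creation time once, and auto_now=True refreshes the timestamp on every save.

```python
from django.db import models

class Profile(models.Model):
    name = models.CharField(max_length=100)
    surname = models.CharField(max_length=100)
    address = models.CharField(max_length=255)

    # Set once, when the row is first created, and never changed afterwards.
    created_at = models.DateTimeField(auto_now_add=True)
    # Updated automatically every time the object is saved.
    last_modified = models.DateTimeField(auto_now=True)
```

After adding the fields, run makemigrations and migrate; note that auto_now only fires on Model.save(), not on bulk QuerySet.update() calls.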
|
a7333d4141f19f9984c7b931790b0496
|
{
"intermediate": 0.5666645169258118,
"beginner": 0.08877965062856674,
"expert": 0.3445558249950409
}
|
46,921
|
Rewrite this code in Python 3 (3.5.2):
#include <bits/stdc++.h>
using namespace std;
vector<pair<int, int>> buildPath(int n, int m) {
vector<pair<int, int>> result;
result.reserve(n * m);
int x = 0;
int y = 0;
do {
result.push_back({x, y});
if (y == 0) {
if (x == 0) {
++y;
} else {
--x;
}
} else if (x < n - 2 || n % 2 == 0) {
if (x % 2 == 0) {
if (y == m - 1) {
++x;
} else {
++y;
}
} else {
if (y == 1) {
if (x == n - 1) {
--y;
} else {
++x;
}
} else {
--y;
}
}
} else {
int color = ((n + m) % 2) ^ ((n * m) % 2);
int curColor = (x + y) % 2;
if (curColor == color) {
--y;
} else if (x == n - 1) {
--x;
} else {
++x;
}
}
} while (x != 0 || y != 0);
return result;
}
bool occupied(int r, int c, int ar, int ac, int br, int bc) {
return (r == ar && c == ac) || (r == br && c == bc);
}
void flipHorizontal(vector<pair<int, int>> &path, int sz) {
for (auto &it : path) {
it.second = sz - 1 - it.second;
}
}
void flipVertical(vector<pair<int, int>> &path, int sz) {
for (auto &it : path) {
it.first = sz - 1 - it.first;
}
}
void solve(int n, int m, int ar, int ac, int br, int bc, const vector<vector<int>> &w) {
vector<pair<int, int>> path = buildPath(n, m);
if (n * m % 2 == 1) {
if (occupied(n - 1, m - 1, ar, ac, br, bc)) {
if (occupied(0, m - 1, ar, ac, br, bc)) {
flipHorizontal(path, m);
} else {
flipVertical(path, n);
}
}
}
long long totalW = 0;
for (int i = 0; i < (int) w.size(); ++i) {
for (auto it : w[i]) {
totalW += it;
}
}
int posStart = find(path.begin(), path.end(), make_pair(ar, ac)) - path.begin();
int posEnd = find(path.begin(), path.end(), make_pair(br, bc)) - path.begin();
long long cost = w[path[posEnd].first][path[posEnd].second];
for (int i = posStart; i != posEnd; i = (i + 1) % path.size()) {
cost += w[path[i].first][path[i].second];
}
int dx = 2 * cost > totalW ? 1 : -1;
for (int i = posStart; i != posEnd; i = (i + dx + path.size()) % path.size()) {
int j = (i + dx + path.size()) % path.size();
int dirx = path[j].first - path[i].first;
int diry = path[j].second - path[i].second;
if (dirx == 1) cout << 'D';
if (dirx == -1) cout << 'U';
if (diry == 1) cout << 'R';
if (diry == -1) cout << 'L';
}
cout << endl;
}
int main() {
ios_base::sync_with_stdio(false);
cin.tie(0);
int tests;
cin >> tests;
for (int test = 1; test <= tests; ++test) {
int n, m, ar, ac, br, bc;
cin >> n >> m >> ar >> ac >> br >> bc;
vector<vector<int>> w(n, vector<int>(m));
for (int i = 0; i < n; ++i) {
for (int j = 0; j < m; ++j) {
cin >> w[i][j];
}
}
solve(n, m, ar - 1, ac - 1, br - 1, bc - 1, w);
}
return 0;
}
|
6405f1f3c8f9f5e96cdc7573b532d861
|
{
"intermediate": 0.3137347400188446,
"beginner": 0.4663085341453552,
"expert": 0.21995672583580017
}
|
46,922
|
Rewrite this code in C++17:
def build_path(n, m):
result = []
x, y = 0, 0
while True:
result.append((x, y))
if y == 0:
if x == 0:
y += 1
else:
x -= 1
elif x < n - 2 or n % 2 == 0:
if x % 2 == 0:
if y == m - 1:
x += 1
else:
y += 1
else:
if y == 1:
if x == n - 1:
y -= 1
else:
x += 1
else:
y -= 1
else:
color = ((n + m) % 2) ^ ((n * m) % 2)
cur_color = (x + y) % 2
if cur_color == color:
y -= 1
elif x == n - 1:
x -= 1
else:
x += 1
if x == 0 and y == 0:
break
return result
def occupied(r, c, ar, ac, br, bc):
return (r == ar and c == ac) or (r == br and c == bc)
def flip_horizontal(path, sz):
for i in range(len(path)):
path[i] = (path[i][0], sz - 1 - path[i][1])
def flip_vertical(path, sz):
for i in range(len(path)):
path[i] = (sz - 1 - path[i][0], path[i][1])
def solve(n, m, ar, ac, br, bc, w):
path = build_path(n, m)
if n * m % 2 == 1:
if occupied(n - 1, m - 1, ar, ac, br, bc):
if occupied(0, m - 1, ar, ac, br, bc):
flip_horizontal(path, m)
else:
flip_vertical(path, n)
total_w = sum(sum(row) for row in w)
pos_start = path.index((ar, ac))
pos_end = path.index((br, bc))
cost = w[path[pos_end][0]][path[pos_end][1]]
i = pos_start
while i != pos_end:
cost += w[path[i][0]][path[i][1]]
i = (i + 1) % len(path)
dx = 1 if 2 * cost > total_w else -1
i = pos_start
while i != pos_end:
j = (i + dx + len(path)) % len(path)
dirx = path[j][0] - path[i][0]
diry = path[j][1] - path[i][1]
if dirx == 1:
print('D', end='')
if dirx == -1:
print('U', end='')
if diry == 1:
print('R', end='')
if diry == -1:
print('L', end='')
i = j
print()
def main():
tests = int(input())
for _ in range(tests):
n, m, ar, ac, br, bc = map(int, input().split())
w = [list(map(int, input().split())) for _ in range(n)]
solve(n, m, ar - 1, ac - 1, br - 1, bc - 1, w)
if __name__ == '__main__':
main()
|
f6592cdd0afd0016d7cd3e4f0f8df0f1
|
{
"intermediate": 0.27120116353034973,
"beginner": 0.46317213773727417,
"expert": 0.2656266987323761
}
|
46,923
|
Hi
|
3a9b3b6c31b5e6b288f166d174eb5ff3
|
{
"intermediate": 0.33010533452033997,
"beginner": 0.26984941959381104,
"expert": 0.400045245885849
}
|
46,924
|
hi
|
e2fe984d107d5b5a562966c9ddcc9383
|
{
"intermediate": 0.3246487081050873,
"beginner": 0.27135494351387024,
"expert": 0.40399640798568726
}
|
46,925
|
hello
|
f1cd769e373780f3f1821811be4a8206
|
{
"intermediate": 0.32064199447631836,
"beginner": 0.28176039457321167,
"expert": 0.39759764075279236
}
|
46,926
|
You have to regress your portfolio return on your chosen factors. Can you explain this, considering the question below?
Your supervisor is interested in identifying factors that may help explain your portfolio return. Hence, you need to identify factors that might influence your chosen portfolios and comment on the explanatory power of the chosen factors.
Note: Kindly mention the data source from which you fetched the data for the factors.
Insert the code chunk below. Please explain in simple words.
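In plain terms, this is a time-series regression of the portfolio's (excess) return on a handful of candidate factor series, for example market, size, and value factors (the Fama-French factors from Kenneth French's data library are one freely available source; naming them here is a suggestion, not something stated in the question):

```latex
R_{p,t} - r_{f,t} = \alpha + \beta_1 F_{1,t} + \beta_2 F_{2,t} + \dots + \beta_k F_{k,t} + \varepsilon_t
```

The estimated betas show how strongly each factor moves with the portfolio, their t-statistics indicate whether the relationship is statistically meaningful, and the regression R² summarizes how much of the portfolio's variation the chosen factors explain.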
|
61588e6c571c5bf44956cdd2d3a3c964
|
{
"intermediate": 0.39575478434562683,
"beginner": 0.2433857023715973,
"expert": 0.3608595132827759
}
|
46,927
|
{r}
# Define the data for Strategy 1
strategy1_selection <- data.frame(
Asset = c("T", "PFE", "XAU", "GOOGL", "USDINR"),
Median_Return = c(0.009617963, 0.002619454, 0.001102195, -0.002813010, -0.003632318),
Standard_Deviation = c(0.04303437, 0.01576457, 0.03418468, 0.04782845, 0.04273717)
)
# Calculate reward-to-risk ratios for Strategy 1
strategy1_selection$Reward_to_Risk <- strategy1_selection$Median_Return / strategy1_selection$Standard_Deviation
# Define the data for Strategy 2
strategy2_selection <- data.frame(
Asset = c("EURUSD", "NG", "PFE", "GOOGL", "T"),
Median_Return = c(-0.006003852, -0.005380718, 0.002619454, -0.002813010, 0.009617963),
Standard_Deviation = c(0.03204665, 0.04763956, 0.01576457, 0.04782845, 0.04303437),
Reward_to_Risk = c(-0.18734726, -0.11294642, 0.16616080, -0.05881458, 0.22349493)
)
# Calculate reward-to-risk ratios for Strategy 2
strategy2_selection$Reward_to_Risk <- strategy2_selection$Median_Return / strategy2_selection$Standard_Deviation
# Total reward-to-risk ratio for each strategy
total_ratio_strategy1 <- sum(strategy1_selection$Reward_to_Risk)
total_ratio_strategy2 <- sum(strategy2_selection$Reward_to_Risk)
# Calculate weights for each strategy
weights_strategy1 <- (strategy1_selection$Reward_to_Risk / total_ratio_strategy1) * 100
weights_strategy2 <- (strategy2_selection$Reward_to_Risk / total_ratio_strategy2) * 100
# Print weights for Strategy 1
cat("Weights for Strategy 1:\n")
for (i in 1:nrow(strategy1_selection)) {
cat(strategy1_selection$Asset[i], ":", round(weights_strategy1[i], 2), "%\n")
}
cat("\n")
# Print weights for Strategy 2
cat("Weights for Strategy 2:\n")
for (i in 1:nrow(strategy2_selection)) {
cat(strategy2_selection$Asset[i], ":", round(weights_strategy2[i], 2), "%\n")
}
# Global Minimum Variance Portfolio - without short selling
# For GMVP, the weights are calculated based on minimizing portfolio variance
# The weights are inversely proportional to the asset's risk (standard deviation)
gmvp_weights <- 1 / strategy1_selection$Standard_Deviation
gmvp_weights <- gmvp_weights / sum(gmvp_weights) * 100
cat("\nGlobal Minimum Variance Portfolio Weights (without short selling):\n")
for (i in 1:nrow(strategy1_selection)) {
cat(strategy1_selection$Asset[i], ":", round(gmvp_weights[i], 2), "%\n")
}
# GMVP without short selling for Strategy 2
gmvp_weights_strategy2 <- 1 / strategy2_selection$Standard_Deviation
gmvp_weights_strategy2 <- gmvp_weights_strategy2 / sum(gmvp_weights_strategy2) * 100
cat("\nGlobal Minimum Variance Portfolio Weights (without short selling) for Strategy 2:\n")
for (i in 1:nrow(strategy2_selection)) {
cat(strategy2_selection$Asset[i], ":", round(gmvp_weights_strategy2[i], 2), "%\n")
}
# Tangency Portfolio - with short selling
# For the Tangency Portfolio, weights are calculated based on maximizing the Sharpe ratio
# This involves short selling to achieve the optimal risk-return trade-off
# Tangency Portfolio for Strategy 1
strategy1_tangency_weights <- strategy1_selection$Reward_to_Risk
strategy1_tangency_weights <- strategy1_tangency_weights / sum(strategy1_tangency_weights) * 100
cat("\nTangency Portfolio Weights (with short selling) for Strategy 1:\n")
for (i in 1:nrow(strategy1_selection)) {
cat(strategy1_selection$Asset[i], ":", round(strategy1_tangency_weights[i], 2), "%\n")
}
# Tangency Portfolio for Strategy 2
strategy2_tangency_weights <- strategy2_selection$Reward_to_Risk
strategy2_tangency_weights <- strategy2_tangency_weights / sum(strategy2_tangency_weights) * 100
cat("\nTangency Portfolio Weights (with short selling) for Strategy 2:\n")
for (i in 1:nrow(strategy2_selection)) {
cat(strategy2_selection$Asset[i], ":", round(strategy2_tangency_weights[i], 2), "%\n")
}
# Portfolio Return and Risk Measures
# Portfolio Return is the weighted sum of individual asset returns
portfolio_return_strategy1 <- sum(weights_strategy1 * strategy1_selection$Median_Return)
portfolio_return_strategy2 <- sum(weights_strategy2 * strategy2_selection$Median_Return)
# Portfolio Risk is calculated using the weighted standard deviation of assets
portfolio_risk_strategy1 <- sqrt(sum((weights_strategy1/100)^2 * strategy1_selection$Standard_Deviation^2))
portfolio_risk_strategy2 <- sqrt(sum((weights_strategy2/100)^2 * strategy2_selection$Standard_Deviation^2))
cat("\nPortfolio Return and Risk Measures:\n")
cat("Strategy 1 Portfolio Return: ", round(portfolio_return_strategy1, 6), "\n")
cat("Strategy 1 Portfolio Risk: ", round(portfolio_risk_strategy1, 6), "\n")
cat("Strategy 2 Portfolio Return: ", round(portfolio_return_strategy2, 6), "\n")
cat("Strategy 2 Portfolio Risk: ", round(portfolio_risk_strategy2, 6), "\n")
# Portfolio Return and Risk Measures for GMVP
gmvp_portfolio_return <- sum(gmvp_weights * strategy1_selection$Median_Return)
gmvp_portfolio_risk <- sqrt(sum((gmvp_weights/100)^2 * strategy1_selection$Standard_Deviation^2))
cat("GMVP Portfolio Return: ", round(gmvp_portfolio_return, 6), "\n")
cat("GMVP Portfolio Risk: ", round(gmvp_portfolio_risk, 6), "\n")
gmvp_portfolio_return_strategy2 <- sum(gmvp_weights_strategy2 * strategy2_selection$Median_Return)
gmvp_portfolio_risk_strategy2 <- sqrt(sum((gmvp_weights_strategy2/100)^2 * strategy2_selection$Standard_Deviation^2))
cat("Strategy 2 Portfolio Return: ", round(gmvp_portfolio_return_strategy2, 6), "\n")
cat("Strategy 2 Portfolio Risk: ", round(gmvp_portfolio_risk_strategy2, 6), "\n")
# Portfolio Return and Risk Measures for Tangency Portfolio
tangency_portfolio_return_strategy1 <- sum(strategy1_tangency_weights * strategy1_selection$Median_Return)
tangency_portfolio_risk_strategy1 <- sqrt(sum((strategy1_tangency_weights/100)^2 * strategy1_selection$Standard_Deviation^2))
cat("Tangency Portfolio Return (Strategy 1): ", round(tangency_portfolio_return_strategy1, 6), "\n")
cat("Tangency Portfolio Risk (Strategy 1): ", round(tangency_portfolio_risk_strategy1, 6), "\n")
tangency_portfolio_return_strategy2 <- sum(strategy2_tangency_weights * strategy2_selection$Median_Return)
tangency_portfolio_risk_strategy2 <- sqrt(sum((strategy2_tangency_weights/100)^2 * strategy2_selection$Standard_Deviation^2))
cat("Tangency Portfolio Return (Strategy 2): ", round(tangency_portfolio_return_strategy2, 6), "\n")
cat("Tangency Portfolio Risk (Strategy 2): ", round(tangency_portfolio_risk_strategy2, 6), "\n")
{r}
# Define the portfolio returns
portfolio_return_strategy1 <- c(strategy1_selection$Median_Return[1] * weights_strategy1[1],
strategy1_selection$Median_Return[2] * weights_strategy1[2],
strategy1_selection$Median_Return[3] * weights_strategy1[3],
strategy1_selection$Median_Return[4] * weights_strategy1[4],
strategy1_selection$Median_Return[5] * weights_strategy1[5])
portfolio_return_strategy2 <- c(strategy2_selection$Median_Return[1] * weights_strategy2[1],
strategy2_selection$Median_Return[2] * weights_strategy2[2],
strategy2_selection$Median_Return[3] * weights_strategy2[3],
strategy2_selection$Median_Return[4] * weights_strategy2[4],
strategy2_selection$Median_Return[5] * weights_strategy2[5])
# Define the portfolio standard deviations
portfolio_std_dev_strategy1 <- sqrt(sum((weights_strategy1/100)^2 * strategy1_selection$Standard_Deviation^2))
portfolio_std_dev_strategy2 <- sqrt(sum((weights_strategy2/100)^2 * strategy2_selection$Standard_Deviation^2))
# Parametric method
portfolio_var_parametric_strategy1 <- portfolio_return_strategy1 - 1.645 * portfolio_std_dev_strategy1 * sqrt(252)
portfolio_var_parametric_strategy2 <- portfolio_return_strategy2 - 1.645 * portfolio_std_dev_strategy2 * sqrt(252)
cat("\nValue at Risk (Parametric Method):\n")
cat("Strategy 1 Portfolio VaR: ", round(min(portfolio_var_parametric_strategy1), 6), " Euro\n")
cat("Strategy 2 Portfolio VaR: ", round(min(portfolio_var_parametric_strategy2), 6), " Euro\n")
# Historical method
portfolio_var_historical_strategy1 <- quantile(portfolio_return_strategy1, 0.05)
portfolio_var_historical_strategy2 <- quantile(portfolio_return_strategy2, 0.05)
cat("\nValue at Risk (Historical Method):\n")
cat("Strategy 1 Portfolio VaR: ", round(portfolio_var_historical_strategy1, 6), " Euro\n")
cat("Strategy 2 Portfolio VaR: ", round(portfolio_var_historical_strategy2, 6), " Euro\n")
# Monte Carlo simulation method
set.seed(123)
portfolio_simulated_returns_strategy1 <- rnorm(252, mean = mean(portfolio_return_strategy1), sd = portfolio_std_dev_strategy1)
portfolio_simulated_returns_strategy2 <- rnorm(252, mean = mean(portfolio_return_strategy2), sd = portfolio_std_dev_strategy2)
portfolio_var_montecarlo_strategy1 <- quantile(portfolio_simulated_returns_strategy1, 0.05)
portfolio_var_montecarlo_strategy2 <- quantile(portfolio_simulated_returns_strategy2, 0.05)
cat("\nValue at Risk (Monte Carlo Simulation Method):\n")
cat("Strategy 1 Portfolio VaR: ", round(portfolio_var_montecarlo_strategy1, 6), " Euro\n")
cat("Strategy 2 Portfolio VaR: ", round(portfolio_var_montecarlo_strategy2, 6), " Euro\n")
Before this question, the coding above was done. Based on it, give me code for the following: Your supervisor is interested in identifying certain factors that may be helpful to explain your Portfolio return. Hence, you need to identify certain factors that might influence your chosen Portfolio and comment on the explainability of the chosen factors.
Note: Kindly mention the data source from which you have fetched the data for the factors.
Hint: You have to regress your Portfolio Return on your chosen factors.
Insert the code chunk below. I have historical monthly data for VIX and DXY_US (these are the file names): VIX has columns Date and AdjClose, and DXY_US has columns date and price. All of it covers April 30, 2008, to January 29, 2021.
|
5a4a98e65abc7407a522dc2a9196893c
|
{
"intermediate": 0.3405984938144684,
"beginner": 0.41410642862319946,
"expert": 0.24529509246349335
}
|
46,928
|
{r}
# Define the data for Strategy 1
strategy1_selection <- data.frame(
Asset = c("T", "PFE", "XAU", "GOOGL", "USDINR"),
Median_Return = c(0.009617963, 0.002619454, 0.001102195, -0.002813010, -0.003632318),
Standard_Deviation = c(0.04303437, 0.01576457, 0.03418468, 0.04782845, 0.04273717)
)
# Calculate reward-to-risk ratios for Strategy 1
strategy1_selection$Reward_to_Risk <- strategy1_selection$Median_Return / strategy1_selection$Standard_Deviation
# Define the data for Strategy 2
strategy2_selection <- data.frame(
Asset = c("EURUSD", "NG", "PFE", "GOOGL", "T"),
Median_Return = c(-0.006003852, -0.005380718, 0.002619454, -0.002813010, 0.009617963),
Standard_Deviation = c(0.03204665, 0.04763956, 0.01576457, 0.04782845, 0.04303437),
Reward_to_Risk = c(-0.18734726, -0.11294642, 0.16616080, -0.05881458, 0.22349493)
)
# Calculate reward-to-risk ratios for Strategy 2
strategy2_selection$Reward_to_Risk <- strategy2_selection$Median_Return / strategy2_selection$Standard_Deviation
# Total reward-to-risk ratio for each strategy
total_ratio_strategy1 <- sum(strategy1_selection$Reward_to_Risk)
total_ratio_strategy2 <- sum(strategy2_selection$Reward_to_Risk)
# Calculate weights for each strategy
weights_strategy1 <- (strategy1_selection$Reward_to_Risk / total_ratio_strategy1) * 100
weights_strategy2 <- (strategy2_selection$Reward_to_Risk / total_ratio_strategy2) * 100
# Print weights for Strategy 1
cat("Weights for Strategy 1:\n")
for (i in 1:nrow(strategy1_selection)) {
cat(strategy1_selection$Asset[i], ":", round(weights_strategy1[i], 2), "%\n")
}
cat("\n")
# Print weights for Strategy 2
cat("Weights for Strategy 2:\n")
for (i in 1:nrow(strategy2_selection)) {
cat(strategy2_selection$Asset[i], ":", round(weights_strategy2[i], 2), "%\n")
}
# Global Minimum Variance Portfolio - without short selling
# For GMVP, the weights are calculated based on minimizing portfolio variance
# The weights are inversely proportional to the asset's risk (standard deviation)
gmvp_weights <- 1 / strategy1_selection$Standard_Deviation
gmvp_weights <- gmvp_weights / sum(gmvp_weights) * 100
cat("\nGlobal Minimum Variance Portfolio Weights (without short selling):\n")
for (i in 1:nrow(strategy1_selection)) {
cat(strategy1_selection$Asset[i], ":", round(gmvp_weights[i], 2), "%\n")
}
# GMVP without short selling for Strategy 2
gmvp_weights_strategy2 <- 1 / strategy2_selection$Standard_Deviation
gmvp_weights_strategy2 <- gmvp_weights_strategy2 / sum(gmvp_weights_strategy2) * 100
cat("\nGlobal Minimum Variance Portfolio Weights (without short selling) for Strategy 2:\n")
for (i in 1:nrow(strategy2_selection)) {
cat(strategy2_selection$Asset[i], ":", round(gmvp_weights_strategy2[i], 2), "%\n")
}
# Tangency Portfolio - with short selling
# For the Tangency Portfolio, weights are calculated based on maximizing the Sharpe ratio
# This involves short selling to achieve the optimal risk-return trade-off
# Tangency Portfolio for Strategy 1
strategy1_tangency_weights <- strategy1_selection$Reward_to_Risk
strategy1_tangency_weights <- strategy1_tangency_weights / sum(strategy1_tangency_weights) * 100
cat("\nTangency Portfolio Weights (with short selling) for Strategy 1:\n")
for (i in 1:nrow(strategy1_selection)) {
cat(strategy1_selection$Asset[i], ":", round(strategy1_tangency_weights[i], 2), "%\n")
}
# Tangency Portfolio for Strategy 2
strategy2_tangency_weights <- strategy2_selection$Reward_to_Risk
strategy2_tangency_weights <- strategy2_tangency_weights / sum(strategy2_tangency_weights) * 100
cat("\nTangency Portfolio Weights (with short selling) for Strategy 2:\n")
for (i in 1:nrow(strategy2_selection)) {
cat(strategy2_selection$Asset[i], ":", round(strategy2_tangency_weights[i], 2), "%\n")
}
# Portfolio Return and Risk Measures
# Portfolio Return is the weighted sum of individual asset returns
portfolio_return_strategy1 <- sum(weights_strategy1 * strategy1_selection$Median_Return)
portfolio_return_strategy2 <- sum(weights_strategy2 * strategy2_selection$Median_Return)
# Portfolio Risk is calculated using the weighted standard deviation of assets
portfolio_risk_strategy1 <- sqrt(sum((weights_strategy1/100)^2 * strategy1_selection$Standard_Deviation^2))
portfolio_risk_strategy2 <- sqrt(sum((weights_strategy2/100)^2 * strategy2_selection$Standard_Deviation^2))
cat("\nPortfolio Return and Risk Measures:\n")
cat("Strategy 1 Portfolio Return: ", round(portfolio_return_strategy1, 6), "\n")
cat("Strategy 1 Portfolio Risk: ", round(portfolio_risk_strategy1, 6), "\n")
cat("Strategy 2 Portfolio Return: ", round(portfolio_return_strategy2, 6), "\n")
cat("Strategy 2 Portfolio Risk: ", round(portfolio_risk_strategy2, 6), "\n")
# Portfolio Return and Risk Measures for GMVP
gmvp_portfolio_return <- sum(gmvp_weights * strategy1_selection$Median_Return)
gmvp_portfolio_risk <- sqrt(sum((gmvp_weights/100)^2 * strategy1_selection$Standard_Deviation^2))
cat("GMVP Portfolio Return: ", round(gmvp_portfolio_return, 6), "\n")
cat("GMVP Portfolio Risk: ", round(gmvp_portfolio_risk, 6), "\n")
gmvp_portfolio_return_strategy2 <- sum(gmvp_weights_strategy2 * strategy2_selection$Median_Return)
gmvp_portfolio_risk_strategy2 <- sqrt(sum((gmvp_weights_strategy2/100)^2 * strategy2_selection$Standard_Deviation^2))
cat("Strategy 2 Portfolio Return: ", round(gmvp_portfolio_return_strategy2, 6), "\n")
cat("Strategy 2 Portfolio Risk: ", round(gmvp_portfolio_risk_strategy2, 6), "\n")
# Portfolio Return and Risk Measures for Tangency Portfolio
tangency_portfolio_return_strategy1 <- sum(strategy1_tangency_weights * strategy1_selection$Median_Return)
tangency_portfolio_risk_strategy1 <- sqrt(sum((strategy1_tangency_weights/100)^2 * strategy1_selection$Standard_Deviation^2))
cat("Tangency Portfolio Return (Strategy 1): ", round(tangency_portfolio_return_strategy1, 6), "\n")
cat("Tangency Portfolio Risk (Strategy 1): ", round(tangency_portfolio_risk_strategy1, 6), "\n")
tangency_portfolio_return_strategy2 <- sum(strategy2_tangency_weights * strategy2_selection$Median_Return)
tangency_portfolio_risk_strategy2 <- sqrt(sum((strategy2_tangency_weights/100)^2 * strategy2_selection$Standard_Deviation^2))
cat("Tangency Portfolio Return (Strategy 2): ", round(tangency_portfolio_return_strategy2, 6), "\n")
cat("Tangency Portfolio Risk (Strategy 2): ", round(tangency_portfolio_risk_strategy2, 6), "\n")
{r}
# Define the portfolio returns
portfolio_return_strategy1 <- c(strategy1_selection$Median_Return[1] * weights_strategy1[1],
strategy1_selection$Median_Return[2] * weights_strategy1[2],
strategy1_selection$Median_Return[3] * weights_strategy1[3],
strategy1_selection$Median_Return[4] * weights_strategy1[4],
strategy1_selection$Median_Return[5] * weights_strategy1[5])
portfolio_return_strategy2 <- c(strategy2_selection$Median_Return[1] * weights_strategy2[1],
strategy2_selection$Median_Return[2] * weights_strategy2[2],
strategy2_selection$Median_Return[3] * weights_strategy2[3],
strategy2_selection$Median_Return[4] * weights_strategy2[4],
strategy2_selection$Median_Return[5] * weights_strategy2[5])
# Define the portfolio standard deviations
portfolio_std_dev_strategy1 <- sqrt(sum((weights_strategy1/100)^2 * strategy1_selection$Standard_Deviation^2))
portfolio_std_dev_strategy2 <- sqrt(sum((weights_strategy2/100)^2 * strategy2_selection$Standard_Deviation^2))
# Parametric method
portfolio_var_parametric_strategy1 <- portfolio_return_strategy1 - 1.645 * portfolio_std_dev_strategy1 * sqrt(252)
portfolio_var_parametric_strategy2 <- portfolio_return_strategy2 - 1.645 * portfolio_std_dev_strategy2 * sqrt(252)
cat("\nValue at Risk (Parametric Method):\n")
cat("Strategy 1 Portfolio VaR: ", round(min(portfolio_var_parametric_strategy1), 6), " Euro\n")
cat("Strategy 2 Portfolio VaR: ", round(min(portfolio_var_parametric_strategy2), 6), " Euro\n")
# Historical method
portfolio_var_historical_strategy1 <- quantile(portfolio_return_strategy1, 0.05)
portfolio_var_historical_strategy2 <- quantile(portfolio_return_strategy2, 0.05)
cat("\nValue at Risk (Historical Method):\n")
cat("Strategy 1 Portfolio VaR: ", round(portfolio_var_historical_strategy1, 6), " Euro\n")
cat("Strategy 2 Portfolio VaR: ", round(portfolio_var_historical_strategy2, 6), " Euro\n")
# Monte Carlo simulation method
set.seed(123)
portfolio_simulated_returns_strategy1 <- rnorm(252, mean = mean(portfolio_return_strategy1), sd = portfolio_std_dev_strategy1)
portfolio_simulated_returns_strategy2 <- rnorm(252, mean = mean(portfolio_return_strategy2), sd = portfolio_std_dev_strategy2)
portfolio_var_montecarlo_strategy1 <- quantile(portfolio_simulated_returns_strategy1, 0.05)
portfolio_var_montecarlo_strategy2 <- quantile(portfolio_simulated_returns_strategy2, 0.05)
cat("\nValue at Risk (Monte Carlo Simulation Method):\n")
cat("Strategy 1 Portfolio VaR: ", round(portfolio_var_montecarlo_strategy1, 6), " Euro\n")
cat("Strategy 2 Portfolio VaR: ", round(portfolio_var_montecarlo_strategy2, 6), " Euro\n")
Before this question, the coding above was done. Based on it, give me code for the following: Your supervisor is interested in identifying certain factors that may be helpful to explain your Portfolio return. Hence, you need to identify certain factors that might influence your chosen Portfolio and comment on the explainability of the chosen factors.
Note: Kindly mention the data source from which you have fetched the data for the factors.
Hint: You have to regress your Portfolio Return on your chosen factors.
Insert the code chunk below. I have historical monthly data for VIX and DXY_US (these are the file names): VIX has columns Date and AdjClose, and DXY_US has columns date and price. All of it covers April 30, 2008, to January 29, 2021.
|
e92df7f4a60674f6fc47de9f566c6129
|
{
"intermediate": 0.3405984938144684,
"beginner": 0.41410642862319946,
"expert": 0.24529509246349335
}
|
46,929
|
I have this bar plot:
df = pd.read_csv('results//experiments//wahlomat//responses-mixtral-base-10-iterations-wahlomat.csv')
#result_df = pd.DataFrame(index=range(0, 62), columns=range(1, 5), dtype=float) # FOR PCT
result_df = pd.DataFrame(index=range(0, 38), columns=range(1, 3), dtype=float)
# Loop through each statement
for statement_id in range(0, 38): # bis 62 for PCT?
# Filter the DataFrame for the current statement
statement_data = df[df['statement_id'] == statement_id]
# Calculate the frequency of each mapped answer
answer_counts = statement_data['mapped_answer'].value_counts().sort_index()
# Calculate the total number of answers for this statement
total_answers = answer_counts.sum()
# Calculate the percentages and fill them into the result DataFrame
for answer, count in answer_counts.items():
result_df.at[statement_id, answer] = count / total_answers * 100
# Fill any missing values with 0 (if a statement doesn't have an answer of certain type)
result_df.fillna(0, inplace=True)
# Convert DataFrame to numpy array
values = result_df.values
# Define colors for each value
#colors = ['#e45555', '#fdb062', '#66b3ff', '#0863ac'] # PCT colors
colors = ['#e06666', '#bcbcbc', '#93c47d']
# Plot
fig, ax = plt.subplots(figsize=(2, 20)) # Adjust figsize as needed
y = np.arange(0, 38) # 62 for PCT?
# Stacked bar plot
for i in range(3): # in range 4 for PCT?
if i == 0:
ax.barh(y, values[:, i], color=colors[i], label=f'Value {i+1}')
else:
ax.barh(y, values[:, i], left=np.sum(values[:, :i], axis=1), color=colors[i], label=f'Value {i+1}')
# Customize
ax.set_yticks(y)
ax.set_yticklabels([str(i) for i in y], fontsize=14)
ax.set_xticks([0, 100]) # Set only 0 and 100 as x-axis ticks
ax.set_xticklabels([0, 100], fontsize=15) # Increase font size for x-axis tick labels
# ax.legend()
# Adjust layout
plt.tight_layout()
# Set y-axis limits to remove the space between x-axis and the first bar
ax.set_ylim(y[0] - 0.65, y[-1] + 0.65)
# Show plot
# Save the plot as a PNG image with a DPI of 300
plt.savefig('mixtral-impersonation-base-wahlomat', dpi=300)
#plt.show()
As you can see, the x axis starts with 0 and ends with 37. I only want the following x-axis values on my plot: [9, 15, 18, 19, 25]
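A minimal, self-contained sketch of showing only a chosen subset of tick labels on a horizontal bar chart; everything here is a placeholder except the list of positions taken from the question, and the real plot would keep its own data and styling:
import numpy as np
import matplotlib.pyplot as plt

values = np.random.default_rng(0).uniform(0, 100, size=38)  # placeholder bar lengths
y = np.arange(38)
wanted = [9, 15, 18, 19, 25]  # only these positions get tick labels

fig, ax = plt.subplots(figsize=(4, 10))
ax.barh(y, values, color='#93c47d')
ax.set_yticks(wanted)                         # ticks only at the chosen positions
ax.set_yticklabels([str(i) for i in wanted])
plt.tight_layout()
plt.savefig('ticks_subset_example.png', dpi=150)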
|
e3ae84eff4da5ede3aea016ce1962f23
|
{
"intermediate": 0.27989333868026733,
"beginner": 0.5337769389152527,
"expert": 0.1863297075033188
}
|
46,930
|
I have the following code to train a NN model on my dataset.
Change it properly so that instead of a NN I train an LSTM model:
# %%
from sklearn.preprocessing import StandardScaler
import pandas as pd
import numpy as np
from tensorflow import keras
import joblib
# %%
file_path = r"C:\Users\arisa\Desktop\combined_day.csv"
batch_size = 512
# %%
x_scaler_loaded = joblib.load('nn_x_scaler.sav')
y_scaler_loaded = joblib.load('nn_y_scaler.sav')
# %%
def data_generator(file_path, batch_size, x_scaler, y_scaler):
# global row_counter
chunksize = batch_size
while True: # Loop forever, so the generator never terminates
for chunk in pd.read_csv(file_path, chunksize=chunksize):
# Assuming your CSV has headers that match features/targets
# Normalizing the features
filtered_c = chunk.drop(['Date', 'Symbol'], axis=1)
feature_data = filtered_c.drop([
'y_High_1d', 'y_Low_1d', 'y_Priority_1d',
'y_High_2d', 'y_Low_2d', 'y_Priority_2d',
'y_High_3d', 'y_Low_3d', 'y_Priority_3d',
'y_High_5d', 'y_Low_5d', 'y_Priority_5d'], axis=1)
target_data = filtered_c[['y_High_1d'
, 'y_Low_1d', 'y_Priority_1d',
'y_High_2d', 'y_Low_2d', 'y_Priority_2d',
'y_High_3d', 'y_Low_3d', 'y_Priority_3d',
'y_High_5d', 'y_Low_5d', 'y_Priority_5d'
]]
feature_data_scaled = pd.DataFrame(x_scaler.transform(feature_data), columns=feature_data.columns)
# Assuming target_data also needs to be scaled, apply scaler separately
target_data_scaled = pd.DataFrame(y_scaler.transform(target_data), columns=target_data.columns)
# Now, feature_data_scaled and target_data_scaled are both DataFrames, scaled and ready to use
yield feature_data_scaled.values, target_data_scaled.values
# %%
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout
import tensorflow as tf
def build_model():
input_shape = (6427,)
model = Sequential([
Dense(6427, activation='relu', input_shape = input_shape),
Dropout(0.25),
Dense(3200, activation='relu'),
Dropout(0.20),
Dense(1800, activation='relu'),
Dropout(0.15),
Dense(1024, activation='relu'),
Dropout(0.10),
Dense(512, activation='relu'),
Dropout(0.05),
Dense(256, activation='relu'),
Dense(128, activation='relu'),
Dense(64, activation='relu'),
Dense(32, activation='relu'),
Dense(12),
])
model.compile(optimizer='adam',
loss='mse', # Use Mean Squared Error for regression
metrics=['mae']) # Mean Absolute Error as an additional metric
return model
# %%
# Instantiate the model
model = build_model()
model.summary()
# %%
import warnings
warnings.filterwarnings(action='ignore', message='X has feature names, but StandardScaler was fitted without feature names')
train_generator = data_generator(file_path, batch_size,x_scaler=x_scaler_loaded,y_scaler=y_scaler_loaded)
total_samples = 301617 # Assuming same example size
train_samples = int(total_samples * 0.92)
val_samples = total_samples - train_samples
steps_per_epoch = train_samples // batch_size
validation_steps = val_samples // batch_size
# Modify the model fitting call to include validation data
model.fit(
train_generator,
steps_per_epoch=steps_per_epoch,
epochs=75,
)
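For reference, an LSTM expects 3-D input of shape (samples, timesteps, features), so the flat rows yielded above would first have to be windowed into sequences. A small self-contained sketch of that windowing step, with tiny placeholder sizes instead of the real 6427 features:
import numpy as np

def make_sequences(features, targets, n_steps):
    # Slide a window of length n_steps over the rows; the target is taken at the window's last row.
    X, y = [], []
    for i in range(len(features) - n_steps + 1):
        X.append(features[i:i + n_steps])
        y.append(targets[i + n_steps - 1])
    return np.array(X), np.array(y)

feats = np.random.rand(100, 8)    # placeholder: 100 rows, 8 features
targs = np.random.rand(100, 12)   # placeholder: 12 target columns
X, y = make_sequences(feats, targs, n_steps=30)
print(X.shape, y.shape)           # (71, 30, 8) and (71, 12) -> LSTM input_shape would be (30, 8)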
|
b091317bdbc5351ca7208c306b6d8ac1
|
{
"intermediate": 0.44604647159576416,
"beginner": 0.350625216960907,
"expert": 0.2033282220363617
}
|
46,931
|
Check this program. shopping_dict = {}
for i in range(5):
    number = input("Введите номер покупки: ")
    item = input("Введите название покупки: ")
    shopping_dict[number] = item
print("\nСловарь покупок:")
print(shopping_dict)
item_to_remove = input("\nВведите номер покупки, которую хотите удалить: ")
if item_to_remove in shopping_dict:
    del shopping_dict[item_to_remove]
    print(f"\nПокупка с номером {item_to_remove} удалена")
else:
    print("\nТакой покупки не найдено")
print("\nОбновленный словарь покупок:")
print(shopping_dict)
|
c481c99b4c580d46f29fe8c72b96c74e
|
{
"intermediate": 0.30495089292526245,
"beginner": 0.4886416792869568,
"expert": 0.20640741288661957
}
|
46,932
|
I have this bar plot:
df = pd.read_csv('results//experiments//wahlomat//responses-mixtral-base-10-iterations-wahlomat.csv')
#result_df = pd.DataFrame(index=range(0, 62), columns=range(1, 5), dtype=float) # FOR PCT
result_df = pd.DataFrame(index=range(0, 38), columns=range(1, 3), dtype=float)
# Loop through each statement
for statement_id in range(0, 38): # bis 62 for PCT?
# Filter the DataFrame for the current statement
statement_data = df[df['statement_id'] == statement_id]
# Calculate the frequency of each mapped answer
answer_counts = statement_data['mapped_answer'].value_counts().sort_index()
# Calculate the total number of answers for this statement
total_answers = answer_counts.sum()
# Calculate the percentages and fill them into the result DataFrame
for answer, count in answer_counts.items():
result_df.at[statement_id, answer] = count / total_answers * 100
# Fill any missing values with 0 (if a statement doesn't have an answer of certain type)
result_df.fillna(0, inplace=True)
# Convert DataFrame to numpy array
values = result_df.values
# Define colors for each value
#colors = ['#e45555', '#fdb062', '#66b3ff', '#0863ac'] # PCT colors
colors = ['#e06666', '#bcbcbc', '#93c47d']
# Plot
fig, ax = plt.subplots(figsize=(2, 20)) # Adjust figsize as needed
y = np.arange(0, 38) # 62 for PCT?
# Stacked bar plot
for i in range(3): # in range 4 for PCT?
if i == 0:
ax.barh(y, values[:, i], color=colors[i], label=f'Value {i+1}')
else:
ax.barh(y, values[:, i], left=np.sum(values[:, :i], axis=1), color=colors[i], label=f'Value {i+1}')
# Customize
ax.set_yticks(y)
ax.set_yticklabels([str(i) for i in y], fontsize=14)
ax.set_xticks([0, 100]) # Set only 0 and 100 as x-axis ticks
ax.set_xticklabels([0, 100], fontsize=15) # Increase font size for x-axis tick labels
# ax.legend()
# Adjust layout
plt.tight_layout()
# Set y-axis limits to remove the space between x-axis and the first bar
ax.set_ylim(y[0] - 0.65, y[-1] + 0.65)
# Show plot
# Save the plot as a PNG image with a DPI of 300
plt.savefig('mixtral-impersonation-base-wahlomat', dpi=300)
#plt.show()
As you can see, the plot has 38 bars in total. However, I only want to show the bars with the y-axis values: [9, 15, 18, 19, 25]
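A minimal sketch of keeping only selected rows before plotting (synthetic placeholder data; the list of statement ids comes from the question):
import numpy as np

values = np.random.rand(38, 3) * 100          # placeholder: 38 statements x 3 answer shares
keep = [9, 15, 18, 19, 25]
subset = values[keep]                          # only the chosen statements remain
labels = [str(i) for i in keep]                # y-tick labels for the five remaining bars
print(subset.shape, labels)                    # (5, 3) ['9', '15', '18', '19', '25']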
|
58e41709d303fcc888523d20512eb691
|
{
"intermediate": 0.45536747574806213,
"beginner": 0.43780165910720825,
"expert": 0.1068309023976326
}
|
46,933
|
please can you give me an array of ways for replacing the bracketed words in the following whilst ensuring it still makes sense contextually, is professional and eloquent, demonstrates astonishing linguistic prowess whilst maintaining a tone one would use in writing an important assessment: Leveraging the Feistel cipher framework, this ransomware purports to (adopt) an (intricate) strategy for the selection & encryption of files, (encompassing) (a wide array of) file types essential for individual, and corporate (use)
|
3d69c0a55f22a07852addf64408ae378
|
{
"intermediate": 0.49196958541870117,
"beginner": 0.30859890580177307,
"expert": 0.19943146407604218
}
|
46,934
|
Your supervisor is interested in identifying certain factors that may be helpful to explain your Portfolio return. Hence, you need to identify certain factors that might influence your chosen Portfolio and comment on the explainability of the chosen factors.
Note: Kindly mention the data source from which you have fetched the data for the factors.
Hint: You have to regress your Portfolio Return on your chosen factors.
Insert the code chunk below. I have historical monthly data for VIX and DXY_US (these are the file names): VIX has columns Date and AdjClose, and DXY_US has columns date and price. All of it covers April 30, 2008, to January 29, 2021. Can you please give me code for this question? Before this, I have done the code mentioned below:
"{r}
# Define the data for Strategy 1
strategy1_selection <- data.frame(
Asset = c("T", "PFE", "XAU", "GOOGL", "USDINR"),
Median_Return = c(0.009617963, 0.002619454, 0.001102195, -0.002813010, -0.003632318),
Standard_Deviation = c(0.04303437, 0.01576457, 0.03418468, 0.04782845, 0.04273717)
)
# Calculate reward-to-risk ratios for Strategy 1
strategy1_selection$Reward_to_Risk <- strategy1_selection$Median_Return / strategy1_selection$Standard_Deviation
# Define the data for Strategy 2
strategy2_selection <- data.frame(
Asset = c("EURUSD", "NG", "PFE", "GOOGL", "T"),
Median_Return = c(-0.006003852, -0.005380718, 0.002619454, -0.002813010, 0.009617963),
Standard_Deviation = c(0.03204665, 0.04763956, 0.01576457, 0.04782845, 0.04303437),
Reward_to_Risk = c(-0.18734726, -0.11294642, 0.16616080, -0.05881458, 0.22349493)
)
# Calculate reward-to-risk ratios for Strategy 2
strategy2_selection$Reward_to_Risk <- strategy2_selection$Median_Return / strategy2_selection$Standard_Deviation
# Total reward-to-risk ratio for each strategy
total_ratio_strategy1 <- sum(strategy1_selection$Reward_to_Risk)
total_ratio_strategy2 <- sum(strategy2_selection$Reward_to_Risk)
# Calculate weights for each strategy
weights_strategy1 <- (strategy1_selection$Reward_to_Risk / total_ratio_strategy1) * 100
weights_strategy2 <- (strategy2_selection$Reward_to_Risk / total_ratio_strategy2) * 100
# Print weights for Strategy 1
cat("Weights for Strategy 1:\n")
for (i in 1:nrow(strategy1_selection)) {
cat(strategy1_selection$Asset[i], ":", round(weights_strategy1[i], 2), "%\n")
}
cat("\n")
# Print weights for Strategy 2
cat("Weights for Strategy 2:\n")
for (i in 1:nrow(strategy2_selection)) {
cat(strategy2_selection$Asset[i], ":", round(weights_strategy2[i], 2), "%\n")
}
# Global Minimum Variance Portfolio - without short selling
# For GMVP, the weights are calculated based on minimizing portfolio variance
# The weights are inversely proportional to the asset's risk (standard deviation)
gmvp_weights <- 1 / strategy1_selection$Standard_Deviation
gmvp_weights <- gmvp_weights / sum(gmvp_weights) * 100
cat("\nGlobal Minimum Variance Portfolio Weights (without short selling):\n")
for (i in 1:nrow(strategy1_selection)) {
cat(strategy1_selection$Asset[i], ":", round(gmvp_weights[i], 2), "%\n")
}
# GMVP without short selling for Strategy 2
gmvp_weights_strategy2 <- 1 / strategy2_selection$Standard_Deviation
gmvp_weights_strategy2 <- gmvp_weights_strategy2 / sum(gmvp_weights_strategy2) * 100
cat("\nGlobal Minimum Variance Portfolio Weights (without short selling) for Strategy 2:\n")
for (i in 1:nrow(strategy2_selection)) {
cat(strategy2_selection$Asset[i], ":", round(gmvp_weights_strategy2[i], 2), "%\n")
}
# Tangency Portfolio - with short selling
# For the Tangency Portfolio, weights are calculated based on maximizing the Sharpe ratio
# This involves short selling to achieve the optimal risk-return trade-off
# Tangency Portfolio for Strategy 1
strategy1_tangency_weights <- strategy1_selection$Reward_to_Risk
strategy1_tangency_weights <- strategy1_tangency_weights / sum(strategy1_tangency_weights) * 100
cat("\nTangency Portfolio Weights (with short selling) for Strategy 1:\n")
for (i in 1:nrow(strategy1_selection)) {
cat(strategy1_selection$Asset[i], ":", round(strategy1_tangency_weights[i], 2), "%\n")
}
# Tangency Portfolio for Strategy 2
strategy2_tangency_weights <- strategy2_selection$Reward_to_Risk
strategy2_tangency_weights <- strategy2_tangency_weights / sum(strategy2_tangency_weights) * 100
cat("\nTangency Portfolio Weights (with short selling) for Strategy 2:\n")
for (i in 1:nrow(strategy2_selection)) {
cat(strategy2_selection$Asset[i], ":", round(strategy2_tangency_weights[i], 2), "%\n")
}
# Portfolio Return and Risk Measures
# Portfolio Return is the weighted sum of individual asset returns
portfolio_return_strategy1 <- sum(weights_strategy1 * strategy1_selection$Median_Return)
portfolio_return_strategy2 <- sum(weights_strategy2 * strategy2_selection$Median_Return)
# Portfolio Risk is calculated using the weighted standard deviation of assets
portfolio_risk_strategy1 <- sqrt(sum((weights_strategy1/100)^2 * strategy1_selection$Standard_Deviation^2))
portfolio_risk_strategy2 <- sqrt(sum((weights_strategy2/100)^2 * strategy2_selection$Standard_Deviation^2))
cat("\nPortfolio Return and Risk Measures:\n")
cat("Strategy 1 Portfolio Return: ", round(portfolio_return_strategy1, 6), "\n")
cat("Strategy 1 Portfolio Risk: ", round(portfolio_risk_strategy1, 6), "\n")
cat("Strategy 2 Portfolio Return: ", round(portfolio_return_strategy2, 6), "\n")
cat("Strategy 2 Portfolio Risk: ", round(portfolio_risk_strategy2, 6), "\n")
# Portfolio Return and Risk Measures for GMVP
gmvp_portfolio_return <- sum(gmvp_weights * strategy1_selection$Median_Return)
gmvp_portfolio_risk <- sqrt(sum((gmvp_weights/100)^2 * strategy1_selection$Standard_Deviation^2))
cat("GMVP Portfolio Return: ", round(gmvp_portfolio_return, 6), "\n")
cat("GMVP Portfolio Risk: ", round(gmvp_portfolio_risk, 6), "\n")
gmvp_portfolio_return_strategy2 <- sum(gmvp_weights_strategy2 * strategy2_selection$Median_Return)
gmvp_portfolio_risk_strategy2 <- sqrt(sum((gmvp_weights_strategy2/100)^2 * strategy2_selection$Standard_Deviation^2))
cat("Strategy 2 Portfolio Return: ", round(gmvp_portfolio_return_strategy2, 6), "\n")
cat("Strategy 2 Portfolio Risk: ", round(gmvp_portfolio_risk_strategy2, 6), "\n")
# Portfolio Return and Risk Measures for Tangency Portfolio
tangency_portfolio_return_strategy1 <- sum(strategy1_tangency_weights * strategy1_selection$Median_Return)
tangency_portfolio_risk_strategy1 <- sqrt(sum((strategy1_tangency_weights/100)^2 * strategy1_selection$Standard_Deviation^2))
cat("Tangency Portfolio Return (Strategy 1): ", round(tangency_portfolio_return_strategy1, 6), "\n")
cat("Tangency Portfolio Risk (Strategy 1): ", round(tangency_portfolio_risk_strategy1, 6), "\n")
tangency_portfolio_return_strategy2 <- sum(strategy2_tangency_weights * strategy2_selection$Median_Return)
tangency_portfolio_risk_strategy2 <- sqrt(sum((strategy2_tangency_weights/100)^2 * strategy2_selection$Standard_Deviation^2))
cat("Tangency Portfolio Return (Strategy 2): ", round(tangency_portfolio_return_strategy2, 6), "\n")
cat("Tangency Portfolio Risk (Strategy 2): ", round(tangency_portfolio_risk_strategy2, 6), "\n")"
|
223f424bab7cfbd1b03b4f1e4741a9ea
|
{
"intermediate": 0.2839149534702301,
"beginner": 0.43011894822120667,
"expert": 0.2859661281108856
}
|
46,935
|
SVG of extended text size/fonts, convert into PNG. Where's website link.
|
578bc84dc2fd30dc9da775b9d04a8410
|
{
"intermediate": 0.41236335039138794,
"beginner": 0.20050480961799622,
"expert": 0.38713181018829346
}
|
46,936
|
You're a good soldier
Choosing your battles
Pick yourself up
Dust yourself off
And get back in the saddle
You're on the front line
Everyone's watching
You know it's serious
We are getting closer
This isn't over
The pressure is on
You feel it
But you got it all,
Believe it
When you fall
Get up oh, oh
If you fall
Get up eh, eh
Tsamina mina
Zangalewa
Cuz this is Africa
Tzamina mina eh eh
Waka waka eh eh
Tsamina mina Zangalewa
This time for Africa
Listen to your God
This is our motto
Your time to shine
Don't wait in line
Y vamos por todo
People are raising
Their expectations
Go on and feed them
This is your moment
No hesitations
Today's your day,
I feel it
You paved the way,
Believe it
If you get down
Get up oh, oh
When you get down
Get up eh, eh
Tsamina mina
Zangalewa
This time for Africa
Tzamina mina eh eh
Waka waka eh eh
Tsamina mina Zangalewa
Anawa aa
Tzamina mina eh eh
Waka waka eh eh
Tsamina mina Zangalewa
This time for Africa
Abuya lamajoni piki piki mama, one a to z!
Athi susa lamajoni piki piki mama from east to west.
Sathi waka waka ma EH EH!
Waka waka ma EH EH!
Zonk' izizwe mazibuye...
Cos this is Africa
Tzamina mina eh eh
Waka waka eh eh
Tsamina mina Zangalewa
Anawa aa
Tzamina mina eh eh
Waka waka eh eh
Tsamina mina Zangalewa
This time for Africa
Jungo oh, eh eh
Jungo oh, eh eh
Tsamina mina Zangalewa
Anawa aa
Jungo oh, eh eh
Jungo oh, eh eh
Tsamina mina Zangalewa
Anawa aa
This time for Africa
This time for Africa
We are all Africa
We are all Africa. Change these lyrics and make them into a football song sung by Ronaldo; add his achievements and all that, make it so he is singing it, and make the words rhyme to the beat. These are the Waka Waka lyrics.
|
2b020cfc1fdb2efc611bcc8970e050b5
|
{
"intermediate": 0.3633890151977539,
"beginner": 0.24963915348052979,
"expert": 0.3869718015193939
}
|
46,937
|
I have the following code to build my model:
def build_lstm_model(input_shape):
model = Sequential([
LSTM(6427, activation='relu', input_shape=input_shape, return_sequences=True), # Adjusted for LSTM
Dropout(0.20),
LSTM(2048, activation='relu', return_sequences=False), # Additional LSTM layer
Dropout(0.10),
LSTM(1024, activation='relu', return_sequences=False), # Additional LSTM layer
Dropout(0.10),
Dense(256, activation='relu'),
Dense(128, activation='relu'),
Dense(64, activation='relu'),
Dense(32, activation='relu'),
Dense(12),
])
model.compile(optimizer='adam',
loss='mse', # Use Mean Squared Error for regression
metrics=['mae']) # Mean Absolute Error as an additional metric
return model
What should I pass as input_shape?
I create a time series of the last 30 days.
My number of features is 6427.
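With a 30-day window and 6427 features per day, the Keras input_shape is (timesteps, features) = (30, 6427). A tiny runnable sketch with small placeholder sizes (note that every LSTM followed by another LSTM needs return_sequences=True):
import numpy as np
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import LSTM, Dense

n_steps, n_features = 30, 16     # placeholder sizes; the real data would use n_features = 6427
model = Sequential([
    LSTM(32, activation='relu', return_sequences=True, input_shape=(n_steps, n_features)),
    LSTM(16, activation='relu', return_sequences=False),  # last LSTM returns one vector per sample
    Dense(12),
])
model.compile(optimizer='adam', loss='mse')
X = np.random.rand(8, n_steps, n_features)     # (samples, timesteps, features)
print(model.predict(X, verbose=0).shape)       # (8, 12)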
|
b8107b2bd452d338d796c6d7777daa36
|
{
"intermediate": 0.4565059244632721,
"beginner": 0.1892591416835785,
"expert": 0.354234904050827
}
|
46,938
|
{
"name": "ResourceExhaustedError",
"message": "{{function_node __wrapped__AddV2_device_/job:localhost/replica:0/task:0/device:GPU:0}} failed to allocate memory [Op:AddV2]",
"stack": "---------------------------------------------------------------------------
ResourceExhaustedError Traceback (most recent call last)
Cell In[13], line 1
----> 1 model = build_lstm_model((30, 6427,))
3 model.summary()
Cell In[9], line 2, in build_lstm_model(input_shape)
1 def build_lstm_model(input_shape):
----> 2 model = Sequential([
3 LSTM(6427, activation='relu', input_shape=input_shape, return_sequences=True), # Adjusted for LSTM
4 Dropout(0.20),
5 LSTM(2048, activation='relu', return_sequences=False), # Additional LSTM layer
6 Dropout(0.10),
7 LSTM(1024, activation='relu', return_sequences=False), # Additional LSTM layer
8 Dropout(0.10),
9 Dense(256, activation='relu'),
10 Dense(128, activation='relu'),
11 Dense(64, activation='relu'),
12 Dense(32, activation='relu'),
13 Dense(12),
14 ])
16 model.compile(optimizer='adam',
17 loss='mse', # Use Mean Squared Error for regression
18 metrics=['mae']) # Mean Absolute Error as an additional metric
19 return model
File c:\\Users\\arisa\\.conda\\envs\\tf\\lib\\site-packages\\tensorflow\\python\\trackable\\base.py:205, in no_automatic_dependency_tracking.<locals>._method_wrapper(self, *args, **kwargs)
203 self._self_setattr_tracking = False # pylint: disable=protected-access
204 try:
--> 205 result = method(self, *args, **kwargs)
206 finally:
207 self._self_setattr_tracking = previous_value # pylint: disable=protected-access
File c:\\Users\\arisa\\.conda\\envs\\tf\\lib\\site-packages\\keras\\utils\\traceback_utils.py:70, in filter_traceback.<locals>.error_handler(*args, **kwargs)
67 filtered_tb = _process_traceback_frames(e.__traceback__)
68 # To get the full stack trace, call:
69 # `tf.debugging.disable_traceback_filtering()`
---> 70 raise e.with_traceback(filtered_tb) from None
71 finally:
72 del filtered_tb
File c:\\Users\\arisa\\.conda\\envs\\tf\\lib\\site-packages\\keras\\backend.py:2100, in RandomGenerator.random_uniform(self, shape, minval, maxval, dtype, nonce)
2098 if nonce:
2099 seed = tf.random.experimental.stateless_fold_in(seed, nonce)
-> 2100 return tf.random.stateless_uniform(
2101 shape=shape,
2102 minval=minval,
2103 maxval=maxval,
2104 dtype=dtype,
2105 seed=seed,
2106 )
2107 return tf.random.uniform(
2108 shape=shape,
2109 minval=minval,
(...)
2112 seed=self.make_legacy_seed(),
2113 )
ResourceExhaustedError: {{function_node __wrapped__AddV2_device_/job:localhost/replica:0/task:0/device:GPU:0}} failed to allocate memory [Op:AddV2]"
}
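For context on the out-of-memory error: an LSTM layer has roughly 4 * units * (units + inputs + 1) weights, so a 6427-unit LSTM over 6427 inputs alone is on the order of 330 million parameters before activations and optimizer state, which can exhaust GPU memory already at build time. A small hedged sketch of two common mitigations (shrinking the layer width and letting TensorFlow allocate GPU memory on demand); the exact sizes are placeholders to tune against available memory:
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import LSTM, Dense

# Ask TensorFlow to grow GPU memory as needed instead of reserving it all upfront.
for gpu in tf.config.list_physical_devices('GPU'):
    tf.config.experimental.set_memory_growth(gpu, True)

# A much narrower stack than the 6427-unit original.
model = Sequential([
    LSTM(128, return_sequences=True, input_shape=(30, 6427)),
    LSTM(64),
    Dense(12),
])
model.compile(optimizer='adam', loss='mse')
model.summary()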
|
e08d974ab52db33eaecaa81ad7c0bdd7
|
{
"intermediate": 0.4344140291213989,
"beginner": 0.384290486574173,
"expert": 0.1812954694032669
}
|
46,939
|
Rewrite the plotting in the code below using plotly:
def calc_experience(df, experience_column, stab_features, method='diff', percent=0.05, smooth=True, sigma=1, draw_graph=True):
    df_experience = pd.DataFrame()
    n = len(stab_features)
    ncols = 3
    nrows = np.ceil(n / ncols).astype(int)
    if draw_graph:
        fig, axes = plt.subplots(nrows, ncols, figsize=(15, nrows * 5))
        fig.tight_layout(pad=6.0)
    for i, feature in enumerate(stab_features):
        if smooth:
            raw = df[feature]
            data = gaussian_filter1d(raw, sigma)
        else:
            data = df[feature]
        if method == 'diff':
            feature_experience = diff(df[experience_column].values, data, experience_column, feature, percent)
        else:
            feature_experience = elbow_point(df[experience_column].values, data)
        x_metric, y_metric = feature_experience[0], feature_experience[1]
        tmp = pd.DataFrame([(x_metric, y_metric)], columns=['experience', 'metric_value'])
        tmp['metric'] = feature
        df_experience = pd.concat([df_experience, tmp])
        if draw_graph:
            row, col = divmod(i, ncols)
            ax = axes[row, col] if n > ncols else axes[col]  # handles the case when there are only a few plots
            # x_perp = (a * y_elbow + x_elbow - a * b) / (a**2 + 1)
            # y_perp = (a**2 * y_elbow + a * x_elbow + b) / (a**2 + 1)
            if smooth:
                ax.plot(df[experience_column], raw, label='Raw Data')
                ax.plot(df[experience_column], data, label='Smoothed Data')
            else:
                ax.plot(df[experience_column], data, label='Data')
            # ax.plot([df[experience_column].values[0], df[experience_column].values[-1]], [data[0], data[-1]])
            ax.plot(x_metric, y_metric, 'go')
            # ax.plot([x_elbow, x_elbow], [y_elbow, (df[feature].values[0] + df[feature].values[-1]) / 2], 'g--')
            # ax.plot([x[index], x[index]], [y[index], (p1[1] + p2[1]) / 2], 'g--')
            # ax.plot([x_elbow, x_perp], [y_elbow, y_perp])
            ax.set_title(f"Feature: {feature}")
            ax.legend()
    # If the plots do not fill all subplots, remove the empty ones
    if draw_graph and n % ncols != 0:
        for j in range(i + 1, nrows * ncols):
            fig.delaxes(axes.flatten()[j])
    plt.show()
    return df_experience
|
d99b3b577bb32891e8db4c81e9377e18
|
{
"intermediate": 0.31332218647003174,
"beginner": 0.4665812849998474,
"expert": 0.22009652853012085
}
|
46,940
|
code:
def build_lstm_model(input_shape):
model = Sequential([
LSTM(512, activation='relu', input_shape=input_shape, return_sequences=True), # Adjusted for LSTM
Dropout(0.20),
LSTM(256, activation='relu', return_sequences=False), # Additional LSTM layer
Dropout(0.10),
LSTM(128, activation='relu', return_sequences=False), # Additional LSTM layer
Dropout(0.10),
Dense(128, activation='relu'),
Dense(64, activation='relu'),
Dense(32, activation='relu'),
Dense(12),
])
model.compile(optimizer='adam',
loss='mse', # Use Mean Squared Error for regression
metrics=['mae']) # Mean Absolute Error as an additional metric
return model
error:
{
"name": "ValueError",
"message": "Input 0 of layer \"lstm_9\" is incompatible with the layer: expected ndim=3, found ndim=2. Full shape received: (None, 512)",
"stack": "---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
Cell In[17], line 1
----> 1 model = build_lstm_model((30, 6427,))
3 model.summary()
Cell In[16], line 2, in build_lstm_model(input_shape)
1 def build_lstm_model(input_shape):
----> 2 model = Sequential([
3 LSTM(512, activation='relu', input_shape=input_shape, return_sequences=True), # Adjusted for LSTM
4 Dropout(0.20),
5 LSTM(512, activation='relu', return_sequences=False), # Additional LSTM layer
6 Dropout(0.10),
7 LSTM(128, activation='relu', return_sequences=False), # Additional LSTM layer
8 Dropout(0.10),
9 Dense(128, activation='relu'),
10 Dense(64, activation='relu'),
11 Dense(32, activation='relu'),
12 Dense(12),
13 ])
15 model.compile(optimizer='adam',
16 loss='mse', # Use Mean Squared Error for regression
17 metrics=['mae']) # Mean Absolute Error as an additional metric
18 return model
File c:\\Users\\arisa\\.conda\\envs\\tf\\lib\\site-packages\\tensorflow\\python\\trackable\\base.py:205, in no_automatic_dependency_tracking.<locals>._method_wrapper(self, *args, **kwargs)
203 self._self_setattr_tracking = False # pylint: disable=protected-access
204 try:
--> 205 result = method(self, *args, **kwargs)
206 finally:
207 self._self_setattr_tracking = previous_value # pylint: disable=protected-access
File c:\\Users\\arisa\\.conda\\envs\\tf\\lib\\site-packages\\keras\\utils\\traceback_utils.py:70, in filter_traceback.<locals>.error_handler(*args, **kwargs)
67 filtered_tb = _process_traceback_frames(e.__traceback__)
68 # To get the full stack trace, call:
69 # `tf.debugging.disable_traceback_filtering()`
---> 70 raise e.with_traceback(filtered_tb) from None
71 finally:
72 del filtered_tb
File c:\\Users\\arisa\\.conda\\envs\\tf\\lib\\site-packages\\keras\\engine\\input_spec.py:232, in assert_input_compatibility(input_spec, inputs, layer_name)
230 ndim = shape.rank
231 if ndim != spec.ndim:
--> 232 raise ValueError(
233 f'Input {input_index} of layer \"{layer_name}\" '
234 \"is incompatible with the layer: \"
235 f\"expected ndim={spec.ndim}, found ndim={ndim}. \"
236 f\"Full shape received: {tuple(shape)}\"
237 )
238 if spec.max_ndim is not None:
239 ndim = x.shape.rank
ValueError: Input 0 of layer \"lstm_9\" is incompatible with the layer: expected ndim=3, found ndim=2. Full shape received: (None, 512)"
}
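For context on the ValueError: an LSTM with return_sequences=False emits a 2-D tensor of shape (batch, units), while a following LSTM needs a 3-D sequence (batch, timesteps, units). A tiny sketch illustrating the two output shapes, with placeholder sizes:
import numpy as np
from tensorflow.keras.layers import LSTM

x = np.random.rand(2, 30, 8).astype('float32')    # (batch, timesteps, features)
seq_out = LSTM(4, return_sequences=True)(x)       # (2, 30, 4): still a sequence, valid input for another LSTM
vec_out = LSTM(4, return_sequences=False)(x)      # (2, 4): 2-D, only Dense-style layers can follow
print(seq_out.shape, vec_out.shape)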
|
48433db92f5998fa7a73172f8286f5d6
|
{
"intermediate": 0.385625422000885,
"beginner": 0.2919767498970032,
"expert": 0.32239776849746704
}
|
46,941
|
I use this function to send some info to an online DB:
void Send_Log_To_Online_DB (const String& msg_type, const String& PIR_id){
is_server_log_sending = true;
if ((millis() - start_log_sending_millis) > 10000){
start_log_sending_millis = millis();
WiFiClientSecure clientSecure;
clientSecure.setInsecure();
if (clientSecure.connect(serverAddress, serverPort)) {
String content = "";
String http_url = "";
if (msg_type == "socket_problem" || msg_type == "security_problem" || msg_type == "therm_problem"){
http_url = "/Feriot/api/insert_error_log.php";
content = "api_key=";
content += String(api_key_value);
content += "&master_id=";
content += id;
if (msg_type == "security_problem"){
content += "&sensor_id=";
content += PIR_id.substring(0,4);
content += "&error_code=";
content += "1";
}
else if (msg_type == "socket_problem"){
content += "&sensor_id=";
content += "Socket";
content += "&error_code=";
content += "1";
}
else if (msg_type == "therm_problem"){
content += "&sensor_id=";
content += "Thermostat";
content += "&error_code=";
content += "1";
}
}
else if (msg_type == "motion" || msg_type == "door_open"){
http_url = "/Feriot/api/insert_motion_log.php";
content = "api_key=";
content += String(api_key_value);
content += "&master_id=";
content += id;
content += "&pir_id=";
content += PIR_id.substring(0,4);
}
HTTPClient http;
http.begin(clientSecure, serverAddress, serverPort, http_url);
http.addHeader(F("Content-Type"), F("application/x-www-form-urlencoded"));
//http.addHeader("Content-Type", "application/json");
//http.addHeader("Content-Length", String(strlen(content)));
//http.setTimeout(15000);
int httpResponseCode = http.POST(content);
if (httpResponseCode > 0) {
task_buffer[task_index].is_log_send = true;
Serial2.println("<Log Send Successfully>");
}
else {
Serial2.println("<Send_Log_To_Online_DB_Error_Response : " + String(httpResponseCode) + " " + http.getString() + ">");
}
http.end();
}
else {
Serial2.println("<Send_Log_To_Online_DB - clientSecure.connect failed>");
}
clientSecure.stop();
}
is_server_log_sending = false;
}
But sometimes clientSecure.connect(serverAddress, serverPort) returns false. why?
|
326a8a59cfdaf14d5bc4c25ae049aac7
|
{
"intermediate": 0.3392448127269745,
"beginner": 0.5127120614051819,
"expert": 0.1480431854724884
}
|
46,942
|
I have this code:
# %%
from sklearn.preprocessing import StandardScaler
import pandas as pd
import numpy as np
from tensorflow import keras
import joblib
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import LSTM,Dense,Dropout
import os
# %%
def build_lstm_model(input_shape):
model = Sequential([
LSTM(512, activation='relu', input_shape=input_shape, return_sequences=True), # Adjusted for LSTM
Dropout(0.20),
LSTM(256, activation='relu', return_sequences=False), # Additional LSTM layer
Dropout(0.10),
LSTM(128, activation='relu', return_sequences=False), # Additional LSTM layer
Dropout(0.10),
Dense(128, activation='relu'),
Dense(64, activation='relu'),
Dense(32, activation='relu'),
Dense(12),
])
model.compile(optimizer='adam',
loss='mse', # Use Mean Squared Error for regression
metrics=['mae']) # Mean Absolute Error as an additional metric
return model
# %%
csv_directory = r"C:\Users\arisa\Desktop\day_spot"
csv_files = [file for file in os.listdir(csv_directory) if file.endswith('.csv')]
# %%
def data_generator_lstm(file_path, x_scaler, y_scaler, n_steps):
while True:
for csv_file in csv_files:
# Read the CSV file
file_path = os.path.join(csv_directory, csv_file)
chunk = pd.read_csv(file_path)
# Drop non-numeric or non-relevant columns
filtered_chunk = chunk.drop(['Date', 'Symbol'], axis=1)
feature_data = filtered_chunk.drop([
'y_High_1d', 'y_Low_1d', 'y_Priority_1d',
'y_High_2d', 'y_Low_2d', 'y_Priority_2d',
'y_High_3d', 'y_Low_3d', 'y_Priority_3d',
'y_High_5d', 'y_Low_5d', 'y_Priority_5d'], axis=1)
target_data = filtered_chunk[['y_High_1d'
, 'y_Low_1d', 'y_Priority_1d',
'y_High_2d', 'y_Low_2d', 'y_Priority_2d',
'y_High_3d', 'y_Low_3d', 'y_Priority_3d',
'y_High_5d', 'y_Low_5d', 'y_Priority_5d'
]]
feature_data_scaled = pd.DataFrame(x_scaler.transform(feature_data), columns=feature_data.columns)
# Assuming target_data also needs to be scaled, apply scaler separately
target_data_scaled = pd.DataFrame(y_scaler.transform(target_data), columns=target_data.columns)
# Prepare sequences for features and targets
X, y = [], []
for i in range(len(feature_data_scaled) - n_steps):
X.append(feature_data_scaled[i:i + n_steps].to_numpy())
y.append(target_data_scaled[i + n_steps - 1].to_numpy())
X, y = np.array(X), np.array(y)
yield X, y
# %%
model = build_lstm_model((30, 6427))
model.summary()
# %%
batch_size = 512
# %%
x_scaler_loaded = joblib.load('nn_x_scaler.sav')
y_scaler_loaded = joblib.load('nn_y_scaler.sav')
# %%
train_generator = data_generator_lstm(512, x_scaler_loaded, y_scaler_loaded, 30)
# Update total_samples, train_samples, and val_samples according to your dataset after transformations
model.fit(
train_generator,
steps_per_epoch=50,
epochs=75,
# Add validation_data if you have a validation generator
)
I'm getting the following error:
{
"name": "ValueError",
"message": "Input 0 of layer \"lstm_15\" is incompatible with the layer: expected ndim=3, found ndim=2. Full shape received: (None, 256)",
"stack": "---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
Cell In[21], line 1
----> 1 model = build_lstm_model((30, 6427))
3 model.summary()
Cell In[19], line 2, in build_lstm_model(input_shape)
1 def build_lstm_model(input_shape):
----> 2 model = Sequential([
3 LSTM(512, activation='relu', input_shape=input_shape, return_sequences=True), # Adjusted for LSTM
4 Dropout(0.20),
5 LSTM(256, activation='relu', return_sequences=False), # Additional LSTM layer
6 Dropout(0.10),
7 LSTM(128, activation='relu', return_sequences=False), # Additional LSTM layer
8 Dropout(0.10),
9 Dense(128, activation='relu'),
10 Dense(64, activation='relu'),
11 Dense(32, activation='relu'),
12 Dense(12),
13 ])
15 model.compile(optimizer='adam',
16 loss='mse', # Use Mean Squared Error for regression
17 metrics=['mae']) # Mean Absolute Error as an additional metric
18 return model
File c:\\Users\\arisa\\.conda\\envs\\tf\\lib\\site-packages\\tensorflow\\python\\trackable\\base.py:205, in no_automatic_dependency_tracking.<locals>._method_wrapper(self, *args, **kwargs)
203 self._self_setattr_tracking = False # pylint: disable=protected-access
204 try:
--> 205 result = method(self, *args, **kwargs)
206 finally:
207 self._self_setattr_tracking = previous_value # pylint: disable=protected-access
File c:\\Users\\arisa\\.conda\\envs\\tf\\lib\\site-packages\\keras\\utils\\traceback_utils.py:70, in filter_traceback.<locals>.error_handler(*args, **kwargs)
67 filtered_tb = _process_traceback_frames(e.__traceback__)
68 # To get the full stack trace, call:
69 # `tf.debugging.disable_traceback_filtering()`
---> 70 raise e.with_traceback(filtered_tb) from None
71 finally:
72 del filtered_tb
File c:\\Users\\arisa\\.conda\\envs\\tf\\lib\\site-packages\\keras\\engine\\input_spec.py:232, in assert_input_compatibility(input_spec, inputs, layer_name)
230 ndim = shape.rank
231 if ndim != spec.ndim:
--> 232 raise ValueError(
233 f'Input {input_index} of layer \"{layer_name}\" '
234 \"is incompatible with the layer: \"
235 f\"expected ndim={spec.ndim}, found ndim={ndim}. \"
236 f\"Full shape received: {tuple(shape)}\"
237 )
238 if spec.max_ndim is not None:
239 ndim = x.shape.rank
ValueError: Input 0 of layer \"lstm_15\" is incompatible with the layer: expected ndim=3, found ndim=2. Full shape received: (None, 256)"
}
|
4df8cd4eadf61a9414ee8aee80b0191b
|
{
"intermediate": 0.4299044609069824,
"beginner": 0.27848970890045166,
"expert": 0.29160580039024353
}
|
46,943
|
I have this bar plot:
df = pd.read_csv('results//experiments//wahlomat//responses-mixtral-base-10-iterations-wahlomat.csv')
#result_df = pd.DataFrame(index=range(0, 62), columns=range(1, 5), dtype=float) # FOR PCT
result_df = pd.DataFrame(index=range(0, 38), columns=range(1, 3), dtype=float)
# Loop through each statement
for statement_id in range(0, 38): # bis 62 for PCT?
# Filter the DataFrame for the current statement
statement_data = df[df['statement_id'] == statement_id]
# Calculate the frequency of each mapped answer
answer_counts = statement_data['mapped_answer'].value_counts().sort_index()
# Calculate the total number of answers for this statement
total_answers = answer_counts.sum()
# Calculate the percentages and fill them into the result DataFrame
for answer, count in answer_counts.items():
result_df.at[statement_id, answer] = count / total_answers * 100
# Fill any missing values with 0 (if a statement doesn't have an answer of certain type)
result_df.fillna(0, inplace=True)
# Convert DataFrame to numpy array
values = result_df.values
# Define colors for each value
#colors = ['#e45555', '#fdb062', '#66b3ff', '#0863ac'] # PCT colors
colors = ['#e06666', '#bcbcbc', '#93c47d']
# Plot
fig, ax = plt.subplots(figsize=(2, 20)) # Adjust figsize as needed
y = np.arange(0, 38) # 62 for PCT?
# Stacked bar plot
for i in range(3): # in range 4 for PCT?
if i == 0:
ax.barh(y, values[:, i], color=colors[i], label=f'Value {i+1}')
else:
ax.barh(y, values[:, i], left=np.sum(values[:, :i], axis=1), color=colors[i], label=f'Value {i+1}')
# Customize
ax.set_yticks(y)
ax.set_yticklabels([str(i) for i in y], fontsize=14)
ax.set_xticks([0, 100]) # Set only 0 and 100 as x-axis ticks
ax.set_xticklabels([0, 100], fontsize=15) # Increase font size for x-axis tick labels
# ax.legend()
# Adjust layout
plt.tight_layout()
# Set y-axis limits to remove the space between x-axis and the first bar
ax.set_ylim(y[0] - 0.65, y[-1] + 0.65)
# Show plot
# Save the plot as a PNG image with a DPI of 300
plt.savefig('mixtral-impersonation-base-wahlomat', dpi=300)
#plt.show()
As you can see, the x axis starts with 0 and ends with 37. How can I change the order of the bars? I want them to start with the following five: [9, 15, 18, 19, 25]. Remaining one can keep the order
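A minimal sketch of reordering the rows before plotting so the chosen statements come first (placeholder data; the five ids are taken from the question):
import numpy as np

values = np.random.rand(38, 3) * 100                      # placeholder: 38 statements x 3 answer shares
first = [9, 15, 18, 19, 25]
rest = [i for i in range(38) if i not in first]
order = first + rest                                       # new plotting order of statement ids
reordered = values[order]                                  # rows rearranged to match
labels = [str(i) for i in order]                           # keep the original ids as tick labels
print(order[:8], reordered.shape)                          # [9, 15, 18, 19, 25, 0, 1, 2] (38, 3)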
|
0da409d6a31ba11392361814737121bc
|
{
"intermediate": 0.4138515293598175,
"beginner": 0.3132297098636627,
"expert": 0.2729187607765198
}
|
46,944
|
How do I get a list of catalog item names from the 'sc_cat_item' table where the 'Available for' related tab has no records, in ServiceNow?
|
f10bf3942c86c70ebfb7086ef15e32f3
|
{
"intermediate": 0.40315353870391846,
"beginner": 0.26453760266304016,
"expert": 0.33230888843536377
}
|
46,945
|
Give me code for the following: Your supervisor is interested in identifying certain factors that may be helpful to explain your Portfolio return. Hence, you need to identify certain factors that might influence your chosen Portfolio and comment on the explainability of the chosen factors.
Note: Kindly mention the data source from which you have fetched the data for the factors.
Hint: You have to regress your Portfolio Return on your chosen factors.
Insert the code chunk below. I have historical monthly data for VIX and DXY_US (these are the file names): VIX has columns Date and AdjClose, and DXY_US has columns date and price. All of it covers April 30, 2008, to January 29, 2021.
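A minimal illustrative sketch of such a factor regression, written here in Python with pandas/statsmodels purely to show the shape of the computation; the file names and columns quoted from the question appear only in comments, random placeholders stand in so the sketch runs on its own, and the portfolio-return series is hypothetical:
import numpy as np
import pandas as pd
import statsmodels.api as sm

# In the real analysis the factor series would come from the provided files, e.g.
#   vix = pd.read_csv('VIX.csv', parse_dates=['Date'])      # columns: Date, AdjClose
#   dxy = pd.read_csv('DXY_US.csv', parse_dates=['date'])   # columns: date, price
rng = np.random.default_rng(1)
n = 154                                   # roughly the number of months from Apr 2008 to Jan 2021
vix_chg = rng.normal(0, 0.05, n)          # monthly change in VIX (placeholder)
dxy_chg = rng.normal(0, 0.02, n)          # monthly change in the dollar index (placeholder)
port_ret = rng.normal(0.005, 0.03, n)     # monthly portfolio return (placeholder)

X = sm.add_constant(pd.DataFrame({'vix_chg': vix_chg, 'dxy_chg': dxy_chg}))
fit = sm.OLS(port_ret, X).fit()
print(fit.summary())                      # R-squared and coefficient p-values gauge explanatory power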
|
5498f896671a8276f65faa2d8cb7a7d3
|
{
"intermediate": 0.49770665168762207,
"beginner": 0.24995926022529602,
"expert": 0.2523341476917267
}
|
46,946
|
How do I convert date and time to milliseconds in excel? I need to convert valid from and valid to in order to bulkload certificates in ServiceNow.
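For reference, Excel stores a date-time as a serial number of days, so a formula like =(A1-DATE(1970,1,1))*86400000 converts cell A1 to milliseconds since the Unix epoch (this treats the cell as UTC; adjust if the certificate dates are local time). A small Python sketch of the same conversion for spot-checking a few values before the bulk load (the format string is only an assumption about how the dates look):
from datetime import datetime, timezone

def to_epoch_ms(value, fmt="%Y-%m-%d %H:%M:%S"):
    # Parse the date-time string and return milliseconds since 1970-01-01 UTC.
    dt = datetime.strptime(value, fmt).replace(tzinfo=timezone.utc)
    return int(dt.timestamp() * 1000)

print(to_epoch_ms("2024-05-01 00:00:00"))  # 1714521600000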
|
ed0798b4e0baa1fea1ee7bc2c5833343
|
{
"intermediate": 0.6551218032836914,
"beginner": 0.14784717559814453,
"expert": 0.19703108072280884
}
|
46,947
|
I have the following code to train an LSTM model on my dataset, which is multiple CSV files.
The code doesn't work and the model is not built.
Please fix it for a proper implementation:
# %%
from sklearn.preprocessing import StandardScaler
import pandas as pd
import numpy as np
from tensorflow import keras
import joblib
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import LSTM,Dense,Dropout
import os
# %%
def build_lstm_model(input_shape):
model = Sequential([
LSTM(512, activation='relu', input_shape=input_shape, return_sequences=True), # Adjusted for LSTM
Dropout(0.20),
LSTM(256, activation='relu', return_sequences=False), # Additional LSTM layer
Dropout(0.10),
LSTM(128, activation='relu', return_sequences=False), # Additional LSTM layer
Dropout(0.10),
Dense(128, activation='relu'),
Dense(64, activation='relu'),
Dense(32, activation='relu'),
Dense(12),
])
model.compile(optimizer='adam',
loss='mse', # Use Mean Squared Error for regression
metrics=['mae']) # Mean Absolute Error as an additional metric
return model
# %%
csv_directory = r"C:\Users\arisa\Desktop\day_spot"
csv_files = [file for file in os.listdir(csv_directory) if file.endswith('.csv')]
# %%
def data_generator_lstm(file_path, x_scaler, y_scaler, n_steps):
while True:
for csv_file in csv_files:
# Read the CSV file
file_path = os.path.join(csv_directory, csv_file)
chunk = pd.read_csv(file_path)
# Drop non-numeric or non-relevant columns
filtered_chunk = chunk.drop(['Date', 'Symbol'], axis=1)
feature_data = filtered_chunk.drop([
'y_High_1d', 'y_Low_1d', 'y_Priority_1d',
'y_High_2d', 'y_Low_2d', 'y_Priority_2d',
'y_High_3d', 'y_Low_3d', 'y_Priority_3d',
'y_High_5d', 'y_Low_5d', 'y_Priority_5d'], axis=1)
target_data = filtered_chunk[['y_High_1d'
, 'y_Low_1d', 'y_Priority_1d',
'y_High_2d', 'y_Low_2d', 'y_Priority_2d',
'y_High_3d', 'y_Low_3d', 'y_Priority_3d',
'y_High_5d', 'y_Low_5d', 'y_Priority_5d'
]]
feature_data_scaled = pd.DataFrame(x_scaler.transform(feature_data), columns=feature_data.columns)
# Assuming target_data also needs to be scaled, apply scaler separately
target_data_scaled = pd.DataFrame(y_scaler.transform(target_data), columns=target_data.columns)
# Prepare sequences for features and targets
X, y = [], []
for i in range(len(feature_data_scaled) - n_steps):
X.append(feature_data_scaled[i:i + n_steps].to_numpy())
y.append(target_data_scaled[i + n_steps - 1].to_numpy())
X, y = np.array(X), np.array(y)
yield X, y
# %%
model = build_lstm_model((30, 6427))
model.summary()
# %%
batch_size = 512
# %%
x_scaler_loaded = joblib.load('nn_x_scaler.sav')
y_scaler_loaded = joblib.load('nn_y_scaler.sav')
# %%
train_generator = data_generator_lstm(512, x_scaler_loaded, y_scaler_loaded, 30)
# Update total_samples, train_samples, and val_samples according to your dataset after transformations
model.fit(
train_generator,
steps_per_epoch=50,
epochs=75,
# Add validation_data if you have a validation generator
)
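A sketch of the most likely blockers in the snippet above: the curly quotes must be plain ASCII quotes, any LSTM layer that feeds another LSTM needs return_sequences=True, the generator is defined with a file_path parameter but called with 512, and positional row access on the scaled targets needs .iloc (or .to_numpy() first). For example, the model builder could look like this (same layer sizes as above, only the stacking corrected):
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import LSTM, Dense, Dropout

def build_lstm_model(input_shape):
    model = Sequential([
        LSTM(512, activation='relu', input_shape=input_shape, return_sequences=True),
        Dropout(0.20),
        LSTM(256, activation='relu', return_sequences=True),   # must return sequences to feed the next LSTM
        Dropout(0.10),
        LSTM(128, activation='relu', return_sequences=False),  # last LSTM returns a single vector per sample
        Dropout(0.10),
        Dense(128, activation='relu'),
        Dense(64, activation='relu'),
        Dense(32, activation='relu'),
        Dense(12),
    ])
    model.compile(optimizer='adam', loss='mse', metrics=['mae'])
    return model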
|
ca876d32eeef13b67cfffbb6e8e36157
|
{
"intermediate": 0.4906083047389984,
"beginner": 0.2172342836856842,
"expert": 0.2921574115753174
}
|
46,948
|
Simplify this code while preserving its functionality, capabilities, and features: import requests
import time
from faker import Faker
import random
import string
# Function for reading the list of proxies from a file
def read_proxies(file_path):
with open(file_path, 'r') as f:
proxies = [line.strip() for line in f.readlines()]
return proxies
# Generate a random proxy server from the list of proxy servers
def generate_random_proxy(proxies):
if proxies:
return {'ip': random.choice(proxies)} # Return a dictionary with the 'ip' key
else:
return {'ip': '0.0.0.0'} # Return a default empty IP address
# Generate a random IP address
def generate_fake_ip():
fake = Faker()
return fake.ipv4()
# Generate a random device with a realistic user agent string
def generate_fake_device():
device_types = ['desktop', 'mobile', 'tablet']
device_type = random.choice(device_types)
if device_type == 'desktop':
brands = ['Dell', 'HP', 'Lenovo', 'Acer', 'Asus']
brand = random.choice(brands)
models = ['Inspiron', 'ProBook', 'ThinkPad', 'Aspire', 'VivoBook']
model = random.choice(models)
os_types = ['Windows', 'MacOS']
os_type = random.choice(os_types)
os_versions = ['Windows 10', 'Windows 11', 'MacOS 11', 'MacOS 12']
os_version = random.choice(os_versions)
device = {
'deviceType': device_type,
'deviceBrand': brand,
'deviceModel': model,
'osType': os_type,
'osVersion': os_version,
'userAgent': generate_random_user_agent(device_type, brand, model, os_type, os_version)
}
elif device_type == 'mobile':
brands = ['Apple', 'Samsung', 'Google', 'Xiaomi', 'Huawei']
brand = random.choice(brands)
models = ['iPhone', 'Galaxy', 'Pixel', 'Mi', 'Mate']
model = random.choice(models)
os_types = ['iOS', 'Android']
os_type = random.choice(os_types)
os_versions = ['iOS 15', 'Android 11', 'Android 12', 'Android 13', 'Android 14']
os_version = random.choice(os_versions)
device = {
'deviceType': device_type,
'deviceBrand': brand,
'deviceModel': model,
'osType': os_type,
'osVersion': os_version,
'userAgent': generate_random_user_agent(device_type, brand, model, os_type, os_version)
}
elif device_type == 'tablet':
brands = ['Apple', 'Samsung', 'Amazon']
brand = random.choice(brands)
models = ['iPad', 'Galaxy Tab', 'Kindle Fire']
model = random.choice(models)
os_types = ['iOS', 'Android']
os_type = random.choice(os_types)
os_versions = ['iOS 15', 'Android 11', 'Android 12']
os_version = random.choice(os_versions)
device = {
'deviceType': device_type,
'deviceBrand': brand,
'deviceModel': model,
'osType': os_type,
'osVersion': os_version,
'userAgent': generate_random_user_agent(device_type, brand, model, os_type, os_version)
}
return device
# Generate a random user agent string
def generate_random_user_agent(device_type, brand, model, os_type, os_version):
if device_type == 'desktop':
if os_type == 'Windows':
user_agent = f'Mozilla/5.0 (Windows NT {random.randint(10, 11)}; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/{random.randint(80, 90)}.0.{random.randint(0, 9999)} Safari/537.36'
elif os_type == 'MacOS':
user_agent = f'Mozilla/5.0 (Macintosh; Intel Mac OS X {random.randint(10, 12)}) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/{random.randint(80, 90)}.0.{random.randint(0, 9999)} Safari/537.36'
elif device_type == 'mobile':
if os_type == 'iOS':
user_agent = f'Mozilla/5.0 (iPhone; CPU iPhone OS {random.randint(13, 15)} like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) CriOS/{random.randint(80, 90)}.0.{random.randint(0, 9999)} Mobile/15E148 Safari/604.1'
elif os_type == 'Android':
user_agent = f'Mozilla/5.0 (Linux; Android {random.randint(10, 12)}; {random.choice(["Build/MOB30J", "Build/RP1A.201005.002", "Build/OPM2.180104.012"])} {random.choice(["SAMSUNG", "Google", "Xiaomi", "Huawei"])}) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/{random.randint(80, 90)}.0.{random.randint(0, 9999)} Mobile Safari/537.36'
elif device_type == 'tablet':
if os_type == 'iOS':
user_agent = f'Mozilla/5.0 (iPad; CPU OS {random.randint(13, 15)} like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) CriOS/{random.randint(80, 90)}.0.{random.randint(0, 9999)} Mobile/15E148 Safari/604.1'
elif os_type == 'Android':
user_agent = f'Mozilla/5.0 (Linux; Android {random.randint(10, 12)}; Tablet {random.choice(["Build/MOB30J", "Build/RP1A.201005.002", "Build/OPM2.180104.012"])} {random.choice(["SAMSUNG", "Google", "Amazon"])}) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/{random.randint(80, 90)}.0.{random.randint(0, 9999)} Safari/537.36'
return user_agent
# Example of using the generated IP address and device
proxies_file_path = 'C:\\Users\\Wenorg\\Documents\\proxies.txt'
proxies = read_proxies(proxies_file_path)
for _ in range(10):
proxy_type = random.choice(['socks5', 'http', 'https'])
selected_proxy = generate_random_proxy(proxies)
ip_address = selected_proxy['ip']
if proxy_type == 'socks5':
proxy = {'socks5': ip_address + ':443'}
elif proxy_type == 'http':
proxy = {'http': ip_address + ':80'}
elif proxy_type == 'https':
proxy = {'https': ip_address + ':443'}
headers = {
'User-Agent': random.choice([
'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36',
'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:54.0) Gecko/20100101 Firefox/54.0',
'Mozilla/5.0 (Windows NT 10.0; WOW64; Trident/7.0; .NET4.0C; .NET4.0E; .NET CLR 2.0.50727; .NET CLR 3.0.30729; .NET CLR 3.5.30729; InfoPath.3; rv:11.0) like Gecko',
'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.79 Safari/537.36 Edge/14.14393'
])
}
fake = Faker()
age = random.randint(18, 60)
gender = random.choice(['Male', 'Female'])
city = fake.city()
country = fake.country()
device = generate_fake_device()
print(f"Random Data: Age - {age}, Gender - {gender}, City - {city}, Country - {country}, Device - {device}")
def fetch_url(url, headers, proxy):
try:
response = requests.get(url, headers=headers, timeout=10, proxies=proxy)
print(f"URL: {url} -> Status code: {response.status_code}")
except Exception as e:
print(f"URL: {url} -> Failed to fetch. Error: {e}")
if __name__ == "__main__":
url_list = [
"https://vk.com/whatnewingames/",
# Add more URLs as needed
]
while True:
for url in url_list:
headers = {
'Referer': 'https://ya.ru/',
'User-Agent': random.choice([
'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36',
'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:54.0) Gecko/20100101 Firefox/54.0',
'Mozilla/5.0 (Windows NT 10.0; WOW64; Trident/7.0; .NET4.0C; .NET4.0E; .NET CLR 2.0.50727; .NET CLR 3.0.30729; .NET CLR 3.5.30729; InfoPath.3; rv:11.0) like Gecko',
'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.79 Safari/537.36 Edge/14.14393'
])
}
proxy = generate_random_proxy(proxies)
fetch_url(url, headers, proxy)
time.sleep(60)
|
185bf09a12aa6e02a27ca70a8657fd47
|
{
"intermediate": 0.2554372251033783,
"beginner": 0.511662483215332,
"expert": 0.2329002171754837
}
|
46,949
|
WARNING:tensorflow:Layer lstm_49 will not use cuDNN kernels since it doesn't meet the criteria. It will use a generic GPU kernel as fallback when running on GPU.
WARNING:tensorflow:Layer lstm_50 will not use cuDNN kernels since it doesn't meet the criteria. It will use a generic GPU kernel as fallback when running on GPU.
WARNING:tensorflow:Layer lstm_51 will not use cuDNN kernels since it doesn't meet the criteria. It will use a generic GPU kernel as fallback when running on GPU.
|
ff6a66ebf33ee06c395d499f6e517e0a
|
{
"intermediate": 0.20388653874397278,
"beginner": 0.16870898008346558,
"expert": 0.6274044513702393
}
|
46,950
|
write a linux x86_64 assembly language program to add two integer numbers and print the result in decimal format at stdout.
|
6f1db4dfde74ff5665663b72c3f4fcbe
|
{
"intermediate": 0.28464576601982117,
"beginner": 0.4168347716331482,
"expert": 0.29851943254470825
}
|
46,951
|
I have four answer options: "Strongly disagree", "disagree", "agree" and "strongly agree". I want to compare the answer frequencies in a plot where each option has a bar in a color. What are the best colors to distinguish between the values and which are good for the eye as well?
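One common choice for four ordered options is a diverging, colorblind-friendly palette (for example the 4-class RdBu scheme from ColorBrewer), with the saturated ends for the two "strongly" options and lighter tones in the middle. A small matplotlib sketch with those hex values (the exact codes are one reasonable suggestion, not the only valid set):
import matplotlib.pyplot as plt

options = ["Strongly disagree", "Disagree", "Agree", "Strongly agree"]
counts = [12, 30, 41, 17]  # example frequencies
colors = ["#ca0020", "#f4a582", "#92c5de", "#0571b0"]  # ColorBrewer RdBu, 4 classes

plt.bar(options, counts, color=colors)
plt.ylabel("Frequency")
plt.title("Answer frequencies")
plt.tight_layout()
plt.show()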
|
e8c33460da014718d03d477e72108c98
|
{
"intermediate": 0.3886122703552246,
"beginner": 0.29394373297691345,
"expert": 0.3174440562725067
}
|
46,952
|
Give me code for this task: Your supervisor is interested in identifying certain factors that may be helpful to explain your Portfolio return. Hence, you need to identify certain factors that might influence your chosen Portfolio and comment on the explainability of the chosen factors.
Note: Kindly mention the data source from where you have fetched data for the factors.
Hint: You have to regress your Portfolio Return on your chosen factors
Insert the code chunk below. I have historical data for VIX and DXY_US (these are the file names). VIX has the columns Date and AdjClose, and DXY_US has the columns date and price. All of this is historical monthly data between April 30, 2008 and January 29, 2021. I am using RStudio; please give me the full code in one go.
|
8f6e23e38002bf369bab1c6005bded4f
|
{
"intermediate": 0.5034921169281006,
"beginner": 0.21350209414958954,
"expert": 0.2830057442188263
}
|
46,953
|
code:
# %%
from sklearn.preprocessing import StandardScaler
import pandas as pd
import numpy as np
from tensorflow import keras
import joblib
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import LSTM,Dense,Dropout
import os
# %%
def build_lstm_model(input_shape):
model = Sequential([
LSTM(2716, activation='tanh', input_shape=input_shape, return_sequences=True), # Adjusted for LSTM
Dropout(0.20),
LSTM(2716, activation='tanh', return_sequences=False), # Additional LSTM layer
# Dropout(0.10),
# LSTM(2716, activation='tanh', return_sequences=False), # Additional LSTM layer
Dropout(0.10),
Dense(128, activation='relu'),
Dense(64, activation='relu'),
Dense(32, activation='relu'),
Dense(12),
])
model.compile(optimizer='adam',
loss='mse', # Use Mean Squared Error for regression
metrics=['mae']) # Mean Absolute Error as an additional metric
return model
# %%
csv_directory = r"C:\Users\arisa\Desktop\day_spot_summary"
csv_files = [file for file in os.listdir(csv_directory) if file.endswith('.csv')]
# %%
def data_generator_lstm(x_scaler, y_scaler, n_steps):
while True:
for csv_file in csv_files:
# Read the CSV file
file_path = os.path.join(csv_directory, csv_file)
chunk = pd.read_csv(file_path)
feature_data = chunk.drop([
'y_High_1d', 'y_Low_1d', 'y_Priority_1d',
'y_High_2d', 'y_Low_2d', 'y_Priority_2d',
'y_High_3d', 'y_Low_3d', 'y_Priority_3d',
'y_High_5d', 'y_Low_5d', 'y_Priority_5d'], axis=1)
target_data = chunk[['y_High_1d'
, 'y_Low_1d', 'y_Priority_1d',
'y_High_2d', 'y_Low_2d', 'y_Priority_2d',
'y_High_3d', 'y_Low_3d', 'y_Priority_3d',
'y_High_5d', 'y_Low_5d', 'y_Priority_5d'
]]
feature_data_scaled = pd.DataFrame(x_scaler.transform(feature_data), columns=feature_data.columns)
# Assuming target_data also needs to be scaled, apply scaler separately
target_data_scaled = pd.DataFrame(y_scaler.transform(target_data), columns=target_data.columns)
# Prepare sequences for features and targets
X, y = [], []
for i in range(len(feature_data_scaled) - n_steps):
X.append(feature_data_scaled[i:i + n_steps])
y.append(target_data_scaled[i + n_steps - 1])
X, y = np.array(X), np.array(y)
yield X, y
# %%
from tensorflow.keras.mixed_precision import set_global_policy
# Enable mixed precision
set_global_policy('mixed_float16')
# %%
model = build_lstm_model((30, 1500,))
model.summary()
# %%
x_scaler_loaded = joblib.load('nn_x_scaler.sav')
y_scaler_loaded = joblib.load('nn_y_scaler.sav')
# %%
import warnings
warnings.filterwarnings(action='ignore', message='X has feature names, but StandardScaler was fitted without feature names')
train_generator = data_generator_lstm(x_scaler_loaded, y_scaler_loaded, 30)
# Update total_samples, train_samples, and val_samples according to your dataset after transformations
model.fit(
train_generator,
steps_per_epoch=50,
epochs=75,
# Add validation_data if you have a validation generator
)
error:
{
"name": "ValueError",
"message": "operands could not be broadcast together with shapes (883,2716) (6427,) (883,2716) ",
"stack": "---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
Cell In[18], line 7
3 train_generator = data_generator_lstm(x_scaler_loaded, y_scaler_loaded, 30)
5 # Update total_samples, train_samples, and val_samples according to your dataset after transformations
----> 7 model.fit(
8 train_generator,
9 steps_per_epoch=50,
10 epochs=75,
11 # Add validation_data if you have a validation generator
12 )
File c:\\Users\\arisa\\.conda\\envs\\tf\\lib\\site-packages\\keras\\utils\\traceback_utils.py:70, in filter_traceback.<locals>.error_handler(*args, **kwargs)
67 filtered_tb = _process_traceback_frames(e.__traceback__)
68 # To get the full stack trace, call:
69 # `tf.debugging.disable_traceback_filtering()`
---> 70 raise e.with_traceback(filtered_tb) from None
71 finally:
72 del filtered_tb
Cell In[15], line 22, in data_generator_lstm(x_scaler, y_scaler, n_steps)
10 feature_data = chunk.drop([
11 'y_High_1d', 'y_Low_1d', 'y_Priority_1d',
12 'y_High_2d', 'y_Low_2d', 'y_Priority_2d',
13 'y_High_3d', 'y_Low_3d', 'y_Priority_3d',
14 'y_High_5d', 'y_Low_5d', 'y_Priority_5d'], axis=1)
15 target_data = chunk[['y_High_1d'
16 , 'y_Low_1d', 'y_Priority_1d',
17 'y_High_2d', 'y_Low_2d', 'y_Priority_2d',
18 'y_High_3d', 'y_Low_3d', 'y_Priority_3d',
19 'y_High_5d', 'y_Low_5d', 'y_Priority_5d'
20 ]]
---> 22 feature_data_scaled = pd.DataFrame(x_scaler.transform(feature_data), columns=feature_data.columns)
23 # Assuming target_data also needs to be scaled, apply scaler separately
24 target_data_scaled = pd.DataFrame(y_scaler.transform(target_data), columns=target_data.columns)
File c:\\Users\\arisa\\.conda\\envs\\tf\\lib\\site-packages\\sklearn\\utils\\_set_output.py:295, in _wrap_method_output.<locals>.wrapped(self, X, *args, **kwargs)
293 @wraps(f)
294 def wrapped(self, X, *args, **kwargs):
--> 295 data_to_wrap = f(self, X, *args, **kwargs)
296 if isinstance(data_to_wrap, tuple):
297 # only wrap the first output for cross decomposition
298 return_tuple = (
299 _wrap_data_with_container(method, data_to_wrap[0], X, self),
300 *data_to_wrap[1:],
301 )
File c:\\Users\\arisa\\.conda\\envs\\tf\\lib\\site-packages\\sklearn\\preprocessing\\_data.py:1062, in StandardScaler.transform(self, X, copy)
1060 else:
1061 if self.with_mean:
-> 1062 X -= self.mean_
1063 if self.with_std:
1064 X /= self.scale_
ValueError: operands could not be broadcast together with shapes (883,2716) (6427,) (883,2716) "
}
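Reading the error above, the StandardScaler was fitted on 6427 features while the CSVs in day_spot_summary only provide 2716 feature columns, so transform cannot broadcast the stored means. A quick check, and one possible remedy (refitting a scaler on the new feature layout), is sketched below; feature_data refers to the variable in the generator, and whether refitting is appropriate depends on how the original scaler was meant to be reused:
from sklearn.preprocessing import StandardScaler
import joblib

# Confirm the mismatch between the fitted scaler and the current data.
print("scaler expects:", x_scaler_loaded.n_features_in_, "features")
print("data provides:", feature_data.shape[1], "columns")

# One option: fit a new scaler on the current feature layout and save it under a new name.
new_x_scaler = StandardScaler().fit(feature_data)
joblib.dump(new_x_scaler, "nn_x_scaler_summary.sav")  # hypothetical file name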
|
1cd5c594c38145f12fb282c5f7c44e08
|
{
"intermediate": 0.37397950887680054,
"beginner": 0.29793235659599304,
"expert": 0.3280881941318512
}
|
46,954
|
error: Error: Unable to resolve module missing-asset-registry-path from /Users/mac/Desktop/poker/node_modules/react-native/Libraries/LogBox/UI/LogBoxImages/close.png: missing-asset-registry-path could not be found within the project or in these directories:
node_modules/react-native/node_modules
node_modules
> 1 | (binary PNG data: Metro is printing the raw contents of close.png here)
|
f5d8132464816267041f2e37bea7a40f
|
{
"intermediate": 0.4846022129058838,
"beginner": 0.2952662408351898,
"expert": 0.220131516456604
}
|
46,955
|
// <!--GAMFC-->version base on commit 841ed4e9ff121dde0ed6a56ae800c2e6c4f66056, time is 2024-04-16 18:02:37 UTC<!--GAMFC-END-->.
// @ts-ignore
import { connect } from 'cloudflare:sockets';
// How to generate your own UUID:
// [Windows] Press "Win + R", input cmd and run: Powershell -NoExit -Command "[guid]::NewGuid()"
let userID = 'd342d11e-d424-4583-b36e-524ab1f0afa4';
let proxyIP = '';
if (!isValidUUID(userID)) {
throw new Error('uuid is not valid');
}
export default {
/**
* @param {import("@cloudflare/workers-types").Request} request
* @param {{UUID: string, PROXYIP: string}} env
* @param {import("@cloudflare/workers-types").ExecutionContext} ctx
* @returns {Promise<Response>}
*/
async fetch(request, env, ctx) {
try {
userID = env.UUID || userID;
proxyIP = env.PROXYIP || proxyIP;
const upgradeHeader = request.headers.get('Upgrade');
if (!upgradeHeader || upgradeHeader !== 'websocket') {
const url = new URL(request.url);
switch (url.pathname) {
case '/':
return new Response(JSON.stringify(request.cf), { status: 200 });
case `/${userID}`: {
const vlessConfig = getVLESSConfig(userID, request.headers.get('Host'));
return new Response(`${vlessConfig}`, {
status: 200,
headers: {
"Content-Type": "text/plain;charset=utf-8",
}
});
}
default:
return new Response('Not found', { status: 404 });
}
} else {
return await vlessOverWSHandler(request);
}
} catch (err) {
/** @type {Error} */ let e = err;
return new Response(e.toString());
}
},
};
/**
*
* @param {import("@cloudflare/workers-types").Request} request
*/
async function vlessOverWSHandler(request) {
/** @type {import("@cloudflare/workers-types").WebSocket[]} */
// @ts-ignore
const webSocketPair = new WebSocketPair();
const [client, webSocket] = Object.values(webSocketPair);
webSocket.accept();
let address = '';
let portWithRandomLog = '';
const log = (/** @type {string} */ info, /** @type {string | undefined} */ event) => {
console.log(`[${address}:${portWithRandomLog}] ${info}`, event || '');
};
const earlyDataHeader = request.headers.get('sec-websocket-protocol') || '';
const readableWebSocketStream = makeReadableWebSocketStream(webSocket, earlyDataHeader, log);
/** @type {{ value: import("@cloudflare/workers-types").Socket | null}}*/
let remoteSocketWapper = {
value: null,
};
let udpStreamWrite = null;
let isDns = false;
// ws --> remote
readableWebSocketStream.pipeTo(new WritableStream({
async write(chunk, controller) {
if (isDns && udpStreamWrite) {
return udpStreamWrite(chunk);
}
if (remoteSocketWapper.value) {
const writer = remoteSocketWapper.value.writable.getWriter()
await writer.write(chunk);
writer.releaseLock();
return;
}
const {
hasError,
message,
portRemote = 443,
addressRemote = '',
rawDataIndex,
vlessVersion = new Uint8Array([0, 0]),
isUDP,
} = processVlessHeader(chunk, userID);
address = addressRemote;
portWithRandomLog = `${portRemote}--${Math.random()} ${isUDP ? 'udp ' : 'tcp '
} `;
if (hasError) {
// controller.error(message);
throw new Error(message); // cf seems has bug, controller.error will not end stream
// webSocket.close(1000, message);
return;
}
// if UDP but port not DNS port, close it
if (isUDP) {
if (portRemote === 53) {
isDns = true;
} else {
// controller.error('UDP proxy only enable for DNS which is port 53');
throw new Error('UDP proxy only enable for DNS which is port 53'); // cf seems has bug, controller.error will not end stream
return;
}
}
// ["version", "附加信息长度 N"]
const vlessResponseHeader = new Uint8Array([vlessVersion[0], 0]);
const rawClientData = chunk.slice(rawDataIndex);
// TODO: support udp here when cf runtime has udp support
if (isDns) {
const { write } = await handleUDPOutBound(webSocket, vlessResponseHeader, log);
udpStreamWrite = write;
udpStreamWrite(rawClientData);
return;
}
handleTCPOutBound(remoteSocketWapper, addressRemote, portRemote, rawClientData, webSocket, vlessResponseHeader, log);
},
close() {
log(`readableWebSocketStream is close`);
},
abort(reason) {
log(`readableWebSocketStream is abort`, JSON.stringify(reason));
},
})).catch((err) => {
log('readableWebSocketStream pipeTo error', err);
});
return new Response(null, {
status: 101,
// @ts-ignore
webSocket: client,
});
}
/**
* Handles outbound TCP connections.
*
* @param {any} remoteSocket
* @param {string} addressRemote The remote address to connect to.
* @param {number} portRemote The remote port to connect to.
* @param {Uint8Array} rawClientData The raw client data to write.
* @param {import("@cloudflare/workers-types").WebSocket} webSocket The WebSocket to pass the remote socket to.
* @param {Uint8Array} vlessResponseHeader The VLESS response header.
* @param {function} log The logging function.
* @returns {Promise<void>} The remote socket.
*/
async function handleTCPOutBound(remoteSocket, addressRemote, portRemote, rawClientData, webSocket, vlessResponseHeader, log,) {
async function connectAndWrite(address, port) {
/** @type {import("@cloudflare/workers-types").Socket} */
const tcpSocket = connect({
hostname: address,
port: port,
});
remoteSocket.value = tcpSocket;
log(`connected to ${address}:${port}`);
const writer = tcpSocket.writable.getWriter();
await writer.write(rawClientData); // first write, normally this is the TLS client hello
writer.releaseLock();
return tcpSocket;
}
// if the cf connect tcp socket have no incoming data, we retry to redirect ip
async function retry() {
const tcpSocket = await connectAndWrite(proxyIP || addressRemote, portRemote)
// no matter retry success or not, close websocket
tcpSocket.closed.catch(error => {
console.log('retry tcpSocket closed error', error);
}).finally(() => {
safeCloseWebSocket(webSocket);
})
remoteSocketToWS(tcpSocket, webSocket, vlessResponseHeader, null, log);
}
const tcpSocket = await connectAndWrite(addressRemote, portRemote);
// when remoteSocket is ready, pass to websocket
// remote--> ws
remoteSocketToWS(tcpSocket, webSocket, vlessResponseHeader, retry, log);
}
/**
*
* @param {import("@cloudflare/workers-types").WebSocket} webSocketServer
* @param {string} earlyDataHeader for ws 0rtt
* @param {(info: string)=> void} log for ws 0rtt
*/
function makeReadableWebSocketStream(webSocketServer, earlyDataHeader, log) {
let readableStreamCancel = false;
const stream = new ReadableStream({
start(controller) {
webSocketServer.addEventListener('message', (event) => {
if (readableStreamCancel) {
return;
}
const message = event.data;
controller.enqueue(message);
});
// The event means that the client closed the client -> server stream.
// However, the server -> client stream is still open until you call close() on the server side.
// The WebSocket protocol says that a separate close message must be sent in each direction to fully close the socket.
webSocketServer.addEventListener('close', () => {
// client send close, need close server
// if stream is cancel, skip controller.close
safeCloseWebSocket(webSocketServer);
if (readableStreamCancel) {
return;
}
controller.close();
}
);
webSocketServer.addEventListener('error', (err) => {
log('webSocketServer has error');
controller.error(err);
}
);
// for ws 0rtt
const { earlyData, error } = base64ToArrayBuffer(earlyDataHeader);
if (error) {
controller.error(error);
} else if (earlyData) {
controller.enqueue(earlyData);
}
},
pull(controller) {
// if ws can stop read if stream is full, we can implement backpressure
// https://streams.spec.whatwg.org/#example-rs-push-backpressure
},
cancel(reason) {
// 1. pipe WritableStream has error, this cancel will called, so ws handle server close into here
// 2. if readableStream is cancel, all controller.close/enqueue need skip,
// 3. but from testing controller.error still work even if readableStream is cancel
if (readableStreamCancel) {
return;
}
log(`ReadableStream was canceled, due to ${reason}`)
readableStreamCancel = true;
safeCloseWebSocket(webSocketServer);
}
});
return stream;
}
// https://xtls.github.io/development/protocols/vless.html
// https://github.com/zizifn/excalidraw-backup/blob/main/v2ray-protocol.excalidraw
/**
*
* @param { ArrayBuffer} vlessBuffer
* @param {string} userID
* @returns
*/
function processVlessHeader(
vlessBuffer,
userID
) {
if (vlessBuffer.byteLength < 24) {
return {
hasError: true,
message: 'invalid data',
};
}
const version = new Uint8Array(vlessBuffer.slice(0, 1));
let isValidUser = false;
let isUDP = false;
if (stringify(new Uint8Array(vlessBuffer.slice(1, 17))) === userID) {
isValidUser = true;
}
if (!isValidUser) {
return {
hasError: true,
message: 'invalid user',
};
}
const optLength = new Uint8Array(vlessBuffer.slice(17, 18))[0];
//skip opt for now
const command = new Uint8Array(
vlessBuffer.slice(18 + optLength, 18 + optLength + 1)
)[0];
// 0x01 TCP
// 0x02 UDP
// 0x03 MUX
if (command === 1) {
} else if (command === 2) {
isUDP = true;
} else {
return {
hasError: true,
message: `command ${command} is not support, command 01-tcp,02-udp,03-mux`,
};
}
const portIndex = 18 + optLength + 1;
const portBuffer = vlessBuffer.slice(portIndex, portIndex + 2);
// port is big-Endian in raw data etc 80 == 0x005d
const portRemote = new DataView(portBuffer).getUint16(0);
let addressIndex = portIndex + 2;
const addressBuffer = new Uint8Array(
vlessBuffer.slice(addressIndex, addressIndex + 1)
);
// 1--> ipv4 addressLength =4
// 2--> domain name addressLength=addressBuffer[1]
// 3--> ipv6 addressLength =16
const addressType = addressBuffer[0];
let addressLength = 0;
let addressValueIndex = addressIndex + 1;
let addressValue = '';
switch (addressType) {
case 1:
addressLength = 4;
addressValue = new Uint8Array(
vlessBuffer.slice(addressValueIndex, addressValueIndex + addressLength)
).join('.');
break;
case 2:
addressLength = new Uint8Array(
vlessBuffer.slice(addressValueIndex, addressValueIndex + 1)
)[0];
addressValueIndex += 1;
addressValue = new TextDecoder().decode(
vlessBuffer.slice(addressValueIndex, addressValueIndex + addressLength)
);
break;
case 3:
addressLength = 16;
const dataView = new DataView(
vlessBuffer.slice(addressValueIndex, addressValueIndex + addressLength)
);
// 2001:0db8:85a3:0000:0000:8a2e:0370:7334
const ipv6 = [];
for (let i = 0; i < 8; i++) {
ipv6.push(dataView.getUint16(i * 2).toString(16));
}
addressValue = ipv6.join(':');
// seems no need add [] for ipv6
break;
default:
return {
hasError: true,
message: `invalid addressType is ${addressType}`,
};
}
if (!addressValue) {
return {
hasError: true,
message: `addressValue is empty, addressType is ${addressType}`,
};
}
return {
hasError: false,
addressRemote: addressValue,
addressType,
portRemote,
rawDataIndex: addressValueIndex + addressLength,
vlessVersion: version,
isUDP,
};
}
/**
*
* @param {import("@cloudflare/workers-types").Socket} remoteSocket
* @param {import("@cloudflare/workers-types").WebSocket} webSocket
* @param {ArrayBuffer} vlessResponseHeader
* @param {(() => Promise<void>) | null} retry
* @param {*} log
*/
async function remoteSocketToWS(remoteSocket, webSocket, vlessResponseHeader, retry, log) {
// remote--> ws
let remoteChunkCount = 0;
let chunks = [];
/** @type {ArrayBuffer | null} */
let vlessHeader = vlessResponseHeader;
let hasIncomingData = false; // check if remoteSocket has incoming data
await remoteSocket.readable
.pipeTo(
new WritableStream({
start() {
},
/**
*
* @param {Uint8Array} chunk
* @param {*} controller
*/
async write(chunk, controller) {
hasIncomingData = true;
// remoteChunkCount++;
if (webSocket.readyState !== WS_READY_STATE_OPEN) {
controller.error(
'webSocket.readyState is not open, maybe close'
);
}
if (vlessHeader) {
webSocket.send(await new Blob([vlessHeader, chunk]).arrayBuffer());
vlessHeader = null;
} else {
// seems no need rate limit this, CF seems fix this??..
// if (remoteChunkCount > 20000) {
// // cf one package is 4096 byte(4kb), 4096 * 20000 = 80M
// await delay(1);
// }
webSocket.send(chunk);
}
},
close() {
log(`remoteConnection!.readable is close with hasIncomingData is ${hasIncomingData}`);
// safeCloseWebSocket(webSocket); // no need for the server to close the websocket first; in some cases it would cause an HTTP ERR_CONTENT_LENGTH_MISMATCH issue, and the client will send a close event anyway.
},
abort(reason) {
console.error(`remoteConnection!.readable abort`, reason);
},
})
)
.catch((error) => {
console.error(
`remoteSocketToWS has exception `,
error.stack || error
);
safeCloseWebSocket(webSocket);
});
// seems is cf connect socket have error,
// 1. Socket.closed will have error
// 2. Socket.readable will be close without any data coming
if (hasIncomingData === false && retry) {
log(`retry`)
retry();
}
}
/**
*
* @param {string} base64Str
* @returns
*/
function base64ToArrayBuffer(base64Str) {
if (!base64Str) {
return { error: null };
}
try {
// go use modified Base64 for URL rfc4648 which js atob not support
base64Str = base64Str.replace(/-/g, '+').replace(/_/g, '/');
const decode = atob(base64Str);
const arryBuffer = Uint8Array.from(decode, (c) => c.charCodeAt(0));
return { earlyData: arryBuffer.buffer, error: null };
} catch (error) {
return { error };
}
}
/**
* This is not real UUID validation
* @param {string} uuid
*/
function isValidUUID(uuid) {
const uuidRegex = /^[0-9a-f]{8}-[0-9a-f]{4}-[4][0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}$/i;
return uuidRegex.test(uuid);
}
const WS_READY_STATE_OPEN = 1;
const WS_READY_STATE_CLOSING = 2;
/**
* Normally, WebSocket will not throw exceptions when closing.
* @param {import("@cloudflare/workers-types").WebSocket} socket
*/
function safeCloseWebSocket(socket) {
try {
if (socket.readyState === WS_READY_STATE_OPEN || socket.readyState === WS_READY_STATE_CLOSING) {
socket.close();
}
} catch (error) {
console.error('safeCloseWebSocket error', error);
}
}
const byteToHex = [];
for (let i = 0; i < 256; ++i) {
byteToHex.push((i + 256).toString(16).slice(1));
}
function unsafeStringify(arr, offset = 0) {
return (byteToHex[arr[offset + 0]] + byteToHex[arr[offset + 1]] + byteToHex[arr[offset + 2]] + byteToHex[arr[offset + 3]] + "-" + byteToHex[arr[offset + 4]] + byteToHex[arr[offset + 5]] + "-" + byteToHex[arr[offset + 6]] + byteToHex[arr[offset + 7]] + "-" + byteToHex[arr[offset + 8]] + byteToHex[arr[offset + 9]] + "-" + byteToHex[arr[offset + 10]] + byteToHex[arr[offset + 11]] + byteToHex[arr[offset + 12]] + byteToHex[arr[offset + 13]] + byteToHex[arr[offset + 14]] + byteToHex[arr[offset + 15]]).toLowerCase();
}
function stringify(arr, offset = 0) {
const uuid = unsafeStringify(arr, offset);
if (!isValidUUID(uuid)) {
throw TypeError("Stringified UUID is invalid");
}
return uuid;
}
/**
*
* @param {import("@cloudflare/workers-types").WebSocket} webSocket
* @param {ArrayBuffer} vlessResponseHeader
* @param {(string)=> void} log
*/
async function handleUDPOutBound(webSocket, vlessResponseHeader, log) {
let isVlessHeaderSent = false;
const transformStream = new TransformStream({
start(controller) {
},
transform(chunk, controller) {
// udp message 2 byte is the the length of udp data
// TODO: this may have a bug, because a udp chunk can be split across two websocket messages
for (let index = 0; index < chunk.byteLength;) {
const lengthBuffer = chunk.slice(index, index + 2);
const udpPakcetLength = new DataView(lengthBuffer).getUint16(0);
const udpData = new Uint8Array(
chunk.slice(index + 2, index + 2 + udpPakcetLength)
);
index = index + 2 + udpPakcetLength;
controller.enqueue(udpData);
}
},
flush(controller) {
}
});
// only handle dns udp for now
transformStream.readable.pipeTo(new WritableStream({
async write(chunk) {
const resp = await fetch('https://1.1.1.1/dns-query',
{
method: 'POST',
headers: {
'content-type': 'application/dns-message',
},
body: chunk,
})
const dnsQueryResult = await resp.arrayBuffer();
const udpSize = dnsQueryResult.byteLength;
// console.log([...new Uint8Array(dnsQueryResult)].map((x) => x.toString(16)));
const udpSizeBuffer = new Uint8Array([(udpSize >> 8) & 0xff, udpSize & 0xff]);
if (webSocket.readyState === WS_READY_STATE_OPEN) {
log(`doh success and dns message length is ${udpSize}`);
if (isVlessHeaderSent) {
webSocket.send(await new Blob([udpSizeBuffer, dnsQueryResult]).arrayBuffer());
} else {
webSocket.send(await new Blob([vlessResponseHeader, udpSizeBuffer, dnsQueryResult]).arrayBuffer());
isVlessHeaderSent = true;
}
}
}
})).catch((error) => {
log('dns udp has error' + error)
});
const writer = transformStream.writable.getWriter();
return {
/**
*
* @param {Uint8Array} chunk
*/
write(chunk) {
writer.write(chunk);
}
};
}
/**
*
* @param {string} userID
* @param {string | null} hostName
* @returns {string}
*/
function getVLESSConfig(userID, hostName) {
const vlessMain = `vless://${userID}\u0040${hostName}:443?encryption=none&security=tls&sni=${hostName}&fp=randomized&type=ws&host=${hostName}&path=%2F%3Fed%3D2048#${hostName}`
return `
################################################################
v2ray
---------------------------------------------------------------
${vlessMain}
---------------------------------------------------------------
################################################################
clash-meta
---------------------------------------------------------------
- type: vless
name: ${hostName}
server: ${hostName}
port: 443
uuid: ${userID}
network: ws
tls: true
udp: false
sni: ${hostName}
client-fingerprint: chrome
ws-opts:
path: "/?ed=2048"
headers:
host: ${hostName}
---------------------------------------------------------------
################################################################
`;
}
Change edgetunnel to assyhaha.
|
9683ddfce391fc4a41f1a2d66b459f42
|
{
"intermediate": 0.3981058597564697,
"beginner": 0.45182180404663086,
"expert": 0.1500723510980606
}
|
46,956
|
If f(z) = 11z^2 - 40z + 21, use synthetic division to find f(4).
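A worked version of the synthetic division (coefficients 11, -40, 21, with 4 as the test value); by the remainder theorem the remainder equals f(4):
\[
\begin{array}{c|ccc}
4 & 11 & -40 & 21 \\
  &    &  44 & 16 \\ \hline
  & 11 &   4 & 37
\end{array}
\qquad\Rightarrow\qquad f(4) = 37
\]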
|
59c9f1fd86276035004a14911b3a627c
|
{
"intermediate": 0.3291982114315033,
"beginner": 0.35603368282318115,
"expert": 0.3147681653499603
}
|
46,957
|
can you give me a python program to save description into text file like
Happiness Begins
Jonas Brothers · 2019
“We went into every session hoping to bottle up happiness and bring it to the world,” Joe Jonas tells Apple Music of the first album he's made with his brothers Nick and Kevin in a decade. “We didn't know that would actually become the title of the album, or that the cover would be the visual representation of how we feel, which is that with each other, we can get through anything.” The album—tremendously pure and upbeat, with co-writing from Ryan Tedder, Max Martin, and Greg Kurstin—has playful Police spinoffs, Post Malone-inspired bops, and smoldering slow-jam serenades that, honestly, might make fans faint. Here's a track-by-track rundown of the family reunion as told by the brothers themselves.
“Sucker”
Kevin: “We wrote this about Ricky Bobby and his journey to be the best race car driver in the world.”
Nick: “If you're not first, you're last.”
Joe: “But it was a slow burn. It wasn't immediate for all of us that we said, ‘This has to be the first single!’ We felt like it'd be a good album song, but we didn't think it’d be one to introduce us back into the world.”
Kevin: “And then what happened?”
Joe: “And then it became the first single.”
“Cool”
Nick: “This song came towards the end of the writing process. We were trying to write a ballad, actually, but we ended up writing this instead because we all came to the studio feeling very cool that day. Feeling like a million bucks.”
“Only Human”
Joe: “This was the last song to make the album. Max Martin and Shellback [Karl Johan Schuster] approached us. They're handsome Scandinavian dudes, but they also happen to be two of the best songwriters and producers in the world. They have a lot of hits on their hands. And a little story: A lot of this song was written and recorded via FaceTime. Shellback was home in Sweden and he had to be really quiet because it was 4 a.m. his time and people were sleeping in the other room.”
Nick: “Actually, it would have been a really good Apple commercial. How the world is getting smaller and more connected and hit songs are written on FaceTime.”
“I Believe”
Nick: “Early on in the process, Greg Kurstin helped us define what the album was going to sound like. He’s the best guy to do it, having worked with Adele and Beck–artists we love and respect. And this song is a love letter to my wife. I'm so excited to play it on tour.”
Joe: “We listened to it a lot on your bachelor party. It sounds even better by the beach.”
Nick: “It does. So try that.”
“Used to Be”
Joe: “This, upon first listen, was not a song I connected with. And then all of a sudden I was like, ‘Oh my god, I get it. I love it.’ It just felt right.”
Kevin: “I feel like this song was heavily influenced by Post Malone and stuff on the rhythmic urban side. Post had shouted us out in one of his songs, so we felt it was only appropriate to do the same.”
“Every Single Time”
Joe: “This one has a Police feel to it.”
Nick: “The band Police. If you haven't heard of them, you should definitely go check them out. They're a great new band. They're fantastic.”
Kevin: “The lead singer is this guy Stung.”
Nick: “Yeah, Stung. He's fantastic.”
“Don’t Throw It Away”
Joe: “This song was also produced by Greg Kurstin and co-written by Mozella, who has become one of the biggest songwriters in the game and helped us unlock a lot of things early on in the creative process. This has some '80s influences in it.”
“Love Her”
Kevin: “It's very important on an album for us to have some romantic songs. We've had a lot of vibey, upbeat songs that you listen to and by the eighth song, at this point in the dinner, you're finishing that bottle of wine. You had the main course.”
“Happy When I’m Sad”
Nick: “This song is about putting on a face, a mask, a vibe of being happy when you’re sad.”
Kevin: “I think a lot of people can relate to that. I visualize Dexter, the TV show.”
Nick: “Just cutting someone in half but smiling through it.”
Joe: “Interesting. That's pretty dark, guys. Not what I had in mind when we were writing it, but to each his own.”
“Trust”
Nick: “This song was a collaboration with Jason Evigan, who I wrote my song ‘Chains’ with. He's a brilliant producer and songwriter and has been a friend for many years, but to get to turn great relationships into great songs is one of the most rewarding things as a songwriter. It was a magnetic connection for all of us.”
“Strangers”
Joe: “You've gotten through 10 songs and you're like, ‘Where the hell are the Jonas Brothers I know? This is stupid.’ Lucky for you, Number 11 is here to save the day.”
“Hesitate”
Joe: “This is an important song. I wrote it for my significant other, Sophie, and it’s one of those love letters that you write to your partner saying, ‘I'll be there no matter what.’”
“Rollercoaster”
Kevin: “Our documentary Chasing Happiness came out recently, and it was so great to be able to tell our story. We knew we needed a song to help do that. Ryan [Tedder] presented us with ‘Rollercoaster.’ We adjusted a little bit to make it our own, and it perfectly embodies all the themes that are relevant in the documentary: having had that fun when we were young, then choosing to do it again in a healthy way, and in the end saying, ‘I would gladly get on that rollercoaster with you again and do it all over again.’”
“Comeback”
Joe: “This is one I'm most proud of. We use pronouns in this that veer towards relationships, but it fully represents where we were at as a band, saying: ‘Come back to me. I'll come back to you.’ We've been through a lot in this journey, but it led us back here. This song touches on our roots growing up playing music in church, and there’s a hymn formed in the chord structure and even the melody. I'm just really proud of this one.”
when given apple music link.
|
0d4fdb965527900d8d7b19924fae8206
|
{
"intermediate": 0.30853983759880066,
"beginner": 0.3976399302482605,
"expert": 0.29382026195526123
}
|
46,958
|
can you give me a python program to save description into text file like
Sample info:
Happiness Begins
Jonas Brothers · 2019
“We went into every session hoping to bottle up happiness and bring it to the world,” Joe Jonas tells Apple Music of the first album he’s made with his brothers Nick and Kevin in a decade. “We didn’t know that would actually become the title of the album, or that the cover would be the visual representation of how we feel, which is that with each other, we can get through anything.” The album—tremendously pure and upbeat, with co-writing from Ryan Tedder, Max Martin, and Greg Kurstin—has playful Police spinoffs, Post Malone-inspired bops, and smoldering slow-jam serenades that, honestly, might make fans faint. Here’s a track-by-track rundown of the family reunion as told by the brothers themselves.
“Sucker”
Kevin: “We wrote this about Ricky Bobby and his journey to be the best race car driver in the world.”
Nick: “If you’re not first, you’re last.”
Joe: “But it was a slow burn. It wasn’t immediate for all of us that we said, ‘This has to be the first single!’ We felt like it’d be a good album song, but we didn’t think it’d be one to introduce us back into the world.”
Kevin: “And then what happened?”
Joe: “And then it became the first single.”
“Cool”
Nick: “This song came towards the end of the writing process. We were trying to write a ballad, actually, but we ended up writing this instead because we all came to the studio feeling very cool that day. Feeling like a million bucks.”
“Only Human”
Joe: “This was the last song to make the album. Max Martin and Shellback [Karl Johan Schuster] approached us. They’re handsome Scandinavian dudes, but they also happen to be two of the best songwriters and producers in the world. They have a lot of hits on their hands. And a little story: A lot of this song was written and recorded via FaceTime. Shellback was home in Sweden and he had to be really quiet because it was 4 a.m. his time and people were sleeping in the other room.”
Nick: “Actually, it would have been a really good Apple commercial. How the world is getting smaller and more connected and hit songs are written on FaceTime.”
“I Believe”
Nick: “Early on in the process, Greg Kurstin helped us define what the album was going to sound like. He’s the best guy to do it, having worked with Adele and Beck–artists we love and respect. And this song is a love letter to my wife. I’m so excited to play it on tour.”
Joe: “We listened to it a lot on your bachelor party. It sounds even better by the beach.”
Nick: “It does. So try that.”
“Used to Be”
Joe: “This, upon first listen, was not a song I connected with. And then all of a sudden I was like, ‘Oh my god, I get it. I love it.’ It just felt right.”
Kevin: “I feel like this song was heavily influenced by Post Malone and stuff on the rhythmic urban side. Post had shouted us out in one of his songs, so we felt it was only appropriate to do the same.”
“Every Single Time”
Joe: “This one has a Police feel to it.”
Nick: “The band Police. If you haven’t heard of them, you should definitely go check them out. They’re a great new band. They’re fantastic.”
Kevin: “The lead singer is this guy Stung.”
Nick: “Yeah, Stung. He’s fantastic.”
“Don’t Throw It Away”
Joe: “This song was also produced by Greg Kurstin and co-written by Mozella, who has become one of the biggest songwriters in the game and helped us unlock a lot of things early on in the creative process. This has some '80s influences in it.”
“Love Her”
Kevin: “It’s very important on an album for us to have some romantic songs. We’ve had a lot of vibey, upbeat songs that you listen to and by the eighth song, at this point in the dinner, you’re finishing that bottle of wine. You had the main course.”
“Happy When I’m Sad”
Nick: “This song is about putting on a face, a mask, a vibe of being happy when you’re sad.”
Kevin: “I think a lot of people can relate to that. I visualize Dexter, the TV show.”
Nick: “Just cutting someone in half but smiling through it.”
Joe: “Interesting. That’s pretty dark, guys. Not what I had in mind when we were writing it, but to each his own.”
“Trust”
Nick: “This song was a collaboration with Jason Evigan, who I wrote my song ‘Chains’ with. He’s a brilliant producer and songwriter and has been a friend for many years, but to get to turn great relationships into great songs is one of the most rewarding things as a songwriter. It was a magnetic connection for all of us.”
“Strangers”
Joe: “You’ve gotten through 10 songs and you’re like, ‘Where the hell are the Jonas Brothers I know? This is stupid.’ Lucky for you, Number 11 is here to save the day.”
“Hesitate”
Joe: “This is an important song. I wrote it for my significant other, Sophie, and it’s one of those love letters that you write to your partner saying, ‘I’ll be there no matter what.’”
“Rollercoaster”
Kevin: “Our documentary Chasing Happiness came out recently, and it was so great to be able to tell our story. We knew we needed a song to help do that. Ryan [Tedder] presented us with ‘Rollercoaster.’ We adjusted a little bit to make it our own, and it perfectly embodies all the themes that are relevant in the documentary: having had that fun when we were young, then choosing to do it again in a healthy way, and in the end saying, ‘I would gladly get on that rollercoaster with you again and do it all over again.’”
“Comeback”
Joe: “This is one I’m most proud of. We use pronouns in this that veer towards relationships, but it fully represents where we were at as a band, saying: ‘Come back to me. I’ll come back to you.’ We’ve been through a lot in this journey, but it led us back here. This song touches on our roots growing up playing music in church, and there’s a hymn formed in the chord structure and even the melody. I’m just really proud of this one.”
when given an Apple Music link. It must fetch the description and label info
like https://music.apple.com/us/album/happiness-begins/1461478261
so that it must save this:
Happiness Begins
Jonas Brothers · 2019
“We went into every session hoping to bottle up happiness and bring it to the world,” Joe Jonas tells Apple Music of the first album he’s made with his brothers Nick and Kevin in a decade. “We didn’t know that would actually become the title of the album, or that the cover would be the visual representation of how we feel, which is that with each other, we can get through anything.” The album—tremendously pure and upbeat, with co-writing from Ryan Tedder, Max Martin, and Greg Kurstin—has playful Police spinoffs, Post Malone-inspired bops, and smoldering slow-jam serenades that, honestly, might make fans faint. Here’s a track-by-track rundown of the family reunion as told by the brothers themselves.
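A minimal sketch of fetching and saving the album header information with requests and BeautifulSoup. It assumes the page exposes an og:title and a description meta tag, which is only an assumption about Apple's markup; the long track-by-track editorial notes are rendered from a JSON payload embedded in the page, so a meta-tag scrape like this typically captures only the short description:
import requests
from bs4 import BeautifulSoup

URL = "https://music.apple.com/us/album/happiness-begins/1461478261"

def save_album_info(url, out_path="album_info.txt"):
    # Plain GET with a browser-like User-Agent; Apple may still serve a reduced page.
    html = requests.get(url, headers={"User-Agent": "Mozilla/5.0"}, timeout=30).text
    soup = BeautifulSoup(html, "html.parser")

    title = soup.find("meta", attrs={"property": "og:title"})
    description = soup.find("meta", attrs={"name": "description"})

    with open(out_path, "w", encoding="utf-8") as f:
        if title is not None:
            f.write(title["content"] + "\n")
        if description is not None:
            f.write(description["content"] + "\n")

save_album_info(URL)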
|
f8ad20eab7c20c68119cf4591e6c97db
|
{
"intermediate": 0.3163120746612549,
"beginner": 0.47398731112480164,
"expert": 0.20970067381858826
}
|
46,959
|
Hi, I have a script for an enemy. Can you extend it so that after the 3-second wait it cannot activate DisableRagdoll again for 5 seconds? using System.Collections;
using UnityEngine;
public class EnemyController : MonoBehaviour
{
public float standingThreshold = 25f; // Tilt angle at which the enemy should fall
public Rigidbody[] enemyRigidbodies; // Rigidbodies of the enemy's body parts
private bool isStanding = true;
void Update()
{
// Check the tilt angle
float tiltAngle = Mathf.Abs(transform.eulerAngles.x);
tiltAngle = tiltAngle > 180 ? 360 - tiltAngle : tiltAngle; // Convert the angle to a value below 180 degrees
if (isStanding && tiltAngle > standingThreshold)
{
// Initiate the fall
StartCoroutine(FallAndStandUpRoutine());
}
}
IEnumerator FallAndStandUpRoutine()
{
isStanding = false;
DisableRagdoll(); // Your method for initiating the fall
yield return new WaitForSeconds(3); // Wait 3 seconds
EnableRagdoll(); // Your method for restoring the Active Ragdoll and standing up
}
void DisableRagdoll()
{
ConfigurableJoint joint = GetComponent<ConfigurableJoint>();
if (joint != null)
{
// Make a copy of the current drive settings
JointDrive drive = joint.slerpDrive;
// Set positionSpring to 0
drive.positionSpring = 0;
// Apply the modified settings back to slerpDrive
joint.slerpDrive = drive;
}
}
void EnableRagdoll()
{
ConfigurableJoint joint = GetComponent<ConfigurableJoint>();
if (joint != null)
{
// Make a copy of the current drive settings
JointDrive drive = joint.slerpDrive;
// Set positionSpring back to 3000
drive.positionSpring = 3000;
// Apply the modified settings back to slerpDrive
joint.slerpDrive = drive;
}
}
}
|
5488e5f06d178b6441aeeeaecc2e4883
|
{
"intermediate": 0.29741209745407104,
"beginner": 0.5809001922607422,
"expert": 0.12168774008750916
}
|
46,960
|
write python script to save and fetch this from apple music link
14 Songs, 43 minutes
℗ 2019 Jonas Brothers Recording, Limited Liability Company, under exclusive license to Republic Records, a division of UMG Recordings, Inc.
and save it in a file
https://music.apple.com/us/album/happiness-begins/1461478261
|
b8ac52267b44849d968a65f46b371933
|
{
"intermediate": 0.48212146759033203,
"beginner": 0.22498326003551483,
"expert": 0.29289528727531433
}
|
46,961
|
Create and run a PowerShell script called UserInfo_yourinitials.ps1 for both your administrator and BackUp Administrator that places the following information in a text report called C:\Reports\FinalUserReport.txt in a table format. Ensure your script has the author and creation date, and that each line is commented. **Make sure to NOT truncate lines in your report. (make it simple so it seems like I wrote it myself)
|
a63cadfb1009760c525d10378438cf1c
|
{
"intermediate": 0.3844526708126068,
"beginner": 0.25460758805274963,
"expert": 0.3609396815299988
}
|
46,962
|
Create and run a PowerShell script called UserInfo_yourinitials.ps1 for both your administrator and BackUp Administrator that places the following information in a text report called C:\Reports\FinalUserReport.txt in a table format. Ensure your script has the author and creation date, and that each line is commented. **Make sure to NOT truncate lines in your report. (make it simple so it seems like I wrote it myself)
|
9ba563380d7067bb03c0de5635410740
|
{
"intermediate": 0.3844526708126068,
"beginner": 0.25460758805274963,
"expert": 0.3609396815299988
}
|
46,963
|
how to check which NDK version I am using in my react native project
|
5911e8685b24c1577e23509a5ebb54d3
|
{
"intermediate": 0.5843043923377991,
"beginner": 0.1474423110485077,
"expert": 0.2682533264160156
}
|
46,964
|
fix this:
---------------------------------------------------------------------
AttributeError Traceback (most recent call last)
Cell In[6], line 44
41 plt.title('Block Influence Scores')
42 plt.show()
---> 44 bi_scores = calculate_block_influence(model, input_tokens.input_ids)
45 visualize_bi_scores(bi_scores)
AttributeError: 'Tensor' object has no attribute 'input_ids'
import torch
import matplotlib.pyplot as plt
from transformers import AutoTokenizer, AutoModelForCausalLM
from transformers import LlamaTokenizer, MistralForCausalLM
import bitsandbytes, flash_attn
def calculate_block_influence(model, inputs):
model.eval()
influences = []
with torch.no_grad():
# Ensure the model outputs hidden states
outputs = model(inputs, output_hidden_states=True)
hidden_states = outputs.hidden_states # Access hidden states from outputs
for i in range(len(hidden_states) - 1):
state_i, state_j = hidden_states[i], hidden_states[i + 1]
influence = 1 - torch.mean(torch.sum(state_j * state_i, dim=-1) /
(torch.norm(state_i, dim=-1) * torch.norm(state_j, dim=-1)))
influences.append(influence.item())
return influences
model_name = "NousResearch/Hermes-2-Pro-Mistral-7B"
tokenizer = LlamaTokenizer.from_pretrained(model_name)
model = MistralForCausalLM.from_pretrained(model_name, load_in_8bit=False, torch_dtype=torch.float16,
device_map="auto",
load_in_4bit=True,
use_flash_attention_2=False)
input_tokens = tokenizer.encode("<|im_start|>system\nYou are Hermes 2.<|im_end|>\n<|im_start|>user\nHello, who are you?<|im_end|>\n<|im_start|>assistant", return_tensors="pt").to('cuda')
data_loader = [input_tokens]
def visualize_bi_scores(bi_scores):
num_layers = len(bi_scores)
plt.figure(figsize=(10, 6))
plt.bar(range(num_layers), bi_scores)
plt.xlabel('Layer Index')
plt.ylabel('Block Influence Score')
plt.title('Block Influence Scores')
plt.show()
bi_scores = calculate_block_influence(model, input_tokens.input_ids)
visualize_bi_scores(bi_scores)
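A sketch of the minimal change: tokenizer.encode(..., return_tensors="pt") already returns a plain tensor, so it has no .input_ids attribute. Either pass the tensor directly, or call the tokenizer itself, which returns a BatchEncoding that does carry input_ids:
# Option 1: encode() already gave a tensor, so pass it straight through.
bi_scores = calculate_block_influence(model, input_tokens)

# Option 2: calling the tokenizer returns a BatchEncoding with .input_ids.
enc = tokenizer("<|im_start|>user\nHello, who are you?<|im_end|>", return_tensors="pt").to("cuda")
bi_scores = calculate_block_influence(model, enc.input_ids)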
|
6bafb72add44ff478274c7e4a94e3b7b
|
{
"intermediate": 0.4464547634124756,
"beginner": 0.2604143023490906,
"expert": 0.2931309640407562
}
|
46,965
|
I am making a C++ SDL-based game engine and am currently converting all the raw SDL pointers into smart pointers (unique_ptr). I did the renderer without problems; since the renderer was a single element, I didn't have much trouble doing it:
Renderer::Renderer(const Window& window, bool vsync) : renderer(nullptr, &SDL_DestroyRenderer), window(window), vsync(vsync)
{
CreateRenderer();
//...
void Renderer::CreateRenderer()
{
SDL_Renderer* sdlRenderer = SDL_CreateRenderer(window.GetWindow(), -1, 0);
if (renderer != nullptr)
{
renderer.reset(sdlRenderer);
//...
SDL_Renderer* Renderer::GetRenderer() const
{
return renderer.get();
}
But now, moving to the Font class, I've hit a wall: my Font class has an unordered_map that caches the different sizes of a font, so each time a new size is asked for, it gets saved in the map, and I don't know how to turn the original SDL raw pointer into a smart pointer here.
original:
class Font
{
public:
explicit Font(const std::string& filePath);
~Font();
Font(const Font&) = delete;
Font& operator=(const Font&) = delete;
Font(Font&& other) noexcept;
Font& operator=(Font&& other) noexcept;
void AddFontSize(int size);
//...
private:
TTF_Font* GetFont(int size);
void ClearFonts();
std::string filePath;
std::unordered_map<int, TTF_Font*> fontSizes;
};
Font::Font(const std::string& filePath) : filePath(filePath)
{
}
Font::~Font()
{
ClearFonts();
}
void Font::ClearFonts()
{
for (auto& pair : fontSizes)
{
if (pair.second != nullptr)
{
TTF_CloseFont(pair.second);
}
}
fontSizes.clear();
}
void Font::AddFontSize(int size)
{
if (!HasFontSize(size))
{
TTF_Font* font = TTF_OpenFont(filePath.c_str(), size);
if (font == nullptr)
{
//throws error
}
fontSizes[size] = font;
}
}
|
cd8bdd90d67e098a81a7c779da3ecf1c
|
{
"intermediate": 0.33229586482048035,
"beginner": 0.4878343343734741,
"expert": 0.17986983060836792
}
|
46,966
|
write python script to save from apple music link - https://music.apple.com/us/album/happiness-begins/1461478261
14 Songs, 43 minutes
℗ 2019 Jonas Brothers Recording, Limited Liability Company, under exclusive license to Republic Records, a division of UMG Recordings, Inc.
and save the info in a file
|
b5877f140fb4ac5e524a0d4a2b598c1a
|
{
"intermediate": 0.37858957052230835,
"beginner": 0.20134174823760986,
"expert": 0.42006874084472656
}
|
46,967
|
how to save tensorflow trained model
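A minimal sketch, assuming a tf.keras model in TensorFlow 2.x (the tiny model below is only a stand-in for your trained one):

```python
import tensorflow as tf

# Stand-in model; replace with your trained model.
model = tf.keras.Sequential([tf.keras.layers.Dense(1, input_shape=(4,))])
model.compile(optimizer="adam", loss="mse")

model.save("my_model.h5")         # single HDF5 file: architecture + weights + optimizer state
model.save("my_saved_model")      # TensorFlow SavedModel directory
model.save_weights("weights.h5")  # weights only

restored = tf.keras.models.load_model("my_model.h5")
```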
|
9ed3823ef1c7c904feb5cf07199b6353
|
{
"intermediate": 0.07686135917901993,
"beginner": 0.08684228360652924,
"expert": 0.8362963795661926
}
|
46,968
|
can you get total duration only from this link
https://music.apple.com/us/album/happiness-begins/1461478261
ex: 43 minutes
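One possible approach, sketched below, assuming the public iTunes Lookup API is acceptable: it returns per-track durations (trackTimeMillis) for the album ID at the end of the Apple Music URL, which can be summed to get the total.

```python
import requests

album_id = "1461478261"  # taken from the end of the Apple Music URL
resp = requests.get("https://itunes.apple.com/lookup",
                    params={"id": album_id, "entity": "song"})
results = resp.json()["results"]

# The first result describes the album; the rest are tracks with trackTimeMillis.
tracks = [r for r in results if r.get("wrapperType") == "track"]
total_minutes = round(sum(t.get("trackTimeMillis", 0) for t in tracks) / 60000)
print(f"{len(tracks)} songs, {total_minutes} minutes")
```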
|
aa78355c0e9b6b4b02cff8442f8f2cff
|
{
"intermediate": 0.3339478075504303,
"beginner": 0.23856514692306519,
"expert": 0.42748701572418213
}
|
46,969
|
{
"name": "KeyError",
"message": "29",
"stack": "---------------------------------------------------------------------------
KeyError Traceback (most recent call last)
Cell In[26], line 7
3 train_generator = data_generator_lstm(x_scaler_loaded, y_scaler_loaded, 30)
5 # Update total_samples, train_samples, and val_samples according to your dataset after transformations
----> 7 model.fit(
8 train_generator,
9 steps_per_epoch=50,
10 epochs=75,
11 # Add validation_data if you have a validation generator
12 )
File c:\\Users\\arisa\\.conda\\envs\\tf\\lib\\site-packages\\keras\\utils\\traceback_utils.py:70, in filter_traceback.<locals>.error_handler(*args, **kwargs)
67 filtered_tb = _process_traceback_frames(e.__traceback__)
68 # To get the full stack trace, call:
69 # `tf.debugging.disable_traceback_filtering()`
---> 70 raise e.with_traceback(filtered_tb) from None
71 finally:
72 del filtered_tb
Cell In[23], line 30, in data_generator_lstm(x_scaler, y_scaler, n_steps)
28 for i in range(len(feature_data) - n_steps):
29 X.append(feature_data[i:i + n_steps])
---> 30 y.append(target_data[i + n_steps - 1])
32 X, y = np.array(X), np.array(y)
33 yield X, y
File c:\\Users\\arisa\\.conda\\envs\\tf\\lib\\site-packages\\pandas\\core\\frame.py:4102, in DataFrame.__getitem__(self, key)
4100 if self.columns.nlevels > 1:
4101 return self._getitem_multilevel(key)
-> 4102 indexer = self.columns.get_loc(key)
4103 if is_integer(indexer):
4104 indexer = [indexer]
File c:\\Users\\arisa\\.conda\\envs\\tf\\lib\\site-packages\\pandas\\core\\indexes\\base.py:3812, in Index.get_loc(self, key)
3807 if isinstance(casted_key, slice) or (
3808 isinstance(casted_key, abc.Iterable)
3809 and any(isinstance(x, slice) for x in casted_key)
3810 ):
3811 raise InvalidIndexError(key)
-> 3812 raise KeyError(key) from err
3813 except TypeError:
3814 # If we have a listlike key, _check_indexing_error will raise
3815 # InvalidIndexError. Otherwise we fall through and re-raise
3816 # the TypeError.
3817 self._check_indexing_error(key)
KeyError: 29"
}
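A hedged reading of this traceback: target_data appears to be a pandas DataFrame, so target_data[i + n_steps - 1] is treated as a column-label lookup and fails with KeyError: 29. A minimal sketch of the positional-indexing fix:

```python
import pandas as pd

# Toy stand-in for the real target_data frame.
target_data = pd.DataFrame({"y_High_1d": range(100), "y_Low_1d": range(100)})
n_steps, i = 30, 0

# target_data[29] looks for a column *named* 29 -> KeyError.
# Use .iloc to select the row at that integer position instead:
y_row = target_data.iloc[i + n_steps - 1]               # pandas Series for that row
y_values = target_data.iloc[i + n_steps - 1].to_numpy()  # same row as a NumPy vector
```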
|
15befdee3952119f767b74c0ac94cc4c
|
{
"intermediate": 0.32384395599365234,
"beginner": 0.4263617694377899,
"expert": 0.24979433417320251
}
|
46,970
|
c# EventArgs in 2024
|
ab301e77e46bf6b7e4ea54785b72649c
|
{
"intermediate": 0.37530502676963806,
"beginner": 0.37166261672973633,
"expert": 0.253032386302948
}
|
46,971
|
code:
# %%
from sklearn.preprocessing import StandardScaler
import pandas as pd
import numpy as np
from tensorflow import keras
import joblib
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import LSTM,Dense,Dropout
import os
# %%
csv_directory = r"C:\Users\arisa\Desktop\day_spot_summary"
csv_files = [file for file in os.listdir(csv_directory) if file.endswith('.csv')]
# %%
# %%
def build_lstm_model(input_shape):
model = Sequential([
LSTM(2716, activation='tanh', input_shape=input_shape, return_sequences=True), # Adjusted for LSTM
Dropout(0.20),
LSTM(2716, activation='tanh', return_sequences=False), # Additional LSTM layer
# Dropout(0.10),
# LSTM(2716, activation='tanh', return_sequences=False), # Additional LSTM layer
Dropout(0.10),
Dense(128, activation='relu'),
Dense(64, activation='relu'),
Dense(32, activation='relu'),
Dense(12),
])
model.compile(optimizer='adam',
loss='mse', # Use Mean Squared Error for regression
metrics=['mae']) # Mean Absolute Error as an additional metric
return model
# %%
def data_generator_lstm( n_steps):
while True:
for csv_file in csv_files:
# Read the CSV file
file_path = os.path.join(csv_directory, csv_file)
chunk = pd.read_csv(file_path)
feature_data = chunk.drop([
'y_High_1d', 'y_Low_1d', 'y_Priority_1d',
'y_High_2d', 'y_Low_2d', 'y_Priority_2d',
'y_High_3d', 'y_Low_3d', 'y_Priority_3d',
'y_High_5d', 'y_Low_5d', 'y_Priority_5d'], axis=1)
target_data = chunk[['y_High_1d'
, 'y_Low_1d', 'y_Priority_1d',
'y_High_2d', 'y_Low_2d', 'y_Priority_2d',
'y_High_3d', 'y_Low_3d', 'y_Priority_3d',
'y_High_5d', 'y_Low_5d', 'y_Priority_5d'
]]
# Prepare sequences for features and targets
X, y = [], []
for i in range(len(feature_data) - n_steps):
X.append(feature_data[i:i + n_steps].to_numpy())
y.append(target_data[i + n_steps - 1].to_numpy())
X, y = np.array(X), np.array(y)
yield X, y
# %%
from tensorflow.keras.mixed_precision import set_global_policy
# Enable mixed precision
set_global_policy('mixed_float16')
# %%
model = build_lstm_model((30, 2716,))
model.summary()
# %%
import warnings
warnings.filterwarnings(action='ignore', message='X has feature names, but StandardScaler was fitted without feature names')
train_generator = data_generator_lstm(30)
# Update total_samples, train_samples, and val_samples according to your dataset after transformations
model.fit(
train_generator,
steps_per_epoch=50,
epochs=75,
# Add validation_data if you have a validation generator
)
error:
{
"name": "KeyError",
"message": "29",
"stack": "---------------------------------------------------------------------------
KeyError Traceback (most recent call last)
Cell In[7], line 7
3 train_generator = data_generator_lstm(30)
5 # Update total_samples, train_samples, and val_samples according to your dataset after transformations
----> 7 model.fit(
8 train_generator,
9 steps_per_epoch=50,
10 epochs=75,
11 # Add validation_data if you have a validation generator
12 )
File c:\\Users\\arisa\\.conda\\envs\\tf\\lib\\site-packages\\keras\\utils\\traceback_utils.py:70, in filter_traceback.<locals>.error_handler(*args, **kwargs)
67 filtered_tb = _process_traceback_frames(e.__traceback__)
68 # To get the full stack trace, call:
69 # `tf.debugging.disable_traceback_filtering()`
---> 70 raise e.with_traceback(filtered_tb) from None
71 finally:
72 del filtered_tb
Cell In[3], line 26, in data_generator_lstm(n_steps)
24 for i in range(len(feature_data) - n_steps):
25 X.append(feature_data[i:i + n_steps].to_numpy())
---> 26 y.append(target_data[i + n_steps - 1].to_numpy())
28 X, y = np.array(X), np.array(y)
29 yield X, y
File c:\\Users\\arisa\\.conda\\envs\\tf\\lib\\site-packages\\pandas\\core\\frame.py:4102, in DataFrame.__getitem__(self, key)
4100 if self.columns.nlevels > 1:
4101 return self._getitem_multilevel(key)
-> 4102 indexer = self.columns.get_loc(key)
4103 if is_integer(indexer):
4104 indexer = [indexer]
File c:\\Users\\arisa\\.conda\\envs\\tf\\lib\\site-packages\\pandas\\core\\indexes\\base.py:3812, in Index.get_loc(self, key)
3807 if isinstance(casted_key, slice) or (
3808 isinstance(casted_key, abc.Iterable)
3809 and any(isinstance(x, slice) for x in casted_key)
3810 ):
3811 raise InvalidIndexError(key)
-> 3812 raise KeyError(key) from err
3813 except TypeError:
3814 # If we have a listlike key, _check_indexing_error will raise
3815 # InvalidIndexError. Otherwise we fall through and re-raise
3816 # the TypeError.
3817 self._check_indexing_error(key)
KeyError: 29"
}
|
6d9237ed670516d284010139662fa875
|
{
"intermediate": 0.43782955408096313,
"beginner": 0.25921034812927246,
"expert": 0.3029601573944092
}
|
46,972
|
Western Governors University July 2023
Computer Science (Bachelor of Science)
St. John’s University- Queens, NY August 2020 - December 2023
Biochemistry (Bachelor of Science)
University Honor Society, Certificate of Achievement, Silver Key Nominee, Provost Scholarship
SKILLS:
Technology: Microsoft Office (Word, Excel, PowerPoint, Teams), Google Suite, HTML, CSS, Scratch, Python, R, Git, GitLab, GitHub, Agile, Scrum, Waterfall
Multilingual: English(Native), Bengali(Native), Spanish(Basic), Arabic(Beginner)
RELEVANT EXPERIENCE
St. John’s University - Queens, NY
IT Help Desk October 2021 – December 2023
Deliver first-level technical support; troubleshoot printer and voicemail issues; perform routine system checks
Reimaged and deployed machines, streamlining technology access for staff and students
Liaise with IT departments to provide comprehensive support for classroom technology
Research Assistant April 2021 – Present
Conduct computational research focusing on photochemistry and molecular photoabsorption
Analyze experimental data and developed models to understand renewable energy solutions (ex. Deuterium Oxide on Chl-a and Chl-a diffusivity to determine its use as renewable solar cells)
OTHER EXPERIENCE
Multi-Specialty Clinic - Queens, NY
Volunteer Medical Assistant November 2022 – May 2023
Managed patient data of approximately 20-50 patients using eClinicalWorks V11 Electronic Health Record
Prepare examination rooms and instruments; monitor patients' heart performance with EKG machine
Conduct physician referrals, appointments, registration and insurance protocols, and general office duties
Medical Office of Dr. Kamyar Tavakoli MD - Queens, NY
Medical Assistant November 2022 – February 2023
Record vital signs, chief complaints, medical issues of roughly 40 patients per day using Medgen and Sunrise EHR
Ordered lab tests and other medical tests; managed supplies and prescriptions, streamlining office procedures
Queens Public Library - Queens, NY
Robotics and Coding Teacher March 2019 – Present
Teach coding principles using Python, HTML, CSS, and Scratch to a diverse age range of students
Design curriculum incorporating Raspberry Pi and organize programming events, boosting engagement by 15%
PUBLICATION
Jassar, S., Liu, J. B., Rahman, N., et al. (2021). Advances in Raman-based broadband optical refrigeration. Proc. SPIE, 118150I. https://doi.org/10.1117/12.2594519
Liu, J. B., Rahman, N., et al. (2022). Radiant/non-Radiant Lifetime Switching in Chlorophyll and Application to Energy Storing Photovoltaic Cells. IEEE 49th Photovoltaics Specialists Conference (PVSC)
|
9008723aaaa7cf2c29ccfb92a5e8ad9f
|
{
"intermediate": 0.3835996389389038,
"beginner": 0.31753847002983093,
"expert": 0.29886189103126526
}
|
46,973
|
I am trying to come up with a new template for our rules engine. A template is just a combination of inputs and outputs, for example, based on input given, what is the output, a combination of these is a template.
Now, I want to integrate multi-tiered approval/decline functionality on our platform which is used for loan underwriting.
I have written a rule, which checks the conditions and decides on how many levels of approvals would be needed for an application. But, I am unsure of how I can implement the next part, which is who would be assigned at each of these levels to approve the application.
I was thinking at each level the assignments could be based on the role a user has on the platform. So let's say an advisor will be the first level of approval/decline, and then comes the credit officer at level 2 and so on.
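One way to sketch the assignment part, assuming role names like advisor and credit officer exist on the platform (every name below is hypothetical, not part of your rules engine):

```python
# Hypothetical sketch: map approval levels to roles, then resolve approvers.
APPROVAL_CHAIN = ["advisor", "credit_officer", "risk_manager", "credit_committee"]

def assign_approvers(levels_required, users_by_role):
    """users_by_role: e.g. {"advisor": ["alice"], "credit_officer": ["bob"]}."""
    chain = APPROVAL_CHAIN[:levels_required]
    return [{"level": i + 1, "role": role, "assignees": users_by_role.get(role, [])}
            for i, role in enumerate(chain)]

assignments = assign_approvers(2, {"advisor": ["alice"], "credit_officer": ["bob"]})
# -> level 1 handled by the advisor role, level 2 by the credit officer role
```

The rule that decides how many levels are needed then only has to output levels_required; the chain of roles stays configuration that can be edited per template.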
|
377a2905a6bd09ef1f05be1a9a775491
|
{
"intermediate": 0.41464149951934814,
"beginner": 0.3067227005958557,
"expert": 0.2786358594894409
}
|
46,974
|
{
"name": "InvalidArgumentError",
"message": "Graph execution error:
Detected at node 'mean_squared_error/SquaredDifference' defined at (most recent call last):
File \"c:\\Users\\arisa\\.conda\\envs\\tf\\lib\\runpy.py\", line 197, in _run_module_as_main
return _run_code(code, main_globals, None,
File \"c:\\Users\\arisa\\.conda\\envs\\tf\\lib\\runpy.py\", line 87, in _run_code
exec(code, run_globals)
File \"c:\\Users\\arisa\\.conda\\envs\\tf\\lib\\site-packages\\ipykernel_launcher.py\", line 18, in <module>
app.launch_new_instance()
File \"c:\\Users\\arisa\\.conda\\envs\\tf\\lib\\site-packages\\traitlets\\config\\application.py\", line 1075, in launch_instance
app.start()
File \"c:\\Users\\arisa\\.conda\\envs\\tf\\lib\\site-packages\\ipykernel\\kernelapp.py\", line 739, in start
self.io_loop.start()
File \"c:\\Users\\arisa\\.conda\\envs\\tf\\lib\\site-packages\\tornado\\platform\\asyncio.py\", line 205, in start
self.asyncio_loop.run_forever()
File \"c:\\Users\\arisa\\.conda\\envs\\tf\\lib\\asyncio\\base_events.py\", line 601, in run_forever
self._run_once()
File \"c:\\Users\\arisa\\.conda\\envs\\tf\\lib\\asyncio\\base_events.py\", line 1905, in _run_once
handle._run()
File \"c:\\Users\\arisa\\.conda\\envs\\tf\\lib\\asyncio\\events.py\", line 80, in _run
self._context.run(self._callback, *self._args)
File \"c:\\Users\\arisa\\.conda\\envs\\tf\\lib\\site-packages\\ipykernel\\kernelbase.py\", line 545, in dispatch_queue
await self.process_one()
File \"c:\\Users\\arisa\\.conda\\envs\\tf\\lib\\site-packages\\ipykernel\\kernelbase.py\", line 534, in process_one
await dispatch(*args)
File \"c:\\Users\\arisa\\.conda\\envs\\tf\\lib\\site-packages\\ipykernel\\kernelbase.py\", line 437, in dispatch_shell
await result
File \"c:\\Users\\arisa\\.conda\\envs\\tf\\lib\\site-packages\\ipykernel\\ipkernel.py\", line 359, in execute_request
await super().execute_request(stream, ident, parent)
File \"c:\\Users\\arisa\\.conda\\envs\\tf\\lib\\site-packages\\ipykernel\\kernelbase.py\", line 778, in execute_request
reply_content = await reply_content
File \"c:\\Users\\arisa\\.conda\\envs\\tf\\lib\\site-packages\\ipykernel\\ipkernel.py\", line 446, in do_execute
res = shell.run_cell(
File \"c:\\Users\\arisa\\.conda\\envs\\tf\\lib\\site-packages\\ipykernel\\zmqshell.py\", line 549, in run_cell
return super().run_cell(*args, **kwargs)
File \"c:\\Users\\arisa\\.conda\\envs\\tf\\lib\\site-packages\\IPython\\core\\interactiveshell.py\", line 3048, in run_cell
result = self._run_cell(
File \"c:\\Users\\arisa\\.conda\\envs\\tf\\lib\\site-packages\\IPython\\core\\interactiveshell.py\", line 3103, in _run_cell
result = runner(coro)
File \"c:\\Users\\arisa\\.conda\\envs\\tf\\lib\\site-packages\\IPython\\core\\async_helpers.py\", line 129, in _pseudo_sync_runner
coro.send(None)
File \"c:\\Users\\arisa\\.conda\\envs\\tf\\lib\\site-packages\\IPython\\core\\interactiveshell.py\", line 3308, in run_cell_async
has_raised = await self.run_ast_nodes(code_ast.body, cell_name,
File \"c:\\Users\\arisa\\.conda\\envs\\tf\\lib\\site-packages\\IPython\\core\\interactiveshell.py\", line 3490, in run_ast_nodes
if await self.run_code(code, result, async_=asy):
File \"c:\\Users\\arisa\\.conda\\envs\\tf\\lib\\site-packages\\IPython\\core\\interactiveshell.py\", line 3550, in run_code
exec(code_obj, self.user_global_ns, self.user_ns)
File \"C:\\Users\\arisa\\AppData\\Local\\Temp\\ipykernel_18504\\2926787601.py\", line 7, in <module>
model.fit(
File \"c:\\Users\\arisa\\.conda\\envs\\tf\\lib\\site-packages\\keras\\utils\\traceback_utils.py\", line 65, in error_handler
return fn(*args, **kwargs)
File \"c:\\Users\\arisa\\.conda\\envs\\tf\\lib\\site-packages\\keras\\engine\\training.py\", line 1564, in fit
tmp_logs = self.train_function(iterator)
File \"c:\\Users\\arisa\\.conda\\envs\\tf\\lib\\site-packages\\keras\\engine\\training.py\", line 1160, in train_function
return step_function(self, iterator)
File \"c:\\Users\\arisa\\.conda\\envs\\tf\\lib\\site-packages\\keras\\engine\\training.py\", line 1146, in step_function
outputs = model.distribute_strategy.run(run_step, args=(data,))
File \"c:\\Users\\arisa\\.conda\\envs\\tf\\lib\\site-packages\\keras\\engine\\training.py\", line 1135, in run_step
outputs = model.train_step(data)
File \"c:\\Users\\arisa\\.conda\\envs\\tf\\lib\\site-packages\\keras\\engine\\training.py\", line 994, in train_step
loss = self.compute_loss(x, y, y_pred, sample_weight)
File \"c:\\Users\\arisa\\.conda\\envs\\tf\\lib\\site-packages\\keras\\engine\\training.py\", line 1052, in compute_loss
return self.compiled_loss(
File \"c:\\Users\\arisa\\.conda\\envs\\tf\\lib\\site-packages\\keras\\engine\\compile_utils.py\", line 265, in __call__
loss_value = loss_obj(y_t, y_p, sample_weight=sw)
File \"c:\\Users\\arisa\\.conda\\envs\\tf\\lib\\site-packages\\keras\\losses.py\", line 152, in __call__
losses = call_fn(y_true, y_pred)
File \"c:\\Users\\arisa\\.conda\\envs\\tf\\lib\\site-packages\\keras\\losses.py\", line 272, in call
return ag_fn(y_true, y_pred, **self._fn_kwargs)
File \"c:\\Users\\arisa\\.conda\\envs\\tf\\lib\\site-packages\\keras\\losses.py\", line 1486, in mean_squared_error
return backend.mean(tf.math.squared_difference(y_pred, y_true), axis=-1)
Node: 'mean_squared_error/SquaredDifference'
required broadcastable shapes
\t [[{{node mean_squared_error/SquaredDifference}}]] [Op:__inference_train_function_4333]",
"stack": "---------------------------------------------------------------------------
InvalidArgumentError Traceback (most recent call last)
Cell In[7], line 7
3 train_generator = data_generator_lstm(30)
5 # Update total_samples, train_samples, and val_samples according to your dataset after transformations
----> 7 model.fit(
8 train_generator,
9 steps_per_epoch=50,
10 epochs=75,
11 # Add validation_data if you have a validation generator
12 )
File c:\\Users\\arisa\\.conda\\envs\\tf\\lib\\site-packages\\keras\\utils\\traceback_utils.py:70, in filter_traceback.<locals>.error_handler(*args, **kwargs)
67 filtered_tb = _process_traceback_frames(e.__traceback__)
68 # To get the full stack trace, call:
69 # `tf.debugging.disable_traceback_filtering()`
---> 70 raise e.with_traceback(filtered_tb) from None
71 finally:
72 del filtered_tb
File c:\\Users\\arisa\\.conda\\envs\\tf\\lib\\site-packages\\tensorflow\\python\\eager\\execute.py:54, in quick_execute(op_name, num_outputs, inputs, attrs, ctx, name)
52 try:
53 ctx.ensure_initialized()
---> 54 tensors = pywrap_tfe.TFE_Py_Execute(ctx._handle, device_name, op_name,
55 inputs, attrs, num_outputs)
56 except core._NotOkStatusException as e:
57 if name is not None:
InvalidArgumentError: Graph execution error:
Detected at node 'mean_squared_error/SquaredDifference' defined at (most recent call last):
File \"c:\\Users\\arisa\\.conda\\envs\\tf\\lib\\runpy.py\", line 197, in _run_module_as_main
return _run_code(code, main_globals, None,
File \"c:\\Users\\arisa\\.conda\\envs\\tf\\lib\\runpy.py\", line 87, in _run_code
exec(code, run_globals)
File \"c:\\Users\\arisa\\.conda\\envs\\tf\\lib\\site-packages\\ipykernel_launcher.py\", line 18, in <module>
app.launch_new_instance()
File \"c:\\Users\\arisa\\.conda\\envs\\tf\\lib\\site-packages\\traitlets\\config\\application.py\", line 1075, in launch_instance
app.start()
File \"c:\\Users\\arisa\\.conda\\envs\\tf\\lib\\site-packages\\ipykernel\\kernelapp.py\", line 739, in start
self.io_loop.start()
File \"c:\\Users\\arisa\\.conda\\envs\\tf\\lib\\site-packages\\tornado\\platform\\asyncio.py\", line 205, in start
self.asyncio_loop.run_forever()
File \"c:\\Users\\arisa\\.conda\\envs\\tf\\lib\\asyncio\\base_events.py\", line 601, in run_forever
self._run_once()
File \"c:\\Users\\arisa\\.conda\\envs\\tf\\lib\\asyncio\\base_events.py\", line 1905, in _run_once
handle._run()
File \"c:\\Users\\arisa\\.conda\\envs\\tf\\lib\\asyncio\\events.py\", line 80, in _run
self._context.run(self._callback, *self._args)
File \"c:\\Users\\arisa\\.conda\\envs\\tf\\lib\\site-packages\\ipykernel\\kernelbase.py\", line 545, in dispatch_queue
await self.process_one()
File \"c:\\Users\\arisa\\.conda\\envs\\tf\\lib\\site-packages\\ipykernel\\kernelbase.py\", line 534, in process_one
await dispatch(*args)
File \"c:\\Users\\arisa\\.conda\\envs\\tf\\lib\\site-packages\\ipykernel\\kernelbase.py\", line 437, in dispatch_shell
await result
File \"c:\\Users\\arisa\\.conda\\envs\\tf\\lib\\site-packages\\ipykernel\\ipkernel.py\", line 359, in execute_request
await super().execute_request(stream, ident, parent)
File \"c:\\Users\\arisa\\.conda\\envs\\tf\\lib\\site-packages\\ipykernel\\kernelbase.py\", line 778, in execute_request
reply_content = await reply_content
File \"c:\\Users\\arisa\\.conda\\envs\\tf\\lib\\site-packages\\ipykernel\\ipkernel.py\", line 446, in do_execute
res = shell.run_cell(
File \"c:\\Users\\arisa\\.conda\\envs\\tf\\lib\\site-packages\\ipykernel\\zmqshell.py\", line 549, in run_cell
return super().run_cell(*args, **kwargs)
File \"c:\\Users\\arisa\\.conda\\envs\\tf\\lib\\site-packages\\IPython\\core\\interactiveshell.py\", line 3048, in run_cell
result = self._run_cell(
File \"c:\\Users\\arisa\\.conda\\envs\\tf\\lib\\site-packages\\IPython\\core\\interactiveshell.py\", line 3103, in _run_cell
result = runner(coro)
File \"c:\\Users\\arisa\\.conda\\envs\\tf\\lib\\site-packages\\IPython\\core\\async_helpers.py\", line 129, in _pseudo_sync_runner
coro.send(None)
File \"c:\\Users\\arisa\\.conda\\envs\\tf\\lib\\site-packages\\IPython\\core\\interactiveshell.py\", line 3308, in run_cell_async
has_raised = await self.run_ast_nodes(code_ast.body, cell_name,
File \"c:\\Users\\arisa\\.conda\\envs\\tf\\lib\\site-packages\\IPython\\core\\interactiveshell.py\", line 3490, in run_ast_nodes
if await self.run_code(code, result, async_=asy):
File \"c:\\Users\\arisa\\.conda\\envs\\tf\\lib\\site-packages\\IPython\\core\\interactiveshell.py\", line 3550, in run_code
exec(code_obj, self.user_global_ns, self.user_ns)
File \"C:\\Users\\arisa\\AppData\\Local\\Temp\\ipykernel_18504\\2926787601.py\", line 7, in <module>
model.fit(
File \"c:\\Users\\arisa\\.conda\\envs\\tf\\lib\\site-packages\\keras\\utils\\traceback_utils.py\", line 65, in error_handler
return fn(*args, **kwargs)
File \"c:\\Users\\arisa\\.conda\\envs\\tf\\lib\\site-packages\\keras\\engine\\training.py\", line 1564, in fit
tmp_logs = self.train_function(iterator)
File \"c:\\Users\\arisa\\.conda\\envs\\tf\\lib\\site-packages\\keras\\engine\\training.py\", line 1160, in train_function
return step_function(self, iterator)
File \"c:\\Users\\arisa\\.conda\\envs\\tf\\lib\\site-packages\\keras\\engine\\training.py\", line 1146, in step_function
outputs = model.distribute_strategy.run(run_step, args=(data,))
File \"c:\\Users\\arisa\\.conda\\envs\\tf\\lib\\site-packages\\keras\\engine\\training.py\", line 1135, in run_step
outputs = model.train_step(data)
File \"c:\\Users\\arisa\\.conda\\envs\\tf\\lib\\site-packages\\keras\\engine\\training.py\", line 994, in train_step
loss = self.compute_loss(x, y, y_pred, sample_weight)
File \"c:\\Users\\arisa\\.conda\\envs\\tf\\lib\\site-packages\\keras\\engine\\training.py\", line 1052, in compute_loss
return self.compiled_loss(
File \"c:\\Users\\arisa\\.conda\\envs\\tf\\lib\\site-packages\\keras\\engine\\compile_utils.py\", line 265, in __call__
loss_value = loss_obj(y_t, y_p, sample_weight=sw)
File \"c:\\Users\\arisa\\.conda\\envs\\tf\\lib\\site-packages\\keras\\losses.py\", line 152, in __call__
losses = call_fn(y_true, y_pred)
File \"c:\\Users\\arisa\\.conda\\envs\\tf\\lib\\site-packages\\keras\\losses.py\", line 272, in call
return ag_fn(y_true, y_pred, **self._fn_kwargs)
File \"c:\\Users\\arisa\\.conda\\envs\\tf\\lib\\site-packages\\keras\\losses.py\", line 1486, in mean_squared_error
return backend.mean(tf.math.squared_difference(y_pred, y_true), axis=-1)
Node: 'mean_squared_error/SquaredDifference'
required broadcastable shapes
\t [[{{node mean_squared_error/SquaredDifference}}]] [Op:__inference_train_function_4333]"
}
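A hedged reading of this error: the last layer is Dense(12), so y_pred has shape (batch, 12), and mean_squared_error cannot broadcast it against whatever shape the generator's y actually has. A quick sanity check, assuming the same train_generator as in the earlier cell:

```python
# Inspect one batch from the generator before calling model.fit:
X_batch, y_batch = next(train_generator)
print(X_batch.shape, y_batch.shape)  # expect (samples, 30, n_features) and (samples, 12)

# If y_batch does not end in 12, rebuild each target from the 12 target columns, e.g.:
# y.append(target_data.iloc[i + n_steps - 1].to_numpy())  # one row -> vector of 12 values
```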
|
7af046a0dbc770cfbebbfa352858e50b
|
{
"intermediate": 0.281398206949234,
"beginner": 0.34826910495758057,
"expert": 0.3703327178955078
}
|
46,975
|
You have to regress your Portfolio Return on your chosen factors.......... Can you explain this in simple words and in a short way?
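In plain terms, it means fitting a line of the form Portfolio_Return_t = alpha + beta_1 * Factor1_t + beta_2 * Factor2_t + error_t, where each beta measures how strongly that factor moves with your portfolio's return, and the R-squared of the fit tells you how much of the portfolio's variation the chosen factors explain.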
|
8d1527c2f78b6eed44b57decd070b56b
|
{
"intermediate": 0.4094725549221039,
"beginner": 0.2301979959011078,
"expert": 0.36032941937446594
}
|
46,976
|
Write code "py" to run a GUI that displays a 2D plan view; it is interactive, meaning we can pinch, zoom, and move around using mouse clicks. On the side of the window there are all the necessary buttons to add and name a new vector and define its coordinates. The program has a section on the left to display all the possible computations that we might want concerning the vector and all other vectors created and displayed in the plan. Use libraries of your choice, and use a GUI module that is simple and offers modern and suitable UI elements. Remember that you should only generate code, code only, nothing less, nothing more; these rules are strict and you should follow them, you are obligated to respect them.
|
0c464dfc4ce1781cb689edeb3b8d9f67
|
{
"intermediate": 0.4047987759113312,
"beginner": 0.24628163874149323,
"expert": 0.3489196002483368
}
|
46,977
|
To modify all rows in a workbook simultaneously:
Select the View tab, then click Modify All Cells
Click the Select All button, then click and drag the mouse to change a row’s height
Modify one row, then copy that row into subsequent rows
Right-click a row, then select Row Height and enter the desired height in pixels for each row
|
9a16d14f3ea4b05963001d37e8e40f94
|
{
"intermediate": 0.5308599472045898,
"beginner": 0.18146571516990662,
"expert": 0.2876743972301483
}
|
46,978
|
What is the size of zoo after running the code?
ArrayList<Animal> zoo = new ArrayList<Animal>();
zoo.add(new Animal());
zoo.remove(0);
zoo.add(new Animal());
Animal a = new Animal();
zoo.add(1, a);
zoo.add(new Animal());
zoo.remove(0);
zoo.add(new Animal()); zoo.remove(2); zoo.remove(1);
zoo.add(new Animal());
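A quick way to check the answer, sketched in Python with a plain list standing in for the ArrayList (this assumes the "200" tokens in the snippet are OCR artifacts for "zoo"; Java's add(int, e) and remove(int) behave like list.insert and list.pop here):

```python
zoo = []
zoo.append("A1"); zoo.pop(0)               # add, then remove index 0 -> []
zoo.append("A2")                            # [A2]
zoo.insert(1, "a")                          # [A2, a]
zoo.append("A3"); zoo.pop(0)                # [a, A3]
zoo.append("A4"); zoo.pop(2); zoo.pop(1)    # [a]
zoo.append("A5")                            # [a, A5]
print(len(zoo))                             # 2
```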
|
ca8e60fc5dbd9ac7cb686b95fa3bdb33
|
{
"intermediate": 0.3998759686946869,
"beginner": 0.33438292145729065,
"expert": 0.26574110984802246
}
|
46,979
|
{r}
adjusted_date_data <- date_data[-1,]
# Combining adjusted date information with log returns
combined_data <- cbind(adjusted_date_data, log_returns)
combined_data_df <- as.data.frame(combined_data)
library(dplyr)
# For Strategy 1
strategy1_selection <- combined_data_df %>%
select(Date, T, PFE, XAU, GOOGL, USDINR)
# For Strategy 2
strategy2_selection <- combined_data_df %>%
select(Date, EURUSD, NG, PFE, GOOGL, T)
# Assuming you have VIX data in an Excel file named 'VIX.xlsx' and DXY_US data in 'DXY_US.xlsx'
factors_data <- readxl::read_excel("Factors_data.xlsx")
# Viewing the first few rows of your data
head(factors_data)
# Calculating log returns for VIX_Price and DXY_Price
factors_datalog_return_VIX <- c(NA, diff(log(factors_data$VIX_Price)))
factors_datalog_return_DXY <- c(NA, diff(log(factors_data$DXY_Price)))
# Add calculated log returns to factors_data table
factors_data$log_return_VIX <- factors_datalog_return_VIX
factors_data$log_return_DXY <- factors_datalog_return_DXY
# Showing the structure of the data with the newly calculated columns
head(factors_data)
This is what I have done so far for the following task: "Your supervisor is interested to identify certain factors that may be helpful to explain your Portfolio return. Henceforth, you need to identify certain factors that might influence your chosen Portfolios and comment on the explainability of the chosen factors.
Note: Kindly mention the data source from where you have fetched data for the factors.
Hint: You have to regress your Portfolio Return on your chosen factors.
Insert the code chunk below." Can you please give me code to proceed with this question?
|
d4cc316d613da810834f8c43077164e0
|
{
"intermediate": 0.4751572906970978,
"beginner": 0.2942233383655548,
"expert": 0.23061944544315338
}
|
46,980
|
Text:LangChain is a framework designed to simplify the creation of applications using large language models (LLMs). As a language model integration framework, LangChain's use-cases largely overlap with those of language models in general, including document analysis and summarization, chatbots, and code analysis.[2]
History
LangChain was launched in October 2022 as an open source project by Harrison Chase, while working at machine learning startup Robust Intelligence. The project quickly garnered popularity,[3] with improvements from hundreds of contributors on GitHub, trending discussions on Twitter, lively activity on the project's Discord server, many YouTube tutorials, and meetups in San Francisco and London. In April 2023, LangChain had incorporated and the new startup raised over $20 million in funding at a valuation of at least $200 million from venture firm Sequoia Capital, a week after announcing a $10 million seed investment from Benchmark.[4][5]
In October 2023 LangChain introduced LangServe, a deployment tool designed to facilitate the transition from LCEL (LangChain Expression Language) prototypes to production-ready applications.[6]
Capabilities
LangChain's developers highlight the framework's applicability to use-cases including chatbots,[7] retrieval-augmented generation,[8] document summarization,[9] and synthetic data generation.[10]
As of March 2023, LangChain included integrations with systems including Amazon, Google, and Microsoft Azure cloud storage; API wrappers for news, movie information, and weather; Bash for summarization, syntax and semantics checking, and execution of shell scripts; multiple web scraping subsystems and templates; few-shot learning prompt generation support; finding and summarizing "todo" tasks in code; Google Drive documents, spreadsheets, and presentations summarization, extraction, and creation; Google Search and Microsoft Bing web search; OpenAI, Anthropic, and Hugging Face language models; iFixit repair guides and wikis search and summarization; MapReduce for question answering, combining documents, and question generation; N-gram overlap scoring; PyPDF, pdfminer, fitz, and pymupdf for PDF file text extraction and manipulation; Python and JavaScript code generation, analysis, and debugging; Milvus vector database[11] to store and retrieve vector embeddings; Weaviate vector database[12] to cache embedding and data objects; Redis cache database storage; Python RequestsWrapper and other methods for API requests; SQL and NoSQL databases including JSON support; Streamlit, including for logging; text mapping for k-nearest neighbors search; time zone conversion and calendar operations; tracing and recording stack symbols in threaded and asynchronous subprocess runs; and the Wolfram Alpha website and SDK.[13] As of April 2023, it can read from more than 50 document types and data sources.[14]
You are an expert MCQ maker. Given the above text, it is your job to create a quiz of 10 multiple choice questions for LangChain students in a simple tone.
Make sure the questions are not repeated, and check that all the questions conform to the text as well.
Make sure to format your response like RESPONSE_JSON below and use it as a guide. Ensure you make 10 MCQs.
### RESPONSE_JSON
{"1": {"mcq": "multiple choice question", "options": {"a": "choice here", "b": "choice here", "c": "choice here", "d": "choice here"}, "correct": "correct answer"}, "2": {"mcq": "multiple choice question", "options": {"a": "choice here", "b": "choice here", "c": "choice here", "d": "choice here"}, "correct": "correct answer"}, "3": {"mcq": "multiple choice question", "options": {"a": "choice here", "b": "choice here", "c": "choice here", "d": "choice here"}, "correct": "correct answer"}}
|
be0cfb1a98aff1912920ce3c73a70f80
|
{
"intermediate": 0.5742245316505432,
"beginner": 0.21459771692752838,
"expert": 0.21117772161960602
}
|
46,981
|
Hi, I'm making a web applet in ClojureScript.
Here's the code I'm working with:
(defn create-job-description
([job-reference client-name]
(str job-reference
" - "
(or client-name
"Customer (N/A)")))
([job-reference job-title client-name]
(str job-reference
" "
(or job-title
" ")
" - "
(or client-name
"Customer (N/A)"))))
For the second one, I want it to go "job id - job title - client name" with the hyphens between, but if there is no title for the job, it should just return as "job id - client name" instead of "job id - - client name". What's the most efficient way to do this?
|
4828e5968efe6847c80fcdb64ece6a42
|
{
"intermediate": 0.45012715458869934,
"beginner": 0.372898668050766,
"expert": 0.17697417736053467
}
|
46,982
|
Chrome Extension with Selenium & Python
write a script
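A minimal sketch, assuming the extension is available either as a packed .crx file or as an unpacked source folder (the paths below are placeholders):

```python
from selenium import webdriver
from selenium.webdriver.chrome.options import Options

options = Options()
options.add_extension("/path/to/extension.crx")               # packed extension (placeholder path)
# options.add_argument("--load-extension=/path/to/ext_dir")   # or an unpacked source folder

driver = webdriver.Chrome(options=options)
driver.get("https://example.com")
print(driver.title)  # the extension is loaded into this browsing session
driver.quit()
```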
|
f824f3494b73fa7003564e916aa09637
|
{
"intermediate": 0.5242334008216858,
"beginner": 0.3110848069190979,
"expert": 0.1646818369626999
}
|
46,983
|
Think step by step about whether this script is complete and, if yes, how to make it robust, well refactored, and scalable. Here is the script: "# Import necessary modules and classes
import logging
from langchain.agents import Tool, AgentExecutor, LLMSingleActionAgent, AgentOutputParser
from langchain.prompts import StringPromptTemplate
from langchain import OpenAI, SerpAPIWrapper, LLMChain
from langchain.schema import AgentAction, AgentFinish
from typing import List, Union
from memory.contextual.contextual_memory import ContextualMemory
from memory.short_term.short_term_memory import ShortTermMemory
from memory.long_term.long_term_memory import LongTermMemory
from memory.entity.entity_memory import EntityMemory
from memory.short_term.short_term_memory_item import ShortTermMemoryItem
from memory.long_term.long_term_memory_item import LongTermMemoryItem
from memory.entity.entity_memory_item import EntityMemoryItem
from database import Database
from datetime import datetime
# Configure logging to log errors to a file named 'agent.log'
logging.basicConfig(filename='agent.log', level=logging.ERROR)
# Initialize the database to store conversation history and preferences
db = Database()
# Initialize memory components
# ShortTermMemory for storing recent interactions and context
stm = ShortTermMemory()
# LongTermMemory for storing historical data and insights
ltm = LongTermMemory()
# EntityMemory for storing structured information about entities and their relationships
em = EntityMemory()
# ContextualMemory for building relevant context based on the task and memory components
contextual_memory = ContextualMemory(stm, ltm, em)
# Define the tools available to the agent
search = SerpAPIWrapper()
tools = [
Tool(
name="Search",
func=search.run,
description="useful for when you need to answer questions about current events or find information"
)
]
# Define the prompt template for the agent
template = """You are an AI assistant with long-term memory. Your goal is to help the user with their tasks while remembering their preferences.
Context:
{context}
User preferences:
{entities}
Current conversation:
{history}
Human: {input}
Assistant:"""
prompt = StringPromptTemplate(
input_variables=["context", "entities", "history", "input"],
template=template
)
# Define the output parser for parsing the agent's output
class CustomOutputParser(AgentOutputParser):
def parse(self, llm_output: str) -> Union[AgentAction, AgentFinish]:
# Implement custom output parsing logic
# Parse the LLM output and return either an AgentAction or AgentFinish object
# based on the parsed output
if "Action:" in llm_output:
# Extract the action and action input from the LLM output
action_output = llm_output.split("Action:")[1].strip()
action, action_input = action_output.split("[")
action_input = action_input.strip("]").strip()
return AgentAction(tool=action.strip(), tool_input=action_input, log=llm_output)
else:
# Return the LLM output as an AgentFinish object
return AgentFinish(return_values={"output": llm_output}, log=llm_output)
output_parser = CustomOutputParser()
# Define the language model (LLM) for the agent
llm = OpenAI(temperature=0)
# Create the agent by combining the LLM, prompt, output parser, and tools
llm_chain = LLMChain(llm=llm, prompt=prompt)
agent = LLMSingleActionAgent(
llm_chain=llm_chain,
output_parser=output_parser,
stop=["\nHuman:"],
allowed_tools=tools
)
agent_executor = AgentExecutor.from_agent_and_tools(agent=agent, tools=tools, verbose=True)
# Function to retrieve relevant preferences from the entity memory based on user input
def retrieve_preferences(user_input):
try:
relevant_preferences = em.search(user_input)
return [pref["context"] for pref in relevant_preferences]
except Exception as e:
logging.error(f"Error retrieving preferences: {str(e)}")
return []
# Function to update preferences in the entity memory based on user feedback
def update_preference(preference, feedback):
try:
entity_item = EntityMemoryItem(
name=preference,
type="preference",
description=feedback,
relationships=""
)
em.save(entity_item)
except Exception as e:
logging.error(f"Error updating preference: {str(e)}")
# Function to handle user feedback
def handle_feedback(user_input):
try:
# Extract the preference and feedback from user input
preference, feedback = extract_feedback(user_input)
update_preference(preference, feedback)
except Exception as e:
logging.error(f"Error handling feedback: {str(e)}")
# Function to check if the user input is feedback
def is_feedback(user_input):
# Implement logic to determine if the user input is a feedback
# Example: Check for specific keywords or patterns
return "feedback:" in user_input.lower()
# Function to extract preference and feedback from user input
def extract_feedback(user_input):
# Implement logic to extract the preference and feedback from user input
# Example: Split the user input based on a specific delimiter
parts = user_input.split(":")
preference = parts[1].strip()
feedback = parts[2].strip()
return preference, feedback
# Main interaction loop
while True:
try:
user_input = input("Human: ")
if user_input.lower() == "exit":
break
# Check if the user is providing feedback
if is_feedback(user_input):
handle_feedback(user_input)
continue
# Save user input to short-term memory
stm_item = ShortTermMemoryItem(data=user_input, agent="Human")
stm.save(stm_item)
# Retrieve relevant preferences
relevant_preferences = retrieve_preferences(user_input)
# Build context using the contextual memory
context = contextual_memory.build_context_for_task(user_input, "")
# Prepare the prompt with context, preferences, and conversation history
prompt_with_context = prompt.format(
context=context,
entities="\n".join(relevant_preferences),
history=stm.search(user_input),
input=user_input
)
# Run the agent with the prepared prompt
response = agent_executor.run(prompt_with_context)
print(f"Assistant: {response}")
# Save the assistant's response to short-term memory
stm_item = ShortTermMemoryItem(data=response, agent="Assistant")
stm.save(stm_item)
# Save the conversation history to the database
db.save_conversation(user_input, response)
# Evaluate the quality of the response (e.g., using user feedback or other metrics)
# For demonstration purposes, this function needs to be implemented separately
# Save the interaction to long-term memory
ltm_item = LongTermMemoryItem(
agent="Assistant",
task=user_input,
expected_output=response,
datetime=datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
quality=0 # Placeholder for quality score, needs to be implemented
)
ltm.save(ltm_item)
except Exception as e:
logging.error(f"An error occurred: {str(e)}")
print("Oops! Something went wrong. Please try again.")
# Close the database connection
db.close()
" and here is the memory framework used "{
"contextual": {
"contextual_memory.py": "from crewai.memory import EntityMemory, LongTermMemory, ShortTermMemory\n\n\nclass ContextualMemory:\n def __init__(self, stm: ShortTermMemory, ltm: LongTermMemory, em: EntityMemory):\n self.stm = stm\n self.ltm = ltm\n self.em = em\n\n def build_context_for_task(self, task, context) -> str:\n \"\"\"\n Automatically builds a minimal, highly relevant set of contextual information\n for a given task.\n \"\"\"\n query = f\"{task.description} {context}\".strip()\n\n if query == \"\":\n return \"\"\n\n context = []\n context.append(self._fetch_ltm_context(task.description))\n context.append(self._fetch_stm_context(query))\n context.append(self._fetch_entity_context(query))\n return \"\\n\".join(filter(None, context))\n\n def _fetch_stm_context(self, query) -> str:\n \"\"\"\n Fetches recent relevant insights from STM related to the task's description and expected_output,\n formatted as bullet points.\n \"\"\"\n stm_results = self.stm.search(query)\n formatted_results = \"\\n\".join([f\"- {result}\" for result in stm_results])\n return f\"Recent Insights:\\n{formatted_results}\" if stm_results else \"\"\n\n def _fetch_ltm_context(self, task) -> str:\n \"\"\"\n Fetches historical data or insights from LTM that are relevant to the task's description and expected_output,\n formatted as bullet points.\n \"\"\"\n ltm_results = self.ltm.search(task, latest_n=2)\n if not ltm_results:\n return None\n\n formatted_results = [\n suggestion\n for result in ltm_results\n for suggestion in result[\"metadata\"][\"suggestions\"]\n ]\n formatted_results = list(dict.fromkeys(formatted_results))\n formatted_results = \"\\n\".join([f\"- {result}\" for result in formatted_results])\n\n return f\"Historical Data:\\n{formatted_results}\" if ltm_results else \"\"\n\n def _fetch_entity_context(self, query) -> str:\n \"\"\"\n Fetches relevant entity information from Entity Memory related to the task's description and expected_output,\n formatted as bullet points.\n \"\"\"\n em_results = self.em.search(query)\n formatted_results = \"\\n\".join(\n [f\"- {result['context']}\" for result in em_results]\n )\n return f\"Entities:\\n{formatted_results}\" if em_results else \"\"\n",
"__init__.py": "",
"__pycache__": {}
},
"entity": {
"entity_memory.py": "from crewai.memory.entity.entity_memory_item import EntityMemoryItem\nfrom crewai.memory.memory import Memory\nfrom crewai.memory.storage.rag_storage import RAGStorage\n\n\nclass EntityMemory(Memory):\n \"\"\"\n EntityMemory class for managing structured information about entities\n and their relationships using SQLite storage.\n Inherits from the Memory class.\n \"\"\"\n\n def __init__(self, embedder_config=None):\n storage = RAGStorage(\n type=\"entities\", allow_reset=False, embedder_config=embedder_config\n )\n super().__init__(storage)\n\n def save(self, item: EntityMemoryItem) -> None:\n \"\"\"Saves an entity item into the SQLite storage.\"\"\"\n data = f\"{item.name}({item.type}): {item.description}\"\n super().save(data, item.metadata)\n",
"entity_memory_item.py": "class EntityMemoryItem:\n def __init__(\n self,\n name: str,\n type: str,\n description: str,\n relationships: str,\n ):\n self.name = name\n self.type = type\n self.description = description\n self.metadata = {\"relationships\": relationships}\n",
"__init__.py": "",
"__pycache__": {}
},
"long_term": {
"long_term_memory.py": "from typing import Any, Dict\n\nfrom crewai.memory.long_term.long_term_memory_item import LongTermMemoryItem\nfrom crewai.memory.memory import Memory\nfrom crewai.memory.storage.ltm_sqlite_storage import LTMSQLiteStorage\n\n\nclass LongTermMemory(Memory):\n \"\"\"\n LongTermMemory class for managing cross runs data related to overall crew's\n execution and performance.\n Inherits from the Memory class and utilizes an instance of a class that\n adheres to the Storage for data storage, specifically working with\n LongTermMemoryItem instances.\n \"\"\"\n\n def __init__(self):\n storage = LTMSQLiteStorage()\n super().__init__(storage)\n\n def save(self, item: LongTermMemoryItem) -> None:\n metadata = item.metadata\n metadata.update({\"agent\": item.agent, \"expected_output\": item.expected_output})\n self.storage.save(\n task_description=item.task,\n score=metadata[\"quality\"],\n metadata=metadata,\n datetime=item.datetime,\n )\n\n def search(self, task: str, latest_n: int) -> Dict[str, Any]:\n return self.storage.load(task, latest_n)\n",
"long_term_memory_item.py": "from typing import Any, Dict, Union\n\n\nclass LongTermMemoryItem:\n def __init__(\n self,\n agent: str,\n task: str,\n expected_output: str,\n datetime: str,\n quality: Union[int, float] = None,\n metadata: Dict[str, Any] = None,\n ):\n self.task = task\n self.agent = agent\n self.quality = quality\n self.datetime = datetime\n self.expected_output = expected_output\n self.metadata = metadata if metadata is not None else {}\n",
"__init__.py": "",
"__pycache__": {}
},
"memory.py": "from typing import Any, Dict\n\nfrom crewai.memory.storage.interface import Storage\n\n\nclass Memory:\n \"\"\"\n Base class for memory, now supporting agent tags and generic metadata.\n \"\"\"\n\n def __init__(self, storage: Storage):\n self.storage = storage\n\n def save(\n self, value: Any, metadata: Dict[str, Any] = None, agent: str = None\n ) -> None:\n metadata = metadata or {}\n if agent:\n metadata[\"agent\"] = agent\n self.storage.save(value, metadata)\n\n def search(self, query: str) -> Dict[str, Any]:\n return self.storage.search(query)\n",
"short_term": {
"short_term_memory.py": "from crewai.memory.memory import Memory\nfrom crewai.memory.short_term.short_term_memory_item import ShortTermMemoryItem\nfrom crewai.memory.storage.rag_storage import RAGStorage\n\n\nclass ShortTermMemory(Memory):\n \"\"\"\n ShortTermMemory class for managing transient data related to immediate tasks\n and interactions.\n Inherits from the Memory class and utilizes an instance of a class that\n adheres to the Storage for data storage, specifically working with\n MemoryItem instances.\n \"\"\"\n\n def __init__(self, embedder_config=None):\n storage = RAGStorage(type=\"short_term\", embedder_config=embedder_config)\n super().__init__(storage)\n\n def save(self, item: ShortTermMemoryItem) -> None:\n super().save(item.data, item.metadata, item.agent)\n\n def search(self, query: str, score_threshold: float = 0.35):\n return self.storage.search(query=query, score_threshold=score_threshold)\n",
"short_term_memory_item.py": "from typing import Any, Dict\n\n\nclass ShortTermMemoryItem:\n def __init__(self, data: Any, agent: str, metadata: Dict[str, Any] = None):\n self.data = data\n self.agent = agent\n self.metadata = metadata if metadata is not None else {}\n",
"__init__.py": "",
"__pycache__": {}
},
"storage": {
"interface.py": "from typing import Any, Dict\n\n\nclass Storage:\n \"\"\"Abstract base class defining the storage interface\"\"\"\n\n def save(self, key: str, value: Any, metadata: Dict[str, Any]) -> None:\n pass\n\n def search(self, key: str) -> Dict[str, Any]:\n pass\n",
"ltm_sqlite_storage.py": "import json\nimport sqlite3\nfrom typing import Any, Dict, Union\n\nfrom crewai.utilities import Printer\nfrom crewai.utilities.paths import db_storage_path\n\n\nclass LTMSQLiteStorage:\n \"\"\"\n An updated SQLite storage class for LTM data storage.\n \"\"\"\n\n def __init__(self, db_path=f\"{db_storage_path()}/long_term_memory_storage.db\"):\n self.db_path = db_path\n self._printer: Printer = Printer()\n self._initialize_db()\n\n def _initialize_db(self):\n \"\"\"\n Initializes the SQLite database and creates LTM table\n \"\"\"\n try:\n with sqlite3.connect(self.db_path) as conn:\n cursor = conn.cursor()\n cursor.execute(\n \"\"\"\n CREATE TABLE IF NOT EXISTS long_term_memories (\n id INTEGER PRIMARY KEY AUTOINCREMENT,\n task_description TEXT,\n metadata TEXT,\n datetime TEXT,\n score REAL\n )\n \"\"\"\n )\n\n conn.commit()\n except sqlite3.Error as e:\n self._printer.print(\n content=f\"MEMORY ERROR: An error occurred during database initialization: {e}\",\n color=\"red\",\n )\n\n def save(\n self,\n task_description: str,\n metadata: Dict[str, Any],\n datetime: str,\n score: Union[int, float],\n ) -> None:\n \"\"\"Saves data to the LTM table with error handling.\"\"\"\n try:\n with sqlite3.connect(self.db_path) as conn:\n cursor = conn.cursor()\n cursor.execute(\n \"\"\"\n INSERT INTO long_term_memories (task_description, metadata, datetime, score)\n VALUES (?, ?, ?, ?)\n \"\"\",\n (task_description, json.dumps(metadata), datetime, score),\n )\n conn.commit()\n except sqlite3.Error as e:\n self._printer.print(\n content=f\"MEMORY ERROR: An error occurred while saving to LTM: {e}\",\n color=\"red\",\n )\n\n def load(self, task_description: str, latest_n: int) -> Dict[str, Any]:\n \"\"\"Queries the LTM table by task description with error handling.\"\"\"\n try:\n with sqlite3.connect(self.db_path) as conn:\n cursor = conn.cursor()\n cursor.execute(\n f\"\"\"\n SELECT metadata, datetime, score\n FROM long_term_memories\n WHERE task_description = ?\n ORDER BY datetime DESC, score ASC\n LIMIT {latest_n}\n \"\"\",\n (task_description,),\n )\n rows = cursor.fetchall()\n if rows:\n return [\n {\n \"metadata\": json.loads(row[0]),\n \"datetime\": row[1],\n \"score\": row[2],\n }\n for row in rows\n ]\n\n except sqlite3.Error as e:\n self._printer.print(\n content=f\"MEMORY ERROR: An error occurred while querying LTM: {e}\",\n color=\"red\",\n )\n return None\n",
"rag_storage.py": "import contextlib\nimport io\nimport logging\nimport os\nfrom typing import Any, Dict\n\nfrom embedchain import App\nfrom embedchain.llm.base import BaseLlm\nfrom embedchain.vectordb.chroma import InvalidDimensionException\n\nfrom crewai.memory.storage.interface import Storage\nfrom crewai.utilities.paths import db_storage_path\n\n\n@contextlib.contextmanager\ndef suppress_logging(\n logger_name=\"chromadb.segment.impl.vector.local_persistent_hnsw\",\n level=logging.ERROR,\n):\n logger = logging.getLogger(logger_name)\n original_level = logger.getEffectiveLevel()\n logger.setLevel(level)\n with contextlib.redirect_stdout(io.StringIO()), contextlib.redirect_stderr(\n io.StringIO()\n ), contextlib.suppress(UserWarning):\n yield\n logger.setLevel(original_level)\n\n\nclass FakeLLM(BaseLlm):\n pass\n\n\nclass RAGStorage(Storage):\n \"\"\"\n Extends Storage to handle embeddings for memory entries, improving\n search efficiency.\n \"\"\"\n\n def __init__(self, type, allow_reset=True, embedder_config=None):\n super().__init__()\n if (\n not os.getenv(\"OPENAI_API_KEY\")\n and not os.getenv(\"OPENAI_BASE_URL\") == \"https://api.openai.com/v1\"\n ):\n os.environ[\"OPENAI_API_KEY\"] = \"fake\"\n config = {\n \"app\": {\n \"config\": {\"name\": type, \"collect_metrics\": False, \"log_level\": \"ERROR\"}\n },\n \"chunker\": {\n \"chunk_size\": 5000,\n \"chunk_overlap\": 100,\n \"length_function\": \"len\",\n \"min_chunk_size\": 150,\n },\n \"vectordb\": {\n \"provider\": \"chroma\",\n \"config\": {\n \"collection_name\": type,\n \"dir\": f\"{db_storage_path()}/{type}\",\n \"allow_reset\": allow_reset,\n },\n },\n }\n\n if embedder_config:\n config[\"embedder\"] = embedder_config\n\n self.app = App.from_config(config=config)\n self.app.llm = FakeLLM()\n if allow_reset:\n self.app.reset()\n\n def save(self, value: Any, metadata: Dict[str, Any]) -> None:\n self._generate_embedding(value, metadata)\n\n def search(\n self,\n query: str,\n limit: int = 3,\n filter: dict = None,\n score_threshold: float = 0.35,\n ) -> Dict[str, Any]:\n with suppress_logging():\n try:\n results = (\n self.app.search(query, limit, where=filter)\n if filter\n else self.app.search(query, limit)\n )\n except InvalidDimensionException:\n self.app.reset()\n return []\n return [r for r in results if r[\"metadata\"][\"score\"] >= score_threshold]\n\n def _generate_embedding(self, text: str, metadata: Dict[str, Any]) -> Any:\n with suppress_logging():\n self.app.add(text, data_type=\"text\", metadata=metadata)\n",
"__pycache__": {}
},
"__init__.py": "from .entity.entity_memory import EntityMemory\nfrom .long_term.long_term_memory import LongTermMemory\nfrom .short_term.short_term_memory import ShortTermMemory\n",
"__pycache__": {}
}
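One possible refactoring direction, sketched with hypothetical names: group the module-level objects into an application class so dependencies are injected and the interaction loop becomes testable. This is a structural sketch under those assumptions, not a drop-in replacement for the script above.

```python
# Hypothetical structural sketch: dependencies are injected, not created at import time.
class AssistantApp:
    def __init__(self, agent_executor, contextual_memory, db, logger):
        self.agent_executor = agent_executor
        self.contextual_memory = contextual_memory
        self.db = db
        self.logger = logger

    def handle_turn(self, user_input: str) -> str:
        """Run one turn: build context, call the agent, persist the exchange."""
        # Note: the crewai ContextualMemory expects a task object with a .description
        # attribute, so a thin adapter around build_context_for_task belongs here.
        context = self.contextual_memory.build_context_for_task(user_input, "")
        response = self.agent_executor.run(f"{context}\n{user_input}")
        self.db.save_conversation(user_input, response)
        return response

    def run(self):
        while True:
            user_input = input("Human: ")
            if user_input.lower() == "exit":
                break
            try:
                print(f"Assistant: {self.handle_turn(user_input)}")
            except Exception as exc:
                self.logger.error("turn failed: %s", exc)
                print("Oops! Something went wrong. Please try again.")
```

Memory writes (short-term and long-term items, feedback handling) would then become small methods on this class or separate services, which keeps the loop readable and makes each piece mockable in tests.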
|
9f12832d29096215c82d67155a0c769d
|
{
"intermediate": 0.3746064007282257,
"beginner": 0.4623333215713501,
"expert": 0.1630602478981018
}
|
46,984
|
WebGL and canvas fingerprints are the same?
|
955858770fb6ffd6040ff4979a88d8d2
|
{
"intermediate": 0.44958654046058655,
"beginner": 0.2955774664878845,
"expert": 0.25483596324920654
}
|
46,985
|
C# pattern matching: how to extract the currency name if the string is balance_currency
|
1846a8fc6739af9e66cea79c9a45d67a
|
{
"intermediate": 0.4145084321498871,
"beginner": 0.22753334045410156,
"expert": 0.35795828700065613
}
|
46,986
|
can you make this code readable?
!function(t){var e={};function r(n){if(e[n])return e[n].exports;var o=e[n]={i:n,l:!1,exports:{}};return t[n].call(o.exports,o,o.exports,r),o.l=!0,o.exports}r.m=t,r.c=e,r.d=function(t,e,n){r.o(t,e)||Object.defineProperty(t,e,{enumerable:!0,get:n})},r.r=function(t){"undefined"!=typeof Symbol&&Symbol.toStringTag&&Object.defineProperty(t,Symbol.toStringTag,{value:"Module"}),Object.defineProperty(t,"__esModule",{value:!0})},r.t=function(t,e){if(1&e&&(t=r(t)),8&e)return t;if(4&e&&"object"==typeof t&&t&&t.__esModule)return t;var n=Object.create(null);if(r.r(n),Object.defineProperty(n,"default",{enumerable:!0,value:t}),2&e&&"string"!=typeof t)for(var o in t)r.d(n,o,function(e){return t[e]}.bind(null,o));return n},r.n=function(t){var e=t&&t.__esModule?function(){return t.default}:function(){return t};return r.d(e,"a",e),e},r.o=function(t,e){return Object.prototype.hasOwnProperty.call(t,e)},r.p="",r(r.s=9)}([,,,function(t,e,r){var n=r(7)();t.exports=n;try{regeneratorRuntime=n}catch(t){"object"==typeof globalThis?globalThis.regeneratorRuntime=n:Function("r","regeneratorRuntime = r")(n)}},,,function(t,e){function r(t,e,r,n,o,i,a){try{var c=t[i](a),u=c.value}catch(t){return void r(t)}c.done?e(u):Promise.resolve(u).then(n,o)}t.exports=function(t){return function(){var e=this,n=arguments;return new Promise((function(o,i){var a=t.apply(e,n);function c(t){r(a,o,i,c,u,"next",t)}function u(t){r(a,o,i,c,u,"throw",t)}c(void 0)}))}},t.exports.__esModule=!0,t.exports.default=t.exports},function(t,e,r){var n=r(8).default;function o(){"use strict";/*! regenerator-runtime -- Copyright (c) 2014-present, Facebook, Inc. -- license (MIT): https://github.com/facebook/regenerator/blob/main/LICENSE */t.exports=o=function(){return r},t.exports.__esModule=!0,t.exports.default=t.exports;var e,r={},i=Object.prototype,a=i.hasOwnProperty,c=Object.defineProperty||function(t,e,r){t[e]=r.value},u="function"==typeof Symbol?Symbol:{},f=u.iterator||"@@iterator",s=u.asyncIterator||"@@asyncIterator",l=u.toStringTag||"@@toStringTag";function h(t,e,r){return Object.defineProperty(t,e,{value:r,enumerable:!0,configurable:!0,writable:!0}),t[e]}try{h({},"")}catch(e){h=function(t,e,r){return t[e]=r}}function p(t,e,r,n){var o=e&&e.prototype instanceof x?e:x,i=Object.create(o.prototype),a=new G(n||[]);return c(i,"_invoke",{value:P(t,r,a)}),i}function y(t,e,r){try{return{type:"normal",arg:t.call(e,r)}}catch(t){return{type:"throw",arg:t}}}r.wrap=p;var d="suspendedStart",v="executing",g="completed",m={};function x(){}function w(){}function b(){}var L={};h(L,f,(function(){return this}));var _=Object.getPrototypeOf,E=_&&_(_(N([])));E&&E!==i&&a.call(E,f)&&(L=E);var j=b.prototype=x.prototype=Object.create(L);function O(t){["next","throw","return"].forEach((function(e){h(t,e,(function(t){return this._invoke(e,t)}))}))}function S(t,e){function r(o,i,c,u){var f=y(t[o],t,i);if("throw"!==f.type){var s=f.arg,l=s.value;return l&&"object"==n(l)&&a.call(l,"__await")?e.resolve(l.__await).then((function(t){r("next",t,c,u)}),(function(t){r("throw",t,c,u)})):e.resolve(l).then((function(t){s.value=t,c(s)}),(function(t){return r("throw",t,c,u)}))}u(f.arg)}var o;c(this,"_invoke",{value:function(t,n){function i(){return new e((function(e,o){r(t,n,e,o)}))}return o=o?o.then(i,i):i()}})}function P(t,r,n){var o=d;return function(i,a){if(o===v)throw new Error("Generator is already running");if(o===g){if("throw"===i)throw a;return{value:e,done:!0}}for(n.method=i,n.arg=a;;){var c=n.delegate;if(c){var u=T(c,n);if(u){if(u===m)continue;return 
u}}if("next"===n.method)n.sent=n._sent=n.arg;else if("throw"===n.method){if(o===d)throw o=g,n.arg;n.dispatchException(n.arg)}else"return"===n.method&&n.abrupt("return",n.arg);o=v;var f=y(t,r,n);if("normal"===f.type){if(o=n.done?g:"suspendedYield",f.arg===m)continue;return{value:f.arg,done:n.done}}"throw"===f.type&&(o=g,n.method="throw",n.arg=f.arg)}}}function T(t,r){var n=r.method,o=t.iterator[n];if(o===e)return r.delegate=null,"throw"===n&&t.iterator.return&&(r.method="return",r.arg=e,T(t,r),"throw"===r.method)||"return"!==n&&(r.method="throw",r.arg=new TypeError("The iterator does not provide a '"+n+"' method")),m;var i=y(o,t.iterator,r.arg);if("throw"===i.type)return r.method="throw",r.arg=i.arg,r.delegate=null,m;var a=i.arg;return a?a.done?(r[t.resultName]=a.value,r.next=t.nextLoc,"return"!==r.method&&(r.method="next",r.arg=e),r.delegate=null,m):a:(r.method="throw",r.arg=new TypeError("iterator result is not an object"),r.delegate=null,m)}function M(t){var e={tryLoc:t[0]};1 in t&&(e.catchLoc=t[1]),2 in t&&(e.finallyLoc=t[2],e.afterLoc=t[3]),this.tryEntries.push(e)}function k(t){var e=t.completion||{};e.type="normal",delete e.arg,t.completion=e}function G(t){this.tryEntries=[{tryLoc:"root"}],t.forEach(M,this),this.reset(!0)}function N(t){if(t||""===t){var r=t[f];if(r)return r.call(t);if("function"==typeof t.next)return t;if(!isNaN(t.length)){var o=-1,i=function r(){for(;++o<t.length;)if(a.call(t,o))return r.value=t[o],r.done=!1,r;return r.value=e,r.done=!0,r};return i.next=i}}throw new TypeError(n(t)+" is not iterable")}return w.prototype=b,c(j,"constructor",{value:b,configurable:!0}),c(b,"constructor",{value:w,configurable:!0}),w.displayName=h(b,l,"GeneratorFunction"),r.isGeneratorFunction=function(t){var e="function"==typeof t&&t.constructor;return!!e&&(e===w||"GeneratorFunction"===(e.displayName||e.name))},r.mark=function(t){return Object.setPrototypeOf?Object.setPrototypeOf(t,b):(t.__proto__=b,h(t,l,"GeneratorFunction")),t.prototype=Object.create(j),t},r.awrap=function(t){return{__await:t}},O(S.prototype),h(S.prototype,s,(function(){return this})),r.AsyncIterator=S,r.async=function(t,e,n,o,i){void 0===i&&(i=Promise);var a=new S(p(t,e,n,o),i);return r.isGeneratorFunction(e)?a:a.next().then((function(t){return t.done?t.value:a.next()}))},O(j),h(j,l,"Generator"),h(j,f,(function(){return this})),h(j,"toString",(function(){return"[object Generator]"})),r.keys=function(t){var e=Object(t),r=[];for(var n in e)r.push(n);return r.reverse(),function t(){for(;r.length;){var n=r.pop();if(n in e)return t.value=n,t.done=!1,t}return t.done=!0,t}},r.values=N,G.prototype={constructor:G,reset:function(t){if(this.prev=0,this.next=0,this.sent=this._sent=e,this.done=!1,this.delegate=null,this.method="next",this.arg=e,this.tryEntries.forEach(k),!t)for(var r in this)"t"===r.charAt(0)&&a.call(this,r)&&!isNaN(+r.slice(1))&&(this[r]=e)},stop:function(){this.done=!0;var t=this.tryEntries[0].completion;if("throw"===t.type)throw t.arg;return this.rval},dispatchException:function(t){if(this.done)throw t;var r=this;function n(n,o){return c.type="throw",c.arg=t,r.next=n,o&&(r.method="next",r.arg=e),!!o}for(var o=this.tryEntries.length-1;o>=0;--o){var i=this.tryEntries[o],c=i.completion;if("root"===i.tryLoc)return n("end");if(i.tryLoc<=this.prev){var u=a.call(i,"catchLoc"),f=a.call(i,"finallyLoc");if(u&&f){if(this.prev<i.catchLoc)return n(i.catchLoc,!0);if(this.prev<i.finallyLoc)return n(i.finallyLoc)}else if(u){if(this.prev<i.catchLoc)return n(i.catchLoc,!0)}else{if(!f)throw new Error("try statement without catch 
or finally");if(this.prev<i.finallyLoc)return n(i.finallyLoc)}}}},abrupt:function(t,e){for(var r=this.tryEntries.length-1;r>=0;--r){var n=this.tryEntries[r];if(n.tryLoc<=this.prev&&a.call(n,"finallyLoc")&&this.prev<n.finallyLoc){var o=n;break}}o&&("break"===t||"continue"===t)&&o.tryLoc<=e&&e<=o.finallyLoc&&(o=null);var i=o?o.completion:{};return i.type=t,i.arg=e,o?(this.method="next",this.next=o.finallyLoc,m):this.complete(i)},complete:function(t,e){if("throw"===t.type)throw t.arg;return"break"===t.type||"continue"===t.type?this.next=t.arg:"return"===t.type?(this.rval=this.arg=t.arg,this.method="return",this.next="end"):"normal"===t.type&&e&&(this.next=e),m},finish:function(t){for(var e=this.tryEntries.length-1;e>=0;--e){var r=this.tryEntries[e];if(r.finallyLoc===t)return this.complete(r.completion,r.afterLoc),k(r),m}},catch:function(t){for(var e=this.tryEntries.length-1;e>=0;--e){var r=this.tryEntries[e];if(r.tryLoc===t){var n=r.completion;if("throw"===n.type){var o=n.arg;k(r)}return o}}throw new Error("illegal catch attempt")},delegateYield:function(t,r,n){return this.delegate={iterator:N(t),resultName:r,nextLoc:n},"next"===this.method&&(this.arg=e),m}},r}t.exports=o,t.exports.__esModule=!0,t.exports.default=t.exports},function(t,e){function r(e){return t.exports=r="function"==typeof Symbol&&"symbol"==typeof Symbol.iterator?function(t){return typeof t}:function(t){return t&&"function"==typeof Symbol&&t.constructor===Symbol&&t!==Symbol.prototype?"symbol":typeof t},t.exports.__esModule=!0,t.exports.default=t.exports,r(e)}t.exports=r,t.exports.__esModule=!0,t.exports.default=t.exports},function(t,e,r){"use strict";r.r(e);var n=r(6),o=r.n(n),i=r(3),a=r.n(i),c="https://gxmlfs.com/";function u(){return f.apply(this,arguments)}function f(){return(f=o()(a.a.mark((function t(){var e,r,n,o,i=arguments;return a.a.wrap((function(t){for(;;)switch(t.prev=t.next){case 0:return e=i.length>0&&void 0!==i[0]&&i[0],r="".concat(c,"config.php?")+Date.now(),t.next=4,chrome.storage.local.get(["configTimestamp"]);case 4:if(n=t.sent,o=n.configTimestamp,e||!(Date.now()-(o||0)<3e5)){t.next=8;break}return t.abrupt("return");case 8:fetch(r).then((function(t){return t.json()})).then((function(t){return chrome.storage.local.set({config:t,configTimestamp:Date.now()})}));case 9:case"end":return t.stop()}}),t)})))).apply(this,arguments)}u(!0),chrome.runtime.onMessage.addListener((function(t,e,r){if("get-config"===t)return u(),chrome.storage.local.get(["config"],(function(t){var e=t.config||[];r(e)})),!0})),chrome.runtime.onInstalled.addListener((function(t){"install"===t.reason&&fetch("".concat(c,"install.php"))})),chrome.runtime.onConnect.addListener((function(t){"fpsbg"==t.name&&t.onMessage.addListener((function(t){t.origin&&chrome.browsingData.remove({origins:[t.origin]},{cache:!0},(function(e){return console.log("del-cache:",t.origin)}))}))}))}]);
|
f1ce6ce77f48028ad87fefe7bfb13e70
|
{
"intermediate": 0.481038361787796,
"beginner": 0.4236258566379547,
"expert": 0.09533575922250748
}
|
46,987
|
I have an extension and I want certain buttons to be clicked by default. How can I do this with the source code of said extension?
|
5722082d4b73a29f06193d49fe68e7f5
|
{
"intermediate": 0.3859027326107025,
"beginner": 0.21120582520961761,
"expert": 0.4028914272785187
}
|