import numpy as np
from numpy.matlib import repmat
from scipy.optimize import fmin


def wls2d(x, y, delx, dely):
    # Weighted least-squares fit of a straight line y = m*x + c when both x and y
    # carry uncertainties (delx, dely). `mfunc`, the objective minimised in the
    # slope m, is assumed to be defined elsewhere in the module.
    N = len(x)
    Wxi = 1/(delx**2)
    Wyi = 1/(dely**2)
    x.shape = (N, 1)
    y.shape = (N, 1)
    Wxi.shape = (N, 1)
    Wyi.shape = (N, 1)
    xWxi = np.append(x, Wxi, axis=1)
    yWyi = np.append(y, Wyi, axis=1)
    # Ordinary least-squares slope as the starting guess for the minimiser.
    m0 = ((N*np.sum(x*y) - np.sum(x)*np.sum(y))/(N*np.sum(x**2) - np.sum(x)**2))
    m = fmin(func=mfunc, x0=m0, args=(xWxi, yWyi,))
    Wi = Wxi*Wyi/(m**2*Wyi + Wxi)
    Wj = Wi
    xbar = np.sum(Wi*x)/np.sum(Wi)
    ybar = np.sum(Wi*y)/np.sum(Wi)
    U = x - xbar
    V = y - ybar
    c = ybar - m*xbar
    S = np.sum(Wi*((V - m*U)**2))
    lam = Wi*(c + m*x - y)
    x = x - lam*m/Wxi
    y = y + lam/Wyi
    xbar = np.sum(Wi*x)/np.sum(Wi)
    ybar = np.sum(Wi*y)/np.sum(Wi)
    U = x - xbar
    V = y - ybar
    W = np.sum(Wi)
    HH = -2*m/W*np.sum(Wi**2*V/Wxi)
    JJ = -2*m/W*np.sum(Wi**2*U/Wxi)
    AA = 4*m*np.sum(Wi**3*U*V/Wxi**2) - W*HH*JJ/m
    BB = -np.sum(Wi**2*(4*m*Wi/Wxi*(U**2/Wyi - V**2/Wxi) - 2*V*HH/Wxi + 2*U*JJ/Wyi))
    CC = -np.sum(Wi**2/Wyi*(4*m*Wi*U*V/Wxi + V*JJ + U*HH))
    delta = np.eye(N)
    delmat = delta - repmat(Wj, 1, N)/W
    DD = np.dot(delmat, (Wi**2*V/Wxi))
    EE = 2*np.dot(delmat, (Wi**2*U/Wyi))
    FF = np.dot(delmat, (Wi**2*V/Wyi))
    GG = np.dot(delmat, (Wi**2*U/Wxi))
    A = np.sum(Wi**2*U*V/Wxi)
    B = np.sum(Wi**2*(U**2/Wyi - V**2/Wxi))
    dmdxj = -1*(m**2*DD + m*EE - FF)/(2*m*A + B - AA*m**2 + BB*m - CC)
    dmdyj = -1*(m**2*GG - 2*m*DD - 0.5*EE)/(2*m*A + B - AA*m**2 + BB*m - CC)
    dcdxj = (HH - m*JJ - xbar)*dmdxj - m*Wj/W
    dcdyj = (HH - m*JJ - xbar)*dmdyj + Wj/W
    delm = np.sqrt(S/(N-2)*np.sum(1/Wyi*dmdyj**2 + 1/Wxi*dmdxj**2))
    delc = np.sqrt(S/(N-2)*np.sum(1/Wyi*dcdyj**2 + 1/Wxi*dcdxj**2))
    fitparams = np.concatenate((m, c))
    fitparams = np.append(fitparams, delm)
    fitparams = np.append(fitparams, delc)
    return fitparams
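A minimal usage sketch for wls2d. The original module defines mfunc elsewhere, so the objective below is a hypothetical stand-in (the weighted residual sum S as a function of the slope m), and the data arrays are synthetic and only for illustration.

import numpy as np

# Hypothetical objective in the slope m: the weighted sum of squared residuals
# S(m) that wls2d minimises via scipy.optimize.fmin.
def mfunc(m, xWxi, yWyi):
    x, Wxi = xWxi[:, 0], xWxi[:, 1]
    y, Wyi = yWyi[:, 0], yWyi[:, 1]
    Wi = Wxi*Wyi/(m**2*Wyi + Wxi)
    xbar = np.sum(Wi*x)/np.sum(Wi)
    ybar = np.sum(Wi*y)/np.sum(Wi)
    return np.sum(Wi*((y - ybar) - m*(x - xbar))**2)

# Synthetic data: y ~ 2x + 1 with uncertainties on both axes.
x = np.linspace(0.0, 10.0, 20)
y = 2.0*x + 1.0 + np.random.normal(0.0, 0.2, 20)
delx = np.full(20, 0.1)
dely = np.full(20, 0.2)

m, c, delm, delc = wls2d(x, y, delx, dely)
print(f"slope = {m:.3f} +/- {delm:.3f}, intercept = {c:.3f} +/- {delc:.3f}")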
import { BoneDataType } from "../definitions/BoneDataType";

let byteAlignOfBuffer = 16;
let maxEntityNumber = 5000;
let maxLightNumberInShader = 4;
let maxVertexMorphNumberInShader = 41;
let maxMaterialInstanceForEachType = 500;
let maxSkeletonNumber = 33;
let maxCameraNumber = 15;
let maxSizeLimitOfNonCompressedTexture = 512;
let maxSkeletalBoneNumber = 250;
let dataTextureWidth = Math.pow(2, 12);
let dataTextureHeight = Math.pow(2, 12);
let boneDataType = BoneDataType.Vec4x2;
let noWebGLTex2DStateCache = false;
let maxMorphTargetNumber = 4;
let totalSizeOfGPUShaderDataStorageExceptMorphData = 0;

export default {
  maxEntityNumber,
  maxLightNumberInShader,
  maxVertexMorphNumberInShader,
  maxMaterialInstanceForEachType,
  boneDataType,
  maxSkeletonNumber,
  maxCameraNumber,
  maxSizeLimitOfNonCompressedTexture,
  maxSkeletalBoneNumber,
  dataTextureWidth,
  dataTextureHeight,
  noWebGLTex2DStateCache,
  maxMorphTargetNumber,
  totalSizeOfGPUShaderDataStorageExceptMorphData
};
/**
 * @brief Conflict::optimize
 * Try to optimize the solution using the conflict optimizer and the DFS technique.
 * This handles the shuffling and multistart aspect of the algorithm.
 */
bool Conflict::optimize() {
    distribution = std::normal_distribution<double>(param.noise_mean, param.noise_var);
    if (param.easy)
        remove_easy_segs(classes.size() - 1);

    while (true) {
        shuffle();
        long old_size = classes.size();
        long new_size = 0;
        while (new_size != old_size) {
            old_size = classes.size();
            conflict_dfs_optim_solution(false);
            new_size = classes.size();
            std::clog << "sol size is: " << new_size << std::endl;
            if (old_size != new_size) {
                add_easy_segs();
                return true;
            }
            if (elapsed_sec() > param.max_run_time)
                return false;
        }
    }
    return false;
}
// https://www.hackerrank.com/challenges/utopian-tree
import java.util.Scanner;

public class Solution {
    public static void main(String[] args) {
        Scanner read = new Scanner(System.in);

        // Enter number of test cases and check if input is valid
        int T = read.nextInt();
        if (T < 1 || T > 10) {
            read.close();
            return;
        }

        // Enter T numbers
        for (int i = 0; i < T; ++i) {
            int N = read.nextInt();

            // Check if input is valid
            if (N < 0 || N > 60) {
                read.close();
                return;
            }

            // Calculate height of Utopian Tree
            int heightOfUtopianTree = 1;
            for (int j = 0; j < N; ++j) {
                // If it's spring, double tree size
                if (j % 2 == 0) {
                    heightOfUtopianTree *= 2;
                }
                // Otherwise it's summer, so increase tree size by 1 meter
                else {
                    heightOfUtopianTree++;
                }
            }

            // Print end result
            System.out.println(heightOfUtopianTree);
        }

        // Close scanner
        read.close();
    }
}
The 16-year-old suspect in a shooting that wounded two 15-year-old boys and a 25-year-old woman during the Northeast Portland Last Thursday street fair was booked into the juvenile jail and faces numerous charges, police said. The shooting sent people running for cover just before 7 p.m. on Northeast Alberta Street at 20th Avenue. Sgt. Pete Simpson said the two boys and the 25-year-old woman (who police had previously identified as a 16-year-old boy) are all being treated at Portland hospitals for wounds described as not life-threatening. The suspect fired multiple rounds, striking the three, before running south on Northeast 20th Avenue. The boy was captured by police at Northeast 22nd Avenue and Going Street. Simpson said neighborhood residents gave pursuing officers "critical information" in tracking down the suspect and recovering the handgun believed to have been used in the shooting. Simpson said the suspect's name and charges will be released Friday. Police identified two men who were arrested during the shooting for interfering with police, first-degree disorderly conduct, harassment and resisting arrest. Marcus Cooper, 26, and Loren Ware, 23, were booked into the Justice Center Jail. The Portland Police Bureau's gang enforcement team is leading the investigation into the shooting, Simpson said. Investigators are asking anyone who may have taken photographs or recorded video during the shooting to contact Detective Meghan Burkeen at 503-823-2092 or meghan.burkeen@portlandoregon.gov, or Detective Brian Sims at 503-823-2079, brian.sims@portlandoregon.gov. -- Stuart Tomlinson stomlinson@oregonian.com 503-221-8313 @ORweather
from rest_framework.request import Request from rest_framework.response import Response from rest_framework.views import APIView from django.db.models.functions import Length from django.db.models import Q from usaspending_api.common.cache_decorator import cache_response from usaspending_api.common.validator.tinyshield import TinyShield from usaspending_api.references.models import NAICS from usaspending_api.references.v2.views.filter_tree.filter_tree import DEFAULT_CHILDREN class NAICSViewSet(APIView): """Return a list of NAICS or a filtered list of NAICS""" endpoint_doc = "usaspending_api/api_contracts/contracts/v2/references/naics.md" naics_queryset = NAICS.objects.annotate(text_len=Length("code")) def get_six_digit_naics_count(self, code: str) -> int: return self.naics_queryset.filter(code__startswith=code, text_len=6).count() def _parse_and_validate_request(self, requested_naics: str, request_data) -> dict: data = {"code": requested_naics, "filter": request_data.get("filter")} models = [ {"key": "code", "name": "code", "type": "integer", "allow_nulls": True, "optional": True}, { "key": "filter", "name": "filter", "type": "text", "text_type": "search", "default": None, "optional": True, "allow_nulls": True, }, ] return TinyShield(models).block(data) def _fetch_children(self, naics_code) -> list: length = len(naics_code) + 2 results = [ { "naics": naics.code, "naics_description": naics.description, "count": self.get_six_digit_naics_count(naics.code) if len(naics.code) < 6 else DEFAULT_CHILDREN, } for naics in self.naics_queryset.filter(code__startswith=naics_code, text_len=length) ] return sorted(results, key=lambda x: x["naics"]) def _filter_search(self, naics_filter: dict) -> dict: search_filter = Q(description__icontains=naics_filter["description__icontains"]) search_filter |= Q(code__icontains=naics_filter["description__icontains"]) if naics_filter.get("code"): search_filter &= Q(code__startswith=naics_filter["code"]) tier1_codes = set() tier2_codes = set() tier3_codes = set() naics_list = list(self.naics_queryset.filter(search_filter)) tier3_naics = [naics for naics in naics_list if naics.text_len == 6] tier2_naics = [naics for naics in naics_list if naics.text_len == 4] tier1_naics = [naics for naics in naics_list if naics.text_len == 2] for naics in tier3_naics: tier3_codes.add(naics.code) tier2_codes.add(naics.code[:4]) tier1_codes.add(naics.code[:2]) for naics in tier2_naics: tier2_codes.add(naics.code) tier1_codes.add(naics.code[:2]) extra_tier2_naics = self.naics_queryset.filter(code__in=tier2_codes, text_len=4) extra_tier1_naics = self.naics_queryset.filter(code__in=tier1_codes, text_len=2) tier2 = set(list(tier2_naics)) | set(list(extra_tier2_naics)) tier1 = set(list(tier1_naics)) | set(list(extra_tier1_naics)) tier2_results = {} for naics in tier2: result = { "naics": naics.code, "naics_description": naics.description, "count": self.get_six_digit_naics_count(naics.code), "children": [], } tier2_results[naics.code] = result for naics in tier3_naics: result = { "naics": naics.code, "naics_description": naics.description, "count": DEFAULT_CHILDREN, } tier2_results[naics.code[:4]]["children"].append(result) tier2_results[naics.code[:4]]["children"].sort(key=lambda x: x["naics"]) tier1_results = {} for naics in tier1: result = { "naics": naics.code, "naics_description": naics.description, "count": self.get_six_digit_naics_count(naics.code), "children": [], } tier1_results[naics.code] = result for key in tier2_results.keys(): 
tier1_results[key[:2]]["children"].append(tier2_results[key]) tier1_results[key[:2]]["children"].sort(key=lambda x: x["naics"]) results = [tier1_results[key] for key in tier1_results.keys()] return {"results": sorted(results, key=lambda x: x["naics"])} def _default_view(self) -> dict: results = [ { "naics": naics.code, "naics_description": naics.description, "count": self.get_six_digit_naics_count(naics.code), } for naics in self.naics_queryset.filter(text_len=2) ] return {"results": sorted(results, key=lambda x: x["naics"])} def _business_logic(self, request_data: dict) -> dict: naics_filter = {} code = request_data.get("code") description = request_data.get("filter") if not code and not description: return self._default_view() if code: naics_filter.update({"code": code}) if description: naics_filter.update({"description__icontains": description}) return self._filter_search(naics_filter) results = [] for naics in self.naics_queryset.filter(Q(**naics_filter)): if naics.text_len < 6: result = { "naics": naics.code, "naics_description": naics.description, "count": self.get_six_digit_naics_count(naics.code), "children": self._fetch_children(naics.code), } else: result = { "naics": naics.code, "naics_description": naics.description, "count": DEFAULT_CHILDREN, } results.append(result) return {"results": results} @cache_response() def get(self, request: Request, requested_naics: str = None) -> Response: request_data = self._parse_and_validate_request(requested_naics, request.GET) results = self._business_logic(request_data) return Response(results)
def iterate_one(text, match):
    """Apply the first suggested replacement of a grammar-checker match to text."""
    try:
        if len(match.replacements) > 0:
            # Skip bare "." suggestions, which would only truncate the sentence.
            if match.replacements[0] != ".":
                return text[:match.fromx] + match.replacements[0] + text[match.tox:]
            else:
                return text
        else:
            return text
    except Exception as e:
        print(f"\n\n{text}\n{match.replacements}\n{match.fromx}\n{match.tox}\n{e}")
        # On failure, leave the text unchanged.
        return text
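A quick usage sketch. The match object is normally produced by a grammar checker; here it is faked with a SimpleNamespace exposing only the attributes iterate_one reads (replacements, fromx, tox), with illustrative values.

from types import SimpleNamespace

# Stand-in for a grammar-checker match: suggest replacing characters [0, 1) with "An".
match = SimpleNamespace(replacements=["An"], fromx=0, tox=1)

text = "A apple a day keeps the doctor away."
print(iterate_one(text, match))  # -> "An apple a day keeps the doctor away."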
package datanode

import (
    "sync"
)

// Cache is a concurrency-safe set of segment IDs.
type Cache struct {
    cacheMu  sync.RWMutex
    cacheMap map[UniqueID]bool
}

func newCache() *Cache {
    return &Cache{
        cacheMap: make(map[UniqueID]bool),
    }
}

// checkIfCached reports whether the segment ID has been cached.
// Reads only need the read lock.
func (c *Cache) checkIfCached(key UniqueID) bool {
    c.cacheMu.RLock()
    defer c.cacheMu.RUnlock()
    _, ok := c.cacheMap[key]
    return ok
}

// Cache marks a segment ID as cached.
func (c *Cache) Cache(segID UniqueID) {
    c.cacheMu.Lock()
    defer c.cacheMu.Unlock()
    c.cacheMap[segID] = true
}
// Called by the system to query whether this service wants a potential keystroke.
// Set eaten to true if we want to process this key.
HRESULT KeyEventSink::OnTestKeyUp(WPARAM wparam, LPARAM lparam, BOOL *eaten) {
    DVLOG(3) << __SHORT_FUNCTION__
             << L" wparam: " << wparam
             << L" lparam: " << hex << lparam;
    assert(engine_);
    assert(eaten);
    if (!eaten)
        return E_INVALIDARG;
    *eaten = FALSE;

    if (wparam == VK_LWIN || wparam == VK_RWIN)
        return S_OK;
    if (!enabled_)
        return S_OK;

    BYTE key_state[256];
    if (!GetKeyboardState(key_state))
        return E_FAIL;

    if (engine_->ShouldProcessKey(ConvertToIPCKey(wparam, key_state, false))) {
        *eaten = TRUE;
    }
    return S_OK;
}
def MakeCdfFromItems(items, name=''):
    """Build a Cdf from an unsorted sequence of (value, frequency) pairs.

    items: unsorted sequence of (value, frequency) pairs
    name: string name for this CDF

    Returns: Cdf object
    """
    runsum = 0
    xs = []
    cs = []

    for value, count in sorted(items):
        runsum += count
        xs.append(value)
        cs.append(runsum)

    total = float(runsum)
    ps = [c / total for c in cs]

    cdf = Cdf(xs, ps, name)
    return cdf
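A short sketch of how the cumulative probabilities come out. The real Cdf class lives in the surrounding module and is not shown here, so a hypothetical minimal container stands in for it.

from collections import namedtuple

# Hypothetical stand-in for the module's Cdf class: just stores the sorted
# values and their cumulative probabilities.
Cdf = namedtuple("Cdf", ["xs", "ps", "name"])

items = [(3, 2), (1, 1), (2, 1)]   # (value, frequency) pairs, unsorted
cdf = MakeCdfFromItems(items, name="demo")
print(cdf.xs)  # [1, 2, 3]
print(cdf.ps)  # [0.25, 0.5, 1.0]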
def remove_ticks(axes):
    """Hide both the x and y tick marks and labels on the given axes."""
    axes.set_xticks([])
    axes.set_yticks([])
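A minimal usage sketch, assuming the axes object is a standard matplotlib Axes.

import matplotlib.pyplot as plt

fig, ax = plt.subplots()
ax.plot([0, 1, 2], [0, 1, 4])
remove_ticks(ax)   # strip the tick marks and labels from both axes
plt.show()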
<gh_stars>0 import { Button, Center, Checkbox, FormControl, Input, KeyboardAvoidingView, ScrollView, Select, Spinner, } from 'native-base'; import React from 'react'; import DatePicker from 'react-native-date-picker'; import { useAddAlarm, useUpdateAlarm } from '../hooks/useAlarms'; import { useRingtones } from '../hooks/useRingtones'; import { Props } from './index'; export default function AlarmForm({ navigation, route }: Props<'AlarmForm'>) { const { alarm, edit } = route.params; const [days, setDays] = React.useState<number[]>(edit ? alarm.days : []); const [name, setName] = React.useState(edit ? alarm.name : ''); const [ringtone, setRingtone] = React.useState(edit ? alarm.ringtone : ''); const date = new Date(); const { data: ringtones, status: ringtonesStatus } = useRingtones(); if (edit) { date.setHours( Number.parseInt(alarm.time.split(':')[0], 10), Number.parseInt(alarm.time.split(':')[1], 10), ); } const [time, setTime] = React.useState(date); const [enabled, setEnabled] = React.useState(edit ? alarm.enabled : true); const addAlarm = useAddAlarm(); const updateAlarm = useUpdateAlarm(); if (ringtonesStatus !== 'success') { return ( <ScrollView> <Center> <Spinner size={25} mt={2} /> </Center> </ScrollView> ); } function handleDaysCheck(value: number) { return (isSelected: boolean) => { if (isSelected) { setDays([...days, value]); } else { setDays(days.filter(day => day !== value)); } }; } function handleSubmit() { return () => { edit ? updateAlarm.mutate({ name, days, ringtone, time: time.toTimeString().substring(0, 5), enabled, }) : addAlarm.mutate({ name, days, ringtone, time: time.toTimeString().substring(0, 5), enabled, }); navigation.navigate('Alarms'); }; } return ( <KeyboardAvoidingView> <ScrollView> <FormControl isRequired> <FormControl.Label>Name</FormControl.Label> <Input placeholder="Wecker" value={name} isDisabled={edit} onChangeText={text => setName(text)} /> <FormControl.ErrorMessage> Bitte einen Namen eingeben </FormControl.ErrorMessage> </FormControl> <FormControl isRequired> <FormControl.Label>Klingelton</FormControl.Label> <Select onValueChange={item => setRingtone(item)} selectedValue={ringtone}> {ringtones.map(val => ( <Select.Item label={val.name} value={val.name} key={val.name} /> ))} </Select> <FormControl.ErrorMessage> Bitte einen Klingelton auswählen </FormControl.ErrorMessage> </FormControl> <FormControl isRequired> <FormControl.Label>Uhrzeit</FormControl.Label> <DatePicker mode="time" locale="de" date={time} onDateChange={val => setTime(val)} fadeToColor="#ffffff" is24hourSource="locale" androidVariant="nativeAndroid" /> <FormControl.ErrorMessage> Bitte eine Uhrzeit auswählen </FormControl.ErrorMessage> </FormControl> <FormControl isRequired> <FormControl.Label>Tage</FormControl.Label> <Checkbox isChecked={days.includes(1)} value="1" onChange={handleDaysCheck(1)}> Montag </Checkbox> <Checkbox value="2" isChecked={days.includes(2)} onChange={handleDaysCheck(2)}> Dienstag </Checkbox> <Checkbox value="3" isChecked={days.includes(3)} onChange={handleDaysCheck(3)}> Mittwoch </Checkbox> <Checkbox value="4" isChecked={days.includes(4)} onChange={handleDaysCheck(4)}> Donnerstag </Checkbox> <Checkbox value="5" isChecked={days.includes(5)} onChange={handleDaysCheck(5)}> Freitag </Checkbox> <Checkbox value="6" isChecked={days.includes(6)} onChange={handleDaysCheck(6)}> Samstag </Checkbox> <Checkbox value="7" isChecked={days.includes(7)} onChange={handleDaysCheck(7)}> Sonntag </Checkbox> <FormControl.ErrorMessage> Bitte Tage auswählen </FormControl.ErrorMessage> 
</FormControl> <FormControl isRequired> <FormControl.Label>Aktiviert</FormControl.Label> <Checkbox value="true" accessibilityLabel="Aktiviert" isChecked={enabled} onChange={val => setEnabled(val)} /> <FormControl.ErrorMessage>Bitte auswählen</FormControl.ErrorMessage> </FormControl> <Button onPress={handleSubmit()}> {edit ? 'Speichern' : 'Hinzufügen'} </Button> </ScrollView> </KeyboardAvoidingView> ); }
//! HTTP Versions enum //! //! Instead of relying on typo-prone Strings, use expected HTTP versions as //! the `HttpVersion` enum. use std::fmt; use std::str::FromStr; #[cfg(feature = "compat")] use http; use error::Error; use self::HttpVersion::{Http09, Http10, Http11, H2, H2c}; /// Represents a version of the HTTP spec. #[derive(PartialEq, PartialOrd, Copy, Clone, Eq, Ord, Hash, Debug)] pub enum HttpVersion { /// `HTTP/0.9` Http09, /// `HTTP/1.0` Http10, /// `HTTP/1.1` Http11, /// `HTTP/2.0` over TLS H2, /// `HTTP/2.0` over cleartext H2c, #[doc(hidden)] __DontMatchMe, } impl fmt::Display for HttpVersion { fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { fmt.write_str(match *self { Http09 => "HTTP/0.9", Http10 => "HTTP/1.0", Http11 => "HTTP/1.1", H2 => "h2", H2c => "h2c", HttpVersion::__DontMatchMe => unreachable!(), }) } } impl FromStr for HttpVersion { type Err = Error; fn from_str(s: &str) -> Result<HttpVersion, Error> { Ok(match s { "HTTP/0.9" => Http09, "HTTP/1.0" => Http10, "HTTP/1.1" => Http11, "h2" => H2, "h2c" => H2c, _ => return Err(Error::Version), }) } } impl Default for HttpVersion { fn default() -> HttpVersion { Http11 } } #[cfg(feature = "compat")] impl From<http::Version> for HttpVersion { fn from(v: http::Version) -> HttpVersion { match v { http::Version::HTTP_09 => HttpVersion::Http09, http::Version::HTTP_10 => HttpVersion::Http10, http::Version::HTTP_11 => HttpVersion::Http11, http::Version::HTTP_2 => HttpVersion::H2 } } } #[cfg(feature = "compat")] impl From<HttpVersion> for http::Version { fn from(v: HttpVersion) -> http::Version { match v { HttpVersion::Http09 => http::Version::HTTP_09, HttpVersion::Http10 => http::Version::HTTP_10, HttpVersion::Http11 => http::Version::HTTP_11, HttpVersion::H2 => http::Version::HTTP_2, _ => panic!("attempted to convert unexpected http version") } } } #[cfg(test)] mod tests { use std::str::FromStr; use error::Error; use super::HttpVersion; use super::HttpVersion::{Http09,Http10,Http11,H2,H2c}; #[test] fn test_default() { assert_eq!(Http11, HttpVersion::default()); } #[test] fn test_from_str() { assert_eq!(Http09, HttpVersion::from_str("HTTP/0.9").unwrap()); assert_eq!(Http10, HttpVersion::from_str("HTTP/1.0").unwrap()); assert_eq!(Http11, HttpVersion::from_str("HTTP/1.1").unwrap()); assert_eq!(H2, HttpVersion::from_str("h2").unwrap()); assert_eq!(H2c, HttpVersion::from_str("h2c").unwrap()); } #[test] fn test_from_str_panic() { match HttpVersion::from_str("foo") { Err(Error::Version) => assert!(true), Err(_) => assert!(false), Ok(_) => assert!(false), } } }
/**********************************
 * SCAENA FRAMEWORK
 * Author: <NAME>
 * License: MIT - 2016
 **********************************/

#include <glm/glm.hpp>
#include "Texture.h"
#include "GLSLProgram.h"
#include "Cubo.h"
#include "Marcador.h"

// Texture coordinates for the six faces of the marker cube.
float Marcador::texturaData[] = {
    0.0f, 0.0f, 1.0f, 0.0f, 1.0f, 1.0f, 0.0f, 1.0f,
    0.0f, 0.0f, 1.0f, 0.0f, 1.0f, 1.0f, 0.0f, 1.0f,
    0.0f, 0.0f, 1.0f, 0.0f, 1.0f, 1.0f, 0.0f, 1.0f,
    0.0f, 0.0f, 1.0f, 0.0f, 1.0f, 1.0f, 0.0f, 1.0f,
    0.0f, 0.0f, 1.0f, 0.0f, 1.0f, 1.0f, 0.0f, 1.0f,
    0.0f, 0.0f, 1.0f, 0.0f, 1.0f, 1.0f, 0.0f, 1.0f
};

Marcador::Marcador(float x, float y, float z, vec3 scale){
    this->marcador = Cubo();
    this->scale = scale;
    this->posicion = vec3(x, y, z);
    this->visible = true;
    this->modelMat = glm::scale(glm::mat4(1.0f), this->scale);
}

void Marcador::isVisible(bool value){
    this->visible = value;
}

void Marcador::cambiarPosicion(float x, float y, float z){
    this->posicion.x = x;
    this->posicion.y = y;
    this->posicion.z = z;
    this->modelMat = glm::translate(mat4(1.0f), this->posicion);
    this->modelMat = glm::scale(this->modelMat, this->scale);
}

void Marcador::setPosicion(vec3 posicion){
    this->cambiarPosicion(posicion.x, posicion.y, posicion.z);
}

void Marcador::aumentarX(float incremento){
    this->posicion.x += incremento;
    this->modelMat = glm::translate(mat4(1.0f), this->posicion);
    this->modelMat = glm::scale(this->modelMat, this->scale);
}

void Marcador::aumentarY(float incremento){
    this->posicion.y += incremento;
    this->modelMat = glm::translate(mat4(1.0f), this->posicion);
    this->modelMat = glm::scale(this->modelMat, this->scale);
}

void Marcador::aumentarZ(float incremento){
    this->posicion.z += incremento;
    this->modelMat = glm::translate(mat4(1.0f), this->posicion);
    this->modelMat = glm::scale(this->modelMat, this->scale);
}

vec3 Marcador::getPosicion(){
    return this->posicion;
}

void Marcador::cargar(const char* filename){
    marcador.cargarTextureDifusa(filename, this->texturaData);
    marcador.cargar();
}

void Marcador::render(GLSLProgram* shaderProgram, glm::mat4 view, glm::mat4 persp){
    marcador.render(shaderProgram, view, this->modelMat, persp);
}

void Marcador::setMaterial(float Ka, float Kd, float Ks, float brillo){
    // Forward the diffuse coefficient Kd (the original passed Ks twice and never used Kd).
    this->marcador.setMaterial(Ka, Kd, Ks, brillo);
}
import styled from 'styled-components';
import SvgCloseCircle from '../svg/close-circle';

/***
 * Close circle button used in the header and in modals
 */
const HeaderCloseButton = styled(SvgCloseCircle)`
  width: 8%;
  top: 2.5%;
  right: 2%;
  position: absolute;
  cursor: pointer;
  z-index: 100;
`;

export {
  HeaderCloseButton,
};
def reset_rules(cls):
    """Clear the class-level rule registry and its hash cache."""
    cls._rules = {}
    cls._hash = {}
//
//  NLReadAdObject.h
//  Novel
//
//  Created by <NAME> on 2020/9/18.
//  Copyright © 2020 panling. All rights reserved.
//

#import <Foundation/Foundation.h>
#import "NLAdDefines.h"

NS_ASSUME_NONNULL_BEGIN

@interface NLReadAdObject : NSObject

@property (nonatomic, assign, readonly) NLAdPlaceCode placeCode;
@property (nonatomic, assign, readonly) NLAdPlatform adPlatform;
@property (nonatomic, strong, readonly) __kindof NSObject *adObject;

- (instancetype)initWithPlaceCode:(NLAdPlaceCode)placeCode
                       adPlatform:(NLAdPlatform)adPlatform
                         adObject:(__kindof NSObject *)adObject;

@end

NS_ASSUME_NONNULL_END
LOS ANGELES (AP) — It wasn't a juicy script that brought together a half-dozen Hollywood stars, including Meg Ryan, America Ferrera and Olivia Wilde. It was the chance to tell the stories of women seeking, and finding, lives unbound by oppression. "Half the Sky," a moving PBS documentary series airing Monday and Tuesday (9-11 p.m. EDT; check local listings), provides unflinching accounts of the worldwide exploitation and abuse that can ensnare women as well as girls. But it doesn't stop short of hopefulness. The series details efforts to help females escape brutality and poverty through health care, education and economic advancement, sometimes fostered by those who themselves once suffered. The extraordinary women featured in "Half the Sky" include Somaly Mam, who was forced into the sex trafficking trade as a youngster in Cambodia and now, as an adult, has created a program to help rescue girls from the life she once endured. Mam and others like her "underscore that this isn't just an issue of really depressing things happening around the world because, side-by-side with the worst of humanity, you encounter the very best," said Nicholas D. Kristof, whose bestselling 2009 book (co-written with his wife, Sheryl WuDunn), inspired the PBS series. That's what Ryan, who visited Cambodia for the documentary, found in one brave child. "This little girl had been kept in a barrel and let out to service a man, put back, let out again, and that's her life. She said, 'People say love is hard. I don't think so at all. I think hate is hard. Love is easy,'" the actress recounted. Ferrera read the book by Kristof, a New York Times columnist, and WuDunn before she was approached for the PBS documentary. She found their work — which includes a "What You Can Do" chapter to encourage reader involvement — a testament to women's refusal to be victims. "All around the world, women are the ones pulling themselves out of these situations. When I read 'Half the Sky,' I would go from complete and utter rage to just smiling ear to ear," said the former "Ugly Betty" star. A woman "can be at the bottom of the bottom and find a way not to just lift herself up but her family and her entire community," Ferrera said. "That made me really proud to be a woman." Ferrera, Ryan, Diane Lane, Eva Mendes and Gabrielle Union took turns accompanying Kristof as he traveled to Asia and Africa to spotlight work being done to change the status of those who "hold up half the sky," as a Chinese proverb says of women. Ryan said she and her fellow actors aren't claiming expertise. As a celebrity, "you can just saddle up next to something smart and important and that will get some attention. And it's not like any of us are ... any more clear about it than anybody else," she said. The documentary includes investigations of child prostitution in India, sexual assaults in Sierra Leone and maternal mortality in Somaliland where, according to the documentary, one in 12 women dies in childbirth because of malnutrition and the effects of genital mutilation. These and other examples of abuse and neglect add up to an urgent global imperative, WuDunn said. "In the same way that slavery was a moral challenge for the 19th century and totalitarianism was a challenge for the 20th century, the challenge that women and girls face around the world is the moral challenge of our time," she said. At a news conference, Kristof responded to a question about whether America was imposing its beliefs on other countries. 
"There are values that are oppressive to women that are embedded in a culture, sometimes in a religion, and I don't think one can ignore that fact," he said. "And it's also true that sometimes one can go in and end up causing more harm than good in the process of trying to bring about change." "What we've tried to do is focus on organizations that are on the ground," at the grass-roots level, and try to "amplify their voices," he said. "Half the Sky: Turning Oppression into Opportunity for Women Worldwide," introduced by George Clooney and airing as part of public TV's "Independent Lens" series, includes companion websites, social media campaigns supporting non-governmental organizations, and educational components. The project, directed by Maro Chermayeff, is part of an initiative on female leadership from documentary producer Independent Television Service, PBS and the Corporation for Public Broadcasting, which funds ITVS. LYNN ELBER, AP Television Writer
import optparse
import sys
from collections import defaultdict
import time


def model1_train_e2f(bitext, opts):
    t = defaultdict(float)
    # Initialize t uniformly over each sentence pair
    for ff, ee in bitext:
        for f in ff:
            for e in ee:
                t[(f, e)] = float(1) / (len(ee) + 1)

    k = 0
    iterations = int(opts.iterations)
    while k < iterations:
        k += 1
        # Initialize all the counts
        count_e = defaultdict(float)
        count_fe = defaultdict(float)
        # E step: compute expected counts
        for ff, ee in bitext:
            for f in set(ff):
                Z = 0
                for e in set(ee):
                    Z += t[(f, e)]
                for e in set(ee):
                    c = float(t[(f, e)]) / Z
                    count_fe[(f, e)] += c
                    count_e[e] += c
        # M step: normalize
        for (f, e) in count_fe.keys():
            t[(f, e)] = count_fe[(f, e)] / count_e[e]
    return t


def model1_train_f2e(bitext, opts):
    t = defaultdict(float)
    # Initialize t uniformly over each sentence pair
    for ff, ee in bitext:
        for e in ee:
            for f in ff:
                t[(e, f)] = float(1) / (len(ff) + 1)

    k = 0
    iterations = int(opts.iterations)
    while k < iterations:
        k += 1
        # Initialize all the counts
        count_f = defaultdict(float)
        count_ef = defaultdict(float)
        # E step: compute expected counts
        for ff, ee in bitext:
            for e in set(ee):
                Z = 0
                for f in set(ff):
                    Z += t[(e, f)]
                for f in set(ff):
                    c = float(t[(e, f)]) / Z
                    count_ef[(e, f)] += c
                    count_f[f] += c
        # M step: normalize
        for (e, f) in count_ef.keys():
            t[(e, f)] = count_ef[(e, f)] / count_f[f]
    return t


def align(bitext, opts):
    if opts.trainDirection != 'f2e':
        t = model1_train_e2f(bitext, opts)
        # Get the alignments, one output line per sentence pair
        for ff, ee in bitext:
            for (i, f_i) in enumerate(ff):
                max_p = float(0)
                best_j = 0
                for (j, e_j) in enumerate(ee):
                    if t[(f_i, e_j)] > max_p:
                        max_p = t[(f_i, e_j)]
                        best_j = j
                sys.stdout.write("%i-%i " % (i, best_j))
            sys.stdout.write("\n")
    else:
        t = model1_train_f2e(bitext, opts)
        # Get the alignments, one output line per sentence pair
        for ff, ee in bitext:
            for (j, e_j) in enumerate(ee):
                max_p = float(0)
                best_i = 0
                for (i, f_i) in enumerate(ff):
                    if t[(e_j, f_i)] > max_p:
                        max_p = t[(e_j, f_i)]
                        best_i = i
                sys.stdout.write("%i-%i " % (best_i, j))
            sys.stdout.write("\n")
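A small usage sketch. It assumes bitext is a list of (foreign_tokens, english_tokens) pairs and that opts carries the iterations and trainDirection attributes the functions read; here the optparse options object is faked with a SimpleNamespace and the corpus is a toy example.

from types import SimpleNamespace

# Toy parallel corpus: each entry is (foreign sentence tokens, English sentence tokens).
bitext = [
    (["das", "haus"], ["the", "house"]),
    (["das", "buch"], ["the", "book"]),
    (["ein", "buch"], ["a", "book"]),
]

# Stand-in for the optparse options the full script normally parses.
opts = SimpleNamespace(iterations=5, trainDirection="e2f")

# Prints one line of "i-j" alignment pairs per sentence pair to stdout.
align(bitext, opts)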
// ResumeSuspendedJobByUser resubmits a suspended job if the user is authorized.
func (qm *ServerMgr) ResumeSuspendedJobByUser(id string, u *user.User) (err error) {
    dbjob, err := GetJob(id)
    if err != nil {
        return errors.New("failed to load job " + err.Error())
    }

    job_state, err := dbjob.GetState(true)
    if err != nil {
        return
    }

    // Only the owner, users with write rights, or admins may resume the job.
    rights := dbjob.Acl.Check(u.Uuid)
    if dbjob.Acl.Owner != u.Uuid && rights["write"] == false && u.Admin == false {
        return errors.New(e.UnAuth)
    }

    if job_state != JOB_STAT_SUSPEND {
        return errors.New("job " + id + " is not in 'suspend' status")
    }

    remain_tasks, err := dbjob.GetRemainTasks()
    if err != nil {
        return
    }

    if remain_tasks < len(dbjob.Tasks) {
        dbjob.SetState(JOB_STAT_INPROGRESS, nil)
    } else {
        dbjob.SetState(JOB_STAT_QUEUED, nil)
    }

    err = dbjob.IncrementResumed(1)
    if err != nil {
        return
    }

    qm.EnqueueTasksByJobId(dbjob.Id)
    return
}
#!/usr/bin/env vpython3
#
# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""Sends a heart beat pulse to the currently online Android devices.

This heart beat lets the devices know that they are connected to a host.
"""
# pylint: disable=W0702

import sys
import time

import devil_chromium

from devil.android import device_utils

PULSE_PERIOD = 20


def main():
  devil_chromium.Initialize()

  while True:
    try:
      devices = device_utils.DeviceUtils.HealthyDevices(denylist=None)
      for d in devices:
        d.RunShellCommand(['touch', '/sdcard/host_heartbeat'],
                          check_return=True)
    except:
      # Keep the heartbeat running, bypassing all errors.
      pass
    time.sleep(PULSE_PERIOD)


if __name__ == '__main__':
  sys.exit(main())
<gh_stars>10-100 package de.l3s.eventkg.source.wikidata.processors; import java.io.IOException; import java.io.PrintStream; import org.wikidata.wdtk.datamodel.interfaces.EntityDocumentDumpProcessor; import org.wikidata.wdtk.datamodel.interfaces.GlobeCoordinatesValue; import org.wikidata.wdtk.datamodel.interfaces.ItemDocument; import org.wikidata.wdtk.datamodel.interfaces.PropertyDocument; import org.wikidata.wdtk.datamodel.interfaces.SiteLink; import org.wikidata.wdtk.datamodel.interfaces.Statement; import org.wikidata.wdtk.datamodel.interfaces.StatementGroup; import de.l3s.eventkg.pipeline.Config; import de.l3s.eventkg.util.FileLoader; import de.l3s.eventkg.util.FileName; public class PositionsProcessor implements EntityDocumentDumpProcessor { private int itemsWithPositionCount = 0; private int itemCount = 0; private PrintStream outPositions; private String positionProperty; private void loadPositionPropertyId() { this.positionProperty = "P625"; } public PositionsProcessor() throws IOException { // open files for writing results outPositions = FileLoader.getPrintStream(FileName.WIKIDATA_POSITIONS); outPositions.print("subjectId" + Config.TAB + "subjectLabel" + Config.TAB + "latitude" + Config.TAB + "longitude" + Config.TAB + "precision" + Config.TAB + "globe" + Config.TAB + "subjectWikiEnLabel"); outPositions.println(); loadPositionPropertyId(); } @Override public void processItemDocument(ItemDocument itemDocument) { this.itemCount++; if (itemDocument.hasStatement(positionProperty)) { StatementGroup statements = itemDocument.findStatementGroup(positionProperty); if (statements != null) { for (Statement statement : statements) { if (statement.getClaim() != null && statement.getClaim().getMainSnak() != null && statement.getClaim().getMainSnak().getValue() != null) { GlobeCoordinatesValue value = null; try { value = (GlobeCoordinatesValue) statement.getClaim().getMainSnak().getValue(); } catch (ClassCastException e) { continue; } if (value != null) { this.itemsWithPositionCount++; String globe = value.getGlobe(); globe = globe.substring(globe.lastIndexOf("/") + 1); outPositions.print(itemDocument.getItemId().getId()); outPositions.print(Config.TAB); outPositions.print(csvEscape(itemDocument.findLabel("en"))); outPositions.print(Config.TAB); outPositions.print(value.getLatitude()); outPositions.print(Config.TAB); outPositions.print(value.getLongitude()); outPositions.print(Config.TAB); outPositions.print(value.getPrecision()); outPositions.print(Config.TAB); outPositions.print(globe); outPositions.print(Config.TAB); SiteLink enwiki = itemDocument.getSiteLinks().get("enwiki"); if (enwiki != null) { outPositions.print(csvEscape(enwiki.getPageTitle())); } else { outPositions.print("\\N"); } outPositions.println(); } } } } } // Print progress every 100,000 items: if (this.itemCount % 100000 == 0) { printStatus(); } } @Override public void processPropertyDocument(PropertyDocument propertyDocument) { // Nothing to do } private String csvEscape(String string) { if (string == null) return "\\N"; else return string.replaceAll("\t", " "); } public void printStatus() { System.out.println("Found " + this.itemsWithPositionCount + " items with positions after scanning " + this.itemCount + " items."); } public void close() { printStatus(); this.outPositions.close(); } @Override public void open() { } }
/*
 * This copy constructor is required as otherwise you'd get nuked by CDataStore
 * when it has to redimension its vector store when more units than
 * anticipated are requested: internally, STL destroys each unit during this
 * vector resize operation, so we'll need to copy the data to new space, especially
 * when we're m_varying_width !!!
 */
CUnitStore::CUnitStore(const CUnitStore &src)
{
    if (&src == this) {
        return;
    }

    m_varying_width = src.m_varying_width;
    m_is_in_use = src.m_is_in_use;
    m_is_sticky = src.m_is_sticky;
    m_nDataSize = src.m_nDataSize;

    if (!m_varying_width) {
        XL_ASSERT(m_nDataSize <= FIXEDWIDTH_STORAGEUNIT_SIZE);
        memcpy(&s, &src.s, sizeof(s));
    } else {
        XL_ASSERT(m_is_in_use);
        XL_ASSERT(src.s.vary.m_nSize > 0);
        s.vary.m_pData = (unsigned8_t *)malloc(src.s.vary.m_nSize);
        if (!s.vary.m_pData) {
            m_nDataSize = s.vary.m_nSize = 0;
        } else {
            memcpy(s.vary.m_pData, src.s.vary.m_pData, m_nDataSize);
            s.vary.m_nSize = src.s.vary.m_nSize;
        }
    }
}
/**
 * Bug 81490: xmlns:xsi namespace getting lost, resulting in parse problems if
 * "Element" based XML is re-parsed.
 */
@Test
public void nonZimbraAttributeNamespaceHandling() throws Exception {
    String xmlString = Joiner.on("\n").join(getAcctReqXml);
    ByteArrayInputStream bais = new ByteArrayInputStream(xmlString.getBytes());
    Element legacyElem = parseXMLusingDom4j(bais, Element.XMLElement.mFactory);
    logInfo("Parsed to legacy element\n%s", legacyElem.toString());
    Element elem = Element.parseXML(xmlString);
    logInfo("Parsed to element\n%s", elem.toString());
    Assert.assertEquals("elem toString value", parsedGetAcctReq, elem.toString());
    // Re-parse the serialized form to confirm the xmlns:xsi namespace survives.
    elem = Element.parseXML(elem.toString());
}
/** Generate subsequences of a time series for testing. */
Collection<TimeSeriesI> generateSubTimeSeries(
        double[] times, double[] values, Type timeSeriesType, int asDegree) {
    List<TimeSeriesI> list = new ArrayList<>();
    for (int i0 = 0; i0 < times.length; ++i0) {
        for (int i1 = i0; i1 < times.length; ++i1) {
            // Copy the [i0, i1) window of the input series.
            double[] t = new double[i1 - i0];
            double[] v = new double[i1 - i0];
            System.arraycopy(times, i0, t, 0, i1 - i0);
            System.arraycopy(values, i0, v, 0, i1 - i0);
            // Build the sub-series from the copied window (the original passed the
            // full arrays here, leaving t and v unused).
            TimeSeriesI ts = evaluator.makeTS(timeSeriesType, t, v);
            if (timeSeriesType.getInterpolationDegree() != asDegree) {
                if (ts.getValues().length > 0) {
                    TimeSeriesI b = evaluator.makeTS(
                            Type.TIMESERIES_LINEAR,
                            new double[] { times[0] },
                            new double[] { 0.0 });
                    ts = ((TimeSeries) ts).__add__((TimeSeries) b);
                }
            }
            list.add(ts);
        }
    }
    return list;
}
<filename>dlls/evr/main.c /* * Copyright (C) 2015 <NAME> * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA */ #define COBJMACROS #include <stdarg.h> #include "windef.h" #include "winbase.h" #include "ole2.h" #include "rpcproxy.h" #include "evr_private.h" #include "wine/debug.h" WINE_DEFAULT_DEBUG_CHANNEL(evr); static HINSTANCE instance_evr; BOOL WINAPI DllMain(HINSTANCE instance, DWORD reason, LPVOID reserved) { if (reason == DLL_WINE_PREATTACH) return FALSE; /* prefer native version */ else if (reason == DLL_PROCESS_ATTACH) { instance_evr = instance; DisableThreadLibraryCalls(instance); } else if (reason == DLL_PROCESS_DETACH && !reserved) { strmbase_release_typelibs(); } return TRUE; } typedef struct { IClassFactory IClassFactory_iface; LONG ref; HRESULT (*pfnCreateInstance)(IUnknown *unk_outer, void **ppobj); } IClassFactoryImpl; static inline IClassFactoryImpl *impl_from_IClassFactory(IClassFactory *iface) { return CONTAINING_RECORD(iface, IClassFactoryImpl, IClassFactory_iface); } struct object_creation_info { const CLSID *clsid; HRESULT (*pfnCreateInstance)(IUnknown *unk_outer, void **ppobj); }; static const struct object_creation_info object_creation[] = { { &CLSID_EnhancedVideoRenderer, evr_filter_create }, { &CLSID_MFVideoMixer9, evr_mixer_create }, { &CLSID_MFVideoPresenter9, evr_presenter_create }, }; static HRESULT WINAPI classfactory_QueryInterface(IClassFactory *iface, REFIID riid, void **ppobj) { IClassFactoryImpl *This = impl_from_IClassFactory(iface); if (IsEqualGUID(riid, &IID_IUnknown) || IsEqualGUID(riid, &IID_IClassFactory)) { IClassFactory_AddRef(iface); *ppobj = &This->IClassFactory_iface; return S_OK; } WARN("(%p)->(%s,%p),not found\n", This, debugstr_guid(riid), ppobj); return E_NOINTERFACE; } static ULONG WINAPI classfactory_AddRef(IClassFactory *iface) { IClassFactoryImpl *This = impl_from_IClassFactory(iface); return InterlockedIncrement(&This->ref); } static ULONG WINAPI classfactory_Release(IClassFactory *iface) { IClassFactoryImpl *This = impl_from_IClassFactory(iface); ULONG ref = InterlockedDecrement(&This->ref); if (ref == 0) HeapFree(GetProcessHeap(), 0, This); return ref; } static HRESULT WINAPI classfactory_CreateInstance(IClassFactory *iface, IUnknown *outer_unk, REFIID riid, void **ppobj) { IClassFactoryImpl *This = impl_from_IClassFactory(iface); HRESULT hres; IUnknown *unk; TRACE("(%p)->(%p,%s,%p)\n", This, outer_unk, debugstr_guid(riid), ppobj); *ppobj = NULL; if (outer_unk && !IsEqualGUID(riid, &IID_IUnknown)) return E_NOINTERFACE; hres = This->pfnCreateInstance(outer_unk, (void **) &unk); if (SUCCEEDED(hres)) { hres = IUnknown_QueryInterface(unk, riid, ppobj); IUnknown_Release(unk); } return hres; } static HRESULT WINAPI classfactory_LockServer(IClassFactory *iface, BOOL dolock) { IClassFactoryImpl *This = impl_from_IClassFactory(iface); FIXME("(%p)->(%d), stub!\n", This, dolock); 
return S_OK; } static const IClassFactoryVtbl classfactory_Vtbl = { classfactory_QueryInterface, classfactory_AddRef, classfactory_Release, classfactory_CreateInstance, classfactory_LockServer }; HRESULT WINAPI DllGetClassObject(REFCLSID rclsid, REFIID riid, void **ppv) { unsigned int i; IClassFactoryImpl *factory; TRACE("(%s,%s,%p)\n", debugstr_guid(rclsid), debugstr_guid(riid), ppv); if (!IsEqualGUID(&IID_IClassFactory, riid) && !IsEqualGUID( &IID_IUnknown, riid)) return E_NOINTERFACE; for (i = 0; i < ARRAY_SIZE(object_creation); i++) { if (IsEqualGUID(object_creation[i].clsid, rclsid)) break; } if (i == ARRAY_SIZE(object_creation)) { FIXME("%s: no class found.\n", debugstr_guid(rclsid)); return CLASS_E_CLASSNOTAVAILABLE; } factory = HeapAlloc(GetProcessHeap(), 0, sizeof(*factory)); if (factory == NULL) return E_OUTOFMEMORY; factory->IClassFactory_iface.lpVtbl = &classfactory_Vtbl; factory->ref = 1; factory->pfnCreateInstance = object_creation[i].pfnCreateInstance; *ppv = &(factory->IClassFactory_iface); return S_OK; } HRESULT WINAPI DllCanUnloadNow(void) { return S_FALSE; } HRESULT WINAPI DllRegisterServer(void) { return __wine_register_resources(instance_evr); } HRESULT WINAPI DllUnregisterServer(void) { return __wine_unregister_resources(instance_evr); } HRESULT WINAPI MFCreateVideoMixerAndPresenter(IUnknown *mixer_outer, IUnknown *presenter_outer, REFIID riid_mixer, void **mixer, REFIID riid_presenter, void **presenter) { HRESULT hr; TRACE("%p, %p, %s, %p, %s, %p.\n", mixer_outer, presenter_outer, debugstr_guid(riid_mixer), mixer, debugstr_guid(riid_presenter), presenter); if (!mixer || !presenter) return E_POINTER; *mixer = *presenter = NULL; if (SUCCEEDED(hr = CoCreateInstance(&CLSID_MFVideoMixer9, mixer_outer, CLSCTX_INPROC_SERVER, riid_mixer, mixer))) hr = CoCreateInstance(&CLSID_MFVideoPresenter9, presenter_outer, CLSCTX_INPROC_SERVER, riid_presenter, presenter); if (FAILED(hr)) { if (*mixer) IUnknown_Release((IUnknown *)*mixer); if (*presenter) IUnknown_Release((IUnknown *)*presenter); *mixer = *presenter = NULL; } return hr; }
import os

import numpy as np
import pandas as pd


def load_data(args, split):
    # Load image data
    images = pd.read_csv(
        os.path.join(args.data_root, "CUB_200_2011", "images.txt"),
        sep=" ",
        names=["image_id", "filepath"],
    )
    image_class_labels = pd.read_csv(
        os.path.join(args.data_root, "CUB_200_2011", "image_class_labels.txt"),
        sep=" ",
        names=["image_id", "class_id"],
    )
    train_test_split = pd.read_csv(
        os.path.join(args.data_root, "CUB_200_2011", "train_test_split.txt"),
        sep=" ",
        names=["image_id", "is_training_image"],
    )
    classes = pd.read_csv(
        os.path.join(args.data_root, "CUB_200_2011", "classes.txt"),
        sep=" ",
        names=["class_id", "class_name"],
    )
    data = images.merge(image_class_labels, on="image_id")
    data = data.merge(train_test_split, on="image_id")
    data = data.merge(classes, on="class_id")

    # Get data split
    if split == "train":
        data = data[data.is_training_image == 1]
    elif split == "valid":
        data = data[data.is_training_image == 0]
    elif split == "all":
        data = data

    data["class_name"] = [
        class_name.split(".")[1].lower().replace("_", " ")
        for class_name in data.class_name
    ]

    # Load attribute data
    image_attribute_labels = pd.read_csv(
        os.path.join(args.data_root, "CUB_200_2011", "attributes", "image_attribute_labels.txt"),
        sep=" ",
        names=["image_id", "attribute_id", "is_present", "certainty_id", "time"],
    )
    attributes = pd.read_csv(
        os.path.join(args.data_root, "CUB_200_2011", "attributes", "attributes.txt"),
        sep=" ",
        names=["attribute_id", "attribute_name"],
    )
    attributes_info = [attr.split("::") for attr in attributes.attribute_name]
    attributes_info = np.array(
        [[attr.replace("_", " "), label.replace("_", " ")] for attr, label in attributes_info]
    )
    attributes["attribute_template"] = attributes_info[:, 0]
    attributes["attribute_label"] = attributes_info[:, 1]
    attributes = image_attribute_labels.merge(attributes, on="attribute_id")
    unique_attributes = attributes.attribute_template.unique()

    return data, attributes, unique_attributes
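A short usage sketch: args only needs a data_root attribute pointing at the directory that contains the extracted CUB_200_2011 folder. The path below is a placeholder, and a SimpleNamespace stands in for the script's usual argparse namespace.

from types import SimpleNamespace

# Placeholder path: point this at the directory containing CUB_200_2011/.
args = SimpleNamespace(data_root="/path/to/datasets")

train_data, train_attributes, attribute_templates = load_data(args, split="train")
print(len(train_data), "training images")
print(len(attribute_templates), "attribute templates, e.g.", attribute_templates[0])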
<reponame>anuragajay/d4rl from gym.envs.registration import register from d4rl.locomotion import ant from d4rl.locomotion import maze_env """ register( id='antmaze-umaze-v0', entry_point='d4rl.locomotion.ant:make_ant_maze_env', max_episode_steps=700, kwargs={ 'maze_map': maze_env.U_MAZE_TEST, 'reward_type':'sparse', 'dataset_url':'http://rail.eecs.berkeley.edu/datasets/offline_rl/ant_maze_new/Ant_maze_u-maze_noisy_multistart_False_multigoal_False_sparse.hdf5', 'non_zero_reset':False, 'eval':True, 'maze_size_scaling': 4.0, 'ref_min_score': 0.0, 'ref_max_score': 1.0, } ) """ register( id='antmaze-umaze-v0', entry_point='d4rl.locomotion.ant:make_ant_maze_env', max_episode_steps=700, kwargs={ 'maze_map': maze_env.U_MAZE_TEST, 'reward_type':'sparse', 'dataset_url':'http://rail.eecs.berkeley.edu/datasets/offline_rl/ant_maze_new/Ant_maze_u-maze_noisy_multistart_False_multigoal_False_sparse.hdf5', 'non_zero_reset':False, 'eval':True, 'maze_size_scaling': 4.0, 'ref_min_score': 0.0, 'ref_max_score': 1.0, } ) register( id='antmaze-umaze-diverse-v0', entry_point='d4rl.locomotion.ant:make_ant_maze_env', max_episode_steps=700, kwargs={ 'maze_map': maze_env.U_MAZE_TEST, 'reward_type':'sparse', 'dataset_url':'http://rail.eecs.berkeley.edu/datasets/offline_rl/ant_maze_new/Ant_maze_u-maze_noisy_multistart_True_multigoal_True_sparse.hdf5', 'non_zero_reset':False, 'eval':True, 'maze_size_scaling': 4.0, 'ref_min_score': 0.0, 'ref_max_score': 1.0, } ) register( id='antmaze-umaze-diverse-dense-v0', entry_point='d4rl.locomotion.ant:make_ant_maze_env', max_episode_steps=700, kwargs={ 'maze_map': maze_env.U_MAZE_TEST, 'reward_type':'dense', 'dataset_url':'http://rail.eecs.berkeley.edu/datasets/offline_rl/ant_maze_new/Ant_maze_u-maze_noisy_multistart_True_multigoal_True_sparse.hdf5', 'non_zero_reset':False, 'eval':True, 'maze_size_scaling': 4.0, 'ref_min_score': 0.0, 'ref_max_score': 1.0, } ) register( id='antmaze-medium-play-v0', entry_point='d4rl.locomotion.ant:make_ant_maze_env', max_episode_steps=1000, kwargs={ 'maze_map': maze_env.BIG_MAZE_TEST, 'reward_type':'sparse', 'dataset_url':'http://rail.eecs.berkeley.edu/datasets/offline_rl/ant_maze_new/Ant_maze_big-maze_noisy_multistart_True_multigoal_False_sparse.hdf5', 'non_zero_reset':False, 'eval':True, 'maze_size_scaling': 4.0, 'ref_min_score': 0.0, 'ref_max_score': 1.0, } ) register( id='antmaze-medium-diverse-v1', entry_point='d4rl.locomotion.ant:make_ant_maze_env', max_episode_steps=1000, kwargs={ 'maze_map': maze_env.BIG_MAZE_TEST, 'reward_type':'sparse', 'dataset_url':'http://rail.eecs.berkeley.edu/datasets/offline_rl/ant_maze_new/Ant_maze_big-maze_noisy_multistart_True_multigoal_True_sparse.hdf5', 'non_zero_reset':False, 'eval':True, 'maze_size_scaling': 4.0, 'ref_min_score': 0.0, 'ref_max_score': 1.0, } ) register( id='antmaze-medium-diverse-dense-v1', entry_point='d4rl.locomotion.ant:make_ant_maze_env', max_episode_steps=1000, kwargs={ 'maze_map': maze_env.BIG_MAZE_TEST, 'reward_type':'dense', 'dataset_url':'http://rail.eecs.berkeley.edu/datasets/offline_rl/ant_maze_new/Ant_maze_big-maze_noisy_multistart_True_multigoal_True_sparse.hdf5', 'non_zero_reset':False, 'eval':True, 'maze_size_scaling': 4.0, 'ref_min_score': 0.0, 'ref_max_score': 1.0, } ) register( id='antmaze-medium-diverse-v0', entry_point='d4rl.locomotion.ant:make_ant_maze_env', max_episode_steps=1000, kwargs={ 'maze_map': maze_env.BIG_MAZE_TEST, 'reward_type':'sparse', 
'dataset_url':'https://www.dropbox.com/s/qknp3bpfmm61jaq/Ant_maze_big-maze_noisy_multistart_True_multigoal_new_True.hdf5?dl=1', 'non_zero_reset':False, 'eval':True, 'maze_size_scaling': 4.0, 'ref_min_score': 0.0, 'ref_max_score': 1.0, } ) register( id='antmaze-medium-diverse-dense-v0', entry_point='d4rl.locomotion.ant:make_ant_maze_env', max_episode_steps=1000, kwargs={ 'maze_map': maze_env.BIG_MAZE_TEST, 'reward_type':'dense', 'dataset_url':'https://www.dropbox.com/s/qknp3bpfmm61jaq/Ant_maze_big-maze_noisy_multistart_True_multigoal_new_True.hdf5?dl=1', 'non_zero_reset':False, 'eval':True, 'maze_size_scaling': 4.0, 'ref_min_score': 0.0, 'ref_max_score': 1.0, } ) register( id='antmaze-large-diverse-v0', entry_point='d4rl.locomotion.ant:make_ant_maze_env', max_episode_steps=1000, kwargs={ 'maze_map': maze_env.HARDEST_MAZE_TEST, 'reward_type':'sparse', 'dataset_url':'http://rail.eecs.berkeley.edu/datasets/offline_rl/ant_maze_new/Ant_maze_hardest-maze_noisy_multistart_True_multigoal_True_sparse.hdf5', 'non_zero_reset':False, 'eval':True, 'maze_size_scaling': 4.0, 'ref_min_score': 0.0, 'ref_max_score': 1.0, } ) register( id='antmaze-large-diverse-dense-v0', entry_point='d4rl.locomotion.ant:make_ant_maze_env', max_episode_steps=1000, kwargs={ 'maze_map': maze_env.HARDEST_MAZE_TEST, 'reward_type':'dense', 'dataset_url':'http://rail.eecs.berkeley.edu/datasets/offline_rl/ant_maze_new/Ant_maze_hardest-maze_noisy_multistart_True_multigoal_True_sparse.hdf5', 'non_zero_reset':False, 'eval':True, 'maze_size_scaling': 4.0, 'ref_min_score': 0.0, 'ref_max_score': 1.0, } ) register( id='antmaze-large-play-v0', entry_point='d4rl.locomotion.ant:make_ant_maze_env', max_episode_steps=1000, kwargs={ 'maze_map': maze_env.HARDEST_MAZE_TEST, 'reward_type':'sparse', 'dataset_url':'http://rail.eecs.berkeley.edu/datasets/offline_rl/ant_maze_new/Ant_maze_hardest-maze_noisy_multistart_True_multigoal_False_sparse.hdf5', 'non_zero_reset':False, 'eval':True, 'maze_size_scaling': 4.0, 'ref_min_score': 0.0, 'ref_max_score': 1.0, } )
Officers trying to establish whether there is any connection to far-right violent extremism Two Ukrainian men were being held after a suspected campaign of three bomb attacks in a month against mosques in the West Midlands which saw MI5 join the hunt for the explosives-makers. Police and disposal teams were searching for more bomb-making material on Friday, a day after it emerged that the remains of an exploded bomb and debris had been found at the site of the Wolverhampton Central Mosque. Officers were also trying to establish whether there was any connection to far-right violent extremism. The attempted attacks all occurred on a Friday, usually a day when mosques would be busy. Police in the region had feared earlier this week that another attack would be staged on Friday, the Guardian has learned. The concern caused by the bombs reflects rising tensions in the West Midlands which are also being stoked by a planned protest by the English Defence League in Birmingham city centre on Saturday, when about 1,500 supporters may attend. Police plan to deploy 1,000 officers to deal with the protest and a counter-march by anti-fascists. The two men were arrested on Thursday and searches of their home and work addresses followed in Small Heath, Birmingham, which has a sizeable Muslim community. On 21 June attempts were made to bomb mosques in Walsall, and a nail bomb was found outside a mosque in Tipton on 12 July. The newly discovered Wolverhampton bomb is thought to have exploded between the two dates on 28 June, just after the morning rush hour, after having been planted the night before. The chief constable of West Midlands police, Chris Sims, took the "extraordinary" step earlier this week of ordering officers to work extended 12-hour shifts as counter-terrorism investigators tried to catch the alleged perpetrators. During the investigation police concluded that there was information to trigger suspicions a device may have been placed near the Wolverhampton mosque some weeks before. That proved to be correct. The same source said other bomb-related material might be present at an address which continues to be searched. Counter-terrorism detectives were continuing to question the two Ukrainian men, aged 22 and 25, arrested on Thursday in connection with the suspected attempted attacks in Tipton and Walsall. The pair were being held on suspicion of being involved in the commission, preparation or instigation of an act of terrorism. West Midlands counter-terrorism police also searched a software company where the two men were believed to have been working on a university placement. Clive Martell, chief executive of the software company Delcam, provided a few further details about the arrested men: "The two men were on work placements with us, but were not employees of the business. They are studying at a foreign university and are engineering students." Martell told the Birmingham Mail that the two were studying in an eastern European country: "One of the men has been with us for four months and the other has been with us for two months. "They were around halfway through the course and we understand that they met each other whilst on the placement." West Midlands police said specialist officers called in to search the area around the mosque on Thursday had found the "seat of an explosion" and debris on a nearby roundabout at Wolverhampton. An area around the mosque was sealed off at 8pm on Thursday as a military bomb disposal team was deployed. 
Assistant Chief Constable Marcus Beale, who is in charge of the investigation, said: "Officers searching the area [around the Wolverhampton mosque] have found the seat of an explosion and debris on the island near the mosque."The investigation is being led by specialist officers and staff from our counter-terrorism unit who are being supported by a range of departments from across the force. "We recognise the impact news of the latest find will have on the communities of Wolverhampton and further afield. We're working hard to complete our inquiries so that the area can be returned to normality. "While the investigation is in its very early stages, early indications are that the explosion happened on Friday 28 June." Iftikhar Ahmed, representing Wolverhampton Central Mosque, which reopened for prayers on Friday, said there were anxieties in the area but they would be allayed. He said: "Of course there are concerns in the community but we have faith in the police and intelligence services that the incidents are being dealt with. "They already have two suspects in custody and for our community today it is work as normal." The Friday before, residents were evacuated after a blast near the Kanzul Iman Masjid (mosque) in Tipton left nails and debris scattered outside. Police believe the intention was to kill or maim after the attack in broad daylight. No one was injured but some minor damage to property was reported. A small component of the suspected nail bomb was found in a garden nearby and specialist army bomb disposal officers were called to the scene to carry out a controlled explosion. This came after more than 100 residents were evacuated from their homes in the Caldmore area of Walsall last month after an explosion near the Aisha Mosque and Islamic Centre. No one was hurt in the blast, which caused minimal damage to a wall near the mosque. Assistant Chief Constable Sharon Rowe said: "At the direction of the Chief Constable and with immediate effect, all police officers are to work 12 hour shifts. "This extraordinary requirement is to support a large scale policing operation in place across the region as the force manages two high-profile counter terrorism investigations and moves towards a weekend with a significant protest expected in Birmingham City Centre."The communities secretary, Eric Pickles, said: "The calm and measured community response to yesterday's discovery of a very real terrorist threat to worshippers at Wolverhampton Central Mosque showed a city united against hate. "The fact that Friday prayers went ahead today with hundreds of worshippers in attendance is proof yet again that those who seek to sow the seeds of division have failed."
/**
 * Remove the following intermediate metadata fields that are not user data from {@link TableRow}:
 * _metadata_error, _metadata_retry_count, _metadata_spanner_original_payload_json.
 */
private static TableRow removeIntermediateMetadataFields(TableRow tableRow) {
    TableRow cleanTableRow = tableRow.clone();
    Set<String> rowKeys = tableRow.keySet();
    Set<String> metadataFields = BigQueryUtils.getBigQueryIntermediateMetadataFieldNames();

    for (String rowKey : rowKeys) {
        if (metadataFields.contains(rowKey)) {
            cleanTableRow.remove(rowKey);
        }
    }
    return cleanTableRow;
}
import { connectRouter, RouterState, LocationChangeAction } from 'connected-react-router';
import { combineReducers, Reducer } from 'redux';

// local imports
import { History } from 'history';
import { testReducer, ITestState } from '../testDuck/reducers';

export interface IRootStateType {
  router: Reducer<RouterState, LocationChangeAction>;
  test: ITestState;
}

export const rootReducer = (history: History) =>
  combineReducers({
    router: connectRouter(history),
    test: testReducer
  });
package io.github.siddharthgoel88.useragents.impl;

import io.github.siddharthgoel88.useragents.UserAgent;

/**
 * UserAgents from latest to oldest for CerberianDrtrs
 */
public class CerberianDrtrs extends UserAgent {

    public String[] getAllUserAgentStrings() {
        String[] userAgentStrings = {
            "Mozilla/4.0 (compatible; Cerberian Drtrs Version-3.2-Build-1)",
            "Mozilla/4.0 (compatible; Cerberian Drtrs Version-3.2-Build-0)"
        };
        return userAgentStrings;
    }
}
/** * Reads line from the configuration file. * @throws IOException * @return String */ public String readLine() throws IOException { if (count > numberOfHosts) { return null; } String temp; try { temp = bufferedReader.readLine(); } catch (IOException ioe) { System.out.println("IOException"); ioe.printStackTrace(); return null; } if (temp == null || temp.equals("")) { System.out.println("empty or whitespace"); return "empty"; } if (temp.startsWith("#")) { return "#"; } temp = temp.trim(); count++; return temp; }
<gh_stars>1-10 package uk.gov.digital.ho.hocs.workflow.processes; import org.camunda.bpm.engine.test.Deployment; import org.camunda.bpm.engine.test.ProcessEngineRule; import org.camunda.bpm.engine.test.mock.Mocks; import org.camunda.bpm.extension.process_test_coverage.junit.rules.TestCoverageProcessEngineRule; import org.camunda.bpm.extension.process_test_coverage.junit.rules.TestCoverageProcessEngineRuleBuilder; import org.camunda.bpm.scenario.ProcessScenario; import org.camunda.bpm.scenario.Scenario; import org.junit.Before; import org.junit.ClassRule; import org.junit.Rule; import org.junit.Test; import org.junit.runner.RunWith; import org.mockito.Mock; import org.mockito.junit.MockitoJUnitRunner; import uk.gov.digital.ho.hocs.workflow.BpmnService; import static org.camunda.bpm.engine.test.assertions.ProcessEngineTests.withVariables; import static org.mockito.ArgumentMatchers.any; import static org.mockito.ArgumentMatchers.eq; import static org.mockito.Mockito.*; @RunWith(MockitoJUnitRunner.class) @Deployment(resources = "processes/COMP_REGISTRATION.bpmn") public class COMP_REGISTRATION { @Rule @ClassRule public static TestCoverageProcessEngineRule rule = TestCoverageProcessEngineRuleBuilder.create() .assertClassCoverageAtLeast(0.91) .build(); @Rule public ProcessEngineRule processEngineRule = new ProcessEngineRule(); @Mock BpmnService bpmnService; @Mock private ProcessScenario compRegistrationProcess; @Before public void defaultScenario() { Mocks.register("bpmnService", bpmnService); when(compRegistrationProcess.waitsAtUserTask("Validate_CorrespondentInput")) .thenReturn(task -> task.complete(withVariables("valid", true))); when(bpmnService.caseHasPrimaryCorrespondentType(any(), eq("COMPLAINANT"))).thenReturn(true); when(compRegistrationProcess.waitsAtUserTask("Validate_Complainant")) .thenReturn(task -> task.complete(withVariables("DIRECTION", "FORWARD"))); when(compRegistrationProcess.waitsAtUserTask("Validate_Complaint")) .thenReturn(task -> task.complete(withVariables("DIRECTION", "FORWARD","CompType", "Service"))); when(compRegistrationProcess.waitsAtUserTask("UserTask_0k00jya")) .thenReturn(task -> task.complete(withVariables("DIRECTION", "FORWARD"))); when(compRegistrationProcess.waitsAtUserTask("Validate_Category")) .thenReturn(task -> task.complete(withVariables("DIRECTION", "FORWARD", "CompType", "Not_Service"))); } @Test public void happyPath() { Scenario.run(compRegistrationProcess) .startByKey("COMP_REGISTRATION") .execute(); verify(compRegistrationProcess) .hasCompleted("Service_CaseHasPrimaryCorrespondentType"); verify(compRegistrationProcess) .hasCompleted("ServiceTask_0pumxnf"); } @Test public void backwardsFromValidateComplainant(){ when(compRegistrationProcess.waitsAtUserTask("Validate_Complainant")) .thenReturn(task -> task.complete(withVariables("DIRECTION", "BACKWARD"))) .thenReturn(task -> task.complete(withVariables("DIRECTION", "FORWARD"))); Scenario.run(compRegistrationProcess) .startByKey("COMP_REGISTRATION") .execute(); verify(compRegistrationProcess, times(2)) .hasCompleted("Service_CaseHasPrimaryCorrespondentType"); verify(compRegistrationProcess) .hasCompleted("ServiceTask_0pumxnf"); } @Test public void hasNoPrimaryCorrespondent(){ when(bpmnService.caseHasPrimaryCorrespondentType(any(), eq("COMPLAINANT"))) .thenReturn(false) .thenReturn(true); when(compRegistrationProcess.waitsAtUserTask("Validate_InvalidCorrespondents")) .thenReturn(task -> task.complete(withVariables("valid", true))); Scenario.run(compRegistrationProcess) 
.startByKey("COMP_REGISTRATION") .execute(); verify(compRegistrationProcess, times(2)) .hasCompleted("Service_CaseHasPrimaryCorrespondentType"); verify(compRegistrationProcess) .hasCompleted("Screen_InvalidCorrespondents"); verify(compRegistrationProcess) .hasCompleted("ServiceTask_0pumxnf"); } @Test public void backwardsFromValidateComplaint(){ when(compRegistrationProcess.waitsAtUserTask("Validate_Complaint")) .thenReturn(task -> task.complete(withVariables("DIRECTION", "BACKWARD"))) .thenReturn(task -> task.complete(withVariables("DIRECTION", "FORWARD", "CompType", "Service"))); Scenario.run(compRegistrationProcess) .startByKey("COMP_REGISTRATION") .execute(); verify(compRegistrationProcess) .hasCompleted("Service_CaseHasPrimaryCorrespondentType"); verify(compRegistrationProcess) .hasCompleted("ServiceTask_0pumxnf"); } @Test public void backwardsFromValidateInput(){ when(compRegistrationProcess.waitsAtUserTask("UserTask_0k00jya")) .thenReturn(task -> task.complete(withVariables("DIRECTION", "BACKWARD"))) .thenReturn(task -> task.complete(withVariables("DIRECTION", "FORWARD"))); Scenario.run(compRegistrationProcess) .startByKey("COMP_REGISTRATION") .execute(); verify(compRegistrationProcess) .hasCompleted("Service_CaseHasPrimaryCorrespondentType"); verify(compRegistrationProcess) .hasCompleted("ServiceTask_0pumxnf"); } @Test public void backwardsFromValidateCategory(){ when(compRegistrationProcess.waitsAtUserTask("Validate_Category")) .thenReturn(task -> task.complete(withVariables("DIRECTION", "BACKWARD", "CompType", "Not_Service"))) .thenReturn(task -> task.complete(withVariables("DIRECTION", "FORWARD", "CompType", "Not_Service"))); Scenario.run(compRegistrationProcess) .startByKey("COMP_REGISTRATION") .execute(); verify(compRegistrationProcess) .hasCompleted("Service_CaseHasPrimaryCorrespondentType"); verify(compRegistrationProcess) .hasCompleted("ServiceTask_0pumxnf"); } @Test public void updateTeamForServiceTriage(){ when(compRegistrationProcess.waitsAtUserTask("Validate_Category")) .thenReturn(task -> task.complete(withVariables("DIRECTION", "FORWARD", "CompType", "Service"))); Scenario.run(compRegistrationProcess) .startByKey("COMP_REGISTRATION") .execute(); verify(compRegistrationProcess) .hasCompleted("Service_CaseHasPrimaryCorrespondentType"); verify(compRegistrationProcess) .hasCompleted("ServiceTask_0pumxnf"); verify(compRegistrationProcess) .hasCompleted("Service_UpdateTeamByStageAndTexts"); } @Test public void skipCategoryIfExGratia(){ when(compRegistrationProcess.waitsAtUserTask("Validate_Complaint")) .thenReturn(task -> task.complete(withVariables("DIRECTION", "FORWARD", "CompType", "Ex-Gratia"))); when(compRegistrationProcess.waitsAtUserTask("ExGracia_Input")) .thenReturn(task -> task.complete(withVariables("DIRECTION", "BACKWARD", "CompType", "Service"))) .thenReturn(task -> task.complete(withVariables("DIRECTION", "FORWARD", "valid", "false", "CompType", "Service"))) .thenReturn(task -> task.complete(withVariables("DIRECTION", "FORWARD", "valid", "true","CompType", "Service", "CompType", "Ex-Gratia"))); Scenario.run(compRegistrationProcess) .startByKey("COMP_REGISTRATION") .execute(); verify(compRegistrationProcess) .hasCompleted("UpdateTeamForExGracia"); } @Test public void assignToMinorMisconduct(){ when(compRegistrationProcess.waitsAtUserTask("Validate_Complaint")) .thenReturn(task -> task.complete(withVariables("DIRECTION", "FORWARD", "CompType", "MinorMisconduct"))); when(compRegistrationProcess.waitsAtUserTask("ExGracia_Input")) .thenReturn(task -> 
task.complete(withVariables("DIRECTION", "BACKWARD", "CompType", "Service"))) .thenReturn(task -> task.complete(withVariables("DIRECTION", "FORWARD", "valid", "false", "CompType", "MinorMisconduct"))) .thenReturn(task -> task.complete(withVariables("DIRECTION", "FORWARD", "valid", "true","CompType", "MinorMisconduct"))); Scenario.run(compRegistrationProcess) .startByKey("COMP_REGISTRATION") .execute(); verify(compRegistrationProcess) .hasCompleted("UpdateTeamForMinorMisconduct"); } }
import collections
from os import path

import orjson

# NOTE: Descriptor is assumed to be provided elsewhere in this package.


class ProteinDescriptors:
    """A class used for caching available protein descriptors."""

    def __init__(self, json: str = None):
        """Instantiate a new ProteinDescriptors.

        :param json: Path to json file or file handle describing descriptors
        """
        if json is None:
            self.update_available_descriptors(path.join(path.dirname(__file__), 'data.json'))
        else:
            if not path.isfile(json):
                raise Exception(f'File {json} does not exist')
            else:
                self.update_available_descriptors(json)
        self.cache = {}

    @property
    def available_descriptors(self):
        """Give descriptors loaded for calculation."""
        return sorted((descriptor['ID'] if 'ID' in descriptor.keys() else descriptor['Name']
                       for descriptor in self.descriptors),
                      key=str.casefold)

    def update_available_descriptors(self, json: str):
        """Read descriptor file and update available descriptors.

        :param json: Path to json file or file handle describing descriptors
        """
        if isinstance(json, str):
            with open(json, 'rb') as handle:
                descs = handle.read().decode('utf-8')
        else:
            descs = json.read().decode('utf-8')
        self.descriptors = orjson.loads(descs)
        self.__check_uniqueness__()

    def __check_uniqueness__(self):
        """Check IDs of descriptors are unique."""
        ids = (descriptor['ID'] for descriptor in self.descriptors)
        non_unique_ids = filter(lambda x: x[1] > 1, collections.Counter(ids).items())
        if len(list(non_unique_ids)) != 0:
            raise Exception('Non unique descriptor ID')

    def get_descriptor(self, id):
        """Get Descriptor instance from ID.

        :param id: ID of the descriptor
        """
        if id not in self.cache.keys():
            for descriptor in self.descriptors:
                if descriptor['ID'] == id:
                    self.cache[id] = Descriptor(descriptor)
        return self.cache[id]
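A quick usage sketch for the class above. The two descriptor entries are hypothetical; only the 'ID'/'Name' keys are implied by the code, and orjson must be installed:

import os
import tempfile

# Hypothetical two-entry descriptor file using the keys the class reads.
tmp = tempfile.NamedTemporaryFile('wb', suffix='.json', delete=False)
tmp.write(b'[{"ID": "Zscale", "Name": "Z-scales"}, {"ID": "VHSE"}]')
tmp.close()

descriptors = ProteinDescriptors(json=tmp.name)
print(descriptors.available_descriptors)  # ['VHSE', 'Zscale']
os.unlink(tmp.name)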
<gh_stars>100-1000 #!/usr/bin/env python # -*- coding: utf-8 -*- import os from jinja2 import Environment, FileSystemLoader, FileSystemBytecodeCache from tornado.template import Loader from torngas.settings_manager import settings from jinja2.defaults import * from jinja2.runtime import Undefined _CACHE = FileSystemBytecodeCache() _LOADER = FileSystemLoader([]) cfg = settings.TEMPLATE_CONFIG _JINJA_ENV = Environment(bytecode_cache=_CACHE, autoescape=cfg.get('autoescape', False), cache_size=cfg.get('cache_size', 50), auto_reload=cfg.get('filesystem_checks', True), loader=_LOADER, block_start_string=cfg.get('block_start_string', BLOCK_START_STRING), block_end_string=cfg.get('block_end_string', BLOCK_END_STRING), variable_start_string=cfg.get('variable_start_string', VARIABLE_START_STRING), variable_end_string=cfg.get('variable_end_string', VARIABLE_END_STRING), comment_start_string=cfg.get('comment_start_string', COMMENT_START_STRING), comment_end_string=cfg.get('comment_end_string', COMMENT_END_STRING), line_statement_prefix=cfg.get('line_statement_prefix', LINE_STATEMENT_PREFIX), line_comment_prefix=cfg.get('line_comment_prefix', LINE_COMMENT_PREFIX), trim_blocks=cfg.get('trim_blocks', TRIM_BLOCKS), lstrip_blocks=cfg.get('lstrip_blocks', LSTRIP_BLOCKS), newline_sequence=cfg.get('newline_sequence', NEWLINE_SEQUENCE), keep_trailing_newline=cfg.get('keep_trailing_newline', KEEP_TRAILING_NEWLINE), extensions=cfg.get('extensions', ()), optimized=cfg.get('optimized', True), undefined=cfg.get('undefined', Undefined), finalize=cfg.get('finalize', None)) class Jinja2TemplateLoader(Loader): def __init__(self, root_directory='', **kwargs): super(Jinja2TemplateLoader, self).__init__(root_directory, **kwargs) path = os.path.abspath(root_directory) _JINJA_ENV.loader.searchpath = [path] cache_dir = os.path.abspath(settings.TEMPLATE_CONFIG.cache_directory) if not os.path.exists(cache_dir): os.makedirs(cache_dir) _CACHE.directory = cache_dir def load(self, name, parent_path=None): with self.lock: if os.path.isabs(name): path, file = os.path.split(name) _JINJA_ENV.loader.searchpath = [path] template = _JINJA_ENV.get_template(file) else: template = _JINJA_ENV.get_template(name) template.generate = template.render return template def reset(self): if hasattr(_JINJA_ENV, 'bytecode_cache') and _JINJA_ENV.bytecode_cache: _JINJA_ENV.bytecode_cache.clear()
Reichswehreid and, from August 1934, Führereid were the names for three different versions of the oath of allegiance of the German Armed Forces, called Reichswehr from 1919 to 1935 and then Wehrmacht until 1945. The original Reichswehreid came into effect on 14 August 1919, shortly after Reichspräsident Friedrich Ebert had signed the Weimar Constitution for the German Reich (the so-called Weimar Republic). The Treaty of Versailles limited the Reichswehr to a total of 100,000 men.

German: Ich schwöre Treue der Reichsverfassung und gelobe, daß ich als tapferer Soldat das Deutsche Reich und seine gesetzmäßigen Einrichtungen jederzeit schützen, dem Reichspräsidenten und meinen Vorgesetzten Gehorsam leisten will.
English: I swear loyalty to the Reich's constitution and pledge, that I as a courageous soldier always want to protect the German Reich and its legal institutions, (and) be obedient to the Reichspräsident and to my superiors.

In January 1933, Adolf Hitler was appointed Reichskanzler and the Enabling Act and Gleichschaltung came into effect. As a result, a new wording was adopted on 1 December 1933:

German: Ich schwöre bei Gott diesen heiligen Eid, daß ich meinem Volk und Vaterland allzeit treu und redlich dienen und als tapferer und gehorsamer Soldat bereit sein will, jederzeit für diesen Eid mein Leben einzusetzen.
English: I swear by God this holy oath, that I want to ever loyally and sincerely serve my people and fatherland and be prepared as a brave and obedient soldier to risk my life for this oath at any time.

Notably, all references to the constitution and the office of Reichspräsident, then held by aging war hero Paul von Hindenburg, had already been removed. Instead, more emphasis was put on religion and patriotism. In August 1934, after Hindenburg died, Hitler merged the offices of Reichspräsident and Reichskanzler, and declared himself Führer and Reichskanzler. War Minister Werner von Blomberg issued a new wording which became known as Führereid, the "Hitler oath":

German: Ich schwöre bei Gott diesen heiligen Eid, daß ich dem Führer des Deutschen Reiches und Volkes, Adolf Hitler, dem Oberbefehlshaber der Wehrmacht, unbedingten Gehorsam leisten und als tapferer Soldat bereit sein will, jederzeit für diesen Eid mein Leben einzusetzen.
English: I swear by God this holy oath, that I want to offer unconditional obedience to the Führer of the German Reich and people, Adolf Hitler, the commander-in-chief of the Wehrmacht, and be prepared as a brave soldier to risk my life for this oath at any time.

Now Volk and Fatherland had been superseded by the person of Hitler himself, who would be Führer and supreme commander. In addition, the obedience was now to be unconditional.
// make a unique data set and digest for it func (lt *ApiserverLoadTester) makeUploadContent(numKBytes int) ([]byte, *remoteexecution.Digest) { size := int64(numKBytes * KBYTE) theData := make([]byte, 0) uniqPref := lt.makeDummyData(KBYTE) theData = append(theData, uniqPref[:]...) if size > KBYTE { theData = append(theData, lt.data[0:size-KBYTE]...) } t := sha256.Sum256(theData) dataSha := fmt.Sprintf("%x", t) digest := &remoteexecution.Digest{Hash: dataSha, SizeBytes: size} return theData, digest }
// Test is the method called to perform all of the tests that are present in the Graph. The typical // usage of this package finishes by calling Graph.Test() func (g *Graph) Test(t *testing.T) error { if !g.validated { if err := g.Validate(); err != nil { return err } } for _, n := range g.topoSorted { for _, in := range n.requires { if in.failed { n.failed = true } if !in.done { panic(FunctionNotExecutedErr) } } if n.failed { if !ignoreNilTesting || t != nil { t.Logf("Function %q (%p) had requirements fail", n.name, n.fn) } n.done = true continue } if !ignoreNilTesting || t != nil { n.failed = !t.Run(n.name, n.fn) } else { n.fn(nil) } n.done = true } return nil }
<gh_stars>10-100 mod convert; mod timer; pub use self::convert::*; /// A measurement of a monotonically nondecreasing clock. /// Opaque and useful only with `Duration`. /// /// Instants are always guaranteed to be no less than any previously measured /// instant when created, and are often useful for tasks such as measuring /// benchmarks or timing how long an operation takes. /// /// Note, however, that instants are not guaranteed to be **steady**. In other /// words, each tick of the underlying clock might not be the same length (e.g. /// some seconds may be longer than others). An instant may jump forwards or /// experience time dilation (slow down or speed up), but it will never go /// backwards. /// /// Instants are opaque types that can only be compared to one another. There is /// no method to get "the number of seconds" from an instant. Instead, it only /// allows measuring the duration between two instants (or comparing two /// instants). /// /// The size of an `Instant` struct may vary depending on the target operating /// system. pub use instant::Instant; pub(crate) use self::timer::*; /// Returns an instant corresponding to “now”. pub fn now() -> Instant { Instant::now() }
def add(self, P: Point, Q: Point) -> Point:
    if P.x == Q.x and P.y == Q.y:
        # Point doubling: tangent slope m = (3x^2 + a) / (2y) mod p.
        m = mod_division(3 * (P.x ** 2) + self.a, 2 * P.y, self.p)
    elif P.x == Q.x:
        # P and Q are inverses of each other, so P + Q is the point at
        # infinity, which this Point representation does not appear to model
        # (doubling a point with y == 0 would also yield infinity).
        raise ValueError("P + Q is the point at infinity")
    else:
        # Chord slope m = (y1 - y2) / (x1 - x2) mod p.
        m = mod_division(P.y - Q.y, P.x - Q.x, self.p)
    r_x = m * m - P.x - Q.x
    r_y = P.y + m * (r_x - P.x)
    # Reflect over the x-axis and reduce mod p to obtain R = P + Q.
    R = Point(r_x % self.p, -r_y % self.p)
    return R
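A standalone sanity check of the affine addition rule above. This is a minimal sketch that restates the same formulas with plain tuples rather than the original class; the toy curve y^2 = x^3 + 2x + 2 over F_17 and the point (5, 1) are hypothetical example parameters, and Python 3.8+ is assumed for pow(den, -1, p):

def _mod_div(num, den, p):
    # Modular division via the modular inverse (Python 3.8+).
    return (num * pow(den, -1, p)) % p

def _add_affine(p1, p2, a, p):
    x1, y1 = p1
    x2, y2 = p2
    if p1 == p2:
        m = _mod_div(3 * x1 * x1 + a, 2 * y1, p)  # tangent slope for doubling
    else:
        m = _mod_div(y1 - y2, x1 - x2, p)         # chord slope
    x3 = (m * m - x1 - x2) % p
    y3 = (m * (x1 - x3) - y1) % p
    return x3, y3

# Doubling (5, 1) on y^2 = x^3 + 2x + 2 over F_17 should give (6, 3).
assert _add_affine((5, 1), (5, 1), a=2, p=17) == (6, 3)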
def call_fitness(self, individual, q):
    regex_string = individual.phenotype
    try:
        compiled_regex = re.compile(regex_string)
        eval_results = self.test_regex(compiled_regex)
        result_error, time_sum = self.calculate_fitness(eval_results)
        fitness = result_error + time_sum
        q.put(fitness)
    except Exception:
        # Invalid or failing regexes fall back to the default fitness.
        q.put(RegexEval.default_fitness)
<filename>admin_list_controls/src/admin_list_controls/types.ts export interface State { show_filters: boolean; show_sorts: boolean; show_reset_button: boolean; description: string; filtering_options: FilterOptions; filter_map: {[name: string]: Filter}; has_filters: boolean; some_filters_have_values: boolean; sorting_options: SortOptions; sorts: Sort[]; has_sorts: boolean; some_sorts_are_selected: boolean; layout_options: LayoutOptions; layouts: Layout[]; has_layouts: boolean; } export type FilterOptionChildren = FilterGroup | Filter; export interface FilterOptions { object_type: string; children: FilterOptionChildren[]; } export interface FilterGroup { object_type: string; title: string; children: FilterOptionChildren[]; } export interface Filter { object_type: string; type: string; value: boolean | string | string[]; label: string; name: string; multiple: boolean; width: string; } export interface SortOptions { object_type: string; children: Sort[]; } export interface Sort { object_type: string; name: string; label: string; value: string; is_selected: boolean; results_description: string; is_default: boolean; } export interface LayoutOptions { object_type: string; children: Layout[]; } export interface Layout { object_type: string; name: string; label: string; value: string; is_selected: boolean; icon_class: string; template: string; } export interface BooleanFilter extends Filter {} export interface ChoiceFilter extends Filter { choices: ChoiceFilterChoice[]; clearable: boolean; } export interface ChoiceFilterChoice { value: string; label: string; } export interface RadioFilter extends Filter { choices: RadioFilterChoice[]; } export interface RadioFilterChoice { name: string; value: string; label: string; } export interface StringFilter extends Filter {}
class BaseNode:
    '''Base functionality all nodes inherit'''
    input_types = {'pick', 'type'}

    def __init__(self, id, data=None):
        data = data or {}
        self.id = id
        self.title = data.get('title', id)
        self.content = data.get('content', '')
        self.input = data.get('input', 'pick')
        if self.input not in self.input_types:
            self.input = 'pick'
        self.choices = [
            load_choice(self, choice)
            for choice in (data.get('choices') or [])
        ]
        self.is_start = data.get('is_start', False)

    def interpret(self, received):
        if self.input == 'pick':
            try:
                return self.choices[int(received) - 1].leads_to
            except (TypeError, ValueError, IndexError):
                # Non-numeric or out-of-range picks are treated as no match.
                return None
        elif self.input == 'type':
            result = max(x.interpret(received) for x in self.choices)
            if result[0] is None:
                return None
            return result[1]

    def __str__(self):
        '''Returns markdown representation of itself'''
        paragraphs = ['# ' + self.title] if self.title else []
        paragraphs.append(self.content)
        if self.input == 'pick':
            paragraphs.append(
                '\n'.join(
                    '**%s.** %s' % (x+1, choice)
                    for (x, choice) in enumerate(self.choices)
                )
            )
        return '\n\n'.join(paragraphs)
def parse_from_string(self, data, source, destination, key=None):
    if key:
        self.key = binascii.unhexlify(key)
    self.source = source
    self.destination = destination
    # Byte 0 is the frame-control field; the lowest two bits hold the frame type.
    frame_control = data[0]
    self.frame_type = frame_control & 0b11
    if self.frame_type == Rf4ceConstants.FRAME_TYPE_RESERVED:
        raise Rf4ceException("Unknown frame type")
    # Bit 2 flags whether the frame payload is ciphered.
    self.frame_ciphered = bool(frame_control & (1 << 2))
    # Bits 3-4 carry the protocol version, bits 6-7 the channel designator.
    self.protocol_version = (frame_control >> 3) & 0b11
    self.channel_designator = (frame_control >> 6) & 0b11
    # Bytes 1-4 hold the 32-bit frame counter.
    self.frame_counter = struct.unpack("I", data[1:5])[0]
    # Dispatch on the frame type; vendor frames reuse the data-frame parser.
    if self.frame_type == Rf4ceConstants.FRAME_TYPE_DATA:
        self.data_frame_from_string(data)
    elif self.frame_type == Rf4ceConstants.FRAME_TYPE_COMMAND:
        self.command_frame_from_string(data)
    elif self.frame_type == Rf4ceConstants.FRAME_TYPE_VENDOR:
        self.data_frame_from_string(data, True)
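A quick decoding sketch using the same bit masks as the parser above, applied to a hypothetical frame-control byte; the field meanings follow the code itself, not an external specification:

fc = 0b01001101                        # hypothetical frame-control byte
frame_type = fc & 0b11                 # -> 1
ciphered = bool(fc & (1 << 2))         # -> True
protocol_version = (fc >> 3) & 0b11    # -> 1
channel_designator = (fc >> 6) & 0b11  # -> 1
print(frame_type, ciphered, protocol_version, channel_designator)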
<filename>include/kt_sub_polynomial.hpp #ifndef _DISP_POLY_ #define _DISP_POLY_ #include "constants.hpp" // --------------------------------------------------------------------------- // Super simple class to output an nth order polynomial in a conformal variable // --------------------------------------------------------------------------- class subtraction_polynomial { protected: const complex<double> s_inelastic = xr; const complex<double> s_expand = 0.; bool use_conformal; public: subtraction_polynomial(bool conf) : use_conformal(conf) {}; // ---------------------------------------------------------------------------- // Conformal variable which maps the cut plane in complex s to the unit disk complex<double> conformal(complex<double> s, int ieps) { complex<double> numerator, denominator; numerator = sqrt(s_inelastic - s_expand) - sqrt(s_inelastic - s + xi * double(ieps) * EPS); denominator = sqrt(s_inelastic - s_expand) + sqrt(s_inelastic - s + xi * double(ieps) * EPS); return numerator / denominator; }; // ---------------------------------------------------------------------------- // Outputs a polynomial of order n with unit coefficients in the above conformal variable complex<double> operator() (int n, complex<double> s, int ieps) { if (use_conformal == true) { return pow(conformal(s, ieps), double(n)); } else { return pow(s * xr, double(n)); } }; }; #endif
/**
    Convenience nested class that adapts a <code>Method</code> into a <code>Function</code>
    @aribaapi ariba
*/
public static class MethodFunction<T> extends Function<T>
{
    private Method _method;

    public MethodFunction (Method method)
    {
        _method = method;
    }

    public T evaluate (Object... arguments)
    {
        try {
            // Assumes a static method: pass null as the receiver and the
            // varargs as the actual invocation arguments.
            return (T)_method.invoke(null, arguments);
        }
        catch (Exception ex) {
            throw new EvaluationException(ex);
        }
    }
}
// Go Substrate RPC Client (GSRPC) provides APIs and types around Polkadot and any Substrate-based chain RPC calls // // Copyright 2019 Centrifuge GmbH // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package types_test import ( "fmt" "math/big" "testing" "github.com/centrifuge/go-substrate-rpc-client/scale" . "github.com/centrifuge/go-substrate-rpc-client/types" "github.com/stretchr/testify/assert" ) var examplePhaseApp = Phase{ IsApplyExtrinsic: true, AsApplyExtrinsic: 42, } var examplePhaseFin = Phase{ IsFinalization: true, } var exampleEventApp = EventSystemExtrinsicSuccess{ Phase: examplePhaseApp, DispatchInfo: DispatchInfo{Weight: 10000, Class: DispatchClass{IsNormal: true}, PaysFee: true}, Topics: []Hash{{1, 2}}, } var exampleEventFin = EventSystemExtrinsicSuccess{ Phase: examplePhaseFin, DispatchInfo: DispatchInfo{Weight: 10000, Class: DispatchClass{IsNormal: true}, PaysFee: true}, Topics: []Hash{{1, 2}}, } var exampleEventAppEnc = []byte{0x0, 0x2a, 0x0, 0x0, 0x0, 0x10, 0x27, 0x0, 0x0, 0x0, 0x1, 0x4, 0x1, 0x2, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} //nolint:lll var exampleEventFinEnc = []byte{0x1, 0x10, 0x27, 0x0, 0x0, 0x0, 0x1, 0x4, 0x1, 0x2, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} //nolint:lll func TestEventSystemExtrinsicSuccess_Encode(t *testing.T) { encoded, err := EncodeToBytes(exampleEventFin, scale.EncoderOptions{}) assert.NoError(t, err) assert.Equal(t, exampleEventFinEnc, encoded) encoded, err = EncodeToBytes(exampleEventApp, scale.EncoderOptions{}) assert.NoError(t, err) assert.Equal(t, exampleEventAppEnc, encoded) } func TestEventSystemExtrinsicSuccess_Decode(t *testing.T) { decoded := EventSystemExtrinsicSuccess{} err := DecodeFromBytes(exampleEventFinEnc, &decoded, scale.EncoderOptions{}) assert.NoError(t, err) assert.Equal(t, exampleEventFin, decoded) decoded = EventSystemExtrinsicSuccess{} err = DecodeFromBytes(exampleEventAppEnc, &decoded, scale.EncoderOptions{}) assert.NoError(t, err) assert.Equal(t, exampleEventApp, decoded) } func TestEventRecordsRaw_Decode_FailsNumFields(t *testing.T) { e := EventRecordsRaw(MustHexDecodeString("0x0400020000000302d43593c715fdd31c61141abd04a99fd6822c8558854ccde39a5684e7a56da27d8eaf04151687736326c9fea17e25fc5287613693c912909cb226aa4794f26a48266d00000000000000000000000000000010a5d4e8000000000000000000000000")) //nolint:lll events := struct { Balances_Transfer []struct{ Abc uint8 } //nolint:stylecheck,golint }{} err := e.DecodeEventRecords(ExamplaryMetadataV8, &events) assert.EqualError(t, err, "expected event #0 with EventID [3 2], field Balances_Transfer to have at least 2 fields (for Phase and Topics), but has 1 fields") //nolint:lll } func TestEventRecordsRaw_Decode_FailsFirstNotPhase(t *testing.T) { e := 
EventRecordsRaw(MustHexDecodeString("0x0400020000000302d43593c715fdd31c61141abd04a99fd6822c8558854ccde39a5684e7a56da27d8eaf04151687736326c9fea17e25fc5287613693c912909cb226aa4794f26a48266d00000000000000000000000000000010a5d4e8000000000000000000000000")) //nolint:lll events := struct { Balances_Transfer []struct { //nolint:stylecheck,golint P uint8 Other uint32 T []Hash } }{} err := e.DecodeEventRecords(ExamplaryMetadataV8, &events) assert.EqualError(t, err, "expected the first field of event #0 with EventID [3 2], field Balances_Transfer to be of type types.Phase, but got uint8") //nolint:lll } func TestEventRecordsRaw_Decode_FailsLastNotHash(t *testing.T) { e := EventRecordsRaw(MustHexDecodeString("0x0400020000000302d43593c715fdd31c61141abd04a99fd6822c8558854ccde39a5684e7a56da27d8eaf04151687736326c9fea17e25fc5287613693c912909cb226aa4794f26a48266d00000000000000000000000000000010a5d4e8000000000000000000000000")) //nolint:lll events := struct { Balances_Transfer []struct { //nolint:stylecheck,golint P Phase Other uint32 T Phase } }{} err := e.DecodeEventRecords(ExamplaryMetadataV8, &events) assert.EqualError(t, err, "expected the last field of event #0 with EventID [3 2], field Balances_Transfer to be of type []types.Hash for Topics, but got types.Phase") //nolint:lll } func ExampleEventRecordsRaw_Decode() { e := EventRecordsRaw(MustHexDecodeString( "0x10" + "0000000000" + "0000" + "10270000" + // Weight "01" + // Operational "01" + // PaysFee "00" + "0001000000" + "0000" + "10270000" + // Weight "01" + // operational "01" + // PaysFee "00" + "0001000000" + // ApplyExtrinsic(1) "0302" + // Balances_Transfer "d43593c715fdd31c61141abd04a99fd6822c8558854ccde39a5684e7a56da27d" + // From "8eaf04151687736326c9fea17e25fc5287613693c912909cb226aa4794f26a48" + // To "391b0000000000000000000000000000" + // Value "00" + // Topics "0002000000" + "0000" + "10270000" + // Weight "00" + // Normal "01" + // PaysFee "00", )) events := EventRecords{} err := e.DecodeEventRecords(ExamplaryMetadataV8, &events) if err != nil { panic(err) } fmt.Printf("Got %v System_ExtrinsicSuccess events\n", len(events.System_ExtrinsicSuccess)) fmt.Printf("Got %v Balances_Transfer events\n", len(events.Balances_Transfer)) t := events.Balances_Transfer[0] fmt.Printf("Transfer: %v tokens from %#x to\n%#x", t.Value, t.From, t.To) // Output: Got 3 System_ExtrinsicSuccess events // Got 1 Balances_Transfer events // Transfer: 6969 tokens from 0xd43593c715fdd31c61141abd04a99fd6822c8558854ccde39a5684e7a56da27d to // 0x8eaf04151687736326c9fea17e25fc5287613693c912909cb226aa4794f26a48 } func TestEventRecordsRaw_Decode(t *testing.T) { e := EventRecordsRaw(MustHexDecodeString( "0x40" + // (len 15) << 2 "0000000000" + // ApplyExtrinsic(0) "0300" + // Balances_Endowed "d43593c715fdd31c61141abd04a99fd6822c8558854ccde39a5684e7a56da27d" + // Who "676b95d82b0400000000000000000000" + // Balance U128 "00" + // Topics "0000000000" + // ApplyExtrinsic(0) "0301" + // Balances_DustLost "d43593c715fdd31c61141abd04a99fd6822c8558854ccde39a5684e7a56da27d" + // Who "676b95d82b0400000000000000000000" + // Balance U128 "00" + // Topics "0001000000" + // ApplyExtrinsic(1) "0302" + // Balances_Transfer "d43593c715fdd31c61141abd04a99fd6822c8558854ccde39a5684e7a56da27d" + // From "8eaf04151687736326c9fea17e25fc5287613693c912909cb226aa4794f26a48" + // To "391b0000000000000000000000000000" + // Value "00" + // Topics "0000000000" + // ApplyExtrinsic(0) "0303" + // Balances_BalanceSet "d43593c715fdd31c61141abd04a99fd6822c8558854ccde39a5684e7a56da27d" + // Who 
"676b95d82b0400000000000000000000" + // Free U128 "676b95d82b0400000000000000000000" + // Reserved U128 "00" + // Topics "0000000000" + // ApplyExtrinsic(0) "0304" + // Balances_Deposit "d43593c715fdd31c61141abd04a99fd6822c8558854ccde39a5684e7a56da27d" + // Who "676b95d82b0400000000000000000000" + // Balance U128 "00" + // Topics "0000000000" + // ApplyExtrinsic(0) "0200" + // Indices_IndexAssigned "8eaf04151687736326c9fea17e25fc5287613693c912909cb226aa4794f26a48" + // Who "39300000" + // AccountIndex "00" + // Topics "0000000000" + // ApplyExtrinsic(0) "0201" + // Indices_IndexFreed "39300000" + // AccountIndex "00" + // Topics "0000000000" + // ApplyExtrinsic(0) "1000" + // Offences_Offence "696d2d6f6e6c696e653a6f66666c696e" + // Kind "10c5000000" + // OpaqueTimeSlot "00" + // Topics "0000000000" + // ApplyExtrinsic(0) "0400" + // Staking_Reward "676b95d82b0400000000000000000000" + // Balance U128 "00000000000000000000000000000000" + // Remainder U128 "00" + // Topics "0000000000" + // ApplyExtrinsic(0) "0500" + // Session_NewSession "c6000000" + // SessionIndex U32 "00" + // Topics "0000000000" + // ApplyExtrinsic(0) "0000" + // System_ExtrinsicSuccess "10270000" + // Weight "01" + // DispatchClass: Operational "01" + // PaysFees "00" + // Topics "0001000000" + // ApplyExtrinsic(1) "0000" + // System_ExtrinsicSuccess "10270000" + // Weight "00" + // DispatchClass: Normal "01" + // PaysFees "00" + // Topics "0002000000" + // ApplyExtrinsic(2) "0001" + // System_ExtrinsicFailed "01" + // HasModule "0b" + // Module "00" + // Error "10270000" + // Weight "01" + // DispatchClass: Operational "01" + // PaysFees "00" + // Topics "0000000000" + // ApplyExtrinsic(0) "0002" + // System_CodeUpdated "00" + // Topics "0000000000" + // ApplyExtrinsic(0) "0003" + // System_NewAccount "<KEY>" + // Who "00" + // Topics "0000000000" + // ApplyExtrinsic(0) "0004" + // System_KilledAccount "<KEY>" + // Who "00", // Topics )) //nolint:lll events := EventRecords{} err := e.DecodeEventRecords(ExamplaryMetadataV11Substrate, &events) if err != nil { panic(err) } exp := EventRecords{ Balances_Endowed: []EventBalancesEndowed{{Phase: Phase{IsApplyExtrinsic: true, AsApplyExtrinsic: 0x0, IsFinalization: false}, Who: AccountID{0xd4, 0x35, 0x93, 0xc7, 0x15, 0xfd, 0xd3, 0x1c, 0x61, 0x14, 0x1a, 0xbd, 0x4, 0xa9, 0x9f, 0xd6, 0x82, 0x2c, 0x85, 0x58, 0x85, 0x4c, 0xcd, 0xe3, 0x9a, 0x56, 0x84, 0xe7, 0xa5, 0x6d, 0xa2, 0x7d}, Balance: NewU128(*big.NewInt(4586363775847)), Topics: []Hash(nil)}}, Balances_DustLost: []EventBalancesDustLost{{Phase: Phase{IsApplyExtrinsic: true, AsApplyExtrinsic: 0x0, IsFinalization: false}, Who: AccountID{0xd4, 0x35, 0x93, 0xc7, 0x15, 0xfd, 0xd3, 0x1c, 0x61, 0x14, 0x1a, 0xbd, 0x4, 0xa9, 0x9f, 0xd6, 0x82, 0x2c, 0x85, 0x58, 0x85, 0x4c, 0xcd, 0xe3, 0x9a, 0x56, 0x84, 0xe7, 0xa5, 0x6d, 0xa2, 0x7d}, Balance: NewU128(*big.NewInt(4586363775847)), Topics: []Hash(nil)}}, Balances_Transfer: []EventBalancesTransfer{{Phase: Phase{IsApplyExtrinsic: true, AsApplyExtrinsic: 0x1, IsFinalization: false}, From: AccountID{0xd4, 0x35, 0x93, 0xc7, 0x15, 0xfd, 0xd3, 0x1c, 0x61, 0x14, 0x1a, 0xbd, 0x4, 0xa9, 0x9f, 0xd6, 0x82, 0x2c, 0x85, 0x58, 0x85, 0x4c, 0xcd, 0xe3, 0x9a, 0x56, 0x84, 0xe7, 0xa5, 0x6d, 0xa2, 0x7d}, To: AccountID{0x8e, 0xaf, 0x4, 0x15, 0x16, 0x87, 0x73, 0x63, 0x26, 0xc9, 0xfe, 0xa1, 0x7e, 0x25, 0xfc, 0x52, 0x87, 0x61, 0x36, 0x93, 0xc9, 0x12, 0x90, 0x9c, 0xb2, 0x26, 0xaa, 0x47, 0x94, 0xf2, 0x6a, 0x48}, Value: NewU128(*big.NewInt(6969)), Topics: []Hash(nil)}}, Balances_BalanceSet: 
[]EventBalancesBalanceSet{{Phase: Phase{IsApplyExtrinsic: true, AsApplyExtrinsic: 0x0, IsFinalization: false}, Who: AccountID{0xd4, 0x35, 0x93, 0xc7, 0x15, 0xfd, 0xd3, 0x1c, 0x61, 0x14, 0x1a, 0xbd, 0x4, 0xa9, 0x9f, 0xd6, 0x82, 0x2c, 0x85, 0x58, 0x85, 0x4c, 0xcd, 0xe3, 0x9a, 0x56, 0x84, 0xe7, 0xa5, 0x6d, 0xa2, 0x7d}, Free: NewU128(*big.NewInt(4586363775847)), Reserved: NewU128(*big.NewInt(4586363775847)), Topics: []Hash(nil)}}, Balances_Deposit: []EventBalancesDeposit{{Phase: Phase{IsApplyExtrinsic: true, AsApplyExtrinsic: 0x0, IsFinalization: false}, Who: AccountID{0xd4, 0x35, 0x93, 0xc7, 0x15, 0xfd, 0xd3, 0x1c, 0x61, 0x14, 0x1a, 0xbd, 0x4, 0xa9, 0x9f, 0xd6, 0x82, 0x2c, 0x85, 0x58, 0x85, 0x4c, 0xcd, 0xe3, 0x9a, 0x56, 0x84, 0xe7, 0xa5, 0x6d, 0xa2, 0x7d}, Balance: NewU128(*big.NewInt(4586363775847)), Topics: []Hash(nil)}}, Grandpa_NewAuthorities: []EventGrandpaNewAuthorities(nil), Grandpa_Paused: []EventGrandpaPaused(nil), Grandpa_Resumed: []EventGrandpaResumed(nil), ImOnline_HeartbeatReceived: []EventImOnlineHeartbeatReceived(nil), Indices_IndexAssigned: []EventIndicesIndexAssigned{{Phase: Phase{IsApplyExtrinsic: true, AsApplyExtrinsic: 0x0, IsFinalization: false}, AccountID: AccountID{0x8e, 0xaf, 0x4, 0x15, 0x16, 0x87, 0x73, 0x63, 0x26, 0xc9, 0xfe, 0xa1, 0x7e, 0x25, 0xfc, 0x52, 0x87, 0x61, 0x36, 0x93, 0xc9, 0x12, 0x90, 0x9c, 0xb2, 0x26, 0xaa, 0x47, 0x94, 0xf2, 0x6a, 0x48}, AccountIndex: AccountIndex(12345), Topics: []Hash(nil)}}, Indices_IndexFreed: []EventIndicesIndexFreed{{Phase: Phase{IsApplyExtrinsic: true, AsApplyExtrinsic: 0x0, IsFinalization: false}, AccountIndex: AccountIndex(12345), Topics: []Hash(nil)}}, Offences_Offence: []EventOffencesOffence{{Phase: Phase{IsApplyExtrinsic: true, AsApplyExtrinsic: 0x0, IsFinalization: false}, Kind: Bytes16{0x69, 0x6d, 0x2d, 0x6f, 0x6e, 0x6c, 0x69, 0x6e, 0x65, 0x3a, 0x6f, 0x66, 0x66, 0x6c, 0x69, 0x6e}, OpaqueTimeSlot: Bytes{0xc5, 0x0, 0x0, 0x0}, Topics: []Hash(nil)}}, Session_NewSession: []EventSessionNewSession{{Phase: Phase{IsApplyExtrinsic: true, AsApplyExtrinsic: 0x0, IsFinalization: false}, SessionIndex: 0xc6, Topics: []Hash(nil)}}, Staking_OldSlashingReportDiscarded: []EventStakingOldSlashingReportDiscarded(nil), Staking_Reward: []EventStakingReward{{Phase: Phase{IsApplyExtrinsic: true, AsApplyExtrinsic: 0x0, IsFinalization: false}, Balance: NewU128(*big.NewInt(4586363775847)), Remainder: NewU128(*big.NewInt(0)), Topics: []Hash(nil)}}, Staking_Slash: []EventStakingSlash(nil), System_ExtrinsicSuccess: []EventSystemExtrinsicSuccess{{Phase: Phase{IsApplyExtrinsic: true, AsApplyExtrinsic: 0x0, IsFinalization: false}, DispatchInfo: DispatchInfo{Weight: 10000, Class: DispatchClass{IsOperational: true}, PaysFee: true}, Topics: []Hash(nil)}, {Phase: Phase{IsApplyExtrinsic: true, AsApplyExtrinsic: 0x1, IsFinalization: false}, DispatchInfo: DispatchInfo{Weight: 10000, Class: DispatchClass{IsNormal: true}, PaysFee: true}, Topics: []Hash(nil)}}, System_ExtrinsicFailed: []EventSystemExtrinsicFailed{{Phase: Phase{IsApplyExtrinsic: true, AsApplyExtrinsic: 0x2, IsFinalization: false}, DispatchError: DispatchError{HasModule: true, Module: 0xb, Error: 0x0}, DispatchInfo: DispatchInfo{Weight: 10000, Class: DispatchClass{IsOperational: true}, PaysFee: true}, Topics: []Hash(nil)}}, System_CodeUpdated: []EventSystemCodeUpdated{{Phase: Phase{IsApplyExtrinsic: true, AsApplyExtrinsic: 0x0, IsFinalization: false}, Topics: []Hash(nil)}}, System_NewAccount: []EventSystemNewAccount{{Phase: Phase{IsApplyExtrinsic: true, AsApplyExtrinsic: 0x0, IsFinalization: false}, 
Who: AccountID{0x8e, 0xaf, 0x4, 0x15, 0x16, 0x87, 0x73, 0x63, 0x26, 0xc9, 0xfe, 0xa1, 0x7e, 0x25, 0xfc, 0x52, 0x87, 0x61, 0x36, 0x93, 0xc9, 0x12, 0x90, 0x9c, 0xb2, 0x26, 0xaa, 0x47, 0x94, 0xf2, 0x6a, 0x48}, Topics: []Hash(nil)}}, System_KilledAccount: []EventSystemKilledAccount{{Phase: Phase{IsApplyExtrinsic: true, AsApplyExtrinsic: 0x0, IsFinalization: false}, Who: AccountID{0x8e, 0xaf, 0x4, 0x15, 0x16, 0x87, 0x73, 0x63, 0x26, 0xc9, 0xfe, 0xa1, 0x7e, 0x25, 0xfc, 0x52, 0x87, 0x61, 0x36, 0x93, 0xc9, 0x12, 0x90, 0x9c, 0xb2, 0x26, 0xaa, 0x47, 0x94, 0xf2, 0x6a, 0x48}, Topics: []Hash(nil)}}, } //nolint:lll assert.Equal(t, exp, events) } func TestDispatchError(t *testing.T) { assertRoundtrip(t, DispatchError{HasModule: true, Module: 0xf1, Error: 0xa2}) assertRoundtrip(t, DispatchError{HasModule: false, Error: 0xa2}) }
import weather import time import sys import pyxb.bundles.wssplat.soap11 as soapenv import urllib2 zip = 85711 if 1 < len(sys.argv): zip = int(sys.argv[1]) # Create an envelope, and give it a body that is the request for the # service we want. env = soapenv.Envelope(soapenv.Body(weather.GetCityForecastByZIP(ZIP=str(zip)))) file('request.xml', 'w').write(env.toxml()) # Invoke the service uri = urllib2.Request('http://ws.cdyne.com/WeatherWS/Weather.asmx', env.toxml(), { 'SOAPAction' : "http://ws.cdyne.com/WeatherWS/GetCityForecastByZIP", 'Content-Type': 'text/xml' } ) rxml = urllib2.urlopen(uri).read() file('response.xml', 'w').write(rxml) # Convert the response to a SOAP envelope, then extract the actual # response from the wildcard elements of the body. Note that because # the weather namespace was registered, PyXB already created the # binding for the response. soap_resp = soapenv.CreateFromDocument(rxml) resp = soap_resp.Body.wildcardElements()[0] fc_return = resp.GetCityForecastByZIPResult if fc_return.Success: print 'Got response for %s, %s:' % (fc_return.City, fc_return.State) for fc in fc_return.ForecastResult.Forecast: when = time.strftime('%A, %B %d %Y', fc.Date.timetuple()) outlook = fc.Desciption # typos in WSDL left unchanged low = fc.Temperatures.MorningLow high = fc.Temperatures.DaytimeHigh print ' %s: %s, from %s to %s' % (when, outlook, low, high)
/** * Abstract base class for event locators. * * @author Aleksei Valikov */ public abstract class AbstractObjectLocator implements ObjectLocator { /** * Parent locator. */ protected final ObjectLocator parentLocator; /** * Object. */ protected final Object object; /** * Constructs a new validation event locator. * * @param parentLocator * parent location (may be <code>null</code>). * @param object * object. */ protected AbstractObjectLocator(final ObjectLocator parentLocator, final Object object) { this.object = object; this.parentLocator = parentLocator; } /** * Returns parent locator. * * @return Parent locator. */ public ObjectLocator getParentLocator() { return parentLocator; } public ObjectLocator[] getPath() { final ObjectLocator[] path = new ObjectLocator[getAncestorCount(this) + 1]; fillPath(this, path, path.length - 1); return path; } public String getPathAsString() { final String stepAsString = getStepAsString(); final ObjectLocator parentLocator = getParentLocator(); return parentLocator == null ? stepAsString : parentLocator .getPathAsString() + stepAsString; } protected abstract String getStepAsString(); private void fillPath(ObjectLocator locator, ObjectLocator[] path, int index) { path[index] = locator; final ObjectLocator parent = locator.getParentLocator(); if (parent != null) fillPath(parent, path, index - 1); } private int getAncestorCount(ObjectLocator locator) { final ObjectLocator parent = locator.getParentLocator(); if (parent == null) return 0; else return 1 + getAncestorCount(parent); } public Object getObject() { return object; } public int getColumnNumber() { return 0; } public int getLineNumber() { return 0; } public int getOffset() { return 0; } public URL getURL() { return null; } public Node getNode() { return null; } // /** // * Returns expression step (for EL and JXPath expressions). // * @return Expression step. // */ // public abstract String getStep(); public String toString() { return getMessage(); } /** * Returns message code. * * @return Message code. */ public String getMessageCode() { return getClass().getName(); } protected abstract String getDefaultMessage(); // public Object[] getMessageParameters() { // return new Object[] { getObject() }; // } // public String getMessage(ResourceBundle bundle) { try { final String messageTemplate = bundle.getString(getMessageCode()); return MessageFormat .format(messageTemplate, getMessageParameters()); } catch (MissingResourceException mrex) { return getDefaultMessage(); } } /** * Returns location message. * * @return Location message. */ public String getMessage() { return getMessage(ResourceBundle.getBundle(getClass().getPackage() .getName() + ".messages")); } // public int hashCode() { // int hashCode = getObject().hashCode(); // return hashCode; // } public ItemObjectLocator item(int index, Object value) { return new DefaultItemObjectLocator(this, index, value); } public PropertyObjectLocator property(String name, Object value) { return new DefaultPropertyObjectLocator(this, name, value); } }
/** * Bytes to hex string string. * * @param paramArrayOfByte the param array of byte * @return the string */ public static String bytesToHexString(byte[] paramArrayOfByte) { StringBuilder localStringBuilder = new StringBuilder(); if ((paramArrayOfByte == null) || (paramArrayOfByte.length <= 0)) { return null; } for (int i = 0; i < paramArrayOfByte.length; i++) { int j = paramArrayOfByte[i] & 0xFF; String str = Integer.toHexString(j); if (str.length() < 2) { localStringBuilder.append(0); } localStringBuilder.append(str); } return localStringBuilder.toString(); }
// Factorial calculates the factorial of the provided integer func Factorial(n int) uint64 { var factVal uint64 = 1 if n < 0 { panic("Factorial of negative number doesn't exist.") } else { for i := 1; i <= n; i++ { factVal *= uint64(i) } } return factVal }
/** * Creates, if necessary, the given the location for {@code descriptor}. * * @param conf A Configuration * @param descriptor A DatasetDescriptor */ static void ensureExists( DatasetDescriptor descriptor, Configuration conf) { Preconditions.checkArgument(descriptor.getLocation() != null, "Cannot get FileSystem for a descriptor with no location"); final Path dataPath = new Path(descriptor.getLocation()); final FileSystem fs = fsForPath(dataPath, conf); try { if (!fs.exists(dataPath)) { fs.mkdirs(dataPath); } } catch (IOException ex) { throw new DatasetRepositoryException("Cannot access data location", ex); } }
def html_link(self): if self.sequence_number and self.approved: unescaped_url = f'/exploitation/{self.farm_name or self.name}--{self.sequence_number}' return mark_safe(f'<a href="{self.url_path}" target="_blank">{unescaped_url}</a>') else: return 'Pas encore live'
<gh_stars>0 package tunecomposer.actionclasses; import java.util.ArrayList; import javafx.scene.layout.Pane; import tunecomposer.SoundObject; /** * An action which cuts objects to the clipboard and removes from a pane. */ public class CutAction extends Action { DeleteAction deleteAction; CopyAction copyAction; /** * Constructs an action event to copy SoundObjects. * Sets affectObjs and soundObjectPane. * * @param selectedObjs selList all SoundObjects to be affected * @param soundObjectPane */ public CutAction(ArrayList<SoundObject> selectedObjs, Pane soundObjectPane) { affectedObjs = (ArrayList<SoundObject>) selectedObjs.clone(); this.soundObjectPane = soundObjectPane; deleteAction = new DeleteAction(affectedObjs, this.soundObjectPane); copyAction = new CopyAction(affectedObjs); } /** * Copies all affectedObjs from the soundObjectPane. */ @Override public void execute() { deleteAction.execute(); copyAction.execute(); } /** * Undoes cut. */ @Override public void undo() { deleteAction.undo(); copyAction.undo(); } /** * Redoes cut. */ @Override public void redo() { deleteAction.redo(); copyAction.redo(); } }
<reponame>dyu/coreds-spectre declare const _default: "0.13.0"; export default _default;
def duration_to_string(value):
    """Serialize a datetime.timedelta as an ISO 8601 duration string,
    rounding sub-second precision to the nearest whole second."""
    result = 'P'
    if value.days > 0:
        result += '%dD' % value.days
    result += 'T'
    hours = value.seconds // 3600
    minutes = (value.seconds - (3600 * hours)) // 60
    seconds = value.seconds - (3600 * hours) - (60 * minutes)
    if value.microseconds >= 500000:
        seconds += 1
    if hours > 0:
        result += '%dH' % hours
    if minutes > 0:
        result += '%dM' % minutes
    result += '%dS' % seconds
    return result
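A usage sketch, run right after the definition above; the example values are arbitrary:

from datetime import timedelta

# 1 day, 2 h, 3 min, 4.6 s: the 600 ms round up to a fifth second.
assert duration_to_string(timedelta(days=1, hours=2, minutes=3, seconds=4,
                                    microseconds=600000)) == 'P1DT2H3M5S'
# With no full days, the output still starts with 'PT'.
assert duration_to_string(timedelta(minutes=90)) == 'PT1H30M0S'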
<reponame>lightyen/goblog<gh_stars>1-10 import { HotModuleReplacementPlugin, NamedChunksPlugin, NamedModulesPlugin } from "webpack" import * as webpackMerge from "webpack-merge" import baseWebpackConfig from "./webpack.config" export default webpackMerge(baseWebpackConfig, { performance: { hints: "warning", }, mode: "development", devtool: "source-map", watchOptions: { aggregateTimeout: 300, poll: 1000, ignored: ["renderer/**/*.js", "node_modules"], }, plugins: [ new HotModuleReplacementPlugin(), new NamedModulesPlugin(), new NamedChunksPlugin(), ], })
Mark started writing music using computer sound chips in 1984 on the Commodore 64, and began releasing tunes under the pseudonym TDK on the Commodore Amiga in the early '90s with the popular demoscene groups Anthrox and Melon Dezign. These are his first chiptunes since he retired from the scene in 1993, leaving behind his work for groups like Anthrox and Melon Dezign to focus on composing music for video games such as Warhammer: Dark Omen, Duke Nukem: Total Meltdown, Populous: The Beginning and Dungeon Keeper 2. Mark decided in 2011 that the time was right to bring the name TDK out of retirement to create this album, utilising not only the tricks of the chiptune trade from 20 years ago, but also modern chiptune and music production techniques. Reawakening was written using Steinberg Cubase, two Commodore 64s with MSSIAH cartridges, a Commodore Amiga 1200, an Elektron SIDStation, ReFX QuadraSID and Plogue Chipsounds.
/** * * @author James F. Bowring */ public class SamplesOrganizerPane extends JLayeredPane implements ReduxDragAndDropClipboardInterface { private final int WIDTH_OF_SAMPLE_DISPLAY_LIST = 225; private final int BOTTOM_MARGIN = 25; private final int LEFT_MARGIN = 30; private final int TOP_MARGIN = 20; private final JLabel titleLabel; private int myWidth; private final int myHeight; private final ArrayList<AbstractTripoliSample> tripoliSamples; private final ArrayList<JLayeredPane> sampleDisplayPanes; private final JLayeredPane sampleFractionLists_pane; private final JScrollPane sampleFractionLists_scroll; private final JButton addSampleButton; private DragAndDropListItemInterface[] dndClipboardListItems; private final ProjectManagerSubscribeInterface projectManager; /** * * * @param title * @param x * @param y * @param myWidth * @param myHeight * @param tripoliSamples * @param projectManager the value of projectManager */ public SamplesOrganizerPane(// String title, int x, int y, int myWidth, int myHeight, ArrayList<AbstractTripoliSample> tripoliSamples, ProjectManagerSubscribeInterface projectManager) { this.titleLabel = new JLabel(title); this.titleLabel.setBounds(2, 2, 150, 15); this.add(this.titleLabel, DEFAULT_LAYER); this.myWidth = myWidth; this.myHeight = myHeight; this.setBounds(x, y, myWidth, myHeight); this.setOpaque(true); this.tripoliSamples = tripoliSamples; sampleFractionLists_pane = new JLayeredPane(); // populate list boxes sampleDisplayPanes = new ArrayList<>(); // walk the samples for (int i = 0; i < tripoliSamples.size(); i++) { ActionListener closeButtonActionListener = new CloseSampleButtonActionListener(); SampleFractionListDisplayPane sampleDisplayPane =// new SampleFractionListDisplayPane( // tripoliSamples.get(i), // closeButtonActionListener, // this, // projectManager); ((CloseSampleButtonActionListener) closeButtonActionListener).setSampleDisplayPane(sampleDisplayPane); sampleDisplayPanes.add(sampleDisplayPane); sampleFractionLists_pane.add(sampleDisplayPane); } sampleFractionLists_scroll = new javax.swing.JScrollPane(); sampleFractionLists_scroll.setAutoscrolls(true); sampleFractionLists_scroll.setHorizontalScrollBarPolicy(JScrollPane.HORIZONTAL_SCROLLBAR_ALWAYS); this.add(sampleFractionLists_scroll, DEFAULT_LAYER); sampleFractionLists_scroll.setViewportView(sampleFractionLists_pane); // button to create additional SampleFractionListDisplayPanels addSampleButton = new ET_JButton("+"); addSampleButton.setBounds(5, myHeight - 50, 15, 15); addSampleButton.addActionListener(new AddSampleButtonActionListener(this)); this.add(addSampleButton, DEFAULT_LAYER); dndClipboardListItems = new DragAndDropListItemInterface[0]; this.projectManager = projectManager; refreshSampleFractionListsPane(); } /** * */ public void saveChanges() { for (JLayeredPane sampleDisplayPane : sampleDisplayPanes) { ((SampleFractionListDisplayPane) sampleDisplayPane).saveChanges(); } projectManager.updateDataChangeStatus(false); } /** * @return the dndClipboardListItems */ @Override public DragAndDropListItemInterface[] getDndClipboardListItems() { return dndClipboardListItems; } /** * @param dndClipboardListItems the dndClipboardListItems to set */ @Override public void setDndClipboardListItems(DragAndDropListItemInterface[] dndClipboardListItems) { this.dndClipboardListItems = dndClipboardListItems; } /** * @param myWidth the myWidth to set */ public void setMyWidth(int myWidth) { this.myWidth = myWidth; } private class AddSampleButtonActionListener implements ActionListener { private final 
ReduxDragAndDropClipboardInterface samplesOrgPane; public AddSampleButtonActionListener(ReduxDragAndDropClipboardInterface samplesOrgPane) { this.samplesOrgPane = samplesOrgPane; } @Override public void actionPerformed(ActionEvent e) { AbstractTripoliSample addedSample = new TripoliUnknownSample("unknown" + (tripoliSamples.size() + 1)); tripoliSamples.add(addedSample); projectManager.updateDataChangeStatus(true); ActionListener closeButtonActionListener = new CloseSampleButtonActionListener(); SampleFractionListDisplayPane sampleDisplayPane =// new SampleFractionListDisplayPane( // addedSample,// closeButtonActionListener,// samplesOrgPane, // projectManager); ((CloseSampleButtonActionListener) closeButtonActionListener).setSampleDisplayPane(sampleDisplayPane); sampleDisplayPanes.add(sampleDisplayPane); sampleFractionLists_pane.add(sampleDisplayPane); refreshSampleFractionListsPane(); } } private class CloseSampleButtonActionListener implements ActionListener { private JLayeredPane sampleDisplayPane; public CloseSampleButtonActionListener() { } @Override public void actionPerformed(ActionEvent e) { if (sampleDisplayPane != null) { projectManager.updateDataChangeStatus(true); tripoliSamples.remove(((SampleFractionListDisplayPane) sampleDisplayPane).getTripoliSample()); tripoliSamples.trimToSize(); // sept 2016 discovered that project sample was not removed ArrayList< SampleInterface> projectSamples = projectManager.getProject().getProjectSamples(); SampleInterface removedSample = null; for (SampleInterface sample : projectSamples) { if (sample.getSampleName().compareToIgnoreCase(((SampleFractionListDisplayPane) sampleDisplayPane).getTripoliSample().getSampleName()) == 0) { removedSample = sample; break; } } if (removedSample != null) { projectSamples.remove(removedSample); } sampleDisplayPanes.remove(sampleDisplayPane); sampleDisplayPanes.trimToSize(); sampleFractionLists_pane.remove(sampleDisplayPane); sampleDisplayPane.removeAll(); sampleDisplayPane.validate(); sampleFractionLists_pane.validate(); refreshSampleFractionListsPane(); sampleFractionLists_pane.repaint(); } } /** * @param sampleDisplayPane the sampleDisplayPane to set */ public void setSampleDisplayPane(JLayeredPane sampleDisplayPane) { this.sampleDisplayPane = sampleDisplayPane; } } /** * */ public final void refreshSampleFractionListsPane() { this.setSize(myWidth, myHeight); sampleFractionLists_scroll.setBounds( // LEFT_MARGIN, TOP_MARGIN, myWidth - LEFT_MARGIN, myHeight - TOP_MARGIN - BOTTOM_MARGIN + 25 /*height of scrollbar*/); sampleFractionLists_pane.setPreferredSize(// new Dimension( // Math.max(myWidth - 25 - LEFT_MARGIN, tripoliSamples.size() * (WIDTH_OF_SAMPLE_DISPLAY_LIST + 25)), // myHeight - BOTTOM_MARGIN - TOP_MARGIN - 10)); layoutSampleFractionLists(); validate(); } private void layoutSampleFractionLists() { for (int i = 0; i < sampleDisplayPanes.size(); i++) { sampleDisplayPanes.get(i).setBounds(// 10 + (WIDTH_OF_SAMPLE_DISPLAY_LIST + 20) * i, // 10, WIDTH_OF_SAMPLE_DISPLAY_LIST, myHeight - BOTTOM_MARGIN - TOP_MARGIN - 10);//50); } sampleFractionLists_pane.validate(); } /** * * @param g */ @Override public void paint(Graphics g) { super.paint(g); } }
Factors influencing false negative rates in xeromammography. Xeromammographic false negatives were analyzed to ascertain the cause of the errors and determine what corrective measures could be taken. Of 52 cancers miscalled, 52% were not visualized and 48% were categorized as misinterpretations. The causes of these errors are discussed. The error rate in all but 8% of the former group appears to be irreducible, yet errors discussed in the latter group are thought to be correctable in a high percentage of cases. Recommendations to reduce the number of false negatives in this group are presented.
package org.xupeng.dao; import org.apache.ibatis.annotations.Mapper; import org.apache.ibatis.annotations.Param; import org.xupeng.entities.PaymentEntity; /** * @author xupeng * @date 2020/10/18 23:19 * @Description */ @Mapper public interface IPaymentDao { public int create(PaymentEntity paymentEntity); public PaymentEntity getPaymentById(@Param("id") Long id); }
// WriteFiles write a list of files (path->content) in the temp directory. func (h *TempDir) WriteFiles(files map[string]string) *TempDir { for path, content := range files { h.Write(path, content) } return h }
/** * Examples of marshalling and unmarshalling. */ public class XmlgroupsMarshalUnmarshal { public static void main(String... args) throws Exception { XmlgroupsHelper helper = new XmlgroupsHelper(); // Read a feed file. URL feedUrl = MarshalUnmarshal.class.getResource(args[0]); // Examples in the feed developer's guide, as well as groups // feeds downloaded from the GSA, don't include the doctype, // so skip the validation here. Xmlgroups feed = helper.unmarshalWithoutDtd(feedUrl); // List the groups and their members. for (Membership membership : feed.getMembership()) { System.out.println(membership.getPrincipal().getvalue()); for (Principal principal : membership.getMembers().getPrincipal()) { System.out.println(" " + principal.getvalue()); } } // Marshal that feed object back to XML. ByteArrayOutputStream out = new ByteArrayOutputStream(); helper.marshal(feed, out); System.out.println(); System.out.println(out.toString("UTF-8")); // Read the marshalled feed to check that it's still a valid // groups feed. Groups feed construction in the adaptor // library code adds a doctype, so check this using the dtd. helper.unmarshalWithDtd(new ByteArrayInputStream(out.toByteArray())); } }
package torrent

import (
	"io"
	"net/http"
	"os"
	"strings"
	"time"

	"github.com/cenkalti/rain/torrent"
	"github.com/fatih/color"
)

type Stat struct {
	Status     string
	Downloaded int64
	Peers      int
	Total      int64
}

var Version string = "420"

var conf = torrent.Config{
	Database:                               "downloads/session.db",
	DataDir:                                "downloads",
	DataDirIncludesTorrentID:               true,
	PortBegin:                              50000,
	PortEnd:                                60000,
	MaxOpenFiles:                           10240,
	PEXEnabled:                             true,
	ResumeWriteInterval:                    30 * time.Second,
	PrivatePeerIDPrefix:                    "-MF" + Version + "-",
	PrivateExtensionHandshakeClientVersion: "MF " + Version,
	BlocklistUpdateInterval:                24 * time.Hour,
	BlocklistUpdateTimeout:                 10 * time.Minute,
	BlocklistEnabledForTrackers:            true,
	BlocklistEnabledForOutgoingConnections: true,
	BlocklistEnabledForIncomingConnections: true,
	BlocklistMaxResponseSize:               100 << 20,
	TorrentAddHTTPTimeout:                  30 * time.Second,
	MaxMetadataSize:                        30 << 20,
	MaxTorrentSize:                         10 << 20,
	MaxPieces:                              64 << 10,
	DNSResolveTimeout:                      5 * time.Second,
	SpeedLimitUpload:                       1,
	ResumeOnStartup:                        true,
	RPCEnabled:                             true,
	RPCHost:                                "127.0.0.1",
	RPCPort:                                7246,
	RPCShutdownTimeout:                     5 * time.Second,
	TrackerNumWant:                         200,
	TrackerStopTimeout:                     5 * time.Second,
	TrackerMinAnnounceInterval:             time.Minute,
	TrackerHTTPTimeout:                     10 * time.Second,
	TrackerHTTPPrivateUserAgent:            "Rain/" + Version,
	TrackerHTTPMaxResponseSize:             2 << 20,
	TrackerHTTPVerifyTLS:                   true,
	DHTEnabled:                             true,
	DHTHost:                                "0.0.0.0",
	DHTPort:                                7246,
	DHTAnnounceInterval:                    30 * time.Minute,
	DHTMinAnnounceInterval:                 time.Minute,
	DHTBootstrapNodes: []string{
		"router.bittorrent.com:6881",
		"dht.transmissionbt.com:6881",
		"router.utorrent.com:6881",
		"dht.libtorrent.org:25401",
		"dht.aelitis.com:6881",
	},
	UnchokedPeers:                  3,
	OptimisticUnchokedPeers:        1,
	MaxRequestsIn:                  250,
	MaxRequestsOut:                 250,
	DefaultRequestsOut:             50,
	RequestTimeout:                 20 * time.Second,
	EndgameMaxDuplicateDownloads:   20,
	MaxPeerDial:                    80,
	MaxPeerAccept:                  20,
	ParallelMetadataDownloads:      1,
	PeerConnectTimeout:             5 * time.Second,
	PeerHandshakeTimeout:           10 * time.Second,
	PieceReadTimeout:               30 * time.Second,
	MaxPeerAddresses:               2000,
	AllowedFastSet:                 10,
	ReadCacheBlockSize:             128 << 10,
	ReadCacheSize:                  256 << 20,
	ReadCacheTTL:                   1 * time.Minute,
	ParallelReads:                  1,
	ParallelWrites:                 1,
	WriteCacheSize:                 1 << 30,
	WebseedDialTimeout:             10 * time.Second,
	WebseedTLSHandshakeTimeout:     10 * time.Second,
	WebseedResponseHeaderTimeout:   10 * time.Second,
	WebseedResponseBodyReadTimeout: 10 * time.Second,
	WebseedRetryInterval:           time.Hour * 100,
	WebseedVerifyTLS:               true,
	WebseedMaxSources:              10,
	WebseedMaxDownloads:            3,
}

func getname(link string) []string {
	var titles []string
	titles = append(titles, strings.Split(link, `[Bitsearch.to]`)[1])
	return titles
}

func DownloadTorrentFile(link string) {
	response, err := http.Get(link)
	if err != nil {
		panic(err)
	}
	defer response.Body.Close()

	output, err := os.Create(getname(link)[0] + ".torrent")
	if err != nil {
		panic(err)
	}
	defer output.Close()

	_, err = io.Copy(output, response.Body)
	if err != nil {
		panic(err)
	}
}

func AddTorrent(torrentf string, out chan Stat) {
	DownloadTorrentFile(torrentf)

	var opt = torrent.AddTorrentOptions{
		ID:                getname(torrentf)[0],
		Stopped:           false,
		StopAfterDownload: true,
		StopAfterMetadata: false,
	}

	if _, err := os.Stat("downloads/" + getname(torrentf)[0]); !os.IsNotExist(err) {
		err = os.RemoveAll("downloads/" + getname(torrentf)[0])
		if err != nil {
			color.Red("Failed to remove previous files from " + getname(torrentf)[0])
		}
	}

	ses, _ := torrent.NewSession(torrent.Config(conf))

	cont, _ := os.Open(getname(torrentf)[0] + ".torrent")
	defer cont.Close()

	tor, _ := ses.AddTorrent(cont, &opt)

	for range time.Tick(time.Second) {
		s := tor.Stats()
		if strings.Contains(s.Status.String(), "Stopped") {
			_ = ses.Close()
			//_ = os.Chdir("downloads")
			//_ = os.Remove("session.db")
			//_ = os.Chdir("..")
			break
		}
		if out != nil {
			out <- Stat{Status: s.Status.String(), Downloaded: s.Bytes.Completed, Peers: s.Peers.Total, Total: s.Bytes.Total}
		}
	}
}
<filename>src/libs/converters.d.ts
import { BigNumber } from '@waves/data-entities';

declare let converters: {
    byteArrayToHexString: (bytes: any) => string;
    stringToByteArray: (str: any) => number[];
    hexStringToByteArray: (str: any) => any[];
    stringToHexString: (str: any) => any;
    hexStringToString: (hex: any) => any;
    checkBytesToIntInput: (bytes: any, numBytes: any, opt_startIndex: any) => any;
    byteArrayToSignedShort: (bytes: any, opt_startIndex: any) => any;
    byteArrayToSignedInt32: (bytes: any, opt_startIndex: any) => any;
    byteArrayToBigInteger: (bytes: any, opt_startIndex: any) => BigNumber;
    byteArrayToWordArray: (byteArray: any) => any;
    wordArrayToByteArray: (wordArray: any) => any[];
    wordArrayToByteArrayImpl: (wordArray: any, isFirstByteHasSign: any) => any[];
    byteArrayToString: (bytes: any, opt_startIndex?: any, length?: any) => string;
    byteArrayToShortArray: (byteArray: any) => number[];
    shortArrayToByteArray: (shortArray: any) => number[];
    shortArrayToHexString: (ary: any) => string;
    intToBytes_: (x: any, numBytes: any, unsignedMax: any, opt_bigEndian: any) => any[];
    int32ToBytes: (x: any, opt_bigEndian: any) => any[];
    int16ToBytes: (x: any, opt_bigEndian: any) => any[];
    wordArrayToByteArrayEx: (wordArray: any) => Uint8Array;
    byteArrayToWordArrayEx: (u8arr: any) => any;
};

export default converters;
# Enumerate feasible amounts of water (multiples of 100*a and 100*b grams) and sugar
# (multiples of c and d grams), then pick the mix with the highest sugar density that
# respects the solubility limit e (grams of sugar per 100 g of water) and capacity f.
a, b, c, d, e, f = map(int, input().split())
a *= 100
b *= 100

w = set()
for i in range(f):
    _w = i * a
    if f < _w:
        break
    for j in range(f):
        __w = _w + j * b
        if f < __w:
            break
        w.add(__w)
w.remove(0)

s = set()
for i in range(f):
    _s = i * c
    if f < _s:
        break
    for j in range(f):
        __s = _s + j * d
        if f < __s:
            break
        s.add(__s)

m = -1
ans = (0, 0)
for _w in w:
    for _s in s:
        if 100 * _s <= e * _w and _w + _s <= f:
            t = 100 * _s / (_s + _w)
            if m < t:
                ans = (_w + _s, _s)
                m = t
print(*ans)
<filename>lib/events/messages.go
package events

type DeploymentV1 struct {
	Id   string `json:"id"`
	Xml  string `json:"xml"`
	Svg  string `json:"svg"`
	Name string `json:"name"`
}

type DeploymentV2 struct {
	Id      string  `json:"id"`
	Name    string  `json:"name"`
	Diagram Diagram `json:"diagram"`
}

type Diagram struct {
	XmlDeployed string `json:"xml_deployed"`
	Svg         string `json:"svg"`
}

type DeploymentCommand struct {
	Command      string        `json:"command"`
	Id           string        `json:"id"`
	Owner        string        `json:"owner"`
	Deployment   *DeploymentV1 `json:"deployment"`
	DeploymentV2 *DeploymentV2 `json:"deployment_v2"`
	Source       string        `json:"source,omitempty"`
}

type KafkaIncidentsCommand struct {
	Command             string `json:"command"`
	MsgVersion          int64  `json:"msg_version"`
	ProcessDefinitionId string `json:"process_definition_id,omitempty"`
	ProcessInstanceId   string `json:"process_instance_id,omitempty"`
}
<filename>src/services/ProjectImportExport.ts
import FormData from 'form-data';
import { BaseService, RequestHelper, Sudo, BaseRequestOptions } from '../infrastructure';
import { ProjectId, UploadMetadata } from '.';

class ProjectImportExport extends BaseService {
  download(projectId: ProjectId, options?: Sudo) {
    const pId = encodeURIComponent(projectId);

    return RequestHelper.get(this, `projects/${pId}/export/download`, options);
  }

  exportStatus(projectId: ProjectId, options?: Sudo) {
    const pId = encodeURIComponent(projectId);

    return RequestHelper.get(this, `projects/${pId}/export`, options);
  }

  import(content: string, { metadata, sudo }: { metadata?: UploadMetadata } & Sudo = {}) {
    const form = new FormData();

    const defaultMetadata: UploadMetadata = {
      filename: Date.now().toString(),
      contentType: 'application/octet-stream',
    };

    form.append('file', content, Object.assign(defaultMetadata, metadata));

    return RequestHelper.post(this, 'projects/import', { sudo, form });
  }

  importStatus(projectId: ProjectId, options?: Sudo) {
    const pId = encodeURIComponent(projectId);

    return RequestHelper.get(this, `projects/${pId}/import`, options);
  }

  schedule(projectId: ProjectId, options?: BaseRequestOptions) {
    const pId = encodeURIComponent(projectId);

    return RequestHelper.post(this, `projects/${pId}/export`, options);
  }
}

export default ProjectImportExport;
/**
 * Specify a column to split
 * @param columnHeader (e.g. "personnel")
 * @param delimiter (e.g. "~")
 * @throws Exception
 */
public void addSplit(String columnHeader, String delimiter) throws Exception {
    validateColumnHeader(columnHeader);
    validateDelimiter(delimiter);
    validateNotSplitYet(columnHeader);
    columnsToSplit.put(columnHeader, delimiter);
}
MONTREAL – Canadian softwood lumber producers will be hit only slightly less forcefully as the U.S. government reduced export duties for most Canadian producers after ongoing political talks failed to reach a deal. In its final determination released Thursday, the Department of Commerce said most Canadian producers will pay a combined countervailing and anti-dumping rate of 20.83 per cent, down from 26.75 per cent in the preliminary determinations issued earlier this year. Commerce Secretary Wilbur Ross said the revised duties were issued after the United States and Canada were unable to reach a long-term settlement to the dispute. WATCH: Trudeau introduces plan to help softwood lumber producers “While I am disappointed that a negotiated agreement could not be made between domestic and Canadian softwood producers, the United States is committed to free, fair and reciprocal trade with Canada,” he said in a news release. READ MORE: U.S. governor wants Canadian lumber tariffs lifted amid potential post-storm ‘price-gouging’ “This decision is based on a full and unbiased review of the facts in an open and transparent process that defends American workers and businesses from unfair trade practices.” The Canadian government responded by saying it will continue to defend the lumber industry against protectionist trade measures. “The U.S. Department of Commerce’s decision on punitive countervailing and anti-dumping duties against Canada’s softwood lumber producers is unfair, unwarranted and deeply troubling,” Foreign Affairs Minister Chrystia Freeland and Natural Resources Minister Jim Carr said in a joint statement. “We urge the U.S. Administration to rescind these duties, which harm workers and communities in Canada. These duties are a tax on American middle class families too, whose homes, renovations and repairs will only be more expensive.” WATCH: Quebec fights back after Bombardier slapped with 219% duty The ministers said the government will turn to litigation if required to defend the industry and expects to prevail as it has in the past. “We are reviewing our options, including legal action through the North American Free Trade Agreement and the World Trade Organization, and we will not delay in taking action.” Carr plans to convene the Federal-Provincial Task Force on Softwood Lumber in the coming days to discuss developments. READ MORE: Canada open to adding softwood lumber deal to NAFTA The U.S. agency said Canadian producers have exported softwood lumber to the U.S. at 3.2 to 8.89 per cent less than fair value. It also determined that Canada is providing unfair subsidies to its producers at rates of 3.34 to 18.19 per cent. The preliminary duties forced Canadian companies to post hundreds of millions of dollars in deposits until a final ruling of harm is made by the U.S. International Trade Commission around Dec. 18. The duties have driven up the price of lumber to cover the extra costs, adding to the cost of building a home in the United States. Canadian unions and lumber companies fear the issue will eventually cause layoffs once prices and demand falls. The rate for Montreal-based Resolute Forest Products (TSX:RFP) rises marginally to 17.9 per cent from 17.41 per cent and J.D. Irving inches up to 9.92 per cent from 9.89 per cent. West Fraser Timber (TSX:WFT) will continue to pay the highest duties, but its total is being cut to 23.7 per cent from 30.88 per cent. 
READ MORE: 20% of preliminary duties end on Canadian lumber companies Canfor (TSX:CFP) is next at 22.13, down from 27.98, Tolko at 22.07 vs. 27.03. Irving will pay 3.34 per cent in countervailing duties and no anti-dumping tariff, down from 9.89 per cent. Lumber products certified by the Atlantic Lumber Board as being first produced in Newfoundland and Labrador, Nova Scotia or Prince Edward Island are excluded from any duties. Also excluded is U.S. lumber shipped to Canada for some processing and imported back into the U.S., certain box spring kits, and box-spring frame components. The United States imported US$5.66 billion worth of softwood lumber last year from Canada. Softwood lumber importers will have to make cash deposits with the U.S. Customs and Border Protection based on the final rates.
// SameSignInt64 returns 0 if one of the passed numbers is >0 and the other is <0. Otherwise it returns 1.
//   SameSignInt64(-100, 5)  = 0
//   SameSignInt64(5, -100)  = 0
//   SameSignInt64(-100, 0)  = 1
//   SameSignInt64(50, 100)  = 1
//   SameSignInt64(-5, -10)  = 1
func SameSignInt64(a, b int64) int64 {
	if (a < 0 && b > 0) || (a > 0 && b < 0) {
		return 0
	}
	return 1
}
<filename>src/theme/context/danger/dangerActiveAlt.ts
import { css } from "styled-components";
import { createContextTheme } from "../../defaultTheme";

const contextDangerActiveAlt = createContextTheme("dangerActiveAlt");

export const varDangerActiveAlt = contextDangerActiveAlt.var;

export default css`
  ${contextDangerActiveAlt.varName}: ${contextDangerActiveAlt.getVarValue};
`;
DnS: Distill-and-Select for Efficient and Accurate Video Indexing and Retrieval

In this paper, we address the problem of high performance and computationally efficient content-based video retrieval in large-scale datasets. Current methods typically propose either: (i) fine-grained approaches employing spatio-temporal representations and similarity calculations, achieving high performance at a high computational cost or (ii) coarse-grained approaches representing/indexing videos as global vectors, where the spatio-temporal structure is lost, providing low performance but also having low computational cost. In this work, we propose a Knowledge Distillation framework, called Distill-and-Select (DnS), that starting from a well-performing fine-grained Teacher Network learns: a) Student Networks at different retrieval performance and computational efficiency trade-offs and b) a Selector Network that at test time rapidly directs samples to the appropriate student to maintain both high retrieval performance and high computational efficiency. We train several students with different architectures and arrive at different trade-offs of performance and efficiency, i.e., speed and storage requirements, including fine-grained students that store/index videos using binary representations. Importantly, the proposed scheme allows Knowledge Distillation in large, unlabelled datasets -- this leads to good students. We evaluate DnS on five public datasets on three different video retrieval tasks and demonstrate a) that our students achieve state-of-the-art performance in several cases and b) that the DnS framework provides an excellent trade-off between retrieval performance, computational speed, and storage space. In specific configurations, the proposed method achieves similar mAP with the teacher but is 20 times faster and requires 240 times less storage space. The collected dataset and implementation are publicly available: https://github.com/mever-team/distill-and-select.

Fig. 1 Performance of our proposed DnS framework and its variants for several dataset percentages sent for re-ranking (denoted in bold) evaluated on the DSVR task of FIVR-200K in terms of mAP, computational time per query in seconds, and storage space per video in megabytes (MB), in comparison to state-of-the-art methods. Coarse-grained methods are in blue, fine-grained in red, and re-ranking in orange.

Introduction

Due to the popularity of Internet-based video sharing services, the volume of video content on the Web has reached unprecedented scales. For instance, YouTube reports that more than 500 hours of content are uploaded every minute. This poses considerable challenges for all video analysis problems, such as video classification, action recognition, and video retrieval, which need to achieve high performance at low computational and storage requirements in order to deal with the large scale of the data. The problem is particularly hard in the case of content-based video retrieval, where, given a query video, one needs to calculate its similarity with all videos in a database to retrieve and rank the videos based on relevance.
In such scenario, this requires efficient indexing, i.e., storage of the representations extracted from the videos in the dataset, and fast calculations of the similarity between pairs of them. Depending on whether the spatio-temporal structure of videos is stored/indexed and subsequently taken into consideration during similarity calculation, research efforts fall into two broad categories, namely coarse-and fine-grained approaches. Coarse-grained approaches address this problem by aggregating framelevel features into single video-level vector representations (that are estimated and stored at indexing time) and then calculating the similarity between them by using a simple function such as the dotproduct or the Euclidean distance (at retrieval time). The video-level representations can be global vectors (Gao et al., 2017;Kordopatis-Zilos et al., 2017b;Lee et al., 2020), hash codes (Song et al., 2011(Song et al., , 2018Yuan et al., 2020), Bag-of-Words (BoW) (Cai et al., 2011;Kordopatis-Zilos et al., 2017a;Liao et al., 2018), or concept annotations (Markatopoulou et al., 2017(Markatopoulou et al., , 2018Liang and Wang, 2020). These methods have very low storage requirements, allow rapid similarity estimation at query-time, but they exhibit low retrieval performance, since they disregard the spatial and temporal structure of the videos and are therefore vulnerable to clutter and irrelevant content. On the other hand, fine-grained approaches extract (and store at indexing time) and use in the similarity calculation (at retrieval time) representations that respect the spatio-temporal structure of the original video, i.e., they have a temporal or a spatio-temporal dimension/index. Typically, such methods consider the sequence of frames in the similarity calculation and align them, e.g., by using Dynamic Programming (Chou et al., 2015;Liu et al., 2017), Temporal Networks (Tan et al., 2009;Jiang and Wang, 2016), or Hough Voting (Douze et al., 2010;Jiang et al., 2014); or consider spatio-temporal video representation and matching based on Recurrent Neural Networks (RNN) (Feng et al., 2018;Bishay et al., 2019), Transformer-based architectures (Shao et al., 2021), or in the Fourier domain (Poullot et al., 2015;Baraldi et al., 2018). These approaches achieve high retrieval performance but at considerable computation and storage cost. In an attempt to exploit the merits of both fineand coarse-grained methods, some works tried to utilize them in a single framework (Wu et al., 2007;Chou et al., 2015;Liang and Wang, 2020), leading to methods that offer a trade-off between computational efficiency and retrieval performance. Typically, these approaches first rank videos based on a coarse-grained method, in order to filter the videos with similarity lower than a predefined threshold, and then re-rank the remaining ones based on the similarity calculated from a computationally expensive fine-grained method. However, setting the threshold is by no means a trivial task. In addition, in those approaches, both coarse-and fine-grained components are typically built based on hand-crafted features with traditional aggregations (e.g., BoW) and heuristic/non-learnable approaches for similarity calculation -this results in sub-optimal performance. We will be referring to such approaches as re-ranking methods. Fig. 1 illustrates the retrieval performance, time per query, and storage space per video of several methods from the previous categories. 
Fine-grained approaches achieve the best results but with a significant allocation of resources. On the other hand, coarse-grained approaches are very lightweight but with considerably lower retrieval performance. Finally, the proposed re-ranking method provides a good trade-off between accuracy and efficiency, achieving very competitive performance with low time and storage requirements.

Knowledge Distillation is a methodology in which a student network is being trained so as to approximate the output of a teacher network, either in the labelled dataset in which the teacher was trained, or in other, potentially larger unlabelled ones. Depending on the student's architecture and the size of the dataset, different efficiency-performance trade-offs can be reached. These methods have been extensively used in the domain of image recognition (Yalniz et al., 2019; Touvron et al., 2020; Xie et al., 2020); however, in the domain of video analysis, they are limited to video classification methods (Bhardwaj et al., 2019; Garcia et al., 2018; Crasto et al., 2019; Stroud et al., 2020), typically performing distillation at feature level across different modalities. Those methods typically distill the features of a stream of the network operating in a (computationally) expensive modality (e.g., optical flow field, or depth) into the features of a cheaper modality (e.g., RGB images) so that only the latter need to be stored/extracted and processed at test time. This approach does not scale well on large datasets, as it requires storage or re-estimation of the intermediate features. Furthermore, current works arrive at fixed trade-offs of performance and computational/storage efficiency.

Fig. 2 Overview of the proposed framework. It consists of three networks: a coarse-grained student S^c, a fine-grained student S^f, and a selector network SN. Processing is split into two phases, Indexing and Retrieval. During indexing (blue box), given a video database, three representations needed by our networks are extracted and stored in a video index, i.e., for each video, we extract a 3D tensor, a 1D vector, and a scalar that captures video self-similarity. During retrieval (red box), given a query video, we extract its features, which, along with the indexed ones, are processed by the SN. It first sends all the 1D vectors of query-target pairs to S^c for an initial similarity calculation. Then, based on the calculated similarity and the self-similarity of the videos, the selector network judges which query-target pairs have to be re-ranked with the S^f, using the 3D video tensors. Straight lines indicate continuous flow, i.e., all videos/video pairs are processed, whereas dashed lines indicate conditional flow, i.e., only a number of selected videos/video pairs are processed. Our students are trained with Knowledge Distillation based on a fine-grained teacher network, and the selector network is trained based on the similarity difference between the two students.

In this work, we propose to address the problem of high retrieval performance and computationally efficient content-based video retrieval in large-scale datasets.
The proposed method builds on the framework of Knowledge Distillation, and starting from a well-performing, high-accuracy-high-complexity teacher, namely a fine-grained video similarity learning method (ViSiL) (Kordopatis-Zilos et al., 2019b), trains a) both fine-grained and coarse-grained student networks on a large-scale unlabelled dataset and b) a selection mechanism, i.e., a learnable re-ranking module, that decides whether the similarity estimated by the coarse-grained student is accurate enough, or whether the fine-grained student needs to be invoked. By contrast to other re-ranking methods that use a threshold on the similarity estimated by the fast network (the coarse-grained student in our case), our selection mechanism is a trainable, lightweight neural network. All networks are trained so as to extract representations that are stored/indexed, so that each video in the database is indexed by the fine-grained spatio-temporal representation (3D tensor), its global, vector-based representation (1D vector), and a scalar self-similarity measure that is extracted by the feature extractor of the selector network, and can be seen as a measure of the complexity of the videos in question. The latter is expected to be informative of how accurate the coarse-grained, video-level similarity is, and together with the similarity rapidly estimated by the coarse-grained representations, is used as input to the selector. We note that, by contrast to other Knowledge Distillation methods in videos that address classification problems and typically perform distillation at intermediate features, the students are trained on a similarity measure provided by the teacher -this allows training on large scale datasets as intermediate features of the networks do not need to be stored, or estimated multiple times. Due to the ability to train on large unlabeled datasets, more complex models, i.e., with more trainable parameters, can be employed leading to even better performance than the original teacher network. An overview of the proposed framework is illustrated in Fig. 2. The main contributions of this paper can be summarized as follows: -We build a re-ranking framework based on a Knowledge Distillation scheme and a Selection Mechanism that allows for training our student and selector networks using large unlabelled datasets. We employ a teacher network that is very accurate but needs a lot of computational resources to train several student networks and the selector networks, and use them to achieve different performance-efficiency trade-offs. -We propose a selection mechanism that, given a pair of a fine-and a coarse-grained student, learns whether the similarity estimated by the fast, coarsegrained student is accurate enough, or whether the slow, fine-grained student needs to be invoked. To the best of our knowledge, we are the first to propose such a trainable selection scheme based on video similarity. -We propose two fine-grained and one coarse-grained student architectures. We develop: (i) a fine-grained attention student, using a more complex attention scheme than the teacher's, (ii) a fine-grained binarization student that extracts binarized features for the similarity calculation, and (iii) a course-grained attention student that exploits region-level information, and the intra-and inter-video relation of frames for the aggregation. -We evaluate the proposed method on five publicly available datasets and compare it with several state-of-the-art methods. 
Our fine-grained student achieves state-of-the-art performance on two out of four datasets, and our DnS approach retains competitive performance with more than 20 times faster retrieval per query and 99% lower storage requirements compared to the teacher. The remainder of the paper is organised as follows. In Sect. 2, the related literature is discussed. In Sect. 3, the proposed method is presented in detail. In Sect. 4, the datasets and implementation are presented. In Sect. 5, the results and ablation studies are reported. In Sect. 6, we draw our conclusions. Related Work This section gives an overview of some of the fundamental works that have contributed to content-based video retrieval and knowledge distillation. Video retrieval The video retrieval methods can be roughly classified, based on the video representations and similarity calculation processes employed, in three categories: coarsegrained, fine-grained, and re-ranking approaches. Coarse-grained approaches Coarse-grained approaches represent videos with a global video-level signature, such as an aggregated feature vector or a binary hash code, and use a single operation for similarity calculation, such as a dot product. A straightforward approach is the extraction of global vectors as video representations combined with the dot product for similarity calculation. Early works (Wu et al., 2007;Huang et al., 2010) extracted hand-crafted features from video frames, i.e., color histograms, and aggregated them to a global vector. More recent works (Gao et al., 2017;Kordopatis-Zilos et al., 2017b;Lee et al., 2018Lee et al., , 2020 rely on CNN features combined with aggregation methods. Also, other works (Cai et al., 2011;Kordopatis-Zilos et al., 2017a) aggregate video content to Bag-of-Words (BoW) representation (Sivic and Zisserman, 2003) by mapping frames to visual words and extracting global representations with tf-idf weighting. Another popular direction is the generation of hash codes for the entire videos combined with Hamming distance (Song et al., 2011(Song et al., , 2018Liong et al., 2017;Yuan et al., 2020). Typically, the hashing is performed via a network trained to preserve relations between videos. Coarse-grained methods provide very efficient retrieval covering the scalability needs of web-scale applications; however, their retrieval performance is limited, typically outperformed by the fine-grained approaches. Fine-grained approaches Fine-grained approaches extract video representations, ranging from video-level to region-level, and calculate similarity by considering spatio-temporal relations between videos based on several operations, e.g., a dot product followed by a max operation. Tan et al. (2009) proposed a graph-based Temporal Network (TN) structure, used for the detection of the longest shared path between two compared videos, which has also been combined with frame-level deep learning networks (Jiang and Wang, 2016;Wang et al., 2017). Additionally, other approaches employ Temporal Hough Voting (Douze et al., 2010) to align matched frames by means of a temporal Hough transform. Another solution is based on Dynamic Programming (DP) (Chou et al., 2015), where the similarity matrix between all frame pairs is calculated, and then the diagonal blocks with the largest similarity are extracted. Another direction is to generate spatiotemporal representations with the Fourier transform in a way that accounts for the temporal structure of video similarity (Poullot et al., 2015;Baraldi et al., 2018). 
Finally, some recent works rely on attention-based schemes to learn video comparison and aggregation by training either attentional RNN architectures (Feng et al., 2018;Bishay et al., 2019), transformer-based networks for temporal aggregation (Shao et al., 2021), or multi-attentional networks that extract multiple video representations (Wang et al., 2021). Fine-grained methods achieve high retrieval performance; however, they do not scale well to massive datasets due to their high computational and storage requirements. Video Re-ranking Re-ranking is a common practice in retrieval systems. In the video domain, researchers have employed it to combine methods from the two aforementioned categories (i.e., coarse-and fine-grained) to overcome their bottleneck and achieve efficient and accurate retrieval (Wu et al., 2007;Douze et al., 2010;Chou et al., 2015;Yang et al., 2019;Liang and Wang, 2020). Typical methods deploy a coarse-grained method as an indexing scheme to quickly rank and filter videos, e.g., using global vectors (Wu et al., 2007) or BoW representations (Chou et al., 2015;Liang and Wang, 2020). Then, a fine-grained algorithm, such as DP (Chou et al., 2015), Hough Voting (Douze et al., 2010) or frame-level matching (Wu et al., 2007), is applied on the videos that exceed a similarity threshold in order to refine the similarity calculation. Another re-ranking approach employed for video retrieval is Query Expansion (QE) (Chum et al., 2007). It is a two-stage retrieval process where, after the first stage, the query features are re-calculated based on the most similar videos retrieved, and the query process is executed again with the new query representation. This has been successfully employed with both coarse-grained Gao et al., 2017;Zhao et al., 2019) and fine-grained (Poullot et al., 2015;Baraldi et al., 2018) approaches. Also, an attention-based trainable QE scheme has been proposed in (Gordo et al., 2020) for image retrieval. However, even though the retrieval performance is improved with QE, the total computational time needed for retrieval is doubled as the query process is applied twice. Relevant works in the field of Knowledge Distillation distill knowledge based on the relations between data samples (Park et al., 2019;Tung and Mori, 2019;Liu et al., 2019;Lassance et al., 2020;Peng et al., 2019). Student networks are trained based on the distances between samples calculated by a teacher network (Park et al., 2019), the pairwise similarity matrix between samples within-batch (Tung and Mori, 2019), or by distilling graphs constructed based on the relations of the samples, using the sample representations as vertices and their distance as the edges to build an adjacency matrix Lassance et al., 2020). In the video domain, several approaches have been proposed for the improvement of the computational efficiency of the networks (Bhardwaj et al., 2019;Zhang and Peng, 2018;Garcia et al., 2018). Some works (Bhardwaj et al., 2019) proposed a Knowledge Distillation setup for video classification where the student uses only a fraction of the frames processed by the teacher, or multiple teachers are employed to construct a graph based on their relations, and then a smaller student network is trained (Zhang and Peng, 2018). 
Also, a popular direction is to build methods for distillation from different modalities and learn with privileged information to increase the performance of a single network, i.e., using depth images (Garcia et al., 2018), optical flow (Crasto et al., 2019;Stroud et al., 2020;Piergiovanni et al., 2020), or multiple modalities Piergiovanni et al., 2020). In video retrieval, Knowledge Distillation has been employed for feature representation learning on frame-level using the evaluation datasets (Liang et al., 2019). Comparison to previous approaches In this section, we draw comparisons of the proposed approach to the related works from the literature with respect to the claimed novelties. Proposed Framework: There is no similar prior work in the video domain that builds a re-ranking framework based on Knowledge Distillation and a trainable Selection Mechanism based on which the re-ranking process is performed. Other works (Chou et al., 2015;Yang et al., 2019;Liang and Wang, 2020) rely on outdated hand-crafted methods using simple re-ranking approaches based on similarity thresholding, the selection of which is a non-trivial task. By contrast, in this work, a framework is proposed that starts from an accurate but heavy-weight teacher to train a) both a fine-grained and coarse-grained student network on a large unlabelled dataset and b) a selection mechanism, i.e., a learnable module based on which the re-ranking process is performed. Knowledge Distillation: To the best of our knowledge, there is no prior work in the video domain that trains a pairwise function that measures video similarity with distillation. Works that use a similar loss function for distillation are (Park et al., 2019) and (Tung and Mori, 2019); however, these approaches have been proposed for the image domain. Video-based approaches (Bhardwaj et al., 2019;Zhang and Peng, 2018;Garcia et al., 2018;Liang et al., 2019) distill information between intermediate representations, e.g., video/frame activations or attention maps -this is costly due to the high computational requirements of the teacher. By contrast, in our training scheme the teacher's similarities of the video pairs used during training can be pre-computed -this allows training in large datasets in an unsupervised manner (i.e., without labels). Finally, these distillation methods end up with a single network that either offers compression or better performance -by contrast, in the proposed framework, we are able to arrive at different accuracy/speed/storage trade-offs. Network architectures: We propose three student network architectures that are trained with Knowledge Distillation in an unsupervised manner on large unannotated datasets avoiding in this way overfitting (cf. Sect. 5.1.2). Two fine-grained students are built based on our prior work in (Kordopatis-Zilos et al., 2019b), with some essential adjustments to mitigate its limitations. A fine-grained attention student is developed using a more complex attention mechanism, which outperforms the Teacher when trained on the large unlabeled dataset. Also, a fine-grained binarization student is introduced with a binarization layer that has significantly lower storage requirements. Prior works have used binarization layers with coarse-grained approaches (Liong et al., 2017;Song et al., 2018;Yuan et al., 2020), but none learns a fine-grained similarity function based on binarized regional-level descriptors. Furthermore, a coarse-grained student is built. 
Its novelties are the use of a trainable region-level aggregation scheme - unlike other works that extract frame-level descriptors - and the combination of two aggregation components at frame level that considers intra- and inter-video relations between frames. Prior works have employed a transformer encoder to capture intra-video frame relations (Shao et al., 2021), or a NetVLAD to capture inter-video ones (Miech et al., 2017); however, none combines the two components together.

Distill-and-Select

This section presents the Distill-and-Select (DnS) method for video retrieval. First, we describe the developed retrieval pipeline, which involves a fine-grained and a coarse-grained student network trained with Knowledge Distillation, and a selector network, acting as a re-ranking mechanism (Sect. 3.1). Then, we discuss the network architectures/alternatives employed in our proposed approach that offer different performance-efficiency trade-offs (Sect. 3.2). Finally, the training processes followed for the training of the proposed networks are presented (Sect. 3.3).

Approach overview

Fig. 2 depicts the DnS framework. It consists of three networks: (i) a coarse-grained student (S^c) that provides very fast retrieval speed but with low retrieval performance, (ii) a fine-grained student (S^f) that has high retrieval performance but with high computational cost, and (iii) a selector network (SN) that routes the similarity calculation of the video pairs and provides a balance between performance and time efficiency. Each video in the dataset is stored/indexed using three representations: (i) a spatio-temporal 3D tensor f_{S^f} that is extracted (and then used at retrieval time) by the fine-grained student S^f, (ii) a 1D global vector f_{S^c} that is extracted (and then used at retrieval time) by the coarse-grained student S^c, and (iii) a scalar f_{SN} that summarises the similarity between different frames of the video in question, extracted (and then used at retrieval time) by the selector network SN. The indexing process that includes the feature extraction is illustrated within the blue box in Fig. 2, 3, 4, 5 and is denoted as f_X(·) for each network X. At retrieval time, given an input query-target video pair, the selector network sends to the coarse-grained student S^c the global 1D vectors so that their similarity is rapidly estimated (i.e., as the dot product of the representations) g_{S^c}. This coarse similarity and the self-similarity scalars for the videos in question are then given as input to the selector SN, which takes a binary decision g_{SN} on whether the calculated coarse similarity needs to be refined by the fine-grained student. For the small percentage of videos for which this is needed, the fine-grained network calculates the similarity g_{S^f} based on the spatio-temporal representations.

Fig. 3 During indexing, a 3D video tensor is extracted based on a Feature Extraction (FE) process, applying regional pooling, whitening, and ℓ2-normalization on the activations of a CNN. Then, a modular component is applied according to the employed network, i.e., an attention scheme for T and S^f_A, and a binarization layer for S^f_B. During retrieval, the Tensor Dot (TD) followed by Chamfer Similarity (CS) are applied on the representations of a video pair to generate their frame-to-frame similarity matrix, which is propagated to a Video Comparator (VC) CNN that captures the temporal patterns. Finally, CS is applied again to derive a single video-to-video similarity score.
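To make the indexing/retrieval split described above concrete, the following Python sketch outlines how the selector-driven re-ranking could be wired together at query time. It is a minimal illustration of the control flow only: the names (coarse_student, fine_student, selector, index, top_fraction) and the fixed re-ranking fraction are our own assumptions, not the released implementation.

import numpy as np

def retrieve(query, index, coarse_student, fine_student, selector, top_fraction=0.05):
    """Rank database videos for one query with selector-based re-ranking (illustrative only)."""
    q_vec = coarse_student.embed(query)        # 1D global vector f_{S^c}(query)
    q_tensor = fine_student.embed(query)       # 3D region-level tensor f_{S^f}(query)
    q_self = selector.self_similarity(query)   # scalar f_{SN}(query)

    # Coarse similarity for the whole database: one dot product per indexed video.
    coarse_sims = index.vectors @ q_vec                      # shape: (num_videos,)

    # Selector input: (coarse similarity, query self-similarity, target self-similarity).
    z = np.stack([coarse_sims,
                  np.full_like(coarse_sims, q_self),
                  index.self_sims], axis=1)
    confidence = selector.confidence(z)                      # higher -> refine with the fine-grained student

    # Re-rank only the most "uncertain" fraction with the fine-grained student.
    sims = coarse_sims.copy()
    for i in np.argsort(-confidence)[:int(top_fraction * len(sims))]:
        sims[i] = fine_student.similarity(q_tensor, index.tensors[i])
    return np.argsort(-sims)                                 # ranked database indices

Under this scheme the per-query cost is dominated by the cheap dot products plus a small, fixed share of fine-grained comparisons.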
The retrieval process that includes the similarity calculation is illustrated within the red box in Fig. 2, 3, 4, 5 and is denoted as g_X(·, ·) for each network X. In practice, we apply the above process on every query-target video pair derived from a database, and a predefined percentage of videos with the largest confidence score calculated by the selector is sent to the fine-grained student for re-ranking. With this scheme, we achieve very fast retrieval with very competitive retrieval performance.

Network architectures

In this section, the architectures of all networks included in the DnS framework are discussed. First, the teacher network that is based on the ViSiL architecture is presented (3.2.1). Then, we discuss our student architectures, which we propose under a Knowledge Distillation framework that addresses the limitations introduced by the teacher, i.e., high resource requirements, both in terms of memory space for indexing, due to the region-level video tensors, and computational time for retrieval, due to the fine-grained similarity calculation. More precisely, three students are proposed, two fine-grained and one coarse-grained variant, each providing different benefits. The fine-grained students both use the ViSiL architecture. The first fine-grained student simply introduces more trainable parameters, leading to better performance with similar computational and storage requirements to the teacher (3.2.2). The second fine-grained student optimizes a binarization function that hashes features into a Hamming space and has very low storage space requirements for indexing with little performance sacrifice (3.2.3). The third, coarse-grained student learns to aggregate the region-level feature vectors in order to generate a global video-level representation and needs considerably fewer resources for indexing and retrieval but at notable performance loss (3.2.4). Finally, we present the architecture of the selector network for indexing and retrieval (3.2.5). Our framework operates with a specific combination of a fine-grained and coarse-grained student and a selector network. Each combination achieves different trade-offs between retrieval performance, storage space, and computational time.

Baseline Teacher (T)

Here, we will briefly present the video similarity learning architecture that we employ as the teacher and which builds upon the ViSiL (Kordopatis-Zilos et al., 2019b) architecture (Fig. 3).

Feature extraction/Indexing (f_T): Given an input video, we first extract region-level features from the intermediate convolution layers (Kordopatis-Zilos et al., 2017a) of a backbone CNN architecture by applying region pooling (Tolias et al., 2016) on the feature maps. These are further PCA-whitened (Jégou and Chum, 2012) and ℓ2-normalized. We denote the aforementioned process as Feature Extraction (FE), and we employ it in all of our networks. FE is followed by a modular component, as shown in Fig. 3, that differs for each fine-grained student. In the case of the teacher, an attention mechanism is employed so that frame regions are weighted based on their saliency, via a visual attention mechanism over region vectors based on an ℓ2-normalized context vector. The context vector is a trainable vector u ∈ R^D that weights each region vector independently based on their dot-product. It is learned through the training process. Also, no fully-connected layer is employed to transform the region vectors for the attention calculation.
We refer to this attention scheme as ℓ2-attention. The output representation of an input video x is a region-level video tensor X ∈ R^{N_x×R_x×D}, where N_x is the number of frames, R_x is the number of regions per frame, and D is the dimensionality of the region vectors - this is the output of the indexing process, and we denote it as f_T(x).

Similarity calculation/Retrieval (g_T): At retrieval time, given two videos, q and p, with N_q and N_p frames and R_q and R_p regions per frame, respectively, for every pair of frames, we first calculate the frame-to-frame similarity based on the similarity of their region vectors. More precisely, to calculate the frame-to-frame similarity on videos q and p, we calculate the Tensor Dot combined with Chamfer Similarity on the corresponding video tensors f_T(q) = Q ∈ R^{N_q×R_q×D} and f_T(p) = P ∈ R^{N_p×R_p×D} as follows

M_f^{qp}(i, j) = (1/R_q) Σ_{k=1}^{R_q} max_{l=1,...,R_p} Q(i, k, ·) · P(j, l, ·),    (1)

where M_f^{qp} ∈ R^{N_q×N_p} is the output frame-to-frame similarity matrix, and the Tensor Dot axes indicate the channel dimension of the corresponding video tensors. Also, the Chamfer Similarity is implemented as a max-pooling operation followed by an average-pooling on the corresponding dimensions. This process leverages the geometric information captured by region vectors and provides some degree of spatial invariance. Also, it is worth noting that this frame-to-frame similarity calculation process is independent of the number of frames and region vectors; thus, it can be applied on any video pair with arbitrary sizes and lengths.

To calculate the video-to-video similarity, the generated similarity matrix M_f^{qp} is fed to a Video Comparator (VC) CNN module (Fig. 3), which is capable of learning robust patterns of within-video similarities. The output of the network is the refined similarity matrix M_v^{qp} ∈ R^{N_q×N_p}. In order to calculate the final video-level similarity for two input videos q, p, i.e., g_T(q, p), the hard tanh (Htanh) activation function is applied on the values of the aforementioned network output followed by Chamfer Similarity in order to obtain a single value, as follows

g_T(q, p) = (1/N_q) Σ_{i=1}^{N_q} max_{j=1,...,N_p} Htanh(M_v^{qp}(i, j)).    (2)

In that way, the VC takes temporal consistency into consideration by applying learnable convolutional operations on the frame-to-frame similarity matrix. Those enforce local temporal constraints while the Chamfer-based similarity provides invariance to global temporal transformations. Hence, similarly to the frame-to-frame similarity calculation, this process is a trade-off between respecting the video-level structure and being invariant to some temporal differences.
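As a concrete reference for equations (1) and (2), the NumPy sketch below computes the Tensor Dot / Chamfer Similarity. It assumes ℓ2-normalized region vectors and, for brevity, skips the learned Video Comparator CNN (the refined matrix is taken equal to the raw frame-to-frame matrix), so it only approximates the teacher's full similarity; all names are ours.

import numpy as np

def frame_to_frame_similarity(Q, P):
    """Eq. (1): Q is (Nq, Rq, D), P is (Np, Rp, D); returns an (Nq, Np) matrix."""
    region_sims = np.tensordot(Q, P, axes=([2], [2]))  # (Nq, Rq, Np, Rp): all region pairs
    return region_sims.max(axis=3).mean(axis=1)        # Chamfer: max over P's regions, mean over Q's

def video_to_video_similarity(M_v):
    """Eq. (2): hard tanh followed by Chamfer Similarity over the frame-to-frame matrix."""
    return np.clip(M_v, -1.0, 1.0).max(axis=1).mean()

# Toy usage with random, ℓ2-normalized region vectors (the VC refinement step is omitted).
Q = np.random.randn(8, 9, 512); Q /= np.linalg.norm(Q, axis=2, keepdims=True)
P = np.random.randn(12, 9, 512); P /= np.linalg.norm(P, axis=2, keepdims=True)
print(video_to_video_similarity(frame_to_frame_similarity(Q, P)))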
Fine-grained attention student (S^f_A)

The first fine-grained student adopts the same architecture as the teacher (Sect. 3.2.1, Fig. 3), but uses a more complex attention scheme in the modular component, employed for feature weighting, as proposed in (Yang et al., 2016). The Feature Extraction (FE) process is used to extract features, similar to the teacher. In the modular component shown in Fig. 3, we apply an attention weighting scheme as follows. Given a region vector r : X(i, j, ·) ∈ R^D, where i = 1, . . . , N_x, j = 1, . . . , R_x, a non-linear transformation is applied, which is implemented as a fully-connected layer with tanh activation function, to form a hidden representation h. Then, the attention weight is calculated as the dot product between h and the context vector u, followed by the sigmoid function, as

h = tanh(W_a r + b_a),   α = sig(u^T h),    (3)

where W_a ∈ R^{D×D} and b_a ∈ R^D are the weight and bias parameters of the hidden layer of the attention module, respectively, and sig(·) denotes the element-wise sigmoid function. We will be referring to this attention scheme as h-attention. The resulting 3D representation is the indexing output f_{S^f_A}(x) for an input video x.

Similarity calculation/Retrieval (g_{S^f_A}): To calculate similarity between two videos, we build the same process as for the teacher, i.e., we employ a Video Comparator (VC) and use the same frame-to-frame and video-to-video functions to derive g_{S^f_A}(q, p) for two input videos q, p (Fig. 3). In comparison to the teacher, this student a) has very similar storage requirements, since in both cases the videos are stored as non-binary spatio-temporal features, b) has similar computational cost, since the additional attention layer introduces only negligible overhead, and c) typically reaches better performance, since it has slightly higher capacity and can be trained in a much larger, unlabelled dataset.

Fine-grained binarization student (S^f_B)

The second fine-grained student also adopts the same architecture as the teacher (Sect. 3.2.1, Fig. 3), except for the modular component, where a binarization layer is introduced, as discussed below.

Feature extraction/Indexing (f_{S^f_B}): This is the part of the indexing of the student S^f_B that extracts a binary representation for an input video that will be stored and used at retrieval time. It uses the architecture of the teacher, where the modular component is implemented as a binarization layer (Fig. 3). This applies a binarization function on the region vectors of the video tensor, that is,

X_B(i, j, ·) = sgn(X(i, j, ·) W_B),    (4)

where W_B ∈ R^{D×L} denotes the learnable weights and sgn(·) denotes the element-wise sign function. However, since sgn is not a differentiable function, learning binarization parameters via backpropagation is not possible. To address this, we propose an approximation of the sign function under the assumption of small uncertainty in its input. More specifically, let sgn : x → {±1}, where x is drawn from a uni-variate Gaussian distribution with given mean µ and fixed variance σ^2, i.e., x ∼ N(µ, σ^2). Then, the expected value² of the sign of x is given analytically as

E[sgn(x)] = erf(µ / √(2σ^2)),    (5)

where erf(·) denotes the error function. This is differentiable and therefore can serve as an activation function on the binarization parameters, that is,

X̃_B(i, j, ·) = erf(X(i, j, ·) W_B / √(2σ^2)),    (6)

where we use as variance an appropriate constant value (empirically set to σ = 10^-3). During training, we use (6), while during evaluation and hash code storage we use (4). After applying this operation to an arbitrary video x with N_x frames and R_x regions, we arrive at a binary tensor X_B ∈ {±1}^{N_x×R_x×L}, which is the indexing output f_{S^f_B}(x) = X_B used by this student.

² Since P(x > 0) = P(z > -µ/σ) = Φ(µ/σ), E[sgn(x)] = 2Φ(µ/σ) - 1 = erf(µ/√(2σ^2)), where z ∼ N(0, 1) denotes the standard Gaussian and Φ its CDF.
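The binarization layer of equations (4)-(6) can be written compactly in PyTorch. The sketch below is our own illustration (the layer name, initialization, and code length L are assumptions): it uses the erf surrogate for gradients during training and the exact sign when producing hash codes for indexing.

import torch
import torch.nn as nn

class BinarizationLayer(nn.Module):
    """Maps D-dim region vectors to L-dim codes in {-1, +1} (sketch of Eqs. 4-6)."""
    def __init__(self, dim=512, bits=512, sigma=1e-3):
        super().__init__()
        self.W = nn.Parameter(torch.randn(dim, bits) / dim ** 0.5)  # plays the role of W_B
        self.sigma = sigma

    def forward(self, x):
        proj = x @ self.W                        # x: (..., D) float region vectors
        if self.training:
            # Eq. (6): smooth surrogate, E[sgn(proj)] = erf(proj / sqrt(2 * sigma^2)).
            return torch.erf(proj / ((2.0 ** 0.5) * self.sigma))
        return torch.sign(proj)                  # Eq. (4): exact codes for storage/indexing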
Similarity calculation/Retrieval (g_{S^f_B}): In order to adapt the similarity calculation processes to the binarization operation, the Hamming Similarity (HS) combined with Chamfer Similarity is employed as follows. Given two videos q, p and their binary tensors Q_B ∈ {±1}^{N_q×R_q×L} and P_B ∈ {±1}^{N_p×R_p×L}, respectively, we first calculate the HS between the two tensors with the use of Tensor Dot to calculate the similarity of all region pair combinations of the two videos and then apply Chamfer Similarity to derive the frame-to-frame similarity matrix M_B^{qp} ∈ R^{N_q×N_p}. That is,

M_B^{qp}(i, j) = (1/R_q) Σ_{k=1}^{R_q} max_{l=1,...,R_p} (1/L) Q_B(i, k, ·) · P_B(j, l, ·).    (7)

Finally, a Video Comparator (VC) is applied on the frame-to-frame similarity matrices in order to calculate the final video-to-video similarity, similarly to (2) in the original teacher (Fig. 3) - this is denoted as g_{S^f_B}(q, p) for two input videos q, p. In comparison to the teacher, this student a) has remarkably lower storage requirements, since the binary spatio-temporal representations are 32 times smaller than the corresponding float ones (full precision), b) has similar computational cost, as the architecture is very similar, and c) reaches better performance since it is trained on a larger (despite being unlabelled) dataset. Note that this student only uses a binary input but is not a binarized network.

Coarse-grained student (S^c)

The coarse-grained student introduces an architecture that extracts video-level representations that are stored and can be subsequently used at retrieval time so as to rapidly estimate the similarity between two videos as the cosine similarity of their representations. An overview of the coarse student is shown in Fig. 4.

Feature extraction/Indexing (f_{S^c}): The proposed coarse-grained student comprises three components. First, we extract weighted region-level features with Feature Extraction (FE), using the attention module given by (3), and then average pooling is applied across the spatial dimensions of the video tensor, leading to frame-level representations for the videos; i.e.,

v_i = (1/R_x) Σ_{k=1}^{R_x} α_k X(i, k, ·),

where v_i ∈ R^D is the frame-level vector of the i-th video frame, R_x is the number of regions, and α_k is the attention weight calculated by (3). In that way, we apply a trainable scheme to aggregate the region-level features that focuses on the information-rich regions.

Fig. 4 Illustration of the architecture of the coarse-grained student S^c, consisting of three main components. During indexing, the FE process with attention weighting and average pooling is applied to extract frame-level features. Then, they are processed by a Transformer network and aggregated to 1D vectors by a NetVLAD module. During retrieval, the video similarity derives from a simple dot product between the extracted representations.

Second, a transformer (Vaswani et al., 2017) network architecture is used to derive frame-level representations that capture long-term dependencies within the frame sequence, i.e., it captures the intra-video relations between frames. Following Shao et al. (2021), the encoder part of the Transformer architecture is used, which is composed of a multi-head self-attention mechanism and a feedforward network. Finally, a NetVLAD (Arandjelovic et al., 2016) module aggregates the entire video to a single vector representation (Miech et al., 2017). This component learns a number of cluster centers and a soft assignment function through the training process, considering all videos in the training dataset. Therefore, it can be viewed as encoding the inter-video relations between frames. Given an input video x, the output f_{S^c}(x) is a 1D video-level vector that is indexed and used by the coarse-grained student during retrieval.

Similarity calculation/Retrieval (g_{S^c}): Once feature representations have been extracted, the similarity calculation is a simple dot product between the 1D vectors of the compared videos, i.e., g_{S^c}(q, p) = f_{S^c}(q) · f_{S^c}(p) for two input videos q, p.
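A rough PyTorch outline of the coarse-grained student's aggregation path (attention-weighted region pooling, a Transformer encoder over frames, and a NetVLAD-style pooling into a single vector) is sketched below. The NetVLAD module here is a deliberately simplified stand-in and all hyperparameters are illustrative assumptions, not the values used in the paper.

import torch
import torch.nn as nn
import torch.nn.functional as F

class CoarseStudent(nn.Module):
    """Region tensor (N, R, D) -> single L2-normalized video vector (illustrative sketch)."""
    def __init__(self, dim=512, clusters=64):
        super().__init__()
        self.att = nn.Linear(dim, dim)                      # h-attention hidden layer
        self.u = nn.Parameter(torch.randn(dim))             # context vector
        enc_layer = nn.TransformerEncoderLayer(d_model=dim, nhead=8, batch_first=True)
        self.transformer = nn.TransformerEncoder(enc_layer, num_layers=1)
        self.centroids = nn.Parameter(torch.randn(clusters, dim))  # simplified NetVLAD

    def forward(self, x):                                   # x: (N, R, D)
        alpha = torch.sigmoid(torch.tanh(self.att(x)) @ self.u)     # (N, R) attention weights
        frames = (alpha.unsqueeze(-1) * x).mean(dim=1)      # (N, D) frame-level vectors
        frames = self.transformer(frames.unsqueeze(0)).squeeze(0)   # intra-video relations
        # Soft-assign frames to centroids and aggregate residuals (NetVLAD-style).
        assign = F.softmax(frames @ self.centroids.t(), dim=-1)     # (N, K)
        vlad = (assign.unsqueeze(-1) * (frames.unsqueeze(1) - self.centroids)).sum(dim=0)
        return F.normalize(vlad.flatten(), dim=0)           # 1D video-level vector

# At retrieval time, two such vectors are compared with a single dot product, as in g_{S^c}(q, p).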
In comparison to the original teacher, this student a) has remarkably lower storage requirements for indexing, since it stores video-level representations instead of spatio-temporal ones, b) has significantly lower computational cost at retrieval time, since the similarity is calculated with a single dot-product between video-level representations, and c) has considerably lower performance, since it does not model spatio-temporal relations between videos during similarity calculation.

Selector network (SN)

In the proposed framework, at retrieval time, given a pair of videos, the role of the selector is to decide whether the similarity that is calculated rapidly based on the stored coarse video-level representations is accurate enough (i.e., similar to what a fine-grained student would give), or whether a fine-grained similarity, based on the spatio-temporal, fine-grained representations, needs to be used and a new, refined similarity measure needs to be estimated. Clearly, this decision needs to be taken rapidly and with a very small additional storage requirement for each video.

The proposed selector network is shown in Fig. 5. At retrieval time, a simple Multi-Layer Perceptron (MLP) takes as input a three-dimensional vector, z ∈ R^3, with the following features: a) the similarity between a pair of videos q, p, as calculated by S^c (Sect. 3.2.4), and b) the fine-grained self-similarities f_{SN}(q) and f_{SN}(p), calculated by a trainable NN (Fig. 5). Since f_{SN}(x) depends only on video x, it can be stored together with the representations of the video x with negligible storage cost. Having f_{SN}(q) and f_{SN}(p) pre-computed, and g_{S^c} rapidly computed by the coarse-grained student, the use of the selector at retrieval time comes at a negligible storage and computational cost. Both the self-similarity function f_{SN} that extracts features at indexing time and the MLP that takes the decision at retrieval time, which are parts of the Selector Network SN, are jointly trained.

Fig. 5 Illustration of the Selector Network architecture. During indexing, the self-similarity of the videos is calculated according to the following scheme. First, region-level attention-weighted features are extracted. Then, the frame-to-frame self-similarity matrix is derived with a Tensor Dot (TD) and Average Pooling (AP), which is propagated to a VC module to capture temporal patterns. The final self-similarity is calculated based on an AP on the VC output. During retrieval, given a video pair, a 3-dimensional vector is composed of the self-similarity of each video and their similarity calculated by the S^c. The feature vector is fed to an MLP to derive a confidence score.

In what follows, we describe the architecture of the selector, starting from the network that calculates the fine-grained self-similarity f_{SN}. This is a modified version of the ViSiL architecture that aims to derive a measure that captures whether there is large spatio-temporal variability in the video's content. This is expected to be informative on whether the fine-grained student needs to be invoked. The intuition is that for videos with high f_{SN}, i.e., not high spatio-temporal variability, their video-level representations are sufficient to calculate their similarity, i.e., the similarity estimated by the coarse-grained student is accurate enough.

Feature extraction/Indexing (f_{SN}): Given a video x as input, features are extracted based on the Feature Extraction (FE), using the attention module as in (3), to derive a video tensor X ∈ R^{N_x×R_x×D}. Then, the frame-to-frame self-similarity matrix is calculated as

M_f^x(i, j) = (1/R_x^2) Σ_{k=1}^{R_x} Σ_{l=1}^{R_x} X(i, k, ·) · X(j, l, ·),    (8)

where M_f^x ∈ R^{N_x×N_x} is the symmetric frame-to-frame self-similarity matrix. Note that (8) is a modified version of (1), where the Chamfer Similarity is replaced by the average operator. In this case, we calculate the average similarity of a region with all other regions in the same frame - the use of Chamfer Similarity would have resulted in estimating the similarity of a region with the most similar region in the current frame, that is, itself.
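Equation (8) replaces the Chamfer max of equation (1) with an average. A small NumPy sketch of the self-similarity computation is given below; it omits the Video Comparator refinement described next (the final indexed scalar is the average of the refined matrix), and the function names are ours.

import numpy as np

def self_similarity_matrix(X):
    """Eq. (8): X is (N, R, D) with L2-normalized region vectors; returns an (N, N) matrix."""
    region_sims = np.tensordot(X, X, axes=([2], [2]))  # (N, R, N, R)
    return region_sims.mean(axis=(1, 3))               # average over both region axes

def self_similarity_score(X):
    """Indexed scalar f_SN(x), here without the VC refinement: mean pairwise frame similarity."""
    return self_similarity_matrix(X).mean()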
Similarly, a Video Comparator (VC) CNN network is employed (same as ViSiL, Fig. 3) that is fed with the self-similarity matrix in order to extract the temporal patterns and generate a refined self-similarity matrix M_v^x ∈ R^{N_x×N_x}. To extract a final score (indexing output) that captures self-similarity, we modify (2) as

f_{SN}(x) = (1/N_x^2) Σ_{i=1}^{N_x} Σ_{j=1}^{N_x} M_v^x(i, j),    (9)

that is, the average of the pair-wise similarities of all video frames. Note that we also do not use the hard tanh activation function, as we empirically found that it is not needed.

Confidence calculation/Retrieval (g_{SN}): Given a pair of videos and their similarity predicted by the S^c, we retrieve the indexed self-similarity scores, and then we concatenate them with the S^c similarity, forming a three-dimensional vector z ∈ R^3 for the video pair, as shown in Fig. 5. This vector is given as input to a two-layer MLP using Batch Normalization (Ioffe and Szegedy, 2015) and ReLU (Krizhevsky et al., 2012) activation functions. For an input video pair q, p, the retrieval output g_{SN}(q, p) is the confidence score of the selector network that the fine-grained student needs to be invoked.

Fig. 6 Illustration of the training process of the teacher network. It is trained with supervision with video triplets derived from a labelled dataset, minimizing the triplet loss.

Training process

In this section, we go through the details of the procedure followed for the training of the underlying networks of the proposed framework, i.e., the teacher, the students, and the selector.

Teacher training

The teacher network is trained with supervision on a labelled video dataset V_l, as shown in Fig. 6. The videos are organized in triplets (v, v^+, v^-) of an anchor, a positive (relevant), and a negative (irrelevant) video, respectively, where v, v^+, v^- ∈ V_l, and the network is trained with the triplet loss

L_tr = max(0, g_T(v, v^-) - g_T(v, v^+) + γ),    (10)

where γ is a margin hyperparameter. In addition, a similarity regularization function is used that penalizes high values in the input of hard tanh that would lead to saturated outputs. Following other works, we use data augmentation (i.e., color, geometric, and temporal augmentations) on the positive samples v^+.

Fig. 7 Illustration of the training process of the student networks. They are trained on an unlabelled dataset by minimizing the difference between their video similarity estimations and the ones calculated by the teacher network.

Student training

An overview of the student training process is illustrated in Fig. 7. Let V_u = {v_1, v_2, . . . , v_n} be a collection of unlabelled videos and g_T(q, p), g_S(q, p) be the similarities between videos q, p ∈ V_u, estimated by a teacher network T and a student network S, respectively. S is trained so that g_S approximates g_T, with the L1 loss, that is,

L_st = |g_T(q, p) - g_S(q, p)|.    (11)

Note that the loss is defined on the output of the teacher. This allows for a training process in which the scores of the teacher are calculated for a number of pairs in the unlabelled dataset only once, and then used as targets for the students. This is in contrast to methods where the loss is calculated on intermediate features of T and S, and cannot, thus, scale to large-scale datasets, as they have considerable storage and/or computational/memory requirements.
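The triplet loss of the teacher in (10) and the L1 distillation loss of the students in (11) translate directly into a few lines of PyTorch. The sketch below assumes similarity functions that return scalar tensors, uses an illustrative margin value, and omits the similarity regularization term mentioned above.

import torch

def triplet_loss(sim_anchor_pos, sim_anchor_neg, gamma=0.5):
    """Eq. (10): hinge on the similarities of the (anchor, positive, negative) triplet."""
    return torch.clamp(sim_anchor_neg - sim_anchor_pos + gamma, min=0.0)

def distillation_loss(teacher_sim, student_sim):
    """Eq. (11): L1 distance between teacher and student similarity estimates."""
    return torch.abs(teacher_sim - student_sim)

# Typical student step for one mined pair (q, p); teacher scores are pre-computed offline:
#   loss = distillation_loss(precomputed_teacher_sim, student_similarity(q, p)); loss.backward()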
This allows for a training process in which the scores of the teacher are calculated for a number of pairs in the unlabelled dataset only once, and then being used as targets for the students. This is in contrast to methods where the loss is calculated on intermediate features of T and S, and cannot, thus, scale to large-scale datasets as they have considerable storage and/or computational/memory requirements. In this setting, the selection of the training pairs is crucial. Since it is very time consuming to apply the teacher network T to every pair of videos in the dataset (O(n 2 ) complexity) and randomly selecting videos would result in mostly pairs with low similarity scores, here, we follow Kordopatis-Zilos et al. (2019a) and generate a graph to extract its connected components, which are considered as video clusters. Each video included in a video cluster is considered as an anchor, and we form pairs with the videos belonging to the same cluster, which are treated as positive pairs. Also, based on the anchor video, we form pairs with the 50 most similar videos that belong to the other clusters and the 50 most similar videos that belong to no cluster, which are treated as negative pairs. At each epoch, one positive and one negative pair are selected for each anchor video to balance their ratio. Fig. 8 Illustration of the training process of the selector network. It is trained on an unlabelled dataset, exploiting the similarities calculated by a coarse-and fine-grained student. Note that the fine-grained student is applied on all video pairs only during training time. During retrieval, only a portion of the dataset is sent to it. Selector training Typically, the similarity between two videos q and p that is estimated by a fine-grained student S f , leads to better retrieval scores than the one estimated by the coarse-grained student S c . However, for some video pairs, the difference between them (i.e., g S c (q, p) − g S f (q, p) 1 ) is small and, therefore, having negligible effect to the ranking and on whether the video will be retrieved or not. The selector is a network that is trained to distinguish between those video pairs, and pairs of videos that exhibit large similarity differences. For the former, only the coarse-grained student S c will be used; for the latter, the fine-grained student S f will be invoked. The selector network is trained as a binary classifier, with binary labels obtained by setting a threshold t on g S c (q, p) − g S f (q, p) 1 , that is, Video pairs are derived from V u , and Binary Cross-Entropy is used as a loss function, as shown in Fig. 8. We use the same mining process used for the student training, and at each epoch, a fixed number of video pairs is sampled for the two classes. We reiterate here that the selector is trained in an end-to-end manner, i.e., both the self-similarity feature extraction network f SN , given by (9), and the decision-making MLP (Fig. 5) are optimized jointly during training. Evaluation Setup In this section, we present the datasets (Sect. 4.1), evaluation metrics (Sect. 4.2), and implementation details (Sect. 4.3) adopted during the experimental evaluation of the proposed framework. Training datasets VCDB (Jiang et al., 2014) was used as the training dataset to generate triplets for the training of the teacher model. The dataset consists of videos derived from popular video platforms (i.e., YouTube and Metacafe) and has been developed and annotated as a benchmark for partial copy detection. 
It contains two subsets, namely, the core and the distractor subsets. The former one contains 28 discrete sets composed of 528 videos with over 9,000 pairs of copied segments. The latter subset is a corpus of approximately 100,000 randomly collected videos that serve as distractors. DnS-100K is the dataset collected for the training of the students. We followed the collection process from our prior work (Kordopatis-Zilos et al., 2019a) for the formation of the FIVR-200K dataset in order to collect a large corpus of videos with various relations between them. First, we built a collection of the major news events that occurred in recent years by crawling Wikipedia's "Current Event" page 4 . To avoid overlap with FIVR-200K, where the crawling period was from 2013-2017, we only considered the news events from the years 2018-2019. Then, we retained only the news events associated with armed conflicts and natural disasters by filtering them based on their topic. Afterwards, the public YouTube API 5 was used to collect videos by providing the event headlines as queries. The results were filtered to contain only videos published at the corresponding event start date and up to one week after the event. At the end of this process, we had collected a corpus of 115,792 videos. Following the mining scheme described in Sect. 3.3.2, we arrived at 21,997 anchor videos with approximately 2.5M pairs. CC WEB VIDEO (Wu et al., 2007) simulates the Near-Duplicate Video Retrieval (NDVR) problem. It consists of 24 query sets and 13,112 videos. The collection consists of a sample of videos retrieved by submitting 24 popular text queries to popular video sharing websites, i.e., YouTube, Google Video, and Yahoo! Video. For every query, a set of video clips was collected, and the most popular video was considered to be the query video. Subsequently, all videos in the video set were manually annotated based on their near-duplicate relation to the query video. We also use the 'cleaned' version, as provided in (Kordopatis-Zilos et al., 2019b). Evaluation datasets SVD (Jiang et al., 2019) was used for the NDVR problem, tailored for short videos in particular. It consists of 562,013 short videos crawled from a large videosharing website, namely, Douyin 6 . The average length of the collected videos is 17.33 seconds. The videos with more than 30,000 likes were selected to serve as queries. Candidate videos were selected and annotated based on a three-step retrieval process. A large number of probably negative unlabelled videos were also included to serve as distractors. Hence, the final dataset consists of 1,206 queries with 34,020 labelled video pairs and 526,787 unlabelled videos. The queries were split into two sets, i.e., training and test set, with 1,000 and 206 queries, respectively. In this paper, we only use the test set for the evaluation of the retrieval systems. EVVE was designed for the Event Video Retrieval (EVR) problem. It consists of 2,375 videos and 620 queries. The main task on this dataset is the retrieval of all videos that capture the event depicted by a query video. The dataset contains 13 major events that were provided as queries to YouTube. Each event was annotated by one annotator, who first produced a precise definition of the event. However, we managed to download and process only 1906 videos and 504 queries (that is, ≈80% of the initial dataset) due to the unavailability of the remaining ones. 
Evaluation metric To evaluate retrieval performance, we use the mean Average Precision (mAP) metric, as defined in (Wu et al., 2007), which captures the quality of video rankings. For each query, the Average Precision (AP) is calculated as where n is the number of relevant videos to the query video and r i is the rank of the i-th retrieved relevant video. The mAP is calculated by averaging the AP scores across all queries. Also, for the evaluation of the selector, we use the plot of mAP with respect to the total dataset percentage sent to the fine-grained student. The objective is to achieve high retrieval performance (in terms of mAP) with low dataset percentage. Implementation details All of our models have been implemented with the Py-Torch (Paszke et al., 2019) library. For the teacher, we have re-implemented ViSiL (Kordopatis-Zilos et al., 2019b) following the same implementation details, i.e., for each video, we extracted 1 frame per second and used ResNet-50 for feature extraction using the output maps of the four residual blocks, resulting in D = 3840. The PCA-whitening layer was learned from 1M region vectors sampled from VCDB. In all of our experiments, the weights of the feature extraction CNN and whitening layer remained fixed. We sampled 2000 triplets in each epoch. The teacher was trained for 200 epochs with 4 videos per batch using the raw video frames. We employed Adam optimization (Kingma and Ba, 2015) with learning rate 10 −5 . Other parameters were set to γ = 0.5, r = 0.1 and W = 64, similarly to (Kordopatis-Zilos et al., 2019b). For the students, we used the same feature extraction process as in the teacher, and the same PCAwhitening layer was used for whitening and dimensionality reduction. We empirically set D = 512 as the dimensions of the reduced region vectors. The students were trained with a batch size of 64 video pairs for 300 epochs, using only the extracted video features. Also, during training, we applied temporal augmentations, i.e., random frame drop, fast forward, and slow motion, with 0.1 probability each. We employed Adam optimization (Kingma and Ba, 2015) with learning rate 10 −5 and 10 −4 for the course-and fine-grained students, respectively. For the fine-grained binarization student, the binarization layer was initialized with the ITQ algorithm (Gong et al., 2012), learned on 1M region vectors sampled from our dataset, as we observed better convergence than random initialization, and with L = 512 bits. For the coarse-grained student's training, the teacher's similarities were rescaled to leading to better performance. Also, we used one layer in the transformer, with 8 heads for multi-head attention and 2048 dimension for the feed-forward network. For the NetVLAD module, we used 64 clusters, and a fully-connected layer with 1024 output dimensions and Layer Normalization (Ba et al., 2016). For the fine-grained students' training, we employed the similarity regularization loss from (Kordopatis-Zilos et al., 2019b), weighted with 10 −3 , arriving at marginal performance improvements. For the selector, we used the same feature extraction scheme that was used for the students. It was trained with a batch size of 64 video pairs for 100 epochs, using only the extracted video features. At each epoch, we sampled 5,000 video pairs from each class. We employed Adam optimization (Kingma and Ba, 2015) with learning rate 10 −4 . For the fully-connected layers of the MLP, we used 100 hidden units and 0.5 dropout rate. 
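For illustration, the decision MLP of the selector can be sketched as follows; the exact ordering of Batch Normalization, ReLU, and dropout is illustrative rather than a description of the released implementation.

import torch
import torch.nn as nn

class SelectorMLP(nn.Module):
    """Maps z = [g_Sc(q, p), f_SN(q), f_SN(p)] to a confidence score that the
    fine-grained student should be invoked for the pair (q, p)."""
    def __init__(self, in_dim=3, hidden=100, p_drop=0.5):
        super().__init__()
        self.net = nn.Sequential(
            nn.Linear(in_dim, hidden),
            nn.BatchNorm1d(hidden),
            nn.ReLU(),
            nn.Dropout(p_drop),
            nn.Linear(hidden, 1),
        )

    def forward(self, z):  # z: (batch_of_pairs, 3)
        return torch.sigmoid(self.net(z)).squeeze(-1)

During training, this output is compared via Binary Cross-Entropy against the binary labels obtained by thresholding the similarity difference of the two students (Sect. 3.3.3).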
For the training of the selector model, the similarities of the fine-grained student were rescaled to to match similarities calculated from the coarse-grained student. Finally, we used a threshold of t = 0.2 for the class separation, unless stated otherwise. Experiments In this section, the experimental results of the proposed approach are provided. First, a comprehensive ablation study on the FIVR-5K dataset is presented, evaluating the proposed students and the overall approach under different configurations to gain better insight into its behaviour (Sect. 5.1). Then, we compare the performance and requirements of the developed solutions against several methods from the literature on the four benchmark datasets (Sect. 5.2). Retrieval performance of the individual networks In Table 1, we show the performance and storage/time requirements of the teacher T and the three proposed student networks, namely, S f A , S f B , and S c , trained with the proposed scheme. The fine-grained attention student S f A achieves the best results on all evaluation tasks, outperforming the teacher T by a large margin. Also, the fine-grained binarization student S f B reports performance very close to the teacher's on the DSVR and CSVR tasks, and it outperforms the teacher on the ISVR task, using only quantized features with lower dimensionality than the ones used by the teacher and therefore requiring up to 240 times less storage space. This highlights the effectiveness of the proposed training scheme and the high quality of the collected dataset. Furthermore, both fine-grained students have similar time requirements, and they are three times faster than the teacher because they process lower dimensionality features. Finally, as expected, the coarse-grained student S c results in the worst performance compared to the other networks, but it has the lowest requirements in terms of both storage space and computational time. Distillation vs Supervision In Table 2, we show the performance of the teacher T trained with supervision on VCDB (as proposed in (Kordopatis-Zilos et al., 2019b) and used for our teacher training) and the three proposed students, namely, S f A , S f B , and S c , trained under various combinations: (i) with supervision on VCDB (same as the original teacher), (ii) with distillation on VCDB, and (iii) with distillation on the DnS-100K dataset. It is evident that the proposed training scheme using a large unlabelled dataset leads to considerably better retrieval performance compared to the other setups for all students. Also, it is noteworthy that training students with supervision, same as the teacher, results in a considerable drop in performance compared to distillation on either dataset. The students achieve better results when trained with DnS-100K instead of VCDB. An explanation for this is that our dataset contains various video relations (not only near-duplicates as in VCDB) and represents a very broad and diverse domain (by contrast to VCDB, which consists of randomly selected videos), resulting in better retrieval performance for the students. Impact of dataset size In Table 3, we show the performance of the proposed students, namely, S f A , S f B , and S c , in terms of mAP, when they are trained with different percentages of the collected DnS-100K dataset (that is, 25%, 50%, 75%, and 100%). We report large differences in performance for the fine-grained binarization student S f B and the coarse-grained student S c . 
We note that the more data is used for training, the better their retrieval results are. On the other hand, the fine-grained attention student's S f A performance remains relatively steady, regardless of the amount used for training. We attribute this behaviour to the fact that S f A learns to weigh the input features without transforming them; hence, a smaller dataset with real video pairs with diverse relations, as in our collected dataset, is adequate for its robust performance. Student performance with different teachers In Table 4, we show the performance of the proposed students, namely, S f A , S f B , and S c , in terms of mAP, when they are trained/distilled using different teachers. More specifically, using as a teacher: (i) the original teacher T, leading to the student S f (1) A , leading to the student S f (2) A (first iteration), and (iii) the fine-grained attention student S f (2) A (second iteration). In the case of fine-grained students, training with the S f (1) A and S f (2) A yields large performance boost in comparison to original teacher T. More precisely, the fine-grained attention student S f A exhibits a total improvement of about 0.006 mAP comparing its results trained with the teacher T (i.e., 0.893 mAP on DSVR task) and the S f (2) A (i.e., 0.899 mAP on DSVR task). A very considerable improvement has the fine-grained binarization student, i.e., training with S f (1) A gives a performance increase of almost 0.01 mAP on DSVR task, which further improves when trained with S f (2) A by 0.007. On the other hand, using a better teacher does not improve the performance of the coarse-grained student S c . Student performance with different settings In this section, the retrieval performance of the proposed students is evaluated under different design choices. Fine-grained attention student: In Table 5, we show how the adopted attention scheme ( 2attention -Sect. 3.2.1, or h-attention -Sect. 3.2.2) affects the performance of the student S f A . Using h-attention leads to considerably better results compared to the 2 -attention, that was originally used in ViSiL (Kordopatis-Zilos et al., 2019b). (Yang et al., 2016) Fine-grained binarization student: In Table 6, we report the retrieval results of the fine-grained binarization student S f B implemented with different activation functions in the binarization layer, i.e., sgn(x) which is not differentiable so the layer weights remain fixed, tanh(βx), as proposed in (Cao et al., 2017) with β = 10 3 , and the proposed E (Sect. 3.2.3). The binarization student with the proposed function achieves notably better results on all tasks, especially on ISVR. Moreover, we experimented with different number of bits for the region vectors and report results in Table 7. As expected, larger region hash codes lead to better retrieval performance. Nevertheless, the student achieves competitive retrieval performance even with low number of bits per region vector. Coarse-grained student: In Table 8, we report the performance of the coarse-grained student S c implemented under various combinations of its components. The proposed setup with all three components achieves the best results compared to the other configurations. The single component that provides the best results is the transformer network, followed by NetVLAD. Also, the attention mechanism provides a considerable boost in performance when applied. The second-best performance is achieved with the combination of the transformer module with the NetVLAD. 
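For illustration, the components ablated above can be composed as in the following schematic sketch, using the hyperparameters of Sect. 4.3; the attention form, the NetVLAD details, and the layer ordering are illustrative assumptions rather than the released implementation.

import torch
import torch.nn as nn
import torch.nn.functional as F

class NetVLAD(nn.Module):
    """Minimal NetVLAD head: soft-assigns frame descriptors to clusters and
    aggregates their residuals into a single vector."""
    def __init__(self, dim, num_clusters=64):
        super().__init__()
        self.centroids = nn.Parameter(0.01 * torch.randn(num_clusters, dim))
        self.assign = nn.Linear(dim, num_clusters)

    def forward(self, x):  # x: (N, D) frame descriptors of one video
        a = torch.softmax(self.assign(x), dim=-1)              # (N, K) soft assignments
        resid = x.unsqueeze(1) - self.centroids.unsqueeze(0)   # (N, K, D) residuals
        vlad = (a.unsqueeze(-1) * resid).sum(dim=0)            # (K, D)
        vlad = F.normalize(vlad, dim=-1)                       # intra-normalization
        return F.normalize(vlad.flatten(), dim=0)              # (K * D,)

class CoarseStudentSketch(nn.Module):
    """Attention weighting -> 1-layer transformer -> NetVLAD -> FC + LayerNorm."""
    def __init__(self, dim=512, out_dim=1024, num_clusters=64):
        super().__init__()
        self.att = nn.Linear(dim, 1)  # hypothetical frame-attention form
        self.encoder = nn.TransformerEncoderLayer(
            d_model=dim, nhead=8, dim_feedforward=2048, batch_first=True)
        self.vlad = NetVLAD(dim, num_clusters)
        self.fc = nn.Linear(num_clusters * dim, out_dim)
        self.norm = nn.LayerNorm(out_dim)

    def forward(self, frames):  # frames: (N, D) for one video
        w = torch.sigmoid(self.att(frames))                    # (N, 1) attention weights
        h = self.encoder((w * frames).unsqueeze(0)).squeeze(0) # contextualised frames
        v = self.vlad(h)
        return F.normalize(self.norm(self.fc(v)), dim=0)       # video-level embedding

With such video-level embeddings, the similarity of the coarse-grained student reduces to a single dot product between the two vectors.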
Selector network performance In this section, the performance of the proposed selector network is evaluated in comparison with the following approaches: (i) a selection mechanism that applies naive similarity thresholding for choosing between the coarse-grained and the fine-grained student, (ii) an oracle selector, where the similarity difference between the fine-grained and coarse-grained student is known and used for the re-ranking of video pairs, and (iii) a random selector that sends with a fixed probability videos to either the coarse-grained or the fine-grained student. Fig. 9 illustrates the performance of the DnS approach in terms of mAP with respect to the percentage of video pairs from the evaluation dataset sent to the finegrained student. We consider that the closer the curves are to the upper left corner, the better their performance. For this experiment, we used the proposed finegrained attention student S f A and the coarse-grained student S c . All three runs outperform the performance of the random selector by a large margin on all dataset percentages. The oracle selector performs the best with considerable margin, highlighting that using the similarity difference between the two students (Sect. 3.3.3) is a good optimization criterion. Furthermore, the proposed selector network outperforms the one with similarity thresholding on all tasks and percentages, i.e., in lower dataset percentages (< 25%) with a large margin. It achieves more than 0.85 mAP on the DSVR task with only 10% of the video pairs in FIVR-5k sent to the fine-grained student. Impact of threshold on the selector performance In this section, we assess the impact of the threshold parameter t that is used to obtain binary labels for the selector network (see Sect. 3.3.3,equation (12)), on the retrieval performance. To do so, we report the mAP as a function of the dataset percentage sent to the finegrained student for re-ranking -we do so for selectors trained with different values of t in order to compare the curves. The results are shown in Fig. 10 (c) ISVR Fig. 9 mAP with respect to the dataset percentage sent to the fine-grained student for re-ranking based on four selectors: (i) the proposed selector network, (ii) a selector with naive similarity thresholding, (iii) an oracle selector, ranking videos based on the similarity difference between the two students, and (iv) a random selector. results are obtained for t = 0.2; however, the performance is rather stable for thresholds between 0.1 and 0.4, as well. For threshold values > 0.4, the performance drops considerably on all evaluation tasks. Comparison with State of the Art In this section, the proposed approach is compared with several methods from the literature on four datasets. In all experiments, the fine-grained attention student S f (2) A is used as the teacher. We report the results of our re-ranking DnS scheme using both fine-grained students and sending the 5% and 30% of the dataset videos per query for re-ranking based on our selector score. We compare its performance with several coarse-grained, fine-grained, and re-ranking approaches: ITQ (Gong et al., 2012) and MFH (Song et al., 2013) are two unsupervised and CSQ a supervised video hashing methods using Hamming distance for video ranking, BoW (Cai et al., 2011) and LBoW Table 9 mAP comparison of our proposed students and re-ranking method against several video retrieval methods on four evaluation datasets. 
† indicates that the runs are implemented with the same features extracted with the same process as ours. * indicates that the corresponding results are on different dataset split. (Chou et al., 2015) is a re-ranking method with a BoW-based indexing scheme combined with DP for reranking, and HM (Liang and Wang, 2020) is also a re-ranking method using a concept-based similarity and a BoW-based method for refinement, and our re-implementation of ViSiL (Kordopatis-Zilos et al., 2019b). From the aforementioned methods, we have re-implemented BoW, TN, and DP, and we use the publicly available implementations for ITQ, MFH, CSQ, DML, TMK, and LAMV. For the rest, we provide the results reported in the original papers. Also, for fair comparison, we have implemented (if possible) the publicly available methods using our extracted features. In Table 9, the mAP of the proposed method in comparison to the video retrieval methods from the literature is reported. The proposed students achieve very competitive performance achieving state-of-the-art results in several cases. First, the fine-grained attention student achieves the best results on the two largescale datasets, i.e., FIVR-200K and SVD, outperforming ViSiL (our teacher network) by a large margin, i.e., 0.022 and 0.021 mAP, respectively. It reports almost the same performance as ViSiL on the CC WEB VIDEO dataset, and it is slightly outperformed on the EVVE dataset. Additionally, it is noteworthy that the finegrained binarization student demonstrates very competitive performance on all datasets. It achieves similar performance with ViSiL and the fine-grained attention student on the CC WEB VIDEO, the second-best results on all three tasks of FIVR-200K, and the thirdbest on SVD with a small margin from the second-best. However, its performance is lower than the teacher's on the EVVE dataset, highlighting that feature reduction and hashing have considerable impact on the student's retrieval performance on this dataset. Also, another possible explanation for this performance difference could be that the training dataset does not cover the included events sufficiently. Second, the coarse-grained student exhibits very competitive performance among coarse-grained approaches on all datasets. It achieves the best mAP on two out of four evaluation datasets, i.e., on SVD and EVVE, reporting performance close or even better than several fine-grained methods. On FIVR-200K and CC WEB VIDEO, it is outperformed by the BoWbased approaches, which are trained with samples from the evaluation sets. However, when they are built with video corpora other than the evaluation (which simulates more realistic scenarios), their performance drops considerably (Kordopatis-Zilos et al., 2017b, 2019a. Also, their performance on the SVD and EVVE datasets is considerably lower. Third, our DnS runs maintain competitive performance. It improves the performance of the coarsegrained student by more than 0.2 on FIVR-200K and 0.02 on SVD by re-ranking only 5% of the dataset with the fine-grained students. However, on the other two datasets, i.e., CC WEB VIDEO and EVVE, the re-ranking has negative effects on performance. A possible explanation for this might be that the performance of the coarse-and fine-grained students is very close, especially on the EVVE dataset. Also, this dataset consists of longer videos than the rest, which may impact the selection process. 
Nevertheless, the performance drop on these two datasets is mitigated when 30% of the dataset is sent to the fine-grained students for reranking; while on the FIVR-200K and SVD, the DnS method reaches the performance of the corresponding fine-grained students, or it even outperforms them, i.e., DnS 30% B outperforms S f B on SVD dataset. Additionally, Table 10 displays the storage and time requirements and the reference performance of the proposed method on each dataset. In comparison, we include the video retrieval methods that are implemented with the same features and run on GPU. For FIVR-200K and CC WEB VIDEO datasets, we display the DSVR and cc web * c runs, respectively. We have excluded the TN and DP methods, as they have been implemented on CPU and their transfer to GPU is non-trivial. Also, the requirements of the TCA runs from (Shao et al., 2021) are approximated based on features of the same dimensionality. All times are measured on a Linux machine with the Intel i9-7900X CPU and an Nvidia 2080Ti GPU. First, the individual students are compared against the competing methods in their corresponding category. The fine-grained binarization student has the lowest storage requirements among the fine-grained approaches on all datasets, having 240 times lower storage requirements than the ViSiL teacher. The fine-grained attention student needs the second-highest requirements in terms of space, but still, it needs 7.5 times less than ViSiL, achieving considerably better retrieval performance on two out of four evaluation datasets. However, the required retrieval time is high for all fine-grained approaches, especially in comparison with the coarse-grained ones. The coarse-grained student, which employs global vectors, has high storage requirements compared to the hashing and BoW methods that need notably lower storage space. In terms of time, all coarse-grained methods need approximately the same on all datasets, which is several orders of magnitude faster than the fine-grained ones. Second, we benchmark our DnS approach with the two fine-grained students and two dataset percentages sent for refinement. An excellent trade-off between time and performance comes with the DnS 5% B offering an acceleration of more than 17 times in comparison to the fine-grained students, at a small cost in terms of performance when 5% is used. Combined with the finegrained binarization student, on FIVR-200K, it offers 55 times faster retrieval and 240 times lower storage requirements compared to the original ViSiL teacher providing comparable retrieval performance, i.e., 0.041 relative drop in terms of mAP. The performance of the DnS increases considerably when 30% of the video pairs are sent for re-ranking, outperforming the ViSiL on two datasets with considerable margins. However, this performance improvement comes with a corresponding increase in the retrieval time. Conclusion In this paper, we proposed a video retrieval framework based on Knowledge Distillation that addresses the problem of performance-efficiency trade-off focused on large-scale datasets. In contrast to typical video retrieval methods that rely on either a high-performance but resource demanding fine-grained approach or a computationally efficient but low-performance coarsegrained one, we introduced a Distill-and-Select approach. Several student networks were trained via a Teacher-Student setup at different performanceefficiency trade-offs. 
We experimented with two finegrained students, one with a more elaborate attention mechanism that achieves better performance and one using a binarization layer offering very high performance with significantly lower storage requirements. Additionally, we trained a coarse-grained student that provides very fast retrieval with low storage requirements but at a high cost in performance. Once the students were trained, we combined them using a selector network that directs samples to the appropriate student in order to achieve high performance with high efficiency. It was trained based on the similarity difference between a coarse-grained and a fine-grained student so as to decide at query-time whether the similarity calculated by the coarse-grained one is reliable or the fine-grained one needs to be applied. The proposed method has been benchmarked to a number of contentbased video retrieval datasets, where it improved the state-of-art in several cases and achieved very competitive performance with a remarkable reduction of the computational requirements. The proposed scheme can be employed with several setups based on the requirements of the application. For example, when small-scale databases are involved, with no strict storage space and computational time restrictions, the fine-grained attention student could be employed since it achieves the best retrieval performance. On the other hand, for mid-scale databases, where the storage requirements increase, the fine-grained binarization student would be a reasonable option since it achieves very high retrieval performance with remarkable reduction of storage space requirements. Finally, for large-scale databases, where both storage space and computation time are an issue, the combination of fine-grained binarization student and the coarse-grained student with the selector network would be an appropriate solution that offers high retrieval performance and high efficiency. In the future, we plan to investigate alternatives for the better selection and re-ranking of video pairs based on our selector network by exploiting the ranking of videos derived from the two students. Also, we will explore better architectural choices for the development of the coarse-grained student to further improve the system's scalability with little compromises in retrieval performance. task retrieval and re-ranking. In: Proceedings of the IEEE/CVF International Conference on Computer Vision Workshops, pp 0-0 5
President Trump is getting tough on trade with China. Trump signed a memorandum Monday that directs U.S. Trade Representative Robert Lighthizer to determine whether an investigation is needed into alleged unfair Chinese trade practices. Shortly after Trump signed the directive, Lighthizer said his office will launch a probe and, "if needed, take action to preserve the future of U.S. industry." The move represents the first step in a process that could allow Trump to impose tariffs on Chinese imports or other punishing trade actions. "This is just the beginning," Trump said at the White House Monday. "We will defend our workers." Related: Trump administration vs. Chinese trade: Explained It's Trump's latest warning to China on trade, and it comes the same week that administration officials begin to renegotiate NAFTA, the free trade deal between the U.S., Mexico and Canada. The new order focuses specifically on alleged Chinese theft of U.S. intellectual property, a complaint expressed by a wide array of U.S. corporations that do business in China. China is accused of trying to take a short cut by spying, hacking or forcing companies to hand over their intellectual property, such as a patent on a software product. Chinese laws require foreign firms in some industries, such as energy and autos, to form joint ventures with local partners, which often results in the transfer of technology to Chinese companies. Related: How China squeezes tech secrets from U.S. companies In a report published this year, the Commission on the Theft of American Intellectual Property named China as the main offender, estimating it costs the U.S. economy up to $600 billion a year. Trump's memorandum comes as his administration also seeks cooperation with Chinese President Xi Jinping on North Korea's ongoing missile threats. Some experts say the wording of the memorandum is watered down, reflecting how high a priority North Korea is for the administration. Trump's order did not direct Lighthizer to open an investigation. It directed Lighthizer to determine if an investigation is needed. "For an administration that portrays itself as tough on trade, it keeps taking one baby step after another, and this is another one," says Edward Alden, a senior fellow at the Council on Foreign Relations. The softened language in Monday's order is the second time in as many weeks that Trump has agreed to changes to ease the potential backlash from China. Last week, the administration delayed the trade move in favor of securing China's support for a United Nations resolution imposing new sanctions on North Korea. Related: Chinese media: Trump's trade probe will 'poison' relations The final version of the memorandum reflected a desire to put some more distance between Trump and a potential investigation of Chinese trade practices, according to a senior administration official. The Chinese Commerce Ministry on Tuesday warned that "any actions of trade protectionism from the U.S. side would be harmful to the bilateral trade and business relationship and the interests of companies on both sides." It urged the U.S. to "respect objective facts and be cautious" or risk prompting Beijing to "respond with proper actions to firmly protect China's legitimate rights." Trump routinely slams China's trade practices, and blames China for the loss of millions of manufacturing jobs. The U.S. had a $347 billion trade deficit with China last year, by far the largest deficit the U.S. has with any country. Trump wants to lower the deficit. 
The memorandum was the administration's latest effort to get tough on longstanding Chinese trade abuses, but it also reflected a recognition inside the White House of bureaucratic processes and a complex international situation. --Jethro Mullen and Nanlin Fang contributed to this article
from typing import Any, Dict

def _swap_dic_axis(x2s: Dict[Any, Any]) -> Dict[Any, Any]:
    """Swap the two key levels of a nested dict: {a: {b: v}} -> {b: {a: v}}."""
    x3s = {}
    for index, x2 in list(x2s.items()):
        for t, x in list(x2.items()):
            x3s.setdefault(t, {})[index] = x
    return x3s
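A quick usage illustration (hypothetical data):

x2s = {"a": {1: 10, 2: 20}, "b": {1: 30}}
print(_swap_dic_axis(x2s))
# -> {1: {'a': 10, 'b': 30}, 2: {'a': 20}}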
import tensorflow as tf

# NORM_MEAN and NORM_STD are assumed to be module-level constants holding the
# dataset's per-channel mean and standard deviation.
def apply_normalization(features, label):
    """Standardize features to zero mean and unit variance; the label passes through unchanged."""
    norm = tf.math.divide(tf.math.subtract(features, NORM_MEAN), NORM_STD)
    return norm, label
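A sketch of typical usage in a tf.data input pipeline follows; the tensors and the normalization constants are illustrative placeholders rather than values from this module.

import tensorflow as tf

NORM_MEAN = tf.constant([0.485, 0.456, 0.406])  # hypothetical per-channel statistics
NORM_STD = tf.constant([0.229, 0.224, 0.225])

images = tf.random.uniform([8, 32, 32, 3])      # dummy batch of images
labels = tf.zeros([8], dtype=tf.int32)
dataset = (tf.data.Dataset.from_tensor_slices((images, labels))
           .map(apply_normalization, num_parallel_calls=tf.data.AUTOTUNE)
           .batch(4))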
/** * Event representing stream deactivation. */ public static class StreamDeactivationEvent extends StreamEvent { public StreamDeactivationEvent(ComponentContextPair<Stream, StreamContext> stream) { super(stream); } }
export class RecoveryPoint {
  id?: string;
  description?: string;
  rollBackTime?: string;
  branchId?: string;
}
package io.github.itamardenkberg.camper.client.render.entity;

import io.github.itamardenkberg.camper.Camper;
import io.github.itamardenkberg.camper.client.render.model.SquirrelEntityModel;
import io.github.itamardenkberg.camper.common.entities.passive.SquirrelEntity;
import io.github.itamardenkberg.camper.core.util.ClientEventBusSubscriber;
import net.minecraft.client.renderer.entity.EntityRendererProvider;
import net.minecraft.client.renderer.entity.MobRenderer;
import net.minecraft.resources.ResourceLocation;

public class SquirrelEntityRenderer extends MobRenderer<SquirrelEntity, SquirrelEntityModel<SquirrelEntity>> {

    public static final ResourceLocation TEXTURE = new ResourceLocation(Camper.MOD_ID,
            "textures/entity/squirrel.png");

    public SquirrelEntityRenderer(EntityRendererProvider.Context context) {
        super(context,
                new SquirrelEntityModel<SquirrelEntity>(context.bakeLayer(ClientEventBusSubscriber.SQUIRREL_LAYER)),
                0.6f);
    }

    @Override
    public ResourceLocation getTextureLocation(SquirrelEntity p_114482_) {
        return TEXTURE;
    }
}
import numpy as np

def evaluate_lfs(self, pspace, vb=True):
    """Evaluate the likelihood functions of `pspace` on the fine grid and normalize
    each one to integrate to unity. This is a method of a larger class: self.N_range,
    self.z_fine, self.samps, self.n_tot, and self.dz_fine are assumed attributes.
    `vb` is accepted for interface compatibility but is not used here."""
    lfs = []
    for n in self.N_range:
        # pair every fine-grid value with the n-th sample value
        points = list(zip(self.z_fine, [self.samps[n][1]] * self.n_tot))
        cur = pspace.pdf(np.array(points))
        lfs.append(cur)
    lfs = np.array(lfs)
    # normalize each likelihood function so it integrates to 1 over the grid
    lfs /= np.sum(lfs, axis=-1)[:, np.newaxis] * self.dz_fine
    return lfs
def with_birth_weights(self, birth_weights):
    """Builder-style setter for the birth weights of the GM-PHD filter; returns self so calls can be chained."""
    if birth_weights is None:
        raise ValueError("The birth weights of the GMPHD filter can not be None")
    else:
        self._birth_weights = birth_weights
        return self
# -*- coding: utf-8 -*- """ beam search modules for Neural Walker @author: hongyuan """ import pickle import time import numpy import theano from theano import sandbox import theano.tensor as tensor import os import scipy.io from collections import defaultdict from theano.tensor.shared_randomstreams import RandomStreams import utils dtype=theano.config.floatX # #TODO: beam search for attention tree2seq with GRU class BeamSearchNeuralWalker(object): ''' This is a beam search code for Neural Walker ''' def __init__(self, settings): print "initializing the beam searcher ... " assert (settings['size_beam'] >= 1) self.size_beam = settings['size_beam'] # assert( settings['path_model'] == None or settings['trained_model'] == None ) # self.model_list = [] # if settings['path_model'] != None: for model_path in settings['path_model']: with open(model_path, 'rb') as f: self.model_list.append(pickle.load(f)) else: assert(settings['trained_model']!=None) self.model_list.append(settings['trained_model']) # # convert float64 to float32 self.ht_encode_list = [] self.ct_encode_list = [] # build up list for model in self.model_list: for param_name in model: model[param_name] = numpy.float32(model[param_name]) # self.dim_model = model['Emb_enc_forward'].shape[1] # self.ht_encode_list.append(numpy.zeros( (self.dim_model, ), dtype=dtype )) self.ct_encode_list.append(numpy.zeros( (self.dim_model, ), dtype=dtype )) # self.scope_att = None self.scope_att_times_W = None # self.beam_list = [] self.finish_list = [] #self.normalize_mode = settings['normalize_mode'] # whether to normalize the cost over length of sequence # #self.lang2idx = settings['lang2idx'] self.dim_lang = settings['dim_lang'] self.map = settings['map'] self.Emb_lang_sparse = numpy.identity( self.dim_lang, dtype=dtype ) # def refresh_state(self): print "refreshing the states of beam search ... 
" for ht_encode in self.ht_encode_list: ht_encode = numpy.zeros( (self.dim_model, ), dtype=dtype ) for ct_encode in self.ct_encode_list: ct_encode = numpy.zeros( (self.dim_model, ), dtype=dtype ) # self.scope_att = None self.scope_att_times_W = None # self.beam_list = [] self.finish_list = [] # def sigmoid(self, x): return 1 / (1+numpy.exp(-x)) # def set_encoder_forward(self): self.ht_enc_forward_list = [] self.ct_enc_forward_list = [] for model in self.model_list: xt_lang_forward = model['Emb_enc_forward'][ self.seq_lang_numpy, : ] shape_encode = xt_lang_forward.shape self.ht_enc_forward_list.append(numpy.zeros( shape_encode, dtype=dtype )) self.ct_enc_forward_list.append(numpy.zeros( shape_encode, dtype=dtype )) len_lang = shape_encode[0] for time_stamp in range(-1, len_lang-1, 1): post_transform = model['b_enc_forward'] + numpy.dot( numpy.concatenate( ( xt_lang_forward[time_stamp+1, :], self.ht_enc_forward_list[-1][time_stamp, :] ), axis=0 ), model['W_enc_forward'] ) # gate_input_numpy = self.sigmoid( post_transform[:self.dim_model] ) gate_forget_numpy = self.sigmoid( post_transform[self.dim_model:2*self.dim_model] ) gate_output_numpy = self.sigmoid( post_transform[2*self.dim_model:3*self.dim_model] ) gate_pre_c_numpy = numpy.tanh( post_transform[3*self.dim_model:] ) self.ct_enc_forward_list[-1][time_stamp+1, :] = gate_forget_numpy * self.ct_enc_forward_list[-1][time_stamp, :] + gate_input_numpy * gate_pre_c_numpy self.ht_enc_forward_list[-1][time_stamp+1, :] = gate_output_numpy * numpy.tanh(self.ct_enc_forward_list[-1][time_stamp+1, :]) # # # ## def set_encoder_backward(self): self.ht_enc_backward_list = [] self.ct_enc_backward_list = [] for model in self.model_list: xt_lang_backward = model['Emb_enc_backward'][ self.seq_lang_numpy, : ][::-1, :] shape_encode = xt_lang_backward.shape self.ht_enc_backward_list.append(numpy.zeros( shape_encode, dtype=dtype )) self.ct_enc_backward_list.append(numpy.zeros( shape_encode, dtype=dtype )) len_lang = shape_encode[0] for time_stamp in range(-1, len_lang-1, 1): post_transform = model['b_enc_backward'] + numpy.dot( numpy.concatenate( ( xt_lang_backward[time_stamp+1, :], self.ht_enc_backward_list[-1][time_stamp, :] ), axis=0 ), model['W_enc_backward'] ) # gate_input_numpy = self.sigmoid( post_transform[:self.dim_model] ) gate_forget_numpy = self.sigmoid( post_transform[self.dim_model:2*self.dim_model] ) gate_output_numpy = self.sigmoid( post_transform[2*self.dim_model:3*self.dim_model] ) gate_pre_c_numpy = numpy.tanh( post_transform[3*self.dim_model:] ) self.ct_enc_backward_list[-1][time_stamp+1, :] = gate_forget_numpy * self.ct_enc_backward_list[-1][time_stamp, :] + gate_input_numpy * gate_pre_c_numpy self.ht_enc_backward_list[-1][time_stamp+1, :] = gate_output_numpy * numpy.tanh(self.ct_enc_backward_list[-1][time_stamp+1, :]) # # # # def set_encoder( self, seq_lang_numpy, seq_world_numpy ): # self.seq_lang_numpy = seq_lang_numpy self.seq_world_numpy = seq_world_numpy # self.set_encoder_forward() self.set_encoder_backward() self.scope_att_list = [] self.scope_att_times_W_list = [] for idx, model in enumerate(self.model_list): self.scope_att_list.append(numpy.concatenate( ( self.Emb_lang_sparse[self.seq_lang_numpy, :], self.ht_enc_forward_list[idx], self.ht_enc_backward_list[idx][::-1, :] ), axis=1 )) self.scope_att_times_W_list.append(numpy.dot( self.scope_att_list[-1], model['W_att_scope'] )) #self.ht_encode = ht_source[:, 0] # def init_beam(self, pos_start, pos_end): print "initialize beam ... 
" item = { # 'htm1': numpy.copy(self.ht_encode), # 'ctm1': numpy.copy(self.ct_encode), 'feat_current_position': numpy.copy( self.seq_world_numpy[0, :] ), # 'pos_current': pos_start, 'pos_destination': pos_end, 'list_pos': [numpy.copy(pos_start)], # 'list_idx_action': [], 'continue': True, # 'cost': 0.00 } # add htm1 and ctm1 item['htm1'] = [] item['ctm1'] = [] for idx, _ in enumerate(self.model_list): item['htm1'].append(self.ht_encode_list[idx]) item['ctm1'].append(self.ct_encode_list[idx]) self.beam_list.append(item) def softmax(self, x): # x is a vector exp_x = numpy.exp(x - numpy.amax(x)) return exp_x / numpy.sum(exp_x) def decode_step( self, feat_current_position, htm1_action, ctm1_action ): # ct_action = [] ht_action = [] probt_list = [] log_probt_list = [] for idx, model in enumerate(self.model_list): xt_action = numpy.dot( feat_current_position, model['Emb_dec'] ) # neural attention operations first weight_current_step = self.softmax( numpy.dot( numpy.tanh( numpy.dot( htm1_action[idx], model['W_att_target'] ) + self.scope_att_times_W_list[idx] ), model['b_att'] ) ) # zt_action = numpy.dot( weight_current_step, self.scope_att_list[idx] ) # post_transform = model['b_dec'] + numpy.dot( numpy.concatenate( ( xt_action, htm1_action[idx], zt_action ), axis=0 ), model['W_dec'] ) # gate_input_numpy = self.sigmoid( post_transform[:self.dim_model] ) gate_forget_numpy = self.sigmoid( post_transform[self.dim_model:2*self.dim_model] ) gate_output_numpy = self.sigmoid( post_transform[2*self.dim_model:3*self.dim_model] ) gate_pre_c_numpy = numpy.tanh( post_transform[3*self.dim_model:] ) ct_action.append(gate_forget_numpy * ctm1_action[idx] + gate_input_numpy * gate_pre_c_numpy) ht_action.append(gate_output_numpy * numpy.tanh(ct_action[-1])) # post_transform_prob = numpy.dot( xt_action + numpy.dot( numpy.concatenate( (ht_action[-1], zt_action), axis=0 ), model['W_out_hz'] ), model['W_out'] ) # exp_post_trans = numpy.exp( post_transform_prob - numpy.amax(post_transform_prob) ) probt_list.append(exp_post_trans / numpy.sum(exp_post_trans)) log_probt_list.append(numpy.log(probt_list[-1] + numpy.float32(1e-8))) probt = probt_list[0] for prob_next in probt_list[1:]: probt = probt + prob_next probt = probt / len(probt_list) log_probt = log_probt_list[0] for log_prob_next in log_probt_list[1:]: log_probt = log_probt + log_prob_next log_probt = log_probt / len(log_probt_list) return xt_action, ht_action, ct_action, probt, log_probt def validate_step(self, idx_action, feat_current_position): assert( idx_action == 3 or idx_action == 2 or idx_action == 1 or idx_action == 0 ) if idx_action == 0: if feat_current_position[23] > 0.5: # 6 + 18 = 24 --> 23 return False else: return True else: return True # def get_left_and_right(self, direc_current): # direc_current can be 0 , 90, 180, 270 # it is the current facing direction assert(direc_current == 0 or direc_current == 90 or direc_current == 180 or direc_current == 270) left = direc_current - 90 if left == -90: left = 270 right = direc_current + 90 if right == 360: right = 0 behind = direc_current + 180 if behind == 360: behind = 0 elif behind == 450: behind = 90 return left, right, behind # def one_step_forward(self, pos_current): direc_current = pos_current[2] pos_next = numpy.copy(pos_current) assert( direc_current == 0 or direc_current == 90 or direc_current == 180 or direc_current == 270 ) if direc_current == 0: pos_next[1] -= 1 elif direc_current == 90: pos_next[0] += 1 elif direc_current == 180: pos_next[1] += 1 else: pos_next[0] -= 1 return pos_next # # # 
def take_one_step(self, pos_current, idx_action): # left_current, right_current, _ = self.get_left_and_right( pos_current[2] ) pos_next = numpy.copy(pos_current) assert(idx_action==0 or idx_action==1 or idx_action==2 or idx_action==3) if idx_action == 1: # turn left pos_next[2] = left_current elif idx_action == 2: pos_next[2] = right_current elif idx_action == 3: pass else: pos_next = self.one_step_forward(pos_current) return pos_next # def get_feat_current_position(self, pos_current): # nodes = self.map['nodes'] x_current, y_current, direc_current = pos_current[0], pos_current[1], pos_current[2] # count_pos_found = 0 # for idx_node, node in enumerate(nodes): if node['x'] == x_current and node['y'] == y_current: # find this position in the map # so we can get its feature count_pos_found += 1 # left_current, right_current, behind_current = self.get_left_and_right(direc_current) # feat_node = numpy.cast[dtype]( node['objvec'] ) feat_forward = numpy.cast[dtype]( node['capfeat'][direc_current] ) feat_left = numpy.cast[dtype]( node['capfeat'][left_current] ) feat_right = numpy.cast[dtype]( node['capfeat'][right_current] ) feat_behind = numpy.cast[dtype]( node['capfeat'][behind_current] ) # feat_current_position = numpy.copy( numpy.concatenate( (feat_node, feat_forward, feat_left, feat_right, feat_behind), axis=0 ) ) # assert(count_pos_found > 0) return feat_current_position # since the action is validated before moving # this position must be in this map # def search_func(self): print "search for target ... " counter, max_counter = 0, 100 while ((len(self.finish_list)<self.size_beam) and (counter<max_counter) ): new_list = [] for item in self.beam_list: xt_item, ht_item, ct_item, probt_item, log_probt_item = self.decode_step( item['feat_current_position'], item['htm1'], item['ctm1'] ) top_k_list = range(probt_item.shape[0]) for top_idx_action in top_k_list: if self.validate_step(top_idx_action, item['feat_current_position']): new_item = { 'htm1': numpy.copy(ht_item), 'ctm1': numpy.copy(ct_item), 'list_idx_action': [ idx for idx in item['list_idx_action'] ], 'list_pos': [ numpy.copy(pos) for pos in item['list_pos'] ] } new_item['list_idx_action'].append( top_idx_action ) # new_item['pos_current'] = numpy.copy( self.take_one_step( item['pos_current'], top_idx_action ) ) # new_item['pos_destination'] = numpy.copy( item['pos_destination'] ) # new_item['feat_current_position'] = numpy.copy( self.get_feat_current_position( new_item['pos_current'] ) ) # new_item['list_pos'].append( numpy.copy(new_item['pos_current']) ) # if top_idx_action == 3: # 3 -- stop new_item['continue'] = False else: new_item['continue'] = True # new_item['cost'] = item['cost'] + (-1.0)*log_probt_item[top_idx_action] # new_list.append(new_item) # new_list = sorted( new_list, key=lambda x:x['cost'] ) if len(new_list) > self.size_beam: new_list = new_list[:self.size_beam] # self.beam_list = [] while len(new_list) > 0: pop_item = new_list.pop(0) if pop_item['continue']: self.beam_list.append(pop_item) else: self.finish_list.append(pop_item) counter += 1 # # if len(self.finish_list) > 0: self.finish_list = sorted( self.finish_list, key=lambda x:x['cost'] ) while len(self.finish_list) > self.size_beam: self.finish_list.pop() while len(self.finish_list) < self.size_beam: self.finish_list.append(self.beam_list.pop(0)) # # def count_path(self): print "# of finished responses is ", len(self.finish_list) def get_path(self): return self.finish_list[0]['list_pos'] def check_pos_end(self): top_path = self.finish_list[0] diff_pos = 
numpy.sum( numpy.abs( top_path['pos_current'] - top_path['pos_destination'] ) ) if diff_pos < 0.5: return True else: return False ''' Ensemble 10 models ''' def get_best_item(self): return self.finish_list[0] def check_pos_end_multiple_model(self, finish_list_best): top_path = finish_list_best diff_pos = numpy.sum( numpy.abs( top_path['pos_current'] - top_path['pos_destination'] ) ) if diff_pos < 0.5: return True else: return False ''' def get_top_target(self): print "getting top target as list of token_id ... " return self.finish_list[0]['list_idx_token'][1:-1] def get_all_gens(self): list_seq_as_list = [] for item in self.finish_list: list_seq_as_list.append( [idx for idx in item['list_idx_token'][1:-1]] ) #print list_seq_as_list return list_seq_as_list # def get_top_target_score(self): print "getting top target score as a value ... " if self.normalize_mode: return self.finish_list[0]['norm_cost'] else: return self.finish_list[0]['cost'] def get_all_gens_scores(self): list_scores_as_values = [] for item in self.finish_list: if self.normalize_mode: score_value = item['norm_cost'] else: score_value = item['cost'] list_scores_as_values.append( score_value ) return list_scores_as_values def get_att_weights(self, idx_in_beam): # list_att_weights = [ numpy.copy(att_weight) for att_weight in self.finish_list[ idx_in_beam ]['list_att'] ] return list_att_weights # def get_all_att_weights(self): list_all_att_weights = [] for finish_item in self.finish_list: list_all_att_weights.append( [ numpy.copy(att_weight) for att_weight in finish_item[ 'list_att' ] ] ) return list_all_att_weights '''
// Elem retrieves the Elem form of the contained conjunct. // If it is a Field, it will return the field value. func (c *Conjunct) Elem() Elem { switch x := c.x.(type) { case interface{ expr() Expr }: return x.expr() case Elem: return x default: panic("unreachable") } }
# Sum the first N terms of A_{i+1} = A_i^2 mod M, detecting the cycle so that
# large N does not require iterating over every term.
n, x, m = map(int, input().split())
ai = x
tmp = [ai]          # sequence terms seen so far, in order
used = set(tmp)     # kept from the original; not actually used below
for i in range(n):
    nxt = ai ** 2 % m
    if nxt in tmp:  # the sequence has entered a cycle
        break
    tmp.append(nxt)
    ai = nxt
if len(tmp) == n:   # no cycle within the first n terms: just sum them
    ans = sum(tmp)
    print(ans)
    exit()
first = tmp.index(nxt)  # index where the cycle starts
ans = 0
l = []                  # the terms that form the repeating cycle
for i in range(len(tmp)):
    if i < first:       # pre-cycle prefix: add once and consume from n
        ans += tmp[i]
        n -= 1
    else:
        l.append(tmp[i])
m = len(l)              # cycle length (m is reused here)
x, y = divmod(n, m)     # number of full cycles and leftover terms
ans += sum(l) * x
for i in range(y):
    ans += l[i]
print(ans)
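As a hand-worked check of the logic above (values computed manually, not taken from a judge): for the input 6 2 1001 the generated terms are 2, 4, 16, 256, 471, 620, and the program prints their sum, 1369.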
// Copyright Materialize, Inc. and contributors. All rights reserved.
//
// Use of this software is governed by the Business Source License
// included in the LICENSE file.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0.

//! Write capabilities and handles

use std::fmt::Debug;
use std::marker::PhantomData;
use std::time::Duration;

use differential_dataflow::difference::Semigroup;
use differential_dataflow::lattice::Lattice;
use mz_persist_types::{Codec, Codec64};
use serde::{Deserialize, Serialize};
use timely::progress::{Antichain, Timestamp};
use uuid::Uuid;

use crate::error::{InvalidUsage, LocationError};

/// An opaque identifier for a writer of a persist durable TVC (aka shard).
///
/// Unlike [crate::read::ReaderId], this is intentionally not Serialize and
/// Deserialize. It's difficult to reason about the behavior if multiple writers
/// accidentally end up concurrently using the same [WriterId] and we haven't
/// (yet) found a need for it.
#[derive(Debug, Clone, PartialEq, Eq, Hash, Serialize, Deserialize)]
pub struct WriterId(pub(crate) [u8; 16]);

impl std::fmt::Display for WriterId {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        std::fmt::Display::fmt(&Uuid::from_bytes(self.0), f)
    }
}

/// A "capability" granting the ability to apply updates to some shard at times
/// greater or equal to `self.upper()`.
pub struct WriteHandle<K, V, T, D>
where
    T: Timestamp + Lattice + Codec64,
{
    _phantom: PhantomData<(K, V, T, D)>,
}

impl<K, V, T, D> WriteHandle<K, V, T, D>
where
    K: Debug + Codec,
    V: Debug + Codec,
    T: Timestamp + Lattice + Codec64,
    D: Semigroup + Codec64,
{
    /// This handle's `upper` frontier.
    ///
    /// This will always be greater or equal to the shard-global `upper`.
    pub fn upper(&self) -> &Antichain<T> {
        todo!()
    }

    /// Applies `updates` to this shard and downgrades this handle's upper to
    /// `new_upper`.
    ///
    /// All times in `updates` must be greater or equal to `self.upper()` and
    /// not greater or equal to `new_upper`. A `new_upper` of the empty
    /// antichain "finishes" this shard, promising that no more data is ever
    /// incoming.
    ///
    /// `updates` may be empty, which allows for downgrading `upper` to
    /// communicate progress. It is unexpected to call this with `new_upper`
    /// equal to `self.upper()`, as it would mean `updates` must be empty
    /// (making the entire call a no-op).
    ///
    /// Multiple [WriteHandle]s (with different [WriterId]s) may be used
    /// concurrently to write to the same shard, but in this case, the data
    /// being written must be identical (in the sense of "definite"-ness).
    ///
    /// This uses a bounded amount of memory, even when `updates` is very large.
    /// Individual records, however, should be small enough that we can
    /// reasonably chunk them up: O(KB) is definitely fine, O(MB) come talk to
    /// us.
    ///
    /// The clunky two-level Result is to enable more obvious error handling in
    /// the caller. See <http://sled.rs/errors.html> for details.
    ///
    /// TODO: Introduce an AsyncIterator (futures::Stream) variant of this. Or,
    /// given that the AsyncIterator version would be strictly more general,
    /// alter this one if it turns out that the compiler can optimize out the
    /// overhead.
    pub async fn write_batch<'a, I: IntoIterator<Item = ((&'a K, &'a V), &'a T, &'a D)>>(
        &mut self,
        timeout: Duration,
        updates: I,
        new_upper: Antichain<T>,
    ) -> Result<Result<(), InvalidUsage>, LocationError> {
        todo!(
            "{:?}{:?}{:?}",
            timeout,
            updates.into_iter().size_hint(),
            new_upper
        );
    }
}

impl<K, V, T, D> Drop for WriteHandle<K, V, T, D>
where
    T: Timestamp + Lattice + Codec64,
{
    fn drop(&mut self) {
        todo!()
    }
}
from pathlib import Path from textwrap import dedent import pytest from _pytest.config import UsageError from _pytest.config.findpaths import get_common_ancestor from _pytest.config.findpaths import get_dirs_from_args from _pytest.config.findpaths import load_config_dict_from_file class TestLoadConfigDictFromFile: def test_empty_pytest_ini(self, tmp_path: Path) -> None: """pytest.ini files are always considered for configuration, even if empty""" fn = tmp_path / "pytest.ini" fn.write_text("", encoding="utf-8") assert load_config_dict_from_file(fn) == {} def test_pytest_ini(self, tmp_path: Path) -> None: """[pytest] section in pytest.ini files is read correctly""" fn = tmp_path / "pytest.ini" fn.write_text("[pytest]\nx=1", encoding="utf-8") assert load_config_dict_from_file(fn) == {"x": "1"} def test_custom_ini(self, tmp_path: Path) -> None: """[pytest] section in any .ini file is read correctly""" fn = tmp_path / "custom.ini" fn.write_text("[pytest]\nx=1", encoding="utf-8") assert load_config_dict_from_file(fn) == {"x": "1"} def test_custom_ini_without_section(self, tmp_path: Path) -> None: """Custom .ini files without [pytest] section are not considered for configuration""" fn = tmp_path / "custom.ini" fn.write_text("[custom]", encoding="utf-8") assert load_config_dict_from_file(fn) is None def test_custom_cfg_file(self, tmp_path: Path) -> None: """Custom .cfg files without [tool:pytest] section are not considered for configuration""" fn = tmp_path / "custom.cfg" fn.write_text("[custom]", encoding="utf-8") assert load_config_dict_from_file(fn) is None def test_valid_cfg_file(self, tmp_path: Path) -> None: """Custom .cfg files with [tool:pytest] section are read correctly""" fn = tmp_path / "custom.cfg" fn.write_text("[tool:pytest]\nx=1", encoding="utf-8") assert load_config_dict_from_file(fn) == {"x": "1"} def test_unsupported_pytest_section_in_cfg_file(self, tmp_path: Path) -> None: """.cfg files with [pytest] section are no longer supported and should fail to alert users""" fn = tmp_path / "custom.cfg" fn.write_text("[pytest]", encoding="utf-8") with pytest.raises(pytest.fail.Exception): load_config_dict_from_file(fn) def test_invalid_toml_file(self, tmp_path: Path) -> None: """Invalid .toml files should raise `UsageError`.""" fn = tmp_path / "myconfig.toml" fn.write_text("]invalid toml[", encoding="utf-8") with pytest.raises(UsageError): load_config_dict_from_file(fn) def test_custom_toml_file(self, tmp_path: Path) -> None: """.toml files without [tool.pytest.ini_options] are not considered for configuration.""" fn = tmp_path / "myconfig.toml" fn.write_text( dedent( """ [build_system] x = 1 """ ), encoding="utf-8", ) assert load_config_dict_from_file(fn) is None def test_valid_toml_file(self, tmp_path: Path) -> None: """.toml files with [tool.pytest.ini_options] are read correctly, including changing data types to str/list for compatibility with other configuration options.""" fn = tmp_path / "myconfig.toml" fn.write_text( dedent( """ [tool.pytest.ini_options] x = 1 y = 20.0 values = ["tests", "integration"] name = "foo" heterogeneous_array = [1, "str"] """ ), encoding="utf-8", ) assert load_config_dict_from_file(fn) == { "x": "1", "y": "20.0", "values": ["tests", "integration"], "name": "foo", "heterogeneous_array": [1, "str"], } class TestCommonAncestor: def test_has_ancestor(self, tmp_path: Path) -> None: fn1 = tmp_path / "foo" / "bar" / "test_1.py" fn1.parent.mkdir(parents=True) fn1.touch() fn2 = tmp_path / "foo" / "zaz" / "test_2.py" fn2.parent.mkdir(parents=True) fn2.touch() 
assert get_common_ancestor([fn1, fn2]) == tmp_path / "foo" assert get_common_ancestor([fn1.parent, fn2]) == tmp_path / "foo" assert get_common_ancestor([fn1.parent, fn2.parent]) == tmp_path / "foo" assert get_common_ancestor([fn1, fn2.parent]) == tmp_path / "foo" def test_single_dir(self, tmp_path: Path) -> None: assert get_common_ancestor([tmp_path]) == tmp_path def test_single_file(self, tmp_path: Path) -> None: fn = tmp_path / "foo.py" fn.touch() assert get_common_ancestor([fn]) == tmp_path def test_get_dirs_from_args(tmp_path): """get_dirs_from_args() skips over non-existing directories and files""" fn = tmp_path / "foo.py" fn.touch() d = tmp_path / "tests" d.mkdir() option = "--foobar=/foo.txt" # xdist uses options in this format for its rsync feature (#7638) xdist_rsync_option = "popen=c:/dest" assert get_dirs_from_args( [str(fn), str(tmp_path / "does_not_exist"), str(d), option, xdist_rsync_option] ) == [fn.parent, d]
Tool Wear Characteristics for Near-Dry Cutting of Inconel 718 In recent years, jet engines with high combustion efficiency have been required in the aircraft industry, and Inconel 718, which has excellent mechanical and chemical characteristics, is used for such engines. However, Inconel 718 is a difficult-to-cut material because of its low thermal conductivity. Consequently, wet cutting is ordinarily adopted to reduce the heat at the cutting edge when machining Inconel 718. Wet cutting, which uses a large amount of cutting fluid, incurs considerable cost and energy for the maintenance and disposal of the fluid, and the method is environmentally unfriendly. From the viewpoint of reducing cost and environmental load, we examined near-dry cutting, which uses a very small amount of cutting fluid, for the cylindrical cutting of Inconel 718. However, this method has some problems, such as tool wear and the stock removal rate. In this report, we experimentally examined the relationship between cutting speed, tool material, and tool fracture in near-dry cutting of Inconel 718, and compared the results with those of wet cutting, which is more expensive, requires significantly more energy, and is less environmentally friendly. The results indicate that when the cutting speed is 100 m/min, tool fracture occurs at a cutting distance of 200 m, whereas at a cutting speed of 50 m/min tool fracture does not occur and the near-dry cutting distance can be extended beyond 600 m. Moreover, tool wear could be reduced when the S05 tool material, which has high bending strength, was used.
// src/cpu/hamaji/solo.h
#include <string>
#include <random>

#include "base.h"
#include "core.h"

class Game;

class SoloGame {
 public:
  SoloGame(int game_index, bool evalmode);
  ~SoloGame();

  int pickNextPuyo();
  void pickNext();

  int run();
  //int run2();
  int step();

  Game* game;
  Core cpu;
  string next;

  std::mt19937 mt;

  bool evalmode_;

  int chigiri_frames;

  int puyo_sequence_index_;
};
30°-60°-90° Triangular Dielectric Resonator Antenna: A new shaped Dielectric Resonator Antenna A 30°-60°-90° triangular-shaped Dielectric Resonator Antenna (DRA) is investigated here. The DRA is formed by cutting an equilateral triangular DRA (TDRA) along its median. The fundamental mode is excited in both antennas. A comparative study of the input impedance, S11, and far-field power patterns is presented. It is found that the 30°-60°-90° TDRA produces a peak in the broadside direction.
import { Component, OnInit, OnDestroy } from '@angular/core';
import { MatDialog } from '@angular/material/dialog';
import { Subscription } from 'rxjs';

import { Game, Lobby } from 'interfaces/base';
import { LobbyService } from 'services/lobby/lobby.service';
import { NewGameDialogComponent } from 'components/dialogs/new-game/new-game.component';

@Component({
  selector: 'app-lobby',
  templateUrl: './lobby.component.html',
  styleUrls: ['./lobby.component.scss']
})
export class LobbyComponent implements OnInit, OnDestroy {
  lobby: Lobby;
  games: {
    open: Game[];
    started: Game[];
    finished: Game[];
  } = {
    open: [],
    started: [],
    finished: [],
  };
  subscriptions: Subscription[] = [];

  constructor(
    private dialog: MatDialog,
    private lobbyService: LobbyService,
  ) { }

  ngOnInit(): void {
    this.subscriptions.push(this.lobbyService.lobby$.subscribe({
      next: this.updateLobby.bind(this)
    }));
  }

  private updateLobby(lobby: Lobby): void {
    if(!lobby) {
      return;
    }
    this.lobby = lobby;
    this.games.open = this.lobby.games.filter(room => !room.started);
    this.games.started = this.lobby.games.filter(room => room.started && !room.stopped);
    this.games.finished = this.lobby.games.filter(room => room.stopped);
  }

  ngOnDestroy(): void {
    this.subscriptions.forEach(subscription => subscription.unsubscribe());
  }

  public openDialogNewGame(): void {
    const dialogRef = this.dialog.open(NewGameDialogComponent, {
      width: '250px',
      data: this.lobby.engines,
    });
    dialogRef.afterClosed().subscribe(result => {
      if(result) {
        this.lobbyService.newGame(result.name, result.engine);
      }
    });
  }
}
/** * Implements a {@link Dao} interface at runtime. * <p> * The DAO must declare a {@link #MAX_MSG_LEN} int field and it must only extend * the {@link Dao} interface. * <p> * This class is not thread safe. * <p> */ public final class DaoFactory { private static final String IMPL_SUFFIX = "$$Generated"; private static final String INDENT = " "; private static final String INDENT2 = " "; private static final String MAX_MSG_LEN = "MAX_MSG_LEN"; private static final String NL = System.lineSeparator(); private final Map<String, Class<?>> classes; private final Map<String, CharSequence> sources; public DaoFactory() { classes = new HashMap<>(); sources = new HashMap<>(); } /** * Dynamically build an implementation of the DAO, returning a new instance of * that implementation. * * @param <T> the DAO that defines the required contract * @param clazz the DAO class * @return a new instance of the implementation of that DAO (never null) */ @SuppressWarnings("unchecked") public <T extends Dao<?, ?, ?, ?, ?, ?>> T newInstance(Class<T> clazz) { final String implName = clazz.getName() + IMPL_SUFFIX; final Class<?> impl; if (classes.containsKey(implName)) { impl = classes.get(implName); } else { generate(clazz); impl = classes.get(implName); assert impl != null; } try { return (T) impl.newInstance(); } catch (IllegalAccessException | InstantiationException ex) { throw new IllegalStateException("Cannot instantiate " + implName, ex); } } private Class<?>[] findParameters(Class<? extends Dao<?, ?, ?, ?, ?, ?>> clazz) { Type[] superc = clazz.getGenericInterfaces(); assert superc.length == 1; assert superc[0] instanceof ParameterizedType; ParameterizedType t = (ParameterizedType) superc[0]; Type[] args = t.getActualTypeArguments(); Class<?>[] res = new Class<?>[args.length]; for (int index = 0; index < args.length; index++) { res[index] = (Class<?>) args[index]; } return res; } private void generate(Class<? 
extends Dao<?, ?, ?, ?, ?, ?>> clazz) { if (!clazz.isInterface()) { throw new IllegalStateException("DAO must be an interface"); } final Field maxMsgLen; try { maxMsgLen = clazz.getDeclaredField(MAX_MSG_LEN); } catch (NoSuchFieldException | SecurityException ex) { throw new IllegalStateException(MAX_MSG_LEN + " field not found", ex); } if (!maxMsgLen.getType().equals(int.class)) { throw new IllegalStateException(MAX_MSG_LEN + " must be primitive int"); } final Class<?>[] daoParams = findParameters(clazz); assert (daoParams.length == 6); final String implFullName = clazz.getName() + IMPL_SUFFIX; final String implSimpleName = clazz.getSimpleName() + IMPL_SUFFIX; StringWriter writer = new StringWriter(); writer.append("package " + clazz.getPackage().getName() + ";" + NL + NL); writer.append("public final class " + implSimpleName + NL); writer.append(INDENT + "extends " + AbstractDao.class.getName()); writer.append("<" + NL); for (int index = 0; index < daoParams.length; index++) { if (index > 0) { writer.append("," + NL); } writer.append(INDENT2 + daoParams[index].getName()); } writer.append(">" + NL); writer.append(INDENT + "implements " + clazz.getName() + " {" + NL + NL); writer.append(INDENT + "public " + implSimpleName + "() {" + NL); writer.append(INDENT2 + "super(" + MAX_MSG_LEN + ")" + ";" + NL); writer.append(INDENT + "}" + NL + NL); final String keyEncoder = daoParams[1].getName(); final String keyDecoder = daoParams[2].getName(); final String valEncoder = daoParams[4].getName(); final String valDecoder = daoParams[5].getName(); writer.append(INDENT + "protected " + keyDecoder + " keyDecoder() {" + NL); writer.append(INDENT2 + "return new " + keyDecoder + "();" + NL); writer.append(INDENT + "}" + NL + NL); writer.append(INDENT + "protected " + keyEncoder + " keyEncoder() {" + NL); writer.append(INDENT2 + "return new " + keyEncoder + "();" + NL); writer.append(INDENT + "}" + NL + NL); writer.append(INDENT + "protected " + valDecoder + " valDecoder() {" + NL); writer.append(INDENT2 + "return new " + valDecoder + "();" + NL); writer.append(INDENT + "}" + NL + NL); writer.append(INDENT + "protected " + valEncoder + " valEncoder() {" + NL); writer.append(INDENT2 + "return new " + valEncoder + "();" + NL); writer.append(INDENT + "}" + NL); writer.append("}"); sources.put(implFullName, writer.toString()); final Class<?> compiled; try { compiled = CompilerUtil.compileInMemory(implFullName, sources); } catch (ClassNotFoundException ex) { throw new IllegalStateException("Cannot compile " + implFullName, ex); } classes.put(implFullName, compiled); } }
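A minimal, hypothetical usage sketch of the DaoFactory above. MyRecordDao, its six type parameters (MyKey, MyKeyEncoder, MyKeyDecoder, MyValue, MyValueEncoder, MyValueDecoder), the value 512, and DaoFactoryUsage are illustrative assumptions and do not appear in the original source; the only constraints taken from the source are that the DAO interface extends only Dao and declares a primitive int field named MAX_MSG_LEN.

// Hypothetical client-side DAO declaration. The six type parameters are placeholders
// for the key/value types and their encoder/decoder classes expected by Dao; none of
// these types exist in the original source.
public interface MyRecordDao
        extends Dao<MyKey, MyKeyEncoder, MyKeyDecoder, MyValue, MyValueEncoder, MyValueDecoder> {
    // Required by DaoFactory: a primitive int named MAX_MSG_LEN; the generated
    // implementation passes it to the AbstractDao constructor via super(MAX_MSG_LEN).
    int MAX_MSG_LEN = 512;
}

class DaoFactoryUsage {
    MyRecordDao createDao() {
        // The factory caches the generated implementation class per factory instance,
        // so repeated newInstance calls for the same interface compile it only once.
        DaoFactory factory = new DaoFactory();
        return factory.newInstance(MyRecordDao.class);
    }
}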
package demo; public class Foo { public static void main(String[] args) { System.out.println("The first arg is: " + args[0]); } }
/** * Uses "INSERT" statements followed by "UPDATE" statements. (Standard SQL); */ private class UpsertStandardUS implements UpsertStrategy { @Override public long upsert(Table table, String sqlSelect) throws SQLException { StringBuffer nonPKList = new StringBuffer(); StringBuffer nonPKListQ = new StringBuffer(); StringBuffer where = new StringBuffer(); StringBuffer whereT = new StringBuffer(); for (Column column : table.getColumns()) { if (!isPrimaryKeyColumn(table, column.name)) { if (nonPKList.length() > 0) { nonPKList.append(", "); } nonPKList.append(quoting.requote(column.name)); if (nonPKListQ.length() > 0) { nonPKListQ.append(", "); } nonPKListQ.append("Q." + prefixColumnName(COLUMN_PREFIX, quoting, column)); } else { if (where.length() > 0) { where.append(" and "); } where.append(applyImportFilter("S." + quoting.requote(column.name), column) + "=Q." + prefixColumnName(COLUMN_PREFIX, quoting, column)); if (whereT.length() > 0) { whereT.append(" and "); } whereT.append(applyImportFilter("S." + quoting.requote(column.name), column) + "=T." + quoting.requote(column.name)); } } if (nonPKList.length() == 0) { // nothing to do return 0; } String sql = "Update " + qualifiedTableName(table) + " S set (" + nonPKList + ")" + " = (Select " + nonPKListQ + " From (" + sqlSelect + ") Q Where " + where + ") " + "Where exists (" + sqlSelect + " and (" + whereT + "))"; long rc = session.executeUpdate(sql); return rc + insertWhereNotExists(table, sqlSelect); } }
import { IAuthUser } from '../interfaces';

export class UserWithCookiesModel {
  cookies: string[];
  user: IAuthUser;
}
DENVER (CBS4)– Supporters launched a campaign to raise the tobacco tax by $1.75 per pack in Colorado on Wednesday. They say the initiative is about creating a healthier Colorado. If the effort makes the ballot and passes, it will be the first time the tobacco tax has increased in the state in a dozen years. Right now, some might consider buying cigarettes in Colorado a bargain. The average state tobacco tax is $1.63 per pack. Colorado smokers will pay 84 cents. But supporters of the Campaign for a Healthy Colorado are hoping to hit smokers in the pocketbook. “Research shows that increasing the cost of cigarettes is an effective way to reduce smoking,” Dr. Debra Dyer, Chair of the Department of Radiology at National Jewish Health told a crowded news conference. More than 50 organizations are behind proposed Initiative 143 to up the tax on cigarettes in Colorado to $2.59 per pack. With the average pack at $5.55, the average cost for a pack in Colorado would increase to $8.14. The last tobacco tax increase was in 2004. “It’s time for Colorado to reclaim its commitment to preventing kids from starting a life-long addiction to tobacco use,” said Jodi Radke, Regional Advocacy Director with the Campaign for Tobacco-Free Kids. It will take 98,000 signatures to get the initiative on the ballot. Supporters say they’ve collected 35,000. Their goal is 160,000 signatures in the next five weeks. “No worries, not one,” said Radke. Wounded veteran and advocate David Ortiz and others are banking on it. “Rebuilding a life is a daunting undertaking and I didn’t do it alone,” said Ortiz who suffered a spinal cord injury while deployed in 2012. It’s estimated the tobacco tax would raise $315 million a year. The money would be dedicated to veterans health care, cancer and Alzheimer’s research, expanded access to youth behavioral health services and more. “We need to put more money into education programs to stop kids and adults from smoking in the first place,” said Dr. Karyn Goodman, Medical Director of the Cancer Clinical Trials Office at the University of Colorado School of Medicine. The highest state tobacco tax is $4.35 a pack in New York. The lowest is 17 cents per pack in Missouri. Kathy Walsh is CBS4’s Weekend Anchor and Health Specialist. She has been with CBS4 for more than 30 years. She is always open to story ideas. Follow Kathy on Twitter @WalshCBS4.
import { ContactItemMenuProps } from '@Components/ContactItem'; import Menu from '@material-ui/core/Menu'; import MenuItem from '@material-ui/core/MenuItem'; import React from 'react'; import { info } from './Contact'; export default function EmailMenu({ anchorEl, open, snackbarFunc, onClose, }: ContactItemMenuProps): React.ReactElement { return ( <Menu anchorEl={anchorEl} keepMounted open={open} onClose={() => { if (onClose) onClose({}, 'backdropClick'); }} > <MenuItem onClick={() => { navigator.clipboard.writeText(info.email.addr); if (snackbarFunc) snackbarFunc('Email Address Copied to Clipboard'); if (onClose) onClose({}, 'backdropClick'); }} > Copy Address </MenuItem> <MenuItem component="a" href={'mailto:' + info.email.addr} onClick={() => { if (onClose) onClose({}, 'backdropClick'); }} > Mail To </MenuItem> </Menu> ); }
Tue, 20 Oct, 2015 01:04:52 AM Police arrest 3 local youths FTimes-STT Report, Oct 20 According to the eastern Finland police, two smoke bombs were thrown at the reception centre set up in the Tarina hospital and caused a lot of smoke in the centre’s courtyard. The police arrested three local residents, all of them men and 17 to 18 years old, from the nearby forest area. The police are investigating the crime as vandalism. Police Sergeant Antti Nissinen told the Finnish language newspaper Savon Sanomat on Monday that the men who threw the smoke grenades were drinking alcohol. “Young men, who thought this was a way to liven up their weekend,” the police official remarked. Alcohol had its share of blame, Nissinen told the newspaper. According to the newspaper, the building was not damaged by the illegal act.
/** * Select Adhoc adapter for one result is a base class for custom {@code Select one} implementations in client code. * * @see SQLResultAdapter.SQLResultOneAdapter * @since 1.0.0 */ @Getter @Accessors(fluent = true) public abstract class SelectAdhocOneResult<T extends TableLike<? extends Record>, R> extends SQLResultAdapterImpl<T, R> implements SQLResultOneAdapter<T, R> { protected SelectAdhocOneResult(@NonNull T table) { super(table); } }
// Copyright (c) 2011 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include "butil/posix/file_descriptor_shuffle.h" #include <unistd.h> #include <stddef.h> #include <ostream> #include "butil/posix/eintr_wrapper.h" #include "butil/logging.h" namespace butil { bool PerformInjectiveMultimapDestructive( InjectiveMultimap* m, InjectionDelegate* delegate) { static const size_t kMaxExtraFDs = 16; int extra_fds[kMaxExtraFDs]; unsigned next_extra_fd = 0; // DANGER: this function must not allocate or lock. // Cannot use STL iterators here, since debug iterators use locks. for (size_t i_index = 0; i_index < m->size(); ++i_index) { InjectiveMultimap::value_type* i = &(*m)[i_index]; int temp_fd = -1; // We DCHECK the injectiveness of the mapping. for (size_t j_index = i_index + 1; j_index < m->size(); ++j_index) { InjectiveMultimap::value_type* j = &(*m)[j_index]; DCHECK(i->dest != j->dest) << "Both fd " << i->source << " and " << j->source << " map to " << i->dest; } const bool is_identity = i->source == i->dest; for (size_t j_index = i_index + 1; j_index < m->size(); ++j_index) { InjectiveMultimap::value_type* j = &(*m)[j_index]; if (!is_identity && i->dest == j->source) { if (temp_fd == -1) { if (!delegate->Duplicate(&temp_fd, i->dest)) return false; if (next_extra_fd < kMaxExtraFDs) { extra_fds[next_extra_fd++] = temp_fd; } else { RAW_LOG(ERROR, "PerformInjectiveMultimapDestructive overflowed " "extra_fds. Leaking file descriptors!"); } } j->source = temp_fd; j->close = false; } if (i->close && i->source == j->dest) i->close = false; if (i->close && i->source == j->source) { i->close = false; j->close = true; } } if (!is_identity) { if (!delegate->Move(i->source, i->dest)) return false; } if (!is_identity && i->close) delegate->Close(i->source); } for (unsigned i = 0; i < next_extra_fd; i++) delegate->Close(extra_fds[i]); return true; } bool PerformInjectiveMultimap(const InjectiveMultimap& m_in, InjectionDelegate* delegate) { InjectiveMultimap m(m_in); return PerformInjectiveMultimapDestructive(&m, delegate); } bool FileDescriptorTableInjection::Duplicate(int* result, int fd) { *result = HANDLE_EINTR(dup(fd)); return *result >= 0; } bool FileDescriptorTableInjection::Move(int src, int dest) { return HANDLE_EINTR(dup2(src, dest)) != -1; } void FileDescriptorTableInjection::Close(int fd) { int ret = IGNORE_EINTR(close(fd)); DCHECK(ret == 0); } } // namespace butil