use anyhow::{anyhow, Result};
use prettytable::{format, Table};

use crate::config;

pub fn list(shortcut: Option<String>) -> Result<i32> {
    if let Some(shortcut) = shortcut {
        let config: config::Config = config::Config::load()?;
        let mut shortcut_list = Table::new();
        shortcut_list.set_format(*format::consts::FORMAT_NO_BORDER_LINE_SEPARATOR);
        shortcut_list.set_titles(row!["Name", "Shortcuts", "Shortcut Location"]);
        for shortcut_conf in config.shortcuts {
            if shortcut_conf.name.to_lowercase() == shortcut.to_lowercase() {
                let calls: String = shortcut_conf.calls.join(", ");
                shortcut_list.add_row(row![shortcut_conf.name, calls, shortcut_conf.location]);
                shortcut_list.printstd();
                return Ok(0);
            }
        }
        Err(anyhow!(format!(
            "Shortcut with name {} was not found",
            shortcut
        )))
    } else {
        let config: config::Config = config::Config::load()?;
        let mut shortcut_list = Table::new();
        shortcut_list.set_format(*format::consts::FORMAT_NO_BORDER_LINE_SEPARATOR);
        shortcut_list.set_titles(row!["Name", "Shortcuts", "Shortcut Location"]);
        for shortcut_conf in config.shortcuts {
            let calls: String = shortcut_conf.calls.join(", ");
            shortcut_list.add_row(row![shortcut_conf.name, calls, shortcut_conf.location]);
        }
        shortcut_list.printstd();
        Ok(0)
    }
}
/**
 * The result of an HttpMethodClient operation. This type exposes a future
 * for chaining async operations as well as a method for aborting the operation.
 *
 * @param <T> The type of result.
 */
public static class AsyncToken<T> {
    private final HttpRequestBase request;
    private final CompletableFuture<T> future;

    private AsyncToken(final HttpRequestBase request, final CompletableFuture<T> future) {
        this.request = request;
        this.future = future;
    }

    /**
     * Gets the future for the HTTP operation.
     *
     * @return The future.
     */
    public CompletableFuture<T> getFuture() {
        return this.future;
    }

    /**
     * Waits if necessary for the underlying future to complete, and then
     * returns its result.
     *
     * @return The result value.
     */
    public T get() {
        return ExceptionUtils.propagate(() -> this.getFuture().get());
    }

    /**
     * Aborts the HTTP operation.
     */
    public void abort() {
        this.request.abort();
    }
}
{-# language BangPatterns #-}
{-# language DeriveFunctor #-}
{-# language InstanceSigs #-}
{-# language LambdaCase #-}
{-# language RankNTypes #-}
{-# language ScopedTypeVariables #-}

module Exp where

import Data.Functor.Foldable (Fix(..), cata)

data ExpF v n a
  = NameF n
  | VarF v
  | IntF !Int
  | AddF a a
  | AppF a a
  | LamF (v -> a)
  deriving Functor

newtype Exp a = Exp { unExp :: forall v. Fix (ExpF v a) }

instance Functor Exp where
  fmap :: forall a b. (a -> b) -> Exp a -> Exp b
  fmap f (Exp a) = Exp (go a)
    where
      go :: forall v. Fix (ExpF v a) -> Fix (ExpF v b)
      go = cata $ Fix . \case
        NameF n  -> NameF $ f n
        VarF v   -> VarF v
        IntF n   -> IntF n
        AddF a b -> AddF a b
        AppF a b -> AppF a b
        LamF f   -> LamF f

instance Foldable Exp where
  foldMap f (Exp a) = go a
    where
      go = cata $ \case
        NameF n  -> f n
        VarF v   -> mempty
        IntF n   -> mempty
        AddF a b -> a <> b
        AppF a b -> a <> b
        LamF f   -> f ()

abstract :: Eq n => n -> Fix (ExpF v n) -> (v -> Fix (ExpF v n))
abstract _ (Fix (IntF n)) = \_ -> Fix $ IntF n
abstract _ (Fix (VarF v)) = \_ -> Fix $ VarF v
abstract n (Fix (NameF n'))
  | n == n'   = \v -> Fix $ VarF v
  | otherwise = \_ -> Fix $ NameF n'
abstract n (Fix (AppF a b)) = \v -> Fix $ AppF (abstract n a v) (abstract n b v)
abstract n (Fix (AddF a b)) = \v -> Fix $ AddF (abstract n a v) (abstract n b v)
abstract n (Fix (LamF f))   = \v -> Fix $ LamF (($ v) . abstract n . f)

lam :: Eq n => n -> Fix (ExpF v n) -> Fix (ExpF v n)
lam n a = Fix . LamF $ abstract n a

app :: Fix (ExpF v n) -> Fix (ExpF v n) -> Fix (ExpF v n)
app a b = Fix $ AppF a b

add :: Fix (ExpF v n) -> Fix (ExpF v n) -> Fix (ExpF v n)
add a b = Fix $ AddF a b

var :: v -> Fix (ExpF v n)
var v = Fix $ VarF v

int :: Int -> Fix (ExpF v n)
int a = Fix $ IntF a

name :: n -> Fix (ExpF v n)
name a = Fix $ NameF a

showExp :: forall a. Show a => Exp a -> String
showExp (Exp l) = go 0 l
  where
    go :: Int -> Fix (ExpF Int a) -> String
    go !depth (Fix e) = case e of
      IntF n   -> "IntF " <> show n
      NameF n  -> "NameF " <> show n
      VarF n   -> "VarF " <> show (depth - n)
      AppF a b -> "AppF " <> "(" <> go depth a <> ") (" <> go depth b <> ")"
      AddF a b -> "AddF " <> "(" <> go depth a <> ") (" <> go depth b <> ")"
      LamF f   -> "LamF (" <> go (depth + 1) (f (depth + 1)) <> ")"
//************************************ bs::framework - Copyright 2018 <NAME> **************************************//
//*********** Licensed under the MIT license. See LICENSE.md for full terms. This notice is not to be removed. ***********//
#pragma once

#include "Prerequisites/BsPrerequisitesUtil.h"

namespace bs
{
	/** @addtogroup General
	 *  @{
	 */

	/**
	 * Contains a set of samples resulting from sampling some function at equal intervals. The table can then be used
	 * for sampling that function at arbitrary time intervals. The sampling is fast but precision is limited to the
	 * number of samples.
	 */
	class BS_UTILITY_EXPORT LookupTable
	{
	public:
		/**
		 * Constructs a lookup table from the provided set of values.
		 *
		 * @param[in]	values		Buffer containing information about all the samples. Total buffer size must be
		 *							divisible by @p sampleSize.
		 * @param[in]	startTime	Time at which the first provided sample has been evaluated.
		 * @param[in]	endTime		Time at which the last provided sample has been evaluated. All samples in-between the
		 *							first and last are assumed to be evaluated at equal intervals in the
		 *							[startTime, endTime] range.
		 * @param[in]	sampleSize	Number of 'float's each sample requires. This number must divide the number of
		 *							elements in the @p values buffer.
		 */
		LookupTable(Vector<float> values, float startTime = 0.0f, float endTime = 1.0f, uint32_t sampleSize = 1);

		/**
		 * Evaluates the lookup table at the specified time.
		 *
		 * @param[in]	t			Time to evaluate the lookup table at.
		 * @param[out]	left		Pointer to the set of values contained in the sample left of the time value.
		 * @param[out]	right		Pointer to the set of values contained in the sample right of the time value.
		 * @param[out]	fraction	Fraction that determines how to interpolate between @p left and @p right values,
		 *							where 0 corresponds to the @p left value, 1 to the @p right value and values
		 *							in-between interpolate linearly between the two.
		 */
		void evaluate(float t, const float*& left, const float*& right, float& fraction) const;

		/** Returns a sample at the specified index. Returns the last available sample if the index is out of range. */
		const float* getSample(uint32_t idx) const;

	private:
		Vector<float> mValues;
		uint32_t mSampleSize;
		uint32_t mNumSamples;
		float mTimeStart;
		float mTimeScale;
	};

	/** @} */
}
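The header above only declares the interface. As an illustration of the sampling scheme its comments describe (samples taken at equal intervals, with evaluate() returning the two bracketing samples plus an interpolation fraction), the following is a minimal Python sketch of how such a table is typically indexed. It is an assumption made for illustration, not the bsf implementation; the names LookupTable, get_sample and evaluate simply mirror the C++ declaration.

import math

class LookupTable:
    """Toy equal-interval lookup table (illustrative sketch, not the bsf code)."""

    def __init__(self, values, start_time=0.0, end_time=1.0, sample_size=1):
        assert len(values) % sample_size == 0
        self.values = values
        self.sample_size = sample_size
        self.num_samples = len(values) // sample_size
        self.start_time = start_time
        # Scale that maps a time offset onto a (fractional) sample index.
        self.time_scale = (self.num_samples - 1) / (end_time - start_time) if self.num_samples > 1 else 0.0

    def get_sample(self, idx):
        # Clamp to the last available sample if the index is out of range.
        idx = min(max(idx, 0), self.num_samples - 1)
        return self.values[idx * self.sample_size:(idx + 1) * self.sample_size]

    def evaluate(self, t):
        # Fractional index of t within the sampled range, clamped to valid bounds.
        x = max(0.0, min((t - self.start_time) * self.time_scale, self.num_samples - 1))
        left_idx = int(x)
        fraction = x - left_idx
        return self.get_sample(left_idx), self.get_sample(left_idx + 1), fraction


# Usage: sample sin(x) at 5 equally spaced points over [0, pi] and interpolate in-between.
table = LookupTable([math.sin(i * math.pi / 4) for i in range(5)], 0.0, math.pi)
left, right, frac = table.evaluate(0.4)
approx = left[0] + (right[0] - left[0]) * frac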
import numpy as np
import matplotlib.pyplot as plt

# Note: cBasis0010 (the fifth-order polynomial basis class used below) is assumed to be
# provided by the surrounding project; its import was not included in the original snippet.


def plot_minimum_jerk_basis():
    tau = 1
    fifth_order_pol = cBasis0010()

    val = fifth_order_pol.evalOnWindow(0, tau)
    print('The value of the Polynomial base at s=0 is:')
    print(val)

    normalized_time_interval = np.arange(-1, 1.05, 0.05)

    # Plot the basis functions themselves.
    res = np.array([fifth_order_pol.evalOnWindow(t, tau)
                    for t in normalized_time_interval])
    for i in range(fifth_order_pol.dim_):
        plt.plot(normalized_time_interval, res[:, i],
                 label='Pol base of degree {:d}'.format(i))
    plt.grid()
    plt.title('Polynomial basis for minimum jerk motions')
    plt.xlabel('normalized time interval "s"')
    plt.legend()
    plt.show()

    # Plot the first derivative of each basis function.
    res = np.array([fifth_order_pol.evalDerivOnWindow(t, tau, 1)
                    for t in normalized_time_interval])
    for i in range(fifth_order_pol.dim_):
        plt.plot(normalized_time_interval, res[:, i],
                 label='Derivative of the pol base of degree {:d}'.format(i))
    plt.grid()
    plt.title('Derivative of the polynomial basis for minimum jerk motions')
    plt.xlabel('normalized time interval "s"')
    plt.legend()
    plt.show()
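For readers who cannot import cBasis0010, the stand-in below (MonomialQuinticBasis, a hypothetical name) exposes the same three members the plotting code relies on: dim_, evalOnWindow and evalDerivOnWindow. It evaluates a plain monomial quintic basis {1, s, ..., s^5}; minimum-jerk trajectories are quintic polynomials, but the actual basis implemented by cBasis0010 may be parameterised differently, so treat this only as an illustrative sketch.

import numpy as np

class MonomialQuinticBasis:
    """Hypothetical stand-in for cBasis0010: a monomial quintic basis on [-1, 1]."""
    dim_ = 6  # degrees 0..5

    def evalOnWindow(self, s, tau):
        # Basis values [1, s, s^2, ..., s^5]; tau is unused for plain monomials.
        return np.array([s ** k for k in range(self.dim_)])

    def evalDerivOnWindow(self, s, tau, order):
        # order-th derivative of each monomial: d^order/ds^order of s^k.
        out = np.zeros(self.dim_)
        for k in range(self.dim_):
            if k >= order:
                coeff = np.prod(np.arange(k, k - order, -1))  # k*(k-1)*...*(k-order+1)
                out[k] = coeff * s ** (k - order)
        return out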
/**
 * Integration test for {@link CreateReadCountPanelOfNormals}.
 *
 * @author Samuel Lee &lt;slee@broadinstitute.org&gt;
 */
public final class CreateReadCountPanelOfNormalsIntegrationTest extends CommandLineProgramTest {
    private static final int RANDOM_SEED = 1;
    private static final int NUM_GOOD_SAMPLES = 95;
    private static final int NUM_BAD_SAMPLES_WITH_TOO_MANY_ZEROS = 5;
    private static final int NUM_SAMPLES = NUM_GOOD_SAMPLES + NUM_BAD_SAMPLES_WITH_TOO_MANY_ZEROS;
    private static final int NUM_GOOD_INTERVALS = 95;
    private static final int NUM_BAD_INTERVALS_WITH_TOO_MANY_ZEROS = 5;
    private static final int NUM_INTERVALS = NUM_GOOD_INTERVALS + NUM_BAD_INTERVALS_WITH_TOO_MANY_ZEROS;
    private static final double DEPTH_SCALE_FACTOR = 10000.;
    private static final double ALPHA_MIN = 0.01;
    private static final double ALPHA_MAX = 100.;
    private static final double MEAN_BIAS_SHAPE = 10.;
    private static final double MEAN_BIAS_SCALE = 0.1;

    //we test only for filtering of samples and intervals with too many zeros
    private static final double MINIMUM_INTERVAL_MEDIAN_PERCENTILE = 0.;
    private static final double MAXIMUM_ZEROS_IN_SAMPLE_PERCENTAGE = 5.;
    private static final double MAXIMUM_ZEROS_IN_INTERVAL_PERCENTAGE = 5.;
    private static final double EXTREME_SAMPLE_MEDIAN_PERCENTILE = 0.;

    //test that number of eigenvalues is recovered for a few different values using fraction of variance as a heuristic
    private static final int NUMBER_OF_EIGENVALUES_REQUESTED = NUM_SAMPLES;
    private static final List<Integer> TRUE_NUMBER_OF_EIGENVALUES_LIST = Arrays.asList(1, 4);
    private static final double FRACTION_OF_VARIANCE_EXPLAINED_THRESHOLD = 0.95;    //only a rough threshold---generating different test data may cause failures

    //test that denoised log2 copy ratios are sufficiently denoised
    private static final double DENOISED_LOG2CR_STANDARD_DEVIATION_THRESHOLD = 0.15;   //generating different test data may cause failures

    //a reasonable default GC bias curve (borrowed from GCBiasCorrectorUnitTest)
    private static final Function<Double, Double> QUADRATIC_GC_BIAS_CURVE = gc -> 0.5 + 2 * gc * (1 - gc);

    private static final SAMSequenceDictionary SEQUENCE_DICTIONARY = new SAMSequenceDictionary(Collections.singletonList(
            new SAMSequenceRecord("1", NUM_INTERVALS)));

    @DataProvider(name = "dataPanelOfNormals")
    public Object[][] dataPanelOfNormals() {
        final RandomDataGenerator rng = new RandomDataGenerator();
        rng.reSeed(RANDOM_SEED);

        //make fake intervals
        final List<SimpleInterval> intervals = IntStream.range(1, NUM_INTERVALS + 1)
                .mapToObj(i -> new SimpleInterval("1", i, i))
                .collect(Collectors.toList());

        //randomly generate GC content in each interval (borrowed from GCBiasCorrectorUnitTest)
        // gc_i ~ Bound(Normal(0.5, 0.2), 0.05, 0.95)
        final double[] intervalGCContent = IntStream.range(0, NUM_INTERVALS)
                .mapToDouble(n -> rng.nextGaussian(0.5, 0.2))
                .map(x -> Math.min(x, 0.95)).map(x -> Math.max(x, 0.05))
                .toArray();

        //assume all samples have the same GC bias (this does not add any principal components)
        final double[] intervalGCBias = Arrays.stream(intervalGCContent)
                .map(QUADRATIC_GC_BIAS_CURVE::apply)
                .toArray();

        //write GC-content annotations to file
        final AnnotatedIntervalCollection annotatedIntervals = new AnnotatedIntervalCollection(
                new SimpleLocatableMetadata(SEQUENCE_DICTIONARY),
                IntStream.range(0, NUM_INTERVALS)
                        .mapToObj(i -> new AnnotatedInterval(intervals.get(i), new AnnotationSet(intervalGCContent[i])))
                        .collect(Collectors.toList()));
        final File annotatedIntervalsFile = createTempFile("annotated-intervals", ".tsv");
        annotatedIntervals.write(annotatedIntervalsFile);

        final List<List<Object>> data = new ArrayList<>();
        for (final Integer trueNumberOfEigenvalues : TRUE_NUMBER_OF_EIGENVALUES_LIST) {
            //simulate data from simple version of gCNV coverage model
            // alpha_j ~ Uniform(alpha_min, alpha_max)
            // sigma_j = 1 / sqrt(alpha_j)
            // W_ji ~ Normal(0, sigma_j)
            // z_sj ~ Normal(0, 1)
            // m_i ~ Gamma(m_shape, m_scale)
            // n_si ~ Poisson(d * gc_bias_i * exp(z_sj W_ji + m_i))
            final List<Double> sigmas = IntStream.range(0, trueNumberOfEigenvalues)
                    .mapToObj(i -> rng.nextUniform(ALPHA_MIN, ALPHA_MAX))
                    .map(alpha -> 1. / Math.sqrt(alpha))
                    .collect(Collectors.toList());
            final RealMatrix W = new Array2DRowRealMatrix(trueNumberOfEigenvalues, NUM_INTERVALS);
            W.walkInOptimizedOrder(new DefaultRealMatrixChangingVisitor() {
                @Override
                public double visit(int row, int column, double value) {
                    return rng.nextGaussian(0., sigmas.get(row));
                }
            });
            final RealMatrix z = new Array2DRowRealMatrix(NUM_SAMPLES, trueNumberOfEigenvalues);
            z.walkInOptimizedOrder(new DefaultRealMatrixChangingVisitor() {
                @Override
                public double visit(int row, int column, double value) {
                    return rng.nextGaussian(0., 1.);
                }
            });
            final RealVector m = new ArrayRealVector(IntStream.range(0, NUM_INTERVALS)
                    .mapToDouble(i -> rng.nextGamma(MEAN_BIAS_SHAPE, MEAN_BIAS_SCALE))
                    .toArray());
            final RealMatrix zdotW = z.multiply(W);
            final RealMatrix bias = new Array2DRowRealMatrix(NUM_SAMPLES, NUM_INTERVALS);
            bias.walkInOptimizedOrder(new DefaultRealMatrixChangingVisitor() {
                @Override
                public double visit(int row, int column, double value) {
                    return Math.exp(zdotW.getEntry(row, column) + m.getEntry(column));
                }
            });
            final RealMatrix counts = new Array2DRowRealMatrix(NUM_SAMPLES, NUM_INTERVALS);
            counts.walkInOptimizedOrder(new DefaultRealMatrixChangingVisitor() {
                @Override
                public double visit(int row, int column, double value) {
                    return rng.nextPoisson(DEPTH_SCALE_FACTOR * intervalGCBias[column] * bias.getEntry(row, column));
                }
            });

            //corrupt first NUM_BAD_SAMPLES_WITH_TOO_MANY_ZEROS samples by randomly adding zeros
            //to 5 * MAXIMUM_ZEROS_IN_SAMPLE_PERCENTAGE / 100. of intervals
            for (int sampleIndex = 0; sampleIndex < NUM_BAD_SAMPLES_WITH_TOO_MANY_ZEROS; sampleIndex++) {
                for (int intervalIndex = 0; intervalIndex < NUM_INTERVALS; intervalIndex++) {
                    if (rng.nextUniform(0., 1.) < 5 * MAXIMUM_ZEROS_IN_SAMPLE_PERCENTAGE / 100.) {
                        counts.setEntry(sampleIndex, intervalIndex, 0.);
                    }
                }
            }

            //corrupt first NUM_BAD_INTERVALS_WITH_TOO_MANY_ZEROS intervals by randomly adding zeros
            //to 5 * MAXIMUM_ZEROS_IN_INTERVAL_PERCENTAGE / 100. of samples
            for (int intervalIndex = 0; intervalIndex < NUM_BAD_INTERVALS_WITH_TOO_MANY_ZEROS; intervalIndex++) {
                for (int sampleIndex = 0; sampleIndex < NUM_SAMPLES; sampleIndex++) {
                    if (rng.nextUniform(0., 1.) < 5 * MAXIMUM_ZEROS_IN_INTERVAL_PERCENTAGE / 100.)
                    {
                        counts.setEntry(sampleIndex, intervalIndex, 0.);
                    }
                }
            }

            //make input files from counts matrix
            final List<File> inputTSVFiles = new ArrayList<>(NUM_SAMPLES);
            final List<File> inputHDF5Files = new ArrayList<>(NUM_SAMPLES);
            for (int sampleIndex = 0; sampleIndex < NUM_SAMPLES; sampleIndex++) {
                final File inputTSVFile = createTempFile("sample-" + sampleIndex, ".tsv");
                final File inputHDF5File = createTempFile("sample-" + sampleIndex, ".hdf5");
                final double[] sampleCounts = counts.getRow(sampleIndex);
                final SimpleCountCollection scc = new SimpleCountCollection(
                        new SimpleSampleLocatableMetadata(
                                "sample_" + sampleIndex,
                                SEQUENCE_DICTIONARY),
                        IntStream.range(0, NUM_INTERVALS)
                                .mapToObj(i -> new SimpleCount(intervals.get(i), (int) sampleCounts[i]))
                                .collect(Collectors.toList()));
                scc.write(inputTSVFile);
                inputTSVFiles.add(inputTSVFile);
                scc.writeHDF5(inputHDF5File);
                inputHDF5Files.add(inputHDF5File);
            }

            for (final File inputAnnotatedIntervalsFile : Arrays.asList(annotatedIntervalsFile, null)) {
                //counts for all samples as TSV files
                data.add(Arrays.asList(
                        inputTSVFiles,
                        inputAnnotatedIntervalsFile,
                        trueNumberOfEigenvalues));
                //counts for all samples as HDF5 files
                data.add(Arrays.asList(
                        inputHDF5Files,
                        inputAnnotatedIntervalsFile,
                        trueNumberOfEigenvalues));
                //mix of TSV and HDF5 files
                data.add(Arrays.asList(
                        ListUtils.union(
                                inputTSVFiles.subList(0, NUM_SAMPLES / 2),
                                inputHDF5Files.subList(NUM_SAMPLES / 2, NUM_SAMPLES)),
                        inputAnnotatedIntervalsFile,
                        trueNumberOfEigenvalues));
            }
        }
        return data.stream().map(List::toArray).toArray(Object[][]::new);
    }

    @Test(dataProvider = "dataPanelOfNormals")
    public void test(final List<File> inputFiles,
                     final File annotatedIntervalsFile,
                     final int expectedNumberOfEigenvalues) {
        final File resultOutputFile = createTempFile("create-read-count-panel-of-normals-test", ".tsv");
        final ArgumentsBuilder argsBuilder = new ArgumentsBuilder()
                .addArgument(CreateReadCountPanelOfNormals.MINIMUM_INTERVAL_MEDIAN_PERCENTILE_LONG_NAME,
                        Double.toString(MINIMUM_INTERVAL_MEDIAN_PERCENTILE))
                .addArgument(CreateReadCountPanelOfNormals.MAXIMUM_ZEROS_IN_SAMPLE_PERCENTAGE_LONG_NAME,
                        Double.toString(MAXIMUM_ZEROS_IN_SAMPLE_PERCENTAGE))
                .addArgument(CreateReadCountPanelOfNormals.MAXIMUM_ZEROS_IN_INTERVAL_PERCENTAGE_LONG_NAME,
                        Double.toString(MAXIMUM_ZEROS_IN_INTERVAL_PERCENTAGE))
                .addArgument(CreateReadCountPanelOfNormals.EXTREME_SAMPLE_MEDIAN_PERCENTILE_LONG_NAME,
                        Double.toString(EXTREME_SAMPLE_MEDIAN_PERCENTILE))
                .addArgument(CopyNumberStandardArgument.NUMBER_OF_EIGENSAMPLES_LONG_NAME,
                        Integer.toString(NUMBER_OF_EIGENVALUES_REQUESTED))
                .addOutput(resultOutputFile);
        if (annotatedIntervalsFile != null) {
            argsBuilder.addFileArgument(CopyNumberStandardArgument.ANNOTATED_INTERVALS_FILE_LONG_NAME, annotatedIntervalsFile);
        }
        inputFiles.forEach(argsBuilder::addInput);
        runCommandLine(argsBuilder);
        testPanelOfNormals(annotatedIntervalsFile, expectedNumberOfEigenvalues, resultOutputFile);
    }

    @Test(dataProvider = "dataPanelOfNormals")
    public void testSingleSample(final List<File> inputFiles,
                                 final File annotatedIntervalsFile,
                                 final int expectedNumberOfEigenvalues) {   //ignored in this test
        final File resultOutputFile = createTempFile("create-read-count-panel-of-normals-test", ".tsv");
        final ArgumentsBuilder argsBuilder = new ArgumentsBuilder()
                .addArgument(CreateReadCountPanelOfNormals.MINIMUM_INTERVAL_MEDIAN_PERCENTILE_LONG_NAME,
                        Double.toString(MINIMUM_INTERVAL_MEDIAN_PERCENTILE))
                .addArgument(CreateReadCountPanelOfNormals.MAXIMUM_ZEROS_IN_SAMPLE_PERCENTAGE_LONG_NAME,
                        Double.toString(MAXIMUM_ZEROS_IN_SAMPLE_PERCENTAGE))
                .addArgument(CreateReadCountPanelOfNormals.MAXIMUM_ZEROS_IN_INTERVAL_PERCENTAGE_LONG_NAME,
                        Double.toString(MAXIMUM_ZEROS_IN_INTERVAL_PERCENTAGE))
                .addArgument(CreateReadCountPanelOfNormals.EXTREME_SAMPLE_MEDIAN_PERCENTILE_LONG_NAME,
                        Double.toString(EXTREME_SAMPLE_MEDIAN_PERCENTILE))
                .addArgument(CopyNumberStandardArgument.NUMBER_OF_EIGENSAMPLES_LONG_NAME,
                        Integer.toString(NUMBER_OF_EIGENVALUES_REQUESTED))
                .addOutput(resultOutputFile);
        if (annotatedIntervalsFile != null) {
            argsBuilder.addFileArgument(CopyNumberStandardArgument.ANNOTATED_INTERVALS_FILE_LONG_NAME, annotatedIntervalsFile);
        }
        argsBuilder.addInput(inputFiles.get(NUM_BAD_SAMPLES_WITH_TOO_MANY_ZEROS));  //use only first good sample
        runCommandLine(argsBuilder);
        //just check that we can build the panel; no other assertions checked
    }

    @Test(dataProvider = "dataPanelOfNormals")
    public void testZeroEigensamples(final List<File> inputFiles,
                                     final File annotatedIntervalsFile,
                                     final int expectedNumberOfEigenvalues) {   //ignored in this test
        final File resultOutputFile = createTempFile("create-read-count-panel-of-normals-test", ".tsv");
        final ArgumentsBuilder argsBuilder = new ArgumentsBuilder()
                .addArgument(CreateReadCountPanelOfNormals.MINIMUM_INTERVAL_MEDIAN_PERCENTILE_LONG_NAME,
                        Double.toString(MINIMUM_INTERVAL_MEDIAN_PERCENTILE))
                .addArgument(CreateReadCountPanelOfNormals.MAXIMUM_ZEROS_IN_SAMPLE_PERCENTAGE_LONG_NAME,
                        Double.toString(MAXIMUM_ZEROS_IN_SAMPLE_PERCENTAGE))
                .addArgument(CreateReadCountPanelOfNormals.MAXIMUM_ZEROS_IN_INTERVAL_PERCENTAGE_LONG_NAME,
                        Double.toString(MAXIMUM_ZEROS_IN_INTERVAL_PERCENTAGE))
                .addArgument(CreateReadCountPanelOfNormals.EXTREME_SAMPLE_MEDIAN_PERCENTILE_LONG_NAME,
                        Double.toString(EXTREME_SAMPLE_MEDIAN_PERCENTILE))
                .addArgument(CopyNumberStandardArgument.NUMBER_OF_EIGENSAMPLES_LONG_NAME, "0")
                .addOutput(resultOutputFile);
        if (annotatedIntervalsFile != null) {
            argsBuilder.addFileArgument(CopyNumberStandardArgument.ANNOTATED_INTERVALS_FILE_LONG_NAME, annotatedIntervalsFile);
        }
        inputFiles.forEach(argsBuilder::addInput);
        runCommandLine(argsBuilder);

        try (final HDF5File hdf5PanelOfNormalsFile = new HDF5File(resultOutputFile)) {
            final SVDReadCountPanelOfNormals panelOfNormals = HDF5SVDReadCountPanelOfNormals.read(hdf5PanelOfNormalsFile);
            Assert.assertEquals(panelOfNormals.getNumEigensamples(), 0);

            //denoise last sample (which is not a bad sample) in original counts using zero eigensamples
            final RealMatrix counts = new Array2DRowRealMatrix(panelOfNormals.getOriginalReadCounts());
            final List<SimpleInterval> originalIntervals = panelOfNormals.getOriginalIntervals();
            final SimpleCountCollection sampleCounts = new SimpleCountCollection(
                    new SimpleSampleLocatableMetadata("test-sample", SEQUENCE_DICTIONARY),
                    IntStream.range(0, NUM_INTERVALS)
                            .mapToObj(i -> new SimpleCount(originalIntervals.get(i),
                                    (int) counts.getEntry(counts.getRowDimension() - 1, i)))
                            .collect(Collectors.toList()));
            final SVDDenoisedCopyRatioResult denoisedResult = panelOfNormals.denoise(sampleCounts, 0);

            //check that the denoised copy ratios are identical to the standardized copy ratios
            Assert.assertEquals(denoisedResult.getDenoisedCopyRatios(), denoisedResult.getStandardizedCopyRatios());

            //check that exceptions are thrown when attempting to get singular values and eigenvectors
            Assert.assertThrows(UnsupportedOperationException.class, panelOfNormals::getSingularValues);
            Assert.assertThrows(UnsupportedOperationException.class, panelOfNormals::getEigensampleVectors);
        }
    }

    private void
    testPanelOfNormals(final File annotatedIntervalsFile,
                       final int expectedNumberOfEigenvalues,
                       final File resultOutputFile) {
        try (final HDF5File hdf5PanelOfNormalsFile = new HDF5File(resultOutputFile)) {
            final SVDReadCountPanelOfNormals panelOfNormals = HDF5SVDReadCountPanelOfNormals.read(hdf5PanelOfNormalsFile);

            //check dimensions of original counts and intervals
            final RealMatrix counts = new Array2DRowRealMatrix(panelOfNormals.getOriginalReadCounts());
            Assert.assertEquals(counts.getRowDimension(), NUM_SAMPLES);
            Assert.assertEquals(counts.getColumnDimension(), NUM_INTERVALS);
            final List<SimpleInterval> originalIntervals = panelOfNormals.getOriginalIntervals();
            Assert.assertEquals(originalIntervals.size(), NUM_INTERVALS);

            //check that GC annotations are present (missing) if explicit GC correction was (was not) performed
            if (annotatedIntervalsFile != null) {
                Assert.assertEquals(panelOfNormals.getOriginalIntervalGCContent().length, NUM_INTERVALS);
            } else {
                Assert.assertEquals(panelOfNormals.getOriginalIntervalGCContent(), null);
            }

            //check filtering of samples and intervals with too many zeros
            Assert.assertEquals(panelOfNormals.getNumEigensamples(), NUM_GOOD_SAMPLES);
            Assert.assertEquals(panelOfNormals.getPanelIntervals().size(), NUM_GOOD_INTERVALS);
            Assert.assertEquals(panelOfNormals.getPanelIntervalFractionalMedians().length, NUM_GOOD_INTERVALS);

            //check that correct number of significant eigenvalues is found (this is a bit heuristic and may fail if test data is changed)
            final double totalVariance = DoubleStream.of(panelOfNormals.getSingularValues()).map(x -> x * x).sum();
            final double fractionOfVarianceExplainedMissingLastEigenvalue = IntStream.range(0, expectedNumberOfEigenvalues - 1)
                    .mapToDouble(i -> panelOfNormals.getSingularValues()[i]).map(x -> x * x).sum() / totalVariance;
            Assert.assertTrue(fractionOfVarianceExplainedMissingLastEigenvalue < FRACTION_OF_VARIANCE_EXPLAINED_THRESHOLD);
            final double fractionOfVarianceExplained = IntStream.range(0, expectedNumberOfEigenvalues)
                    .mapToDouble(i -> panelOfNormals.getSingularValues()[i]).map(x -> x * x).sum() / totalVariance;
            Assert.assertTrue(fractionOfVarianceExplained > FRACTION_OF_VARIANCE_EXPLAINED_THRESHOLD);

            //check dimensions of eigenvectors
            final RealMatrix eigensampleVectors = new Array2DRowRealMatrix(panelOfNormals.getEigensampleVectors());
            Assert.assertEquals(eigensampleVectors.getRowDimension(), NUM_GOOD_INTERVALS);
            Assert.assertEquals(eigensampleVectors.getColumnDimension(), Math.min(NUMBER_OF_EIGENVALUES_REQUESTED, NUM_GOOD_SAMPLES));

            //denoise last sample (which is not a bad sample) in original counts using true number of eigenvalues
            final SimpleCountCollection sampleCounts = new SimpleCountCollection(
                    new SimpleSampleLocatableMetadata("test-sample", SEQUENCE_DICTIONARY),
                    IntStream.range(0, NUM_INTERVALS)
                            .mapToObj(i -> new SimpleCount(originalIntervals.get(i),
                                    (int) counts.getEntry(counts.getRowDimension() - 1, i)))
                            .collect(Collectors.toList()));
            final SVDDenoisedCopyRatioResult denoisedResult = panelOfNormals.denoise(sampleCounts, expectedNumberOfEigenvalues);

            //check that the denoised log2 copy ratios are sufficiently denoised
            final CopyRatioCollection denoisedCopyRatios = denoisedResult.getDenoisedCopyRatios();
            final double denoisedLog2CRStandardDeviation = new StandardDeviation().evaluate(
                    denoisedCopyRatios.getLog2CopyRatioValues().stream()
                            .mapToDouble(x -> x)
                            .toArray());
            Assert.assertTrue(denoisedLog2CRStandardDeviation < DENOISED_LOG2CR_STANDARD_DEVIATION_THRESHOLD);

            //check that the denoised log2 copy ratios are less noisy than the standardized log2 copy ratios
            final CopyRatioCollection standardizedCopyRatios = denoisedResult.getStandardizedCopyRatios();
            final double standardizedLog2CRStandardDeviation = new StandardDeviation().evaluate(
                    standardizedCopyRatios.getLog2CopyRatioValues().stream()
                            .mapToDouble(x -> x)
                            .toArray());
            Assert.assertTrue(denoisedLog2CRStandardDeviation < standardizedLog2CRStandardDeviation);

            //denoise first sample (which is a bad sample) in original counts using true number of eigenvalues
            final SimpleCountCollection badSampleCounts = new SimpleCountCollection(
                    new SimpleSampleLocatableMetadata("bad-test-sample", SEQUENCE_DICTIONARY),
                    IntStream.range(0, NUM_INTERVALS)
                            .mapToObj(i -> new SimpleCount(originalIntervals.get(i), (int) counts.getEntry(0, i)))
                            .collect(Collectors.toList()));
            final SVDDenoisedCopyRatioResult badDenoisedResult = panelOfNormals.denoise(badSampleCounts, expectedNumberOfEigenvalues);

            //check that the denoised log2 copy ratios are not sufficiently denoised
            final CopyRatioCollection badDenoisedCopyRatios = badDenoisedResult.getDenoisedCopyRatios();
            final double badDenoisedLog2CRStandardDeviation = new StandardDeviation().evaluate(
                    badDenoisedCopyRatios.getLog2CopyRatioValues().stream()
                            .mapToDouble(x -> x)
                            .toArray());
            Assert.assertFalse(badDenoisedLog2CRStandardDeviation < DENOISED_LOG2CR_STANDARD_DEVIATION_THRESHOLD);
        }
    }
}
// XOR encryption key manager for zcryp.
// Copyright 2014 <NAME> <<EMAIL>>.
// Provided under the terms of the MIT license in the included LICENSE file.
package main

// KeyState maintains the state of the XOR key.
type KeyState struct {
	key            string
	keylen, keyidx int
}

// Create a new KeyState based on an input key of arbitrary length.
func NewKeyState(key string) *KeyState {
	keystate := new(KeyState)

	keystate.key = key
	keystate.keylen = len(keystate.key)
	keystate.keyidx = 0

	return keystate
}

// Fetch the next byte of the KeyState and increment the internal cursor.
func (keystate *KeyState) NextByte() byte {
	nextbyte := keystate.key[keystate.keyidx]

	keystate.keyidx++
	if keystate.keyidx >= keystate.keylen {
		keystate.keyidx = 0
	}

	return nextbyte
}
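The file above only manages the key; the XOR step that consumes NextByte() is not shown. As a rough sketch of how such a cycling key is usually applied (an assumption about the surrounding zcryp code, not taken from it), in Python:

def xor_crypt(data: bytes, key: bytes) -> bytes:
    """XOR each input byte with the next key byte, cycling through the key.

    Mirrors the KeyState behaviour above: the key index wraps around when it
    reaches the end of the key. XOR is symmetric, so the same call decrypts.
    """
    out = bytearray()
    keyidx = 0
    for b in data:
        out.append(b ^ key[keyidx])
        keyidx = (keyidx + 1) % len(key)
    return bytes(out)


ciphertext = xor_crypt(b"attack at dawn", b"secret")
assert xor_crypt(ciphertext, b"secret") == b"attack at dawn"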
The impact of Covid-19 pandemic on community-oriented mental health services: The experience of Friuli Venezia Giulia region, Italy

Objectives
To assess the changes in prevalence, incidence and hospitalisation rates during the first four months of 2020, compared to the same period of 2019, in Friuli Venezia Giulia Mental Health Departments (MHDs); to analyse the features of MHDs patients tested for Sars-Cov-2; and to monitor whether MHDs applied and adhered to regional recommendations.

Methods
Observational study using MHDs' administrative data and individual data on suspected and positive cases of Sars-Cov-2. Adherence to recommendations was assessed using 21 indicators. Changes in rates were calculated by Poisson regression analysis, while the Fisher exact test was used for assessing differences between suspected and positive cases.

Results
The decrease in voluntary admission rates per 100,000 inhabitants in hospital services was significantly larger from January to April 2020, compared to the same period of 2019 (P<0.001), while no other data showed a significant decrease. Among the 82 cases tested for Sars-Cov-2, five were positive, and they differed significantly from suspected cases only in that they were at home or in supported housing facilities prior to the test. The MHDs mostly complied with the indicators in the month after the publication of the recommendations.

Conclusions
Outpatient services continued to work normally during the emergency, while hospital services decreased their activities. A low number of positive cases was found among MHDs' users, which might be linked to a rapid reconversion of services, with an extensive use of home visits and telepsychiatry. These preliminary data should be interpreted with caution, due to the small sample size and the limited period of observation.

Introduction
The Coronavirus 2019 pandemic, due to Severe Acute Respiratory Syndrome Coronavirus 2 (Sars-Cov-2), has heavily affected not only physical health but the whole daily life of the general population, with an increased burden on global public health and the social system. In this context, Italy was the first European country to face the pandemic emergency. The first patient in Italy was found positive on the 29th of January 2020, although the pandemic had probably been spreading for months before. Nonetheless, from February the incidence of contagion dramatically increased, with 1000 deaths reported by the 28th of March. The pervasive contagion, along with the consequent widespread fear and the necessary containment measures to limit the outbreak (e.g. quarantine and lockdown), is likely to lead to psychological consequences in the general population, but the distress in people with pre-existing psychiatric conditions is likely to have been greater. Although the general literature provides evidence of a greater susceptibility of psychiatric patients to environmental stress related to trauma, a recent review focusing on the psychological impact of quarantine included only one study considering mental illness as a risk factor. Studies suggest that psychiatric patients seem to be at high risk of infection, due to pre-existing disorders, unhealthy lifestyle, cognitive impairment or reduced awareness of the risk. In contrast, recent findings from the United Kingdom (UK) indicated that mentally ill patients were at lower risk of contracting the infection compared to the general population.
Nonetheless, the continuity of mental care and the integration of mental health services with public health are crucial to guarantee an effective response to the Covid-19 emergency. International agencies, such as the United Nations (UN), the World Health Organization (WHO) and the World Federation for Mental Health, drafted editorials and documents addressing the urgent need for actions on mental health to reduce the detrimental effect of the pandemic on psychiatric patients. Despite these recommendations, mental care has been reported to be generally under-resourced and under-prepared in assisting new psychiatric patients and those already in its care. However, community mental health systems, well integrated with general healthcare, have demonstrated better adaptability compared to systems based on inpatient care. In Italy, different population subgroups were identified as being at high risk of developing severe psychological consequences linked to the Covid-19 pandemic, including patients with mental disorders. Therefore, the need to reorganize mental health services and develop new approaches for a rapid response to the crisis quickly became clear. By March 2020, for instance, the Italian Society of Epidemiological Psychiatry (SIEP) had already published a list of practical recommendations for Mental Health Departments (MHDs) to ensure the best possible psychiatric health care during the emergency. The aim of these recommendations was to avoid the spread of the infection and to maintain an adequate level of care support, also developing new strategies to cope with this reality, such as telemedicine.

Unfortunately, early evidence from Italian MHDs showed a different reality, similar to the international scenario. During the emergency, a number of psychiatric emergency units were converted to Covid-19 services, and professionals were re-designated to work there. Moreover, many community services, where patients typically spend several hours a day finding immediate help for their problems, were severely restricted or temporarily closed. A general decrease of hospitalisations in inpatient units, as well as in psychiatric consultations, was also observed. Concern has been expressed for those severely ill patients who have disappeared from mental health services and are holed up in other settings without any kind of support. Global data on the extent of cases affected by the Covid-19 pandemic among people with mental disorders are largely missing. This is striking, since such data are needed to correctly draft and apply helpful recommendations for mental care. Furthermore, there is a lack of evidence regarding the success of existing recommendations in containing the infection and in keeping services working to support patients, whose opportunities for social contact are increasingly reduced, and who suffer increased social isolation and loneliness as a result.

According to the WHO, the Friuli Venezia Giulia (FVG) region is an example of how the Italian movement realized deinstitutionalization with the development of community-based mental healthcare and social inclusion programs. Our hypothesis, hence, is that a strong community mental care network has additional and more flexible tools to rapidly adapt to a sudden healthcare emergency. In parallel, we expected a greater prevalence of Sars-Cov-2 infection among psychiatric patients, with a consequent high burden and pressure on mental care. The aim of the present study was: i.
to assess the changes in administrative prevalence, incidence and hospitalisation rates during the first four months of 2020 compared to the same period of the previous year; ii. to analyse the sociodemographic and clinical features of patients tested for Sars-Cov-2 infection among those in the care of regional mental health services; and iii. to monitor whether regional mental health services applied and adhered to regional recommendations, using a series of indicators.

Materials and methods
The study was set in Friuli Venezia Giulia (FVG), an Italian north-eastern region with a population of 1.2 million inhabitants. Three MHDs, strongly community-based, provide mental care to 20,551 individuals aged 18 years and older. The main services of the MHDs are Community Mental Health Centres (CMHCs), each of which looks after a catchment area of 50,000 to 80,000 inhabitants. Seventeen regional CMHCs are open 24 h a day, 7 days a week, with four to eight beds each (CMHC/24h). The remaining 5 CMHCs are open 12 h a day, 6 days a week, without beds. CMHCs deal with most mental care needs, including management of acute conditions, prevention of mental illness, pharmacological treatment, and rehabilitation. There are also several different "supported housing" facilities, managed by the Third Sector in collaboration with MHDs, where psychiatric patients can be hosted. Hospital psychiatric services are provided by three "General Hospital Psychiatric Units" (GHPU), with the lowest number of beds used for psychiatric acute care in Italy (2.6 beds per 100,000 inhabitants).

Aggregated data on individuals in the care of MHDs
The FVG Regional Social and Health Information System (SISSR) was used to retrieve data on all subjects registered as patients of the regional MHDs from the 1st of January 2020 to the 30th of April 2020 (pandemic period) and from the 1st of January 2019 to the 30th of April 2019 (control period). The pandemic period covered the recognition, initiation and subsequent acceleration interval of the initial wave of Covid-19. SISSR is an administrative database wherein a unique anonymous identifier is provided for each individual resident in the Region. It also covers all the regional public healthcare services, which represent almost 100% of mental care, since there are no private clinics in the region. We have provided a detailed description of SISSR and used it in other studies. Data retrieved for the present study included: administrative prevalence and administrative incidence of all subjects in the care of MHDs during the above periods, date of admission and date of discharge of each hospitalization in GHPUs and CMHCs/24h, type of hospitalization (voluntary admission (VA) and compulsory admission (CA)), and length of each hospitalization (LoS). Compulsory hospitalization is regulated by Italian law (Articles 33, 34, 35 and 64 of the General Health Law no. 833/1978). The FVG region is characterised by a low CA rate compared to other Italian regions.

Individual data on suspected and positive Sars-Cov-2 cases in the care of MHDs
Information on individuals with mental disorders tested for the Sars-Cov-2 virus was obtained from the MHDs' database available on SISSR and used for the monitoring of healthcare services during the pandemic. Individual data, with a unique anonymous identifier, included all subjects in the care of regional MHDs, involving patients hospitalised in GHPUs, CMHCs and different supported housing solutions, as well as patients at home.
For the present study, we used data retrieved from the 1st of February to the 31st of May 2020. Data included: gender, age, nationality, marital status, occupational status, presence of comorbid medical conditions or cognitive impairment, suicide attempts, main psychiatric diagnosis, pharmacological treatment, dates of each nasopharyngeal swab test execution and provenience prior to the test. In cases of hospitalization, date of admission and date of discharge of each hospitalization in GHPUs and CMHCs/24h, type of hospitalization (VA and CA) and length of each hospitalization were included.

The operative recommendations for regional MHDs during the Sars-Cov-2 pandemic
In response to the pandemic emergency, on the 22nd of April 2020 the "Direzione Centrale Salute, Politiche Sociali e Disabilità" of the FVG region published a policy document called "Prevenzione e gestione infezione COVID 19 - Indicazioni organizzative e gestionali per i Servizi per la salute mentale" (Prevention and management of the COVID 19 infection: organizational and administrative recommendations for mental health services), which included several operative recommendations regarding the organization of mental health services during the pandemic. These recommendations mostly summarized the practical changes in how services should be administered, which MHDs had already been applying. The recommendations were also published as Best Practices during the Covid-19 emergency by the Italian National Agency for Regional Healthcare Services (AGENAS). A set of 21 monitoring indicators was then adopted to monitor how closely regional MHDs followed these recommendations in the subsequent months of the pandemic emergency. In the present study, the monitoring period ran from the publication of the document to the 31st of May 2020.

Statistical analysis
Poisson regression analysis was used to compare the prevalence and incidence of MHDs' patients across the two study periods (2019 and 2020). Poisson regression analysis was also used to compare rates of VA and CA to GHPU and CMHC/24h, and the respective lengths of hospitalization, in the same periods. Continuous variables (age, days of hospitalization) were summarized using the median as a measure of central tendency and the range as a measure of dispersion. All other variables were dichotomous or categorical and were tabulated into contingency tables; the Fisher exact test was used to test the differences between proportions. A P-value (P) < 0.05 was set as the threshold for statistical significance. Descriptive and inferential analyses were performed using the statistical software Stata/SE (version 15.1).

Administrative prevalence, incidence and hospitalizations
The prevalence of patients in the care of regional MHDs was 12.9 patients per 1000 inhabitants during the control period; this rate decreased to 11.9 patients per 1000 inhabitants during the pandemic period (Poisson P = 0.41). The incidence rate was 1.8 patients per 1000 inhabitants during the control period; the rate decreased to 1.3 patients per 1000 inhabitants during the pandemic period (Poisson P = 0.37). As shown in Fig. 1, VA rates in GHPU decreased from 7.53 in January 2019 to 5.98 hospitalizations per 100,000 inhabitants in April 2019, while the reduction was from 6.27 to 3.28 hospitalizations per 100,000 inhabitants during the same period of 2020 (Poisson P < 0.001). When VA rates were calculated in CMHC, they decreased from 20.17 to 12.36 in 2019 and from 23.65 to 8.98 in 2020 (Poisson P = 0.41).
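To make the comparison described in the Statistical analysis paragraph above concrete, here is a minimal Python sketch of the two tests. The study itself used Stata/SE 15.1; this statsmodels/scipy version is only an illustrative analogue, and all counts in it are made up, not the study data.

# Illustrative analogue of the analyses described above (Stata was used in the study).
# Counts and population figures below are made up for the example.
import numpy as np
import statsmodels.api as sm
from scipy.stats import fisher_exact

# Poisson regression comparing event rates between two periods:
# log(E[count]) = b0 + b1 * period, with log(population) as offset.
counts = np.array([265, 212])            # e.g. admissions in Jan-Apr 2019 vs Jan-Apr 2020
population = np.array([1.2e6, 1.2e6])    # person denominator for each period
period = np.array([0, 1])                # 0 = control period, 1 = pandemic period
X = sm.add_constant(period)
model = sm.GLM(counts, X, family=sm.families.Poisson(), offset=np.log(population)).fit()
print(model.summary())                   # the p-value on 'period' tests the rate change

# Fisher exact test comparing a proportion between positive and suspected cases,
# e.g. provenience prior to the test (home/supported housing vs hospitalised).
table = [[4, 1],    # positive cases: at home / hospitalised (hypothetical split)
         [22, 55]]  # suspected cases: at home / hospitalised (hypothetical split)
odds_ratio, p_value = fisher_exact(table)
print(odds_ratio, p_value)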
CA rates slightly increased during the first four months of both 2019 and 2020, but this rise was not significant either in GHPU (Poisson P = 0.12) or in CMHC (Poisson P = 0.15). The mean LoS of VA in GHPU decreased from 7.1 to 6.1 days per hospitalization during the control and pandemic period, respectively, while increasing in CMHC from 16.1 to 18.6 days per hospitalization. The mean LoS of CA in GHPU decreased from 11.7

[Table 1. Sociodemographic and clinical characteristics of patients suspected and positive to Sars-Cov-2 infection in regional mental health services. Fisher exact test and respective P-values were used to assess the differences between proportions. Significant P-values are highlighted in bold.]

Suspected and positive cases of Sars-Cov-2
Among the 82 cases tested for Sars-Cov-2 infection, only five (6.1%) tested positive; one of these cases died due to Sars-Cov-2 complications. The other four positive cases were hospitalized in general hospital Covid wards with MHD support. No positive cases were found among patients hospitalized in GHPU and CMHC/24h during the emergency. The mean age for positive cases was 48.6 (median = 49.5, range = 34-74), while for suspected cases the mean was 57.1 (median = 57.0, range = 19-80). As summarized in Table 1, there were no significant differences between suspected and positive cases with regard to socio-demographic or clinical characteristics, with the exception of provenience prior to the test. Positive cases were more likely to be at home or in supported housing facilities (P = 0.005). Fifty-five (71.4%) out of the 77 suspected cases were hospitalized in MH services during the emergency, 38 in GHPU (70%) and 17 in CMHC (30%). Eight of them were hospitalized more than once. The mean length of first hospitalization was 16.4 days (median = 12.5, range = 1-58) in GHPU, and 48.4 days (median = 16.5, range = 1-263) in CMHC. Only 11 out of the 55 hospitalized subjects were CAs, mainly in GHPU.

Monitoring of operative recommendations for MHDs
As shown in Table 2, a set of 21 monitoring indicators was selected and divided into three macro areas: "Covid" team, Preventive interventions and Reorganization of activities. The MHDs generally complied with the indicators in the month after the publication of the operative recommendations. One MHD only partially reorganized ambulatory activities, home visits and transports.

[Table 2. Indicators for the monitoring of operative indications for regional MHDs during the Sars-Cov-2 epidemic, explanation of indicators and respective results.]

Although an increased use of telepsychiatry was immediately activated in all MHDs, according to indicator 6, one MHD had problems activating video calls for meetings and, consequently, had to reduce group rehabilitation activities delivered through remote devices.

Inpatient and outpatient mental health services
A significant decrease of VAs in GHPU was observed during the pandemic period compared to the control period, while no significant changes were found regarding CAs in GHPU or either hospitalisation type in CMHC/24h. This decrease was paralleled by a decreased incidence of patients seeking mental care during the pandemic period, although this was not significant. Although no literature is available with regard to CMHCs, another Italian study focusing on admissions to seven GHPUs in the Lombardy region showed a significant decrease of VAs during the pandemic period compared to the same period in 2019.
Similarly, findings from Emilia Romagna showed a decrease in both hospital admissions and accesses to the ER for psychiatric disorders in March and April 2020. A reduced use of psychiatric emergency services was also described in other countries, such as Germany and France. The international evidence of a lower number of patients seeking mental care is cause for concern, since a greater degree of mental health problems during, and as a consequence of, lockdown or similar restrictive measures can be expected at the general population level. Possible explanations for this reduction may be a more cautious attitude of healthcare professionals or an increased family tolerance of behavioural problems due to the fear of contamination in hospital. Moreover, the lockdown may have had an attenuating effect on the experience of marginalization of the patient with psychiatric disorders, who suddenly shared the same position as other people. The need to develop and implement innovative strategies for mental health care delivery has been strongly advocated. In this regard, our finding of no significant change in admissions between the pandemic and the control period in CMHCs was relevant. CMHCs in FVG remained open during the pandemic crisis and were able to offer an appropriate level of healthcare, especially through the enhancement of home services. Moreover, a proportion of patients that in normal conditions would have been hospitalised for mental care were instead followed at home due to the Covid-19 emergency. We also found an increase in the LoS of patients in CMHC/24h during the pandemic period, especially for CAs, but this did not apply to GHPU. In contrast, a study from Lombardy GHPUs described an increased LoS during the Covid-19 emergency. We agree that increased LoS may be due to the difficulties involved with discharging patients safely during the emergency, the greater availability of free beds, or the delay in swab testing. In FVG, however, psychiatric patients, even those with severe conditions, had the possibility of being discharged from hospital and hosted in outpatient services. This could have led to CMHCs being chosen for managing patients with psychopathological difficulties, decreasing the pressure on hospitals, which were more engaged in the crisis response. From this first piece of data, community services demonstrated a greater ability to maintain an adequate level of activity and provide continuity of care during the pandemic emergency, showing a greater adaptability compared to hospitals. This was in line with a recent summary of different international experiences regarding mental care provision during the emergency. In contrast with this evidence, however, community mental health services worldwide tended to close, strongly reduce or even convert their services in favour of physical care.

Extent of the Covid-19 pandemic among subjects with mental disorders
In contrast with our a priori hypothesis, subjects with mental disorders were not found to test positive for Sars-Cov-2 infection more often than the general population. The cumulative incidence in the FVG population was 2.69 positive cases per 100,000 inhabitants on the 31st of May 2020. We did not find, however, other studies with individual data on patients positive for Sars-Cov-2 among mental health services. Our findings differed both from the hypothesis of a greater susceptibility to Sars-Cov-2 among psychiatric patients and from that of their lower ability to adhere to restrictions, such as social distancing and the use of masks.
Although only 82 patients were tested for Covid-19 out of a total of 20,551 MHDs' users, it is likely that patients with severe mental disorders (SMD), who have frequent contacts with mental health services, were administered swab tests after being identified by MHDs. In a community-based context, mental health professionals often represent a primary contact point with general healthcare for their patients with SMD and, thus, in many cases act as first responders during the pandemic. When tested, however, only 6% of MHDs' users suspected to have Sars-Cov-2 tested positive for the illness. Positive cases did not differ from suspected cases for any of the sociodemographic and clinical variables considered, except the provenience prior to the test. That is, patients who tested positive for Sars-Cov-2 were more likely to be at home or in supported housing facilities than hospitalised in MHDs' services. This finding differed markedly from preliminary data from a GHPU in Lombardy, where 15 positive cases were hospitalised in only 20 days during March 2020. Although in FVG almost 40% of suspected cases were in GHPUs, none tested positive for the infection. This may have to do with a different regional epidemiological situation compared to Lombardy, but it is likely also linked to FVG's different MHD organisation, where a strong community system allowed a more rapid response to the pandemic emergency and permitted the identification of positive cases prior to hospitalisation, for example during home visits. This consequently might have had a role in preventing the spread of infection inside mental health services.

Application and adherence to regional recommendations for MHDs
Another possible explanation of the good response to the pandemic emergency in FVG MHDs may be related to a rapid reorganization of services and staff. Although operative recommendations were delivered to MHDs only at the end of April 2020, when the pandemic was showing a slight deceleration, the great majority of changes in routine practice were applied during the acceleration phase. Major challenges included the initial provision of PPE for professionals and users, as well as the rapid technological set-up for a broader use of remote devices. These issues were similar for the general healthcare sector, especially in countries which experienced an explosive acceleration of the pandemic, such as Italy, although research from Catalonia (Spain), which has been facing a situation similar to Italy's, showed an exponential increase in teleconsultations from March 2020. In spite of these challenges, regional MHDs complied quickly with the operative recommendations, and the monitoring indicators were mostly achieved within one month of their delivery. The changes included an increased use of remote devices, which was observed in all MHDs. A key point of this positive outcome may be found in the so-called "whole system recovery-oriented approach" which distinguishes the mental health system of FVG. This approach is characterised by a multi-sectoral provision of services by multiple professional organizations that are closely integrated and connected. New forms of social connection established during the health emergency period can further enhance the collective effort to tackle the social problems deriving from the pandemic. This may also be helpful for professionals working in mental health services.
A study from the UK, for instance, highlighted that keeping mental care services working and maintaining contact with the community during the emergency was helpful, as was the communication between staff members.

Strengths and limitations
Due to the availability of a database that recorded most of the cases identified and monitored in the region, a main strength of this study was that we could analyse the situation of a mental health system during a key phase of the Covid-19 pandemic from three different perspectives: the burden on mental health services, the size and characteristics of MHDs' users directly involved in the pandemic, and the operational changes put into action in mental health services. The study also shed light on the importance of combining expert-based information and representative administrative population data for a rapid response to the Covid-19 crisis, which can drive better local decision-making. However, a number of limitations should be acknowledged. Firstly, the period considered was short, hindering the possibility of carrying out further considerations as the emergency progressed. The study period included the recognition, initiation and acceleration interval of the pandemic in Italy; follow-up studies should apply systems for coding the phases of the pandemic, in order to allow international comparison and consequently enhance the understanding of the impact of Covid-19 on health planning. Secondly, we did not consider emergency psychiatric consultations, with a consequent possible loss of information on the impact of pandemic-related stress among patients with pre-existing psychiatric conditions. Thirdly, the limited sample size, especially of cases positive for Sars-Cov-2 infection, hindered more detailed analysis. This small number of positive and suspected cases may be due to a lack of nasopharyngeal swab tests during the study period, but this applied to the general population as well. Another reason for the small number of positive and suspected cases may be a selection bias, since patients without SMD may not have been in close contact with MHDs. Swab tests, hence, could have been done on the recommendation of general practitioners, or patients may have been hospitalised in general care, without involving MHDs. This could have led to an underestimation of positive cases with mental disorders. On the other hand, it is reasonably likely that most patients with SMD were included. Since they also represent the individuals theoretically most subject to the negative effects of infection, this low number seems an important epidemiological achievement. Finally, the strong adherence to monitoring indicators may be linked to the readiness of mental health professionals to tackle a new emergency, while adherence to recommendations monitored over a longer period may be diluted. In this context, continuous monitoring or the use of direct interviews with MHDs' professionals may provide qualitative data on telepsychiatry, in order to further support the hypothesis that a community mental health system is able to provide rapid responses.

Conclusions
In line with our first hypothesis, we demonstrated that a community-based mental health system may be better placed to respond to events such as the Covid-19 pandemic. The high level of integration between primary, community and tertiary elements of healthcare may enhance the flexibility of services, making rapid reorganisation more feasible.
Further, familiarity with personalised and tailored approaches may allow the reconversion of interventions, also with the aid of telepsychiatry. In this context, a community-based model may reduce patients' feelings of isolation and loneliness and, thus, prevent the risk of relapse. This was also confirmed by the epidemiological data. Outpatient services continued to work during the emergency more or less in the same way they did previously, while hospital mental care services strongly decreased their activities. In contrast to our second hypothesis, however, we observed a very low number of Covid-19 cases among MHDs' users. Possible explanations for the low rate of infection might be linked to a rapid reconversion of services, with an extensive use of home visits and telepsychiatry, together with a probable good compliance with general preventive rules (i.e. handwashing, use of PPE, social distancing, and so on). Nonetheless, our preliminary data should be interpreted with caution, due to the small sample size and the limited period of observation. More research is needed to assess whether similar data can be replicated in other regions or countries, especially those characterised by a strong community-based mental health system.
PhiC31 recombination system demonstrates heritable germinal transmission of site-specific excision from the Arabidopsis genome Background The large serine recombinase phiC31 from broad host range Streptomyces temperate phage, catalyzes the site-specific recombination of two recognition sites that differ in sequence, typically known as attachment sites attB and attP. Previously, we characterized the phiC31 catalytic activity and modes of action in the fission yeast Schizosaccharomyces pombe. Results In this work, the phiC31 recombinase gene was placed under the control of the Arabidopsis OXS3 promoter and introduced into Arabidopsis harboring a chromosomally integrated attB and attP-flanked target sequence. The phiC31 recombinase excised the attB and attP-flanked DNA, and the excision event was detected in subsequent generations in the absence of the phiC31 gene, indicating germinal transmission was possible. We further verified that the genomic excision was conservative and that introduction of a functional recombinase can be achieved through secondary transformation as well as manual crossing. Conclusion The phiC31 system performs site-specific recombination in germinal tissue, a prerequisite for generating stable lines with unwanted DNA removed. The precise site-specific deletion by phiC31 in planta demonstrates that the recombinase can be used to remove selectable markers or other introduced transgenes that are no longer desired and therefore can be a useful tool for genome engineering in plants. Background Plant biotechnology has a role in addressing global needs for food, fiber and fuel, by developing new crop varieties with increased pest resistance, biofortification, and abiotic stress tolerance. Publicly acceptable forms of biotechnology offer an avenue for meeting these demands . Recombinase-mediated genetic engineering provides a favorable direction for enhancing the precision of biotechnological approaches. Concerns over the presence of antibiotic resistance genes in the food supply and their escape into the environment can be relieved through the use of recombinase technology to excise unwanted DNA from the genome of genetically engineered (GE) crops prior to marketing or release . A study by Chawla and colleagues documented how site-specific integration in rice exhibited stable gene expression over multiple generations. The research also demonstrated that rice with multicopy transgene inserts, initially silenced for expression, recovered expression when resolved by recombinase technology to a single genomic copy. Such studies demonstrate other potential uses for recombinase technology in the development of plant biotechnology. Genomic engineering took a large step forward with the discovery that site-specific recombinases, a group of enzymes that are capable of precise DNA cleavage and ligation without the gain or loss of nucleotides, could facilitate conservative DNA manipulation in a heterologous host . The recombinase super family is split into two fundamental groups, the tyrosine and serine enzymes. This grouping is based on the active amino acid (Y or S) within the catalytic domain of each enzyme family. The best known tyrosine recombinases are Cre, Flp and R . Tyrosine recombinases utilize identical recognition sites and perform a bi-directional mode of recombination. They have been shown to be effective for excision of unwanted DNA from the genome of the host but require complex schemes for integration. 
The serine enzyme group includes the phiC31, TP901-1 and Bxb1 recombinases among others . Members of this group recognize two non-identical recognition sites (attB and attP) and perform a uni-directional mode of recombination. While less research has been conducted on this group, it appears that the serine enzymes are well suited for precise genomic recombination due to their uni-directional catalytic activity that prevents the reversion of recombination products. In previous studies, we identified a number of prokaryotic site-specific recombination systems that function in the eukaryote Schizosaccharomyces pombe . Among those, the phiC31 uni-directional recombinase was highly efficient. The system has been successfully shown capable of recombinase mediated excision, inversion and integration reactions. The phiC31-att system is derived from the broad host range Streptomyces temperate phage phiC31 . The 613 amino acid phiC31 protein acts on recognition sites attB and attP that are minimally 34 bp and 39 bp, respectively . Published evidence has demonstrated that the phiC31 system is functional for excision and transmission of marker-free plastids in the seed of tobacco and in the genome of Arabidopsis and wheat but has yet to be demonstrated capable of germinal transmission of nuclear DNA in planta. In this research, we tested the phiC31 recombination system for the capacity to germinally transmit a target sequence that has undergone site-specific excision from within the Arabidopsis genome to a subsequent generation in the absence of the recombinase gene. Plants transgenic for an attB and attP flanked target sequence were introduced with a second construct that contained the recombinase gene. The phiC31 recombinase performed excision of the target sequence from three independent plant lines (i.e. genomic locations) and generated stably excised progeny plants that carry only the recombined target DNA of interest in the absence of the recombinase gene. This demonstrates that the phiC31 recombination system is suitable for the generation of stable marker-free, recombinase-free transgenic plants. Experimental design To test for site-specific recombination, we initially sought to use a gain-of-function strategy whereby excision of a transgene would lead to promoter fusion with a previously distal marker . Hence, pN3-phiC31 was configured with a CaMV 35S promoter (35S) proximal to a 760 bp non-coding stuffer region followed by a distal gusA coding region (Fig. 1a). The stuffer region is flanked in direct orientation by the 54 bp attB and 57 bp attP phiC31 attachment sites (Fig. 1d) derived from pPB-phiC31 located in the binary vector pCambia 1301 http://www.cambia.org/daisy/cambia. The expectation was that prior to site-specific recombination, 35S would not drive expression of gusA due the presence of the stuffer region. After recombination, the non-coding stuffer would be removed and activate expression of gusA (Fig. 1c). In this strategy, we first introduced the recombination target (pN3-phiC31) into the Arabidopsis genome via Agrobacterium transformation. The target construct contains hptII (hygromycin phosphotransferase II) for selection of transgenic plants and was intentionally placed outside of the recombination recognition sites (and thus is not excised by phiC31) to aid the tracking of excised plants. These target lines, or 'TA' lines, were then transformed with the second construct, pCOXS3-phiC31 ( Fig. 1b) that expresses the recombinase gene to produce the 'TR' lines. 
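To make the excision design concrete, the following is a minimal toy sketch (in Python, not part of the authors' work) that treats the 35S-attB-stuffer-attP-gusA target as a plain string: the sequence between directly oriented att sites is removed and a single hybrid site is left behind, bringing the promoter next to gusA. The site tokens are placeholders, not the real 54 bp attB and 57 bp attP sequences.

# Toy model of phiC31-mediated excision between directly oriented att sites.
# Site tokens are placeholders, not the actual attB/attP sequences.
ATT_B = "attB"
ATT_P = "attP"
ATT_HYBRID = "attL"  # hybrid site left on the chromosome after excision

def excise(locus: str) -> tuple:
    """Remove the DNA between attB and attP, leaving one hybrid site.

    Returns (recombined_locus, excised_fragment).
    """
    b = locus.index(ATT_B)
    p = locus.index(ATT_P, b + len(ATT_B))
    stuffer = locus[b + len(ATT_B):p]
    recombined = locus[:b] + ATT_HYBRID + locus[p + len(ATT_P):]
    return recombined, stuffer  # the fragment circularises, carrying the other hybrid site

# Before excision the stuffer separates the 35S promoter from gusA, so gusA is silent.
locus = "35S-" + ATT_B + "-stuffer-" + ATT_P + "-gusA"
after, circle = excise(locus)
print(after)   # 35S-attL-gusA  -> promoter now adjacent to gusA
print(circle)  # -stuffer-

The gain-of-function readout in the construct relies on exactly this rearrangement: once the stuffer is gone, 35S sits next to gusA.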
Upon site-specific excision of the recognition site-flanked DNA, the TR1 plants were backcrossed to wild-type plants and the BC1 progeny screened for segregants that retain the excision event but lack the recombinase gene (Fig. 2). Target lines for phiC31 recombination The target construct pN3-phiC31 was introduced into Arabidopsis and 23 hygromycin-resistant lines were confirmed by PCR detection of a 1.26 kb product that spans the recognition site-flanked non-coding stuffer region (data not shown). Of those, 13 pN3-phiC31 lines were propagated to the TA2 generation and examined by Southern blot for single-copy T-DNA integration. EcoRI or BamHI each cuts once within the target T-DNA (Fig. 1a). Hybridization with a gusA probe of EcoRI- or BamHI-cleaved genomic DNA should reveal a band size >4.17 kb, the length of the cleaved T-DNA. A hybridizing band <4.17 kb would indicate integration of a truncated T-DNA. From this analysis, three of the 13 pN3-phiC31 plants were determined to contain a single copy of a likely complete T-DNA (data not shown) and designated TA2-phiC31.22, 31, and 34. The 1.26 kb PCR product from each of these lines was sequenced to confirm the presence of intact attB and attP sites (Fig. 1d). Arabidopsis OXS3 promoter for expression of phiC31 As previous research has demonstrated successful germline tissue expression of the parA and cre recombinase genes, we chose the 1.5 kb promoter fragment of the Arabidopsis Oxidative Stress 3 gene (OXS3) (AGI At5g56550) for phiC31 gene expression and termed the plasmid pCOXS3-phiC31 (Fig. 1b). Independent research, through the use of tiling microarrays, has also confirmed that the OXS3 gene is constitutively expressed in most Arabidopsis tissues. Secondary transformation of TA target lines The TA3 generation of phiC31.22, 31 and 34 plant lines was transformed with Agrobacterium harboring the pCOXS3-phiC31 vector. Kanamycin-resistant transformants that exhibited wild-type appearance and growth rate were identified and grown in the greenhouse. Three-week-old TR1 transformants were tested for the presence of the phiC31 gene. PCR amplification with primers g and h (Fig. 1b) showed that a majority of the plants harbor the recombinase gene (Fig. 3). The groups of plants that harbor the phiC31 gene were designated TR1-phiC31.22, 31 and 34 (Table 1). The TR1-phiC31 lines were examined using histochemical staining to detect gusA-encoded β-glucuronidase activity. GUS expression in the TR1-phiC31 lines, however, showed variable levels of β-glucuronidase activity.
[Fig. 1 caption (not to scale): a) pN3-phiC31; b) pCOXS3-phiC31; c) predicted single-copy T-DNA structures after excision of the stuffer by phiC31-att recombination. PCR primers shown as e, f, g, h; att sites as grey arrowheads; hybridization probes as grey rectangles. Abbreviations: B, BamHI; E, EcoRI; V, EcoRV; X, XhoI; RB, T-DNA right border; LB, T-DNA left border. Lengths in kb of PCR products (dotted lines) and DNA fragments (dashed lines). d) Sequence of the 54 bp attB and 57 bp attP phiC31 recognition sites; the minimal required sequence is underlined and the 2-nucleotide 'AA' core region of crossover is in bold. e) Sequence of a PCR product detecting a conservative site-specific excision event. Not shown are the gene terminators and promoters for hptII (hygromycin phosphotransferase II) and nptII (neomycin phosphotransferase II) and the gene terminator for gusA (β-glucuronidase).]
Initially we attributed this reduced activity to lower levels of phiC31-mediated excision, but PCR analysis of lines where GUS activity was weak or undetectable were positive for excision of the target DNA. Given that the screening for GUS activity was not a reliable indicator of phiC31 site-specific recombination, we subsequently utilized PCR to screen for site-specific excision. With the 65 TR1-phiC31.22, 31 TR 1 -phiC31.31 and 19 TR 1 -phiC31.34 individuals, PCR with primers e and f (Fig. 1c) detected a 0.44 kb product expected for sitespecific excision (Fig. 3a). However, the 1.26 kb product representing the parental configuration was also detected in some individuals, which indicates the presence of unexcised target DNA. As each individual harbors an independent COXS3-phiC31 T-DNA integration at a different genomic location, with perhaps a different copy number or structural arrangements, the incomplete excision in some individuals may be due to variability in recombinase gene expression. Removal of the phiC31 gene by segregation To determine if the genomic excision event occurred in the germline tissue, we examined whether the excised target was heritably transmitted to the progeny lacking the phiC31 gene. This analysis further resolved whether or not the excision reaction was generated de novo in each generation. We chose 5 individuals ( Table 2) from each of the TR 1 -phiC31.22, TR 1 -phiC31.31 and TR 1 -phiC31.34 families to pollinate wild type recipients. The backcross progenies (BC 1 ) were grown without selection and then screened by PCR for the target locus (primers e and f) and the recombinase gene (primers g and h), which reveals whether excision occurred (0.44 kb band) or not (1.26 kb band) and if phiC31 was present or absent ( Fig. 3c, d). With the TR 1 -phiC31.22, TR 1 -phiC31.31 and TR 1 -phiC31.34, 59% (115 of 194), 78% (178 of 227) and 55% (118 of 214) of the BC 1 plants harbored the target DNA, respectively. For the five TR 1 -phiC31.22 plants that were backcrossed, 93% of the plants (107 of 115) that harbor the target locus showed excision of the attB and attPflanked DNA, with 48% (51 of 107) lacking the recombinase gene ( Table 2). Of the TR 1 -phiC31.31 plants, 80% (142 of 178) of target plants showed excision of the attB and attP-flanked target, and 43% (61 of 142) lack the recombinase gene ( Table 2). A total of 87% of the TR 1 -phiC31.34 plants (103 of 118) harbored the target locus with excision of the attB and attP-flanked DNA, 1% (1 of 103) lacked the recombinase gene ( Table 2). The genomic excision 0.44 kb PCR product from two representative individuals from each family was sequenced and examined for conservative recombination. All of the phiC31-mediated excision PCR products sequenced were conservative and site specific (GenBank accession No. GU564447, Fig. 1e). BC 1 progeny for molecular confirmation BC 1 plants that showed excision but lacked the recombinase gene were self-fertilized to yield progeny designated S 1 -phiC31. PCR analysis on these plants again confirmed excision in the absence of the phiC31 recombinase gene (Fig. 4a, b), which indicates germinal transmission of the excision event. For further confirmation, Southern blot hybridization was conducted on some of these S 1 individuals. The genomic DNA was isolated and cleaved with EcoRV, which is expected to liberate either a 1.77 kb or a 0.96 kb fragment from the non-recombined or recombined structure, respectively (Fig. 1a, c). 
The GUS1350 probe detected the 1.77 kb band in the parental lines but not in the S 1 plants (Fig. 5a, lanes 1-6). Instead, only the 0.96 kb band was observed for S 1 plants from the TR 1 -phiC31 lineage. Genomic DNA was also cleaved with XhoI, which should liberate a 0.88 kb fragment if the genome were to harbor a COXS3-phiC31 T-DNA. Hybridization with the NPT690 probe detected the nptII gene fragment in the parental controls but not in the S 1 plants determined to be excision positive and phiC31 negative (Fig. 5b, lanes 1-5) with the exception of a non-segregated S 1 -phiC31.34.9 plant line that contains both the excision product and the recombinase expression cassette (Fig. 5b, lane 6). Discussion Our interest in site-specific recombination lies in its ability to facilitate crop improvement through controlled engineering of the plant genome. Recently transgenic corn has been deregulated for the production of high lysine, a consumer directed product . Further, this transgenic crop was engineered with the assistance of the site-specific recombinase technology for marker removal. Deregulation in this case required extensive studies to ensure that the recombinase mediated excision event was heritably transmitted to subsequent generations in the absence of the recombinase gene . Such agricultural requirements, while obviously necessary, have elicited few detailed studies on the transmission of recombined chromosome transmission to progeny plants. The recombinase systems Cre/lox, Flp/ FRT, R/RS, β/six and ParA/MRS have all been shown capable of germinal transmission in planta . Therefore, our research investigated the publicly available phiC31 recombination system as a potential tool for the precise removal of plant transgenes. In order to demonstrate its utility for crop genome engineering and increase public acceptance of transgenic technology, the potential for predefined nuclear excision events and their germinal transmission was investigated. An advantage of phiC31 over existing recombinase systems is its unidirectional recombination activity, which prevents the re-insertion of the excision product into the genome. In addition, phiC31 has the ability to site-specifically integrate DNA into the host genome making this a versatile enzyme. Our strategy began with the assumption that we could use gusA expression as a reporter for site-specific recombination. The pattern of GUS enzyme activity would reveal genomic excision of the target sequence and any tissue specificity in recombination. This strategy, however, failed to perform as expected with initial excised plants being either weak or completely devoid of GUS activity. Subsequent analysis of the original TR 1 -phiC31 progeny confirmed that use of reporter enzyme activity was an unreliable indicator of excision. We had also observed this phenomenon with other constructs used in both Arabidopsis and S. pombe . It is possible that the 54 bp attB/P hybrid sequence present within the transcript leader sequence of the gusA gene may cause poor expression due to methylation or by some other mechanism that inhibits gene expression. Due to this circumstance, the analysis and scoring of site-specific excision was performed using PCR. Site-specific excision was detected in all TR 1 -phiC31. 22 . By this measure, it appears that the phiC31 recombinase mediated excision efficiency is more effective than ParA and approaching that of the Cre-lox system. 
Although, the majority of the BC 1 lines displayed excised genomic target, it is difficult to give a precise quantitative assessment of the phiC31 activity since only a modest number of different target locations were thoroughly characterized. Variability in copy number and chromosome locations of the phiC31 gene can affect the amount of recombinase protein produced and thus impact the efficiency of the excision reaction observed, making a direct comparison difficult. Other excision strategies for the phiC31 recombinase are being investigated. These include the use of inducible or tissue specific promoters for controllable expression use of self-deleting designs and use of viral inoculation or Agrobacterium-infiltration for immediate but transient expression . As an alternative method of recombinase introduction into the plant target lines, our lab tested hand pollination between phiC31 recombinase expressing plants and pN3-phiC31 target plants. PCR analysis of the manually crossed MC 1 progeny demonstrated that this is a viable method for the generation of individuals with genomic target excision (Fig. 6). However, it was observed that like secondary Agrobacterium transformation with the recombinase expression cassette, the genomic excision results varied between lines ( Table 3). Use of a demonstrated recombinase expression line such as phiC31.31.83 (Table 3) enabled sufficient recombinase mediated excision events to fully excise all target DNA when crossed together. It was also observed that segregation of the secondary Agrobacterium transformed TR 1 lines, without benefit of backcrossing, produced excised target and recombinase expression-only T-DNA lines in the TR 2 and TR 3 generations (data not shown). This indicates that the phiC31 expression T-DNA in these lines was at a single locus or a low number of loci within the genome and that expression was sufficient to facilitate recombination allowing segregation by self-pollination. Since PCR assays of genomic DNA from leaf tissue only indicates that excision has occurred in somatic cells, we utilized Southern blot analysis to ascertain whether target sequence removal had occurred in the germline. As long as phiC31 DNA was present in the genome, or the phiC31 protein was present in the germline cells, the possibility that recombination was generated de novo could not be ruled out. Hence, BC 1 plants were screened by PCR for the absence of the phiC31 recombinase gene, and the following generation (S 1 plants) was confirmed by Southern blot hybridization. As is clearly shown in Fig. 5 lanes #1 -5, germinal transmission of the genomic excision event in the absence of the phiC31 recombinase gene occurred, illustrating that the production of stable lines with the unwanted DNA removed can be achieved. Controlled targeted integration with recombinase technology allows the application of more sophisticated recombinase strategies . This technology enables the production of precisely engineered transgenic plants through genome specific transgene integration and has been reported to function in Arabidopsis, tobacco and rice with Cre, Flp and R recombinase systems. The phiC31 recombinase with its uni-directional catalytic activity presents a novel way to facilitate stable sitespecific integration events without the elaborate strategies required by the bi-directional systems. Peerreviewed literature reported that phiC31 is capable of mammalian genome targeting and targeted integration into the plastid genome of tobacco . 
Utilization of phiC31 for genome modification has been facilitated in mammalian species through the identification of cryptic attB or attP sites as potential locations for transgene introduction . To this end our lab investigated, in silico, the presence of sequences similar to the phiC31 att sites within the Arabidopsis thaliana genome. We used a BLASTn search to investigate whether the Arabidopsis genome contains sequences similar to the minimal 34 bp attB and 39 bp attP sites . The genomic sequences with the highest similarity to the att sites exhibited >60% overall nucleotide identity. A total of seven sequences had 21-23 (61.8-67.7%) of the 34 nucleotides conserved with the minimal attB sequence, while 14 native sequences had 24-27 (61.5-69.2%) nucleotides in common with the 39 bp attP sequence (Fig. 7). While most of the sequences including the best matches for attP did contain the conserved core domain presumably essential for phiC31-mediated recombination, only three of the attB-like sequences contained the core sequence ( Fig. 1d; Fig. 7). It is possible that some of these att-like sequences could potentially be used as a native target site for phiC31 mediated integration in Arabidopsis. Pseudo phiC31 attP sequences in the mouse, bovine and human genomes have been reported and some of them have been shown suitable for integration of introduced DNA . Although unlikely, the potential for genomic excision, inversion and translocation mediated by these cryptic att sequences in Arabidopsis is possible. For excision, Arabidopsis chromosomes 3 and 5 carry both attB and attP-like sequences in direct orientation (Fig. 7). The closest correctly oriented sites are located >500 kb apart on chromosome 3, but the cryptic attB does not contain a conserved core domain. Although it is theoretically possible that genomic recombination could occur via endogenous att-like sequences, the OXS3 promoter-phiC31 plants did not exhibit compromised viability, morphological or growth defects. This differs from earlier observations using a 35S-phiC31 construct where Arabidopsis plants with crinkled leaves were common . Hence, this underscores the importance in controlling expression of the recombinase gene through appropriate use of promoters. Conclusion The purpose of the research was to provide proof-ofconcept that the phiC31 recombinase can mediate sitespecific genome modification in the plant germline tissue without affecting fecundity. The research established that the excision event was passed to subsequent generations in the absence of phiC31 and that the excision of attB and attP-flanked DNA from the plant genome was a conservative site-specific event. In a majority of the phiC31 lines examined (11 out of 15), at least one BC 1 segregant was recovered that contained a germinally transmitted excision event lacking the phiC31 gene. These results were validated with Southern blot hybridization and demonstrate that the secondary transformation strategy used in this study is feasible for the production of marker-free transgenic plants. This approach may prove particularly useful in those species where cross pollination is not possible or undesirable. We further demonstrate that an alternative approach to marker removal where the recombinase is introduced into the excision test target plants with cross pollination is also a viable strategy. Molecular analysis confirmed that the genomic excision was site-specific and conservative. 
Therefore, taken together, the results clearly establish that the phiC31 system performs genomic excision, generating stable transgenic recombinase-free Arabidopsis plants with unwanted DNA removed. DNA constructs pN3-phiC31 (GenBank accession No. GU564446) (Fig. 1a): An NheI-attB-stuffer-attP-AscI fragment was retrieved from pPB-phiC31 and inserted into the binary vector pCambia-1301 (http://www.cambia.org/daisy/cambia), in which the NcoI site between 35S and gusA had been changed to SpeI and AscI. The vector contains hptII (hygromycin phosphotransferase II) for selection in plants outside the region of site-specific excision to allow for progeny tracking. The pN3-phiC31exc vector for control lanes (Fig. 3, 4 and 6, lane E) was generated by removal of the non-coding stuffer region by recombinase-mediated excision in bacteria.
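As a rough companion to the in-silico screen for endogenous att-like sequences described in the Discussion, the sketch below (an assumption-laden stand-in, not the authors' BLASTn pipeline) slides a minimal att-sized query along a genomic string and reports windows whose nucleotide identity clears a threshold; both sequences are invented.

# Rough stand-in for the screen for att-like sequences: slide the minimal att
# site along a genomic sequence and report windows above an identity threshold.
def identity(a: str, b: str) -> float:
    """Fraction of positions that match between two equal-length strings."""
    return sum(x == y for x, y in zip(a, b)) / len(a)

def find_att_like(genome: str, att: str, min_identity: float = 0.6):
    """Yield (position, identity) for windows resembling the att site."""
    w = len(att)
    for i in range(len(genome) - w + 1):
        ident = identity(genome[i:i + w], att)
        if ident >= min_identity:
            yield i, ident

att_b_min = "ACGTACGTACGTACGTACGTACGTACGTACGTAC"        # invented 34 nt stand-in for the minimal attB
genome = "TTTACGTACGTACGTTCGTACGAACGTACGTACGTACGGG"    # invented fragment containing one att-like stretch
for pos, ident in find_att_like(genome, att_b_min, 0.6):
    print(f"att-like window at {pos}: {ident:.0%} identity")
# prints: att-like window at 3: 94% identity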
Cervical posture analysis in dental students and its correlation with temporomandibular disorder Abstract Objective: To evaluate the relationship between temporomandibular disorders (TMD) and craniocervical posture in the sagittal plane measured from lateral radiographs of the head. Methods: The sample was comprised of 80 randomly selected students of dentistry at the Federal University of Rio Grande do Norte. Research Diagnostic Criteria for TMD (RDC/TMD) was used to evaluate the signs and symptoms of TMD. Lateral radiographs of each individual were used to measure the position of the hyoid bone, the craniocervical angle, and the occiput–atlas distance. A chi-square test was used to evaluate the relationships between craniocervical posture measures and TMD. Results: No relationship was found between TMD and the craniocervical posture measured by the positioning of the hyoid bone, head rotation, and the extension/flexion of the head (p > 0.05). Conclusion: It can be concluded, therefore, that no relationship exists between cervical posture in the sagittal plane and TMD.
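For readers unfamiliar with the statistics, the following hedged sketch shows the kind of chi-square test of independence the abstract refers to, using scipy; the 2x2 counts are hypothetical (only the total of 80 matches the study) and are not the paper's data.

# Illustration of a chi-square test relating a dichotomised posture measure to
# TMD status. The counts below are hypothetical, not the study's data.
from scipy.stats import chi2_contingency

#                  TMD present, TMD absent
table = [[14, 26],   # altered craniocervical posture (hypothetical)
         [16, 24]]   # normal craniocervical posture  (hypothetical)

chi2, p, dof, expected = chi2_contingency(table)
print(f"chi2 = {chi2:.2f}, dof = {dof}, p = {p:.3f}")
# A p-value above 0.05 would be consistent with the paper's finding of no
# association between sagittal cervical posture and TMD.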
#pragma once #include "Client.h" #define DllPublic __declspec( dllexport ) static std::shared_ptr<Client> g_client; namespace API { DllPublic int Initialize(const wchar_t *path, const wchar_t *file); DllPublic void ResetInitialize(); namespace Other { DllPublic int GetWeatherID(); DllPublic int SetKey(int key, bool pressed); DllPublic int Useless(int useless); DllPublic int Useless1(); } namespace SAMP { namespace Chat { DllPublic void Send(const wchar_t *message); DllPublic void AddMessage(const wchar_t *message); DllPublic void AddMessage(const wchar_t *message, unsigned long color); DllPublic void GetText(wchar_t *text, int length); DllPublic void SetText(const wchar_t *text); DllPublic void GetBufferMessage(int index, wchar_t *message, int length); DllPublic void AddBufferMessage(const wchar_t *message); DllPublic void SetCursorPosition(int index); DllPublic void SetCursorPosition(int indexStart, int indexEnd); DllPublic void Toggle(bool state); DllPublic void Clear(); DllPublic bool IsOpen(); } namespace Dialog { DllPublic void GetText(wchar_t *text, int length); DllPublic void SetText(const wchar_t *text); DllPublic void ClearText(); DllPublic void SetCursorPosition(int index); DllPublic void SetCursorPosition(int indexStart, int indexEnd); DllPublic int GetID(); DllPublic int GetStringCount(); DllPublic void GetStringByIndex(int index, wchar_t *text, int length); DllPublic bool IsOpen(); DllPublic void Close(int reason); DllPublic void SelectIndex(int index); DllPublic void BlockGetCaption(wchar_t *caption, int length); DllPublic void BlockGetText(wchar_t *text, int length); DllPublic void Block(int id, const wchar_t *text); DllPublic bool BlockHasBlockedDialog(); DllPublic bool BlockHasNeedBlocking(); } namespace Player { DllPublic void GetLocalName(wchar_t *name, int length); DllPublic int GetLocalId(); DllPublic void GetNameById(int id, wchar_t *name, int length); DllPublic int GetIdByName(const wchar_t *name); DllPublic void GetFullName(const wchar_t *name, wchar_t *fullName, int length); } namespace Vehicle { DllPublic void GetNumberplate(wchar_t *numberplate, int length); DllPublic void ToggleSiren(bool state); } } namespace Overlay { namespace Box { DllPublic int Create(); DllPublic void Delete(int id); DllPublic void SetColor(int id, unsigned long color); DllPublic void SetX(int id, int x); DllPublic void SetY(int id, int y); DllPublic void SetWidth(int id, int width); DllPublic void SetHeight(int id, int height); DllPublic void SetActive(int id, bool active); } namespace Text { DllPublic int Create(); DllPublic void Delete(int id); DllPublic void SetColor(int id, unsigned long color); DllPublic void SetX(int id, int x); DllPublic void SetY(int id, int y); DllPublic void SetMaxWidth(int id, int maxWidth); DllPublic void SetMaxHeight(int id, int maxHeight); DllPublic void SetActive(int id, bool active); DllPublic void SetText(int id, const wchar_t *text); DllPublic void SetSize(int id, int size); DllPublic void SetUseMaxWidth(int id, bool state); DllPublic void SetUseMaxHeight(int id, bool state); DllPublic void GetTextExtent(int id, int &width, int &height); } } namespace Player { DllPublic int GetHealth(); DllPublic int GetArmor(); DllPublic int GetInteriorId(); DllPublic void GetCity(wchar_t *city, int length); DllPublic void GetDistrict(wchar_t *district, int length); DllPublic void GetPosition(float &x, float &y, float &z); DllPublic float GetYaw(); DllPublic bool InInterior(); DllPublic bool InVehicle(); DllPublic bool IsDriver(); DllPublic bool IsFreezed(); } namespace 
Vehicle { DllPublic float GetSpeed(); DllPublic float GetHealth(); DllPublic int GetModelID(); DllPublic bool IsLightActive(); DllPublic bool IsLocked(); DllPublic bool IsEngineRunning(); DllPublic bool UseHorn(); DllPublic bool UseSiren(); } }
#include <deci.hpp>

namespace deci {

  value_t::~value_t() {
    ;
  }

}
/*
 * capacitySWF is just a mock function that returns len, purely to keep the
 * capacity computation uniform. We still need to make payload types children
 * of all classes.
 */
unsigned int PayloadServer::capacitySWF(char* buf, int len)
{
  (void)buf;  // the buffer is unused in this mock implementation
  return len;
}
/* /********************************************************** /* Low-level reading: buffer reload /********************************************************** */ protected final boolean loadMoreCtrip() throws IOException { if (_inputStream != null) { _currInputProcessed += _inputEnd; int count = _inputStream.read(_inputBuffer, 0, _inputBuffer.length); if (count > 0) { _currentEndOffset = _parsingContext.adjustEnd(_inputEnd); _inputPtr = 0; _inputEnd = count; return true; } _closeInput(); if (count == 0) { throw new IOException("InputStream.read() returned 0 characters when trying to read "+_inputBuffer.length+" bytes"); } } return false; }
// ExecuteRules against the target inspector server func (c Client) ExecuteRules(rules []rule.Rule) ([]rule.Result, error) { serverSideRules := getServerSideRules(rules) d, err := json.Marshal(serverSideRules) if err != nil { return nil, fmt.Errorf("error marshaling check request: %v", err) } resp, err := http.Post(fmt.Sprintf("http://%s%s", c.TargetNode, executeEndpoint), "application/json", bytes.NewReader(d)) if err != nil { return nil, fmt.Errorf("error posting request to server: %v", err) } defer resp.Body.Close() if resp.StatusCode == http.StatusInternalServerError { errMsg := &serverError{} if err = json.NewDecoder(resp.Body).Decode(errMsg); err != nil { return nil, fmt.Errorf("failed to decode server response: %v. Server sent %q status", err, resp.Status) } return nil, fmt.Errorf("server sent %q status: error from server: %s", http.StatusInternalServerError, errMsg.Error) } if resp.StatusCode != http.StatusOK { return nil, fmt.Errorf("server responded with non-successful status: %q", resp.Status) } results := []rule.Result{} err = json.NewDecoder(resp.Body).Decode(&results) if err != nil { return nil, fmt.Errorf("error decoding server response: %v", err) } clientSideRules := getClientSideRules(rules) remoteResults, err := c.engine.ExecuteRules(clientSideRules, c.TargetNodeFacts) if err != nil { return nil, err } results = append(results, remoteResults...) endpoint := fmt.Sprintf("http://%s%s", c.TargetNode, closeEndpoint) resp, err = http.Get(endpoint) if err != nil { return nil, fmt.Errorf("GET request to %q failed. You might have to restart the inspector server. Error was: %v", endpoint, err) } return results, nil }
package com.shf15Single; import java.lang.reflect.Constructor; import java.lang.reflect.Field; public class Demo01 { public static void main(String[] args) throws Exception { // LazyMan instance1 = LazyMan.getInstance(); Field shuhongfan = LazyMan.class.getDeclaredField("shuhongfan"); shuhongfan.setAccessible(true); Constructor<LazyMan> declaredConstructor = LazyMan.class.getDeclaredConstructor(null); // 无视私有构造器 declaredConstructor.setAccessible(true); LazyMan instance1 = declaredConstructor.newInstance(); shuhongfan.set(instance1,false); LazyMan instance2 = declaredConstructor.newInstance(); System.out.println(instance1); System.out.println(instance2); } }
def hh(a, b, c, d):
    # Combine the four 0/1 inputs with XOR, OR and AND.
    A = a ^ b
    B = c or d
    C = b and c
    D = a ^ d
    return (A and B) ^ (C or D)

# Read four 0/1 integers, one per line.
a = int(input())
b = int(input())
c = int(input())
d = int(input())
print(hh(a, b, c, d))
import numpy as np import torch import torch.nn as nn import torch.nn.functional as F from torch.nn.utils.rnn import pad_sequence def hardest_negative(loss_values): hard_negative = np.argmax(loss_values) return hard_negative if loss_values[hard_negative] > 0 else None def random_hard_negative(loss_values): hard_negatives = np.where(loss_values > 0)[0] return np.random.choice(hard_negatives) if len(hard_negatives) > 0 else None def semihard_negative(loss_values, margin): semihard_negatives = np.where( np.logical_and(loss_values < margin, loss_values > 0) )[0] return np.random.choice(semihard_negatives) if len(semihard_negatives) > 0 else None class FunctionNegativeTripletSelector: """ For each positive pair, takes the hardest negative sample (with the greatest triplet loss value) to create a triplet Margin should match the margin used in triplet loss. negative_selection_fn should take array of loss_values for a given anchor-positive pair and all negative samples and return a negative index for that pair """ def __init__(self, margin, negative_selection_fn): super(FunctionNegativeTripletSelector, self).__init__() self.margin = margin self.negative_selection_fn = negative_selection_fn def get_triplets(self, distance_matrix, gt_corr_ms, numPlanes1, numPlanes2): batch_size, row_len, column_len = distance_matrix.size() # create mask masks = np.ones(distance_matrix.size()) for batch_idx, (num1, num2) in enumerate(zip(numPlanes1, numPlanes2)): masks[batch_idx][: num1[0], : num2[0]] = 0 distance_matrix_masked = distance_matrix.cpu().data.numpy() distance_matrix_masked[masks > 0.5] = 100 # Reshape distance_matrix distance_matrix_flatten = distance_matrix_masked.reshape(-1, column_len) gt_corr_ms_flatten = gt_corr_ms.reshape(-1, column_len) # add offset to gt_corr and remove redundent corrs anchor_positives = gt_corr_ms_flatten.nonzero() selected_rows = distance_matrix_flatten[anchor_positives[:, 0], :] ap_distances = distance_matrix_flatten[ anchor_positives[:, 0], anchor_positives[:, 1] ].reshape(-1, 1) positive_mask = gt_corr_ms_flatten[anchor_positives[:, 0], :] loss_total = ap_distances - selected_rows + self.margin assert (np.array(positive_mask.size()) == loss_total.shape).all() try: loss_total[positive_mask] = 0 except: print(loss_total) print(positive_mask) return torch.LongTensor([[0, 0, 0]]) # select semihard_negatives triplets = [] for loss, anchor_positive in zip(loss_total, anchor_positives): negative_idx = self.negative_selection_fn(loss) if negative_idx is not None: triplets.append([anchor_positive[0], anchor_positive[1], negative_idx]) if len(triplets) == 0: # print("triplet length is 0") triplets.append([0, 0, 0]) triplets = np.array(triplets) return torch.LongTensor(triplets) class OnlineTripletLoss(nn.Module): """ Online Triplets loss Takes a batch of embeddings and corresponding labels. 
Triplets are generated using triplet_selector object that take embeddings and targets and return indices of triplets """ def __init__(self, margin, device, selector_type, max_num_planes=20): super(OnlineTripletLoss, self).__init__() self.margin = margin if selector_type == "semihard": self.triplet_selector = FunctionNegativeTripletSelector( margin=margin, negative_selection_fn=lambda x: semihard_negative(x, margin), ) elif selector_type == "hardest": self.triplet_selector = FunctionNegativeTripletSelector( margin=margin, negative_selection_fn=hardest_negative ) elif selector_type == "random": self.triplet_selector = FunctionNegativeTripletSelector( margin=margin, negative_selection_fn=random_hard_negative ) else: raise NotImplementedError self.device = torch.device(device) self.max_num_planes = max_num_planes def pack_data(self, pred_instances1, pred_instances2, batched_inputs=None): if isinstance(pred_instances1[0], dict): has_instances_key = True else: has_instances_key = False embeddings1 = [] embeddings2 = [] numPlanes1 = [] numPlanes2 = [] for x in pred_instances1: if has_instances_key: embedding_tmp = x["instances"].embedding else: embedding_tmp = x.embedding embeddings1.append(embedding_tmp) numPlanes1.append([len(embedding_tmp)]) embeddings1 = pad_sequence(embeddings1, batch_first=True) for x in pred_instances2: if has_instances_key: embedding_tmp = x["instances"].embedding else: embedding_tmp = x.embedding embeddings2.append(embedding_tmp) numPlanes2.append([len(embedding_tmp)]) embeddings2 = pad_sequence(embeddings2, batch_first=True) gt_corr_ms = [] if batched_inputs is not None: for x1, x2, x in zip(pred_instances1, pred_instances2, batched_inputs): gt_plane_idx1 = x1.gt_plane_idx gt_plane_idx2 = x2.gt_plane_idx gt_corr = x["gt_corrs"] gt_corr_m = torch.zeros( [len(embeddings1[0]), len(embeddings2[0])], dtype=torch.bool ) for corr in gt_corr: x1_idx = (gt_plane_idx1 == corr[0]).nonzero()[:, 0] x2_idx = (gt_plane_idx2 == corr[1]).nonzero()[:, 0] for i in x1_idx: for j in x2_idx: gt_corr_m[i, j] = True gt_corr_ms.append(gt_corr_m) gt_corr_ms = torch.stack(gt_corr_ms) return embeddings1, embeddings2, gt_corr_ms, numPlanes1, numPlanes2 def forward(self, batched_inputs, pred_instances1, pred_instances2, loss_weight): embeddings1, embeddings2, gt_corr_ms, numPlanes1, numPlanes2 = self.pack_data( pred_instances1, pred_instances2, batched_inputs ) # torch.cdist is similar to scipy.spatial.distance.cdist # input: embedding1 B*N1*D, embedding2 B*N2*D, # output: B*N1*N2. Each entry is ||e1-e2|| distance_matrix = torch.cdist(embeddings1, embeddings2, p=2) # get triplets triplets = self.triplet_selector.get_triplets( distance_matrix, gt_corr_ms, numPlanes1, numPlanes2 ) distance_matrix_flatten = distance_matrix.view(-1, distance_matrix.size(2)) # calculate loss ap_distances = distance_matrix_flatten[triplets[:, 0], triplets[:, 1]] an_distances = distance_matrix_flatten[triplets[:, 0], triplets[:, 2]] losses = F.relu(ap_distances - an_distances + self.margin) return {"embedding_loss": loss_weight * losses.mean()} def inference(self, pred_instances1, pred_instances2): embeddings1, embeddings2, _, numPlanes1, numPlanes2 = self.pack_data( pred_instances1, pred_instances2 ) # torch.cdist is similar to scipy.spatial.distance.cdist # input: embedding1 B*N1*D, embedding2 B*N2*D, # output: B*N1*N2. 
Each entry is ||e1-e2|| distance_matrix = torch.cdist(embeddings1, embeddings2, p=2) return distance_matrix, numPlanes1, numPlanes2 class OnlineRelaxMatchLoss(nn.Module): """ Online Triplets loss Takes a batch of embeddings and corresponding labels. Triplets are generated using triplet_selector object that take embeddings and targets and return indices of triplets """ def __init__(self, margin, device, max_num_planes=20): super(OnlineRelaxMatchLoss, self).__init__() self.margin = margin self.device = torch.device(device) self.max_num_planes = max_num_planes def pack_data(self, pred_instances1, pred_instances2, batched_inputs=None): numCorr = [] gt_corr = [] if batched_inputs is not None: assert "gt_corrs" in batched_inputs[0] for x in batched_inputs: numCorr.append(torch.tensor([len(x["gt_corrs"])])) gt_corr.append(torch.tensor(x["gt_corrs"])) gt_corr = pad_sequence(gt_corr, batch_first=True).numpy() if isinstance(pred_instances1[0], dict): has_instances_key = True else: has_instances_key = False embeddings1 = [] embeddings2 = [] numPlanes1 = [] numPlanes2 = [] for x in pred_instances1: if has_instances_key: embedding_tmp = x["instances"].embedding else: embedding_tmp = x.embedding embeddings1.append(embedding_tmp) numPlanes1.append([len(embedding_tmp)]) embeddings1 = pad_sequence(embeddings1, batch_first=True) for x in pred_instances2: if has_instances_key: embedding_tmp = x["instances"].embedding else: embedding_tmp = x.embedding embeddings2.append(embedding_tmp) numPlanes2.append([len(embedding_tmp)]) embeddings2 = pad_sequence(embeddings2, batch_first=True) return embeddings1, embeddings2, gt_corr, numPlanes1, numPlanes2, numCorr def get_gt_corr_matrix(self, gt_corr, num_corr, pred): corr_ms = [] zeros = torch.zeros_like(pred[0]) for corr, num in zip(gt_corr, num_corr): corr_m = zeros.clone() for i, j in corr[: num[0]]: corr_m[i, j] = 1.0 corr_ms.append(corr_m) return torch.stack(corr_ms).cuda() def forward(self, batched_inputs, pred_instances1, pred_instances2, loss_weight): ( embeddings1, embeddings2, gt_corr, numPlanes1, numPlanes2, numCorr, ) = self.pack_data(pred_instances1, pred_instances2, batched_inputs) affinity_matrix = torch.sigmoid( torch.bmm(embeddings1, torch.transpose(embeddings2, 1, 2)) * 5.0 ) gt_corr_m = self.get_gt_corr_matrix(gt_corr, numCorr, affinity_matrix) zero_grad_mask = (affinity_matrix < 1 - self.margin) * (1 - gt_corr_m) losses = F.mse_loss( affinity_matrix * (1 - zero_grad_mask), gt_corr_m * (1 - zero_grad_mask), reduction="sum", ) losses /= len(batched_inputs) return {"embedding_loss": loss_weight * losses} def inference(self, pred_instances1, pred_instances2): embeddings1, embeddings2, _, numPlanes1, numPlanes2, _ = self.pack_data( pred_instances1, pred_instances2 ) affinity_matrix = torch.sigmoid( torch.bmm(embeddings1, torch.transpose(embeddings2, 1, 2)) * 5.0 ) affinity_matrix[affinity_matrix < 1 - self.margin] = 0.0 pred_corr_m = affinity_matrix return pred_corr_m, numPlanes1, numPlanes2 class CooperativeTripletLoss(nn.Module): """ Loss for ASNet Cosine similarity as weight """ def __init__(self, margin, device, selector_type, max_num_planes=20): super(CooperativeTripletLoss, self).__init__() self.margin = margin if selector_type == "semihard": self.triplet_selector = FunctionNegativeTripletSelector( margin=margin, negative_selection_fn=lambda x: semihard_negative(x, margin), ) elif selector_type == "hardest": self.triplet_selector = FunctionNegativeTripletSelector( margin=margin, negative_selection_fn=hardest_negative ) elif selector_type == 
"random": self.triplet_selector = FunctionNegativeTripletSelector( margin=margin, negative_selection_fn=random_hard_negative ) else: raise NotImplementedError self.device = torch.device(device) self.max_num_planes = max_num_planes def pack_data(self, pred_instances1, pred_instances2, batched_inputs=None): if isinstance(pred_instances1[0], dict): has_instances_key = True else: has_instances_key = False embeddings1_c = [] embeddings1_s = [] embeddings2_c = [] embeddings2_s = [] numPlanes1 = [] numPlanes2 = [] for x in pred_instances1: if has_instances_key: embedding_tmp_c = x["instances"].embedding_c embedding_tmp_s = x["instances"].embedding_s else: embedding_tmp_c = x.embedding_c embedding_tmp_s = x.embedding_s embeddings1_c.append(embedding_tmp_c) embeddings1_s.append(embedding_tmp_s) numPlanes1.append([len(embedding_tmp_c)]) embeddings1_c = pad_sequence(embeddings1_c, batch_first=True) embeddings1_s = pad_sequence(embeddings1_s, batch_first=True) for x in pred_instances2: if has_instances_key: embedding_tmp_c = x["instances"].embedding_c embedding_tmp_s = x["instances"].embedding_s else: embedding_tmp_c = x.embedding_c embedding_tmp_s = x.embedding_s embeddings2_c.append(embedding_tmp_c) embeddings2_s.append(embedding_tmp_s) numPlanes2.append([len(embedding_tmp_c)]) embeddings2_c = pad_sequence(embeddings2_c, batch_first=True) embeddings2_s = pad_sequence(embeddings2_s, batch_first=True) gt_corr_ms = [] if batched_inputs is not None: for x1, x2, x in zip(pred_instances1, pred_instances2, batched_inputs): gt_plane_idx1 = x1.gt_plane_idx gt_plane_idx2 = x2.gt_plane_idx gt_corr = x["gt_corrs"] gt_corr_m = torch.zeros( [len(embeddings1_c[0]), len(embeddings2_c[0])], dtype=torch.bool ) for corr in gt_corr: x1_idx = (gt_plane_idx1 == corr[0]).nonzero()[:, 0] x2_idx = (gt_plane_idx2 == corr[1]).nonzero()[:, 0] for i in x1_idx: for j in x2_idx: gt_corr_m[i, j] = True gt_corr_ms.append(gt_corr_m) gt_corr_ms = torch.stack(gt_corr_ms) return ( embeddings1_c, embeddings1_s, embeddings2_c, embeddings2_s, gt_corr_ms, numPlanes1, numPlanes2, ) def forward(self, batched_inputs, pred_instances1, pred_instances2, loss_weight): ( embeddings1_c, embeddings1_s, embeddings2_c, embeddings2_s, gt_corr_ms, numPlanes1, numPlanes2, ) = self.pack_data(pred_instances1, pred_instances2, batched_inputs) center_dist = torch.cdist(embeddings1_c, embeddings2_c, p=2) surrnd_dist = torch.cdist(embeddings1_s, embeddings2_s, p=2) surrnd_weigt = torch.cos(torch.asin(torch.clamp(surrnd_dist / 2, -1, 1)) * 2) distance_matrix = (1 - surrnd_weigt) * center_dist + surrnd_weigt * surrnd_dist triplets = self.triplet_selector.get_triplets( distance_matrix, gt_corr_ms, numPlanes1, numPlanes2 ) distance_matrix_flatten = distance_matrix.view(-1, distance_matrix.size(2)) # calculate loss ap_distances = distance_matrix_flatten[triplets[:, 0], triplets[:, 1]] an_distances = distance_matrix_flatten[triplets[:, 0], triplets[:, 2]] losses = F.relu(ap_distances - an_distances + self.margin) return {"embedding_loss": loss_weight * losses.mean()} def inference(self, pred_instances1, pred_instances2): ( embeddings1_c, embeddings1_s, embeddings2_c, embeddings2_s, _, numPlanes1, numPlanes2, ) = self.pack_data(pred_instances1, pred_instances2) center_dist = torch.cdist(embeddings1_c, embeddings2_c, p=2) surrnd_dist = torch.cdist(embeddings1_s, embeddings2_s, p=2) surrnd_weigt = torch.cos(torch.asin(torch.clamp(surrnd_dist / 2, -1, 1)) * 2) distance_matrix = (1 - surrnd_weigt) * center_dist + surrnd_weigt * surrnd_dist return distance_matrix, 
numPlanes1, numPlanes2
import React from 'react' export const useFetch = (url, options, triggerProps: any[] = []) => { const [response, setResponse] = React.useState<any>(null) const [error, setError] = React.useState(null) const [isLoading, setIsLoading] = React.useState(true) // const [refetch, setRefetch] = React.useState(null) const refetch = React.useCallback(async () => { setIsLoading(true) try { const response = await fetch(url, options) const json = await response.json() setResponse(json) setIsLoading(false) } catch (error) { setError(error) } }, [url, options]) React.useEffect(() => { refetch() }, triggerProps) return { response, error, isLoading, refetch } }
#include <bits/stdc++.h> #define int long long const int INF = 0x3f3f3f3f; const int M = 1e6 + 5; typedef long long ll; typedef unsigned long long ull; typedef double db; //char buf[1 << 23], *p1 = buf, *p2 = buf, obuf[1 << 23], *O = obuf; //#define getchar() (p1 == p2) && (p2 = (p1 = buf) + fread(buf, 1, 1 << 21, stdin), p1 == p2)? EOF : *p1++ int n; int a[M], b[M], sa[M], sb[M]; int to[M][2]; inline int read() { int f = 1, s = 0; char ch = getchar(); while(!isdigit(ch)) (ch == '-') && (f = -1), ch = getchar(); while(isdigit(ch)) s = (s << 1) + (s << 3) + (ch ^ 48), ch = getchar(); return f * s; } signed main() { n = read(); for(int i = 1; i <= n; ++i) a[i] = read(), sa[i] = sa[i - 1] + a[i]; for(int i = 1; i <= n; ++i) b[i] = read(), sb[i] = sb[i - 1] + b[i]; memset(to, -1, sizeof(to)); int j = 0; for(int i = 0; i <= n; ++i) { while(j <= n && sb[j] <= sa[i]) j ++; --j; int s = sa[i] - sb[j]; if(to[s][0] == -1) to[s][0] = i, to[s][1] = j; else { int now = to[s][0] + 1; std :: cout << i - now + 1 << '\n'; while(now <= i) std :: cout << now << ' ', now ++; std :: cout << '\n'; now = to[s][1] + 1; std :: cout << j - now + 1 << '\n'; while(now <= j) std :: cout << now << ' ', now ++; return 0; } } return 0; }
public class Text implements Widget {
    public int length;
    public String text;

    public Text(String tIn) {
        text = tIn;
        length = tIn.length();
    }

    @Override
    public void display() {
        System.out.printf("Text contains: %s\nIts length is: %d\n", text, length);
    }
}
<reponame>gflohr/Lingua-Poly import { Component, OnInit, OnDestroy } from '@angular/core'; import { TranslateService } from '@ngx-translate/core'; import { applicationConfig } from './app.config'; import * as fromAuth from './auth/reducers'; import * as fromRoot from './app.reducers'; import * as fromUser from './user/reducers'; import { Store, select } from '@ngrx/store'; import { Observable } from 'rxjs'; import { ConfigActions, MessageActions } from './core/actions'; import { ActivatedRoute, ParamMap, Router } from '@angular/router'; import { LinguaActions } from './lingua/actions'; @Component({ selector: 'app-root', templateUrl: './app.component.html', styleUrls: ['./app.component.css'], }) export class AppComponent implements OnInit, OnDestroy { loggedIn$: Observable<boolean>; uiLingua$: Observable<string>; constructor( private translate: TranslateService, private store: Store<fromRoot.State & fromAuth.State>, private userStore: Store<fromUser.State>, private route: ActivatedRoute, private router: Router, ) { this.translate.setDefaultLang(applicationConfig.defaultLocale); this.translate.use(applicationConfig.defaultLocale); this.loggedIn$ = this.store.pipe(select(fromAuth.selectLoggedIn)); this.uiLingua$ = this.userStore.pipe(select(fromUser.selectUILingua)); } ngOnInit() { this.store.dispatch(ConfigActions.configRequest()); this.route.queryParamMap.subscribe((params: ParamMap) => { if (params.get('error') !== null) { this.store.dispatch(MessageActions.displayError({ code: params.get('error')} )); this.router.navigate([this.router.url.split('?')[0]]); } }); this.route.paramMap.subscribe(params => { const lingua = params.get('uiLingua'); // FIXME! Check that the language actually changed. this.userStore.dispatch(LinguaActions.UILinguaChangeDetected({ lingua })); }); } ngOnDestroy() { } }
package com.redescooter.ses.service.scooter.dao;

import com.redescooter.ses.api.scooter.vo.emqx.ScooterMcuReportedDTO;
import org.apache.ibatis.annotations.Param;

/**
 * @author assert
 * @date 2020/11/23 17:58
 */
public interface ScooterMcuMapper {

    /**
     * Query the scooter MCU controller data by scooterNo and batchNo.
     * @param scooterNo,batchNo
     * @return com.redescooter.ses.api.scooter.vo.emqx.ScooterMcuReportedDTO
     * @author assert
     * @date 2020/11/20
     */
    ScooterMcuReportedDTO getScooterMcuByScooterNoAndBatchNo(@Param("scooterNo") String scooterNo, @Param("batchNo") Long batchNo);

    /**
     * Update the scooter MCU controller data by id.
     * @param scooterMcu
     * @return int
     * @author assert
     * @date 2020/11/23
     */
    int updateScooterMcuById(ScooterMcuReportedDTO scooterMcu);

    /**
     * Insert scooter MCU controller data.
     * @param scooterMcu
     * @return int
     * @author assert
     * @date 2020/11/23
     */
    int insertScooterMcu(ScooterMcuReportedDTO scooterMcu);
}
package model;

public class InvalidSubcription extends Exception {
    private char type;

    public InvalidSubcription(char type) {
        this.type = type;
    }

    public String getMessage() {
        return "Invalid Subscription of type " + type;
    }
}
<gh_stars>1-10 /** * @license * Copyright Google LLC All Rights Reserved. * * Use of this source code is governed by an MIT-style license that can be * found in the LICENSE file at https://angular.io/license */ import { json, tags } from '@angular-devkit/core'; import * as debug from 'debug'; import * as inquirer from 'inquirer'; import { v4 as uuidV4 } from 'uuid'; import { colors } from '../utilities/color'; import { getWorkspace, getWorkspaceRaw } from '../utilities/config'; import { isTTY } from '../utilities/tty'; import { AnalyticsCollector } from './analytics-collector'; /* eslint-disable no-console */ const analyticsDebug = debug('ng:analytics'); // Generate analytics, including settings and users. let _defaultAngularCliPropertyCache: string; export const AnalyticsProperties = { AngularCliProd: 'UA-8594346-29', AngularCliStaging: 'UA-8594346-32', get AngularCliDefault(): string { if (_defaultAngularCliPropertyCache) { return _defaultAngularCliPropertyCache; } const v = require('../package.json').version; // The logic is if it's a full version then we should use the prod GA property. if (/^\d+\.\d+\.\d+$/.test(v) && v !== '0.0.0') { _defaultAngularCliPropertyCache = AnalyticsProperties.AngularCliProd; } else { _defaultAngularCliPropertyCache = AnalyticsProperties.AngularCliStaging; } return _defaultAngularCliPropertyCache; }, }; /** * This is the ultimate safelist for checking if a package name is safe to report to analytics. */ export const analyticsPackageSafelist = [ /^@angular\//, /^@angular-devkit\//, /^@ngtools\//, '@schematics/angular', ]; export function isPackageNameSafeForAnalytics(name: string): boolean { return analyticsPackageSafelist.some((pattern) => { if (typeof pattern == 'string') { return pattern === name; } else { return pattern.test(name); } }); } /** * Set analytics settings. This does not work if the user is not inside a project. * @param level Which config to use. "global" for user-level, and "local" for project-level. * @param value Either a user ID, true to generate a new User ID, or false to disable analytics. */ export function setAnalyticsConfig(level: 'global' | 'local', value: string | boolean) { analyticsDebug('setting %s level analytics to: %s', level, value); const [config, configPath] = getWorkspaceRaw(level); if (!config || !configPath) { throw new Error(`Could not find ${level} workspace.`); } const cli = config.get(['cli']); if (cli !== undefined && !json.isJsonObject(cli as json.JsonValue)) { throw new Error(`Invalid config found at ${configPath}. CLI should be an object.`); } if (value === true) { value = uuidV4(); } config.modify(['cli', 'analytics'], value); config.save(); analyticsDebug('done'); } /** * Prompt the user for usage gathering permission. * @param force Whether to ask regardless of whether or not the user is using an interactive shell. * @return Whether or not the user was shown a prompt. */ export async function promptGlobalAnalytics(force = false) { analyticsDebug('prompting global analytics.'); if (force || isTTY()) { const answers = await inquirer.prompt<{ analytics: boolean }>([ { type: 'confirm', name: 'analytics', message: tags.stripIndents` Would you like to share anonymous usage data with the Angular Team at Google under Google’s Privacy Policy at https://policies.google.com/privacy? For more details and how to change this setting, see https://angular.io/analytics. 
`, default: false, }, ]); setAnalyticsConfig('global', answers.analytics); if (answers.analytics) { console.log(''); console.log(tags.stripIndent` Thank you for sharing anonymous usage data. If you change your mind, the following command will disable this feature entirely: ${colors.yellow('ng analytics off')} `); console.log(''); // Send back a ping with the user `optin`. const ua = new AnalyticsCollector(AnalyticsProperties.AngularCliDefault, 'optin'); ua.pageview('/telemetry/optin'); await ua.flush(); } else { // Send back a ping with the user `optout`. This is the only thing we send. const ua = new AnalyticsCollector(AnalyticsProperties.AngularCliDefault, 'optout'); ua.pageview('/telemetry/optout'); await ua.flush(); } return true; } else { analyticsDebug('Either STDOUT or STDIN are not TTY and we skipped the prompt.'); } return false; } /** * Prompt the user for usage gathering permission for the local project. Fails if there is no * local workspace. * @param force Whether to ask regardless of whether or not the user is using an interactive shell. * @return Whether or not the user was shown a prompt. */ export async function promptProjectAnalytics(force = false): Promise<boolean> { analyticsDebug('prompting user'); const [config, configPath] = getWorkspaceRaw('local'); if (!config || !configPath) { throw new Error(`Could not find a local workspace. Are you in a project?`); } if (force || isTTY()) { const answers = await inquirer.prompt<{ analytics: boolean }>([ { type: 'confirm', name: 'analytics', message: tags.stripIndents` Would you like to share anonymous usage data about this project with the Angular Team at Google under Google’s Privacy Policy at https://policies.google.com/privacy? For more details and how to change this setting, see https://angular.io/analytics. `, default: false, }, ]); setAnalyticsConfig('local', answers.analytics); if (answers.analytics) { console.log(''); console.log(tags.stripIndent` Thank you for sharing anonymous usage data. Would you change your mind, the following command will disable this feature entirely: ${colors.yellow('ng analytics project off')} `); console.log(''); // Send back a ping with the user `optin`. const ua = new AnalyticsCollector(AnalyticsProperties.AngularCliDefault, 'optin'); ua.pageview('/telemetry/project/optin'); await ua.flush(); } else { // Send back a ping with the user `optout`. This is the only thing we send. const ua = new AnalyticsCollector(AnalyticsProperties.AngularCliDefault, 'optout'); ua.pageview('/telemetry/project/optout'); await ua.flush(); } return true; } return false; } export async function hasGlobalAnalyticsConfiguration(): Promise<boolean> { try { const globalWorkspace = await getWorkspace('global'); const analyticsConfig: string | undefined | null | { uid?: string } = globalWorkspace && globalWorkspace.getCli() && globalWorkspace.getCli()['analytics']; if (analyticsConfig !== null && analyticsConfig !== undefined) { return true; } } catch {} return false; } /** * Get the global analytics object for the user. This returns an instance of UniversalAnalytics, * or undefined if analytics are disabled. * * If any problem happens, it is considered the user has been opting out of analytics. 
*/ export async function getGlobalAnalytics(): Promise<AnalyticsCollector | undefined> { analyticsDebug('getGlobalAnalytics'); const propertyId = AnalyticsProperties.AngularCliDefault; if ('NG_CLI_ANALYTICS' in process.env) { if (process.env['NG_CLI_ANALYTICS'] == 'false' || process.env['NG_CLI_ANALYTICS'] == '') { analyticsDebug('NG_CLI_ANALYTICS is false'); return undefined; } if (process.env['NG_CLI_ANALYTICS'] === 'ci') { analyticsDebug('Running in CI mode'); return new AnalyticsCollector(propertyId, 'ci'); } } // If anything happens we just keep the NOOP analytics. try { const globalWorkspace = await getWorkspace('global'); const analyticsConfig: string | undefined | null | { uid?: string } = globalWorkspace && globalWorkspace.getCli() && globalWorkspace.getCli()['analytics']; analyticsDebug('Client Analytics config found: %j', analyticsConfig); if (analyticsConfig === false) { analyticsDebug('Analytics disabled. Ignoring all analytics.'); return undefined; } else if (analyticsConfig === undefined || analyticsConfig === null) { analyticsDebug('Analytics settings not found. Ignoring all analytics.'); // globalWorkspace can be null if there is no file. analyticsConfig would be null in this // case. Since there is no file, the user hasn't answered and the expected return value is // undefined. return undefined; } else { let uid: string | undefined = undefined; if (typeof analyticsConfig == 'string') { uid = analyticsConfig; } else if (typeof analyticsConfig == 'object' && typeof analyticsConfig['uid'] == 'string') { uid = analyticsConfig['uid']; } analyticsDebug('client id: %j', uid); if (uid == undefined) { return undefined; } return new AnalyticsCollector(propertyId, uid); } } catch (err) { analyticsDebug('Error happened during reading of analytics config: %s', err.message); return undefined; } } export async function hasWorkspaceAnalyticsConfiguration(): Promise<boolean> { try { const globalWorkspace = await getWorkspace('local'); const analyticsConfig: string | undefined | null | { uid?: string } = globalWorkspace && globalWorkspace.getCli() && globalWorkspace.getCli()['analytics']; if (analyticsConfig !== undefined) { return true; } } catch {} return false; } /** * Get the workspace analytics object for the user. This returns an instance of AnalyticsCollector, * or undefined if analytics are disabled. * * If any problem happens, it is considered the user has been opting out of analytics. */ export async function getWorkspaceAnalytics(): Promise<AnalyticsCollector | undefined> { analyticsDebug('getWorkspaceAnalytics'); try { const globalWorkspace = await getWorkspace('local'); const analyticsConfig: string | undefined | null | { uid?: string } = globalWorkspace?.getCli()[ 'analytics' ]; analyticsDebug('Workspace Analytics config found: %j', analyticsConfig); if (analyticsConfig === false) { analyticsDebug('Analytics disabled. Ignoring all analytics.'); return undefined; } else if (analyticsConfig === undefined || analyticsConfig === null) { analyticsDebug('Analytics settings not found. 
Ignoring all analytics.'); return undefined; } else { let uid: string | undefined = undefined; if (typeof analyticsConfig == 'string') { uid = analyticsConfig; } else if (typeof analyticsConfig == 'object' && typeof analyticsConfig['uid'] == 'string') { uid = analyticsConfig['uid']; } analyticsDebug('client id: %j', uid); if (uid == undefined) { return undefined; } return new AnalyticsCollector(AnalyticsProperties.AngularCliDefault, uid); } } catch (err) { analyticsDebug('Error happened during reading of analytics config: %s', err.message); return undefined; } } /** * Return the usage analytics sharing setting, which is either a property string (GA-XXXXXXX-XX), * or undefined if no sharing. */ export async function getSharedAnalytics(): Promise<AnalyticsCollector | undefined> { analyticsDebug('getSharedAnalytics'); const envVarName = 'NG_CLI_ANALYTICS_SHARE'; if (envVarName in process.env) { if (process.env[envVarName] == 'false' || process.env[envVarName] == '') { analyticsDebug('NG_CLI_ANALYTICS is false'); return undefined; } } // If anything happens we just keep the NOOP analytics. try { const globalWorkspace = await getWorkspace('global'); const analyticsConfig = globalWorkspace?.getCli()['analyticsSharing']; if (!analyticsConfig || !analyticsConfig.tracking || !analyticsConfig.uuid) { return undefined; } else { analyticsDebug('Analytics sharing info: %j', analyticsConfig); return new AnalyticsCollector(analyticsConfig.tracking, analyticsConfig.uuid); } } catch (err) { analyticsDebug('Error happened during reading of analytics sharing config: %s', err.message); return undefined; } }
<filename>queryset_reporter/management/commands/purge.py
# -*- coding: utf-8 -*-
import os
from datetime import datetime, timedelta

from django.core.management.base import BaseCommand
from django.conf import settings


class Command(BaseCommand):
    help = 'Purge xlsx and csv exports older than 7 days'

    def handle(self, *args, **options):
        directories = ['xlsx', 'csvs']
        purge_date = datetime.now() - timedelta(days=7)
        for directory in directories:
            path = os.path.join(settings.MEDIA_ROOT, directory)
            for file_name in os.listdir(path):
                file_path = os.path.join(path, file_name)
                file_date = datetime.fromtimestamp(os.path.getmtime(file_path))
                self.stdout.write(str(file_date))
                if file_date < purge_date:
                    os.remove(file_path)
// Split the decimal string s into two non-empty parts so that the first part is
// divisible by a and the second part is divisible by b (the second part must not
// start with a zero). Prints the split if it exists, otherwise "NO".
#include <bits/stdc++.h>
using namespace std;
typedef long long ll;
int main()
{
    ios_base::sync_with_stdio(false);cin.tie(NULL);cout.tie(NULL);
    string s;
    cin>>s;
    ll a,b,p=1;
    cin>>a>>b;
    ll n=s.size();
    // r[i] = value of the prefix s[0..i] modulo a
    vector<ll> r(n);
    r[0]=(s[0]-'0')%a;
    for(ll i=1;i<n;i++){
        r[i]=(r[i-1]*10+s[i]-'0')%a;
    }
    // Scan split points from the right; sum accumulates the suffix value modulo b,
    // with p being the current power of 10 modulo b.
    ll sum=0;
    for(ll i=n;i>1;i--){
        sum+=p*(s[i-1]-'0')%b;
        if(sum%b==0 && !r[i-2] && s[i-1]-'0'!=0){
            cout<<"YES"<<endl<<s.substr(0,i-1)<<endl<<s.substr(i-1,n)<<"\n";
            return 0;
        }
        p=(p*10)%b;
    }
    cout<<"NO";
    return 0;
}
<filename>dist/Textarea/index.d.ts interface TextareaProps { hasError?: boolean; } declare const Textarea: import("styled-components").StyledComponent<"textarea", any, TextareaProps, never>; export default Textarea;
<reponame>satoru2001/MLH-Flask-Starter-Pack<gh_stars>10-100 from __future__ import absolute_import, unicode_literals __version__ = '1.0.0' import misaka from flask import Markup from copy import copy # import constants for compatibility from misaka import (EXT_AUTOLINK, EXT_FENCED_CODE, # pyflakes.ignore EXT_NO_INTRA_EMPHASIS, EXT_SPACE_HEADERS, EXT_STRIKETHROUGH, EXT_SUPERSCRIPT, EXT_TABLES, HTML_ESCAPE, HTML_HARD_WRAP, HTML_SKIP_HTML, HTML_USE_XHTML, TABLE_ALIGNMASK, TABLE_HEADER, TABLE_ALIGN_CENTER, TABLE_ALIGN_LEFT, TABLE_ALIGN_RIGHT, EXT_MATH, EXT_FOOTNOTES, EXT_UNDERLINE, EXT_MATH_EXPLICIT, EXT_DISABLE_INDENTED_CODE, EXT_HIGHLIGHT, EXT_QUOTE) ALIAS_EXT = { 'autolink': EXT_AUTOLINK, 'fenced_code': EXT_FENCED_CODE, 'no_intra_emphasis': EXT_NO_INTRA_EMPHASIS, 'space_headers': EXT_SPACE_HEADERS, 'strikethrough': EXT_STRIKETHROUGH, 'superscript': EXT_SUPERSCRIPT, 'tables': EXT_TABLES, 'math': EXT_MATH, 'footnotes': EXT_FOOTNOTES, 'underline': EXT_UNDERLINE, 'math_explicit': EXT_MATH_EXPLICIT, 'disable_indented_code': EXT_DISABLE_INDENTED_CODE, 'no_indented_code': EXT_DISABLE_INDENTED_CODE, 'highlight': EXT_HIGHLIGHT, 'quote': EXT_QUOTE } ALIAS_RENDER = { 'escape': HTML_ESCAPE, 'hard_wrap': HTML_HARD_WRAP, 'wrap': HTML_HARD_WRAP, 'skip_html': HTML_SKIP_HTML, 'no_html': HTML_SKIP_HTML, 'use_xhtml': HTML_USE_XHTML, 'xhtml': HTML_USE_XHTML, } def make_flags(**options): ext = 0 for name, val in ALIAS_EXT.items(): if options.get(name): ext = ext | val if name.startswith("no_"): if options.get(name[3:]) is False: ext = ext | val rndr = 0 for name, val in ALIAS_RENDER.items(): if options.get(name): rndr = rndr | val if name.startswith("no_"): if options.get(name[3:]) is False: rndr = rndr | val return ext, rndr def markdown(text, renderer=None, **options): """ Parses the provided Markdown-formatted text into valid HTML, and returns it as a :class:`flask.Markup` instance. :param text: Markdown-formatted text to be rendered into HTML :param renderer: A custom misaka renderer to be used instead of the default one :param options: Additional options for customizing the default renderer :return: A :class:`flask.Markup` instance representing the rendered text """ ext, rndr = make_flags(**options) if renderer: md = misaka.Markdown(renderer,ext) result = md(text) else: result = misaka.html(text, extensions=ext, render_flags=rndr) if options.get("smartypants"): result = misaka.smartypants(result) return Markup(result) class Misaka(object): def __init__(self, app=None, renderer=None, **defaults): """ Set the default options for the :meth:`render` method. If you want the ``markdown`` template filter to use options, set them here. A custom misaka renderer can be specified to be used instead of the default one. """ self.renderer = renderer self.defaults = defaults if app: self.init_app(app) def init_app(self, app): """ Registers the rendering method as template filter. :param app: a :class:`flask.Flask` instance. """ app.jinja_env.filters.setdefault('markdown', self.render) def render(self, text, **overrides): """ It delegates to the :func:`markdown` function, passing any default options or renderer set in the :meth:`__init__` method. The ``markdown`` template filter calls this method. 
:param text: Markdown-formatted text to be rendered to HTML :param overrides: Additional options which may override the defaults :return: A :class:`flask.Markup` instance representing the rendered text """ options = self.defaults if overrides: options = copy(options) options.update(overrides) return markdown(text, self.renderer, **options)
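A minimal usage sketch for the helpers above. The import path (`flask_misaka`) and the extension flags are assumptions for illustration; this file is a vendored copy, so the module may be importable under a different name in this repository.

from flask import Flask
from flask_misaka import Misaka, markdown  # assumed import path for the module above

app = Flask(__name__)

# Registers the `markdown` Jinja filter with a couple of default extensions.
Misaka(app, fenced_code=True, strikethrough=True)

# Direct call outside of a template; returns a flask.Markup instance.
print(markdown("**bold** and ~~struck~~ text", fenced_code=True, strikethrough=True))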
// YasdiMasterInitialize initializes YASDI by reading information from the INI- // file and returns the number of found drivers. // This function must be called before all others. func YasdiMasterInitialize(iniFilePath string) (int, error) { cPath := C.CString(iniFilePath) defer C.free(unsafe.Pointer(cPath)) cDriverNum := C.DWORD(0) ret := C.yasdiMasterInitialize(cPath, &cDriverNum) if int(ret) != 0 { if int(ret) == -1 { return 0, fmt.Errorf("file %q not found or not readable", iniFilePath) } return 0, ErrUnknown } return int(cDriverNum), nil }
import android.os.Parcel;
import android.os.Parcelable.Creator;

/**
 * Created by alexeyglushkov on 26.08.16.
 *
 * No-op comparison strategy: compare() always returns 0, so the original order is kept.
 * Only the inversion flag is stored and persisted through the Parcelable contract.
 */
public class SimpleCompareStrategy<T> implements CompareStrategy<T> {
    private boolean isInversed;

    public SimpleCompareStrategy() {
    }

    protected SimpleCompareStrategy(Parcel in) {
        this.isInversed = in.readByte() != 0;
    }

    @Override
    public boolean isInversed() {
        return isInversed;
    }

    @Override
    public void inverse() {
        isInversed = !isInversed;
    }

    @Override
    public int compare(T lhs, T rhs) {
        return 0;
    }

    //// Parcelable

    @Override
    public int describeContents() {
        return 0;
    }

    @Override
    public void writeToParcel(Parcel dest, int flags) {
        dest.writeByte(this.isInversed ? (byte) 1 : (byte) 0);
    }

    public static final Creator<SimpleCompareStrategy> CREATOR = new Creator<SimpleCompareStrategy>() {
        @Override
        public SimpleCompareStrategy createFromParcel(Parcel source) {
            return new SimpleCompareStrategy(source);
        }

        @Override
        public SimpleCompareStrategy[] newArray(int size) {
            return new SimpleCompareStrategy[size];
        }
    };
}
""" graph.py """ class Graph: def __init__(self, lines): self.edges = set() self.vertices = set() self.indexes = dict() for line in lines: tmp = line.split(',') self.vertices.add(tmp[0]) self.vertices.add(tmp[1]) self.edges.add(self.Edge(tmp[0], tmp[1], tmp[2])) i = 0 for vertex in self.vertices: self.indexes[vertex] = i i += 1 class Edge: def __init__(self, vertex1, vertex2, weight): self.v1 = vertex1 self.v2 = vertex2 self.weight = weight def __eq__(self, o): return ((self.v1 == o.v1) and (self.v2 == o.v2) and (self.weight == o.weight)) \ or ((self.v1 == o.v2) and (self.v2 == o.v1) and (self.weight == o.weight)) def __hash__(self): return hash(self.v1) + hash(self.v2) + hash(self.weight) def __gt__(self, o): return self.weight > o.weight def __lt__(self, o): return self.weight < o.weight def __str__(self): return '(' + self.v1 + ', ' + self.v2 + '; ' + self.weight + ')'
/** * Method to load all codes from deployed bundles or legacy custom * extensions from Svarog2 * * @param jCodes * The JsonObject in which the codes will be stored * @throws IOException * any exception when reading the files */ private static void loadCodes(JsonObject jCodes) throws IOException { File customFolder = new File("custom/"); File[] customJars = customFolder.listFiles(); if (customJars != null) { for (int i = 0; i < customJars.length; i++) { if (customJars[i].getName().endsWith(".jar")) loadCodesFromCustom(customJars[i].getAbsolutePath(), jCodes); } } customFolder = new File(SvConf.getParam(AutoProcessor.AUTO_DEPLOY_DIR_PROPERTY)); customJars = customFolder.listFiles(); if (customJars != null) { for (int i = 0; i < customJars.length; i++) { if (customJars[i].getName().endsWith(".jar")) loadCodesFromCustom(customJars[i].getAbsolutePath(), jCodes); } } }
def to_json(self): json = {'title': self.title} if self.sub_title is not None: json['subtitle'] = self.sub_title if self.image_url is not None: json['image_url'] = self.image_url if self.default_action is not None: json['default_action'] = self.default_action.to_json() if self.buttons is not None: json['buttons'] = [button.to_json() for button in self.buttons] return json
Statistic-Based Magnitude Determination of Impulse Sample in Impulse Postfix OFDM Systems Instead of pilot tones, the impulse sample is exploited for channel estimation in Impulse Postfix OFDM systems . As the magnitude of impulse sample is increased, the accuracy of channel estimation can be enhanced, but it may significantly increase the PAPR of generated OFDM symbols. In this letter, based on the statistical analysis of the generated OFDM symbol, we propose a decision scheme for determining the magnitude of impulse sample. By using the proposed scheme, we can determine the magnitude of impulse sample that provides the enhancement of BER performance as well as the avoidance of PAPR increase. The validation of the proposed scheme is demonstrated by computer simulations.
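The trade-off described in the abstract, namely that a larger impulse sample raises the peak power and therefore the PAPR of the transmitted block, can be illustrated with a rough numerical sketch. This is not the paper's decision scheme; the subcarrier count, QPSK mapping and candidate magnitudes below are arbitrary assumptions.

import numpy as np

def papr_db(x):
    # Peak-to-average power ratio of a complex baseband signal, in dB.
    power = np.abs(x) ** 2
    return 10 * np.log10(power.max() / power.mean())

rng = np.random.default_rng(0)
n_sub = 64                                         # illustrative number of subcarriers
data = rng.choice([1 + 1j, 1 - 1j, -1 + 1j, -1 - 1j], size=n_sub) / np.sqrt(2)  # QPSK
symbol = np.fft.ifft(data) * np.sqrt(n_sub)        # time-domain OFDM symbol, unit average power

for magnitude in (1.0, 2.0, 4.0):                  # candidate impulse magnitudes
    block = np.concatenate([symbol, [magnitude]])  # append the impulse sample as a postfix
    print(f"impulse magnitude {magnitude}: PAPR = {papr_db(block):.2f} dB")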
Making the Most of Tweet-Inherent Features for Social Spam Detection on Twitter Social spam produces a great amount of noise on social media services such as Twitter, which reduces the signal-to-noise ratio that both end users and data mining applications observe. Existing techniques on social spam detection have focused primarily on the identification of spam accounts by using extensive historical and network-based data. In this paper we focus on the detection of spam tweets, which optimises the amount of data that needs to be gathered by relying only on tweet-inherent features. This enables the application of the spam detection system to a large set of tweets in a timely fashion, potentially applicable in a real-time or near real-time setting. Using two large hand-labelled datasets of tweets containing spam, we study the suitability of five classification algorithms and four different feature sets to the social spam detection task. Our results show that, by using the limited set of features readily available in a tweet, we can achieve encouraging results which are competitive when compared against existing spammer detection systems that make use of additional, costly user features. Our study is the first that attempts at generalising conclusions on the optimal classifiers and sets of features for social spam detection over different datasets. INTRODUCTION Social networking spam, or social spam, is increasingly affecting social networking websites, such as Facebook, Pinterest and Twitter. According to a study by the social media security firm Nexgate , social media platforms experi-Permission to make digital or hard copies of all or part of this work for personal or classroom use is granted without fee provided that copies are not made or distributed for profit or commercial advantage and that copies bear this notice and the full citation on the first page. To copy otherwise, to republish, to post on servers or to redistribute to lists, requires prior specific permission and/or a fee. Copyright 20XX ACM X-XXXXX-XX-X/XX/XX ...$15.00. enced a 355% growth of social spam during the first half of 2013. Social spam can reach a surprisingly high visibility even with a simple bot , which detracts from a company's social media presence and damages their social marketing ROI (Return On Investment). Moreover, social spam exacerbates the amount of unwanted information that average social media users receive in their timeline, and can occasionally even affect the physical condition of vulnerable users through the so-called "Twitter psychosis" . Social spam has different effects and therefore its definition varies across major social networking websites. One of the most popular social networking services, Twitter, has published their definition of spamming as part of their "The Twitter Rules" 1 and provided several methods for users to report spam such as tweeting "@spam @username" where @username will be reported as a spammer. While as a business, Twitter is also generous with mainline bot-level access 2 and allows some level of advertisements as long as they do not violate "The Twitter Rules". In recent years we have seen Twitter being used as a prominent knowledge base for discovering hidden insights and predicting trends from finance to public sector, both in industry and academia. The ability to sort out the signal (or the information) from Twitter noise is crucial, and one of the biggest effects of Twitter spam is that it significantly reduces the signal-to-noise ratio. 
Our work on social spam is motivated by the initial attempts at harvesting a Twitter corpus around a specific topic with a set of predefined keywords . This led to the identification of a large amount of spam within those datasets. The fact that certain topics are trending and therefore many are tracking its contents encourages spammers to inject their spam tweets using the keywords associated with these topics to maximise the visibility of their tweets. These tweets produce a significant amount of noise both to end users who follow the topic as well as to tools that mine Twitter data. In previous works, the automatic detection of Twitter spam has been addressed in two different ways. The first way is to tackle the task as a user classification problem, where a user can be deemed either a spammer or a nonspammer. This approach, which has been used by the majority of the works in the literature so far (see e.g., , , , , and ), makes use of numerous features that need to gather historical details about a user, such as tweets that a user posted in the past to explore what they usually tweet about, or how the number of followers and followings of a user has evolved in recent weeks to discover unusual behaviour. While this is ideal as the classifier can make use of extensive user data, it is often unfeasible due to restrictions of the Twitter API. The second, alternative way, which has not been as common in the literature (see e.g., ), is to define the task as a tweet classification problem, where a tweet can be deemed spam or non-spam. In this case, the classification task needs to assume that only the information provided within a tweet is available to determine if it has to be categorised as spam. Here, we delve into this approach to Twitter spam classification, studying the categorisation of a tweet as spam or not from its inherent features. While this is more realistic for our scenario, it presents the extra challenge that the available features are rather limited, which we study here. In this work, after discussing the definition of social spam and reviewing previous research in Twitter spam detection, we present a comparative study of Twitter spam detection systems. We investigate the use of different features inherent to a tweet so as to identify the sets of features that do best in categorising tweets as spam or not. Our study compares five different classification algorithms over two different datasets. The fact that we test our classifiers on two different datasets, collected in different ways, enables us to validate the results and claim repeatability. Our results suggest a competitive performance can be obtained using tree-based classifiers for spam detection even with only tweet-inherent features, as comparing to the existing spammer detection studies. Also the combination of different features generally lead to an improved performance, with User feature + Bi & Tri-gram (Tf) having the best results for both datasets. SOCIAL SPAM The detection of spam has now been studied for more than a decade since email spam . In the context of email messages, spam has been widely defined as "unsolicited bulk email" . The term "spam" has then been extended to other contexts, including "social spam" in the context of social media. Similarly, social spam can be defined as the "unwanted content that appears in online social networks". 
It is, after all, the noise produced by users who express a different behavior from what the system is intended for, and has the goal of grabbing attention by exploiting the social networks' characteristics, including for instance the injection of unrelated tweet content in timely topics, sharing malicious links or fraudulent information. Social spam hence can appear in many different forms, which poses another challenge of having to identify very different types of noise for social spam detection systems. Social Spammer Detection As we said before, most of the previous work in the area has focused on the detection of users that produce spam content (i.e., spammers), using historical or network features of the user rather than information inherent to the tweet. Early work by , and put together a set of different features that can be obtained by looking at a user's previous behaviour. These include some aggregated statistics from a user's past tweets such as average number of hashtags, average number of URL links and average number of user mentions that appear in their tweets. They combine these with other non-historical features, such as number of follow-ers, number of followings and age of the account, which can be obtained from a user's basic metadata, also inherent to each tweet they post. Some of these features, such as the number of followers, can be gamed by purchasing additional followers to make the user look like a regular user account. Lee et al. and Yang et al. employed different techniques for collecting data that includes spam (more details will be discussed in Section 3.1) and performed comprehensive studies of the spammers' behaviour. They both relied on the tweets posted in the past by the users and their social networks, such as tweeting rate, following rate, percentage of bidirectional friends and local clustering coefficient of its network graph, aiming to combat spammers' evasion tactics as these features are difficult or costly to simulate. Ferrara et al. used network, user, friends, timing, content and sentiment features for detecting Twitter bots, their performance evaluation is based on the social honeypots dataset (from ). Miller et al. treats spammer detection as an anomaly detection problem as clustering algorithms are proposed and such clustering model is built on normal Twitter users with outliers being treated as spammers. They also propose using 95 uni-gram counts along with user profile attributes as features. The sets of features utilised in the above works require the collection of historical and network data for each user, which do not meet the requirements of our scenario for spam detection. Social Spam Detection Few studies have addressed the problem of spam detection. Santos et al. investigated two different approaches, namely compression-based text classification algorithms (i.e. Dynamic Markov compression and Prediction by partial matching) and using "bag of words" language model (also known as uni-gram language model) for detecting spam tweets. Martinez-Romo and Araujo applied Kullback-Leibler Divergence and examined the difference of language used in a set of tweets related to a trending topic, suspicious tweets (i.e. tweets that link to a web page) and the page linked by the suspicious tweets. These language divergence measures were used as their features for the classification. 
They used several URL blacklists for identifying spam tweets from their crawled dataset, therefore each one of their labelled spam tweets contains a URL link, and is not able to identify other types of spam tweets. In our studies we have investigated and evaluated the discriminative power of four feature sets on two Twitter datasets (which were previously in and ) using five different classifiers. We examine the suitability of each of the features for the spam classification purposes. Comparing to our system is able to detect most known types of spam tweet irrespective of having a link or not. Also our system does not have to analyze a set of tweets relating to each topic (which did to create part of their proposed features) or external web page linked by each suspicious tweet, therefore its computation cost does not increase dramatically when applied for mass spam detection with potentially many different topics in the data stream. The few works that have dealt with spam detection are mostly limited in terms of the sets of features that they studied, and the experiments have been only conducted in a single dataset (except in the case of , where very limited evaluation was conducted on a new and smaller set of tweets), which does not allow for generalisability of the re-sults. To the best of our knowledge, our work is the first study that evaluates a wide range of tweet-inherent features (namely user, content, n-gram and sentiment features) over two different datasets, obtained from and and with more than 10,000 tweets each, for the task of spam detection. The two datasets were collected using completely different approaches (namely deploying social honeypots for attracting spammers; and checking malicious URL links), which helps us learn more about the nature of social spam and further validate the results of different spam detection systems. METHODOLOGY In this section we describe the Twitter spam datasets we used, the text preprocessing techniques that we performed on the tweets, and the four different feature sets we used for training our spam vs non-spam classifier. Datasets A labelled collection of tweets is crucial in a machine learning task such as spam detection. We found no spam dataset which is publicly available and specifically fulfils the requirements of our task. Instead, the datasets we obtained include Twitter users labelled as spammers or not. For our work, we used the latter, which we adapted to our purposes by taking out the features that would not be available in our scenario of spam detection from tweet-inherent features. We used two spammer datasets in this work, which have been created using different data collection techniques and therefore is suitable to our purposes of testing the spam classifier in different settings. To accomodate the datasets to our needs, we sample one tweet for each user in the dataset, so that we can only access one tweet per user and cannot aggregate several tweets from the same user or use social network features. In what follows we describe the two datasets we use. Social Honeypot Dataset: Lee et al. created and manipulated (by posting random messages and engaging in none of the activities of legitimate users) 60 social honeypot accounts on Twitter to attract spammers. Their dataset consists of 22,223 spammers and 19,276 legitimate users along with their most recent tweets. 
They used Expectation-Maximization (EM) clustering algorithm and then manually grouped their harvested users into 4 categories: duplicate spammers, duplicate @ spammers, malicious promoters and friend infiltrators. 1KS-10KN Dataset: Yang et al. defines a tweet that contains at least one malicious or phishing URL as a spam tweet, and a user whose spam ratio is higher than 10% as a spammer. Therefore their dataset which contains 1,000 spammers and 10,000 legitimate users, represents only one major type of spammers (as discussed in their paper). We used spammer vs. legitimate user datasets from and . After removing duplicated users and the ones that do not have any tweets in the dataset we randomly selected one tweet from each spammer or legitimate user to create our labelled collection of spam vs. legitimate tweets, in order to avoid overfitting and reduce our sampling bias. The resulting datasets contain 20,707 spam tweets and 19,249 normal tweets (named Social Honeypot dataset, as from ), and 1,000 spam tweets and 9,828 normal tweets (named 1KS-10KN dataset, as from ) respectively. Data Preprocessing Before we extract the features to be used by the classifier from each tweet, we apply a set of preprocessing techniques to the content of the tweets to normalise it and reduce the noise in the classification phase. The preprocessing techniques include decoding HTML entities, and expanding contractions with apostrophes to standard spellings (e.g. "I'm" -> "I am"). More advanced preprocessing techniques such as spell-checking and stemming were tested but later discarded given the minimal effect we observed in the performance of the classifiers. For the specific case of the extraction of sentiment-based features, we also remove hashtags, links, and user mentions from tweet contents. Features As spammers and legitimate users have different goals in posting tweets or interacting with other users on Twitter, we can expect that the characteristics of spam tweets are quite different to the normal tweets. The features inherent to a tweet include, besides the tweet content itself, a set of metadata including information about the user who posted the tweet, which is also readily available in the stream of tweets we have access to in our scenario. We analyse a wide range of features that reflect user behaviour, which can be computed straightforwardly and do not require high computational cost, and also describe the linguistic properties that are shown in the tweet content. We considered four feature sets: (i) user features, (ii) content features, (iii) n-grams, and (iv) sentiment features. User features include a list of 11 attributes about the author of the tweet (as seen in Table 1) that is generated from each tweet's metadata, such as reputation of the user , which is defined as the ratio between the number of followers and the total number of followers and followings and it had been used to measure user influence. Other candidate features, such as the number of retweets and favourites garnered by a tweet, were not used given that it is not readily available at the time of posting the tweet, where a tweet has no retweets or favourites yet. Content features capture the linguistic properties from the text of each tweet (Table 1) including a list of content attributes and part-of-speech tags. Among the 17 content attributes, number of spam words and number of spam words per word are generated by matching a popular list of spam words 3 . 
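To make the feature sets above concrete, here is a small sketch of how a handful of the user and content attributes could be computed from a single tweet payload. The field names assume the standard Twitter API v1.1 tweet object, and the chosen attributes are only a plausible subset of Table 1, not the authors' exact list.

import re

def tweet_features(tweet):
    # User and content features computable from one tweet object (a dict).
    text = tweet["text"]
    user = tweet["user"]
    followers = user["followers_count"]
    followings = user["friends_count"]
    return {
        # user features
        "followers": followers,
        "followings": followings,
        "reputation": followers / (followers + followings) if followers + followings else 0.0,
        "statuses_count": user["statuses_count"],
        # content features
        "n_words": len(text.split()),
        "n_hashtags": len(re.findall(r"#\w+", text)),
        "n_mentions": len(re.findall(r"@\w+", text)),
        "n_urls": len(re.findall(r"https?://\S+", text)),
    }

example = {"text": "Win a free iPhone!!! http://spam.example #win @you",
           "user": {"followers_count": 10, "friends_count": 2000, "statuses_count": 5}}
print(tweet_features(example))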
Part-of-speech (or POS) tagging provides syntactic (or grammatical) information of a sentence and has been used in the natural language processing community for measuring text informativeness (e.g. Tan et al. used POS counts as a informativeness measure for tweets). We have used a Twitter-specific tagger , and in the end our POS feature consists of uni-gram and 2-skip-bi-gram representations of POS tagging for each tweet in order to capture the structure and therefore informativeness of the text. We also used Stanford tagger with standard Penn Tree tags, which makes very little difference in the classification results. N-gram models have long been used in natural language processing for various tasks including text classification. Although it is often criticized for its lack of any explicit representation of long range or semantic dependency, it is surpris- Selection of Classifier During the classification and evaluation stage, we tested 5 classification algorithms implemented using scikit-learn 4 : Bernoulli Naive Bayes, K-Nearest Neighbour (KNN), Support Vector Machines (SVM), Decision Tree, and Random Forests. These algorithms were chosen as being the most commonly used in the previous research on spammer detection. We evaluate using the standard information retrieval metrics of recall (R), precision (P) and F1-measure. Recall 4 http://scikit-learn.org/ in this case refers to the ratio obtained from diving the number of correctly classified spam tweets (i.e. True Positives) by the number of tweets that are actually spam (i.e. True Positives + False Negatives). Precision is the ratio of the number of correctly classified spam tweets (i.e. True Positives) to the total number of tweets that are classified as spam (i.e. True Positives + False Positives). F1-measure can be interpreted as a harmonic mean of the precision and recall, where its score reaches its best value at 1 and worst at 0. It is defined as: In order to select the best classifier for our task, we have used a subset of each dataset (20% for 1KS-10KN dataset and 40% for Social Honeypot dataset, due to the different sizes of the two datasets) to run a 10-fold cross validation for optimising the hyperparameters of each classifier. By doing so it minimises the risk of over-fitting in model selection and hence subsequent selection bias in performance evaluation. Such optimisation was conducted using all 4 feature sets (each feature was normalised to fit the range of values ; we also selected 30% of the highest scoring features using Chi Square for tuning SVM as computationally it is more efficient and gives better classification results). Then we evaluated our algorithm on the rest of the data (i.e. 80% for 1KS-10KN dataset and 60% for Social Honeypot dataset), again using all 4 feature sets in a 10-fold cross validation setting (same as in grid-search, each feature was normalised and Chi square feature selection was used for SVM). As shown in Table 2, tree-based classifiers achieved very promising performances, among which Random Forests out-perform all the others when we look at the F1-measure. This outperformance occurs especially due to the high precision values of 99.3% and 94.1% obtained by the Random Forest classifier. While Random Forests show a clear superiority in terms of precision, its performance in terms of recall varies for the two datasets; it achieves high recall for the Social Honeypot dataset, while it drops substantially for the 1KS-10KN dataset due to its approximate 1:10 spam/nonspam ratio. 
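Since all five classifiers are taken from scikit-learn, the tuning-then-evaluation protocol described above can be sketched as follows. The hyperparameter grid and the synthetic placeholder data are assumptions for illustration only; the paper does not list its grids, and real tweet features would replace make_classification.

from sklearn.datasets import make_classification
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import GridSearchCV, cross_validate, train_test_split

# Placeholder feature matrix standing in for the tweet features; label 1 = spam.
X, y = make_classification(n_samples=2000, n_features=40, weights=[0.9, 0.1], random_state=0)

# 20% tuning split (as used for the 1KS-10KN dataset), 80% kept for evaluation.
X_tune, X_eval, y_tune, y_eval = train_test_split(X, y, train_size=0.2, stratify=y, random_state=0)

# Hyperparameter optimisation with 10-fold cross-validation on the tuning split.
search = GridSearchCV(RandomForestClassifier(random_state=0),
                      {"n_estimators": [100, 300], "max_depth": [None, 10, 30]},
                      scoring="f1", cv=10)
search.fit(X_tune, y_tune)

# 10-fold cross-validated precision, recall and F1 of the tuned model on the remaining 80%.
scores = cross_validate(search.best_estimator_, X_eval, y_eval, cv=10,
                        scoring=("precision", "recall", "f1"))
for metric in ("test_precision", "test_recall", "test_f1"):
    print(metric, round(scores[metric].mean(), 3))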
These results are consistent with the conclusion of most spammer detection studies; our results extend this conclusion to the spam detection task. When we compare the performance values for the different datasets, it is worth noting that with the Social Honeypot dataset the best result is more than 10% higher than the best result in 1KS-10KN dataset. This is caused by the different spam/non-spam ratios in the two datasets, as the Social Honeypot dataset has a roughly 50:50 ratio while in 1KS-10KN it is roughly 1:10 which is a more realistic ratio to reflect the amount of spam tweets existing on Twitter (In Twitter's 2014 Q2 earnings report it says that less than 5% of its accounts are spam 5 , but independent researchers believe the number is higher). In comparison to the original papers, reported a best 0.983 F1-score and reported a best 0.884 F1-score. Our results are only about 4% lower than their results, which make use of historical and networkbased data, not readily available in our scenario. Our results suggest that a competitive performance can also be obtained for spam detection where only tweet-inherent features can be used. Evaluation of Features We trained our best classifier (i.e. Random Forests) with different feature sets, as well as combinations of the feature sets using the two datasets (i.e. the whole corpora), and under a 10-fold cross validation setting. We report our results in Table 3. As seen in 1KS-10KN dataset, the F1-measure for different feature sets ranges from 0.718 to 0.820 when using a single feature set. All feature set combinations except C + S (content + sentiment feature) perform higher than 0.810 in terms of F1-measure, reflecting that feature combinations have more discriminative power than a single feature set. For the Social Honeypot dataset, we can clearly see User features (U) having the most discriminative power as it has a 0.940 F1-measure. Results without using User features (U) have significantly worse performance, and feature combinations with U give very little improvement with respect to the original 0.940 (except for U + Uni & Bi-gram (Tf) + S). This means U is dominating the discriminative power of these feature combinations and other feature sets contribute very little in comparison to U. This is potentially caused by the data collection approach (i.e. by using social honeypots) adopted by , which resulted in the fact that most spammers that they attracted have distinguishing user profile information compared to the legitimate users. On the other hand, Yang et al. checked malicious or phishing URL links for collecting their spammer data, and this way of data collection gives more discriminative power to Content and N-gram features than does (although U is still a very significant feature set in 1KS-10KN). Note that U + Bi & Tri-gram (Tf) resulted in the best performance in both datasets, showing that these two feature sets are the most 5 http://www.webcitation.org/6VyBTJ7vt beneficial to each other irrespective of the different nature of datasets. Another important aspect to take into account when choosing the features to be used is the computation time, especially when one wants to apply the spam classifier in realtime. Table 4 shows a efficiency comparison for generating each feature from 1000 tweets, using a machine with 2.8 GHz Intel Core i7 processor and 16 GB memory. 
Some of the features, such as the User features, can be computed quickly and require minimal computational cost, as most of these features can be straightforwardly inferred from a tweet's metadata. Other features, such as N-grams and part-of-speech counts (from Content features), can be affected by the size of the vocabulary in the training set. On the other hand, some of the features are computationally more expensive, and therefore worth studying their applicability. This is the case of Sentiment features, which require string matching between our training documents and a list of lexica we used. We keep the sentiment features since they have shown added value in the performance evaluation of feature set combinations. Similarly, Content features such as Number of spam words and Number of spam words per word also require string matching between our training documents and a dictionary containing 11,529 spam words. However, given that the latter did not provide significant improvements in terms of accuracy, most probably because the spam words were extracted from blogs, we conclude that Number of spam words and Number of spam words per word can be taken out from the representation for the sake of the classifier's efficiency. DISCUSSION Our study looks at different classifiers and feature sets over two spam datasets to pick the settings that perform best. First, our study on spam classification buttresses previous findings for the task of spammer classification, where Random Forests were found to be the most accurate classifier. Second, our comparison of four feature sets reveals the features that, being readily available in each tweet, perform best in identifying spam tweets. While different features perform better for each of the datasets when using them alone, our comparison shows that the combination of different features leads to an improved performance in both datasets. We believe that the use of multiple feature sets increases the possibility to capture different spam types, and makes it more difficult for spammers to evade all feature sets used by the spam detection system. For example spammers might buy more followers to look more legitimate but it is still very likely that their spam tweet will be detected as its tweet content will give away its spam nature. Due to practical limitations, we have generated our spam vs. non-spam data from two spammer vs. non-spammer datasets that were collected in 2011. For future work, we plan to generate a labelled spam/non-spam dataset which was crawled in 2014. This will not only give us a purposebuilt corpus of spam tweets to reduce the possible effect of sampling bias of the two datasets that we used, but will also give us insights on how the nature of Twitter spam changes over time and how spammers have evolved since 2011 (as spammers do evolve and their spam content are manipulated to look more and more like normal tweet). Furthermore we will investigate the feasibility of cross-dataset spam classification using domain adaptation methods, and also whether A caveat of the approach we relied on for the dataset generation is the fact that we have considered spam tweets posted by users who were deemed spammers. This was done based on the assumption that the majority of social spam tweets on Twitter are shared by spam accounts. However, the dataset could also be complemented with spam tweets which are occasionally posted by legitimate users, which our work did not deal with. 
An interesting study to complement our work would be to look at these spam tweets posted by legitimate users, both to quantify this type of tweets, as well as to analyse whether they present different features from those in our datasets, especially when it comes to the userbased features as users might have different characteristics. For future work, we plan to conduct further evaluation on how our features would function for spam tweets shared by legitimate users, in order to fully understand the effects of bias of pursuing our approach of corpus construction. CONCLUSION In this paper we focus on the detection of spam tweets, solely making use of the features inherent to each tweet. This differs from most previous research works that classified Twitter users as spammers instead, and represents a real scenario where either a user is tracking an event on Twitter, or a tool is collecting tweets associated with an event. In these situations, the spam removal process cannot afford to retrieve historical and network-based features for all the tweets involved with the event, due to high number of requests to the Twitter API that this represents. We have tested five different classifiers, and four different feature sets on two Twitter spam datasets with different characteristics, which allows us to validate our results and claim repeatability. While the task is more difficult and has access to fewer data than a spammer classification task, our results show competitive performances. Moreover, our system can be applied for detecting spam tweets in real time and does not require any feature not readily available in a tweet. Here we have conducted the experiments on two different datasets which were originally collected in 2011. While this allows us to validate the results with two datasets collected in very different methods, our plan for future work includes the application of the spam detection system to more recent events, to assess the validity of the classifier with recent data as Twitter and spammers may have evolved.
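As a closing illustration, the best-performing combination reported above (User features + bi- and tri-gram term frequencies) could be wired together with a scikit-learn pipeline roughly as follows. The two inline tweets and the particular user attributes are placeholders, not the authors' implementation.

import numpy as np
from sklearn.ensemble import RandomForestClassifier
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.pipeline import FeatureUnion, Pipeline
from sklearn.preprocessing import FunctionTransformer

tweets = [  # (text, followers, followings, label) -- placeholder data
    ("Win cash now http://spam.example #prize", 3, 2500, 1),
    ("Lovely walk in the park this morning", 250, 180, 0),
]
texts = [t[0] for t in tweets]
labels = [t[3] for t in tweets]

def user_features(_texts):
    # In a real system these would come from tweet metadata rather than a closure.
    return np.array([[t[1], t[2], t[1] / (t[1] + t[2])] for t in tweets])

model = Pipeline([
    ("features", FeatureUnion([
        ("ngrams", CountVectorizer(ngram_range=(2, 3))),   # bi- and tri-gram term frequencies
        ("user", FunctionTransformer(user_features)),
    ])),
    ("clf", RandomForestClassifier(random_state=0)),
])
model.fit(texts, labels)
print(model.predict(texts))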
import {
  EnigmaUtils,
  SigningCosmWasmClient,
  Secp256k1Pen,
  pubkeyToAddress,
  encodeSecp256k1Pubkey,
} from 'secretjs';
import { getSafeUrl } from 'components/protocols/secret/lib';
import type { NextApiRequest, NextApiResponse } from 'next';

export default async function connect(
  req: NextApiRequest,
  res: NextApiResponse<string>
) {
  try {
    const url = await getSafeUrl();
    const { mnemonic, contract } = req.body;
    const signingPen = await Secp256k1Pen.fromMnemonic(mnemonic);
    const pubkey = encodeSecp256k1Pubkey(signingPen.pubkey);
    const address = pubkeyToAddress(pubkey, 'secret');
    const customFees = {
      send: {
        amount: [{ amount: '80000', denom: 'uscrt' }],
        gas: '80000',
      },
    };
    // Initialise client
    const txEncryptionSeed = EnigmaUtils.GenerateNewSeed();
    const client = new SigningCosmWasmClient(
      url,
      address,
      (signBytes) => signingPen.sign(signBytes),
      txEncryptionSeed,
      customFees,
    );
    // Get the stored value (the query message assumes the standard counter contract,
    // which exposes a `get_count` query returning `{ count }`)
    console.log('Querying contract for current count');
    const response = await client.queryContractSmart(contract, { get_count: {} });
    const count = response.count as number;
    res.status(200).json(count.toString());
  } catch (error) {
    console.log(error);
    res.status(500).json('get counter value failed');
  }
}
import { FormBuilder, FormGroup, Validators } from '@angular/forms'; import { Component, OnInit, Input } from '@angular/core'; import { Router, ActivatedRoute } from '@angular/router'; import { MustMatch } from '../../common/validation'; import Swal from 'sweetalert2'; import { AccountsService } from '../../services/accounts.service'; import { ManagePasswordVm } from '../../models/accounts-model'; import { AuthenticationService } from '../../services/authentication.service'; @Component({ selector: 'ngx-managepassword', templateUrl: './managepassword.component.html', }) export class ManagePasswordComponent implements OnInit { manageForm: FormGroup; submitted = false; title = 'Manage Password'; sub: any; @Input() AirlineId: number; constructor(private accservice: AccountsService, private formBuilder: FormBuilder, private _auth: AuthenticationService ) { } ngOnInit() { this.manageForm = this.formBuilder.group({ oldPassword: ['', Validators.required], newPassword: ['', [Validators.required, Validators.pattern('(?=.*[a-z])(?=.*[A-Z])(?=.*[0-9])(?=.*[$@$!%*?&])[A-Za-z\d$@$!%*?&].{8,}')]], confirmPassword: ['', [Validators.required]], }, { validator: [ MustMatch('newPassword', 'confirmPassword'), ], }); } get f() { return this.manageForm.controls; } managePassword() { Swal.fire({ title: 'Are you sure?', text: 'You will have to re-login with new password!', icon: 'warning', showCancelButton: true, confirmButtonColor: '#3085d6', cancelButtonColor: '#d33', confirmButtonText: 'Yes, change it!', }).then((result) => { if (result.value) { // debugger; const objManagePw: ManagePasswordVm = new ManagePasswordVm(); objManagePw.UserId = this._auth.getUserId(); objManagePw.OldPassword = this.manageForm.controls['oldPassword'].value, objManagePw.NewPassword = this.manageForm.controls['newPassword'].value, this.accservice.managePassword(objManagePw).subscribe( (data) => { // debugger; setTimeout(() => { this._auth.logout(); location.href = '/'; }, 9000); if (data['payload'].isSuccess) { Swal.fire( 'Changed!', 'Password changed successsfully.', 'success', ); setTimeout(() => { this._auth.logout(); location.href = '/'; }, 2000); } else { document.getElementById('manageErrMsg').innerHTML = data['payload'].message; document.getElementById('manageErrMsg').style.display = 'block'; setTimeout(() => { document.getElementById('manageErrMsg').style.display = 'none'; }, 5000); } }, ); } }); } onSubmitManageForm() { this.submitted = true; if (this.manageForm.invalid) { return; } this.managePassword(); } onReset() { this.submitted = false; this.manageForm.reset(); } // onList(): void { // this._router.navigate(['airlineList']); // } }
Evaluation of the effectiveness of the use of hepatoprotector in acute liver damage by various toxicants in the experiment Introduction. Acute liver damage with ethanol and its surrogates and chemicals remains an urgent problem. Therefore, studies of the use of hepatoprotector in acute liver damage by various toxicants in the experiment are relevant. The purpose of this study is an experimental evaluation of the use of ademetionine in the early stages of the toxic effects of carbon tetrachloride and ethanol. Materials and methods. The therapeutic effect of the drug “Heptor” in acute intoxication with carbon tetrachloride (subcutaneous administration at a dose of 2 g/kg) and ethanol (oral administration at a dose of 4 g/kg weight) was studied. Studies of metabolic processes in the liver were carried out based on biochemical parameters of rat blood serum. Results. The studies showed that normalization of metabolic processes was observed after introducing “Heptor” against the background of exposure to both toxicants. The therapeutic effect of ademetionine in the case of carbon tetrachloride intoxication had a positive impact after 24 hours of administration and persisted after 72 hours of the experiment. These provisions were based on the results obtained: the restoration of the activity of marker enzymes of hepatocytes (AsAT, AlAT, LDH), the concentration of uric acid and cholesterol, and the indicators of protein metabolism were revealed. The introduction of ademetionine after ethanol intoxication helped restore the function of hepatocytes, which led to the normalization of protein metabolism. The drug stopped hyperenzymemia, which confirmed its membrane-protective properties. Conclusion. “Heptor” has a regenerating, detoxifying and membrane-protective effect in acute liver lesions with carbon tetrachloride and ethanol. The obtained data confirm the universality of this drug, based on the possibility of using various mechanisms of therapeutic action, which allows us to recommend ademetionine as a hepatoprotector to prevent early liver damage when exposed to high doses of multiple toxicants.
/* * Copyright (C) 2014 <NAME> * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package poisondog.net; import java.io.UnsupportedEncodingException; import java.net.URLConnection; import java.util.Map; import poisondog.string.ExtractFileName; import poisondog.string.ExtractHost; import poisondog.string.ExtractParent; import poisondog.string.ExtractParentUrl; import poisondog.string.ExtractPath; import poisondog.string.ExtractPort; import poisondog.string.ExtractScheme; import poisondog.string.URLDecode; import poisondog.string.URLEncode; import poisondog.vfs.GuessContentType; /** * @author <NAME> */ public class URLUtils { public static String encode(String input) { try{ URLEncode task = new URLEncode("utf-8"); return task.process(input); }catch(UnsupportedEncodingException e){ e.printStackTrace(); } return null; } public static String decode(String input) { try{ URLDecode task = new URLDecode("utf-8"); return task.process(input); }catch(UnsupportedEncodingException e){ e.printStackTrace(); } return null; } public static String scheme(String url) { return new ExtractScheme().process(url); } public static String host(String url) { return new ExtractHost().process(url); } public static int port(String url) { try{ return Integer.parseInt(new ExtractPort().process(url)); }catch(Exception e) { } return -1; } public static String path(String url) { return new ExtractPath().process(url); } public static String parent(String url) { return new ExtractParent().process(url); } public static String parentUrl(String url) { return new ExtractParentUrl().process(url); } public static String file(String url) { return new ExtractFileName().process(url); } public static String httpQuery(Map<String, String> map) { StringBuilder builder = new StringBuilder(); builder.append("?"); for (String key : map.keySet()) { builder.append(key); builder.append("="); builder.append(encode(map.get(key))); builder.append("&"); } return builder.substring(0, builder.length()-1); } public static String long2IP(long proper_address) { StringBuilder builder = new StringBuilder(); if (proper_address >= 0 ) { builder.append((int)(Math.floor(proper_address / Math.pow(256, 3)))); builder.append("."); builder.append((int)(Math.floor((proper_address % Math.pow(256, 3)) / Math.pow(256, 2)))); builder.append("."); builder.append((int)(Math.floor(((proper_address % Math.pow(256, 3)) % Math.pow(256, 2)) / Math.pow(256, 1)))); builder.append("."); builder.append((int)(Math.floor((((proper_address % Math.pow(256, 3)) % Math.pow(256, 2)) % Math.pow(256, 1)) / Math.pow(256, 0)))); } return builder.toString(); } public static String guessContentType(String name) { // if(!name.contains(".")) return "application/octet-stream"; // if(name.endsWith(".mp3")) return "audio/mpeg"; // if(name.endsWith(".wma")) return "audio/x-ms-wma"; // if(name.endsWith(".mka")) return "audio/x-matroska"; // if(name.endsWith(".flac")) return "audio/x-flac"; // if(name.endsWith(".avi")) return "video/avi"; // if(name.endsWith(".mp4")) return "video/mp4"; // if(name.endsWith(".wmv")) 
return "video/x-ms-wmv"; // if(name.endsWith(".3gp")) return "video/3gpp"; // if(name.endsWith(".3gpp")) return "video/3gpp"; // if(name.endsWith(".flv")) return "video/x-flv"; // if(name.endsWith(".mkv")) return "video/x-matroska"; // if(name.endsWith(".rar")) return "application/rar"; // if(name.endsWith(".7z")) return "application/7z"; // if(name.endsWith(".ppt")) return "application/vnd.ms-powerpoint"; // if(name.endsWith(".pptx")) return "application/vnd.openxmlformats-officedocument.presentationml.presentation"; // if(name.endsWith(".pps")) return "application/vnd.ms-powerpoint"; // if(name.endsWith(".doc")) return "application/msword"; // if(name.endsWith(".docx")) return "application/vnd.openxmlformats-officedocument.wordprocessingml.document"; // if(name.endsWith(".xls")) return "application/vnd.ms-excel"; // if(name.endsWith(".xlsx")) return "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet"; // String contentType = URLConnection.guessContentTypeFromName(name); // return contentType == null ? "application/octet-stream" : contentType; GuessContentType task = new GuessContentType(); return task.execute(name); } }
/**
 * Creates a parser for the given URL.
 *
 * <p>
 * If you do not read all records from the given {@code url}, you should call {@link #close()} on the parser, unless
 * you close the {@code url}.
 * </p>
 *
 * @param url
 *            a URL. Must not be null.
 * @param charset
 *            the charset for the resource. Must not be null.
 * @param format
 *            the CSVFormat used for CSV parsing. Must not be null.
 * @return a new parser
 * @throws IllegalArgumentException
 *             If the parameters of the format are inconsistent or if either url, charset or format are null.
 * @throws IOException
 *             If an I/O error occurs
 */
public static CSVParser parse(URL url, Charset charset, final CSVFormat format) throws IOException {
    Args.notNull(url, "url");
    Args.notNull(charset, "charset");
    Args.notNull(format, "format");

    // charset is guaranteed non-null by the check above, so no UTF-8 fallback is needed here.
    return new CSVParser(new InputStreamReader(url.openStream(), charset), format);
}
Lately, media around the web has been bracing for robots — not time-traveling robots per se, but robot workers. Specifically, the increased sophistication of artificial intelligence and improved engineering of robotics has spurred a growing concern about what people are going to do when all the regular jobs are done by robots. A variety of solutions have been proposed to this potential technological unemployment (we even had an entire Future of Work series dealing with this topic in March), many of which suggest that there will still be things that humans can do that robots can’t, but what are they? During a Q&A session at an Executive Program hosted at Singularity University last October, one participant had the opportunity to prompt Ray Kurzweil with the question, “What do you think humans will be uniquely suited to do in the future?” Kurzweil, who has been wrestling with this topic for a long time (see his 1999 book The Age of Spiritual Machines) provided a nearly 10-minute answer and acknowledged the core changes that are occurring: “We are destroying jobs at the of the bottom scale ladder. We add new jobs at the top of the scale ladder. The scale ladder moves up. In order to keep up with that rising scale ladder, we need to make people more skilled.” “We’re constantly creating and inventing new jobs and things to do.” Citing similar concern that was shown over the introduction of machines into the textile industry at the dawn of the Industrial Revolution, Kurzweil said, “You could point at almost every job and it seemed only a matter of time before those jobs were automated and eliminated. Indeed that happened. Those jobs were automated and went away. Yet somehow, employment went up…New industries emerged making and servicing these machines.” As a Director of Engineering at Google, Kurzweil is seeing new industries emerge right in front of him. He’s been given unlimited resources to lead projects in artificial intelligence around natural language processing. His work comes at a time when the company is deep in related initiatives, like Google Brain, a research project leveraging deep learning to enhance products like speech recognition and recommendation engines. These projects are employing numerous teams of people in positions that didn’t exist decades ago. In his answer, Kurzweil also noted that “Indeed, 65% of jobs in America today are information jobs…These information jobs didn’t exist 25 years ago, let alone a hundred years ago. We’re constantly creating and inventing new jobs and things to do.” You can check out the following video for his complete response, but we want to know what you think: what are humans uniquely suited to do? To learn more about Singularity University’s Executive Programs, click here. [image courtesy of Shutterstock]
from .futurasciences_loader import FuturaSciencesLoader from .liberation_loader import LiberationLoader from .nouvelobs_loader import NouvelObsLoader from .telerama_loader import TeleramaLoader from .lefigaro_loader import LeFigaroLoader from .lemonde_loader import LeMondeLoader
// Calls advance_core, keeps track of the deficit it adds to the animation (idle) step,
// and makes sure the deficit sum stays close to zero.
MainFrameTime MainTimerSync::advance_checked(float p_frame_slice, int p_iterations_per_second, float p_idle_step) {
	if (fixed_fps != -1)
		p_idle_step = 1.0 / fixed_fps;

	// Pay back (or collect) the deficit left over from previous frames.
	p_idle_step += time_deficit;

	MainFrameTime ret = advance_core(p_frame_slice, p_iterations_per_second, p_idle_step);

	const double idle_minus_accum = ret.idle_step - time_accum;

	{
		// If the number of physics steps per frame has been consistent over recent frames,
		// clamp the idle step to that typical range.
		float min_average_physics_steps, max_average_physics_steps;
		int consistent_steps = get_average_physics_steps(min_average_physics_steps, max_average_physics_steps);
		if (consistent_steps > 3) {
			ret.clamp_idle(min_average_physics_steps * p_frame_slice, max_average_physics_steps * p_frame_slice);
		}
	}

	// Allow the idle step to deviate from the requested one by at most the physics jitter fix.
	float max_clock_deviation = get_physics_jitter_fix() * p_frame_slice;
	ret.clamp_idle(p_idle_step - max_clock_deviation, p_idle_step + max_clock_deviation);

	// Keep the resulting physics accumulator (time_accum) within [0, p_frame_slice].
	ret.clamp_idle(idle_minus_accum, idle_minus_accum + p_frame_slice);

	time_accum = ret.idle_step - idle_minus_accum;
	time_deficit = p_idle_step - ret.idle_step;

	ret.interpolation_fraction = time_accum / p_frame_slice;

	return ret;
}
// Test a single write which fills multiple blocks, and then overflows the last TEST(BlockPacker, MultiBlockInputPlus) { MockPackerCallback callback; BlockPacker packer; auto buffer = generateByteSequence(0, 33); callback.expects({32}); ASSERT_OK(packer.pack(ConstDataRange(buffer), callback)); auto leftovers = packer.getBlock(); ASSERT_EQ(1, leftovers.length()); }
import sys from threading import Thread, Lock, Condition import time from random import random, randrange import colored from colored import stylize import pandas as pd import csv import os import threading import random # ============= Utilities for MySQL ============================= from datetime import datetime import pymysql from DBUtils.PooledDB import PooledDB # pip3 install DBUtils==1.3 import mysql.connector mydb = mysql.connector.connect( host="localhost", user="root", password="<PASSWORD>" ) mycursor = mydb.cursor() mycursor.execute("CREATE DATABASE IF NOT EXISTS so") mycursor.execute("use so") mycursor.execute("CREATE TABLE IF NOT EXISTS lead(id_file INT AUTO_INCREMENT PRIMARY KEY, lead_id INT, nombre VARCHAR(255), telefono VARCHAR(255), fecha VARCHAR(255), ciudad VARCHAR(255), productor_id INT, fechahora_ingesta TIMESTAMP DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP)") mycursor.execute("CREATE TABLE IF NOT EXISTS copy_lead(id_file INT AUTO_INCREMENT PRIMARY KEY, lead_id INT, nombre VARCHAR(255), telefono VARCHAR(255), fecha VARCHAR(255), ciudad VARCHAR(255), productor_id INT, fechahora_ingesta TIMESTAMP DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP)") mycursor.execute("CREATE TABLE IF NOT EXISTS comprador(compra_id INT AUTO_INCREMENT PRIMARY KEY, id_file INT, comprador INT, monto INT, fechahora TIMESTAMP DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP)") # CONSTRAINT fk_lead FOREIGN KEY(id_file) REFERENCES lead(id_file) # Information required to create a connection object dbServerIP = "0.0.0.0" # IP address of the MySQL database server dbUserName = "root" # User name of the MySQL database server dbUserPassword = "<PASSWORD>" # Password for the MySQL database user databaseToUse = "so" # Name of the MySQL database to be used charSet = "utf8mb4" # Character set cusrorType = pymysql.cursors.DictCursor Crawl_Info_Count = int(sys.argv[1]) mySQLConnectionPool = PooledDB(creator=pymysql, # Python function returning a connection or a Python module, both based on DB-API 2 host=dbServerIP, user=dbUserName, password=<PASSWORD>, database=databaseToUse, autocommit=True, charset=charSet, cursorclass=cusrorType, blocking=False, # maxconnections = Crawl_Info_Count ) # ============= End of utilities for MySQL ============================= class Personas: def __init__(self, idP, nameP, phoneP, date, cityP): self.idP = idP self.nameP = nameP self.phoneP = phoneP self.date = date self.cityP = cityP class Produced: def __init__(self, idP, idC, fecha, bid): self.idP = idP self.idC = idC self.fecha = fecha self.bid = bid df2 = pd.read_csv('personas.csv') # Esto sí va así quemado idp = df2.id namep = df2.nombre phonep = df2.telefono date = df2.fecha cityp = df2.ciudad personas = [] class Compradores: def __init__(self, idC, compradorC, low, high): self.idC = idC self.low = low self.high = high self.compradorC = compradorC compradores = [] try: df = pd.read_csv(str(sys.argv[3])) col1 = df.id for a in range(len(col1)): print(col1[a]) col2 = df.comprador col3 = df.bid_min col4 = df.bid_max for a in range(len(col1)): compradores.append(Compradores(col1[a], col2[a], col3[a], col4[a])) except: print("File not Found or Error in File Format") for b in range(len(idp)): personas.append(Personas(int(idp[b]), namep[b], phonep[b], date[b], cityp[b])) queue = [] produced = [] CAPACITY = int(sys.argv[1]) qlock = Lock() item_ok = Condition(qlock) space_ok = Condition(qlock) timeConsumer = [] timeProducer = [] timeAll = [] class ProducerThread(Thread): def __init__(self, color, ProducerID): 
super(ProducerThread, self).__init__() self.name = color self.ProducerID = ProducerID def run(self): global queue global personas mycolor = self.name UPid = self.ProducerID # IDENTIFICADOR UNICO timeach = [] while True: qlock.acquire() while len(queue) >= CAPACITY: print(stylize('queue is full, stop producing', colored.fg(mycolor))) space_ok.wait() if len(queue) >= CAPACITY: print(stylize('oops, someone filled the space before me', colored.fg(mycolor))) if personas: # si todavia existen registros, produzco startTimeP = time.time() # INICIO TIEMPO person = personas.pop(0) print(stylize(str(person.idP) + ' ' + str(person.date) + ' ' + str(UPid), colored.fg(mycolor))) queue.append(person) # insertar a mysql # =========== MySQL Space ================== try: now = datetime.now() dbConnection_in = mySQLConnectionPool.connection() formatted_date = now.strftime('%Y-%m-%d %H:%M:%S') sqlInsertLead = "INSERT INTO lead (lead_id, nombre, telefono, fecha, ciudad, productor_id, fechahora_ingesta) values ('{}','{}','{}','{}','{}','{}','{}')".format( int(person.idP), str(person.nameP), str(person.phoneP), str(person.date), str(person.cityP), int(UPid), formatted_date) # Obtain a cursor object mySQLCursor = dbConnection_in.cursor() # Execute the SQL stament mySQLCursor.execute(sqlInsertLead) # Close the cursor and connection objects mySQLCursor.close() dbConnection_in.close() except Exception as e: print("Exception: %s" % e) # return # ============ End MySQL Space ===================== # mycursor.execute("INSERT INTO copy_lead SELECT * FROM lead;") endTimeP = time.time() timeTakenP = endTimeP - startTimeP timeach.append(timeTakenP) timeAll.append(timeTakenP) print(stylize("ROWS PRODUCED: " + str(len(timeach)) + " " + mycolor, colored.fg(mycolor))) AverageTimeP = sum(timeach) / len(timeach) print(stylize("AVG TIME TAKEN TO PRODUCE: " + str(AverageTimeP) + " " + mycolor, colored.fg(mycolor))) totalTime = sum(timeAll) print("TOTAL PROGRAM TIME: " + str(totalTime)) print(stylize(len(queue), colored.fg(mycolor))) else: item_ok.wait() # Dejar de producir si ya no hay registros item_ok.notify() # ANADI ESTO sys.exit() # ^ANADI ESTO qlock.release() item_ok.notify() qlock.release() time.sleep(1) class ConsumerThread(Thread): def __init__(self, mycomprador, myminbid, mymaxbid, mycolor): super(ConsumerThread, self).__init__() self.mycomprador = mycomprador self.myminbid = myminbid self.mymaxbid = mymaxbid self.name = mycolor def run(self): global queue global produced global personas global timeAll timeachC = [] mycolor = self.name mycomprador = self.mycomprador myminbid = self.myminbid mymaxbid = self.mymaxbid while True: if personas: qlock.acquire() while not queue: print(stylize('queue is empty, stop consuming', colored.fg(mycolor))) item_ok.wait() if not queue: print(stylize('oops, someone consumed the food before me', colored.fg(mycolor))) startTimeC = time.time() finalbid = random.randrange(myminbid, mymaxbid) # Crear lead person = queue.pop(0) # sqlCopyLead = "INSERT INTO copy_lead(lead_id, nombre, telefono, fecha, ciudad, productor_id, fechahora_ingesta) SELECT lead_id, nombre, telefono, fecha, ciudad, productor_id, fechahora_ingesta FROM lead LIMIT 1" # ================ MySQL Space ================== # try: now = datetime.now() dbConnection_comprador = mySQLConnectionPool.connection() formatted_date = now.strftime('%Y-%m-%d %H:%M:%S') mycursor.execute("SELECT id_file FROM lead LIMIT 1") my_lead = mycursor.fetchone() sqlDropLead = "DELETE FROM lead LIMIT 1" sqlInsertComprador = "INSERT INTO comprador 
(compra_id, id_file, comprador, monto, fechahora) values ('{}','{}','{}', '{}','{}')".format( random.sample((1, 9999999), 1), person.idP, mycomprador, finalbid, formatted_date) # Obtain a cursor object mySQLCursorComprador = dbConnection_comprador.cursor() # Execute the SQL stament # mySQLCursorComprador.execute(sqlCopyLead) mySQLCursorComprador.execute(sqlDropLead) mySQLCursorComprador.execute(sqlInsertComprador) # Close the cursor and connection objects mySQLCursorComprador.close() dbConnection_comprador.close() # except Exception as e: # print("Exception: %s" % e) # return # ================ End MySQL Space ================== # Sacar de Mysql # with open('comprador.csv', 'a+') as final: # writer = csv.writer(final) # writer.writerow([person.idP, myid, person.date, finalbid, mycolor]) # final.close() # Cerrar coneccion a archivo para que otros threads la puedan usar produced.append(Produced(person.idP, mycomprador, person.date, finalbid)) # meter a archivo comprador print(stylize("CONSUMED", colored.fg(mycolor))) endTimeC = time.time() timeTakenC = endTimeC - startTimeC timeAll.append(timeTakenC) timeachC.append(timeTakenC) print(stylize("ROWS CONSUMED: "+ str(len(timeachC)) + " " + mycolor, colored.fg(mycolor))) totalTime = sum(timeAll) print("TOTAL PROGRAM TIME: " + str(totalTime)) print(len(produced)) print(len(queue)) space_ok.notify() qlock.release() time.sleep(1) if not personas: qlock.acquire() if not queue: # apagar thread si ya no hay nada en el buffer print(stylize('queue is empty, and producer is stopped, thread shutting down...', colored.fg(mycolor))) os._exit(0) item_ok.wait() space_ok.notify() # ANADI ESTO # ^ANADI ESTO qlock.release() print("SHUTTING DOWN THREAD") else: # crear lead startTimeC = time.time() finalbid = random.randrange(myminbid, mymaxbid) person = queue.pop(0) # with open('comprador.csv', 'a+') as final: # writer = csv.writer(final) # writer.writerow([person.idP, myid, person.date, finalbid, mycolor]) # final.close() # Cerrar coneccion a archivo para que otros threads la puedan usar produced.append(Produced(person.idP, mycomprador, person.date, finalbid)) print(stylize("CONSUMED", colored.fg(mycolor))) endTimeC = time.time() timeTakenC = endTimeC - startTimeC timeAll.append(timeTakenC) timeachC.append(timeTakenC) print(stylize("ROWS CONSUMED: " + str(len(timeachC)) + " " + mycolor, colored.fg(mycolor))) totalTime = sum(timeAll) print("TOTAL PROGRAM TIME: " + str(totalTime)) print(stylize(len(queue), colored.fg(mycolor))) print(len(produced)) space_ok.notify() qlock.release() time.sleep(1) # COSAS NUEVAS llenando = True # IMPORTANTE, AGREGAR producidos = [] consumidos = [] class ProducerThreadAlternance(Thread): def __init__(self, color, ProducerID): super(ProducerThreadAlternance, self).__init__() self.name = color self.ProducerID = ProducerID def run(self): global queue global llenando global personas timeachP = [] mycolor = self.name UPid = self.ProducerID while True: while llenando: qlock.acquire() if len(queue) <= CAPACITY: if len(queue) == CAPACITY: print(stylize('queue is full, stop producing', colored.fg(mycolor))) llenando = False space_ok.wait() qlock.release() if len(queue) >= CAPACITY: print(stylize('oops, someone filled the space before me', colored.fg(mycolor))) if llenando and personas: if personas: startTimep = time.time() person = personas.pop(0) print(stylize(str(person.idP) + ' ' + str(person.date), colored.fg(mycolor))) queue.append(person) # <- insertar a mysql # =========== MySQL Space ================== try: now = datetime.now() 
dbConnection_in = mySQLConnectionPool.connection() formatted_date = now.strftime('%Y-%m-%d %H:%M:%S') sqlInsertLead = "INSERT INTO lead (lead_id, nombre, telefono, fecha, ciudad, productor_id, fechahora_ingesta) values ('{}','{}','{}','{}','{}','{}','{}')".format( int(person.idP), str(person.nameP), str(person.phoneP), str(person.date), str(person.cityP), int(UPid), formatted_date) # Obtain a cursor object mySQLCursor = dbConnection_in.cursor() # Execute the SQL stament mySQLCursor.execute(sqlInsertLead) # Close the cursor and connection objects mySQLCursor.close() dbConnection_in.close() except Exception as e: print("Exception: %s" % e) # return # ============ End MySQL Space ===================== # mycursor.execute("INSERT INTO copy_lead SELECT * FROM lead;") endTimeP = time.time() timeTakenP = endTimeP - startTimep timeachP.append(timeTakenP) timeAll.append(timeTakenP) AverageTimeTakenP = sum(timeachP) / len(timeachP) TotalTime = sum(timeAll) print(stylize("ROWS PRODUCED: " + str(len(timeachP)) + " " + mycolor, colored.fg(mycolor))) print(stylize("Average Time taken to Produce: " + str(AverageTimeTakenP) + " " + mycolor, colored.fg(mycolor))) print("Total Time: " + str(TotalTime)) print(stylize(len(queue), colored.fg(mycolor))) item_ok.notify() # notificar qlock.release() # soltar time.sleep(0.5) else: item_ok.wait() # Dejar de producir si ya no hay registros item_ok.notify() qlock.release() time.sleep(0.5) class ConsumerThreadAlternance(Thread): def __init__(self, mycomprador, myminbid, mymaxbid, mycolor): super(ConsumerThreadAlternance, self).__init__() self.mycomprador = mycomprador self.myminbid = myminbid self.mymaxbid = mymaxbid self.name = mycolor def run(self): global queue global llenando global produced global personas timeachC = [] mycolor = self.name mycomprador = self.mycomprador myminbid = self.myminbid mymaxbid = self.mymaxbid while True: while not llenando: qlock.acquire() if not queue: print(stylize('queue is empty, stop consuming', colored.fg(mycolor))) llenando = True item_ok.wait() space_ok.notify() qlock.release() if not queue: print(stylize('oops, someone consumed the food before me', colored.fg(mycolor))) if not llenando: startTimeC = time.time() finalbid = randrange(myminbid, mymaxbid) # Crear lead person = queue.pop(0) # Sacar de Mysql # ================ MySQL Space ================== # try: now = datetime.now() dbConnection_comprador = mySQLConnectionPool.connection() formatted_date = now.strftime('%Y-%m-%d %H:%M:%S') mycursor.execute("SELECT id_file FROM lead LIMIT 1") my_lead = mycursor.fetchone() sqlDropLead = "DELETE FROM lead LIMIT 1" sqlInsertComprador = "INSERT INTO comprador (compra_id, id_file, comprador, monto, fechahora) values ('{}','{}','{}', '{}','{}')".format( random.sample((1, 9999999), 1), person.idP, mycomprador, finalbid, formatted_date) # Obtain a cursor object mySQLCursorComprador = dbConnection_comprador.cursor() # Execute the SQL stament # mySQLCursorComprador.execute(sqlCopyLead) mySQLCursorComprador.execute(sqlDropLead) mySQLCursorComprador.execute(sqlInsertComprador) # Close the cursor and connection objects mySQLCursorComprador.close() dbConnection_comprador.close() # except Exception as e: # print("Exception: %s" % e) # return # ================ End MySQL Space ================== # with open('comprador.csv', 'a+') as final: # writer = csv.writer(final) # writer.writerow([person.idP, mycomprador, person.date, finalbid, mycolor]) # final.close() # Cerrar coneccion a archivo para que otros threads la puedan usar produced.append( 
Produced(person.idP, mycomprador, person.date, finalbid)) # meter a archivo comprador print(stylize("CONSUMED", colored.fg(mycolor))) endTimeC = time.time() timeTakenC = endTimeC - startTimeC timeachC.append(timeTakenC) timeAll.append(timeTakenC) TotalTime = sum(timeAll) print(stylize("ROWS PRODUCED: " + str(len(timeachC)) + " " + mycolor, colored.fg(mycolor))) print("Total Time: " + str(TotalTime)) print(len(produced)) print(len(queue)) space_ok.notify() qlock.release() time.sleep(0.5) # buffSize productores consumerFile Alternance # 1 2 3 4 producerList = [] ColorList = ['green', 'red', 'blue', 'yellow', 'white', 'magenta', 'black', 'cyan', 'orchid', 'tan', 'violet', 'plum_2', 'honeydew_2', 'pink_1', 'indian_red_1a', 'sky_blue_2', 'light_sky_blue_3a', 'dark_sea_green_3a'] alternance = int(sys.argv[4]) print("Runs up to this point") if alternance == 0: print("non alternance") for i in range(int(sys.argv[2])): producer = ProducerThread(ColorList[i], i) producerList.append(producer) producer.setDaemon(False) producer.start() try: print("trying") color = 0 consumerList = [] for i in compradores: print("A") consumer = ConsumerThread(i.idC, i.low, i.high, ColorList[color]) color = color + 1 print("B") consumerList.append(consumer) print("C") consumer.start() print("D") except: print("No consumer file given, only producer threads will be generated") if alternance == 1: print("alternance") for i in range(int(sys.argv[2])): producer = ProducerThreadAlternance(ColorList[i], i) producerList.append(producer) producer.setDaemon(False) producer.start() try: print("trying") consumerList = [] for i in compradores: consumer = ConsumerThreadAlternance(i.idC, i.low, i.high, "blue") consumerList.append(consumer) consumer.start() except: print("No consumer file given, only producer threads will be generated") # print("DONE") # ConsumerThread(name='yellow', daemon=True).start() # for a in compradores: # print(a.idC, a.low, a.high) # for c in personas: # print(c.idP, c.date)
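The script above implements a bounded-buffer producer/consumer with a shared lock and two condition variables, but the hand-rolled wait/notify handling appears fragile (some branches seem to release qlock twice or exit while holding it). As a point of comparison, here is a minimal, self-contained sketch of the same pattern using threading.Condition; the names and sizes are illustrative, and it deliberately omits the MySQL and CSV details.

import threading

CAPACITY = 5
buffer = []
cond = threading.Condition()          # one lock guards the buffer; wait/notify on it
done = False                          # set once the producer runs out of records

def producer(records):
    global done
    for record in records:
        with cond:
            while len(buffer) >= CAPACITY:
                cond.wait()           # buffer full: block until a consumer makes room
            buffer.append(record)
            cond.notify_all()         # wake consumers waiting for items
    with cond:
        done = True
        cond.notify_all()             # let consumers observe shutdown

def consumer(out):
    while True:
        with cond:
            while not buffer and not done:
                cond.wait()           # buffer empty: block until the producer adds items
            if not buffer and done:
                return                # nothing left and the producer has finished
            item = buffer.pop(0)
            cond.notify_all()         # wake the producer waiting for space
        out.append(item)              # "consume" outside the lock

results = []
threads = [threading.Thread(target=producer, args=(list(range(20)),)),
           threading.Thread(target=consumer, args=(results,)),
           threading.Thread(target=consumer, args=(results,))]
for t in threads:
    t.start()
for t in threads:
    t.join()
assert sorted(results) == list(range(20))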
/**
 * Auto-converts the given response object to a representation matching the layout used by the API call. The layout
 * will be determined heuristically by analyzing the route's actual return value and comparing it to the type of
 * values typically returned by a specific layout.
 *
 * @param result The value returned by invoking the actual API call
 * @param response Response object representing the value expected to be returned by the API call
 *
 * @return Representation of the given response object matching the used layout. This can either be a data object,
 *         an APIResponse DTO or a JSON representation.
 *
 * @throws IOException If an exception occurred while auto-converting a response object into its JsonNode
 *         representation
 */
private Object buildExpectation(T result, ResponseData<?> response) throws IOException {
    if (isUsingExtendedLayout(result)) {
        return response.getDTO();
    } else if (isUsingJsonLayout(result)) {
        return response.getJson();
    } else {
        return ((APIResponse<?>)response.getDTO()).getData();
    }
}
// noinspection JSUnusedGlobalSymbols
import {SmartTransactionsProductModel} from './smart-transactions-product-model';

export class SmartTransactionsList {
    /**
     * Number of existing smart transactions
     */
    count?: number;
    /**
     * SmartTransactionsList
     */
    data?: Array<SmartTransactionsProductModel>;
}
export * from './recipeService'
/* AizuOnline A1510 Title Independent Research PE */ #include <stdio.h> //Global data section int cas; int N; int hako[5][5][5]; int hakow[5][5][5]; int M1,M2; int a[27]; int b[27]; // int mawari(int x,int y,int z) { int i,j,k,c; c=0; for(i=-1;i<=1;i++) for(j=-1;j<=1;j++) for(k=-1;k<=1;k++) { if((i||j||k) && x+i >=0 && x+i < 5 && y+j >= 0 && y+j <5 && z+k >=0 && z+k < 5){ c+=hako[x+i][y+j][z+k]; #ifdef DEBUG2 if(x==0 && y==0 && z==0) printf("%d %d %d ;%d\n",x+i,y+j,z+k,hako[x+i][y+j][z+k]); #endif } } return(c); } int in_val(int x,int n,int *array) { int i; for(i=0;i<n;i++) if(array[i]==x) return(1); return(0); } void next_day() { int i,j,k; for(i=0;i<5;i++) for(j=0;j<5;j++) for(k=0;k<5;k++) if(hako[i][j][k]==0) { if(in_val(mawari(i,j,k),M1,a)) hakow[i][j][k]=1; else hakow[i][j][k]=0; } else { if(in_val(mawari(i,j,k),M2,b)) hakow[i][j][k]=1; else hakow[i][j][k]=0; } for(i=0;i<5;i++) for(j=0;j<5;j++) for(k=0;k<5;k++) hako[i][j][k]=hakow[i][j][k]; } void display() { int i,j,k; if(cas>1) printf("\n"); printf("Case %d:\n",cas); for(i=0;i<5;i++) { for(j=0;j<5;j++) { for(k=0;k<5;k++) printf("%d",(hako[i][j][k] % 10)); printf("\n"); } if(i<4) printf("\n"); } } void display_mawari() { int i,j,k; for(i=0;i<5;i++) { for(j=0;j<5;j++) { for(k=0;k<5;k++) printf("%d:",mawari(i,j,k)); printf("\n"); } printf("\n"); } } main() { int i,j,k; char dummy[10]; cas=0; while(EOF!=scanf("%d",&N) && N) { cas++; for(i=0;i<5;i++) { for(j=0;j<5;j++) for(k=0;k<5;k++) scanf("%1d",&hako[i][j][k]); //scanf("%s",dummy); } scanf("%d",&M1); for(i=0;i<M1;i++) scanf("%d",&a[i]); scanf("%d",&M2); for(i=0;i<M2;i++) scanf("%d",&b[i]); //display(); //display_mawari(); for(i=0;i<N;i++) next_day(); display(); } return(0); }
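The C solution above simulates a 3-D cellular automaton on a 5×5×5 grid: a dead cell becomes alive when its count of live neighbours (26-neighbourhood, counted by mawari) appears in list a, and a live cell stays alive when the count appears in list b. A compact restatement of that one-day update step, in Python with illustrative names, is:

# One day of the 5x5x5 automaton from the C code above: grid[x][y][z] holds 0/1,
# `birth` corresponds to list a (dead -> alive counts) and `survive` to list b
# (alive -> alive counts). Names are illustrative only.
from itertools import product

def live_neighbours(grid, x, y, z):
    total = 0
    for dx, dy, dz in product((-1, 0, 1), repeat=3):
        if (dx, dy, dz) == (0, 0, 0):
            continue
        nx, ny, nz = x + dx, y + dy, z + dz
        if 0 <= nx < 5 and 0 <= ny < 5 and 0 <= nz < 5:
            total += grid[nx][ny][nz]
    return total

def next_day(grid, birth, survive):
    return [[[1 if (live_neighbours(grid, x, y, z) in
                    (birth if grid[x][y][z] == 0 else survive)) else 0
              for z in range(5)] for y in range(5)] for x in range(5)]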
<reponame>zealoussnow/chromium<gh_stars>1000+ /* Copyright (c) 2013 <NAME> */ /* Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: - Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. - Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* This is meant to be a simple example of encoding and decoding audio using Opus. It should make it easy to understand how the Opus API works. For more information, see the full API documentation at: https://www.opus-codec.org/docs/ */ #include <stdlib.h> #include <errno.h> #include <string.h> #include <opus.h> #include <stdio.h> /*The frame size is hardcoded for this sample code but it doesn't have to be*/ #define FRAME_SIZE 960 #define SAMPLE_RATE 48000 #define CHANNELS 2 #define APPLICATION OPUS_APPLICATION_AUDIO #define BITRATE 64000 #define MAX_FRAME_SIZE 6*960 #define MAX_PACKET_SIZE (3*1276) int main(int argc, char **argv) { char *inFile; FILE *fin; char *outFile; FILE *fout; opus_int16 in[FRAME_SIZE*CHANNELS]; opus_int16 out[MAX_FRAME_SIZE*CHANNELS]; unsigned char cbits[MAX_PACKET_SIZE]; int nbBytes; /*Holds the state of the encoder and decoder */ OpusEncoder *encoder; OpusDecoder *decoder; int err; if (argc != 3) { fprintf(stderr, "usage: trivial_example input.pcm output.pcm\n"); fprintf(stderr, "input and output are 16-bit little-endian raw files\n"); return EXIT_FAILURE; } /*Create a new encoder state */ encoder = opus_encoder_create(SAMPLE_RATE, CHANNELS, APPLICATION, &err); if (err<0) { fprintf(stderr, "failed to create an encoder: %s\n", opus_strerror(err)); return EXIT_FAILURE; } /* Set the desired bit-rate. You can also set other parameters if needed. The Opus library is designed to have good defaults, so only set parameters you know you need. Doing otherwise is likely to result in worse quality, but better. */ err = opus_encoder_ctl(encoder, OPUS_SET_BITRATE(BITRATE)); if (err<0) { fprintf(stderr, "failed to set bitrate: %s\n", opus_strerror(err)); return EXIT_FAILURE; } inFile = argv[1]; fin = fopen(inFile, "rb"); if (fin==NULL) { fprintf(stderr, "failed to open input file: %s\n", strerror(errno)); return EXIT_FAILURE; } /* Create a new decoder state. 
*/ decoder = opus_decoder_create(SAMPLE_RATE, CHANNELS, &err); if (err<0) { fprintf(stderr, "failed to create decoder: %s\n", opus_strerror(err)); return EXIT_FAILURE; } outFile = argv[2]; fout = fopen(outFile, "wb"); if (fout==NULL) { fprintf(stderr, "failed to open output file: %s\n", strerror(errno)); return EXIT_FAILURE; } while (1) { int i; unsigned char pcm_bytes[MAX_FRAME_SIZE*CHANNELS*2]; int frame_size; size_t samples; /* Read a 16 bits/sample audio frame. */ samples = fread(pcm_bytes, sizeof(short)*CHANNELS, FRAME_SIZE, fin); /* For simplicity, only read whole frames. In a real application, * we'd pad the final partial frame with zeroes, record the exact * duration, and trim the decoded audio to match. */ if (samples != FRAME_SIZE) { break; } /* Convert from little-endian ordering. */ for (i=0;i<CHANNELS*FRAME_SIZE;i++) { in[i]=pcm_bytes[2*i+1]<<8|pcm_bytes[2*i]; } /* Encode the frame. */ nbBytes = opus_encode(encoder, in, FRAME_SIZE, cbits, MAX_PACKET_SIZE); if (nbBytes<0) { fprintf(stderr, "encode failed: %s\n", opus_strerror(nbBytes)); return EXIT_FAILURE; } /* Decode the data. In this example, frame_size will be constant because the encoder is using a constant frame size. However, that may not be the case for all encoders, so the decoder must always check the frame size returned. */ frame_size = opus_decode(decoder, cbits, nbBytes, out, MAX_FRAME_SIZE, 0); if (frame_size<0) { fprintf(stderr, "decoder failed: %s\n", opus_strerror(frame_size)); return EXIT_FAILURE; } /* Convert to little-endian ordering. */ for(i=0;i<CHANNELS*frame_size;i++) { pcm_bytes[2*i]=out[i]&0xFF; pcm_bytes[2*i+1]=(out[i]>>8)&0xFF; } /* Write the decoded audio to file. */ fwrite(pcm_bytes, sizeof(short), frame_size*CHANNELS, fout); } /*Destroy the encoder state*/ opus_encoder_destroy(encoder); opus_decoder_destroy(decoder); fclose(fin); fclose(fout); return EXIT_SUCCESS; }
Results of peripheral laser photocoagulation in pars planitis. PURPOSE To determine the effect of peripheral retinal laser photocoagulation (PLP) on visual acuity, intraocular inflammation, and other ocular findings, including retinal neovascularization in eyes with pars planitis. METHODS A retrospective chart review of eyes with pars planitis that had undergone PLP. RESULTS Twenty-two eyes in 17 patients with pars planitis had undergone treatment with PLP at 2 centers. The mean age at the time of treatment was 19.3 years. Following treatment, mean follow-up was 16.3 months (range, 6 to 37 months). Mean visual acuity was 20/60 preoperatively and 20/50 postoperatively. This level of improvement was not statistically significant (P > .10), but there was a statistically significant decrease in the use of corticosteroids between the preoperative examination and the last postoperative examination (86% versus 27%, P < .05). There was also a statistically significant decrease in vitritis at the last follow-up (P = .0008) and a decrease in neovascularization of the vitreous base (P = .03) and in clinically apparent cystoid macular edema (P = .02). Epiretinal membranes were noted in 23% of eyes preoperatively and in 45% of eyes postoperatively. Only one of these epiretinal membranes was considered to be visually significant. One eye developed a tonic dilated pupil, which slowly improved. CONCLUSIONS Although the long-term natural history of clinical findings in pars planitis is not well documented, PLP appears to decrease the need for corticosteroids while stabilizing visual acuity. It also appears to decrease vitreous inflammation. PLP has few complications and should be considered in patients with pars planitis who are unresponsive or have adverse reactions to corticosteroids.
/** * A class for testing HTML parsing of class comment to find the img tag * * @author Goran Stack * */ public class TestHtmlParser implements FocusListener, DocumentListener { private static final String HTML_SAMPLE = "<html>\n" + "<img src=\"doc-files/ColorConstants.png\">\n" + "<p> \nThis is an example of a resource class with visual samples included in the Javadoc. " + "The images for the Javadoc are created by a few lines of code in a test class. " + "The test class is processed by the screenshot-maven-plugin in a Maven build to produce the images. </p>\n" + "</html>"; private JTextArea textArea; private JLabel result; public static void main(String[] args) { final TestHtmlParser instance = new TestHtmlParser(); SwingUtilities.invokeLater(new Runnable() { @Override public void run() { instance.openInWindow(); } }); } private void openInWindow() { JXFrame window = new JXFrame(getClass().getSimpleName(), true); window.getContentPane().add(createPanel()); window.setSize(1000, 800); window.setLocationRelativeTo(null); window.setVisible(true); } private Component createPanel() { result = new JLabel("Nothing"); textArea = new JTextArea(HTML_SAMPLE); textArea.setLineWrap(true); textArea.setRows(5); textArea.setEditable(true); textArea.addFocusListener( this ); textArea.getDocument().addDocumentListener( this ); JPanel panel = new JPanel(new MigLayout(new LC().wrapAfter(2).fillX().fillY())); panel.add(new JLabel("HTML:")); panel.add(textArea, new CC().grow()); panel.add(new JLabel("Image src path:")); panel.add(result); return panel; } @Override public void focusGained(FocusEvent e) { } @Override public void focusLost(FocusEvent e) { update(); } private void update() { result.setText("nothing"); HTMLEditorKit.ParserCallback callback = new HTMLEditorKit.ParserCallback() { @Override public void handleText(char[] data, int pos) { } @Override public void handleSimpleTag(Tag tag, MutableAttributeSet a, int pos) { if (tag.equals(Tag.IMG)) { Object attribute = a.getAttribute(HTML.Attribute.SRC); if (attribute instanceof String) { String srcAttribute = (String) attribute; result.setText(srcAttribute + " pos: " + pos); } } } }; try { new ParserDelegator().parse(new StringReader(textArea.getText()), callback, true); } catch (IOException e) { throw new RuntimeException(e); } } @Override public void changedUpdate(DocumentEvent e) { } @Override public void insertUpdate(DocumentEvent e) { update(); } @Override public void removeUpdate(DocumentEvent e) { update(); } }
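The Swing ParserDelegator callback above pulls the src attribute out of each <img> tag it encounters. For comparison, the same extraction takes a few lines with Python's standard html.parser; the HTMLParser class, handle_starttag hook, and getpos call are standard-library APIs, while the surrounding class and variable names are illustrative.

from html.parser import HTMLParser

class ImgSrcFinder(HTMLParser):
    """Collects the src attribute (and position) of every <img> tag."""
    def __init__(self):
        super().__init__()
        self.sources = []

    def handle_starttag(self, tag, attrs):
        if tag == "img":
            src = dict(attrs).get("src")
            if src is not None:
                # getpos() returns the (line, column) where the current tag starts.
                self.sources.append((src, self.getpos()))

finder = ImgSrcFinder()
finder.feed('<html>\n<img src="doc-files/ColorConstants.png">\n<p>example</p>\n</html>')
print(finder.sources)   # [('doc-files/ColorConstants.png', (2, 0))]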
/** * Created by Viliam Repan (lazyman). */ public class PrismEntityPair<T> { private PrismValue prism; private T repository; public PrismEntityPair(PrismValue prism, T repository) { this.prism = prism; this.repository = repository; } public PrismValue getPrism() { return prism; } public T getRepository() { return repository; } @Override public boolean equals(Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; PrismEntityPair that = (PrismEntityPair) o; return repository != null ? repository.equals(that.repository) : that.repository == null; } @Override public int hashCode() { return repository != null ? repository.hashCode() : 0; } }
Dynamics of glass-forming liquids. XIII. Microwave heating in slow motion. Using time-resolved nonlinear dielectric relaxation measurements at fields as high as 450 kV/cm, the nonthermal effects of energy absorption are studied for simple and associating polar liquids in their supercooled state. The experiment is a low frequency analog of microwave heating and facilitates tracking the flow of energy in time, as it accumulates in slow degrees of freedom and transfers eventually to the vibrational heat bath of the liquid. Most findings agree with a phenomenological model of heterogeneous relaxation regarding structure and configurational temperature. The relevant thermal behavior of monohydroxy alcohols differs considerably from the cases of simple nonassociating liquids due to their distinct origins of the prominent dielectric absorption mode for the two classes of liquids. Nonthermal effects are observed as dynamics that are accelerated without increasing sample temperature, but for the present low frequencies the changes remain too small to explain the high efficiencies reported for microwave chemistry. Limitations as to how rapidly the faster relaxation time constants are able to adjust to temperature separate the modes of the dispersive alpha-relaxation into a "relaxation" and an "aging" regime, thereby explaining the incompatibility of heterogeneous dynamics with common physical aging observations.
Steady your sights on Parkinson's disease.

A few years ago, my husband Brian, 44, was taken to the ED after he was found lying unresponsive in the hallway of a public building. He was described as having seizurelike activity with tremors in his right arm and muscle contractions of his right leg. At the ED, Brian was lethargic but oriented to person, place, and time. His BP was 88/50, and his heart rate was 100 bpm. A computed tomography (CT) scan of his head didn’t show any abnormalities. What had happened was actually a bad day in the life of someone with Parkinson’s disease (PD). In this article, I’ll explain what PD is and how you can help someone like Brian manage its effects and ward off complications.
def apt(x):
    # Strip digits off x one at a time: after the loop, `dig` holds the most
    # significant (leading) digit of x and `Count` the number of digits in x.
    dig = 0
    Count = 0
    while x > 0:
        dig = x % 10
        x = x // 10
        Count += 1
    # Combine the two: (leading digit - 1) * 10 plus the Count-th triangular number.
    return (dig - 1) * 10 + (Count * (Count + 1) // 2)


# Read the number of test cases, then one integer per case.
t = int(input())
for i in range(t):
    x = int(input())
    print(apt(x))
//-----------------------------------------------------------------------------
// Purpose: Activates the player's special ability
// called when the player hits their "special" key
//-----------------------------------------------------------------------------
void TeamFortressViewport::InputPlayerSpecial(void)
{
	if (!m_iInitialized)
		return;

	if (g_iPlayerClass == PC_ENGINEER || g_iPlayerClass == PC_SPY)
	{
		ShowCommandMenu(gViewPort->m_StandardMenu);

		if (m_pCurrentCommandMenu)
		{
			m_pCurrentCommandMenu->KeyInput('7');
		}
	}
	else
	{
		ClientCmd("_special");
	}
}
package com.github.diegopacheco.sandboxspring.dto;

public class OutputMessage {

    private String from;
    private String text;
    private String time;

    public OutputMessage(){}

    public OutputMessage(String from, String text, String time) {
        this.from = from;
        this.text = text;
        this.time = time;
    }

    public String getFrom() {
        return from;
    }

    public String getText() {
        return text;
    }

    public String getTime() {
        return time;
    }

    public void setFrom(String from) {
        this.from = from;
    }

    public void setText(String text) {
        this.text = text;
    }

    public void setTime(String time) {
        this.time = time;
    }

    @Override
    public String toString() {
        return "OutputMessage{" +
                "from='" + from + '\'' +
                ", text='" + text + '\'' +
                ", time=" + time +
                '}';
    }
}
import React from 'react'; import CloseIcon from '../../../icons/CloseIcon'; import useChatContext from '../../../hooks/useChatContext/useChatContext'; export default function ChatWindowHeader() { const { setIsChatWindowOpen } = useChatContext(); return ( <div className="flex w-full py-2 justify-between items-center px-2 bg-white"> <div className="text-lg">Chat</div> <button className="" onClick={() => setIsChatWindowOpen(false)}> <CloseIcon /> </button> </div> ); }
An integrated approach to hospital strategic planning, quality assurance, and continuous quality improvement. Like many other healthcare organizations today, the authors' facility, a 306-bed acute care community hospital in Michigan, strives to visualize and make a transition from traditional quality assurance to continuous quality improvement. The Juran Trilogy provided the insight that strategic planning, measurement, and continuous improvement must exist side by side. At the authors' facility, this realization resulted in the hospital quality plan, which treats each of these components as part of the foundation for quality. The authors explain this model and the reporting and communication mechanisms that support it.
package com.highjet.portal.modules.portal.dao; import com.baomidou.mybatisplus.core.mapper.BaseMapper; import com.highjet.portal.modules.portal.entity.SysLoginLogEntity; /** * 登录日志 * * @author jiangyin * @email * @date 2018-07-19 22:57:30 */ public interface SysLoginLogDao extends BaseMapper<SysLoginLogEntity> { }
Looking to take a walk on the wild side? A centre located in Golden, British Columbia provides a once-in-a-lifetime experience to visitors looking to try something totally unique.

At the Northern Lights Wolf Centre you can expect to come face to face with some of nature’s fiercest canines and come away completely unharmed. You can take a tour upon arrival with no reservation required. They do a 25-minute interpretive presentation about wolves and the role that they play in the environment. You’ll learn about the pivotal role these animals play in nature, and also why they have been deeply misrepresented.

Up Close And Personal: Walk With Wolves

If you want to have a more intimate experience, you can walk with wolves on a special photography hike. The walk takes place in the wilderness where jagged mountain peaks soar high above the forest floor. The wolves roam freely around you and treat you as a member of their pack! Qualified wolf handlers are always around to ensure that the experience is completely safe. For this tour, you will need to make a reservation.

The forest is filled with meandering rivers, wetlands, and snow-capped peaks, which make for a stunning backdrop for nature pictures (hello, Instagram!) You can also opt to take a full day-trip to the Northern Lights Wolf Centre and enjoy even more interactive fun as well as BBQ during the spring, summer and fall months. The tours are open all year long. Family tours are also available where children may learn more about wolves and view them from their enclosures.

All of the wolves are bred in captivity, but they are pure-bred wolves. As a note, the centre asks that you please leave your dogs and other pets at home when you visit the centre. Wolves are territorial animals and therefore this policy is strictly enforced.

If you are looking for some more hikes, check out the 5 Best Hiking Trails To Explore In Burnaby and also this Easy Hike On Vancouver Island That Will Lead You To A Triple-Tumbling Waterfall.
/* * Copyright (c) 2000-2001,2005 Silicon Graphics, Inc. * All Rights Reserved. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it would be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write the Free Software Foundation, * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #ifndef __XFS_DIR2_H__ #define __XFS_DIR2_H__ struct uio; struct xfs_dabuf; struct xfs_da_args; struct xfs_dir2_put_args; struct xfs_bmap_free; struct xfs_inode; struct xfs_mount; struct xfs_trans; /* * Directory version 2. * There are 4 possible formats: * shortform * single block - data with embedded leaf at the end * multiple data blocks, single leaf+freeindex block * data blocks, node&leaf blocks (btree), freeindex blocks * * The shortform format is in xfs_dir2_sf.h. * The single block format is in xfs_dir2_block.h. * The data block format is in xfs_dir2_data.h. * The leaf and freeindex block formats are in xfs_dir2_leaf.h. * Node blocks are the same as the other version, in xfs_da_btree.h. */ /* * Byte offset in data block and shortform entry. */ typedef __uint16_t xfs_dir2_data_off_t; #define NULLDATAOFF 0xffffU typedef uint xfs_dir2_data_aoff_t; /* argument form */ /* * Directory block number (logical dirblk in file) */ typedef __uint32_t xfs_dir2_db_t; /* * Byte offset in a directory. */ typedef xfs_off_t xfs_dir2_off_t; /* * For getdents, argument struct for put routines. */ typedef int (*xfs_dir2_put_t)(struct xfs_dir2_put_args *pa); typedef struct xfs_dir2_put_args { xfs_off_t cook; /* cookie of (next) entry */ xfs_intino_t ino; /* inode number */ xfs_dirent_t *dbp; /* buffer pointer */ char *name; /* directory entry name */ int namelen; /* length of name */ int done; /* output: set if value was stored */ xfs_dir2_put_t put; /* put function ptr (i/o) */ struct uio *uio; /* uio control structure */ } xfs_dir2_put_args_t; /* * Generic directory interface routines */ extern void xfs_dir_startup(void); extern void xfs_dir_mount(struct xfs_mount *mp); extern int xfs_dir_isempty(struct xfs_inode *dp); extern int xfs_dir_init(struct xfs_trans *tp, struct xfs_inode *dp, struct xfs_inode *pdp); extern int xfs_dir_createname(struct xfs_trans *tp, struct xfs_inode *dp, char *name, int namelen, xfs_ino_t inum, xfs_fsblock_t *first, struct xfs_bmap_free *flist, xfs_extlen_t tot); extern int xfs_dir_lookup(struct xfs_trans *tp, struct xfs_inode *dp, char *name, int namelen, xfs_ino_t *inum); extern int xfs_dir_removename(struct xfs_trans *tp, struct xfs_inode *dp, char *name, int namelen, xfs_ino_t ino, xfs_fsblock_t *first, struct xfs_bmap_free *flist, xfs_extlen_t tot); extern int xfs_dir_getdents(struct xfs_trans *tp, struct xfs_inode *dp, uio_t *uio, int *eofp); extern int xfs_dir_replace(struct xfs_trans *tp, struct xfs_inode *dp, char *name, int namelen, xfs_ino_t inum, xfs_fsblock_t *first, struct xfs_bmap_free *flist, xfs_extlen_t tot); extern int xfs_dir_canenter(struct xfs_trans *tp, struct xfs_inode *dp, char *name, int namelen); extern int xfs_dir_ino_validate(struct xfs_mount *mp, xfs_ino_t ino); /* * Utility routines for v2 directories. 
*/ extern int xfs_dir2_grow_inode(struct xfs_da_args *args, int space, xfs_dir2_db_t *dbp); extern int xfs_dir2_isblock(struct xfs_trans *tp, struct xfs_inode *dp, int *vp); extern int xfs_dir2_isleaf(struct xfs_trans *tp, struct xfs_inode *dp, int *vp); extern int xfs_dir2_shrink_inode(struct xfs_da_args *args, xfs_dir2_db_t db, struct xfs_dabuf *bp); #endif /* __XFS_DIR2_H__ */
/**
 * Filter to add mandatory headers to all requests
 *
 * @author Rajaram Kaliyaperumal
 * @since 2015-11-19
 * @version 0.1
 */
@Component
public class ApplicationFilter implements Filter {

    private FilterConfig filterConfig;

    @Override
    public void init(FilterConfig filterConfig) throws ServletException {
        this.filterConfig = filterConfig;
    }

    @Override
    public void doFilter(ServletRequest sr, ServletResponse sr1, FilterChain fc)
            throws IOException, ServletException {
        HttpServletResponse response = (HttpServletResponse) sr1;
        HttpServletRequest request = (HttpServletRequest) sr;
        response.setHeader(HttpHeaders.SERVER, "FAIR data point (JAVA)");
        response.setHeader(HttpHeaders.ACCESS_CONTROL_ALLOW_ORIGIN, "*");
        response.setHeader(HttpHeaders.ALLOW, (RequestMethod.GET.name()));
        response.setHeader(HttpHeaders.ACCESS_CONTROL_ALLOW_METHODS, (HttpHeaders.ACCEPT));
        ThreadContext.put("ipAddress", request.getRemoteAddr());
        ThreadContext.put("responseStatus", String.valueOf(response.getStatus()));
        fc.doFilter(sr, sr1);
        ThreadContext.clearAll();
    }

    @Override
    public void destroy() {}
}
import xmlToEL from "lawtext/dist/src/node/el/xmlToEL";
import renderLawtext from "lawtext/dist/src/renderer/lawtext";
import * as vscode from "vscode";

export const xmlToLawtext = async () => {
    const document = vscode.window.activeTextEditor?.document;
    if (!document) return;
    const xml = document.getText();
    const law = xmlToEL(xml);
    const lawtext = renderLawtext(law);
    const lawtextDocument = await vscode.workspace.openTextDocument({
        language: "lawtext",
        content: lawtext,
    });
    await vscode.window.showTextDocument(lawtextDocument);
};

export default xmlToLawtext;
/** * This file is part of TWIMExtract * * This object is a utility class for making calls to the imextract.exe in order to extract * specific ranges of HDMS data * @author Daniel Polasky * @author Kieran Neeson */ public class IMExtractRunner { // The single instance of this object private static IMExtractRunner instance; // Single instance of the preferences private static Preferences preferences = Preferences.getInstance(); // This is the root folder for the current analysis private static File root; /** * Private constructor */ private IMExtractRunner() { exeFile = new File(preferences.getLIB_PATH() + File.separator + "imextract.exe"); setRoot( preferences.getCIUGEN_HOME() + "\\root"); } /** * Returns the single instance of this object * @return - singleton IMExtractRunner */ public static IMExtractRunner getInstance() { if( instance == null ) { instance = new IMExtractRunner(); } return instance; } /** * Runs imextract.exe to determine the full RT, DT & MZ data ranges from the specified data * Generates 2 output files: * _rt.bin - the binary file containing the chromatogram map for scans to mins * _dt.bin - the binary file containing the mobiligram map for bins to millisecs * _hdc.bin - the hdc calibration binary file - not sure what this does but it is not relevant to HDMSCompare * ranges.txt - the text file with all the initial ranges for the data * @param rawFile - raw file to process * @param nFunction - data function to process */ public static void getFullDataRanges(File rawFile, int nFunction) { try { String cmdarray = exeFile.getCanonicalPath() + " " + "-d " + "\"" + rawFile.getPath() + "\" " + "-f " + nFunction + " " + "-o " + "\"" + getRoot() + File.separator + "ranges.txt\" " + "-t " + "mobilicube"; runIMSExtract(cmdarray); } catch (IOException ex) { Logger.getLogger(IMExtractRunner.class.getName()).log(Level.SEVERE, null, ex); } } /** * Read input range file and return an array of the ranges specified. * NOTE: max # of bins for each dimension is automatically determined by IMExtractRunner, * and will be set to 1 for the ranges array. 
* RANGE FILE FORMAT: * MZ_start_(m/z): xxx MZ_end_(m/z): xxxx RT_start_(minutes): xx RT_end_(minutes): xx DT_start_(bins): xx DT_end_(bins): xxx */ public static double[] readDataRanges(String rangesName, double[] rangesArr){ // Data reader BufferedReader reader = null; String line; // Read the file try { File rangesTxt = new File(rangesName); reader = new BufferedReader(new FileReader(rangesTxt)); while((line = reader.readLine()) != null){ // Skip lines beginning with '#' if (line.startsWith("#")){ // do nothing, it's a header continue; } String[] splits = line.split(":"); String inputName = splits[0]; double inputValue = Double.parseDouble(splits[1]); String[] nameSplits = inputName.split("_"); switch(nameSplits[0]){ case "MZ": if (nameSplits[1].toLowerCase().matches("start")){ minMZ = inputValue; rangesArr[0] = inputValue; } else if (nameSplits[1].toLowerCase().matches("end")){ maxMZ = inputValue; rangesArr[1] = inputValue; } // Invalid input name break; case "RT": if (nameSplits[1].toLowerCase().matches("start")){ minRT = inputValue; rangesArr[3] = inputValue; } else if (nameSplits[1].toLowerCase().matches("end")){ maxRT = inputValue; rangesArr[4] = inputValue; } // Invalid input name break; case "DT": if (nameSplits[1].toLowerCase().matches("start")){ minDT = inputValue; rangesArr[6] = inputValue; } else if (nameSplits[1].toLowerCase().matches("end")){ maxDT = inputValue; rangesArr[7] = inputValue; } // Invalid input name break; } } } catch (IOException ex) { Logger.getLogger(IMExtractRunner.class.getName()).log(Level.SEVERE, null, ex); } finally { try { reader.close(); } catch (IOException ex) { Logger.getLogger(IMExtractRunner.class.getName()).log(Level.SEVERE, null, ex); } } return rangesArr; } /** * Read the root/ce.dat file and return the CV information. 
ce.dat file has individual CV values, 1 per line * and no other information * @return CV list */ private static ArrayList<Double> getCVfromCEdat() { ArrayList<Double> cvData = new ArrayList<>(); File ceFile = new File(preferences.getROOT_PATH() + "\\_ce.dat"); BufferedReader reader; try { reader = new BufferedReader(new FileReader(ceFile)); String line; while ((line = reader.readLine()) != null){ double cv = Double.parseDouble(line); cvData.add(cv); } } catch (IOException e) { e.printStackTrace(); } return cvData; } /** * Reads the data ranges from the specified file * FOR OLD RANGE FILE FORMAT (updated 11/30/16) * @param rangesName: the path to the ranges text file * @return - an array of the ranges specified in the ranges file */ public static double[] readDataRangesOld(String rangesName) { // Data reader BufferedReader reader = null; String line = null; // array to store data values double[] rangesArr = new double[9]; // value counter int valueCounter = 0; try { //File rangesTxt = new File(getRoot() + File.separator + rangesName); File rangesTxt = new File(rangesName); reader = new BufferedReader(new FileReader(rangesTxt)); while((line = reader.readLine()) != null) { String[] splits = line.split(" "); for( String split : splits ){ double d = Double.parseDouble(split); //rangesArr[valueCounter] = d; switch( valueCounter ){ case START_MZ: minMZ = d; rangesArr[valueCounter] = minMZ; break; case STOP_MZ: maxMZ = d; rangesArr[valueCounter] = maxMZ; break; case MZ_BINS: mzBins = d; rangesArr[valueCounter] = mzBins; break; case START_RT: minRT = d; rangesArr[valueCounter] = minRT; break; case STOP_RT: maxRT = d; rangesArr[valueCounter] = maxRT; break; case RT_BINS: rtBins = d; rangesArr[valueCounter] = rtBins; break; case START_DT: minDT = Math.floor(d); rangesArr[valueCounter] = minDT; break; case STOP_DT: maxDT = Math.ceil(d); rangesArr[valueCounter] = maxDT; break; case DT_BINS: dtBins = d; rangesArr[valueCounter] = dtBins; break; } valueCounter++; } } } catch (IOException ex) { Logger.getLogger(IMExtractRunner.class.getName()).log(Level.SEVERE, null, ex); } finally { try { reader.close(); } catch (IOException ex) { Logger.getLogger(IMExtractRunner.class.getName()).log(Level.SEVERE, null, ex); } } return rangesArr; } /** * Print specified ranges * @param rangeArr: the range array to be printed */ public static void PrintRanges(double[] rangeArr){ // Print the ranges on 3 lines, as they're typically arranged that way in the text file. for(int i=0; i<rangeArr.length;i++){ System.out.print(rangeArr[i] + " "); if (i==2 || i==5 || i==rangeArr.length - 1){ System.out.println(); } } } /** * Returns an array of the last data ranges run * @return ranges */ public double[] getLastRanges() { return new double[]{minMZ, maxMZ, mzBins, minRT, maxRT, rtBins, minDT, maxDT, dtBins}; } /** * Gets the root directory * @return root dir (File) */ private static File getRoot(){ return root; } /** * Sets the root directory * @param path set root to this path */ private static void setRoot(String path){ //System.out.println("Setting root: " + path); root = new File(path); root.mkdirs(); } /** * Updated extract Mobiligram method that takes a list of functions to analyze (length 1 for single * file analyses), calls the appropriate helper methods based on the extraction mode, then combines * the returned (extracted) data for writing to an output file specified by the output path. 
* @param allFunctions = the list of functions (data) to be extracted with all their associated information in DataVectorInfoObject format * @param outputFilePath = where to write the output file * @param ruleMode = whether to use range files or rule files for extracting * @param ruleFile = the rule OR range file being used for the extraction * @param extractionMode = the type of extraction to be done (DT, MZ, RT, or RTDT) */ public void extractMobiligramOneFile(ArrayList<DataVectorInfoObject> allFunctions, String outputFilePath, boolean ruleMode, File ruleFile, int extractionMode, boolean dt_in_ms){ String lineSep = System.getProperty("line.separator"); // Get info types to print from first function (they will be the same for all functions) boolean[] infoTypes = allFunctions.get(0).getInfoTypes(); try { // Get data ArrayList<MobData> allMobData; if (extractionMode != IMExtractRunner.RTDT_MODE) { allMobData = extractMobiligramReturn(allFunctions, ruleMode, ruleFile, extractionMode, dt_in_ms); } else { assert allFunctions.size() == 1; // MRM mode can have at most 1 function passed at a time - this should be mandated in the outer function but checked here to confirm DataVectorInfoObject function = allFunctions.get(0); allMobData = generateMobiligram2D(function, ruleMode, ruleFile, extractionMode, dt_in_ms); } // Now, write the output file File out = new File(outputFilePath); BufferedWriter writer = new BufferedWriter(new FileWriter(out)); // Get the formatted text output for the appropriate extraction type (RT has to be handled differently from others) String[] arraylines; if (extractionMode == RT_MODE){ arraylines = rtWriteOutputs(allMobData, infoTypes); } else { double maxdt = 200; // if extracting in bins, maxdt = max bin if (extractionMode == DT_MODE || extractionMode == RTDT_MODE){ if (dt_in_ms){ // compute max DT using max m/z info from _extern.inf file maxdt = get_max_dt(allFunctions.get(0).getRawDataPath()); } } arraylines = dtmzWriteOutputs(allMobData, infoTypes, maxdt); } // Now, write all the lines to file for (String line : arraylines){ writer.write(line); writer.write(lineSep); } writer.flush(); writer.close(); } catch (IOException ex) { ex.printStackTrace(); } } public void writeExtractSave(ExtractSave saveObj){ String lineSep = System.getProperty("line.separator"); // Get info types to print from first function (they will be the same for all functions) boolean[] infoTypes = saveObj.getReferenceFunction().getInfoTypes(); // Now, write the output file File outFile = new File(saveObj.getOutputFilePath()); ArrayList<MobData> allMobData = saveObj.getMobData(); // Get the formatted text output for the appropriate extraction type (RT has to be handled differently from others) String[] arraylines; if (saveObj.getExtractionMode() == RT_MODE){ arraylines = rtWriteOutputs(allMobData, infoTypes); } else { double maxdt = 200; // if extracting in bins, maxdt = max bin if (saveObj.getExtractionMode() == DT_MODE || saveObj.getExtractionMode() == RTDT_MODE){ if (saveObj.isDT_in_MS()){ // compute max DT using max m/z info from _extern.inf file maxdt = get_max_dt(saveObj.getReferenceFunction().getRawDataPath()); } } arraylines = dtmzWriteOutputs(allMobData, infoTypes, maxdt); } // Now, write all the lines to file try { BufferedWriter writer = new BufferedWriter(new FileWriter(outFile)); for (String line : arraylines){ writer.write(line); writer.write(lineSep); } writer.flush(); writer.close(); } catch (IOException e) { e.printStackTrace(); } } /** * Extract Mobiligram method for CIU (2D RTDT) 
extractions. Takes a list of functions to analyze to convenient running with old setup, * but ONLY uses the first function. Calls the new helper methods to perform a SINGLE 2D RTDT extraction and returns * mobData as in the 1D extraction method. * the generated mobdata for output as appropriate. * @param function = function (data) to be extracted with all their associated information in DataVectorInfoObject format * @param dt_in_ms = whether to save output in bins or ms * @param ruleMode = whether to use range files or rule files for extracting * @param rangeFile = the rule OR range file being used for the extraction * @param extractionMode = the type of extraction to be done (DT, MZ, RT, or DTMZ) */ public ArrayList<MobData> generateMobiligram2D(DataVectorInfoObject function, boolean ruleMode, File rangeFile, int extractionMode, boolean dt_in_ms){ // Collect mobData for all functions in the list ArrayList<MobData> allMobData = new ArrayList<>(); if (extractionMode != RTDT_MODE){ System.out.println("ERROR: 2D extraction requested for non-2D mode. Returning no data"); return allMobData; } // DataVectorInfoObject function = allFunctions.get(0); double maxDT = get_max_dt(function.getRawDataPath()); allMobData = generateReplicateRTDT(function, rangeFile, ruleMode, dt_in_ms, maxDT); return allMobData; } /** * Updated extract Mobiligram method that takes a list of functions to analyze (length 1 for single * file analyses), calls the appropriate helper methods based on the extraction mode, then returns * the generated mobdata for output as appropriate. * @param allFunctions = the list of functions (data) to be extracted with all their associated information in DataVectorInfoObject format * @param dt_in_ms = whether to save output in bins or ms * @param ruleMode = whether to use range files or rule files for extracting * @param rangeFile = the rule OR range file being used for the extraction * @param extractionMode = the type of extraction to be done (DT, MZ, RT, or DTMZ) */ public ArrayList<MobData> extractMobiligramReturn(ArrayList<DataVectorInfoObject> allFunctions, boolean ruleMode, File rangeFile, int extractionMode, boolean dt_in_ms){ // Collect mobData for all functions in the list ArrayList<MobData> allMobData = new ArrayList<>(); if (extractionMode == RTDT_MODE){ for (DataVectorInfoObject function : allFunctions) { // TODO: fix or remove allMobData = generateMobiligram2D(function, ruleMode, rangeFile, extractionMode, dt_in_ms); } } else { for (DataVectorInfoObject function : allFunctions) { String rawDataFilePath = function.getRawDataPath(); String rawName = function.getRawDataName(); int functionNum = function.getFunction(); double conecv = function.getConeCV(); double trapcv = function.getCollisionEnergy(); double transfcv = function.getTransfCV(); double wh = function.getWaveHeight(); double wv = function.getWaveVel(); double[] rangeVals = function.getRangeVals(); String rangeName = function.getRangeName(); double[][] data = null; try { if (extractionMode == DT_MODE) { data = generateReplicateMobiligram(rawDataFilePath, functionNum, 0, rangeVals, rangeName, rangeFile, ruleMode); } else if (extractionMode == MZ_MODE) { data = generateReplicateSpectrum(rawDataFilePath, functionNum, 0, rangeVals, rangeName, rangeFile, ruleMode); } else if (extractionMode == RT_MODE) { data = generateReplicateChromatogram(rawDataFilePath, functionNum, 0, rangeVals, rangeName, rangeFile, ruleMode); } // data = generateReplicateDTMZ(rawDataFilePath, functionNum, 0, true, rangeVals, rangeName, ruleFile, 
ruleMode); } catch (IOException ex) { ex.printStackTrace(); } if (data == null) { System.out.println("Error during extraction! Check your raw data - it might be empty or corrupted"); } MobData currentMob = new MobData(data, rawName, rangeName, conecv, trapcv, transfcv, wh, wv); allMobData.add(currentMob); } } return allMobData; } /** * Method to manually find the maximum drift time of a file using the max m/z defined in * the acquisition mass range of the file's _extern.inf file. ONLY tested for Synapt G2 so far. * @param rawDataPath path to raw folder * @return max drift time (double) */ private double get_max_dt(String rawDataPath){ double max_dt = 0.0; double max_mz = 0.0; boolean mob_delay = false; double delay_time = 0.0; try { // read the file File rawData = new File(rawDataPath, "_extern.inf"); BufferedReader reader = new BufferedReader(new FileReader(rawData)); String line = reader.readLine(); while (line != null){ // MS mode if (line.toUpperCase().startsWith("END MASS")){ String[] splits = line.split("\\t"); String strmz = splits[splits.length - 1]; max_mz = Double.parseDouble(strmz); } // MSMS mode if (line.toUpperCase().startsWith("MSMS END MASS")){ String[] splits = line.split("\\t"); String strmz = splits[splits.length - 1]; max_mz = Double.parseDouble(strmz); } // check for mobility delays if (line.startsWith("Using Mobility Delay after Trap Release")){ String[] splits = line.split("\\t"); String strDelay = splits[splits.length - 1]; mob_delay = Boolean.parseBoolean(strDelay); } // check for mobility delays if (line.startsWith("IMS Wave Delay")){ String[] splits = line.split("\\t"); String strDelayTime = splits[splits.length - 1]; delay_time = Double.parseDouble(strDelayTime); // convert to ms. NOTE: dividing by 10,000 because I think the units are incorrect in MassLynx (this gives the correct max DT) delay_time = delay_time / 10000.0; } line = reader.readLine(); } // convert max m/z to max DT and return it. Account for delay if it was used if (mob_delay){ max_dt = convert_mzdt_max(max_mz, delay_time); } else { max_dt = convert_mzdt_max(max_mz, 0.0); } reader.close(); } catch (IOException ignored){ } return max_dt; } /** * Convert from maxmium m/z to max drift time for synapt G2 using Waters built-in cutoffs. Accounts * for mobility trapping delay times. * @param maxMZ max m/z in file to determine max DT used * @return max drift time (double) */ private double convert_mzdt_max(double maxMZ, double delay_time){ double dtmax; if (maxMZ <= 600){ dtmax = 7.61; } else if (maxMZ <= 1200){ dtmax = 10.8; } else if (maxMZ <= 2000){ dtmax = 13.78; } else if (maxMZ <= 5000){ dtmax = 21.940; } else if (maxMZ <= 8000){ dtmax = 27.513; } else if (maxMZ <= 14000){ dtmax = 36.268; } else if (maxMZ <= 32000){ dtmax = 54.580; } else { dtmax = 96.743; } dtmax = dtmax - delay_time; return dtmax; } /** * Method to change the DT information of the first MobData array ONLY in a list of mobdata. * Converts to DT using information from file's _extern.inf. * @param allmobdata arraylist of mobdata containers * @return updated mobdata */ private ArrayList<MobData> convert_mobdata_to_ms(ArrayList<MobData> allmobdata, double maxDT){ // Convert each bin to drift time ((bin - 1) * max_dt / 199) // NOTE: For some reason, there are actually only 199 bins, not 200. Doing the conversion by // dividing by 199 gives results that match the output from Driftscope/MassLynx. 
Bin 1 is set // to a drift time of 0 (millisecond DTs are 0-indexed, whereas bin numbers are 1-indexed, so all // bins have 1 subtracted from them to be converted correctly) for (int i=0; i < allmobdata.get(0).getMobdata().length; i++){ allmobdata.get(0).getMobdata()[i][0] = convertBinToDT(allmobdata.get(0).getMobdata()[i][0], maxDT); } return allmobdata; } /** * Convert bin to DT (ms). NOTE: For some reason, there are actually only 199 bins, not 200. Doing the conversion by * dividing by 199 gives results that match the output from Driftscope/MassLynx. Bin 1 is set * to a drift time of 0 (millisecond DTs are 0-indexed, whereas bin numbers are 1-indexed, so all * bins have 1 subtracted from them to be converted correctly) * @param inputBin starting bin number * @param maxDT max DT of the acquisition (ms) * @return DT in ms */ private static double convertBinToDT(double inputBin, double maxDT) { return (inputBin - 1) * maxDT / 199; } /** * Helper method to format text output for MS or DT extractions. Assumes that each function * (if using combined outputs) has the same bin names (e.g. DT bin 1, 2, 3, ...) and writes * one column per function using the same initial set of bins. Returns String[] that can * be directly written to the output file. * @param allMobData list of mobdata containers * @param infoTypes boolean array of what to print * @return output strings to write to file */ private String[] dtmzWriteOutputs(ArrayList<MobData> allMobData, boolean[] infoTypes, double maxdt){ ArrayList<String> lines = new ArrayList<String>(); // Headers // Loop through the list of data, writing each function's value for this CE to the line, and sorting int HEADER_LENGTH = 2; lines.add("# Range file name:"); lines.add("# Raw file name:"); if (infoTypes[USECONE_TYPES]){ lines.add("$ConeCV:"); HEADER_LENGTH++; // sort by cone if trap is not active if (! infoTypes[USETRAP_TYPES]) { allMobData.sort(Comparator.comparingInt(d -> (int) d.getConeCV())); } } if (infoTypes[USETRAP_TYPES]){ lines.add("$TrapCV:"); HEADER_LENGTH++; allMobData.sort(Comparator.comparingInt(d -> (int) d.getTrapCV())); } if (infoTypes[USETRANSF_TYPES]){ lines.add("$TransferCV:"); HEADER_LENGTH++; // sort by transfer if trap is not active if (! 
infoTypes[USETRAP_TYPES]) { allMobData.sort(Comparator.comparingInt(d -> (int) d.getTransferCV())); } } if (infoTypes[USEWH_TYPES]){ lines.add("$WaveHt:"); HEADER_LENGTH++; } if (infoTypes[USEWV_TYPES]){ lines.add("$WaveVel:"); HEADER_LENGTH++; } // ADD HEADER INFORMATION AND BIN NUMBERS (or ms) TO THE LINES int lineIndex = 0; try { // handle writing bin numbers if there's no data in the first file if (allMobData.get(0).getMobdata().length == 0){ for (int i = HEADER_LENGTH; i < 200 + HEADER_LENGTH; i++){ lines.add(String.valueOf(i - HEADER_LENGTH + 1)); lineIndex++; } } else { // Mobdata is not empty, so write its contents to the array if (maxdt != 200 && maxdt != 0){ // convert DT bins to ms (manually), then write to file allMobData = convert_mobdata_to_ms(allMobData, maxdt); } for (int i = HEADER_LENGTH; i < allMobData.get(0).getMobdata().length + HEADER_LENGTH; i++){ lines.add(String.valueOf(allMobData.get(0).getMobdata()[lineIndex][0])); lineIndex++; } } } catch (NullPointerException ex){ // mobdata is null - add default header for (int i = HEADER_LENGTH; i < 200 + HEADER_LENGTH; i++){ lines.add(String.valueOf(i - HEADER_LENGTH + 1)); lineIndex++; } } // Convert to array from arraylist String[] strings = new String[1]; String[] arraylines = lines.toArray(strings); arraylines[0] = lines.get(0); // FILL IN THE ARRAY WITH ACTUAL DATA, starting with headers for (MobData data : allMobData){ int lineCounter = 0; // // Print the range name only for the first data column // if (allMobData.indexOf(data) == 0) // arraylines[0] = arraylines[0] + "," + data.getRangeName(); // Print range/raw name for ALL columns in case ranges are being combined arraylines[0] = arraylines[0] + "," + data.getRangeName(); lineCounter++; arraylines[1] = arraylines[1] + "," + data.getRawFileName(); lineCounter++; // Print desired header information for the specified info types if (infoTypes[USECONE_TYPES]){ arraylines[lineCounter] = arraylines[lineCounter] + "," + data.getConeCV(); lineCounter++; } if (infoTypes[USETRAP_TYPES]){ arraylines[lineCounter] = arraylines[lineCounter] + "," + data.getTrapCV(); lineCounter++; } if (infoTypes[USETRANSF_TYPES]){ arraylines[lineCounter] = arraylines[lineCounter] + "," + data.getTransferCV(); lineCounter++; } if (infoTypes[USEWH_TYPES]){ arraylines[lineCounter] = arraylines[lineCounter] + "," + data.getWaveHeight(); lineCounter++; } if (infoTypes[USEWV_TYPES]){ arraylines[lineCounter] = arraylines[lineCounter] + "," + data.getWaveVelocity(); lineCounter++; } // WRITE THE ACTUAL DATA try{ // Added catch for null mobdata if there's no (or all 0's) data in the file lineIndex = 0; // Catch empty mobdata if (data.getMobdata().length == 0){ for (int i = HEADER_LENGTH; i < 200 + HEADER_LENGTH; i++){ arraylines[i] = arraylines[i] + "," + 0; } } for (int i = HEADER_LENGTH; i < data.getMobdata().length + HEADER_LENGTH; i++){ arraylines[i] = arraylines[i] + "," + data.getMobdata()[lineIndex][1]; lineIndex++; } } catch (NullPointerException ex){ // Warn the user that their data is no good System.out.println("WARNING: " + "No data in " + data.getRawFileName() + ", collision energy " + data.getCollisionEnergy()); for (int i = HEADER_LENGTH; i < 200 + HEADER_LENGTH; i++){ arraylines[i] = arraylines[i] + "," + 0; } } catch (ArrayIndexOutOfBoundsException ex){ System.out.println("\n" + "WARNING: " + "(Array index error) " + data.getRawFileName() + ", range File " + data.getRangeName() + "\n" + "Writing all 0's for this range"); for (int i = HEADER_LENGTH; i < allMobData.get(0).getMobdata().length 
+ HEADER_LENGTH; i++){ // for (int i = HEADER_LENGTH; i < 200 + HEADER_LENGTH; i++){ arraylines[i] = arraylines[i] + "," + "0"; lineIndex++; } } } return arraylines; } /** * Alternate method for writing output data. Because RT data never repeats the same 'bins' * (the time keeps increasing by function, unlike DT and MZ, which are the same for all functions), * the data needs to have each function's 'x' data (raw RT) saved as well as 'y' (intensity). * Otherwise, code is identical to dtmzWriteOutputs. Duplicated rather than putting if/else * at every single line in a single method. * @param allMobData list of mobdata containers * @param infoTypes boolean array of what to print * @return output strings to write to file */ private String[] rtWriteOutputs(ArrayList<MobData> allMobData, boolean[] infoTypes){ ArrayList<String> lines = new ArrayList<>(); // Headers // Loop through the list of data, writing each function's value for this CE to the line int HEADER_LENGTH = 1; lines.add("#Range file name:"); if (infoTypes[USECONE_TYPES]){ lines.add("$ConeCV:"); HEADER_LENGTH++; } if (infoTypes[USETRAP_TYPES]){ lines.add("$TrapCV:"); HEADER_LENGTH++; } if (infoTypes[USETRANSF_TYPES]){ lines.add("$TransferCV:"); HEADER_LENGTH++; } if (infoTypes[USEWH_TYPES]){ lines.add("$WaveHt:"); HEADER_LENGTH++; } if (infoTypes[USEWV_TYPES]){ lines.add("$WaveVel:"); HEADER_LENGTH++; } // ADD HEADER INFORMATION AND BIN NUMBERS TO THE LINES int lineIndex = 0; try { // Mobdata is not empty, so write its contents to the array for (int i = HEADER_LENGTH; i < allMobData.get(0).getMobdata().length + HEADER_LENGTH; i++){ // lines.add(String.valueOf(allMobData.get(0).getMobdata()[lineIndex][0])); lines.add(""); lineIndex++; } } catch (NullPointerException ex){ for (int i = HEADER_LENGTH; i < 200 + HEADER_LENGTH; i++){ lines.add(String.valueOf(i - HEADER_LENGTH + 1)); lineIndex++; } } // Convert to array from arraylist String[] strings = new String[1]; String[] arraylines = lines.toArray(strings); arraylines[0] = lines.get(0); // FILL IN THE ARRAY WITH ACTUAL DATA, starting with headers for (MobData data : allMobData){ int lineCounter = 0; // Print the range name only for the first data column if (allMobData.indexOf(data) == 0) arraylines[0] = arraylines[0] + "," + data.getRangeName(); lineCounter++; // Print desired header information for the specified info types if (infoTypes[USECONE_TYPES]){ arraylines[lineCounter] = arraylines[lineCounter] + ",," + data.getConeCV(); lineCounter++; } if (infoTypes[USETRAP_TYPES]){ arraylines[lineCounter] = arraylines[lineCounter] + ",," + data.getTrapCV(); lineCounter++; } if (infoTypes[USETRANSF_TYPES]){ arraylines[lineCounter] = arraylines[lineCounter] + ",," + data.getTransferCV(); lineCounter++; } if (infoTypes[USEWH_TYPES]){ arraylines[lineCounter] = arraylines[lineCounter] + ",," + data.getWaveHeight(); lineCounter++; } if (infoTypes[USEWV_TYPES]){ arraylines[lineCounter] = arraylines[lineCounter] + ",," + data.getWaveVelocity(); lineCounter++; } // WRITE THE ACTUAL DATA try{ // Added catch for null mobdata if there's no (or all 0's) data in the file lineIndex = 0; if (data.getMobdata().length == 0){ // mobdata is empty! 
Write all 0's for (int i = HEADER_LENGTH; i < 200 + HEADER_LENGTH; i++){ arraylines[i] = arraylines[i] + "," + String.valueOf(0); } } // Otherwise, mobdata exists so write its contents to the lines (BOTH raw RT AND intensity) for (int i = HEADER_LENGTH; i < data.getMobdata().length + HEADER_LENGTH - 1; i++){ arraylines[i] = arraylines[i] + "," + String.valueOf(data.getMobdata()[lineIndex][0]) + "," + String.valueOf(data.getMobdata()[lineIndex][1]); lineIndex++; } } catch (NullPointerException ex){ // Warn the user that their data is no good System.out.println("WARNING: " + "No data in " + data.getRawFileName() + ", collision energy " + data.getCollisionEnergy()); for (int i = HEADER_LENGTH; i < 200 + HEADER_LENGTH; i++){ arraylines[i] = arraylines[i] + "," + 0; } } catch (ArrayIndexOutOfBoundsException ex){ System.out.println("\n" + "WARNING: " + "(Array index error) " + data.getRawFileName() + ", range File " + data.getRangeName()); } } return arraylines; } /** * Rule file spectrum extract method. Passes the extraction argument string to generateMZ. * @param rawPath path to raw * @param nfunction function number * @param slice I think this is irrelevant * @param rangeValues range value array * @param rangeName name of range file * @param ruleFile path to range or rule file * @return double[][] of 1 row of axis values, 1 row of intensity values * @throws FileNotFoundException if not found * @throws IOException if something happened */ public double[][] generateReplicateSpectrum(String rawPath, int nfunction, int slice, double[] rangeValues, String rangeName, File ruleFile, boolean ruleMode) throws FileNotFoundException, IOException { File rawFile = new File(rawPath); String rawDataName = rawFile.getName(); // Get a unique id for the replicate chromatogram // Edited to make it actually unique for multiple range files - added name of Range file to it String replicateID; if( slice > 0 ) replicateID = rangeName + "_" + rawDataName + "_" + nfunction + "[" + slice + "]"; else replicateID = rangeName + "_" + rawDataName + "_" + nfunction + "[" + rangeValues[START_MZ] + "_" + rangeValues[STOP_MZ] + "]"; // Generate a spectrum for the full data String specPath; if (ruleMode){ specPath = generateMZ(replicateID, rawFile, nfunction, rangeValues[IMExtractRunner.START_MZ], rangeValues[IMExtractRunner.STOP_MZ], rangeValues[IMExtractRunner.START_RT], rangeValues[IMExtractRunner.STOP_RT], rangeValues[IMExtractRunner.START_DT], rangeValues[IMExtractRunner.STOP_DT], (int)rangeValues[IMExtractRunner.DT_BINS], ruleMode, ruleFile); } else { specPath = generateMZ(replicateID, rawFile, nfunction, rangeValues[IMExtractRunner.START_MZ], rangeValues[IMExtractRunner.STOP_MZ], rangeValues[IMExtractRunner.START_RT], rangeValues[IMExtractRunner.STOP_RT], rangeValues[IMExtractRunner.START_DT], rangeValues[IMExtractRunner.STOP_DT], (int)rangeValues[IMExtractRunner.DT_BINS], ruleMode, null); } return getTraceData(specPath, MZ_MODE, rangeValues); } /** * Chomatogram (1D RT) extract method. Passes the extraction argument string to generateRT. 
* @param rawPath path to raw * @param nfunction function number * @param slice I think this is irrelevant * @param rangeValues range value array * @param rangeName name of range file * @param ruleFile path to range or rule file * @return double[][] of 1 row of axis values, 1 row of intensity values * @throws FileNotFoundException if not found * @throws IOException if something happened */ public double[][] generateReplicateChromatogram(String rawPath, int nfunction, int slice, double[] rangeValues, String rangeName, File ruleFile, boolean ruleMode) throws FileNotFoundException, IOException { File rawFile = new File(rawPath); String rawDataName = rawFile.getName(); // Get a unique id for the replicate chromatogram // Edited to make it actually unique for multiple range files - added name of Range file to it String replicateID; if( slice > 0 ) replicateID = rangeName + "_" + rawDataName + "_" + nfunction + "[" + slice + "]"; else replicateID = rangeName + "_" + rawDataName + "_" + nfunction + "[" + rangeValues[START_MZ] + "_" + rangeValues[STOP_MZ] + "]"; // Generate a spectrum for the full data String specPath; if (ruleMode){ specPath = generateRT(replicateID, rawFile, nfunction, rangeValues[IMExtractRunner.START_MZ], rangeValues[IMExtractRunner.STOP_MZ], rangeValues[IMExtractRunner.START_RT], rangeValues[IMExtractRunner.STOP_RT], rangeValues[IMExtractRunner.START_DT], rangeValues[IMExtractRunner.STOP_DT], (int)rangeValues[IMExtractRunner.DT_BINS], ruleMode, ruleFile); } else { specPath = generateRT(replicateID, rawFile, nfunction, rangeValues[IMExtractRunner.START_MZ], rangeValues[IMExtractRunner.STOP_MZ], rangeValues[IMExtractRunner.START_RT], rangeValues[IMExtractRunner.STOP_RT], rangeValues[IMExtractRunner.START_DT], rangeValues[IMExtractRunner.STOP_DT], (int)rangeValues[IMExtractRunner.DT_BINS], ruleMode, null); } return getTraceData(specPath, RT_MODE, rangeValues); } /** * 1D DT data slice generator. Passes slice name and argument string to generateDT method. 
* @param rawPath path to raw * @param nfunction function number * @param slice I think this is irrelevant * @param rangeValues range value array * @param rangeName name of range file * @param ruleFile path to range or rule file * @return double[][] of 1 row of axis values, 1 row of intensity values * @throws FileNotFoundException if not found * @throws IOException if something happened */ private double[][] generateReplicateMobiligram(String rawPath, int nfunction, int slice, double[] rangeValues, String rangeName, File ruleFile, boolean ruleMode)throws FileNotFoundException, IOException { File rawFile = new File(rawPath); String rawDataName = rawFile.getName(); // Get a unique id for the replicate chromatogram // Edited to make it actually unique for multiple range files - added name of Range file to it String replicateID; if( slice > 0 ) replicateID = rangeName + "_" + rawDataName + "_" + nfunction + "[" + slice + "]"; else replicateID = rangeName + "_" + rawDataName + "_" + nfunction + "[" + rangeValues[START_DT] + "_" + rangeValues[STOP_DT] + "]"; // Generate a spectrum for the full data String specPath; if (ruleMode){ specPath = generateDT(replicateID, rawFile, nfunction, rangeValues[IMExtractRunner.START_MZ], rangeValues[IMExtractRunner.STOP_MZ], rangeValues[IMExtractRunner.START_RT], rangeValues[IMExtractRunner.STOP_RT], rangeValues[IMExtractRunner.START_DT], rangeValues[IMExtractRunner.STOP_DT], (int)rangeValues[IMExtractRunner.DT_BINS], ruleMode, ruleFile); } else { specPath = generateDT(replicateID, rawFile, nfunction, rangeValues[IMExtractRunner.START_MZ], rangeValues[IMExtractRunner.STOP_MZ], rangeValues[IMExtractRunner.START_RT], rangeValues[IMExtractRunner.STOP_RT], rangeValues[IMExtractRunner.START_DT], rangeValues[IMExtractRunner.STOP_DT], (int)rangeValues[IMExtractRunner.DT_BINS], ruleMode, null); } return getTraceData(specPath, DT_MODE, rangeValues); } private static ArrayList<MobData> generateReplicateRTDT(DataVectorInfoObject function, File rangeFile, boolean ruleMode, boolean dt_in_ms, double maxDT){ /* RTDT (2D) */ // Write range file for IMExtract.exe StringBuilder cmdarray = new StringBuilder(); try { cmdarray.append(function.getRangeVals()[START_MZ]).append(" ").append(function.getRangeVals()[STOP_MZ]).append(" 1").append(System.getProperty("line.separator")); cmdarray.append(function.getRangeVals()[START_RT]).append(" ").append(function.getRangeVals()[STOP_RT]).append(" ").append(String.format("%d", (int) rtBins)).append(System.getProperty("line.separator")); cmdarray.append(function.getRangeVals()[START_DT]).append(" ").append(function.getRangeVals()[STOP_DT]).append(" ").append(String.format("%d", (int) dtBins)).append(System.getProperty("line.separator")); File dtRangeFile = new File(preferences.getLIB_PATH() + "\\ranges_2dRTDT.txt"); BufferedWriter writer = new BufferedWriter(new FileWriter(dtRangeFile)); writer.write(cmdarray.toString()); writer.flush(); writer.close(); } catch(Exception ex) { ex.printStackTrace(); return null; } // Run IMExtract.exe String path = null; String replicateID = function.getRangeName() + "_" + function.getRawDataName() + "_" + function.getFunction() + "[" + function.getRangeVals()[START_DT] + "_" + function.getRangeVals()[STOP_DT] + "]"; try { path = root.getPath() + File.separator + replicateID + ".csv"; cmdarray.setLength(0); cmdarray.append(exeFile.getCanonicalPath()).append(" "); cmdarray.append("-d "); cmdarray.append("\"").append(function.getRawDataPath()).append("\" "); cmdarray.append("-f 
").append(function.getFunction()).append(" "); cmdarray.append("-o "); cmdarray.append("\"").append(path).append("\" "); cmdarray.append("-t "); cmdarray.append("mobilicube "); cmdarray.append("-p "); cmdarray.append("\"").append(preferences.getLIB_PATH()).append("\\ranges_2dRTDT.txt\" "); if (ruleMode) { cmdarray.append(" -pdtmz "); cmdarray.append("\"").append(rangeFile.getAbsolutePath()).append("\""); } cmdarray.append("-textOut 1"); runIMSExtract(cmdarray.toString()); } catch( Exception ex ) { ex.printStackTrace(); } ArrayList<Double> cv_axis = getCVfromCEdat(); int[] dt_axis_base = IntStream.rangeClosed(1, 200).toArray(); ArrayList<Double> dtAxisList = new ArrayList<>(); // convert DT to ms if requested for (int dtBin : dt_axis_base) { double finalDt = dtBin; // todo - check this - no need to convert because we make people enter DT range in bins (?) // if (dt_in_ms) { // finalDt = convertBinToDT(dtBin, maxDT); // } if (finalDt >= function.getRangeVals()[START_DT] && finalDt <= function.getRangeVals()[STOP_DT]) { dtAxisList.add(finalDt); } } double[] dt_axis = new double[dtAxisList.size()]; for (int i=0; i < dtAxisList.size(); i++) { dt_axis[i] = dtAxisList.get(i); } // Read data from IMExtract.exe and return it return getTextData(path, dt_axis, cv_axis, function); } /** * Generate a mz data set. We sum over all masses and drift times to generate * a 1 dimensional dataset. */ private static String generateMZ(String replicateID, File rawFile, int nFunction, double startMZ, double stopMZ, double startRT, double stopRT, double startDT, double stopDT, int mzBins, boolean bSelectRegion, File ruleFile) { StringBuilder cmdarray = new StringBuilder(); /* MZ plot (1D plot) */ try { cmdarray.append(startMZ).append(" ").append(stopMZ).append(" ").append(mzBins).append(System.getProperty("line.separator")); cmdarray.append(startRT).append(" ").append(stopRT).append(" 1").append(System.getProperty("line.separator")); cmdarray.append(startDT).append(" ").append(stopDT).append(" 1").append(System.getProperty("line.separator")); File mzRangeFile = new File(preferences.getLIB_PATH() + "\\ranges_1DMZ.txt"); BufferedWriter writer = new BufferedWriter(new FileWriter(mzRangeFile)); writer.write(cmdarray.toString()); writer.flush(); writer.close(); } catch(Exception ex) { //_log.writeMessage("Unable to write out MZ range file"); ex.printStackTrace(); // StackTraceElement[] trace = ex.getStackTrace(); // for( int i=0; i<trace.length; i++ ) // { // StackTraceElement st = trace[i]; // // _log.writeMessage(st.toString()); // } return null; } String path = null; try { path = root.getPath() + File.separator + replicateID + ".1dMZ"; cmdarray.setLength(0); cmdarray.append(exeFile.getCanonicalPath()).append(" "); cmdarray.append("-d "); cmdarray.append("\"").append(rawFile.getPath()).append("\" "); cmdarray.append("-f ").append(nFunction).append(" "); cmdarray.append("-o "); cmdarray.append("\"").append(path).append("\" "); cmdarray.append("-t "); cmdarray.append("mobilicube "); cmdarray.append("-p "); cmdarray.append("\"" + preferences.getLIB_PATH() + "\\ranges_1DMZ.txt\""); if( bSelectRegion ) { /* selected region rul files */ // File dtmz = new File( preferences.getLIB_PATH() + "\\outDTMZ.txt" ); if( ruleFile.exists() ) { cmdarray.append(" -pdtmz "); cmdarray.append("\"").append(ruleFile.getAbsolutePath()).append("\""); } File rtdt = new File( preferences.getLIB_PATH() + "\\outRTDT.txt" ); if( rtdt.exists() ) { cmdarray.append(" -prtdt "); cmdarray.append("\"").append(rtdt.getAbsolutePath()).append("\""); } File 
rtmz = new File( preferences.getLIB_PATH() + "\\outRTMZ.txt" ); if( rtmz.exists() ) { cmdarray.append(" -prtmz "); cmdarray.append("\"").append(rtmz.getAbsolutePath()).append("\""); } } // _log.writeMessage(cmdarray); // progMon.updateStatusMessage("Generating spectrum"); runIMSExtract(cmdarray.toString()); } catch( Exception ex ) { ex.printStackTrace(); } return path; } // /** // * Generate a retention time data set. We sum over all masses and drift times to generate // * a 1 dimensional dataset. // */ private static String generateRT(String replicateID, File rawFile, int nFunction, double startMZ, double stopMZ, double startRT, double stopRT, double startDT, double stopDT, int rtBins, boolean bSelectRegion, File ruleFile) { StringBuilder cmdarray = new StringBuilder(); /* RT plot (1D plot) */ try { cmdarray.append(startMZ).append(" ").append(stopMZ).append(" 1").append(System.getProperty("line.separator")); cmdarray.append(startRT).append(" ").append(stopRT).append(" ").append(rtBins).append(System.getProperty("line.separator")); cmdarray.append(startDT).append(" ").append(stopDT).append(" 1").append(System.getProperty("line.separator")); File rtRangeFile = new File(preferences.getLIB_PATH() + "\\ranges_1DRT.txt"); BufferedWriter writer = new BufferedWriter(new FileWriter(rtRangeFile)); writer.write(cmdarray.toString()); writer.flush(); writer.close(); } catch(Exception ex) { // _log.writeMessage("Unable to write out RT range file"); ex.printStackTrace(); return null; } String path; try { path = root.getPath() + File.separator + replicateID + ".1dRT"; cmdarray.setLength(0); cmdarray.append(exeFile.getCanonicalPath()).append(" "); cmdarray.append("-d "); cmdarray.append("\"").append(rawFile.getPath()).append("\" "); cmdarray.append("-f ").append(nFunction).append(" "); cmdarray.append("-o "); cmdarray.append("\"").append(path).append("\" "); cmdarray.append("-t "); cmdarray.append("mobilicube "); cmdarray.append("-p "); cmdarray.append("\"").append(preferences.getLIB_PATH()).append("\\ranges_1DRT.txt\""); if( bSelectRegion ) { /*cmdarray += " -px "; cmdarray += root.getPath() + File.separator + "selRegion.txt";*/ /* selected region rul files */ if( ruleFile.exists() ) { cmdarray.append(" -pdtmz "); cmdarray.append("\"").append(ruleFile.getAbsolutePath()).append("\""); } File dtmz = new File( preferences.getLIB_PATH() + "\\outDTMZ.txt" ); if( dtmz.exists() ) { cmdarray.append(" -pdtmz "); cmdarray.append("\"").append(dtmz.getAbsolutePath()).append("\""); } File rtdt = new File( preferences.getLIB_PATH() + "\\outRTDT.txt" ); if( rtdt.exists() ) { cmdarray.append(" -prtdt "); cmdarray.append("\"").append(rtdt.getAbsolutePath()).append("\""); } File rtmz = new File( preferences.getLIB_PATH() + "\\outRTMZ.txt" ); if( rtmz.exists() ) { cmdarray.append(" -prtmz "); cmdarray.append("\"").append(rtmz.getAbsolutePath()).append("\""); } } // _log.writeMessage(cmdarray); // progMon.updateStatusMessage("Generating chromatogram"); runIMSExtract(cmdarray.toString()); } catch( Exception ex ) { ex.printStackTrace(); System.err.println(ex.getMessage()); return null; } return path; } // /** * Generate a drift time data set. We sum over all masses and drift times to generate * a 1 dimensional dataset. 
*/ private static String generateDT(String replicateID, File rawFile, int nFunction, double startMZ, double stopMZ, double startRT, double stopRT, double startDT, double stopDT, int dtBins, boolean bSelectRegion, File ruleFile) { /* DT plot (1d plot) */ StringBuilder cmdarray = new StringBuilder(); try { cmdarray.append(startMZ + " " + stopMZ + " 1" + System.getProperty("line.separator")); cmdarray.append(startRT + " " + stopRT + " 1" + System.getProperty("line.separator")); cmdarray.append(startDT + " " + stopDT + " " + dtBins + System.getProperty("line.separator")); File dtRangeFile = new File(preferences.getLIB_PATH() + "\\ranges_1DDT.txt"); BufferedWriter writer = new BufferedWriter(new FileWriter(dtRangeFile)); writer.write(cmdarray.toString()); writer.flush(); writer.close(); } catch(Exception ex) { // _log.writeMessage("Unable to write out DT range file"); ex.printStackTrace(); StackTraceElement[] trace = ex.getStackTrace(); for( int i=0; i<trace.length; i++ ) { } return null; } String path = null; try { path = root.getPath() + File.separator + replicateID + ".1dDT"; cmdarray.setLength(0); cmdarray.append(exeFile.getCanonicalPath() + " "); cmdarray.append("-d "); cmdarray.append("\"" + rawFile.getPath() + "\" "); cmdarray.append("-f " + nFunction + " "); cmdarray.append("-o "); cmdarray.append("\"" + path + "\" "); cmdarray.append("-t "); cmdarray.append("mobilicube "); cmdarray.append("-p "); cmdarray.append("\"" + preferences.getLIB_PATH() + "\\ranges_1DDT.txt\""); if( bSelectRegion ) { /* selected region rul files */ // File dtmz = new File( preferences.getLIB_PATH() + "\\outDTMZ.txt" ); if( ruleFile.exists() ) { cmdarray.append(" -pdtmz "); cmdarray.append("\"" + ruleFile.getAbsolutePath() + "\""); } File rtdt = new File( preferences.getLIB_PATH() + "\\outRTDT.txt" ); if( rtdt.exists() ) { cmdarray.append(" -prtdt "); cmdarray.append("\"" + rtdt.getAbsolutePath() + "\""); } File rtmz = new File( preferences.getLIB_PATH() + "\\outRTMZ.txt" ); if( rtmz.exists() ) { cmdarray.append(" -prtmz "); cmdarray.append("\"" + rtmz.getAbsolutePath() + "\""); } } // _log.writeMessage(cmdarray); runIMSExtract(cmdarray.toString()); } catch( Exception ex ) { ex.printStackTrace(); } return path; } /** * For methods that return text data instead of binary, use this to read the resulting output file. * (this is very convoluted, but allows this method to use all the existing print/etc code so..) 
* @param path path to output file * @return 2D data array */ private static ArrayList<MobData> getTextData(String path, double[] dt_axis, ArrayList<Double> cv_axis, DataVectorInfoObject function) { ArrayList<MobData> allMobData = new ArrayList<>(); // Open the text file to read File textFile = new File(path); textFile.deleteOnExit(); if( !textFile.exists() ) { return null; } // Read file try { BufferedReader reader = new BufferedReader(new FileReader(textFile)); String line; int cvIndex = 0; while((line = reader.readLine()) != null) { // Lines are transposed (each row is all DTs from a given CV) String[] splits = line.split(","); double[][] data = new double[dt_axis.length][2]; // Add dt axis into data array for (int i=0; i < dt_axis.length; i++) { data[i][0] = dt_axis[i]; } // Read intensity into into data array for (int i=0; i < splits.length; i++){ data[i][1] = Double.parseDouble(splits[i]); } // Generate this mobdata MobData currentMob = new MobData(data, function.getRawDataName(), function.getRangeName(), function.getConeCV(), cv_axis.get(cvIndex), function.getTransfCV(), function.getWaveHeight(), function.getWaveVel()); allMobData.add(currentMob); cvIndex++; } reader.close(); } catch (IOException ex) { System.out.println("Error: could not find text file to extract. No data returned " + path); return null; } return allMobData; } private synchronized static double[][] getTraceData(String path, int nType, double[] rangeVals) throws FileNotFoundException, IOException { // Open the binary file as channel File binFile = new File(path); double data[][] = (double[][])null; binFile.deleteOnExit(); if( !binFile.exists() ) { return null; } RandomAccessFile rafFile = new RandomAccessFile( binFile, "r" ); FileChannel channel = rafFile.getChannel(); // The memory mapped buffer MappedByteBuffer nMbb; //Read number of mass channels nMbb = channel.map(FileChannel.MapMode.READ_ONLY,0L,binFile.length()); nMbb = nMbb.load(); nMbb.order(ByteOrder.LITTLE_ENDIAN); /* Get the actual number of bins used */ // NOTE - this overwrites any bins passed in range files, so I'm removing bin arguments from ranges int nMZBins = 0; int nRTBins = 0; int nDTBins = 0; int nBins = 0; try{ nMZBins = nMbb.getInt(); nRTBins = nMbb.getInt(); nDTBins = nMbb.getInt(); nBins = 0; } catch(java.nio.BufferUnderflowException ex){ System.out.println("Buffer under flow: No data extracted from " + path); // ex.printStackTrace(); } if( nType == RT_MODE ) { nBins = nRTBins; } if( nType == DT_MODE ) { nBins = nDTBins; } if( nType == MZ_MODE ) { nBins = nMZBins; } // Generate our storage if (nType == MZ_MODE){ // Load only the data within the specified m/z range into our point array ArrayList<double[]> small_data = new ArrayList<double[]>(); for( int nZ = 0; nZ < nBins; nZ++) { float fX = NumberUtils.roundNumber(nMbb.getFloat(), 3); int nCount = nMbb.getInt(); if( nCount < 0 ){ //_log.writeMessage("Warning -ve counts " + nCount); } else { // Only add this value to the data array if it's in the desired range if (fX > rangeVals[START_MZ] && fX < rangeVals[STOP_MZ]){ small_data.add(new double[]{fX, nCount}); // data[nZ] = new double[]{fX, nCount}; } } } // Once data is loaded, return as an array of the correct size double[][] data_size = new double[small_data.size()][2]; data = small_data.toArray(data_size); } else { data = new double[nBins][2]; // Load all the data into our point array for DT and RT modes for( int nZ = 0; nZ < nBins; nZ++) { float fX = NumberUtils.roundNumber(nMbb.getFloat(), 3); int nCount = nMbb.getInt(); if( nCount < 0 ){ 
//_log.writeMessage("Warning -ve counts " + nCount); } else { data[nZ] = new double[]{fX, nCount}; } } } channel.close(); rafFile.close(); binFile.delete(); return data; } // /** * Runs imextract.exe using the specified command arguments * @param cmdarray - the commandline arguments for imextract.exe */ private synchronized static void runIMSExtract(String cmdarray) { //System.out.println(cmdarray); Process proc = null; Runtime runtime = Runtime.getRuntime(); try { proc = runtime.exec(cmdarray); } catch(Exception ex) { return; } try { InputStream procOut = proc.getInputStream(); InputStream procErr = proc.getErrorStream(); byte[] buf = new byte[1024]; int nRead; String lineSep = System.getProperty("line.separator"); do { boolean bHaveOutput = false; if(procOut.available() > 0) { bHaveOutput = true; nRead = procOut.read(buf); String out = new String(buf, 0, nRead); String[] splits = out.split(lineSep); for( String split : splits ) { if( split.startsWith("PROGRESS:") ) { String prog = split.replace("PROGRESS:", ""); if( prog.length() > 0 ) { Integer.parseInt(prog); } } } } if(procErr.available() > 0) { bHaveOutput = true; nRead = procErr.read(buf); } try { proc.exitValue(); break; } catch(IllegalThreadStateException itsx) { System.out.print("."); if(!bHaveOutput) try { Thread.sleep(300L); } catch(Exception ignored) { } } } while(true); } catch(Exception ignored) { } } // Flags indicating the position of data values in an array public static final int START_MZ = 0; public static final int STOP_MZ = 1; public static final int MZ_BINS = 2; public static final int START_RT = 3; public static final int STOP_RT = 4; public static final int RT_BINS = 5; public static final int START_DT = 6; public static final int STOP_DT = 7; public static final int DT_BINS = 8; // Trace data types public static int RT_MODE = 0; public static int DT_MODE = 1; public static int MZ_MODE = 2; public static int DTMZ_MODE = 3; public static int RTDT_MODE = 4; // Range values private static double minMZ = 0.0; private static double maxMZ = 0.0; private static double mzBins = 0.0; private static double minRT = 0.0; private static double maxRT = 0.0; private static double rtBins = 0.0; private static double minDT = 0.0; private static double maxDT = 0.0; private static double dtBins = 0.0; private static double zHigh = Double.MIN_VALUE; private static double zLow = Double.MAX_VALUE; private static File exeFile; public static int BPI = 1; public static int TIC = 0; private static final int USECONE_TYPES = 0; private static final int USETRAP_TYPES = 1; private static final int USETRANSF_TYPES = 2; private static final int USEWH_TYPES = 4; private static final int USEWV_TYPES = 3; /** * @return the zHigh */ public static double getzHigh() { return zHigh; } /** * @return the zLow */ public static double getzLow() { return zLow; } }
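The drift-time handling above boils down to two small pieces of arithmetic: convert_mzdt_max maps the acquisition's maximum m/z to a maximum drift time using the Waters cutoff table (minus any mobility trapping delay), and convertBinToDT maps a 1-indexed bin to milliseconds via (bin - 1) * maxDT / 199. The standalone Java sketch below mirrors that logic for illustration only; the class name and main method are invented here, but the cutoff values and the formula are taken from the code above.

// Standalone illustration of the bin-to-drift-time conversion used above (Synapt G2 cutoffs).
public class DriftTimeConversionExample {

    // Mirrors convert_mzdt_max: map the acquisition max m/z to a max drift time (ms),
    // then subtract any mobility trapping delay.
    static double maxDriftTime(double maxMZ, double delayMs) {
        double dtMax;
        if (maxMZ <= 600) dtMax = 7.61;
        else if (maxMZ <= 1200) dtMax = 10.8;
        else if (maxMZ <= 2000) dtMax = 13.78;
        else if (maxMZ <= 5000) dtMax = 21.940;
        else if (maxMZ <= 8000) dtMax = 27.513;
        else if (maxMZ <= 14000) dtMax = 36.268;
        else if (maxMZ <= 32000) dtMax = 54.580;
        else dtMax = 96.743;
        return dtMax - delayMs;
    }

    // Mirrors convertBinToDT: bins are 1-indexed, drift times start at 0 ms, 199 effective bins.
    static double binToDriftTime(double bin, double maxDT) {
        return (bin - 1) * maxDT / 199;
    }

    public static void main(String[] args) {
        double maxDT = maxDriftTime(8000.0, 0.0);       // 27.513 ms for an 8000 m/z acquisition
        System.out.println(binToDriftTime(1, maxDT));   // bin 1   -> 0.0 ms
        System.out.println(binToDriftTime(200, maxDT)); // bin 200 -> 27.513 ms (the max DT)
    }
}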
<gh_stars>0 package net.minecraft.client.renderer.entity.layers; import com.mojang.blaze3d.vertex.PoseStack; import com.mojang.math.Vector3f; import net.minecraft.client.Minecraft; import net.minecraft.client.model.SnowGolemModel; import net.minecraft.client.renderer.MultiBufferSource; import net.minecraft.client.renderer.RenderType; import net.minecraft.client.renderer.block.BlockRenderDispatcher; import net.minecraft.client.renderer.block.model.ItemTransforms; import net.minecraft.client.renderer.entity.LivingEntityRenderer; import net.minecraft.client.renderer.entity.RenderLayerParent; import net.minecraft.client.renderer.texture.TextureAtlas; import net.minecraft.client.resources.model.BakedModel; import net.minecraft.world.entity.animal.SnowGolem; import net.minecraft.world.item.ItemStack; import net.minecraft.world.level.block.Blocks; import net.minecraft.world.level.block.state.BlockState; import net.minecraftforge.api.distmarker.Dist; import net.minecraftforge.api.distmarker.OnlyIn; @OnlyIn(Dist.CLIENT) public class SnowGolemHeadLayer extends RenderLayer<SnowGolem, SnowGolemModel<SnowGolem>> { public SnowGolemHeadLayer(RenderLayerParent<SnowGolem, SnowGolemModel<SnowGolem>> p_117481_) { super(p_117481_); } public void render(PoseStack p_117494_, MultiBufferSource p_117495_, int p_117496_, SnowGolem p_117497_, float p_117498_, float p_117499_, float p_117500_, float p_117501_, float p_117502_, float p_117503_) { if (p_117497_.hasPumpkin()) { Minecraft minecraft = Minecraft.getInstance(); boolean flag = minecraft.shouldEntityAppearGlowing(p_117497_) && p_117497_.isInvisible(); if (!p_117497_.isInvisible() || flag) { p_117494_.pushPose(); this.getParentModel().getHead().translateAndRotate(p_117494_); float f = 0.625F; p_117494_.translate(0.0D, -0.34375D, 0.0D); p_117494_.mulPose(Vector3f.YP.rotationDegrees(180.0F)); p_117494_.scale(0.625F, -0.625F, -0.625F); ItemStack itemstack = new ItemStack(Blocks.CARVED_PUMPKIN); if (flag) { BlockState blockstate = Blocks.CARVED_PUMPKIN.defaultBlockState(); BlockRenderDispatcher blockrenderdispatcher = minecraft.getBlockRenderer(); BakedModel bakedmodel = blockrenderdispatcher.getBlockModel(blockstate); int i = LivingEntityRenderer.getOverlayCoords(p_117497_, 0.0F); p_117494_.translate(-0.5D, -0.5D, -0.5D); blockrenderdispatcher.getModelRenderer().renderModel(p_117494_.last(), p_117495_.getBuffer(RenderType.outline(TextureAtlas.LOCATION_BLOCKS)), blockstate, bakedmodel, 0.0F, 0.0F, 0.0F, p_117496_, i); } else { minecraft.getItemRenderer().renderStatic(p_117497_, itemstack, ItemTransforms.TransformType.HEAD, false, p_117494_, p_117495_, p_117497_.level, p_117496_, LivingEntityRenderer.getOverlayCoords(p_117497_, 0.0F), p_117497_.getId()); } p_117494_.popPose(); } } } }
// ------------------------------------------ // This function was copied from SDK samples // ------------------------------------------ /* This function obtain the textual representation of a binary Sid. A standardized shorthand notation for SIDs makes it simpler to visualize their components: S-R-I-S-S... In the notation shown above, S identifies the series of digits as an SID, R is the revision level, I is the identifier-authority value, S is subauthority value(s). An SID could be written in this notation as follows: S-1-5-32-544 In this example, the SID has a revision level of 1, an identifier-authority value of 5, first subauthority value of 32, second subauthority value of 544. (Note that the above Sid represents the local Administrators group) The GetTextualSid() function will convert a binary Sid to a textual string. The resulting string will take one of two forms. If the IdentifierAuthority value is not greater than 2^32, then the SID will be in the form: S-1-5-21-2127521184-1604012920-1887927527-19009 ^ ^ ^^ ^^^^^^^^^^ ^^^^^^^^^^ ^^^^^^^^^^ ^^^^^ | | | | | | | +-+-+------+----------+----------+--------+--- Decimal Otherwise it will take the form: S-1-0x206C277C6666-21-2127521184-1604012920-1887927527-19009 ^ ^^^^^^^^^^^^^^ ^^ ^^^^^^^^^^ ^^^^^^^^^^ ^^^^^^^^^^ ^^^^^ | | | | | | | | Hexidecimal | | | | | +----------------+------+----------+----------+--------+--- Decimal If the function succeeds, the return value is TRUE. If the function fails, the return value is FALSE. To get extended error information, call the Win32 API GetLastError(). */ static BOOL GetTextualSid( const PSID pSid, LPTSTR tstrTextualSid, LPDWORD cchSidSize ) { PSID_IDENTIFIER_AUTHORITY pSia; DWORD dwSubAuthorities; DWORD cchSidCopy; DWORD dwCounter; if(!IsValidSid(pSid)) { return FALSE; } SetLastError(0); pSia = GetSidIdentifierAuthority(pSid); if(GetLastError()) { return FALSE; } dwSubAuthorities = *GetSidSubAuthorityCount(pSid); if(GetLastError()) { return FALSE; } cchSidCopy = (15 + 12 + (12 * dwSubAuthorities) + 1) * sizeof(TCHAR); if(*cchSidSize < cchSidCopy) { *cchSidSize = cchSidCopy; SetLastError(ERROR_INSUFFICIENT_BUFFER); return FALSE; } cchSidCopy = wsprintf(tstrTextualSid, TEXT("S-%lu-"), SID_REVISION); if ( (pSia->Value[0] != 0) || (pSia->Value[1] != 0) ) { cchSidCopy += wsprintf(tstrTextualSid + cchSidCopy, TEXT("0x%02hx%02hx%02hx%02hx%02hx%02hx"), (USHORT)pSia->Value[0], (USHORT)pSia->Value[1], (USHORT)pSia->Value[2], (USHORT)pSia->Value[3], (USHORT)pSia->Value[4], (USHORT)pSia->Value[5]); } else { cchSidCopy += wsprintf(tstrTextualSid + cchSidCopy, TEXT("%lu"), (ULONG)(pSia->Value[5]) + (ULONG)(pSia->Value[4] << 8) + (ULONG)(pSia->Value[3] << 16) + (ULONG)(pSia->Value[2] << 24)); } for(dwCounter = 0 ; dwCounter < dwSubAuthorities ; dwCounter++) { cchSidCopy += wsprintf(tstrTextualSid + cchSidCopy, TEXT("-%lu"), *GetSidSubAuthority(pSid, dwCounter) ); } *cchSidSize = cchSidCopy; return TRUE; }
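The comment block above describes the S-R-I-S-S... notation in prose; the sketch below is a loose Java illustration of the same formatting rules (decimal identifier authority when it fits in 32 bits, hexadecimal otherwise, then dash-separated subauthorities). It is not part of the original code, and the class and method names are invented for the example.

// Loose Java illustration of the SID formatting rules described in the comment above.
public class SidFormatExample {

    static String formatSid(int revision, long identifierAuthority, long[] subAuthorities) {
        StringBuilder sb = new StringBuilder("S-").append(revision).append('-');
        // Authorities that fit in 32 bits print in decimal; larger values print in hex,
        // mirroring the check on the two high-order authority bytes in GetTextualSid.
        if (identifierAuthority <= 0xFFFFFFFFL) {
            sb.append(identifierAuthority);
        } else {
            sb.append(String.format("0x%012x", identifierAuthority));
        }
        for (long sub : subAuthorities) {
            sb.append('-').append(sub);
        }
        return sb.toString();
    }

    public static void main(String[] args) {
        // The local Administrators group from the comment above: S-1-5-32-544
        System.out.println(formatSid(1, 5L, new long[]{32, 544}));
    }
}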
/** * A Camel Java DSL Router */ public class MyRouteBuilder extends RouteBuilder { /** * Let's configure the Camel routing rules using Java code... */ public void configure() { //example routes for consumer using a testnet node from("casper:http://localhost:8080/events/main?operation=block_added").log("Block Hash - ${body}"); //example routes for producer using a testnet node /* from("timer://simpleTimer?period=5000") .to("casper:http://65.21.227.180:7777/?operation="+CasperConstants.ACCOUNT_INFO) .log("call "+CasperConstants.ACCOUNT_INFO +" with params : blockHeight=530214 and publicKey=017d9aa0b86413d7ff9a9169182c53f0bacaa80d34c211adab007ed4876af17077 gives result = ${body}"); from("timer://simpleTimer?period=5000") .to("casper:http://65.21.227.180:7777/?operation="+CasperConstants.ACCOUNT_INFO+"&blockHeight=530214&publicKey=017d9aa0b86413d7ff9a9169182c53f0bacaa80d34c211adab007ed4876af17077") .log("call "+CasperConstants.ACCOUNT_INFO +" with params : blockHeight=530214 and publicKey=017d9aa0b86413d7ff9a9169182c53f0bacaa80d34c211adab007ed4876af17077 gives result = ${body}"); from("timer://simpleTimer?period=3000") .to("casper:http://65.21.227.180:7777/?operation="+CasperConstants.STATE_ROOT_HASH+"&blockHeight=530214") .log("call "+CasperConstants.STATE_ROOT_HASH +" with params : blockHeight=530214 gives result = - ${body}"); */ } }
A Survey on Hardware/Software Codesign Representation Models In hardware/software codesign, modeling is a very important issue. The model must capture the features of the system and describe its functionality. The design cycle must be based on formal representations so that the synthesis of a design from specification to implementation can be carried out systematically. Many models have been proposed for representing HW/SW systems. This report is the result of a survey on hardware/software codesign representation models. It relates the characteristics of several existing models and compares their properties. This work is encompassed in the SAVE project, which aims to study the specification and verification of heterogeneous electronic systems. The main objective of this survey is to explore the field of modeling of heterogeneous systems.
Unpredictability in some nonchaotic dynamical systems. We study properties of decay of the correlations for a class of smooth observables and mutual mixing for a class of subsets, with various rates ranging from the power-law to the exponential rate, in simple deterministic nonstrongly chaotic dynamical systems on the torus $T^2$. In fact, these systems are ergodic, have zero K-S entropy and an absolutely continuous part in their spectrum, and display divergence of trajectories with a power-law rate. We show that they generate time series which are unpredictable in the sense of the statistical theory of prediction.
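For context, the two quantities the abstract refers to can be written in the standard way; this is a generic textbook formulation, not notation taken from the paper itself. For a map $T$ preserving a probability measure $\mu$, smooth observables $f, g$ and measurable sets $A, B$:

$$ C_{f,g}(n) = \left| \int (f \circ T^{n})\, g \, d\mu - \int f \, d\mu \int g \, d\mu \right| \quad \text{(decay of correlations)} $$

$$ M_{A,B}(n) = \left| \mu\big(T^{-n}A \cap B\big) - \mu(A)\,\mu(B) \right| \quad \text{(mixing of sets)} $$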
<reponame>DRIVER-EU/scenario-editor<filename>packages/tmt/src/components/trials/trial-list.ts import m from 'mithril'; import { TextInput, RoundIconButton, Icon } from 'mithril-materialized'; import { TrialSvc, dashboardSvc } from '../../services'; import { titleAndDescriptionFilter, padLeft } from '../../utils'; import { ITrial, ITrialOverview } from '../../../../models'; import { Dashboards, AppState } from '../../models'; export const TrialList = () => { const state = { filterValue: undefined as string | undefined, }; const formatDate = (date: Date | string) => { const d = new Date(date); return `${d.getFullYear()}/${d.getMonth() + 1}/${d.getDate()} ${padLeft(d.getHours())}:${padLeft(d.getMinutes())}`; }; return { oninit: () => TrialSvc.loadList(), view: () => { const trials = TrialSvc.getList(); const query = titleAndDescriptionFilter(state.filterValue); const filteredTrials = trials.filter(query); return m('.scenario-list', [ m('.row', [ m(RoundIconButton, { iconName: 'add', class: 'green input-field right btn-medium', style: 'margin: 1em 1em 0 0;', onclick: () => { TrialSvc.new({ title: 'New trial', creationDate: new Date(), } as ITrial); dashboardSvc.switchTo(Dashboards.TRIAL_INFO); }, }), m(TextInput, { label: 'Filter', id: 'filter', iconName: 'filter_list', onkeyup: (ev: KeyboardEvent, v?: string) => (state.filterValue = v), style: 'margin-right:100px', className: 'right', }), ]), m( '.row.sb.large', filteredTrials.map(trial => m('.col.s6.m4.l3', [ m( '.card', m('.card-content', { style: 'height: 150px' }, [ m( 'a[href=#].card-title', { onclick: () => { console.log('Set scenario to ' + trial.title); TrialSvc.load(trial.id); }, }, `${trial.title || 'Untitled'}${trial.lastEdit ? ` (${formatDate(trial.lastEdit)})` : ''}` ), m( 'p', trial.description && trial.description.length > 120 ? `${trial.description.substr(0, 119)}...` : trial.description ), ]), m('.card-action', [ m( 'a', { href: `${AppState.apiService()}/repo/${trial.id}`, }, m(Icon, { iconName: 'cloud_download', }) ), m( 'a', { href: '#!', onclick: () => { m.request<ITrialOverview>({ method: 'POST', url: `${AppState.apiService()}/repo/clone/${trial.id}`, }).then(to => { if (to && to.id) { TrialSvc.load(to.id); } }); }, }, m(Icon, { iconName: 'content_copy', }) ), ]) ), ]) ) ), ]); }, }; };
Have you noticed the impact of the sharing economy?

Sharing Economy? What is it?
The sharing economy, also known by a few other names (Peer Economy, Collaborative Consumption, or the Asset-light Lifestyle), is all about sharing your underutilized assets through online or virtual markets. Think of it this way: you want a lawn mower for a one-hour job. Instead of buying one, you rent it from an online marketplace. Once you complete the job, you return it and pay the rent. Here, instead of buying an expensive lawn mower, you rented it. You made a saving by not purchasing the lawn mower, and the seller made an income by sharing his lawn mower when he was not using it. Both parties are happy. This, loosely, is the basis of the sharing economy. The sharing economy made it possible for companies like Airbnb, Uber, Lyft and many others to set up their virtual marketplaces. Sellers, the people who own the asset, offer it on these virtual marketplaces. Buyers, the people who need these items from time to time, rent them from these virtual markets. This “peer-to-peer” sharing is gaining popularity among both sellers and buyers. A seller can now decide when to offer their service, and the buyer has the option to accept it just for their job. An Uber driver was telling me the other day that he was out with his wife the night before, came home very late and slept until 11 in the morning. When he got up and was ready to work, he switched on his Uber app. I was his second customer, he told me. He added, “I like this lifestyle. I can now decide when to work and how long to work. I could not do this when I was working for the cab rental company.” I know almost all of us have experienced the benefits of the sharing economy, maybe without clearly understanding it. If you have used an Uber or Airbnb service, then you have already benefitted from the sharing economy.

How to use the sharing economy to earn extra cash?
If you have unused assets in your possession, you can turn them into income-generating assets. Unused or sparingly used rooms, cars, boats: all of these can be offered through the respective virtual marketplace, and you can decide when to put them up for rent. Look at it this way. A few years back, if you wanted to rent out a room in your house, how would you get a customer? You put a sign in your yard or an ad in the local paper's classified section. And then there was no guarantee that the people seeing your ad were actually buyers looking to rent a room. But today it has all changed. When you place the details of what you are offering on a niche virtual marketplace, it is immediately visible to thousands or even millions of ready-to-buy customers. In the case of your unused room, you can offer it on Airbnb, and the people who check Airbnb are people who are looking to rent rooms. If it is a boat that you have, then you offer it on Boatbound.co or getmyboat.com. If you are offering your car, then there is Uber. The point is, this is highly targeted marketing! What if you do not have a room, boat or car? Look around your home and see if there is anything that you can possibly rent: your lawn mower, power drill, or home painting sprayer; all of these are things you can offer to someone who needs them. Or, if you don't own any of these, maybe you can offer your services in child care, elderly care or pet care on care.com. The choice is endless.

What are some of the other sharing platforms available?
Click here to see a list of sharing platforms operating in various niche markets.
Please bookmark this page and check it frequently, as I keep updating it regularly.

A list of some of the virtual marketplaces
Your location also plays a major role. Some of these virtual marketplaces are start-ups and may be offering their services only in certain cities. But don't worry if the service you are looking for is not offered in your area; you can always talk to the sharing economy platform and offer to support them in opening their service in your area.

Is there any risk involved in participating in the sharing economy?
The major risk when you build your business on a third-party platform is: what happens if it goes out of business? Or what happens if they change their rules? You also have no idea when they are going to change their pay rate. Uber has slashed its rates many times, and each time the drivers are left with only one choice: take it or leave it. And remember, Uber has also tested its fleet of driverless cars. Another area of concern with the sharing economy is legal and regulatory risk. Be wary of this, as some of these platforms are operating in areas that are not very clearly defined. So far, lawmakers have not done much to enforce penalties, but please be aware of these risks.

One word about the sharing economy
The sharing economy does not require you to have any specialized skills. This also means that just about anyone can get into it, and because of this, your earnings will not be that great. In other words, you are not going to get very rich participating in the sharing economy. On the other hand, if you have specialized skills (copywriting, web design, developing mobile apps, etc.), then it is a different story. You can earn many times more than by offering an unskilled service on a sharing economy platform.

Cheers! We will be covering a lot more topics in the future. Click here to register with us and get notified of our future posts. Like what you're reading? Please share with your friends on Facebook! What do you think? Share your story in the comments.
<filename>src/app/app.module.ts import { MiddlewareConsumer, Module, NestModule, RequestMethod } from '@nestjs/common'; import * as cookieParser from 'cookie-parser'; import * as cors from 'cors'; import * as csurf from 'csurf'; import * as expressPino from 'express-pino-logger'; import * as helmet from 'helmet'; import * as uuid from 'uuid'; import { AppController } from './app.controller'; import { AppService } from './app.service'; import { ConfigModule } from './config/config.module'; import { ConfigService } from './config/config.service'; import { DatabaseModule } from './database/database.module'; import { LoggerModule } from './logger/logger.module'; import { LogService } from './logger/logger.service'; @Module({ imports: [ConfigModule, DatabaseModule, LoggerModule.forRoot()], controllers: [AppController], providers: [AppService], }) export class AppModule implements NestModule { constructor(private readonly configService: ConfigService, private readonly logService: LogService) {} /** * Adds application midlewares for security and logging * * @param {MiddlewareConsumer} consumer * @memberof AppModule */ public configure(consumer: MiddlewareConsumer): void { consumer .apply( expressPino({ logger: this.logService.getLogger(), genReqId: () => uuid.v4(), }), // Request logger cookieParser(), // Cookie parser for csurf helmet(), // Security headers csurf({ cookie: true }), // CSRF token to requests cors({ origin: (origin: string, callback: (err: Error | null, allow?: boolean) => void) => { // Origins init const whitelistOrigins = this.configService.get('CORS_ALLOWED_ORIGINS'); // If no white list origins, authorized if (whitelistOrigins.length === 0) return callback(null, true); // If request origin is in white list origin, authorized if (whitelistOrigins.indexOf(origin) !== -1) return callback(null, true); // Unauthorized origin return callback(new Error('Not allowed by CORS')); }, // CORS request handler methods: this.configService.get('CORS_ALLOWED_METHODS'), allowedHeaders: this.configService.get('CORS_ALLOWED_HEADERS'), exposedHeaders: this.configService.get('CORS_EXPOSED_METHODS'), credentials: this.configService.get('CORS_CREDENTIALS'), maxAge: this.configService.get('CORS_MAX_AGE'), preflightContinue: this.configService.get('CORS_PREFLIGHT_CONTINUE'), optionsSuccessStatus: this.configService.get('CORS_OPTIONS_SUCCESSS_CODE'), }) ) .forRoutes({ path: '*', method: RequestMethod.ALL }); } }
class MovingAverage: """ Calculates the moving average of reflection with the raw wavelength and raw trace from the cache """ command: str result: BaseResult def do_work(self) -> BaseResult: array_result: Tuple[np.array, np.array] = load_only_array_results() if not array_result: self._fail_result() return self.result filtered_wavelength, filtered_trace = self._moving_average(array_result) self._success_result(filtered_wavelength, filtered_trace) return self.result def _moving_average(self, array_result: Tuple[np.array, np.array], window_size: int = 10) -> Tuple[np.array, np.array]: """ :param window_size: sample points you want to take the average of """ raw_wavelength = array_result[0] raw_trace = array_result[1] window = np.ones(int(window_size)) / float(window_size) trace_average = np.convolve(raw_trace, window, 'same') return self._cut_data(raw_wavelength), self._cut_data(trace_average) def _success_result(self, filtered_wavelength: np.array, filtered_trace: np.array) -> None: self.result.value = (filtered_wavelength, filtered_trace) self.result.msg = "moving average calculated and saved to cache" def _fail_result(self): self.result.msg = "retrieve Data before calculating moving average" @staticmethod def _cut_data(array, num_cut=10) -> np.array: """ used to cut the data for the moving average filter :param array: 1-Dim numpy array what should be cutted :param num_cut: the number of datapoints to cut off :return: the cutted array """ c = num_cut // 2 array = array[c:-c] return array
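As a reading aid for the class above: convolving the raw trace with a length-w window of ones divided by w (here w = window_size = 10) is just a running mean. Up to the zero-padding that numpy's 'same' mode introduces at the edges (which is exactly what _cut_data trims away), each output sample is approximately

$$ y[n] \approx \frac{1}{w} \sum_{k = n - \lfloor w/2 \rfloor}^{\,n + \lceil w/2 \rceil - 1} x[k], $$

where x is the raw trace and y the smoothed trace. This is a generic description of the moving-average filter, not notation from the original code.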
/** * A naive implementation of QueryEngine that just loads all the documents in the queried collection * and then filters them in memory. */ public class SimpleQueryEngine implements QueryEngine { private final LocalDocumentsView localDocumentsView; public SimpleQueryEngine(LocalDocumentsView localDocumentsView) { this.localDocumentsView = localDocumentsView; } @Override public ImmutableSortedMap<DocumentKey, Document> getDocumentsMatchingQuery(Query query) { // TODO: Once LocalDocumentsView provides a getCollectionDocuments() method, we // should call that here and then filter the results. return localDocumentsView.getDocumentsMatchingQuery(query); } @Override public void handleDocumentChange(MaybeDocument oldDocument, MaybeDocument newDocument) { // No indexes to update. } }
def render_latex(input_text, dpath=None, fname=None, preamb_extra=None, verbose=1, **kwargs): import utool as ut try: import vtool_ibeis as vt except ImportError: import vtool as vt input_text_ = '\\pagenumbering{gobble}\n' + input_text img_fname = ut.ensure_ext(fname, ['.jpg'] + list(ut.IMG_EXTENSIONS)) img_fpath = join(dpath, img_fname) pdf_fpath = ut.compile_latex_text( input_text_, fname=fname, dpath=dpath, preamb_extra=preamb_extra, verbose=verbose, move=False) ext = splitext(img_fname)[1] fpath_in = ut.convert_pdf_to_image(pdf_fpath, ext=ext, verbose=verbose) vt.clipwhite_ondisk(fpath_in, fpath_out=img_fpath, verbose=verbose > 1) return img_fpath
/* get a maximal span to read. Returns 0 if buffer * is empty */ ssize_t bufGetReadSpan (buffer_t * b, void **addr) { if (b->empty) { *addr = NULL; return 0; } *addr = &(b->buf[b->ridx]); ssize_t len = b->widx - b->ridx; if (len <= 0) len = b->size - b->ridx; return len; }
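The function above returns the longest contiguous run that can be read before the ring buffer wraps: if the writer is ahead of the reader, the span is widx - ridx; otherwise the reader can only go up to the end of the underlying array. Below is a loose Java analog of the same bookkeeping, with hypothetical field names, for illustration only.

// Loose Java analog of bufGetReadSpan; the class and field names are hypothetical.
final class RingBuffer {
    final byte[] buf;
    int ridx, widx;        // read and write indices
    boolean empty = true;  // distinguishes the full and empty cases when ridx == widx

    RingBuffer(int size) { buf = new byte[size]; }

    // Length of the longest contiguous run readable without wrapping (0 if empty).
    int readSpan() {
        if (empty) return 0;
        int len = widx - ridx;                  // simple case: writer ahead of reader
        if (len <= 0) len = buf.length - ridx;  // wrapped: read up to the end of the array
        return len;
    }
}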
/*** * Exception thrown when there's no possible gtop route that would satisfy the conditions. * * */ @SuppressWarnings("serial") public class PathDescriptionException extends PathFinderException { /*** * Route does not match gtop. * * @param notFoundInputHint additional information */ public PathDescriptionException(final String notFoundInputHint) { super("The following hint set does not match anything on Gtop: " + notFoundInputHint); } /*** * Route does not match gtop. * * @param message additional information * @param cause original exception */ public PathDescriptionException(final String message, final Throwable cause) { super(message, cause); } /*** * Route does not match gtop. * * @param cause original exception. */ public PathDescriptionException(final Throwable cause) { super(cause); } }
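A brief, purely hypothetical usage sketch for the exception above; the hint string and the surrounding resolver code are invented for illustration and are not part of this project.

// Hypothetical usage of PathDescriptionException (the hint string is illustrative only).
public class PathDescriptionExceptionExample {
    public static void main(String[] args) {
        try {
            // A path resolver would normally raise this when a user-supplied hint matches nothing in the gtop.
            throw new PathDescriptionException("(:Person)-[:UNKNOWN_EDGE]->(:Movie)");
        } catch (PathDescriptionException e) {
            System.err.println("Hint could not be matched: " + e.getMessage());
        }
    }
}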
/** * * @author Adam Margolin * @author Raktim Sinha */ public class CGHPositionGraphSeparatedCanvas extends CGHPositionGraphCanvas implements ActionListener{ PositionDataRegionClickedPopup regionClickedPopup; public static final int AMPLIFICATIONS = 0; public static final int DELETIONS = 1; boolean isReversed = false; private int flankingRegionType = AMPLIFICATIONS; /** Creates a new instance of CGHPositionGraphCanvas */ public CGHPositionGraphSeparatedCanvas(Insets insets) { super(insets); regionClickedPopup = new PositionDataRegionClickedPopup(this); } public CGHPositionGraphSeparatedCanvas(Insets insets, boolean isReversed, int flankingRegionType) { super(insets); regionClickedPopup = new PositionDataRegionClickedPopup(this); this.flankingRegionType = flankingRegionType; this.isReversed = isReversed; } public void paint(Graphics g) { super.paint(g); Graphics2D g2D = (Graphics2D)g; drawColumns(g2D); } private void drawColumns(Graphics2D g) { for (int column = 0; column < model.getNumExperiments(); column++) { drawColumn(g, model.getExperimentIndexAt(column)); } } /** * Draws a specified column. */ private void drawColumn(Graphics2D g, int column) { int columnIndex = model.getExperimentIndexAt(column); //if(isReversed){ // for(int flankingRegion = model.getNumFlankingRegions(columnIndex) - 1; flankingRegion >= 0; flankingRegion--){ // drawFlankingRegion(g, flankingRegion, column, columnIndex); // } //}else{ for(int flankingRegion = 0; flankingRegion < model.getNumFlankingRegions(columnIndex); flankingRegion++){ drawFlankingRegion(g, flankingRegion, column, columnIndex); } //} } private void drawFlankingRegion(Graphics2D g2, int frIndex, int column, int columnIndex){ FlankingRegion fr = model.getFlankingRegionAt(columnIndex, frIndex); if(flankingRegionType == DELETIONS && fr.getType() == FlankingRegion.AMPLIFICATION){ return; } if(flankingRegionType == AMPLIFICATIONS && fr.getType() == FlankingRegion.DELETION){ return; } int frStart = fr.getStart(); int frStop = fr.getStop(); Dimension d = getSize(); double width = d.width; double height = d.height - 50; double maxVal = model.getMaxClonePosition(); int rectX = insets.left + column * (elementWidth + rectSpacing); if(isReversed){ //System.out.println("Panel width " + getWidth()); //System.out.println("Panel preferred width " + getPreferredSize().getWidth()); //System.out.println("Panel size width " + getSize().getWidth()); //rectX = getWidth() - rectX - elementWidth; //rectX = getWidth() - ( (model.getNumExperiments() - column - 1)* (elementWidth + rectSpacing) ) - insets.right; rectX = getWidth() - elementWidth - insets.right - ( (model.getNumExperiments() - column - 1)* (elementWidth + rectSpacing) ); } int rectY = (int) (insets.top + frStart * unitLength); double dRectHeight = (frStop - frStart) * unitLength; int rectHeight; if(dRectHeight < 1 && dRectHeight > 0){ rectHeight = 1; }else{ rectHeight = (int)dRectHeight; } Rectangle curRect = new Rectangle(rectX, rectY, elementWidth, rectHeight); g2.setPaint(model.getFlankingRegionColor(columnIndex, frIndex)); g2.fill(curRect); } /** Getter for property isReversed. * @return Value of property isReversed. */ public boolean isIsReversed() { return isReversed; } /** Setter for property isReversed. * @param isReversed New value of property isReversed. */ public void setIsReversed(boolean isReversed) { this.isReversed = isReversed; } /** Getter for property flankingRegionType. * @return Value of property flankingRegionType. 
*/ public int getFlankingRegionType() { return flankingRegionType; } /** Setter for property flankingRegionType. * @param flankingRegionType New value of property flankingRegionType. */ public void setFlankingRegionType(int flankingRegionType) { this.flankingRegionType = flankingRegionType; } protected void formMouseClicked(java.awt.event.MouseEvent evt) { if(evt.getButton() == MouseEvent.BUTTON3 ){ //if(evt.isPopupTrigger() ){ Point point = evt.getPoint(); int selectedColumn = getSelectedColumn(point.x); int selectedPosition = getSelectedPosition(point.y); if(selectedColumn == -1 || selectedPosition < 0 || selectedPosition > model.getMaxClonePosition()){ selectedDataRegion = null; return; } int experimentIndex = model.getExperimentIndexAt(selectedColumn); selectedDataRegion = getFlankingRegionAtLocation(experimentIndex, selectedPosition); if(selectedDataRegion != null){ regionClickedPopup.show(evt.getComponent(), evt.getX(), evt.getY()); } } /* if(selectedDataRegion != null){ System.out.println("Type " + selectedDataRegion.getDataRegion().getClass()); System.out.println("Exp Index " + selectedDataRegion.getExperimentIndex()); }else{ System.out.println("Null"); }*/ } private int getSelectedColumn(int xCoord){ //Calculate if the point falls in any column if(!isReversed){ for(int column = 0; column < model.getNumExperiments(); column++){ int rectX = insets.left + column * (elementWidth + rectSpacing); if(xCoord >= rectX && xCoord <= rectX + elementWidth){ return column; } } }else{ for(int column = 0; column < model.getNumExperiments(); column++){ //int rectX = insets.left + column * (elementWidth + rectSpacing); //rectX = getWidth() - rectX - elementWidth; int rectX = getWidth() - elementWidth - insets.right - ( (model.getNumExperiments() - column - 1)* (elementWidth + rectSpacing) ); if(xCoord >= rectX && xCoord <= rectX + elementWidth){ return column; } } } return -1; } private int getSelectedPosition(int yCoord){ return (int) ((yCoord - insets.top) / unitLength); } private CGHDataRegionInfo getFlankingRegionAtLocation(int experimentIndex, int selectedPosition){ int selectedFrIndex = -1; for(int i = 0; i < model.getNumFlankingRegions(experimentIndex); i++){ FlankingRegion fr = model.getFlankingRegionAt(experimentIndex, i); if(selectedPosition >= fr.getStart() && selectedPosition <= fr.getStop() && fr.getType() == this.flankingRegionType){ selectedFrIndex = i; } } if(selectedFrIndex != -1){ FlankingRegion fr = model.getFlankingRegionAt(experimentIndex, selectedFrIndex); return new CGHDataRegionInfo(fr, experimentIndex); } return null; } }
Russian Trial ALGORITHM: Implementation of Combined Antihypertensive and Hypolipidemic Treatment for Clinical Efficacy Achievement in Routine Clinical Practice
Aim. To study the clinical outcomes (achievement of target blood pressure, BP) and the tolerability of antihypertensive and hypolipidemic therapy with fixed combinations of indapamide/perindopril, amlodipine/perindopril, amlodipine/indapamide/perindopril and rosuvastatin in patients with hypertension and high/very high cardiovascular risk in real clinical practice.
Material and methods. The study included 16,788 patients from 104 cities of the Russian Federation. The duration of observation was 12 weeks, with three monitoring visits per patient. BP was measured twice at each visit, and the arithmetic mean of the two readings was used. The results were analyzed and presented with descriptive statistics.
Results. The mean age of the patients was 60.6±10.2 years; 42.2% were men and 57.8% women. Patients with systolic-diastolic hypertension (BP >140/90 mm Hg) at the initial visit accounted for 73.9% (n=12,413) of all participants. At inclusion, mean systolic BP was 162.94±13.07 mm Hg and mean diastolic BP was 93.43±8.61 mm Hg. As expected, more than 90% of the study population had very high (57%; n=9,586) or high (35.9%; n=6,022) additional cardiovascular risk. Although the overwhelming majority of patients had high or very high additional risk, more than a third were receiving monotherapy for BP control (36.8%; n=6,182), and 13.8% (n=2,321) had never received antihypertensive therapy before. On therapy with perindopril-based fixed combinations, 92.7% of patients reached the target BP of <140/90 mm Hg. After treatment, mean systolic BP decreased from 162.94±13.07 mm Hg to 127.80±7.56 mm Hg, and mean diastolic BP from 93.43±8.61 mm Hg to 78.54±5.59 mm Hg. Adherence to treatment was rated very high or high in more than 97% of cases. Only 10 adverse events were recorded in this large sample over the 12 weeks of treatment.
Conclusion. In the Russian population, 73.9% of patients with hypertension and high/very high risk do not achieve target BP levels. Combination therapy based on fixed combinations with perindopril achieved effective BP control in 93% of patients with hypertension and high or very high risk within 12 weeks, with good tolerability of treatment.
/** * Represents a Databus physical partition * * @see <a href="https://iwww.corp.linkedin.com/wiki/cf/display/ENGS/Databus+2.0+and+Databus+3.0+Data+Model">Databus 2.0 and Databus 3.0 Data Model</a> */ public class PhysicalPartition implements NamedObject, Comparable<PhysicalPartition> { private final Integer _id; private final String _name; private String _simpleStringCache; static final Integer ANY_PHYSICAL_PARTITION_ID = -1; static final String ANY_PHYSICAL_PARTITION_NAME = "*"; public static final char DBNAME_PARTID_SEPARATOR = ':'; public static final PhysicalPartition ANY_PHYSICAL_PARTITION = new PhysicalPartition(ANY_PHYSICAL_PARTITION_ID, ANY_PHYSICAL_PARTITION_NAME); /** Default constructor for bean compliance and JSON deserialization. Sets to partition to * {@link #ANY_PHYSICAL_PARTITION_ID} */ public PhysicalPartition() { this(ANY_PHYSICAL_PARTITION_ID, ANY_PHYSICAL_PARTITION_NAME); } /** * For Espresso consumers, a database (e.g. EspressoDB8 with 8 partitions), the physical partition for partition 2 will be instantiated as (2, "EspressoDB8") * @param id Partition id * @param name The name of the database */ public PhysicalPartition(Integer id, String name) { super(); if (null == id) throw new NullPointerException("id"); _id = id; _name = name; } public static PhysicalPartition parsePhysicalPartitionString(String pPartString, String del) throws IOException { // format is name<del>id String [] parts = pPartString.split(del); if(parts.length <= 1) throw new IOException("invalid physical source name/id format in " + pPartString + ";del=" + del); String idS = parts[parts.length-1]; Integer id = Integer.parseInt(idS); String name = pPartString.substring(0, pPartString.length() - idS.length() - 1); if(name.length()<1) throw new IOException("invalid physical source name format in " + pPartString + ";del=" + del); if(id.intValue()<0) throw new IOException("invalid physical source id format in " + pPartString + ";del=" + del); return new PhysicalPartition(id, name); } public static PhysicalPartition createAnyPartitionWildcard() { return ANY_PHYSICAL_PARTITION; } public static PhysicalPartition createAnyPartitionWildcard(String dbName) { return new PhysicalPartition(ANY_PHYSICAL_PARTITION_ID, dbName); } /** * Create a PhysicalPartition object from a JSON string * @param json the string with JSON serialization of the PhysicalPartition */ public static PhysicalPartition createFromJsonString(String json) throws JsonParseException, JsonMappingException, IOException { ObjectMapper mapper = new ObjectMapper(); Builder result = mapper.readValue(json, Builder.class); return result.build(); } /** Creates from a string in the format DBNAME:PARTITIONID or just DBNAME for a partition wildcard */ public static PhysicalPartition createFromSimpleString(String simpleString) { if (null == simpleString) return null; int index = simpleString.indexOf(DBNAME_PARTID_SEPARATOR); String dbName = (index < 0) ? 
simpleString : simpleString.substring(0, index); if (dbName.length() < 1) throw new IllegalArgumentException("invalid physical partition string: " + simpleString); PhysicalPartition result = null; if (index < 0) result = new PhysicalPartition(ANY_PHYSICAL_PARTITION_ID, dbName); else { String idStr = simpleString.substring(index + 1); if (idStr.equals("*")) result = new PhysicalPartition(ANY_PHYSICAL_PARTITION_ID, dbName); else { Integer ppartId = -1; try { ppartId = Integer.parseInt(idStr); } catch (NumberFormatException nfe) { throw new IllegalArgumentException("invalid physical partition string: " + simpleString); } result = new PhysicalPartition(ppartId, dbName); } } return result; } /** The physical partition globally unique id */ public Integer getId() { return _id; } @Override public String toString() { return toJsonString(); } public String toJsonString() { StringBuilder sb = new StringBuilder(64); sb.append("{\"id\":"); sb.append(_id.shortValue()); sb.append(",\"name\":"); sb.append("\""); sb.append(_name); sb.append("\""); sb.append("}"); return sb.toString(); } /** Generates a string in the format DBNAME:PARTITIONID or just DBNAME for a partition wildcard */ public String toSimpleString() { if (null == _simpleStringCache) { _simpleStringCache = toSimpleString(null).toString(); } return _simpleStringCache; } /** Checks if the object denotes a wildcard */ public boolean isWildcard() { return isAnyPartitionWildcard(); } /** Checks if the object denotes a ALL_LOGICAL_SOURCES wildcard */ public boolean isAnyPartitionWildcard() { return _id.equals(ANY_PHYSICAL_PARTITION_ID); } public boolean equalsPartition(PhysicalPartition other) { return (_id.shortValue() == other._id.shortValue() && _name.equals(other._name)); } @Override public boolean equals(Object other) { if (this == other) { return true; } if (null == other || !(other instanceof PhysicalPartition)) return false; return equalsPartition((PhysicalPartition)other); } @Override public int hashCode() { return _id.hashCode()<<16 + _name.hashCode(); } @Override /** return name of the partition. Actual meaning of this name depends on the application. * For espresso it is db name */ public String getName() { return _name; } @Override public int compareTo(PhysicalPartition other) { int cv = getName().compareTo((other.getName())); if (cv == 0) { return getId() - other.getId(); } return cv; } public static class Builder { private Integer _id = ANY_PHYSICAL_PARTITION_ID; private String _name = ANY_PHYSICAL_PARTITION_NAME; public Integer getId() { return _id; } public void setId(Integer id) { _id = id; } public String getName() { return _name; } public void setName(String name) { _name = name;; } public void makeAnyPartitionWildcard() { _id = ANY_PHYSICAL_PARTITION_ID; _name = ANY_PHYSICAL_PARTITION_NAME; } public PhysicalPartition build() { return new PhysicalPartition(_id, _name); } } /** * Converts the physical partition to a human-readable string * @param sb a StringBuilder to accumulate the string representation; if null, a new one will be allocated * @return the StringBuilder */ public StringBuilder toSimpleString(StringBuilder sb) { if (null == sb) { sb = new StringBuilder(20); } sb.append(_name).append(DBNAME_PARTID_SEPARATOR); if (isAnyPartitionWildcard()) { sb.append("*"); } else { sb.append(_id); } return sb; } }
// CreateInviteInvitationPath computes a request path to the createInvite action of invitation.
func CreateInviteInvitationPath(inviteTo string) string {
	param0 := inviteTo

	return fmt.Sprintf("/api/invitations/%s", param0)
}
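A minimal call sketch follows (the invitation ID is made up, and the helper is reproduced locally so the snippet runs on its own). Note that the helper interpolates the ID directly with fmt.Sprintf, so any URL escaping is the caller's responsibility.

package main

import "fmt"

// Reproduced from the path helper above so the sketch is self-contained;
// in the real project it would be imported from its generated package.
func CreateInviteInvitationPath(inviteTo string) string {
	return fmt.Sprintf("/api/invitations/%s", inviteTo)
}

func main() {
	fmt.Println(CreateInviteInvitationPath("org-42")) // prints /api/invitations/org-42
}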
/**
 *
 */
/**
 * @author Loic
 *
 */
package csv;
Microvascular obstructions in portal bile duct capillaries and hepatic sinusoids during normothermic machine perfusion of marginal human livers We read with great interest the recent work of DiRito et al. who demonstrate the formation of Rouleaux-like aggregations of red blood cells (RBCs) during normothermic machine perfusion (NMP) of marginal human kidneys. They show that these aggregates cause microvascular obstructions, which can be pharmacologically cleared, improving renal function and decreasing injury.
def _create_aggregator(self, aggregation_method):
    aggregators = {
        'sum': SumSentimentIntensityAggregator,
        'avg': AvgSentimentIntensityAggregator,
        'max': MaxSentimentIntensityAggregator,
    }

    am = aggregation_method.lower()
    if am in aggregators:
        aggregator = aggregators[am]()
    else:
        aggregator = aggregators[DEFAULT_AGGREGATOR]()
        self.aggregator_method_name = DEFAULT_AGGREGATOR

    return aggregator
Disclosure: This post may contain Amazon affiliate links. As an Amazon Associate I earn from qualifying purchases. Thank you for helping me keep the lights on! See my Privacy Policy for more details. Make this rich, flavorful Red Curry Soup with whatever veggies you want! It’s a filling, one-bowl meal that only takes about 45 minutes to make. This Red Curry Soup recipe is from Richa Hingle’s latest cookbook: Vegan Richa’s Everyday Kitchen. Richa’s recipes are always top notch. She writes clear instructions, and her photos are always beautiful. This book is a gorgeous collection of flavorful recipes, and I can already see it becoming a staple in my own kitchen. More Curry Goodness: All-Purpose Coconut Curry Sauce, Magical Curried Hummus I chose the Red Curry Soup with Red Lentils, because I. LOVE. CURRY. I’m also the only person in my house who does, so I don’t get to eat curries as often as I’d like. This soup was exactly what I needed in my life! It’s rich and flavorful, and it reheats well. I made a batch on a Tuesday and got to eat curry for lunch for three days in a row! Heaven. This soup is incredibly simple to make. Just simmer your lentils, saute a few ingredients, then add a few more, and simmer again until you reach curry perfection. Serve topped with Thai basil and a squeeze of lime, and you’ve got restaurant-worthy curry right in your own kitchen. Check out a few more recipes from Vegan Richa’s Everyday Kitchen:
/// Process a single input event.
fn handle_event(&mut self, state: &mut EngineStateRef, event: &Event) -> EventResult {
    let _ = state;
    let _ = event;

    EventResult::Ignored
}
package dynamo import ( "bytes" "encoding" "fmt" "reflect" "strconv" "strings" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/dynamodb" "github.com/aws/aws-sdk-go/service/dynamodb/dynamodbattribute" ) // Marshaler is the interface implemented by objects that can marshal themselves into // an AttributeValue. type Marshaler interface { MarshalDynamo() (*dynamodb.AttributeValue, error) } // MarshalItem converts the given struct into a DynamoDB item. func MarshalItem(v interface{}) (map[string]*dynamodb.AttributeValue, error) { return marshalItem(v) } func marshalItem(v interface{}) (map[string]*dynamodb.AttributeValue, error) { rv := reflect.ValueOf(v) switch rv.Type().Kind() { case reflect.Ptr: return marshalItem(rv.Elem().Interface()) case reflect.Struct: return marshalStruct(rv) case reflect.Map: return marshalMap(rv.Interface()) } return nil, fmt.Errorf("dynamo: marshal item: unsupported type %T: %v", rv.Interface(), rv.Interface()) } func marshalMap(v interface{}) (map[string]*dynamodb.AttributeValue, error) { // TODO: maybe unify this with the map stuff in marshal av, err := marshal(v, "") if err != nil { return nil, err } if av.M == nil { return nil, fmt.Errorf("dynamo: internal error: encoding map but M was empty") } return av.M, nil } func marshalStruct(rv reflect.Value) (map[string]*dynamodb.AttributeValue, error) { item := make(map[string]*dynamodb.AttributeValue) var err error for i := 0; i < rv.Type().NumField(); i++ { field := rv.Type().Field(i) fv := rv.Field(i) name, special, omitempty := fieldInfo(field) anonStruct := fv.Type().Kind() == reflect.Struct && field.Anonymous switch { case !fv.CanInterface(): if !anonStruct { continue } case name == "-": continue case omitempty: if isZero(fv) { continue } } // embed anonymous structs if anonStruct { avs, err := marshalStruct(fv) if err != nil { return nil, err } for k, v := range avs { // don't clobber pre-existing fields if _, exists := item[k]; exists { continue } item[k] = v } continue } av, err := marshal(fv.Interface(), special) if err != nil { return nil, err } if av != nil { item[name] = av } } return item, err } // Marshal converts the given value into a DynamoDB attribute value. 
func Marshal(v interface{}) (*dynamodb.AttributeValue, error) { return marshal(v, "") } func marshal(v interface{}, special string) (*dynamodb.AttributeValue, error) { switch x := v.(type) { case *dynamodb.AttributeValue: return x, nil case Marshaler: return x.MarshalDynamo() case dynamodbattribute.Marshaler: av := &dynamodb.AttributeValue{} return av, x.MarshalDynamoDBAttributeValue(av) case encoding.TextMarshaler: text, err := x.MarshalText() if err != nil { return nil, err } if len(text) == 0 { return nil, nil } return &dynamodb.AttributeValue{S: aws.String(string(text))}, err case nil: return nil, nil } return marshalReflect(reflect.ValueOf(v), special) } var nilTm encoding.TextMarshaler var tmType = reflect.TypeOf(&nilTm).Elem() func marshalReflect(rv reflect.Value, special string) (*dynamodb.AttributeValue, error) { switch rv.Kind() { case reflect.Ptr: if rv.IsNil() { return nil, nil } return marshal(rv.Elem().Interface(), special) case reflect.Bool: return &dynamodb.AttributeValue{BOOL: aws.Bool(rv.Bool())}, nil case reflect.Int, reflect.Int64, reflect.Int32, reflect.Int16, reflect.Int8: return &dynamodb.AttributeValue{N: aws.String(strconv.FormatInt(rv.Int(), 10))}, nil case reflect.Uint, reflect.Uint64, reflect.Uint32, reflect.Uint16, reflect.Uint8: return &dynamodb.AttributeValue{N: aws.String(strconv.FormatUint(rv.Uint(), 10))}, nil case reflect.Float32, reflect.Float64: return &dynamodb.AttributeValue{N: aws.String(strconv.FormatFloat(rv.Float(), 'f', -1, 64))}, nil case reflect.String: s := rv.String() if len(s) == 0 { return nil, nil } return &dynamodb.AttributeValue{S: aws.String(s)}, nil case reflect.Map: if special == "set" { // sets can't be empty if rv.Len() == 0 { return nil, nil } return marshalSet(rv) } // automatically omit nil maps if rv.IsNil() { return nil, nil } var keyString func(k reflect.Value) (string, error) if ktype := rv.Type().Key(); ktype.Implements(tmType) { keyString = func(k reflect.Value) (string, error) { tm := k.Interface().(encoding.TextMarshaler) txt, err := tm.MarshalText() if err != nil { return "", fmt.Errorf("dynamo: marshal map: key error: %v", err) } return string(txt), nil } } else if ktype.Kind() == reflect.String { keyString = func(k reflect.Value) (string, error) { return k.String(), nil } } else { return nil, fmt.Errorf("dynamo marshal: map key must be string: %T", rv.Interface()) } avs := make(map[string]*dynamodb.AttributeValue) for _, key := range rv.MapKeys() { v, err := marshal(rv.MapIndex(key).Interface(), "") if err != nil { return nil, err } if v != nil { kstr, err := keyString(key) if err != nil { return nil, err } avs[kstr] = v } } return &dynamodb.AttributeValue{M: avs}, nil case reflect.Struct: avs, err := marshalStruct(rv) if err != nil { return nil, err } return &dynamodb.AttributeValue{M: avs}, nil case reflect.Slice, reflect.Array: // special case: byte slice is B if rv.Type().Elem().Kind() == reflect.Uint8 { // binary values can't be empty if rv.Len() == 0 { return nil, nil } var data []byte if rv.Kind() == reflect.Array { data = make([]byte, rv.Len()) for i := 0; i < rv.Len(); i++ { data[i] = rv.Index(i).Interface().(byte) } } else { data = rv.Bytes() } return &dynamodb.AttributeValue{B: data}, nil } // sets if special == "set" { // sets can't be empty if rv.Len() == 0 { return nil, nil } return marshalSet(rv) } // lists CAN be empty avs := make([]*dynamodb.AttributeValue, 0, rv.Len()) for i := 0; i < rv.Len(); i++ { innerVal := rv.Index(i) av, err := marshal(innerVal.Interface(), "") if err != nil { return nil, err } 
avs = append(avs, av) } return &dynamodb.AttributeValue{L: avs}, nil default: return nil, fmt.Errorf("dynamo marshal: unknown type %s", rv.Type().String()) } } func marshalSet(rv reflect.Value) (*dynamodb.AttributeValue, error) { iface := reflect.Zero(rv.Type().Elem()).Interface() switch iface.(type) { case encoding.TextMarshaler: ss := make([]*string, 0, rv.Len()) for i := 0; i < rv.Len(); i++ { tm := rv.Index(i).Interface().(encoding.TextMarshaler) text, err := tm.MarshalText() if err != nil { return nil, err } if len(text) > 0 { ss = append(ss, aws.String(string(text))) } } return &dynamodb.AttributeValue{SS: ss}, nil } switch rv.Type().Kind() { case reflect.Slice: switch rv.Type().Elem().Kind() { case reflect.Int, reflect.Int64, reflect.Int32, reflect.Int16, reflect.Int8: ns := make([]*string, 0, rv.Len()) for i := 0; i < rv.Len(); i++ { ns = append(ns, aws.String(strconv.FormatInt(rv.Index(i).Int(), 10))) } return &dynamodb.AttributeValue{NS: ns}, nil case reflect.Uint, reflect.Uint64, reflect.Uint32, reflect.Uint16, reflect.Uint8: ns := make([]*string, 0, rv.Len()) for i := 0; i < rv.Len(); i++ { ns = append(ns, aws.String(strconv.FormatUint(rv.Index(i).Uint(), 10))) } return &dynamodb.AttributeValue{NS: ns}, nil case reflect.Float32, reflect.Float64: ns := make([]*string, 0, rv.Len()) for i := 0; i < rv.Len(); i++ { ns = append(ns, aws.String(strconv.FormatFloat(rv.Index(i).Float(), 'f', -1, 64))) } return &dynamodb.AttributeValue{NS: ns}, nil case reflect.String: ss := make([]*string, 0, rv.Len()) for i := 0; i < rv.Len(); i++ { ss = append(ss, aws.String(rv.Index(i).String())) } return &dynamodb.AttributeValue{SS: ss}, nil case reflect.Slice: if rv.Type().Elem().Elem().Kind() == reflect.Uint8 { bs := make([][]byte, 0, rv.Len()) for i := 0; i < rv.Len(); i++ { bs = append(bs, rv.Index(i).Bytes()) } return &dynamodb.AttributeValue{BS: bs}, nil } } case reflect.Map: useBool := rv.Type().Elem().Kind() == reflect.Bool if !useBool && rv.Type().Elem() != emptyStructType { return nil, fmt.Errorf("dynamo: cannot marshal type %v into a set", rv.Type()) } switch rv.Type().Key().Kind() { case reflect.Int, reflect.Int64, reflect.Int32, reflect.Int16, reflect.Int8: ns := make([]*string, 0, rv.Len()) for _, k := range rv.MapKeys() { if !useBool || rv.MapIndex(k).Bool() { ns = append(ns, aws.String(strconv.FormatInt(k.Int(), 10))) } } return &dynamodb.AttributeValue{NS: ns}, nil case reflect.Uint, reflect.Uint64, reflect.Uint32, reflect.Uint16, reflect.Uint8: ns := make([]*string, 0, rv.Len()) for _, k := range rv.MapKeys() { if !useBool || rv.MapIndex(k).Bool() { ns = append(ns, aws.String(strconv.FormatUint(k.Uint(), 10))) } } return &dynamodb.AttributeValue{NS: ns}, nil case reflect.Float32, reflect.Float64: ns := make([]*string, 0, rv.Len()) for _, k := range rv.MapKeys() { if !useBool || rv.MapIndex(k).Bool() { ns = append(ns, aws.String(strconv.FormatFloat(k.Float(), 'f', -1, 64))) } } return &dynamodb.AttributeValue{NS: ns}, nil case reflect.String: ss := make([]*string, 0, rv.Len()) for _, k := range rv.MapKeys() { if !useBool || rv.MapIndex(k).Bool() { ss = append(ss, aws.String(k.String())) } } return &dynamodb.AttributeValue{SS: ss}, nil case reflect.Array: if rv.Type().Key().Elem().Kind() == reflect.Uint8 { bs := make([][]byte, 0, rv.Len()) for _, k := range rv.MapKeys() { if useBool && !rv.MapIndex(k).Bool() { continue } // TODO: is there a better way to turn [n]byte into []byte with reflection? 
key := make([]byte, k.Len()) for i := 0; i < k.Len(); i++ { key[i] = (byte)(k.Index(i).Uint()) } bs = append(bs, key) } return &dynamodb.AttributeValue{BS: bs}, nil } } } return nil, fmt.Errorf("dynamo marshal: unknown type for sets %s", rv.Type().String()) } var emptyStructType = reflect.TypeOf(struct{}{}) func marshalSlice(values []interface{}) ([]*dynamodb.AttributeValue, error) { avs := make([]*dynamodb.AttributeValue, 0, len(values)) for _, v := range values { av, err := marshal(v, "") if err != nil { return nil, err } if av != nil { avs = append(avs, av) } } return avs, nil } func fieldInfo(field reflect.StructField) (name, special string, omitempty bool) { tags := strings.Split(field.Tag.Get("dynamo"), ",") if len(tags) == 0 { return field.Name, "", false } name = tags[0] if name == "" { name = field.Name } for _, t := range tags[1:] { if t == "omitempty" { omitempty = true } else { special = t } } return } type isZeroer interface { IsZero() bool } // thanks James Henstridge func isZero(rv reflect.Value) bool { // use IsZero for supported types if rv.CanInterface() { if zeroer, ok := rv.Interface().(isZeroer); ok { return zeroer.IsZero() } } // always return false for certain interfaces, check these later iface := rv.Interface() switch iface.(type) { case Marshaler: return false case encoding.TextMarshaler: return false } switch rv.Kind() { case reflect.Func, reflect.Map, reflect.Slice: return rv.IsNil() case reflect.Array: z := true for i := 0; i < rv.Len(); i++ { z = z && isZero(rv.Index(i)) } return z case reflect.Struct: z := true for i := 0; i < rv.NumField(); i++ { z = z && isZero(rv.Field(i)) } return z } // Compare other types directly: z := reflect.Zero(rv.Type()) return rv.Interface() == z.Interface() } // only works for primary key types func isAVEqual(a, b *dynamodb.AttributeValue) bool { if a.S != nil { if b.S == nil { return false } return *a.S == *b.S } if a.N != nil { if b.N == nil { return false } // TODO: parse numbers? return *a.N == *b.N } if a.B != nil { if b.B == nil { return false } return bytes.Equal(a.B, b.B) } return false }
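For orientation, here is a minimal usage sketch of the struct-tag handling implemented by fieldInfo and marshalItem above. The struct, its field names, and the import path are invented for the example; only the tag semantics (dynamo:"Name" renames an attribute, ",omitempty" drops zero values, ",set" marshals as a DynamoDB set, "-" skips the field) come from the code itself.

package main

import (
	"fmt"

	"github.com/somewhere/dynamo" // illustrative import path for the package above
)

type Ticket struct {
	ID      string              `dynamo:"Id"`         // stored under the attribute name "Id"
	Title   string              `dynamo:",omitempty"` // dropped from the item when empty
	Tags    map[string]struct{} `dynamo:"Tags,set"`   // marshaled as a string set (SS)
	Ignored string              `dynamo:"-"`          // never marshaled
}

func main() {
	item, err := dynamo.MarshalItem(Ticket{
		ID:   "t-1",
		Tags: map[string]struct{}{"bug": {}, "ui": {}},
	})
	if err != nil {
		panic(err)
	}
	// Title is omitted (omitempty + zero value); Tags becomes an SS attribute.
	// Prints the attribute map (values show as pointers).
	fmt.Println(item)
}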
Superpriming of synaptic vesicles after their recruitment to the readily releasable pool Significance During sustained nerve activity, synapses must continuously recycle vesicles. We used the unique opportunities for quantitative analysis offered by the calyx of Held synapse to study late stages in the process that renders vesicles release-ready. We dissect two sequential steps with distinct pharmacology and kinetics, the characterization of which is essential for an understanding of molecular mechanisms of transmitter release and short-term plasticity. Recruitment of release-competent vesicles during sustained synaptic activity is one of the major factors governing short-term plasticity. During bursts of synaptic activity, vesicles are recruited to a fast-releasing pool from a reluctant vesicle pool through an actin-dependent mechanism. We now show that newly recruited vesicles in the fast-releasing pool do not respond at full speed to a strong Ca2+ stimulus, but require approximately 4 s to mature to a “superprimed” state. Superpriming was found to be altered by agents that modulate the function of unc13 homolog proteins (Munc13s), but not by calmodulin inhibitors or actin-disrupting agents. These findings indicate that recruitment and superpriming of vesicles are regulated by separate mechanisms, which require integrity of the cytoskeleton and activation of Munc13s, respectively. We propose that refilling of the fast-releasing vesicle pool proceeds in two steps, rapid actin-dependent “positional priming,” which brings vesicles closer to Ca2+ sources, followed by slower superpriming, which enhances the Ca2+ sensitivity of primed vesicles.
// NonOptional returns v.Elem() if v is non-nil Optional, otherwise returns v.
func (v *Value) NonOptional() *Value {
	if v.t.kind == Optional && !v.IsNil() {
		return v.Elem()
	}
	return v
}
/* ** This file contains proprietary software owned by Motorola Mobility, Inc. ** ** No rights, expressed or implied, whatsoever to this software are provided by Motorola Mobility, Inc. hereunder. ** ** ** (c) Copyright 2011 Motorola Mobility, Inc. All Rights Reserved. ** */ #include "string.h" #include "Image.h" #include <node_buffer.h> #include <iostream> using namespace std; using namespace node; using namespace v8; namespace freeimage { Persistent<FunctionTemplate> Image::constructor_template; Image::Image(Handle<Object> wrapper) {} Image::~Image() { Local<Value> internalField = NanObjectWrapHandle(this)->GetInternalField(0); if (internalField->IsNull()) return; FIBITMAP *dib = static_cast<FIBITMAP*>(Local<External>::Cast(internalField)->Value()); FreeImage_Unload(dib); } void Image::Initialize(Handle<Object> target) { NanScope(); Local<FunctionTemplate> t = NanNew<FunctionTemplate>(New); NanAssignPersistent(constructor_template, t); t->InstanceTemplate()->SetInternalFieldCount(1); t->SetClassName(NanNew<String>("Image")); NODE_SET_PROTOTYPE_METHOD(t, "unload", unload); NODE_SET_PROTOTYPE_METHOD(t, "save", save); NODE_SET_PROTOTYPE_METHOD(t, "saveToMemory", saveToMemory); NODE_SET_PROTOTYPE_METHOD(t, "convertTo32Bits", convertTo32Bits); NODE_SET_PROTOTYPE_METHOD(t, "convertTo24Bits", convertTo24Bits); NODE_SET_PROTOTYPE_METHOD(t, "flipHorizontal", flipHorizontal); NODE_SET_PROTOTYPE_METHOD(t, "flipVertical", flipVertical); target->Set(NanNew<String>("Image"), t->GetFunction()); } NAN_METHOD(Image::New) { NanScope(); Image *fi = new Image(args.This()); fi->Wrap(args.This()); NanReturnThis(); } Image *Image::New(FIBITMAP* dib) { NanScope(); Local<Value> arg = NanNew<Integer>(0); Local<Object> obj = NanNew<FunctionTemplate>(constructor_template)->GetFunction()->NewInstance(1, &arg); Image *image = ObjectWrap::Unwrap<Image>(obj); int w,h,pitch; FREE_IMAGE_TYPE type = FreeImage_GetImageType(dib); obj->SetInternalField(0, NanNew<External>(dib)); obj->Set(NanNew<String>("width"), NanNew<Integer>(w=FreeImage_GetWidth(dib))); obj->Set(NanNew<String>("height"), NanNew<Integer>(h=FreeImage_GetHeight(dib))); obj->Set(NanNew<String>("bpp"), NanNew<Integer>((int)FreeImage_GetBPP(dib))); obj->Set(NanNew<String>("pitch"), NanNew<Integer>(pitch=FreeImage_GetPitch(dib))); obj->Set(NanNew<String>("type"), NanNew<Integer>(type)); obj->Set(NanNew<String>("redMask"), NanNew<Integer>((int)FreeImage_GetRedMask(dib))); obj->Set(NanNew<String>("greenMask"), NanNew<Integer>((int)FreeImage_GetGreenMask(dib))); obj->Set(NanNew<String>("blueMask"), NanNew<Integer>((int)FreeImage_GetBlueMask(dib))); BYTE *bits = FreeImage_GetBits(dib); obj->Set(NanNew<String>("buffer"), NanNewBufferHandle((char*) bits, h * pitch)); return image; } NAN_METHOD(Image::unload) { NanScope(); Local<Value> internalField = args.This()->GetInternalField(0); if (!internalField->IsNull()) { FIBITMAP *dib = static_cast<FIBITMAP*>(Local<External>::Cast(internalField)->Value()); FreeImage_Unload(dib); args.This()->SetInternalField(0, v8::Null(v8::Isolate::GetCurrent())); } NanReturnUndefined(); } NAN_METHOD(Image::save) { NanScope(); Local<External> wrap = Local<External>::Cast(args.This()->GetInternalField(0)); FIBITMAP *dib=static_cast<FIBITMAP*>(wrap->Value()); FREE_IMAGE_FORMAT fif=(FREE_IMAGE_FORMAT) args[0]->Uint32Value(); String::Utf8Value str(args[1]->ToString()); int flags=0; if (!args[2]->IsUndefined()) { flags=args[2]->Int32Value(); } // cout<<"Saving image to "<<*str<<" format: "<<hex<<fif<<dec<<" flags: "<<hex<<flags<<dec<<endl; if 
(fif == FIF_JPEG && FreeImage_GetBPP(dib) != 24) { // FIBITMAP *old=dib; dib=FreeImage_ConvertTo24Bits(dib); // FreeImage_Unload(old); } FreeImage_Save(fif,dib,*str,flags); NanReturnUndefined(); } NAN_METHOD(Image::saveToMemory) { NanScope(); Local<External> wrap = Local<External>::Cast(args.This()->GetInternalField(0)); FIBITMAP *dib = static_cast<FIBITMAP*>(wrap->Value()); // cout << "dib " << hex << dib << dec << endl; FREE_IMAGE_FORMAT fif = (FREE_IMAGE_FORMAT) args[0]->Uint32Value(); int flags = 0; if (!args[1]->IsUndefined()) { flags = args[1]->Int32Value(); } if (fif == FIF_JPEG && FreeImage_GetBPP(dib) != 24) { // FIBITMAP *old = dib; dib = FreeImage_ConvertTo24Bits(dib); // FreeImage_Unload(old); } Local<Object> actualBuffer; BYTE *mem_buffer = NULL; DWORD file_size; FIMEMORY *hmem = FreeImage_OpenMemory(); FreeImage_SaveToMemory(fif, dib, hmem, flags); FreeImage_AcquireMemory(hmem, &mem_buffer, &file_size); // This Buffer constructor actually copies the data actualBuffer = NanNewBufferHandle((char *) mem_buffer, (size_t) file_size); FreeImage_CloseMemory(hmem); NanReturnValue(actualBuffer); } NAN_METHOD(Image::convertTo32Bits) { NanScope(); Local<External> wrap = Local<External>::Cast(args.This()->GetInternalField(0)); FIBITMAP *dib=static_cast<FIBITMAP*>(wrap->Value()); FIBITMAP *conv=FreeImage_ConvertTo32Bits(dib); NanReturnValue(NanObjectWrapHandle(Image::New(conv))); } NAN_METHOD(Image::convertTo24Bits) { NanScope(); Local<External> wrap = Local<External>::Cast(args.This()->GetInternalField(0)); FIBITMAP *dib=static_cast<FIBITMAP*>(wrap->Value()); FIBITMAP *conv=FreeImage_ConvertTo24Bits(dib); NanReturnValue(NanObjectWrapHandle(Image::New(conv))); } NAN_METHOD(Image::flipHorizontal) { NanScope(); Local<External> wrap = Local<External>::Cast(args.This()->GetInternalField(0)); FIBITMAP *dib = static_cast<FIBITMAP *>(wrap->Value()); BOOL flip = FreeImage_FlipHorizontal(dib); NanReturnValue(NanNew<Boolean>(flip)); } NAN_METHOD(Image::flipVertical) { NanScope(); Local<External> wrap = Local<External>::Cast(args.This()->GetInternalField(0)); FIBITMAP *dib = static_cast<FIBITMAP *>(wrap->Value()); BOOL flip = FreeImage_FlipVertical(dib); NanReturnValue(NanNew<Boolean>(flip)); } }
# Generated by Django 2.2.2 on 2019-07-24 08:14

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [("claypot", "0010_auto_20190709_1847")]

    operations = [
        migrations.AddField(
            model_name="recipe",
            name="description",
            field=models.TextField(blank=True, verbose_name="Additional information"),
        )
    ]
/**
 * Make sure special characters are serialized properly.
 *
 * @throws Exception...
 */
public void testSerializeRegexErrorParserSpecialCharacters() throws Exception {
    final String TESTING_ID = "org.eclipse.cdt.core.test.regexerrorparser";
    final String TESTING_NAME = "<>\"'\\& Error Parser";
    final String TESTING_REGEX = "Pattern-<>\"'\\&";
    final String ALL_IDS = ErrorParserManager.toDelimitedString(ErrorParserManager.getErrorParserAvailableIds());

    {
        RegexErrorParser regexErrorParser = new RegexErrorParser(TESTING_ID, TESTING_NAME);
        regexErrorParser.addPattern(new RegexErrorPattern(TESTING_REGEX,
                "line-<>\"'\\&", "file-<>\"'\\&", "description-<>\"'\\&",
                null, IMarkerGenerator.SEVERITY_WARNING, false));

        ErrorParserExtensionManager.setUserDefinedErrorParsersInternal(new IErrorParserNamed[] {regexErrorParser});
        assertNotNull(ErrorParserManager.getErrorParserCopy(TESTING_ID));

        ErrorParserExtensionManager.serializeUserDefinedErrorParsers();
    }

    {
        ErrorParserExtensionManager.loadUserDefinedErrorParsers();

        String all = ErrorParserManager.toDelimitedString(ErrorParserManager.getErrorParserAvailableIds());
        assertTrue(all.contains(TESTING_ID));

        IErrorParser errorParser = ErrorParserManager.getErrorParserCopy(TESTING_ID);
        assertNotNull(errorParser);
        assertTrue(errorParser instanceof RegexErrorParser);

        RegexErrorParser regexErrorParser = (RegexErrorParser)errorParser;
        assertEquals(TESTING_ID, regexErrorParser.getId());
        assertEquals(TESTING_NAME, regexErrorParser.getName());

        RegexErrorPattern[] errorPatterns = regexErrorParser.getPatterns();
        assertEquals(1, errorPatterns.length);
        assertEquals(TESTING_REGEX, errorPatterns[0].getPattern());
    }
}
Three-dimensional Characteristics of the Loess Microstructure and Comparison Before and After Collapse Surface undulation is an important parameter for describing loess microstructure, but existing studies are mostly qualitative or quantitative two-dimensional analyses, which makes it difficult to capture the undulation of the soil microstructure surface. Based on the microstructure characteristics of loess from western Liaoning, the gray values extracted from SEM images are translated into elevation data of the loess microstructure surface, and three-dimensional digital images of the surface undulation before and after collapse are constructed; a three-dimensional visualization method and characterization parameters for SEM photographs are proposed. The method is simple to implement and allows the surface undulation of the loess microstructure before and after collapse to be observed intuitively. Using an improved projective covering method, the fractal dimension of the loess is calculated as 2.508 before collapse and 2.590 after collapse, which shows that after collapse the extent of microstructure surface undulation increases and the pore complexity increases significantly.
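To make the gray-value-to-elevation idea concrete, here is a small sketch in Go. It is not the paper's improved projective covering method: it uses a plain differential box-counting estimate on a synthetic gray-level grid (the grid size, box sizes, and test surface are all invented), but it illustrates how a surface built from image gray values yields a fractal dimension between 2 and 3, the range of the 2.508 and 2.590 values reported above.

package main

import (
	"fmt"
	"math"
)

// boxCount returns the differential box count N(s) for a square gray-level grid
// at box size s: for each s x s cell, it adds the number of boxes of height
// h = s*grayLevels/n needed to cover the min..max gray range in that cell.
func boxCount(grid [][]float64, s int, grayLevels float64) float64 {
	n := len(grid)
	h := float64(s) * grayLevels / float64(n)
	total := 0.0
	for y := 0; y < n; y += s {
		for x := 0; x < n; x += s {
			minV, maxV := math.Inf(1), math.Inf(-1)
			for dy := 0; dy < s && y+dy < n; dy++ {
				for dx := 0; dx < s && x+dx < n; dx++ {
					v := grid[y+dy][x+dx]
					minV = math.Min(minV, v)
					maxV = math.Max(maxV, v)
				}
			}
			total += math.Ceil((maxV-minV)/h) + 1
		}
	}
	return total
}

// fractalDimension estimates the surface dimension as the least-squares slope
// of log N(s) against log(1/s) over the given box sizes.
func fractalDimension(grid [][]float64, sizes []int, grayLevels float64) float64 {
	var sx, sy, sxx, sxy float64
	for _, s := range sizes {
		x := math.Log(1.0 / float64(s))
		y := math.Log(boxCount(grid, s, grayLevels))
		sx, sy, sxx, sxy = sx+x, sy+y, sxx+x*x, sxy+x*y
	}
	n := float64(len(sizes))
	return (n*sxy - sx*sy) / (n*sxx - sx*sx)
}

func main() {
	// Synthetic 64x64 "SEM image": the gray value stands in for surface elevation.
	const n = 64
	grid := make([][]float64, n)
	for y := range grid {
		grid[y] = make([]float64, n)
		for x := range grid[y] {
			grid[y][x] = 128 + 60*math.Sin(float64(x)/5)*math.Cos(float64(y)/7)
		}
	}
	d := fractalDimension(grid, []int{2, 4, 8, 16}, 256)
	fmt.Printf("estimated surface fractal dimension: %.3f\n", d)
}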
// Competitive-programming style solution. For each test case it reads an array
// and finds the minimum number of operations needed to make all elements equal,
// where one operation merges two adjacent elements into their sum (positive
// values assumed). Every divisor of the total sum is tried as the common value
// of the final blocks; a greedy left-to-right scan checks whether the array can
// be cut into contiguous blocks of that sum, counting n - (number of blocks) merges.
#include <bits/stdc++.h>
using namespace std;

int a[3005];

int main() {
    int t;
    cin >> t;
    while (t--) {
        int n, sum = 0;
        cin >> n;
        for (int i = 0; i < n; ++i) {
            cin >> a[i];
            sum += a[i];
        }
        int minstep = 1e9;
        for (int i = 1; i <= sum; ++i) {
            if (sum % i != 0) continue;   // try i equal blocks of value sum/i
            int target = sum / i;
            int block = 0, step = 0;
            bool ok = true;
            for (int j = 0; j < n; ++j) {
                block += a[j];
                step++;                   // tentatively count this element as one more merge
                if (block == target) {
                    block = 0;
                    step--;               // block closed: one fewer merge than elements in it
                }
                if (block > target) {     // cannot cut the array into blocks of this sum
                    ok = false;
                    break;
                }
            }
            if (ok) minstep = min(minstep, step);
        }
        cout << minstep << endl;
    }
}
/**
 * Saves a given stream of elements to their respective files.
 *
 * @param elements The elements to save.
 * @return Returns a stream of all elements that were successfully saved.
 */
public Stream<T> saveSelected(final Stream<T> elements) {
    if (elements == null) {
        return Stream.<T>empty().parallel();
    }

    return elements.parallel()
            .filter(Objects::nonNull)
            .map(this::saveElement)
            .filter(Objects::nonNull);
}
package data

import (
	"github.com/bububa/oppo-omni/enum"
	"github.com/bububa/oppo-omni/model"
)

type QTodayTopRequest struct {
	model.BaseRequest
	Demision *enum.DataDemision `json:"demision,omitempty"`
}

type QTodayTopResponse struct {
	model.BaseResponse
	Data *QTodayTopResult `json:"data,omitempty"`
}

type QTodayTopResult struct {
	AdID      uint64 `json:"adId,omitempty"`      // ad ID
	AdName    string `json:"adName,omitempty"`    // ad name
	OrderData int64  `json:"orderData,omitempty"` // value used for ordering (download count when sorted by downloads, spend amount when sorted by spend)
}
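As a quick illustration of how the json tags above map onto a payload, a self-contained sketch follows; the sample JSON and its values are invented, and QTodayTopResult is mirrored locally so the snippet runs on its own instead of importing the real package.

package main

import (
	"encoding/json"
	"fmt"
)

// Local mirror of QTodayTopResult, kept here only so the sketch compiles standalone.
type QTodayTopResult struct {
	AdID      uint64 `json:"adId,omitempty"`
	AdName    string `json:"adName,omitempty"`
	OrderData int64  `json:"orderData,omitempty"`
}

func main() {
	// Invented sample payload matching the json tags above.
	payload := []byte(`{"adId": 1001, "adName": "spring-sale", "orderData": 52300}`)

	var top QTodayTopResult
	if err := json.Unmarshal(payload, &top); err != nil {
		panic(err)
	}
	fmt.Printf("ad %d (%s): %d\n", top.AdID, top.AdName, top.OrderData)
}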
<gh_stars>0 package com.kmjd.wcqp.single.zxh.model; /** * Created by zym on 2017/3/17. * 微信零钱明细原始数据 */ public class ChangeDetailBean { /** * balance : 1181 * balance_source : 收入 * bill_type : * bkid : 6b650cb2c0959fb305d2c87d4103fa7f * conmum : * createtime : 2017-01-29 17:45:36 * explain : * fcon : 0 * list_sign : * modifytime : 2017-01-29 17:45:36 * paynum : 188 * trans_state_name : 微信红包 * transid : 1000039401170129003208308600005895635966 * type : 1 */ private String balance; private String balance_source; private String bill_type; private String bkid; private String conmum; private String createtime; private String explain; private String fcon; private String list_sign; private String modifytime; private String paynum; private String trans_state_name; private String transid; private String type; public String getBalance() { return balance; } public void setBalance(String balance) { this.balance = balance; } public String getBalance_source() { return balance_source; } public void setBalance_source(String balance_source) { this.balance_source = balance_source; } public String getBill_type() { return bill_type; } public void setBill_type(String bill_type) { this.bill_type = bill_type; } public String getBkid() { return bkid; } public void setBkid(String bkid) { this.bkid = bkid; } public String getConmum() { return conmum; } public void setConmum(String conmum) { this.conmum = conmum; } public String getCreatetime() { return createtime; } public void setCreatetime(String createtime) { this.createtime = createtime; } public String getExplain() { return explain; } public void setExplain(String explain) { this.explain = explain; } public String getFcon() { return fcon; } public void setFcon(String fcon) { this.fcon = fcon; } public String getList_sign() { return list_sign; } public void setList_sign(String list_sign) { this.list_sign = list_sign; } public String getModifytime() { return modifytime; } public void setModifytime(String modifytime) { this.modifytime = modifytime; } public String getPaynum() { return paynum; } public void setPaynum(String paynum) { this.paynum = paynum; } public String getTrans_state_name() { return trans_state_name; } public void setTrans_state_name(String trans_state_name) { this.trans_state_name = trans_state_name; } public String getTransid() { return transid; } public void setTransid(String transid) { this.transid = transid; } public String getType() { return type; } public void setType(String type) { this.type = type; } @Override public String toString() { return "ChangeDetailBean{" + "balance='" + balance + '\'' + ", balance_source='" + balance_source + '\'' + ", bill_type='" + bill_type + '\'' + ", bkid='" + bkid + '\'' + ", conmum='" + conmum + '\'' + ", createtime='" + createtime + '\'' + ", explain='" + explain + '\'' + ", fcon='" + fcon + '\'' + ", list_sign='" + list_sign + '\'' + ", modifytime='" + modifytime + '\'' + ", paynum='" + paynum + '\'' + ", trans_state_name='" + trans_state_name + '\'' + ", transid='" + transid + '\'' + ", type='" + type + '\'' + '}'; } }