Dataset preview, 4 columns (viewer metadata condensed): Unnamed: 0 — int64 row index, 0 to 2.05k; func — string (lengths 27 to 124k) holding a source code excerpt; target — bool, 2 classes; project — string (lengths 39 to 117) holding the source file path. Each row below appears as: index, func, target, project.
0
public interface AbbreviationService { /** * Gets the available abbreviations for a string. If no abbreviations * are found, the result contains each word of the original string as * its own phrase, with a single alternative: the word unchanged. * * @param s the string to abbreviate * @return the available abbreviations */ Abbreviations getAbbreviations(String s); }
false
tableViews_src_main_java_gov_nasa_arc_mct_abbreviation_AbbreviationService.java
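A minimal usage sketch of the interface above. The 'service' variable is hypothetical (an AbbreviationService obtained elsewhere, e.g. via OSGi injection), and the sample string is illustrative:

    // Hypothetical: 'service' is an AbbreviationService obtained elsewhere.
    Abbreviations abbrevs = service.getAbbreviations("Fiber Optic MDM System");
    for (String phrase : abbrevs.getPhrases()) {
        // The first alternative is always the phrase itself.
        System.out.println(phrase + " -> " + abbrevs.getAbbreviations(phrase));
    }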
1
public interface Abbreviations { /** * Gets the original string for which the available abbreviations have * been calculated. * * @return the original string */ String getValue(); /** * Gets the phrases into which the original string has been divided as * possible abbreviations were found. The phrases, in order, comprise * all words of the original string. * * @return a list of phrases that can be abbreviated */ List<String> getPhrases(); /** * Gets the available abbreviations for a phrase. The list is always * nonempty, since the first element is the phrase unchanged. * * @param phrase the phrase to abbreviate, which may be a single word * @return a list of possible abbreviations for the phrase */ List<String> getAbbreviations(String phrase); }
false
tableViews_src_main_java_gov_nasa_arc_mct_abbreviation_Abbreviations.java
2
public class AbbreviationServiceImpl implements AbbreviationService { private static final Logger logger = LoggerFactory.getLogger(AbbreviationServiceImpl.class); private static final String ABBREVIATIONS_FILE_PROPERTY = "abbreviations-file"; private AbbreviationsManager manager; /** * Activates the service implementation. A map of properties is * used to configure the service. * * @param context the component context for this service */ public void activate(ComponentContext context) { @SuppressWarnings("unchecked") Dictionary<String,String> properties = context.getProperties(); // This property will always be present, according to OSGi 4.1 Compendium // Specification section 112.6. String componentName = properties.get("component.name"); String abbreviationsFilePath = properties.get(ABBREVIATIONS_FILE_PROPERTY); Properties abbreviationsProperties = null; if (abbreviationsFilePath == null) { logger.warn("{}: no configuration value for {} - no abbreviations will be available.", componentName, ABBREVIATIONS_FILE_PROPERTY); } else { InputStream in = findFile(abbreviationsFilePath); if (in == null) { logger.warn("{}: abbreviations file <{}> not found - no abbreviations will be available.", componentName, abbreviationsFilePath); } else { try { abbreviationsProperties = new Properties(); abbreviationsProperties.load(in); } catch (IOException ex) { logger.warn("{}: error loading abbreviations file <{}> - no abbreviations will be available.", componentName, abbreviationsFilePath); abbreviationsProperties = null; } } } if (abbreviationsProperties == null) { abbreviationsProperties = new Properties(); } manager = new AbbreviationsManager(abbreviationsProperties); } /** * Looks up a file given a path. The file is looked up first relative to the * current directory. If not found, a matching resource within the bundle is * tried. If neither method works, null is returned to indicate that the file * could not be found. * * @param path a relative or absolute pathname, or a resource name from within the bundle * @return an input stream for reading from the file, or null if the file could not be found */ InputStream findFile(String path) { // 1. Try to find using the file path, which may be absolute or // relative to the current directory. File f = new File(path); if (f.isFile() && f.canRead()) { try { return new FileInputStream(f); } catch (Exception ex) { // ignore, for now } } // 2. Try to find a resource in the bundle. This return value may be null, // if no resource is found matching the path. return getClass().getResourceAsStream(path); } @Override public Abbreviations getAbbreviations(String s) { return manager.getAbbreviations(s); } }
false
tableViews_src_main_java_gov_nasa_arc_mct_abbreviation_impl_AbbreviationServiceImpl.java
3
public class AbbreviationServiceImplTest { @Mock private ComponentContext context; AbbreviationServiceImpl abbrev; Dictionary<String, String> properties; @BeforeMethod public void init() { MockitoAnnotations.initMocks(this); abbrev = new AbbreviationServiceImpl(); properties = new Hashtable<String, String>(); properties.put("component.name", "AbbreviationService"); when(context.getProperties()).thenReturn(properties); } @Test public void testActivateGoodFile() { properties.put("abbreviations-file", "/default-abbreviations.properties"); abbrev.activate(context); Abbreviations abbreviations = abbrev.getAbbreviations("Fiber Optic MDM System"); assertEquals(abbreviations.getPhrases().size(), 3); assertEquals(abbreviations.getPhrases().get(0), "Fiber Optic"); assertEquals(abbreviations.getPhrases().get(1), "MDM"); assertEquals(abbreviations.getPhrases().get(2), "System"); } @Test public void testActivateNoFileProperty() { abbrev.activate(context); Abbreviations abbreviations = abbrev.getAbbreviations("Fiber Optic MDM System"); assertEquals(abbreviations.getPhrases().size(), 4); assertEquals(abbreviations.getPhrases().get(0), "Fiber"); assertEquals(abbreviations.getPhrases().get(1), "Optic"); assertEquals(abbreviations.getPhrases().get(2), "MDM"); assertEquals(abbreviations.getPhrases().get(3), "System"); } @Test public void testActivateNonexistentAbbreviationsFile() { properties.put("abbreviations-file", "/file-does-not-exist.properties"); abbrev.activate(context); Abbreviations abbreviations = abbrev.getAbbreviations("Fiber Optic MDM System"); assertEquals(abbreviations.getPhrases().size(), 4); assertEquals(abbreviations.getPhrases().get(0), "Fiber"); assertEquals(abbreviations.getPhrases().get(1), "Optic"); assertEquals(abbreviations.getPhrases().get(2), "MDM"); assertEquals(abbreviations.getPhrases().get(3), "System"); } @Test(dataProvider="findFileTests") public void testFindFile(String path, String fileProperty) throws IOException { InputStream in; Properties p; p = new Properties(); in = abbrev.findFile(path); assertNotNull(in); p.load(in); assertEquals(p.getProperty("file"), fileProperty); } @DataProvider(name = "findFileTests") public Object[][] getFindFileTests() { return new Object[][] { // A file path { "src/test/data/abbreviations.properties", "in file system" }, // A resource in the bundle using an absolute name { "/test-abbreviations.properties", "root of bundle" }, // A resource in the bundle using a relative name { "package-abbreviations.properties", "in bundle package" }, }; } }
false
tableViews_src_test_java_gov_nasa_arc_mct_abbreviation_impl_AbbreviationServiceImplTest.java
4
public class AbbreviationsImpl implements Abbreviations { private String value; private List<String> phrases = new ArrayList<String>(); private Map<String, List<String>> abbreviations = new HashMap<String, List<String>>(); /** * Create an abbreviation object. * @param value original text */ protected AbbreviationsImpl(String value) { this.value = value; } @Override public List<String> getAbbreviations(String phrase) { return abbreviations.get(phrase); } @Override public List<String> getPhrases() { return phrases; } @Override public String getValue() { return value; } /** * Add a set of abbreviations to a phrase. * @param phrase the phrase to be abbreviated * @param alternatives the phrase's abbreviations */ protected void addPhrase(String phrase, List<String> alternatives) { phrases.add(phrase); abbreviations.put(phrase, alternatives); } }
false
tableViews_src_main_java_gov_nasa_arc_mct_abbreviation_impl_AbbreviationsImpl.java
5
public class AbbreviationsManager { /** A regular expression used to separate alternative abbreviations. (\s == any whitespace) */ private static final Pattern ABBREVIATION_SEPARATOR = Pattern.compile("\\s*\\|\\s*"); /** A regular expression used to separate words. */ private static final Pattern WORD_SEPARATOR = Pattern.compile("\\s+"); private Map<String, List<String>> abbreviations = new HashMap<String, List<String>>(); /** * Creates a new abbreviations manager configured with a set of abbreviation * properties. Abbreviation properties are of the form: * <pre> * phrase = alt1 | alt2 | ... * </pre> * Whitespace around the "=" and "|" separators is removed. The phrase is * converted to lower case, but the alternatives are used verbatim. * * @param abbreviationProperties the abbreviation properties */ public AbbreviationsManager(Properties abbreviationProperties) { @SuppressWarnings("unchecked") Enumeration<String> e = (Enumeration<String>) abbreviationProperties.propertyNames(); while (e.hasMoreElements()) { String phrase = e.nextElement(); String lcPhrase = phrase.toLowerCase(); String[] alternatives = ABBREVIATION_SEPARATOR.split(abbreviationProperties.getProperty(phrase).trim()); List<String> abbreviationsForPhrase = new ArrayList<String>(Arrays.asList(alternatives)); Collections.sort(abbreviationsForPhrase, new Comparator<String>() { @Override public int compare(String o1, String o2) { return o1.length() - o2.length(); } }); abbreviations.put(lcPhrase, abbreviationsForPhrase); } } /** * Gets the alternative abbreviations for a phrase. The original phrase is always * the first alternative returned. If no abbreviations are found for the phrase, returns * a list with one element, the original phrase. The phrase is converted to lower case * before looking up its alternatives. * * @param phrase the phrase to abbreviate * @return a list of alternative abbreviations, with the original phrase as the first element */ public List<String> getAlternatives(String phrase) { List<String> result = new ArrayList<String>(); result.add(phrase); List<String> alternatives = abbreviations.get(phrase.toLowerCase()); if (alternatives != null) { result.addAll(alternatives); } return result; } /** * Finds the phrases within a string that can be abbreviated, and returns * a structure with those phrases and the alternatives for each phrase. * A phrase is a sequence of one or more words in the original string, where * words are delimited by whitespace. At each point in the original string, * the longest phrase for which there are abbreviations is found. * * @param s the string to find abbreviations for * @return a structure describing the available abbreviations */ public Abbreviations getAbbreviations(String s) { AbbreviationsImpl abbrev = new AbbreviationsImpl(s); List<String> phrases = getPhrasesWithAbbreviations(s); for (String phrase : phrases) { abbrev.addPhrase(phrase, getAlternatives(phrase)); } return abbrev; } /** * Constructs a partition of a string into phrases, along word boundaries, * where each phrase has one or more alternative abbreviations, and each * phrase is the longest match against the abbreviations at that position * in the original string. 
* * @param s the original string to partition into phrases * @return a list of phrases */ private List<String> getPhrasesWithAbbreviations(String s) { int phraseStart = 0; List<String> phrasesWithAbbreviations = new ArrayList<String>(); Matcher wordBoundary = WORD_SEPARATOR.matcher(s); while (phraseStart < s.length()) { int phraseLength = getLongestPhraseLength(s.substring(phraseStart)); phrasesWithAbbreviations.add(s.substring(phraseStart, phraseStart + phraseLength)); if (wordBoundary.find(phraseStart + phraseLength)) { phraseStart = wordBoundary.end(); } else { phraseStart = s.length(); } } return phrasesWithAbbreviations; } /** * Finds the longest phrase within a string that has abbreviations. The first word * is always a possibility, even if no alternatives exist to that word. * * @param s the string for which to find the longest phrase with alternatives * @return the length of the longest phrase with alternative abbreviations */ private int getLongestPhraseLength(String s) { // If the entire string matches, then it is obviously the longest matching phrase. if (abbreviations.containsKey(s.toLowerCase())) { return s.length(); } Matcher wordBoundary = WORD_SEPARATOR.matcher(s); if (!wordBoundary.find()) { // No word boundaries found. Entire string is only possible phrase. return s.length(); } // First word is always an abbreviation candidate, perhaps with no // alternatives but itself. int longestMatchLength = wordBoundary.start(); while (wordBoundary.find()) { if (abbreviations.containsKey(s.substring(0, wordBoundary.start()).toLowerCase())) { longestMatchLength = wordBoundary.start(); } } return longestMatchLength; } }
false
tableViews_src_main_java_gov_nasa_arc_mct_abbreviation_impl_AbbreviationsManager.java
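A sketch of the longest-match partitioning described in the Javadoc above; the property values are illustrative (they mirror the test fixture later in this section):

    Properties props = new Properties();
    props.setProperty("fiber optic", "F/O");
    props.setProperty("system", "Sys");
    AbbreviationsManager manager = new AbbreviationsManager(props);

    // "Fiber Optic" is the longest phrase with an entry, so it stays together;
    // "MDM" has no entry and falls back to the single word.
    Abbreviations abbrevs = manager.getAbbreviations("Fiber Optic MDM System");
    // abbrevs.getPhrases()                    -> [Fiber Optic, MDM, System]
    // abbrevs.getAbbreviations("Fiber Optic") -> [Fiber Optic, F/O]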
6
Collections.sort(abbreviationsForPhrase, new Comparator<String>() { @Override public int compare(String o1, String o2) { return o1.length() - o2.length(); } });
false
tableViews_src_main_java_gov_nasa_arc_mct_abbreviation_impl_AbbreviationsManager.java
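This comparator sorts alternatives by increasing length. Because Collections.sort is guaranteed stable, equal-length alternatives keep the left-to-right order in which they were specified, which the manager test below relies on. A standalone sketch:

    List<String> alts = new ArrayList<String>(
            Arrays.asList("Amps", "A", "aa", "bbbb", "a", "aaaa"));
    Collections.sort(alts, new Comparator<String>() {
        @Override
        public int compare(String o1, String o2) {
            return o1.length() - o2.length();
        }
    });
    // Stable result: [A, a, aa, Amps, bbbb, aaaa] -- the three length-4 items
    // remain in their original relative order.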
7
public class AbbreviationsManagerTest { private Properties defaultProperties; @BeforeMethod public void init() { defaultProperties = new Properties(); defaultProperties.setProperty("fiber optic", "F/O"); defaultProperties.setProperty("system", "Sys"); } @Test public void testGetAlternatives() { AbbreviationsManager manager; Properties props; List<String> alternatives; props = new Properties(); manager = new AbbreviationsManager(props); alternatives = manager.getAlternatives("volts"); assertEquals(alternatives.size(), 1); assertEquals(alternatives.get(0), "volts"); props = new Properties(); props.setProperty("Volts", "V"); // Note that lookup should be case insensitive. manager = new AbbreviationsManager(props); alternatives = manager.getAlternatives("volts"); assertEquals(alternatives.size(), 2); assertEquals(alternatives.get(0), "volts"); // Matches the case of getAbbreviations() argument. assertEquals(alternatives.get(1), "V"); props = new Properties(); props.setProperty("Amperes", "Amps | A | aa | bbbb | a | aaaa "); manager = new AbbreviationsManager(props); alternatives = manager.getAlternatives("amperes"); assertEquals(alternatives.size(), 7); assertEquals(alternatives.get(0), "amperes"); // Must match in case to getAbbreviations() argument. assertEquals(alternatives.get(1), "A"); assertEquals(alternatives.get(2), "a"); assertEquals(alternatives.get(3), "aa"); assertEquals(alternatives.get(4), "Amps"); // same length items are in left to right specified order assertEquals(alternatives.get(5), "bbbb"); assertEquals(alternatives.get(6), "aaaa"); } @Test(dataProvider="getAbbreviationsTests") public void testGetAbbreviations(String s, String[] expectedPhrases) { AbbreviationsManager manager = new AbbreviationsManager(defaultProperties); Abbreviations abbrev = manager.getAbbreviations(s); assertEquals(abbrev.getValue(), s); assertEquals(abbrev.getPhrases().size(), expectedPhrases.length); for (int i=0; i<abbrev.getPhrases().size(); ++i) { String phrase = abbrev.getPhrases().get(i); assertEquals(phrase, expectedPhrases[i]); List<String> alternatives = abbrev.getAbbreviations(phrase); List<String> expectedAlternatives = manager.getAlternatives(abbrev.getPhrases().get(i)); assertTrue(alternatives.size() >= 1); assertEquals(alternatives.size(), expectedAlternatives.size()); assertEquals(alternatives.get(0), abbrev.getPhrases().get(i)); } } @DataProvider(name="getAbbreviationsTests") private Object[][] getGetAbbreviationsTests() { return new Object[][] { { "System", new String[] { "System" } }, // One word in abbreviations map { "MDM", new String[] { "MDM" } }, // One word not in abbreviations map { "Fiber Optic", new String[] { "Fiber Optic" } }, // Exact phrase in abbreviations map // Some longer tests. { "Junk1 Junk2 Junk3", new String[] { "Junk1", "Junk2", "Junk3" } }, // No matches { "Fiber Optic MDM System", new String[] { "Fiber Optic", "MDM", "System" } }, }; } }
false
tableViews_src_test_java_gov_nasa_arc_mct_abbreviation_impl_AbbreviationsManagerTest.java
8
public class LabelAbbreviationsTest { @Test public void getAbbreviation() throws SecurityException, NoSuchFieldException, IllegalArgumentException, IllegalAccessException { AbbreviationsImpl availableAbbreviations = new AbbreviationsImpl("value"); availableAbbreviations.addPhrase("Amps", Collections.singletonList("A")); availableAbbreviations.addPhrase("BCA1", Collections.<String>emptyList()); availableAbbreviations.addPhrase("Ch1", Collections.<String>emptyList()); availableAbbreviations.addPhrase("Serial", Collections.<String>emptyList()); AbbreviationSettings aSettings = new AbbreviationSettings("fullLabel", availableAbbreviations, new LabelAbbreviations()); String abbreviatedLabel = aSettings.getAbbreviatedLabel(); Assert.assertEquals(abbreviatedLabel, "Amps BCA1 Ch1 Serial"); LabelAbbreviations available2 = aSettings.getAbbreviations(); Assert.assertEquals(available2.getAbbreviation("BCA1"), "BCA1"); Assert.assertEquals(available2.getAbbreviation("Amps"), "Amps"); // Change the state of the control panel via currentAbbreviations LabelAbbreviations currentAbbreviations = new LabelAbbreviations(); currentAbbreviations.addAbbreviation("Amps", "A | a | Amp"); currentAbbreviations.addAbbreviation("BCA1", "B | bca1"); currentAbbreviations.addAbbreviation("CAT", "C"); currentAbbreviations.addAbbreviation("DOG", "D"); currentAbbreviations.addAbbreviation("Ace", "ace"); currentAbbreviations.addAbbreviation("Abb", "a"); currentAbbreviations.addAbbreviation("Rabbit", "R"); AbbreviationSettings a2Settings = new AbbreviationSettings("fullLabel", availableAbbreviations, currentAbbreviations); LabelAbbreviations available2afterSelect = a2Settings.getAbbreviations(); Assert.assertEquals(available2afterSelect.getAbbreviation("BCA1"), "B | bca1"); Assert.assertEquals(available2afterSelect.getAbbreviation("Amps"), "A | a | Amp"); Map<String, String> map = getAbbreviations(currentAbbreviations); Assert.assertEquals(map.size(), 7); } private Map<String, String> getAbbreviations( LabelAbbreviations currentAbbreviations) throws SecurityException, NoSuchFieldException, IllegalArgumentException, IllegalAccessException { Field f = currentAbbreviations.getClass().getDeclaredField("abbreviations"); //NoSuchFieldException f.setAccessible(true); @SuppressWarnings("unchecked") Map<String, String> map = (HashMap<String,String>) f.get(currentAbbreviations); //IllegalAccessException return map; } }
false
tableViews_src_test_java_gov_nasa_arc_mct_abbreviation_impl_LabelAbbreviationsTest.java
9
public class BufferFullException extends Exception { private static final long serialVersionUID = 2028815233703151762L; /** * Default constructor. */ public BufferFullException() { super(); } /** * Overloaded constructor with a single message argument. * @param msg the detail message describing the buffer-full condition. */ public BufferFullException(String msg) { super(msg); } }
false
mctcore_src_main_java_gov_nasa_arc_mct_api_feed_BufferFullException.java
10
public interface DataArchive extends FeedDataArchive { /** * Returns the level of service of this data archive. * * @return the level of service of this data archive */ public LOS getLOS(); }
false
mctcore_src_main_java_gov_nasa_arc_mct_api_feed_DataArchive.java
11
public interface DataProvider extends FeedAggregator { /** * An enum which defines the various levels of service (LOS) a data provider can provide. * */ public static enum LOS { /** Fast enum. */ fast, /** Medium enum. */ medium, /** Slow enum.*/ slow } /** * Returns a map of data for each feed. This allows the data to be queried in batch and improves performance. * @param feedIDs to retrieve data for * @param startTime the start time of the return data set. * @param endTime the end time of the return data set. * @param timeUnit the time unit of startTime and endTime parameters. * @return map of data for the specified feeds. Each entry in the map has data * with a timestamp that is >= startTime and <= endTime, ordered by time. */ public Map<String, SortedMap<Long, Map<String, String>>> getData(Set<String> feedIDs, long startTime, long endTime, TimeUnit timeUnit); /** * Checks if a request can be fully serviced by a data provider. * @param feedID feed that is requested * @param startTime start time of the request * @param timeUnit the time unit of startTime * @return true if a request can be fully serviced by a data provider. */ public boolean isFullyWithinTimeSpan(String feedID, long startTime, TimeUnit timeUnit); /** * Returns the level of service that a data retrieval provider can provide. * @return the level of service. */ public LOS getLOS(); }
false
mctcore_src_main_java_gov_nasa_arc_mct_api_feed_DataProvider.java
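A sketch of consuming DataProvider.getData; the 'provider' variable and the feed ID are hypothetical:

    // Hypothetical: 'provider' is a DataProvider implementation.
    Set<String> feedIDs = Collections.singleton("feed1");
    long endTime = System.currentTimeMillis();
    long startTime = endTime - TimeUnit.MINUTES.toMillis(5);
    Map<String, SortedMap<Long, Map<String, String>>> data =
            provider.getData(feedIDs, startTime, endTime, TimeUnit.MILLISECONDS);
    // Per the Javadoc, each feed's entries are ordered by timestamp,
    // with startTime <= timestamp <= endTime.
    SortedMap<Long, Map<String, String>> feedData = data.get("feed1");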
12
public static enum LOS { /** Fast enum. */ fast, /** Medium enum. */ medium, /** Slow enum.*/ slow }
false
mctcore_src_main_java_gov_nasa_arc_mct_api_feed_DataProvider.java
13
public interface FeedAggregator { /** * Returns a map of data for each feed. This allows the data to be queried in batch and improves performance. * @param feedIDs to retrieve data for * @param timeUnit the time unit of startTime and endTime parameters. * @param startTime the start time of the return data set. * @param endTime the end time of the return data set. * @return map of data for the specified feeds. Each entry in the map has data * with a timestamp that is >= startTime and < endTime. */ public Map<String, List<Map<String, String>>> getData(Set<String> feedIDs, TimeUnit timeUnit, long startTime, long endTime); }
false
mctcore_src_main_java_gov_nasa_arc_mct_api_feed_FeedAggregator.java
14
public interface FeedDataArchive { /** * This API puts data into the archive. * * @param feedID the feedID for which the data should be archived. * @param timeUnit the time unit of the time stamp of each data record that is put into the * archive. * @param entries a map from timestamp to data record. * @throws BufferFullException if the archive buffer is full. */ public void putData(String feedID, TimeUnit timeUnit, Map<Long, Map<String, String>> entries) throws BufferFullException; /** * This API puts data into the archive. * @param feedID the feedID for which the data should be archived. * @param timeUnit the time unit of the time stamp of each data record that is put into the * archive. * @param time the timestamp of the data record. * @param value the data record to be saved in the archive that corresponds to the time. * @throws BufferFullException if the archive buffer is full. */ public void putData(String feedID, TimeUnit timeUnit, long time, Map<String, String> value) throws BufferFullException; /** * This method accepts a set of data and will invoke the runnable once all the data has * been persisted. * @param value the set of data: a map keyed by feedID, whose values map timestamp to data record * @param timeUnit the time unit of the time stamp of each data record that is put into the * archive. * @param callback to execute when the data has been committed to the repository * @throws BufferFullException if the archive buffer is full. */ public void putData(Map<String,Map<Long, Map<String, String>>> value, TimeUnit timeUnit, Runnable callback) throws BufferFullException; /** * Resets the Feed Aggregator so that the content it provides starts from the very beginning. */ public void reset(); }
false
mctcore_src_main_java_gov_nasa_arc_mct_api_feed_FeedDataArchive.java
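A sketch of the single-record put; the 'archive' variable and the record contents are hypothetical:

    // Hypothetical: 'archive' is a FeedDataArchive implementation.
    Map<String, String> record = new HashMap<String, String>();
    record.put("value", "42.0");
    try {
        archive.putData("feed1", TimeUnit.MILLISECONDS, System.currentTimeMillis(), record);
    } catch (BufferFullException e) {
        // The buffer rejected the write; the caller decides whether to retry or drop.
    }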
15
public class OptimisticLockException extends RuntimeException { private static final long serialVersionUID = 1L; /** * Creates a new instance based on an existing exception. * @param e root exception */ public OptimisticLockException(Exception e) { super(e); } }
false
mctcore_src_main_java_gov_nasa_arc_mct_api_persistence_OptimisticLockException.java
16
public interface PersistenceService { /** * This method is invoked when starting a set of related persistence operations in the current thread. If the underlying * persistence implementation is a database this will likely start a transaction. This method will * generally only be used from code that operates outside the scope of an action, for example an action * that does some processing in the background. */ void startRelatedOperations(); /** * This method is invoked when completing a set of related persistence operations. This method must * be invoked following {@link #startRelatedOperations()} and only a single time. * @param save true if the operation should be saved, false otherwise. */ void completeRelatedOperations(boolean save); /** * Checks if <code>tagId</code> is used on at least one component in the database. * @param tagId tag ID * @return true if there are components tagged with <code>tagId</code>; false otherwise. */ boolean hasComponentsTaggedBy(String tagId); /** * Returns the component with the specified external key and component type. * @param externalKey to use for search criteria * @param componentType to use with external key * @param <T> type of component * @return instance of component with the given type or null if the component cannot be found. */ <T extends AbstractComponent> T getComponent(String externalKey, Class<T> componentType); /** * Returns the component with the specified external key and component type. * @param externalKey to use for search criteria * @param componentType to use with external key * @return instance of component with the given type or null if the component cannot * be found. */ AbstractComponent getComponent(String externalKey, String componentType); }
false
mctcore_src_main_java_gov_nasa_arc_mct_api_persistence_PersistenceService.java
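The startRelatedOperations/completeRelatedOperations pair brackets a unit of work, and the Javadoc requires exactly one completion call per start. A sketch of the intended pattern, with 'persistenceService' and the work itself hypothetical:

    persistenceService.startRelatedOperations();
    boolean save = false;
    try {
        // ... perform the related persistence operations here ...
        save = true;
    } finally {
        // Invoked exactly once after startRelatedOperations(); pass false to discard.
        persistenceService.completeRelatedOperations(save);
    }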
17
public interface DataBufferEnv { public int getNumOfBufferPartitions(); public int getCurrentBufferPartition(); public long getBufferTime(); public long getBufferPartitionOverlap(); public int previousBufferPartition(int currentPartition); public int getConcurrencyDegree(); public int getBufferWriteThreadPoolSize(); public void closeAndRestartEnvironment(); public void restartEnvironment(boolean isReadOnly); public int nextBufferPartition(); public DataBufferEnv advanceBufferPartition(); public Object clone(); public Object cloneMetaBuffer(); public Properties getConfigProperties(); public LOS getLOS(); public void flush(); }
false
timeSequenceFeedAggregator_src_main_java_gov_nasa_arc_mct_buffer_config_DataBufferEnv.java
18
public final class DiskBufferEnv implements DataBufferEnv, Cloneable { private static final Logger LOGGER = LoggerFactory.getLogger(DiskBufferEnv.class); private static final String META_DATABASE_PATH = "metaBuffer"; private static final String META_DATABASE_NAME = "meta"; private static enum STATE { unInitialized, initializing, initialized; } private Environment dbufferEnv; private STATE state = STATE.unInitialized; private final Properties prop; private volatile long bufferTimeMills; private long evictorRecurrMills; private File envHome; private final int concurrency; private final int bufferWriteThreadPoolSize; private final int numOfBufferPartitions; private final int currentBufferPartition; private final long partitionOverlapMillis; private final long metaRefreshMillis; private TransactionConfig txnConfig; private CursorConfig cursorConfig; private DiskQuotaHelper diskQuotaHelper; private static Properties loadDefaultPropertyFile() { Properties prop = new Properties(); InputStream is = null; try { is = ClassLoader.getSystemResourceAsStream("properties/feed.properties"); prop.load(is); } catch (Exception e) { LOGGER.error("Cannot initialize DataBufferEnv properties", e); } finally { if (is != null) { try { is.close(); } catch (IOException ioe) { // ignore exception } } } return prop; } public String getErrorMsg() { return diskQuotaHelper.getErrorMsg(); } public DiskBufferEnv(Properties prop) { if (prop == null) { prop = loadDefaultPropertyFile(); } this.prop = prop; this.currentBufferPartition = 0; File bufferHome = new File(FilepathReplacer.substitute(getPropertyWithPrecedence(prop, "buffer.disk.loc"))); if (!bufferHome.exists()) { bufferHome.mkdirs(); } envHome = new File(bufferHome, META_DATABASE_PATH); if (!envHome.exists()) { envHome.mkdirs(); } concurrency = Integer.parseInt(prop.getProperty("buffer.concurrency")); evictorRecurrMills = Long.parseLong(prop.getProperty("buffer.evictor.recurrMills")); bufferWriteThreadPoolSize = Integer.parseInt(prop.getProperty("buffer.write.threadPool.size")); numOfBufferPartitions = Integer.parseInt(prop.getProperty("buffer.partitions")); bufferTimeMills = Long.parseLong(prop.getProperty("buffer.time.millis")); metaRefreshMillis = Long.parseLong(prop.getProperty("meta.buffer.refresh.millis")); if (bufferTimeMills > numOfBufferPartitions) { bufferTimeMills = bufferTimeMills / numOfBufferPartitions; } partitionOverlapMillis = Long.parseLong(prop.getProperty("buffer.partition.overlap.millis")); diskQuotaHelper = new DiskQuotaHelper(prop, bufferHome); this.state = STATE.initializing; setup(false); } public DiskBufferEnv(Properties prop, int currentBufferPartition) { //throws Exception { if (prop == null) { prop = loadDefaultPropertyFile(); } this.prop = prop; this.currentBufferPartition = currentBufferPartition; File bufferHome = new File(FilepathReplacer.substitute(getPropertyWithPrecedence(prop, "buffer.disk.loc"))); if (!bufferHome.exists()) { bufferHome.mkdirs(); } envHome = new File(bufferHome, String.valueOf(currentBufferPartition)); if (!envHome.exists()) { envHome.mkdirs(); } concurrency = Integer.parseInt(prop.getProperty("buffer.concurrency")); evictorRecurrMills = Long.parseLong(prop.getProperty("buffer.evictor.recurrMills")); bufferWriteThreadPoolSize = Integer.parseInt(prop.getProperty("buffer.write.threadPool.size")); numOfBufferPartitions = Integer.parseInt(prop.getProperty("buffer.partitions")); bufferTimeMills = Long.parseLong(prop.getProperty("buffer.time.millis")); bufferTimeMills = bufferTimeMills / numOfBufferPartitions; 
partitionOverlapMillis = Long.parseLong(prop.getProperty("buffer.partition.overlap.millis")); metaRefreshMillis = Long.parseLong(prop.getProperty("meta.buffer.refresh.millis")); diskQuotaHelper = new DiskQuotaHelper(prop, bufferHome); this.state = STATE.initializing; setup(false); } private void setup(boolean readOnly) { assertState(STATE.initializing); // Instantiate an environment configuration object EnvironmentConfig envConfig = new EnvironmentConfig(); envConfig.setSharedCache(true); String cachePercent = prop.getProperty("bdb.cache.percent"); if (cachePercent != null) { envConfig.setCachePercent(Integer.parseInt(cachePercent)); } // Configure the environment for the read-only state as identified by // the readOnly parameter on this method call. envConfig.setReadOnly(readOnly); // If the environment is opened for write, then we want to be able to // create the environment if it does not exist. envConfig.setAllowCreate(true); envConfig.setConfigParam(EnvironmentConfig.CHECKPOINTER_BYTES_INTERVAL, "40000000"); envConfig.setTransactional(false); envConfig.setDurability(Durability.COMMIT_NO_SYNC); envConfig.setConfigParam(EnvironmentConfig.ENV_RUN_CLEANER, Boolean.FALSE.toString()); envConfig.setConfigParam(EnvironmentConfig.ENV_IS_LOCKING, Boolean.FALSE.toString()); setupConfig(); // Instantiate the Environment. This opens it and also possibly // creates it. try { dbufferEnv = new Environment(envHome, envConfig); state = STATE.initialized; } catch (DatabaseException de) { LOGGER.error("DatabaseException in setup", de); state = STATE.unInitialized; } } private void setupConfig() { txnConfig = new TransactionConfig(); txnConfig.setReadUncommitted(true); txnConfig.setDurability(Durability.COMMIT_NO_SYNC); cursorConfig = new CursorConfig(); cursorConfig.setReadUncommitted(true); } private String getPropertyWithPrecedence(Properties localProps, String key) { String systemProp = System.getProperty(key); return systemProp != null ? systemProp.trim() : localProps.getProperty(key, "unset").trim(); } public Database openMetaDiskStore() throws DatabaseException { assertState(STATE.initialized); DatabaseConfig dbConfig = new DatabaseConfig(); dbConfig.setAllowCreate(true); dbConfig.setSortedDuplicates(false); dbConfig.setTransactional(false); Database diskStore = dbufferEnv.openDatabase(null, META_DATABASE_NAME, dbConfig); return diskStore; } public Database openDiskStore(String dbName, SecondaryKeyCreator... 
keyCreators) throws DatabaseException { assertState(STATE.initialized); DatabaseConfig dbConfig = new DatabaseConfig(); dbConfig.setAllowCreate(true); dbConfig.setSortedDuplicates(false); dbConfig.setTransactional(false); Database diskStore = dbufferEnv.openDatabase(null, dbName, dbConfig); int i=0; for (SecondaryKeyCreator keyCreator : keyCreators) { SecondaryConfig secDbConfig = new SecondaryConfig(); secDbConfig.setKeyCreator(keyCreator); secDbConfig.setAllowCreate(true); secDbConfig.setSortedDuplicates(true); secDbConfig.setTransactional(false); // Perform the actual open String secDbName = dbName + i; dbufferEnv.openSecondaryDatabase(null, secDbName, diskStore, secDbConfig); i++; } return diskStore; } public boolean isDiskBufferFull() { return diskQuotaHelper.isDiskBufferFull(); } public Transaction beginTransaction() throws DatabaseException { assertState(STATE.initialized); TransactionConfig txnConfig = new TransactionConfig(); txnConfig.setReadUncommitted(true); return dbufferEnv.beginTransaction(null, txnConfig); } public SecondaryCursor openSecondaryCursor(Transaction txn, Database database, int index) throws DatabaseException { List<SecondaryDatabase> secDbs = database.getSecondaryDatabases(); assert secDbs.size() == 2; SecondaryDatabase secDb = secDbs.get(index); SecondaryCursor mySecCursor = secDb.openCursor(txn, cursorConfig); return mySecCursor; } public void removeEnvironment() throws DatabaseException { dbufferEnv.cleanLog(); dbufferEnv.close(); deleteDatabaseFile(currentBufferPartition); this.state = STATE.unInitialized; } public void closeEnvironment() throws DatabaseException { dbufferEnv.cleanLog(); dbufferEnv.close(); this.state = STATE.unInitialized; } public void removeAndCloseAllDiskStores() throws DatabaseException { List<String> dbNames = dbufferEnv.getDatabaseNames(); for (String dbName : dbNames) { try { dbufferEnv.removeDatabase(null, dbName); } catch (DatabaseException de) { continue; } } closeEnvironment(); } public void closeDatabase(Database database) throws DatabaseException { if (database == null) { return; } List<SecondaryDatabase> secDbs = database.getSecondaryDatabases(); for (Database secDb : secDbs) { secDb.close(); } database.close(); } public void closeAndRestartEnvironment() throws DatabaseException { boolean isReadOnly = dbufferEnv.getConfig().getReadOnly(); removeAndCloseAllDiskStores(); restartEnvironment(isReadOnly); } public void restartEnvironment(boolean isReadOnly) throws DatabaseException { state = STATE.initializing; setup(isReadOnly); } public int getConcurrencyDegree() { return concurrency; } public int getBufferWriteThreadPoolSize() { return bufferWriteThreadPoolSize; } public long getBufferTime() { return bufferTimeMills; } public long getEvictorRecurr() { return evictorRecurrMills; } public int getNumOfBufferPartitions() { return numOfBufferPartitions; } public void setBufferTime(long bufferTimeMills) { this.bufferTimeMills = bufferTimeMills; } public long getBufferPartitionOverlap() { return partitionOverlapMillis; } public int getCurrentBufferPartition() { return currentBufferPartition; } public DataBufferEnv advanceBufferPartition() { int nextBufferPartition = nextBufferPartition(); deleteDatabaseFile(nextBufferPartition); DiskBufferEnv newBufferEnv = new DiskBufferEnv(prop, (this.currentBufferPartition + 1) % numOfBufferPartitions); return newBufferEnv; } private void deleteDatabaseFile(int partitionNo) { File parentDir = this.envHome.getParentFile(); File nextBufferPartitionDir = new File(parentDir, 
String.valueOf(partitionNo)); if (nextBufferPartitionDir.exists()) { if (nextBufferPartitionDir.isDirectory()) { File[] files = nextBufferPartitionDir.listFiles(); for (File f: files) { f.delete(); } } nextBufferPartitionDir.delete(); } } public int nextBufferPartition() { return (this.currentBufferPartition+1)%numOfBufferPartitions; } public int previousBufferPartition(int currentPartition) { int i = currentPartition; if (i == 0) { i = this.numOfBufferPartitions-1; } else { i--; } return i; } public long getMetaRefresh() { return this.metaRefreshMillis; } @Override public Object clone() { return new DiskBufferEnv(prop, 0); } @Override public Object cloneMetaBuffer() { return new DiskBufferEnv(prop); } private void assertState(STATE expectedState) { assert this.state == expectedState; } @Override public Properties getConfigProperties() { return this.prop; } public void flush() { this.dbufferEnv.sync(); } @Override public LOS getLOS() { return LOS.medium; } }
false
timeSequenceFeedAggregator_src_main_java_gov_nasa_arc_mct_buffer_config_DiskBufferEnv.java
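The partition bookkeeping in this class is plain modular arithmetic; a self-contained sketch for, say, four partitions:

    int numOfBufferPartitions = 4;
    int current = 3;
    int next = (current + 1) % numOfBufferPartitions;  // 0 -- wraps forward
    int previous = (current == 0)
            ? numOfBufferPartitions - 1                // wraps backward from 0
            : current - 1;                             // 2 otherwise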
19
private static enum STATE { unInitialized, initializing, initialized; }
false
timeSequenceFeedAggregator_src_main_java_gov_nasa_arc_mct_buffer_config_DiskBufferEnv.java
20
class DiskQuotaHelper { private static final Logger LOGGER = LoggerFactory.getLogger(DiskQuotaHelper.class); private double usableSpaceAvailableInPercentage = 0; private double freeSpaceAvailableInPercentage = 0; private double totalSpaceInMB = 0; private double freeSpaceInMB = 0; private double usableSpaceInMB = 0; private int bufferMinDiskSpaceAvailableInMB = 10; private int bufferMinDiskSpaceAvailableInPercentage = 1; public String DISK_SPACE_PERCENTAGE_ERROR_MSG = "bufferMinDiskSpaceAvailableInMB = " + bufferMinDiskSpaceAvailableInMB + " bufferMinDiskSpaceAvailableInPercentage= " + bufferMinDiskSpaceAvailableInPercentage + "%"; public DiskQuotaHelper(Properties prop, File bufferHome) { bufferMinDiskSpaceAvailableInMB = Integer.parseInt(prop.getProperty("buffer.min.disk.space.megabytes")); bufferMinDiskSpaceAvailableInPercentage = Integer.parseInt(prop.getProperty("buffer.min.percentage.disk.space")); DISK_SPACE_PERCENTAGE_ERROR_MSG = "Disk space for MCT Buffer is &lt;= " + bufferMinDiskSpaceAvailableInMB + " MB or Total free disk space available is &lt;= " + bufferMinDiskSpaceAvailableInPercentage + "%"; printAvailableDiskSpace("bufferHome from properties", bufferHome); } private void printAvailableDiskSpace(String fileNameDesignation, File filePartition) { // NOTE: Usable Disk Space available in JVM is always less than Free Disk Space LOGGER.info("*** Disk Partition '" + fileNameDesignation + "' at path:"+ filePartition.getAbsolutePath()+" ***"); // Prints total disk space in bytes for volume partition specified by file abstract pathname. long totalSpace = filePartition.getTotalSpace(); totalSpaceInMB = totalSpace /1024 /1024; // Prints an accurate estimate of the total free (and available to this JVM) bytes // on the volume. This method may return the same result as 'getFreeSpace()' on some platforms. long usableSpace = filePartition.getUsableSpace(); usableSpaceInMB = usableSpace /1024 /1024; // Prints the total free unallocated bytes for the volume in bytes. 
long freeSpace = filePartition.getFreeSpace(); freeSpaceInMB = freeSpace /1024 /1024; LOGGER.info("MCT property specifying Min Disk Space Available (in MB): " + bufferMinDiskSpaceAvailableInMB ); LOGGER.info("MCT property specifying Min Disk Space Available (in Percentage): " + bufferMinDiskSpaceAvailableInPercentage ); LOGGER.info("total Space In MB: " + totalSpaceInMB + " MB"); LOGGER.info("usable Space In MB: " + usableSpaceInMB + " MB"); LOGGER.info("free Space In MB: " + freeSpaceInMB + " MB"); if (totalSpaceInMB > 0) { usableSpaceAvailableInPercentage = (usableSpaceInMB / totalSpaceInMB) * 100; freeSpaceAvailableInPercentage = (freeSpaceInMB / totalSpaceInMB) * 100; LOGGER.info("Calculated Usable Space Available (in Percentage): " + usableSpaceAvailableInPercentage + " %"); LOGGER.info("Calculated Free Space Available (in Percentage): " + freeSpaceAvailableInPercentage + " %"); } else { LOGGER.info("filePartition.getTotalSpace() reported: " + totalSpace); } String m = String.format("The disk is full when: " + "\n usableSpaceAvailableInPercentage (%.1f) <= bufferMinDiskSpaceAvailableInPercentage (%d), or \n " + "usableSpaceInMB (%.1f) <= bufferMinDiskSpaceAvailableInMB (%d), or \n " + "freeSpaceInMB (%.1f) <= bufferMinDiskSpaceAvailableInMB (%d) \n" + "***", usableSpaceAvailableInPercentage, bufferMinDiskSpaceAvailableInPercentage, usableSpaceInMB, bufferMinDiskSpaceAvailableInMB, freeSpaceInMB, bufferMinDiskSpaceAvailableInMB); LOGGER.info(m); } public String getErrorMsg() { return ("<HTML>" + DISK_SPACE_PERCENTAGE_ERROR_MSG + "<BR>Total Disk Space (in MB): " + totalSpaceInMB + "<BR>JVM Usable Disk Space Available (in MB): " + usableSpaceInMB + "<BR>System Free Disk Space Available (in MB): " + freeSpaceInMB + "<BR>Percentage JVM Usable Disk Space Available: " + usableSpaceAvailableInPercentage + "%<BR>Percentage System Free Disk Space Available: " + freeSpaceAvailableInPercentage + "%</HTML>"); } public boolean isDiskBufferFull() { return (usableSpaceAvailableInPercentage <= bufferMinDiskSpaceAvailableInPercentage) || (usableSpaceInMB <= bufferMinDiskSpaceAvailableInMB) || (freeSpaceInMB <= bufferMinDiskSpaceAvailableInMB); } }
false
timeSequenceFeedAggregator_src_main_java_gov_nasa_arc_mct_buffer_config_DiskQuotaHelper.java
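The buffer-full decision above reduces to three threshold comparisons against java.io.File space queries; a standalone sketch with illustrative threshold values:

    File partition = new File("/");
    double totalMB = partition.getTotalSpace() / 1024.0 / 1024.0;
    double usableMB = partition.getUsableSpace() / 1024.0 / 1024.0;
    double freeMB = partition.getFreeSpace() / 1024.0 / 1024.0;
    double usablePct = totalMB > 0 ? (usableMB / totalMB) * 100 : 0;

    int minMB = 10;  // cf. buffer.min.disk.space.megabytes (illustrative value)
    int minPct = 1;  // cf. buffer.min.percentage.disk.space (illustrative value)
    boolean diskBufferFull = usablePct <= minPct || usableMB <= minMB || freeMB <= minMB;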
21
public class FastDiskBufferEnv implements DataBufferEnv, Cloneable { private static final Logger LOGGER = LoggerFactory.getLogger(FastDiskBufferEnv.class); private static final String META_DATABASE_PATH = "metaBuffer"; private static final String META_DATABASE_NAME = "meta"; private static enum STATE { unInitialized, initializing, initialized; } private Environment dbufferEnv; private STATE state = STATE.unInitialized; private final Properties prop; private volatile long bufferTimeMills; private long evictorRecurrMills; private File envHome; private final int concurrency; private final int bufferWriteThreadPoolSize; private final int numOfBufferPartitions; private final int currentBufferPartition; private final long partitionOverlapMillis; private final long metaRefreshMillis; private TransactionConfig txnConfig; private CursorConfig cursorConfig; private List<EntityStore> openStores = new LinkedList<EntityStore>(); private DiskQuotaHelper diskQuotaHelper; private static Properties loadDefaultPropertyFile() { Properties prop = new Properties(); InputStream is = null; try { is = ClassLoader.getSystemResourceAsStream("properties/feed.properties"); prop.load(is); } catch (Exception e) { LOGGER.error("Cannot initialize DataBufferEnv properties", e); } finally { if (is != null) { try { is.close(); } catch (IOException ioe) { // ignore exception } } } return prop; } public FastDiskBufferEnv(Properties prop) { if (prop == null) { prop = loadDefaultPropertyFile(); } this.prop = prop; this.currentBufferPartition = 0; File bufferHome = new File(FilepathReplacer.substitute(getPropertyWithPrecedence(prop, "buffer.disk.loc"))); if (!bufferHome.exists()) { bufferHome.mkdirs(); } envHome = new File(bufferHome, META_DATABASE_PATH); if (!envHome.exists()) { envHome.mkdirs(); } concurrency = Integer.parseInt(prop.getProperty("buffer.concurrency")); evictorRecurrMills = Long.parseLong(prop.getProperty("buffer.evictor.recurrMills")); bufferWriteThreadPoolSize = Integer.parseInt(prop.getProperty("buffer.write.threadPool.size")); numOfBufferPartitions = Integer.parseInt(prop.getProperty("buffer.partitions")); bufferTimeMills = Long.parseLong(prop.getProperty("buffer.time.millis")); metaRefreshMillis = Long.parseLong(prop.getProperty("meta.buffer.refresh.millis")); if (bufferTimeMills > numOfBufferPartitions) { bufferTimeMills = bufferTimeMills / numOfBufferPartitions; } partitionOverlapMillis = Long.parseLong(prop.getProperty("buffer.partition.overlap.millis")); diskQuotaHelper = new DiskQuotaHelper(prop, bufferHome); this.state = STATE.initializing; setup(false); } public FastDiskBufferEnv(Properties prop, int currentBufferPartition) { if (prop == null) { prop = loadDefaultPropertyFile(); } this.prop = prop; this.currentBufferPartition = currentBufferPartition; File bufferHome = new File(FilepathReplacer.substitute(getPropertyWithPrecedence(prop, "buffer.disk.loc"))); if (!bufferHome.exists()) { bufferHome.mkdirs(); } envHome = new File(bufferHome, String.valueOf(currentBufferPartition)); if (!envHome.exists()) { envHome.mkdirs(); } concurrency = Integer.parseInt(prop.getProperty("buffer.concurrency")); evictorRecurrMills = Long.parseLong(prop.getProperty("buffer.evictor.recurrMills")); bufferWriteThreadPoolSize = Integer.parseInt(prop.getProperty("buffer.write.threadPool.size")); numOfBufferPartitions = Integer.parseInt(prop.getProperty("buffer.partitions")); bufferTimeMills = Long.parseLong(prop.getProperty("buffer.time.millis")); bufferTimeMills = bufferTimeMills / numOfBufferPartitions; 
partitionOverlapMillis = Long.parseLong(prop.getProperty("buffer.partition.overlap.millis")); metaRefreshMillis = Long.parseLong(prop.getProperty("meta.buffer.refresh.millis")); diskQuotaHelper = new DiskQuotaHelper(prop, bufferHome); this.state = STATE.initializing; setup(false); } private void setup(boolean readOnly) { assertState(STATE.initializing); // Instantiate an environment configuration object EnvironmentConfig envConfig = new EnvironmentConfig(); envConfig.setSharedCache(true); String cachePercent = prop.getProperty("bdb.cache.percent"); if (cachePercent != null) { envConfig.setCachePercent(Integer.parseInt(cachePercent)); } // Configure the environment for the read-only state as identified by // the readOnly parameter on this method call. envConfig.setReadOnly(readOnly); // If the environment is opened for write, then we want to be able to // create the environment if it does not exist. envConfig.setAllowCreate(!readOnly); envConfig.setConfigParam(EnvironmentConfig.CHECKPOINTER_BYTES_INTERVAL, "40000000"); envConfig.setTransactional(false); envConfig.setDurability(Durability.COMMIT_NO_SYNC); envConfig.setConfigParam(EnvironmentConfig.ENV_RUN_CLEANER, Boolean.FALSE.toString()); envConfig.setConfigParam(EnvironmentConfig.ENV_IS_LOCKING, Boolean.FALSE.toString()); setupConfig(); // Instantiate the Environment. This opens it and also possibly // creates it. try { dbufferEnv = new Environment(envHome, envConfig); state = STATE.initialized; } catch (DatabaseException de) { LOGGER.error("DatabaseException in setup", de); state = STATE.unInitialized; } } private void setupConfig() { txnConfig = new TransactionConfig(); txnConfig.setReadUncommitted(true); txnConfig.setDurability(Durability.COMMIT_NO_SYNC); cursorConfig = new CursorConfig(); cursorConfig.setReadUncommitted(true); } public boolean isDiskBufferFull() { return diskQuotaHelper.isDiskBufferFull(); } public String getErrorMsg() { return diskQuotaHelper.getErrorMsg(); } private String getPropertyWithPrecedence(Properties localProps, String key) { String systemProp = System.getProperty(key); return systemProp != null ? 
systemProp.trim() : localProps.getProperty(key, "unset").trim(); } public EntityStore openMetaDiskStore() throws DatabaseException { assertState(STATE.initialized); StoreConfig storeConfig = new StoreConfig(); storeConfig.setAllowCreate(true); storeConfig.setDeferredWrite(true); storeConfig.setTransactional(false); ClassLoader originalClassloader = Thread.currentThread().getContextClassLoader(); try { Thread.currentThread().setContextClassLoader(getClass().getClassLoader()); return new EntityStore(dbufferEnv, META_DATABASE_NAME, storeConfig); } finally { Thread.currentThread().setContextClassLoader(originalClassloader); } } public EntityStore openDiskStore(String dbName) throws DatabaseException { assertState(STATE.initialized); StoreConfig storeConfig = new StoreConfig(); storeConfig.setAllowCreate(true); storeConfig.setDeferredWrite(true); storeConfig.setTransactional(false); EntityStore store = new EntityStore(dbufferEnv, dbName, storeConfig); openStores.add(store); return store; } public void removeEnvironment() throws DatabaseException { dbufferEnv.cleanLog(); dbufferEnv.close(); deleteDatabaseFile(currentBufferPartition); this.state = STATE.unInitialized; } public void closeEnvironment() throws DatabaseException { dbufferEnv.cleanLog(); dbufferEnv.close(); this.state = STATE.unInitialized; } public void removeAndCloseAllDiskStores() throws DatabaseException { for (EntityStore store: openStores) { store.close(); } openStores.clear(); removeEnvironment(); } public void closeDatabase(EntityStore store) throws DatabaseException { if (store == null) { return; } store.close(); openStores.remove(store); } public void closeAndRestartEnvironment() throws DatabaseException { boolean isReadOnly = dbufferEnv.getConfig().getReadOnly(); removeAndCloseAllDiskStores(); restartEnvironment(isReadOnly); } public void restartEnvironment(boolean isReadOnly) throws DatabaseException { state = STATE.initializing; setup(isReadOnly); } public int getConcurrencyDegree() { return concurrency; } public int getBufferWriteThreadPoolSize() { return bufferWriteThreadPoolSize; } public long getBufferTime() { return bufferTimeMills; } public long getEvictorRecurr() { return evictorRecurrMills; } public int getNumOfBufferPartitions() { return numOfBufferPartitions; } public void setBufferTime(long bufferTimeMills) { this.bufferTimeMills = bufferTimeMills; } public long getBufferPartitionOverlap() { return partitionOverlapMillis; } public int getCurrentBufferPartition() { return currentBufferPartition; } public DataBufferEnv advanceBufferPartition() { int nextBufferPartition = nextBufferPartition(); deleteDatabaseFile(nextBufferPartition); FastDiskBufferEnv newBufferEnv = new FastDiskBufferEnv(prop, (this.currentBufferPartition + 1) % numOfBufferPartitions); return newBufferEnv; } private void deleteDatabaseFile(int partitionNo) { File parentDir = this.envHome.getParentFile(); File nextBufferPartitionDir = new File(parentDir, String.valueOf(partitionNo)); if (nextBufferPartitionDir.exists()) { if (nextBufferPartitionDir.isDirectory()) { File[] files = nextBufferPartitionDir.listFiles(); for (File f: files) { f.delete(); } } nextBufferPartitionDir.delete(); } } public int nextBufferPartition() { return (this.currentBufferPartition+1)%numOfBufferPartitions; } public int previousBufferPartition(int currentPartition) { int i = currentPartition; if (i == 0) { i = this.numOfBufferPartitions-1; } else { i--; } return i; } public long getMetaRefresh() { return this.metaRefreshMillis; } @Override public Object clone() { return 
new FastDiskBufferEnv(prop, 0); } @Override public Object cloneMetaBuffer() { return new FastDiskBufferEnv(prop); } private void assertState(STATE expectedState) { assert this.state == expectedState; } @Override public Properties getConfigProperties() { return this.prop; } public void flush() { this.dbufferEnv.sync(); } @Override public LOS getLOS() { return LOS.medium; } }
false
timeSequenceFeedAggregator_src_main_java_gov_nasa_arc_mct_buffer_config_FastDiskBufferEnv.java
22
private static enum STATE { unInitialized, initializing, initialized; }
false
timeSequenceFeedAggregator_src_main_java_gov_nasa_arc_mct_buffer_config_FastDiskBufferEnv.java
23
public class MemoryBufferEnv implements DataBufferEnv, Cloneable { private static final Logger LOGGER = LoggerFactory.getLogger(MemoryBufferEnv.class); private static Properties loadDefaultPropertyFile() { Properties prop = new Properties(); InputStream is = null; try { is = ClassLoader.getSystemResourceAsStream("properties/feed.properties"); prop.load(is); } catch (Exception e) { LOGGER.error("Cannot initialize DataBufferEnv properties", e); } finally { if (is != null) { try { is.close(); } catch (IOException ioe) { // ignore exception } } } return prop; } private final Properties prop; private final long bufferTimeMillis; private final int numOfBufferPartitions; private int currentBufferPartition; public MemoryBufferEnv(Properties prop) { if (prop == null) { prop = loadDefaultPropertyFile(); } this.prop = prop; numOfBufferPartitions = Integer.parseInt(prop.getProperty("memory.buffer.partition")); long bufferTime = Long.parseLong(prop.getProperty("memory.buffer.time.millis")); if (bufferTime > numOfBufferPartitions) { bufferTimeMillis = bufferTime / numOfBufferPartitions; } else { bufferTimeMillis = bufferTime; } this.currentBufferPartition = 0; } public MemoryBufferEnv(Properties prop, int currentBufferPartition) { if (prop == null) { prop = loadDefaultPropertyFile(); } this.prop = prop; this.currentBufferPartition = currentBufferPartition; numOfBufferPartitions = Integer.parseInt(prop.getProperty("memory.buffer.partition")); bufferTimeMillis = Long.parseLong(prop.getProperty("memory.buffer.time.millis")); } @Override public final long getBufferPartitionOverlap() { return 0; } @Override public long getBufferTime() { return bufferTimeMillis; } @Override public int getCurrentBufferPartition() { return currentBufferPartition; } @Override public int getNumOfBufferPartitions() { return this.numOfBufferPartitions; } @Override public int nextBufferPartition() { return (this.currentBufferPartition+1)%numOfBufferPartitions; } @Override public int previousBufferPartition(int currentPartition) { int i = currentPartition; if (i == 0) { i = this.numOfBufferPartitions-1; } else { i--; } return i; } @Override public DataBufferEnv advanceBufferPartition() { int nextBufferPartition = nextBufferPartition(); MemoryBufferEnv newBufferEnv = new MemoryBufferEnv(prop, nextBufferPartition); return newBufferEnv; } @Override public void closeAndRestartEnvironment() { this.currentBufferPartition = 0; } @Override public int getBufferWriteThreadPoolSize() { return 1; } @Override public int getConcurrencyDegree() { return 1; } @Override public void restartEnvironment(boolean isReadOnly) { this.currentBufferPartition = 0; } @Override public Object clone() { return new MemoryBufferEnv(prop); } @Override public Object cloneMetaBuffer() { return new MemoryBufferEnv(prop); } @Override public Properties getConfigProperties() { return this.prop; } @Override public LOS getLOS() { return LOS.fast; } @Override public void flush() { // TODO Auto-generated method stub } }
false
timeSequenceFeedAggregator_src_main_java_gov_nasa_arc_mct_buffer_config_MemoryBufferEnv.java
24
public class NetworkBufferConstants { public final static String HTTP_PROTOCOL = "http://"; public final static char DELIMITER = '/'; public final static char PORT_DELIMITER = ':'; public final static String GET_DATA_COMMAND = "requestData"; public final static String FEED_ID_PARAMETER = "feeds"; public final static String START_TIME_PARAMETER = "startTime"; public final static String END_TIME_PARAMETER = "endTime"; public final static char PARAMETER_DELIMITER = ','; public final static String constructURL(String host, int port, String command) { StringBuilder sb = new StringBuilder(HTTP_PROTOCOL); sb.append(host); sb.append(PORT_DELIMITER); sb.append(port); sb.append(DELIMITER); sb.append(command); sb.append(DELIMITER); return sb.toString(); } }
false
timeSequenceFeedAggregator_src_main_java_gov_nasa_arc_mct_buffer_config_NetworkBufferConstants.java
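A sketch of assembling a data-request URL from these constants; host and port are illustrative:

    String url = NetworkBufferConstants.constructURL("localhost", 8080,
            NetworkBufferConstants.GET_DATA_COMMAND);
    // url is "http://localhost:8080/requestData/"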
25
public class NetworkBufferEnv implements DataBufferEnv, Cloneable { private static final Logger LOGGER = LoggerFactory.getLogger(NetworkBufferEnv.class); private static Properties loadDefaultPropertyFile() { Properties prop = new Properties(); InputStream is = null; try { is = ClassLoader.getSystemResourceAsStream("properties/feed.properties"); prop.load(is); } catch (Exception e) { LOGGER.error("Cannot initialize DataBufferEnv properties", e); } finally { if (is != null) { try { is.close(); } catch (IOException ioe) { // ignore exception } } } return prop; } private final Properties prop; private final int numOfBufferPartitions; private int currentBufferPartition; private final String networkBufferServerHost; private final int networkBufferServerPort; public NetworkBufferEnv(Properties prop) { if (prop == null) { prop = loadDefaultPropertyFile(); } this.prop = prop; numOfBufferPartitions = Integer.parseInt(prop.getProperty("network.buffer.partition")); networkBufferServerHost = prop.getProperty("network.buffer.server.host"); networkBufferServerPort = Integer.parseInt(prop.getProperty("network.buffer.server.port")); this.currentBufferPartition = 0; } public NetworkBufferEnv(Properties prop, int currentBufferPartition) { if (prop == null) { prop = loadDefaultPropertyFile(); } this.prop = prop; this.currentBufferPartition = currentBufferPartition; numOfBufferPartitions = Integer.parseInt(prop.getProperty("network.buffer.partition")); networkBufferServerHost = prop.getProperty("network.buffer.server.host"); networkBufferServerPort = Integer.parseInt(prop.getProperty("network.buffer.server.port")); } @Override public final long getBufferPartitionOverlap() { return 0; } @Override public long getBufferTime() { return -1; } @Override public int getCurrentBufferPartition() { return currentBufferPartition; } @Override public int getNumOfBufferPartitions() { return this.numOfBufferPartitions; } @Override public int nextBufferPartition() { return (this.currentBufferPartition+1)%numOfBufferPartitions; } @Override public int previousBufferPartition(int currentPartition) { int i = currentPartition; if (i == 0) { i = this.numOfBufferPartitions-1; } else { i--; } return i; } @Override public DataBufferEnv advanceBufferPartition() { int nextBufferPartition = nextBufferPartition(); NetworkBufferEnv newBufferEnv = new NetworkBufferEnv(prop, nextBufferPartition); return newBufferEnv; } @Override public void closeAndRestartEnvironment() { this.currentBufferPartition = 0; } @Override public int getBufferWriteThreadPoolSize() { return 1; } @Override public int getConcurrencyDegree() { return 1; } @Override public void restartEnvironment(boolean isReadOnly) { this.currentBufferPartition = 0; } @Override public Object clone() { return new NetworkBufferEnv(prop); } @Override public Object cloneMetaBuffer() { return new NetworkBufferEnv(prop); } public String getNetworkBufferServerHost() { return networkBufferServerHost; } public int getNetworkBufferServerPort() { return networkBufferServerPort; } @Override public Properties getConfigProperties() { return this.prop; } @Override public LOS getLOS() { return LOS.medium; } @Override public void flush() { // TODO Auto-generated method stub } }
false
timeSequenceFeedAggregator_src_main_java_gov_nasa_arc_mct_buffer_config_NetworkBufferEnv.java
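A minimal configuration sketch for NetworkBufferEnv; the property keys are the ones the constructor reads, the values are placeholders:

Properties prop = new Properties();
prop.setProperty("network.buffer.partition", "4");           // placeholder partition count
prop.setProperty("network.buffer.server.host", "localhost"); // placeholder host
prop.setProperty("network.buffer.server.port", "9876");      // placeholder port

NetworkBufferEnv env = new NetworkBufferEnv(prop);
assert env.getNumOfBufferPartitions() == 4;
assert env.getCurrentBufferPartition() == 0;
assert env.nextBufferPartition() == 1;  // (0 + 1) % 4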
26
final class DataValue {
    private final Map<String, String> data;

    DataValue(Map<String, String> data) {
        this.data = data;
    }

    Map<String, String> getData() {
        return data;
    }
}
false
timeSequenceFeedAggregator_src_main_java_gov_nasa_arc_mct_buffer_disk_internal_DataValue.java
27
class DataValueTupleBinding extends TupleBinding<DataValue> {
    @Override
    public void objectToEntry(DataValue dv, TupleOutput to) {
        for (Map.Entry<String, String> entry : dv.getData().entrySet()) {
            to.writeString(entry.getKey());
            to.writeString(entry.getValue());
        }
    }

    @Override
    public DataValue entryToObject(TupleInput ti) {
        Map<String, String> data = new HashMap<String, String>();
        while (ti.available() > 0) {
            String key = ti.readString();
            String value = ti.readString();
            data.put(key, value);
        }
        return new DataValue(data);
    }
}
false
timeSequenceFeedAggregator_src_main_java_gov_nasa_arc_mct_buffer_disk_internal_DataValueTupleBinding.java
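A round-trip sketch through DataValueTupleBinding via the DatabaseEntry overloads inherited from Berkeley DB JE's TupleBinding; it assumes same-package access, since DataValue is package-private:

Map<String, String> original = new HashMap<String, String>();
original.put("value", "1.3");
original.put("status", "ok");

DataValueTupleBinding binding = new DataValueTupleBinding();
DatabaseEntry entry = new DatabaseEntry();              // com.sleepycat.je.DatabaseEntry
binding.objectToEntry(new DataValue(original), entry);  // serialize via objectToEntry(DataValue, TupleOutput)
DataValue copy = binding.entryToObject(entry);          // deserialize via entryToObject(TupleInput)
assert copy.getData().equals(original);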
28
public class FastDiskDataBufferHelper implements DataBufferHelper {

    public final PartitionDataBuffer newPartitionBuffer(int partitionNo) {
        return new PartitionFastDiskBuffer(partitionNo);
    }

    public final PartitionDataBuffer newPartitionBuffer(DataBufferEnv env) {
        assert env instanceof FastDiskBufferEnv;
        return new PartitionFastDiskBuffer((FastDiskBufferEnv) env);
    }

    @Override
    public MetaDataBuffer newMetaDataBuffer(DataBufferEnv env) {
        if (env == null) {
            return new MetaDiskBuffer();
        }
        assert env instanceof FastDiskBufferEnv;
        return new MetaDiskBuffer((FastDiskBufferEnv) env);
    }

    @Override
    public DataBufferEnv newMetaDataBufferEnv(Properties prop) {
        return new FastDiskBufferEnv(prop);
    }
}
false
timeSequenceFeedAggregator_src_main_java_gov_nasa_arc_mct_buffer_disk_internal_FastDiskDataBufferHelper.java
29
public class FeedIDKeyCreator implements SecondaryKeyCreator {
    private final TupleBinding<?> keyBinding;

    FeedIDKeyCreator(TupleBinding<?> keyBinding) {
        this.keyBinding = keyBinding;
    }

    @Override
    public boolean createSecondaryKey(SecondaryDatabase secDb, DatabaseEntry keyEntry, DatabaseEntry valueEntry,
            DatabaseEntry resultEntry) throws DatabaseException {
        KeyValue kv = KeyValue.class.cast(keyBinding.entryToObject(keyEntry));
        String feedID = kv.getFeedID();
        TupleOutput to = new TupleOutput();
        to.writeString(feedID);
        resultEntry.setData(to.getBufferBytes());
        return true;
    }
}
false
timeSequenceFeedAggregator_src_main_java_gov_nasa_arc_mct_buffer_disk_internal_FeedIDKeyCreator.java
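A sketch of how a key creator like this is typically attached to a JE secondary database; the database name, the sorted-duplicates choice, and the surrounding helper are assumptions, while setKeyCreator and openSecondaryDatabase are standard JE calls (FeedIDKeyCreator's constructor is package-private, so this would live in the same package):

SecondaryDatabase openFeedIdIndex(Environment env, Database primary, TupleBinding<?> keyBinding)
        throws DatabaseException {
    SecondaryConfig config = new SecondaryConfig();
    config.setAllowCreate(true);
    config.setSortedDuplicates(true);  // many (feedID, timestamp) records share one feedID
    config.setKeyCreator(new FeedIDKeyCreator(keyBinding));
    return env.openSecondaryDatabase(null, "feedIdIndex", primary, config);
}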
30
final class KeyValue {
    private final String feedID;
    private final long timestamp;

    KeyValue(String feedID, long timestamp) {
        this.feedID = feedID;
        this.timestamp = timestamp;
    }

    String getFeedID() {
        return feedID;
    }

    long getTimestamp() {
        return timestamp;
    }
}
false
timeSequenceFeedAggregator_src_main_java_gov_nasa_arc_mct_buffer_disk_internal_KeyValue.java
31
class KeyValueTupleBinding extends TupleBinding<KeyValue> {
    @Override
    public void objectToEntry(KeyValue kv, TupleOutput to) {
        to.writeLong(kv.getTimestamp());
        to.writeString(kv.getFeedID());
    }

    @Override
    public KeyValue entryToObject(TupleInput ti) {
        long timestamp = ti.readLong();
        String feedID = ti.readString();
        return new KeyValue(feedID, timestamp);
    }
}
false
timeSequenceFeedAggregator_src_main_java_gov_nasa_arc_mct_buffer_disk_internal_KeyValueTupleBinding.java
32
public class MetaDiskBuffer extends MetaDataBuffer {
    private static final Logger LOGGER = LoggerFactory.getLogger(MetaDiskBuffer.class);

    private FastDiskBufferEnv metaEnv;
    private EntityStore metaDatabase;
    private final Timer updateTimer;

    public MetaDiskBuffer() {
        this(new FastDiskBufferEnv(null));
    }

    public MetaDiskBuffer(FastDiskBufferEnv env) {
        super(env);
        metaEnv = env;
        metaDatabase = metaEnv.openMetaDiskStore();
        loadAllPartitionsInformation();
        long metaRefreshTime = metaEnv.getMetaRefresh();
        if (metaRefreshTime == -1) {
            updateTimer = null;
        } else {
            updateTimer = new Timer("Meta Data Buffer Update timer");
            updateTimer.schedule(new TimerTask() {
                @Override
                public void run() {
                    for (int i = 0; i < partitionMetaDatas.length; i++) {
                        if (partitionMetaDatas[i] != null) {
                            writePartitionMetaData(i);
                        }
                    }
                }
            }, metaRefreshTime, metaRefreshTime);
        }
    }

    private PrimaryIndex<Integer, PartitionMetaData> getMetaStoreIndex() {
        ClassLoader originalClassloader = Thread.currentThread().getContextClassLoader();
        try {
            Thread.currentThread().setContextClassLoader(getClass().getClassLoader());
            return metaDatabase.getPrimaryIndex(Integer.class, PartitionMetaData.class);
        } finally {
            Thread.currentThread().setContextClassLoader(originalClassloader);
        }
    }

    private void loadAllPartitionsInformation() {
        try {
            PrimaryIndex<Integer, PartitionMetaData> pi = getMetaStoreIndex();
            if (pi.count() == 0) {
                writeCurrentBufferPartition(0);
                return;
            }
            EntityCursor<PartitionMetaData> piCursor = pi.entities();
            try {
                for (PartitionMetaData pObj : piCursor) {
                    partitionMetaDatas[pObj.getPartitionId()] = pObj;
                    if (pObj.isCurrentPartition()) {
                        this.currentPartition = pObj.getPartitionId();
                    }
                }
            } finally {
                if (piCursor != null) {
                    piCursor.close();
                }
            }
        } catch (Exception e) {
            LOGGER.error("Exception in loadAllPartitionsInformation", e);
        }
    }

    public PartitionMetaData removePartitionMetaData(int bufferPartition) {
        PartitionMetaData pObj = super.removePartitionMetaData(bufferPartition);
        if (pObj == null) {
            return null;
        }
        try {
            getMetaStoreIndex().delete(pObj.getPartitionId());
        } catch (Exception e) {
            LOGGER.error("Exception in removePartitionMetaData", e);
        } finally {
            metaEnv.flush();
            LOGGER.info("Removing partition {} timestamp", bufferPartition);
        }
        return pObj;
    }

    public Set<String> resetPartitionMetaData(int bufferPartition) {
        Set<String> rolloverFeedIDs = super.resetPartitionMetaData(bufferPartition);
        PartitionMetaData pObj = getPartitionMetaData(bufferPartition);
        if (pObj != null) {
            try {
                getMetaStoreIndex().putNoReturn(pObj);
            } catch (Exception e) {
                LOGGER.error("Exception in resetPartitionMetaData", e);
            } finally {
                metaEnv.flush();
                LOGGER.info("Resetting partition {} timestamp", bufferPartition);
            }
        }
        return rolloverFeedIDs;
    }

    @Override
    public void writePartitionMetaData(int bufferPartition) {
        PartitionMetaData pObj = getPartitionMetaData(bufferPartition);
        if (pObj == null) {
            return;
        }
        try {
            getMetaStoreIndex().putNoReturn(pObj);
        } catch (Exception e) {
            LOGGER.error("Exception in writePartitionMetaData", e);
        } finally {
            metaEnv.flush();
            LOGGER.debug("Putting start time and end time of partition {}", bufferPartition);
        }
    }

    @Override
    public void writeCurrentBufferPartition(int newCurrentBufferPartition) {
        PartitionMetaData existingPartitionMetaData = getPartitionMetaData(this.currentPartition);
        PartitionMetaData newPartitionMetaData = getPartitionMetaData(newCurrentBufferPartition);
        if (existingPartitionMetaData != null) {
            existingPartitionMetaData.setCurrentPartition(false);
        }
        if (newPartitionMetaData == null) {
            newPartitionMetaData = new PartitionMetaData(newCurrentBufferPartition);
            synchronized (this) {
                this.partitionMetaDatas[newCurrentBufferPartition] = newPartitionMetaData;
            }
        }
        newPartitionMetaData.setCurrentPartition(true);
        boolean failed = false;
        try {
            if (existingPartitionMetaData != null) {
                getMetaStoreIndex().putNoReturn(existingPartitionMetaData);
            }
            getMetaStoreIndex().putNoReturn(newPartitionMetaData);
        } catch (Exception e) {
            LOGGER.error("Exception in writeCurrentBufferPartition", e);
            failed = true;
        } finally {
            if (!failed) {
                metaEnv.flush();
                this.currentPartition = newCurrentBufferPartition;
                LOGGER.info("moving to partition {}", newCurrentBufferPartition);
            }
        }
    }

    public void close() {
        if (updateTimer != null) {
            updateTimer.cancel(); // stop the periodic flush; the Timer thread is not a daemon
        }
        metaEnv.closeDatabase(metaDatabase);
        super.close();
    }

    public void closeDatabase() {
        metaEnv.closeDatabase(metaDatabase);
        super.closeDatabase();
    }

    public void restart() {
        int numOfBufferPartitions = metaEnv.getNumOfBufferPartitions();
        for (int i = 0; i < numOfBufferPartitions; i++) {
            removePartitionMetaData(i);
        }
        super.restart();
        writeCurrentBufferPartition(0);
    }
}
false
timeSequenceFeedAggregator_src_main_java_gov_nasa_arc_mct_buffer_disk_internal_MetaDiskBuffer.java
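The periodic flush in the constructor is the stock java.util.Timer pattern; a minimal standalone sketch of that pattern, where the printed line stands in for writePartitionMetaData and the refresh interval is a placeholder. schedule(task, delay, period) fires first after one full period, then repeats:

import java.util.Timer;
import java.util.TimerTask;

public class PeriodicFlushSketch {
    public static void main(String[] args) throws InterruptedException {
        long metaRefreshTime = 1000;  // stand-in for metaEnv.getMetaRefresh(), in milliseconds
        Timer updateTimer = new Timer("Meta Data Buffer Update timer");
        updateTimer.schedule(new TimerTask() {
            @Override
            public void run() {
                System.out.println("flush partition metadata");  // stand-in for writePartitionMetaData(i)
            }
        }, metaRefreshTime, metaRefreshTime);
        Thread.sleep(3500);   // let it fire a few times
        updateTimer.cancel(); // a non-daemon Timer must be cancelled or it keeps the JVM alive
    }
}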
33
updateTimer.schedule(new TimerTask() {
    @Override
    public void run() {
        for (int i = 0; i < partitionMetaDatas.length; i++) {
            if (partitionMetaDatas[i] != null) {
                writePartitionMetaData(i);
            }
        }
    }
}, metaRefreshTime, metaRefreshTime);
false
timeSequenceFeedAggregator_src_main_java_gov_nasa_arc_mct_buffer_disk_internal_MetaDiskBuffer.java
34
class PartitionDataValueTupleBinding extends TupleBinding<PartitionTimestamps> {
    @Override
    public void objectToEntry(PartitionTimestamps dv, TupleOutput to) {
        to.writeLong(dv.getStartTimestamp());
        to.writeLong(dv.getEndTimestamp());
    }

    @Override
    public PartitionTimestamps entryToObject(TupleInput ti) {
        long startTimestamp = ti.readLong();
        long endTimestamp = ti.readLong();
        return new PartitionTimestamps(startTimestamp, endTimestamp);
    }
}
false
timeSequenceFeedAggregator_src_main_java_gov_nasa_arc_mct_buffer_disk_internal_PartitionDataValueTupleBinding.java
35
public class PartitionFastDiskBuffer implements PartitionDataBuffer { private static final Logger LOGGER = LoggerFactory.getLogger(PartitionFastDiskBuffer.class); private static final Logger READ_PERF_LOGGER = LoggerFactory .getLogger("gov.nasa.arc.mct.performance.fastDisk.partitionbuffer.read"); private static final Logger WRITE_PERF_LOGGER = LoggerFactory .getLogger("gov.nasa.arc.mct.performance.fastDisk.partitionbuffer.write"); private static final class TimeStampComparator implements Comparator<Long>, Serializable { private static final long serialVersionUID = -665810351953536404L; @Override public int compare(Long o1, Long o2) { return o1.compareTo(o2); } } private static final Comparator<Long> TIMESTAMP_COMPARATOR = new TimeStampComparator(); private final EntityStore[] databases; private final FastDiskBufferEnv env; /** * Mask value for indexing into segments. The upper bits of a key's hash * code are used to choose the segment. */ private final int segmentMask; /** * Shift value for indexing within segments. */ private final int segmentShift; private static final ThreadFactory tf = new ThreadFactory() { @Override public Thread newThread(Runnable r) { Thread t = Executors.defaultThreadFactory().newThread(r); t.setContextClassLoader(getClass().getClassLoader()); return t; } }; private static final ExecutorService writeThreads = new ThreadPoolExecutor(4, 4, 10L, TimeUnit.SECONDS, new LinkedBlockingQueue<Runnable>(), tf); private static final ExecutorService readThreads = new ThreadPoolExecutor(0, 10, 10L, TimeUnit.SECONDS, new LinkedBlockingQueue<Runnable>(), tf); private volatile boolean active; public PartitionFastDiskBuffer(int partitionNumber) { this(new FastDiskBufferEnv(null, partitionNumber)); } public PartitionFastDiskBuffer(FastDiskBufferEnv env) { this.env = env; int concurrencyLevel = env.getConcurrencyDegree(); // Determine the degree of concurrency which is a power of 2 and closest // to what the user has indicated. For instance, if user specifies a // degree of concurrency of 5, the degree of concurrency we will be // using will be 8. This will allow a fairer hashing. This algorithm is // copied from java.util.concurrent.ConcurrentHashMap. 
int sshift = 0; int ssize = 1; while (ssize < concurrencyLevel) { ++sshift; ssize <<= 1; } segmentShift = 32 - sshift; this.segmentMask = ssize - 1; this.databases = new EntityStore[ssize]; setupDatabasePartition(env); this.active = true; } private synchronized void setupDatabasePartition(FastDiskBufferEnv env) { for (int i = 0; i < databases.length; i++) { try { this.databases[i] = env.openDiskStore(String.valueOf(i)); } catch (DatabaseException e) { databases[i] = null; } } } @SuppressWarnings("unchecked") private Set<String>[] groupInputFeeds(Set<String> feedIDs) { Set<String>[] groupFeeds = new Set[databases.length]; for (int i = 0; i < groupFeeds.length; i++) { groupFeeds[i] = new HashSet<String>(); } for (String feedID : feedIDs) { int segmentIndex = hash(feedID.hashCode()); groupFeeds[segmentIndex].add(feedID); } return groupFeeds; } @SuppressWarnings("unchecked") public Map<String, SortedMap<Long, Map<String, String>>> getData(Set<String> feedIDs, final TimeUnit timeUnit, final long startTime, final long endTime) { final ElapsedTimer timer = new ElapsedTimer(); timer.startInterval(); final Set<String>[] groupFeeds = groupInputFeeds(feedIDs); final Map<String, SortedMap<Long, Map<String, String>>>[] dataSlices = new Map[groupFeeds.length]; final CountDownLatch readLatch = new CountDownLatch(groupFeeds.length); for (int i = 0; i < groupFeeds.length; i++) { final int dataIndex = i; Runnable r = new Runnable() { @Override public void run() { try { Map<String, SortedMap<Long, Map<String, String>>> dataSlice = getData(databases[dataIndex], groupFeeds[dataIndex], timeUnit, startTime, endTime); if (dataSlice != null) { dataSlices[dataIndex] = dataSlice; } } finally { readLatch.countDown(); } } }; readThreads.execute(r); } try { readLatch.await(); } catch (InterruptedException e) { LOGGER.warn("Internal error during getData thread", e); } Map<String, SortedMap<Long, Map<String, String>>> returnedData = new HashMap<String, SortedMap<Long, Map<String, String>>>(); for (int i = 0; i < dataSlices.length; i++) { Map<String, SortedMap<Long, Map<String, String>>> dataSlice = dataSlices[i]; if (dataSlice != null) { returnedData.putAll(dataSlice); } } timer.stopInterval(); READ_PERF_LOGGER.debug("time to get 1 partition Data for {} feeds: {}", feedIDs.size(), timer .getIntervalInMillis()); return returnedData; } @SuppressWarnings("unchecked") @Override public Map<String, SortedMap<Long, Map<String, String>>> getLastData(Set<String> feedIDs, final TimeUnit timeUnit, final long startTime, final long endTime) { final ElapsedTimer timer = new ElapsedTimer(); timer.startInterval(); final Set<String>[] groupFeeds = groupInputFeeds(feedIDs); final Map<String, SortedMap<Long, Map<String, String>>>[] dataSlices = new Map[groupFeeds.length]; final CountDownLatch latch = new CountDownLatch(groupFeeds.length); for (int i = 0; i < groupFeeds.length; i++) { final int dataIndex = i; Runnable r = new Runnable() { @Override public void run() { try { Map<String, SortedMap<Long, Map<String, String>>> dataSlice = getLastData(databases[dataIndex], groupFeeds[dataIndex], timeUnit, startTime, endTime); if (dataSlice != null) { dataSlices[dataIndex] = dataSlice; } } finally { latch.countDown(); } } }; readThreads.execute(r); } try { latch.await(); } catch (InterruptedException e) { LOGGER.warn("Internal error during getLastData thread", e); } Map<String, SortedMap<Long, Map<String, String>>> returnedData = new HashMap<String, SortedMap<Long, Map<String, String>>>(); for (int i = 0; i < dataSlices.length; i++) { Map<String, 
SortedMap<Long, Map<String, String>>> dataSlice = dataSlices[i]; if (dataSlice != null) { returnedData.putAll(dataSlice); } } timer.stopInterval(); READ_PERF_LOGGER.debug("time to get 1 partition last Data for {} feeds: {}", feedIDs.size(), timer .getIntervalInMillis()); return returnedData; } private Map<String, SortedMap<Long, Map<String, String>>> getLastData(EntityStore db, Set<String> feedIDs, TimeUnit timeUnit, long startTime, long endTime) { Map<String, SortedMap<Long, Map<String, String>>> returnedData = new HashMap<String, SortedMap<Long, Map<String, String>>>(); startTime = TimeUnit.NANOSECONDS.convert(startTime, timeUnit); endTime = TimeUnit.NANOSECONDS.convert(endTime, timeUnit); PersistentBufferObjectAccess pObjectAccess = new PersistentBufferObjectAccess(db); PersistentBufferKey startKey = new PersistentBufferKey(); PersistentBufferKey endKey = new PersistentBufferKey(); for (String feedID : feedIDs) { startKey.feedID = feedID; startKey.timestamp = startTime; endKey.feedID = feedID; endKey.timestamp = endTime; EntityCursor<PersistentBufferObject> piCursor = pObjectAccess.pIdx.entities(startKey, true, endKey, true); try { PersistentBufferObject pObj = piCursor.last(); SortedMap<Long, Map<String, String>> data = new TreeMap<Long, Map<String, String>>(TIMESTAMP_COMPARATOR); returnedData.put(feedID, data); if (pObj != null) data.put(pObj.getKey().timestamp, pObj.getData()); } catch (DatabaseException e) { e.printStackTrace(); } finally { piCursor.close(); } } return returnedData; } private Map<String, SortedMap<Long, Map<String, String>>> getData(EntityStore db, Set<String> feedIDs, TimeUnit timeUnit, long startTime, long endTime) { Map<String, SortedMap<Long, Map<String, String>>> returnedData = new HashMap<String, SortedMap<Long, Map<String, String>>>(); startTime = TimeUnit.NANOSECONDS.convert(startTime, timeUnit); endTime = TimeUnit.NANOSECONDS.convert(endTime, timeUnit); PersistentBufferObjectAccess pObjectAccess = new PersistentBufferObjectAccess(db); PersistentBufferKey startKey = new PersistentBufferKey(); PersistentBufferKey endKey = new PersistentBufferKey(); for (String feedID : feedIDs) { startKey.feedID = feedID; startKey.timestamp = startTime; endKey.feedID = feedID; endKey.timestamp = endTime; EntityCursor<PersistentBufferObject> piCursor = pObjectAccess.pIdx.entities(startKey, true, endKey, true); try { for (PersistentBufferObject pObj : piCursor) { SortedMap<Long, Map<String, String>> data = returnedData.get(feedID); if (data == null) { data = new TreeMap<Long, Map<String, String>>(TIMESTAMP_COMPARATOR); returnedData.put(feedID, data); } data.put(pObj.getKey().timestamp, pObj.getData()); } } catch (DatabaseException e) { e.printStackTrace(); } finally { piCursor.close(); } } return returnedData; } @SuppressWarnings("unchecked") private Map<String, Map<Long, Map<String, String>>>[] groupInputDataByFeed( Map<String, Map<Long, Map<String, String>>> value) { Map[] groupInputData = new Map[databases.length]; for (int i = 0; i < groupInputData.length; i++) { groupInputData[i] = new HashMap<String, Map<Long, Map<String, String>>>(); } for (Entry<String, Map<Long, Map<String, String>>> entry : value.entrySet()) { int segmentIndex = hash(entry.getKey().hashCode()); groupInputData[segmentIndex].put(entry.getKey(), entry.getValue()); } return (Map<String, Map<Long, Map<String, String>>>[]) groupInputData; } public Map<String, PartitionTimestamps> putData(Map<String, Map<Long, Map<String, String>>> value, final TimeUnit timeUnit) throws BufferFullException { final 
ElapsedTimer timer = new ElapsedTimer(); timer.startInterval(); final Map<String, Map<Long, Map<String, String>>>[] groupData = groupInputDataByFeed(value); final Map<String, PartitionTimestamps> timestamps = new HashMap<String, PartitionTimestamps>(); final AtomicBoolean bufferFull = new AtomicBoolean(false); final CountDownLatch latch = new CountDownLatch(groupData.length); for (int i = 0; i < groupData.length; i++) { final int dataIndex = i; Runnable r = new Runnable() { @Override public void run() { try { for (Entry<String, Map<Long, Map<String, String>>> feedData : groupData[dataIndex].entrySet()) { PartitionTimestamps timeStamp = null; try { timeStamp = putData(null, feedData.getKey(), databases[dataIndex], timeUnit, feedData.getValue()); } catch (BufferFullException e) { bufferFull.compareAndSet(false, true); } if (timeStamp == null) { break; } else { timestamps.put(feedData.getKey(), timeStamp); } } } finally { latch.countDown(); } } }; writeThreads.execute(r); } try { latch.await(); } catch (InterruptedException e) { LOGGER.warn("Internal error during putData thread", e); } if (bufferFull.get()) { throw new BufferFullException(env.getErrorMsg()); } timer.stopInterval(); WRITE_PERF_LOGGER.debug("Time to write {} feeds: {}", value.size(), timer.getIntervalInMillis()); return timestamps; } @Override public void putData(Map<String, Map<Long, Map<String, String>>> value, final TimeUnit timeUnit, final MetaDataBuffer metadata, final int metadataIndex) throws BufferFullException { final ElapsedTimer timer = new ElapsedTimer(); timer.startInterval(); final Map<String, Map<Long, Map<String, String>>>[] groupData = groupInputDataByFeed(value); final AtomicBoolean bufferFull = new AtomicBoolean(false); final CountDownLatch latch = new CountDownLatch(groupData.length); for (int i = 0; i < groupData.length; i++) { final int dataIndex = i; Runnable r = new Runnable() { @Override public void run() { try { for (Entry<String, Map<Long, Map<String, String>>> feedData : groupData[dataIndex].entrySet()) { PartitionTimestamps timeStamp = null; try { timeStamp = putData(null, feedData.getKey(), databases[dataIndex], timeUnit, feedData.getValue()); } catch (BufferFullException e) { bufferFull.compareAndSet(false, true); } if (timeStamp == null) { break; } else { metadata.updatePartitionMetaData(metadataIndex, feedData.getKey(), timeStamp.getStartTimestamp(), timeStamp.getEndTimestamp()); } } } finally { latch.countDown(); } } }; writeThreads.execute(r); } try { latch.await(); } catch (InterruptedException e) { LOGGER.warn("Internal error during putData thread", e); } if (bufferFull.get()) { throw new BufferFullException(env.getErrorMsg()); } timer.stopInterval(); WRITE_PERF_LOGGER.debug("Time to write {} feeds: {}", value.size(), timer.getIntervalInMillis()); } private PartitionTimestamps putData(Transaction txn, String feedID, EntityStore db, TimeUnit timeUnit, Map<Long, Map<String, String>> entries) throws BufferFullException { long largestTime = 0; long smallestTime = 0; try { PersistentBufferObjectAccess pObjAccess = new PersistentBufferObjectAccess(db); for (Long time : entries.keySet()) { try { Map<String, String> value = entries.get(time); time = TimeUnit.NANOSECONDS.convert(time, timeUnit); LOGGER.debug("Putting data for feed {} with time {}", feedID, time); if (time.longValue() > largestTime) { largestTime = time.longValue(); } if (smallestTime == 0) { smallestTime = time.longValue(); } else if (time.longValue() < smallestTime) { smallestTime = time.longValue(); } PersistentBufferObject pObj = 
new PersistentBufferObject(); pObj.setKey(new PersistentBufferKey(feedID, time.longValue())); pObj.setData(value); pObjAccess.pIdx.putNoReturn(pObj); } catch (DatabaseException de) { largestTime = -1; LOGGER.error("Putting data for feed {} failed", feedID, de); if (env.isDiskBufferFull()) { LOGGER.error("[PartitionFastDiskBuffer]: " + env.getErrorMsg()); throw new BufferFullException(); } break; } } } catch (DatabaseException de) { largestTime = -1; LOGGER.error("Putting data for feed {} failed", feedID, de); if (env.isDiskBufferFull()) { LOGGER.error("[PartitionFastDiskBuffer]: " + env.getErrorMsg()); throw new BufferFullException(); } } return new PartitionTimestamps(smallestTime, largestTime); } private int hash(int h) { // Spread bits to regularize both segment and index locations, // using variant of single-word Wang/Jenkins hash. h += (h << 15) ^ 0xffffcd7d; h ^= (h >>> 10); h += (h << 3); h ^= (h >>> 6); h += (h << 2) + (h << 14); int i = h ^ (h >>> 16); return ((i >>> segmentShift) & segmentMask); } public synchronized void removeBuffer() { for (int i = 0; i < databases.length; i++) { try { if (databases[i] != null) { env.closeDatabase(databases[i]); databases[i] = null; } } catch (DatabaseException de) { LOGGER.debug("DatabaseException in closeBuffer", de); } } env.removeEnvironment(); } public synchronized void closeBuffer() { this.env.flush(); for (int i = 0; i < databases.length; i++) { try { if (databases[i] != null) { env.closeDatabase(databases[i]); databases[i] = null; } } catch (DatabaseException de) { LOGGER.debug("DatabaseException in closeBuffer", de); } } env.closeEnvironment(); } public synchronized boolean isClosed() { for (int i = 0; i < databases.length; i++) { if (databases[i] != null) { return false; } } return true; } private void closeDatabases() { for (int i = 0; i < databases.length; i++) { try { if (databases[i] != null) { env.closeDatabase(databases[i]); databases[i] = null; } } catch (DatabaseException de) { LOGGER.debug("DatabaseException in closeBuffer", de); } } } public synchronized void resetBuffer() { closeDatabases(); env.closeAndRestartEnvironment(); setupDatabasePartition(env); } public void inactive() { active = false; } public boolean isActive() { return active; } public DataBufferEnv getBufferEnv() { return env; } }
false
timeSequenceFeedAggregator_src_main_java_gov_nasa_arc_mct_buffer_disk_internal_PartitionFastDiskBuffer.java
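A worked example of the constructor's power-of-two sizing and the Wang/Jenkins spreader in hash(int); for a requested concurrency of 5 the buffer rounds up to 8 segments (sshift = 3, segmentShift = 29, segmentMask = 7). The feed ID below is a placeholder:

public class SegmentMath {
    public static void main(String[] args) {
        int concurrencyLevel = 5;
        int sshift = 0;
        int ssize = 1;
        while (ssize < concurrencyLevel) {  // same loop as the constructor
            ++sshift;
            ssize <<= 1;
        }
        int segmentShift = 32 - sshift;  // 29
        int segmentMask = ssize - 1;     // 7
        System.out.printf("databases=%d shift=%d mask=%d%n", ssize, segmentShift, segmentMask);

        // Same bit-spreader as PartitionFastDiskBuffer.hash(int)
        int h = "someFeedId".hashCode();
        h += (h << 15) ^ 0xffffcd7d;
        h ^= (h >>> 10);
        h += (h << 3);
        h ^= (h >>> 6);
        h += (h << 2) + (h << 14);
        int segment = ((h ^ (h >>> 16)) >>> segmentShift) & segmentMask;
        System.out.println("feed maps to segment " + segment);  // always in [0, 7]
    }
}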
36
private static final ThreadFactory tf = new ThreadFactory() {
    @Override
    public Thread newThread(Runnable r) {
        Thread t = Executors.defaultThreadFactory().newThread(r);
        t.setContextClassLoader(getClass().getClassLoader());
        return t;
    }
};
false
timeSequenceFeedAggregator_src_main_java_gov_nasa_arc_mct_buffer_disk_internal_PartitionFastDiskBuffer.java
37
Runnable r = new Runnable() {
    @Override
    public void run() {
        try {
            Map<String, SortedMap<Long, Map<String, String>>> dataSlice =
                    getData(databases[dataIndex], groupFeeds[dataIndex], timeUnit, startTime, endTime);
            if (dataSlice != null) {
                dataSlices[dataIndex] = dataSlice;
            }
        } finally {
            readLatch.countDown();
        }
    }
};
false
timeSequenceFeedAggregator_src_main_java_gov_nasa_arc_mct_buffer_disk_internal_PartitionFastDiskBuffer.java
38
Runnable r = new Runnable() {
    @Override
    public void run() {
        try {
            Map<String, SortedMap<Long, Map<String, String>>> dataSlice =
                    getLastData(databases[dataIndex], groupFeeds[dataIndex], timeUnit, startTime, endTime);
            if (dataSlice != null) {
                dataSlices[dataIndex] = dataSlice;
            }
        } finally {
            latch.countDown();
        }
    }
};
false
timeSequenceFeedAggregator_src_main_java_gov_nasa_arc_mct_buffer_disk_internal_PartitionFastDiskBuffer.java
39
Runnable r = new Runnable() {
    @Override
    public void run() {
        try {
            for (Entry<String, Map<Long, Map<String, String>>> feedData : groupData[dataIndex].entrySet()) {
                PartitionTimestamps timeStamp = null;
                try {
                    timeStamp = putData(null, feedData.getKey(), databases[dataIndex], timeUnit,
                            feedData.getValue());
                } catch (BufferFullException e) {
                    bufferFull.compareAndSet(false, true);
                }
                if (timeStamp == null) {
                    break;
                } else {
                    timestamps.put(feedData.getKey(), timeStamp);
                }
            }
        } finally {
            latch.countDown();
        }
    }
};
false
timeSequenceFeedAggregator_src_main_java_gov_nasa_arc_mct_buffer_disk_internal_PartitionFastDiskBuffer.java
40
Runnable r = new Runnable() {
    @Override
    public void run() {
        try {
            for (Entry<String, Map<Long, Map<String, String>>> feedData : groupData[dataIndex].entrySet()) {
                PartitionTimestamps timeStamp = null;
                try {
                    timeStamp = putData(null, feedData.getKey(), databases[dataIndex], timeUnit,
                            feedData.getValue());
                } catch (BufferFullException e) {
                    bufferFull.compareAndSet(false, true);
                }
                if (timeStamp == null) {
                    break;
                } else {
                    metadata.updatePartitionMetaData(metadataIndex, feedData.getKey(),
                            timeStamp.getStartTimestamp(), timeStamp.getEndTimestamp());
                }
            }
        } finally {
            latch.countDown();
        }
    }
};
false
timeSequenceFeedAggregator_src_main_java_gov_nasa_arc_mct_buffer_disk_internal_PartitionFastDiskBuffer.java
41
private static final class TimeStampComparator implements Comparator<Long>, Serializable {
    private static final long serialVersionUID = -665810351953536404L;

    @Override
    public int compare(Long o1, Long o2) {
        return o1.compareTo(o2);
    }
}
false
timeSequenceFeedAggregator_src_main_java_gov_nasa_arc_mct_buffer_disk_internal_PartitionFastDiskBuffer.java
42
@Persistent
public final class PartitionTimestamps implements Cloneable {
    private long startTimestamp;
    private long endTimestamp;

    public PartitionTimestamps() {
        //
    }

    public PartitionTimestamps(long startTimestamp, long endTimestamp) {
        this.startTimestamp = startTimestamp;
        this.endTimestamp = endTimestamp;
    }

    public long getStartTimestamp() {
        return startTimestamp;
    }

    public void setStartTimestamp(long startTimestamp) {
        this.startTimestamp = startTimestamp;
    }

    public long getEndTimestamp() {
        return endTimestamp;
    }

    public void setEndTimestamp(long endTimestamp) {
        this.endTimestamp = endTimestamp;
    }

    @Override
    public PartitionTimestamps clone() {
        try {
            return (PartitionTimestamps) super.clone();
        } catch (CloneNotSupportedException e) {
            throw new RuntimeException(e); // should never happen
        }
    }

    public void merge(long aStartTimestamp, long aEndTimestamp) {
        startTimestamp = Math.min(startTimestamp, aStartTimestamp);
        endTimestamp = Math.max(endTimestamp, aEndTimestamp);
    }

    public void merge(PartitionTimestamps timeStamp) {
        merge(timeStamp.startTimestamp, timeStamp.endTimestamp);
    }

    @Override
    public String toString() {
        return "[" + startTimestamp + ", " + endTimestamp + "]";
    }
}
false
timeSequenceFeedAggregator_src_main_java_gov_nasa_arc_mct_buffer_disk_internal_PartitionTimestamps.java
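A usage sketch of merge, which only widens the window; note the no-argument constructor starts at (0, 0), so merging into a default instance would pin the start timestamp at 0, which is why the buffers above always construct with real values first:

PartitionTimestamps window = new PartitionTimestamps(100L, 200L);
window.merge(150L, 400L);
System.out.println(window);  // [100, 400]: start keeps the min, end keeps the max
window.merge(new PartitionTimestamps(50L, 60L));
System.out.println(window);  // [50, 400]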
43
@Persistent
public class PersistentBufferKey {
    @KeyField(1) String feedID;
    @KeyField(2) Long timestamp;

    public PersistentBufferKey() {
        //
    }

    public PersistentBufferKey(String feedID, Long timestamp) {
        this.feedID = feedID;
        this.timestamp = timestamp;
    }
}
false
timeSequenceFeedAggregator_src_main_java_gov_nasa_arc_mct_buffer_disk_internal_PersistentBufferKey.java
44
@Entity
public class PersistentBufferObject {
    @PrimaryKey
    private PersistentBufferKey key;
    private Map<String, String> data;

    public PersistentBufferKey getKey() {
        return key;
    }

    public void setKey(PersistentBufferKey key) {
        this.key = key;
    }

    public Map<String, String> getData() {
        return data;
    }

    public void setData(Map<String, String> data) {
        this.data = data;
    }
}
false
timeSequenceFeedAggregator_src_main_java_gov_nasa_arc_mct_buffer_disk_internal_PersistentBufferObject.java
45
class PersistentBufferObjectAccess {
    PrimaryIndex<PersistentBufferKey, PersistentBufferObject> pIdx;

    public PersistentBufferObjectAccess(EntityStore store) throws DatabaseException {
        pIdx = store.getPrimaryIndex(PersistentBufferKey.class, PersistentBufferObject.class);
    }
}
false
timeSequenceFeedAggregator_src_main_java_gov_nasa_arc_mct_buffer_disk_internal_PersistentBufferObjectAccess.java
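The @KeyField order on PersistentBufferKey (feedID first, then timestamp) is what lets getData in PartitionFastDiskBuffer scan one feed's time range with an entity cursor. A condensed sketch of that range query; the feed ID is a placeholder, store is assumed to be an already-open EntityStore, and the package-private field access assumes same-package code:

PersistentBufferKey start = new PersistentBufferKey("feedA", 0L);
PersistentBufferKey end = new PersistentBufferKey("feedA", Long.MAX_VALUE);
PrimaryIndex<PersistentBufferKey, PersistentBufferObject> pIdx =
        store.getPrimaryIndex(PersistentBufferKey.class, PersistentBufferObject.class);
EntityCursor<PersistentBufferObject> cursor = pIdx.entities(start, true, end, true);
try {
    for (PersistentBufferObject pObj : cursor) {  // keys sort by feedID, then timestamp
        System.out.println(pObj.getKey().timestamp + " -> " + pObj.getData());
    }
} finally {
    cursor.close();
}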
46
final class TimestampKeyCreator implements SecondaryKeyCreator {
    private final TupleBinding<?> keyBinding;

    TimestampKeyCreator(TupleBinding<?> keyBinding) {
        this.keyBinding = keyBinding;
    }

    @Override
    public boolean createSecondaryKey(SecondaryDatabase secDb, DatabaseEntry keyEntry, DatabaseEntry valueEntry,
            DatabaseEntry resultEntry) throws DatabaseException {
        KeyValue kv = KeyValue.class.cast(keyBinding.entryToObject(keyEntry));
        long timestamp = kv.getTimestamp();
        TupleOutput to = new TupleOutput();
        to.writeLong(timestamp);
        resultEntry.setData(to.getBufferBytes());
        return true;
    }
}
false
timeSequenceFeedAggregator_src_main_java_gov_nasa_arc_mct_buffer_disk_internal_TimestampKeyCreator.java
47
public class BufferFullTest { private FeedAggregatorService feedAggregatorService; private Vector<DataArchive> dataArchives; private MockLogger mockLogger; private File bufferLocation; @BeforeMethod public void startup() throws Exception { Properties prop = new Properties(); prop.load(ClassLoader.getSystemResourceAsStream("properties/testFeed2.properties")); prop.put("buffer.partitions", "2"); prop.put("buffer.time.millis", "-1"); bufferLocation = File.createTempFile("mct-buffer", ""); bufferLocation.delete(); bufferLocation.mkdir(); prop.put("buffer.disk.loc", bufferLocation.toString()); feedAggregatorService = new FeedAggregatorService(prop); dataArchives = getDataArchives(); dataArchives.clear(); dataArchives.add(new MockBuffer(false)); dataArchives.add(new MockBuffer(true)); mockLogger = new MockLogger(); setMockLogger(); } @AfterMethod public void reset() { delete(bufferLocation); } private void delete(File f) { if (f.isDirectory()) { for (File f2 : f.listFiles()) { delete(f2); } } f.delete(); } @Test public void testBufferFull() throws InterruptedException { Map<String, String> testData = new HashMap<String, String>(); testData.put("key1", "value1"); Assert.assertEquals(dataArchives.size(), 2); Assert.assertFalse(mockLogger.errorLogged); feedAggregatorService.putData("testFeed", TimeUnit.MILLISECONDS, System.currentTimeMillis(), testData); Thread.sleep(5000); Assert.assertEquals(dataArchives.size(), 1); Assert.assertEquals(dataArchives.get(0).getLOS(), LOS.fast); Assert.assertTrue(mockLogger.errorLogged); } private void setMockLogger() throws Exception { Field f = feedAggregatorService.getClass().getDeclaredField("LOGGER"); f.setAccessible(true); f.set(null, mockLogger); } @SuppressWarnings("unchecked") private Vector<DataArchive> getDataArchives() throws Exception { Field f = feedAggregatorService.getClass().getDeclaredField("dataArchives"); f.setAccessible(true); return (Vector<DataArchive>)f.get(feedAggregatorService); } private static class MockLogger implements Logger { private static final long serialVersionUID = 531417069158028639L; private boolean errorLogged = false; @Override public void debug(String arg0) { // TODO Auto-generated method stub } @Override public void debug(String arg0, Object arg1) { // TODO Auto-generated method stub } @Override public void debug(String arg0, Object[] arg1) { // TODO Auto-generated method stub } @Override public void debug(String arg0, Throwable arg1) { // TODO Auto-generated method stub } @Override public void debug(Marker arg0, String arg1) { // TODO Auto-generated method stub } @Override public void debug(String arg0, Object arg1, Object arg2) { // TODO Auto-generated method stub } @Override public void debug(Marker arg0, String arg1, Object arg2) { // TODO Auto-generated method stub } @Override public void debug(Marker arg0, String arg1, Object[] arg2) { // TODO Auto-generated method stub } @Override public void debug(Marker arg0, String arg1, Throwable arg2) { // TODO Auto-generated method stub } @Override public void debug(Marker arg0, String arg1, Object arg2, Object arg3) { // TODO Auto-generated method stub } @Override public void error(String arg0) { this.errorLogged = true; } @Override public void error(String arg0, Object arg1) { // TODO Auto-generated method stub } @Override public void error(String arg0, Object[] arg1) { // TODO Auto-generated method stub } @Override public void error(String arg0, Throwable arg1) { // TODO Auto-generated method stub } @Override public void error(Marker arg0, String arg1) { // TODO 
Auto-generated method stub } @Override public void error(String arg0, Object arg1, Object arg2) { // TODO Auto-generated method stub } @Override public void error(Marker arg0, String arg1, Object arg2) { // TODO Auto-generated method stub } @Override public void error(Marker arg0, String arg1, Object[] arg2) { // TODO Auto-generated method stub } @Override public void error(Marker arg0, String arg1, Throwable arg2) { // TODO Auto-generated method stub } @Override public void error(Marker arg0, String arg1, Object arg2, Object arg3) { // TODO Auto-generated method stub } @Override public String getName() { // TODO Auto-generated method stub return null; } @Override public void info(String arg0) { // TODO Auto-generated method stub } @Override public void info(String arg0, Object arg1) { // TODO Auto-generated method stub } @Override public void info(String arg0, Object[] arg1) { // TODO Auto-generated method stub } @Override public void info(String arg0, Throwable arg1) { // TODO Auto-generated method stub } @Override public void info(Marker arg0, String arg1) { // TODO Auto-generated method stub } @Override public void info(String arg0, Object arg1, Object arg2) { // TODO Auto-generated method stub } @Override public void info(Marker arg0, String arg1, Object arg2) { // TODO Auto-generated method stub } @Override public void info(Marker arg0, String arg1, Object[] arg2) { // TODO Auto-generated method stub } @Override public void info(Marker arg0, String arg1, Throwable arg2) { // TODO Auto-generated method stub } @Override public void info(Marker arg0, String arg1, Object arg2, Object arg3) { // TODO Auto-generated method stub } @Override public boolean isDebugEnabled() { // TODO Auto-generated method stub return false; } @Override public boolean isDebugEnabled(Marker arg0) { // TODO Auto-generated method stub return false; } @Override public boolean isErrorEnabled() { // TODO Auto-generated method stub return false; } @Override public boolean isErrorEnabled(Marker arg0) { // TODO Auto-generated method stub return false; } @Override public boolean isInfoEnabled() { // TODO Auto-generated method stub return false; } @Override public boolean isInfoEnabled(Marker arg0) { // TODO Auto-generated method stub return false; } @Override public boolean isTraceEnabled() { // TODO Auto-generated method stub return false; } @Override public boolean isTraceEnabled(Marker arg0) { // TODO Auto-generated method stub return false; } @Override public boolean isWarnEnabled() { // TODO Auto-generated method stub return false; } @Override public boolean isWarnEnabled(Marker arg0) { // TODO Auto-generated method stub return false; } @Override public void trace(String arg0) { // TODO Auto-generated method stub } @Override public void trace(String arg0, Object arg1) { // TODO Auto-generated method stub } @Override public void trace(String arg0, Object[] arg1) { // TODO Auto-generated method stub } @Override public void trace(String arg0, Throwable arg1) { // TODO Auto-generated method stub } @Override public void trace(Marker arg0, String arg1) { // TODO Auto-generated method stub } @Override public void trace(String arg0, Object arg1, Object arg2) { // TODO Auto-generated method stub } @Override public void trace(Marker arg0, String arg1, Object arg2) { // TODO Auto-generated method stub } @Override public void trace(Marker arg0, String arg1, Object[] arg2) { // TODO Auto-generated method stub } @Override public void trace(Marker arg0, String arg1, Throwable arg2) { // TODO Auto-generated method stub } @Override 
public void trace(Marker arg0, String arg1, Object arg2, Object arg3) { // TODO Auto-generated method stub } @Override public void warn(String arg0) { // TODO Auto-generated method stub } @Override public void warn(String arg0, Object arg1) { // TODO Auto-generated method stub } @Override public void warn(String arg0, Object[] arg1) { // TODO Auto-generated method stub } @Override public void warn(String arg0, Throwable arg1) { // TODO Auto-generated method stub } @Override public void warn(Marker arg0, String arg1) { // TODO Auto-generated method stub } @Override public void warn(String arg0, Object arg1, Object arg2) { // TODO Auto-generated method stub } @Override public void warn(Marker arg0, String arg1, Object arg2) { // TODO Auto-generated method stub } @Override public void warn(Marker arg0, String arg1, Object[] arg2) { // TODO Auto-generated method stub } @Override public void warn(Marker arg0, String arg1, Throwable arg2) { // TODO Auto-generated method stub } @Override public void warn(Marker arg0, String arg1, Object arg2, Object arg3) { // TODO Auto-generated method stub } } private static class MockBuffer implements DataArchive, DataProvider { private boolean bufferFull; MockBuffer(boolean bufferFull) { this.bufferFull = bufferFull; } @Override public LOS getLOS() { if (bufferFull) { return LOS.medium; } return LOS.fast; } @Override public void putData(String feedID, TimeUnit timeUnit, Map<Long, Map<String, String>> entries) throws BufferFullException { if (bufferFull) { throw new BufferFullException("Test buffer full."); } } @Override public void putData(String feedID, TimeUnit timeUnit, long time, Map<String, String> value) throws BufferFullException { if (bufferFull) { throw new BufferFullException("Test buffer full."); } } @Override public void putData(Map<String, Map<Long, Map<String, String>>> value, TimeUnit timeUnit, Runnable callback) throws BufferFullException { if (bufferFull) { throw new BufferFullException("Test buffer full."); } } @Override public void reset() { // TODO Auto-generated method stub } @Override public Map<String, SortedMap<Long, Map<String, String>>> getData(Set<String> feedIDs, long startTime, long endTime, TimeUnit timeUnit) { // TODO Auto-generated method stub return null; } @Override public boolean isFullyWithinTimeSpan(String feedID, long startTime, TimeUnit timeUnit) { // TODO Auto-generated method stub return false; } @Override public Map<String, List<Map<String, String>>> getData(Set<String> feedIDs, TimeUnit timeUnit, long startTime, long endTime) { // TODO Auto-generated method stub return null; } } }
false
timeSequenceFeedAggregator_src_test_java_gov_nasa_arc_mct_buffer_internal_BufferFullTest.java
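BufferFullTest injects its MockLogger by overwriting FeedAggregatorService's private static LOGGER field; a generic, self-contained sketch of that reflection trick (the helper name is made up; it works only on non-final static fields, as here):

import java.lang.reflect.Field;

public class StaticFieldInjector {
    /** Overwrite a private static field on the given class; fails for static final fields. */
    static void setPrivateStatic(Class<?> owner, String fieldName, Object value)
            throws ReflectiveOperationException {
        Field f = owner.getDeclaredField(fieldName);
        f.setAccessible(true);  // lift the private access check
        f.set(null, value);     // null receiver because the field is static
    }
}

// Usage mirroring setMockLogger() above:
//   StaticFieldInjector.setPrivateStatic(feedAggregatorService.getClass(), "LOGGER", mockLogger);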
48
private static class MockBuffer implements DataArchive, DataProvider {
    private boolean bufferFull;

    MockBuffer(boolean bufferFull) {
        this.bufferFull = bufferFull;
    }

    @Override
    public LOS getLOS() {
        if (bufferFull) {
            return LOS.medium;
        }
        return LOS.fast;
    }

    @Override
    public void putData(String feedID, TimeUnit timeUnit, Map<Long, Map<String, String>> entries)
            throws BufferFullException {
        if (bufferFull) {
            throw new BufferFullException("Test buffer full.");
        }
    }

    @Override
    public void putData(String feedID, TimeUnit timeUnit, long time, Map<String, String> value)
            throws BufferFullException {
        if (bufferFull) {
            throw new BufferFullException("Test buffer full.");
        }
    }

    @Override
    public void putData(Map<String, Map<Long, Map<String, String>>> value, TimeUnit timeUnit, Runnable callback)
            throws BufferFullException {
        if (bufferFull) {
            throw new BufferFullException("Test buffer full.");
        }
    }

    @Override
    public void reset() {
        // TODO Auto-generated method stub
    }

    @Override
    public Map<String, SortedMap<Long, Map<String, String>>> getData(Set<String> feedIDs, long startTime,
            long endTime, TimeUnit timeUnit) {
        // TODO Auto-generated method stub
        return null;
    }

    @Override
    public boolean isFullyWithinTimeSpan(String feedID, long startTime, TimeUnit timeUnit) {
        // TODO Auto-generated method stub
        return false;
    }

    @Override
    public Map<String, List<Map<String, String>>> getData(Set<String> feedIDs, TimeUnit timeUnit, long startTime,
            long endTime) {
        // TODO Auto-generated method stub
        return null;
    }
}
false
timeSequenceFeedAggregator_src_test_java_gov_nasa_arc_mct_buffer_internal_BufferFullTest.java
49
private static class MockLogger implements Logger { private static final long serialVersionUID = 531417069158028639L; private boolean errorLogged = false; @Override public void debug(String arg0) { // TODO Auto-generated method stub } @Override public void debug(String arg0, Object arg1) { // TODO Auto-generated method stub } @Override public void debug(String arg0, Object[] arg1) { // TODO Auto-generated method stub } @Override public void debug(String arg0, Throwable arg1) { // TODO Auto-generated method stub } @Override public void debug(Marker arg0, String arg1) { // TODO Auto-generated method stub } @Override public void debug(String arg0, Object arg1, Object arg2) { // TODO Auto-generated method stub } @Override public void debug(Marker arg0, String arg1, Object arg2) { // TODO Auto-generated method stub } @Override public void debug(Marker arg0, String arg1, Object[] arg2) { // TODO Auto-generated method stub } @Override public void debug(Marker arg0, String arg1, Throwable arg2) { // TODO Auto-generated method stub } @Override public void debug(Marker arg0, String arg1, Object arg2, Object arg3) { // TODO Auto-generated method stub } @Override public void error(String arg0) { this.errorLogged = true; } @Override public void error(String arg0, Object arg1) { // TODO Auto-generated method stub } @Override public void error(String arg0, Object[] arg1) { // TODO Auto-generated method stub } @Override public void error(String arg0, Throwable arg1) { // TODO Auto-generated method stub } @Override public void error(Marker arg0, String arg1) { // TODO Auto-generated method stub } @Override public void error(String arg0, Object arg1, Object arg2) { // TODO Auto-generated method stub } @Override public void error(Marker arg0, String arg1, Object arg2) { // TODO Auto-generated method stub } @Override public void error(Marker arg0, String arg1, Object[] arg2) { // TODO Auto-generated method stub } @Override public void error(Marker arg0, String arg1, Throwable arg2) { // TODO Auto-generated method stub } @Override public void error(Marker arg0, String arg1, Object arg2, Object arg3) { // TODO Auto-generated method stub } @Override public String getName() { // TODO Auto-generated method stub return null; } @Override public void info(String arg0) { // TODO Auto-generated method stub } @Override public void info(String arg0, Object arg1) { // TODO Auto-generated method stub } @Override public void info(String arg0, Object[] arg1) { // TODO Auto-generated method stub } @Override public void info(String arg0, Throwable arg1) { // TODO Auto-generated method stub } @Override public void info(Marker arg0, String arg1) { // TODO Auto-generated method stub } @Override public void info(String arg0, Object arg1, Object arg2) { // TODO Auto-generated method stub } @Override public void info(Marker arg0, String arg1, Object arg2) { // TODO Auto-generated method stub } @Override public void info(Marker arg0, String arg1, Object[] arg2) { // TODO Auto-generated method stub } @Override public void info(Marker arg0, String arg1, Throwable arg2) { // TODO Auto-generated method stub } @Override public void info(Marker arg0, String arg1, Object arg2, Object arg3) { // TODO Auto-generated method stub } @Override public boolean isDebugEnabled() { // TODO Auto-generated method stub return false; } @Override public boolean isDebugEnabled(Marker arg0) { // TODO Auto-generated method stub return false; } @Override public boolean isErrorEnabled() { // TODO Auto-generated method stub return false; } @Override public 
boolean isErrorEnabled(Marker arg0) { // TODO Auto-generated method stub return false; } @Override public boolean isInfoEnabled() { // TODO Auto-generated method stub return false; } @Override public boolean isInfoEnabled(Marker arg0) { // TODO Auto-generated method stub return false; } @Override public boolean isTraceEnabled() { // TODO Auto-generated method stub return false; } @Override public boolean isTraceEnabled(Marker arg0) { // TODO Auto-generated method stub return false; } @Override public boolean isWarnEnabled() { // TODO Auto-generated method stub return false; } @Override public boolean isWarnEnabled(Marker arg0) { // TODO Auto-generated method stub return false; } @Override public void trace(String arg0) { // TODO Auto-generated method stub } @Override public void trace(String arg0, Object arg1) { // TODO Auto-generated method stub } @Override public void trace(String arg0, Object[] arg1) { // TODO Auto-generated method stub } @Override public void trace(String arg0, Throwable arg1) { // TODO Auto-generated method stub } @Override public void trace(Marker arg0, String arg1) { // TODO Auto-generated method stub } @Override public void trace(String arg0, Object arg1, Object arg2) { // TODO Auto-generated method stub } @Override public void trace(Marker arg0, String arg1, Object arg2) { // TODO Auto-generated method stub } @Override public void trace(Marker arg0, String arg1, Object[] arg2) { // TODO Auto-generated method stub } @Override public void trace(Marker arg0, String arg1, Throwable arg2) { // TODO Auto-generated method stub } @Override public void trace(Marker arg0, String arg1, Object arg2, Object arg3) { // TODO Auto-generated method stub } @Override public void warn(String arg0) { // TODO Auto-generated method stub } @Override public void warn(String arg0, Object arg1) { // TODO Auto-generated method stub } @Override public void warn(String arg0, Object[] arg1) { // TODO Auto-generated method stub } @Override public void warn(String arg0, Throwable arg1) { // TODO Auto-generated method stub } @Override public void warn(Marker arg0, String arg1) { // TODO Auto-generated method stub } @Override public void warn(String arg0, Object arg1, Object arg2) { // TODO Auto-generated method stub } @Override public void warn(Marker arg0, String arg1, Object arg2) { // TODO Auto-generated method stub } @Override public void warn(Marker arg0, String arg1, Object[] arg2) { // TODO Auto-generated method stub } @Override public void warn(Marker arg0, String arg1, Throwable arg2) { // TODO Auto-generated method stub } @Override public void warn(Marker arg0, String arg1, Object arg2, Object arg3) { // TODO Auto-generated method stub } }
false
timeSequenceFeedAggregator_src_test_java_gov_nasa_arc_mct_buffer_internal_BufferFullTest.java
50
public final class CODataBuffer extends DataBuffer implements DataArchive, DataProvider { private static final Logger LOGGER = LoggerFactory.getLogger(CODataBuffer.class); private static final Logger PERF_READ_LOGGER = LoggerFactory .getLogger("gov.nasa.arc.mct.performance.read.codbuffer"); private static final Logger PERF_WRITE_LOGGER = LoggerFactory .getLogger("gov.nasa.arc.mct.performance.write.codbuffer"); private static final Logger PERF_LOGGER = LoggerFactory .getLogger("gov.nasa.arc.mct.performance.codbuffer"); CODataBuffer(DataBufferEnv env, DataBufferHelper partitionBufferFactory) { super(env, partitionBufferFactory); } @Override protected void setupPartitionBuffers(DataBufferEnv env, DataBufferHelper partitionBufferFactory) { for (int i=0; i<partitionDataBuffers.length; i++) { this.partitionDataBuffers[i] = new AtomicReference<PartitionDataBuffer>(); } if (env == null) { for (int i=0; i<partitionDataBuffers.length; i++) { this.partitionDataBuffers[i].set(partitionBufferFactory.newPartitionBuffer(i)); if (i == metaDataBuffer.getCurrentPartition()) { this.currentParition = this.partitionDataBuffers[i].get(); } else { this.partitionDataBuffers[i].get().inactive(); } } } else { this.currentParition = partitionBufferFactory.newPartitionBuffer(env); this.partitionDataBuffers[currentParition.getBufferEnv().getCurrentBufferPartition()].set(currentParition); } } private final static class FeedRequestContext { private final String feedID; private final boolean getLastDataIfNeeded; public FeedRequestContext(String feedID, boolean getLastDataIfNeeded) { this.feedID = feedID; this.getLastDataIfNeeded = getLastDataIfNeeded; } @Override public boolean equals(Object obj) { if (obj == null || ! (obj instanceof FeedRequestContext)) { return false; } return feedID.equals(FeedRequestContext.class.cast(obj).feedID); } @Override public int hashCode() { return feedID.hashCode(); } @Override public String toString() { return ("feedID: " + feedID + ", lastDataRequired: " + getLastDataIfNeeded); } } @SuppressWarnings("unchecked") private Map<String, FeedRequestContext>[] mapFeedsToPartitions(Set<String> feedIDs, long startTime, long endTime, TimeUnit timeUnit) { Map<String, FeedRequestContext>[] partitionFeeds = new Map[this.currentParition.getBufferEnv().getNumOfBufferPartitions()]; int startPartition = this.currentParition.getBufferEnv().getCurrentBufferPartition(); int i = startPartition; do { Map<String, FeedRequestContext> feedsForThisPartition = null; for (Iterator<String> it = feedIDs.iterator(); it.hasNext(); ) { String feedID = it.next(); if (metaDataBuffer.hasFeed(i, feedID)) { feedsForThisPartition = partitionFeeds[i]; if (feedsForThisPartition == null) { feedsForThisPartition = new HashMap<String, FeedRequestContext>(); partitionFeeds[i] = feedsForThisPartition; } FeedRequestContext frc = null; if (metaDataBuffer.isFullyWithinTimeSpan(i, feedID, timeUnit, startTime)) { frc = new FeedRequestContext(feedID, true); it.remove(); } else { frc = new FeedRequestContext(feedID, false); } feedsForThisPartition.put(feedID, frc); } } i = this.currentParition.getBufferEnv().previousBufferPartition(i); } while (i != startPartition); return partitionFeeds; } @Override public Map<String, SortedMap<Long, Map<String, String>>> getData(Set<String> feedIDs, long startTime, long endTime, TimeUnit timeUnit) { Set<String> requestFeedIDs = new HashSet<String>(feedIDs); Map<String, FeedRequestContext>[] partitionFeeds = mapFeedsToPartitions(requestFeedIDs, startTime, endTime, timeUnit); synchronized 
(movePartitionLock) {
            if (reset) return Collections.emptyMap();
        }

        Map<String, SortedMap<Long, Map<String, String>>> aggregateData = new HashMap<String, SortedMap<Long, Map<String, String>>>();
        for (int i = 0; i < partitionFeeds.length; i++) {
            Map<String, FeedRequestContext> partitionFeed = partitionFeeds[i];
            if (partitionFeed != null) {
                PartitionDataBuffer partitionBuffer = this.partitionDataBuffers[i].get();
                Map<String, SortedMap<Long, Map<String, String>>> data = getData(partitionBuffer, partitionFeed, timeUnit, startTime, endTime);
                for (Entry<String, SortedMap<Long, Map<String, String>>> entry : data.entrySet()) {
                    SortedMap<Long, Map<String, String>> cumulativeData = aggregateData.get(entry.getKey());
                    if (cumulativeData != null) {
                        cumulativeData.putAll(entry.getValue());
                    } else {
                        aggregateData.put(entry.getKey(), entry.getValue());
                    }
                }
            }
        }
        return aggregateData;
    }

    @Override
    public Map<String, List<Map<String, String>>> getData(Set<String> feedIDs, TimeUnit timeUnit, long startTime, long endTime) {
        Set<String> requestFeedIDs = new HashSet<String>(feedIDs);
        Map<String, FeedRequestContext>[] partitionFeeds = mapFeedsToPartitions(requestFeedIDs, startTime, endTime, timeUnit);
        synchronized (movePartitionLock) {
            if (reset) return Collections.emptyMap();
        }

        Map<String, List<Map<String, String>>> aggregateData = new HashMap<String, List<Map<String, String>>>();
        for (int i = 0; i < partitionFeeds.length; i++) {
            Map<String, FeedRequestContext> partitionFeed = partitionFeeds[i];
            if (partitionFeed != null) {
                PartitionDataBuffer partitionBuffer = this.partitionDataBuffers[i].get();
                Map<String, SortedMap<Long, Map<String, String>>> data = getData(partitionBuffer, partitionFeed, timeUnit, startTime, endTime);
                for (Entry<String, SortedMap<Long, Map<String, String>>> entry : data.entrySet()) {
                    List<Map<String, String>> cumulativeData = aggregateData.get(entry.getKey());
                    if (cumulativeData != null) {
                        cumulativeData.addAll(0, entry.getValue().values());
                    } else {
                        aggregateData.put(entry.getKey(), new LinkedList<Map<String, String>>(entry.getValue().values()));
                    }
                }
            }
        }
        return aggregateData;
    }

    private Map<String, SortedMap<Long, Map<String, String>>> getData(PartitionDataBuffer partitionDataBuffer,
            Map<String, FeedRequestContext> feedRequestContexts, TimeUnit timeUnit, long startTime, long endTime) {
        synchronized (movePartitionLock) {
            if (reset) return Collections.emptyMap();
            while (moveParitionInProgress) {
                try {
                    movePartitionLock.wait();
                } catch (InterruptedException e) {
                    // ignore
                }
            }
            readInProgress = true;
        }

        try {
            final ElapsedTimer timer = new ElapsedTimer();
            timer.startInterval();

            Map<String, SortedMap<Long, Map<String, String>>> returnedData = partitionDataBuffer.getData(feedRequestContexts.keySet(), timeUnit, startTime, endTime);
            PERF_READ_LOGGER.debug("Get Regular Data feeds: {} from partition: {}", returnedData, partitionDataBuffer.getBufferEnv().getCurrentBufferPartition());

            for (Iterator<Entry<String, FeedRequestContext>> it = feedRequestContexts.entrySet().iterator(); it.hasNext();) {
                Entry<String, FeedRequestContext> entry = it.next();
                String feedID = entry.getKey();
                SortedMap<Long, Map<String, String>> data = returnedData.get(feedID);
                boolean needPrevPoint = true;
                if (data != null && !data.isEmpty()) {
                    long firstPointTS = data.firstKey();
                    needPrevPoint = firstPointTS > TimeUnit.NANOSECONDS.convert(startTime, timeUnit);
                }
                if (!entry.getValue().getLastDataIfNeeded || !needPrevPoint) {
                    it.remove();
                }
            }

            if (!feedRequestContexts.isEmpty()) {
                Set<String> feedIDs = feedRequestContexts.keySet();
                Map<String, SortedMap<Long, Map<String, String>>> lastData = partitionDataBuffer.getLastData(feedIDs, timeUnit, 0, startTime);
                for (Entry<String, SortedMap<Long, Map<String, String>>> entry : lastData.entrySet()) {
                    String feedID = entry.getKey();
                    SortedMap<Long, Map<String, String>> data = entry.getValue();
                    if (data != null && !data.isEmpty()) {
                        SortedMap<Long, Map<String, String>> feedData = returnedData.get(feedID);
                        if (feedData == null) {
                            feedData = new TreeMap<Long, Map<String, String>>();
                            returnedData.put(feedID, feedData);
                        }
                        Long ts = data.firstKey();
                        feedData.put(ts, data.get(ts));
                    }
                }
                PERF_READ_LOGGER.debug("Get Last Data feeds: {} from partition: {} ", returnedData, partitionDataBuffer.getBufferEnv().getCurrentBufferPartition());
            }

            timer.stopInterval();
            LOGGER.debug("time to get Data for feeds {}: {}", feedRequestContexts, timer.getIntervalInMillis());
            return returnedData;
        } finally {
            synchronized (movePartitionLock) {
                readInProgress = false;
                movePartitionLock.notifyAll();
            }
        }
    }

    @Override
    public void putData(String feedID, TimeUnit timeUnit, Map<Long, Map<String, String>> entries) throws BufferFullException {
        synchronized (movePartitionLock) {
            if (reset) return;
        }

        final ElapsedTimer timer = new ElapsedTimer();
        timer.startInterval();

        Map<String, Map<Long, Map<String, String>>> feedDataToPut = new HashMap<String, Map<Long, Map<String, String>>>();
        feedDataToPut.put(feedID, entries);

        int i = this.currentParition.getBufferEnv().getCurrentBufferPartition();
        int startPartition = i;
        do {
            PartitionDataBuffer partitionBuffer = this.partitionDataBuffers[i].get();
            if (partitionBuffer == null || !partitionBuffer.isActive()) {
                break;
            }
            LOGGER.debug("Putting in partition {}", i);
            Map<String, PartitionTimestamps> timeStamps = putData(partitionBuffer, feedDataToPut, timeUnit);
            if (timeStamps != null) {
                metaDataBuffer.updatePartitionMetaData(partitionBuffer.getBufferEnv().getCurrentBufferPartition(), timeStamps);
            }
            i = (i + 1) % this.currentParition.getBufferEnv().getNumOfBufferPartitions();
        } while (i != startPartition);

        timer.stopInterval();
        PERF_LOGGER.debug("Time to save data for feed {}: {}", feedID, timer.getIntervalInMillis());
    }

    private void putData(PartitionDataBuffer partitionBuffer, Map<String, Map<Long, Map<String, String>>> value,
            TimeUnit timeUnit, MetaDataBuffer metadata, int metadataIndex) throws BufferFullException {
        synchronized (movePartitionLock) {
            if (reset) return;
            while (moveParitionInProgress) {
                try {
                    movePartitionLock.wait();
                } catch (InterruptedException e) {
                    // ignore
                }
            }
            writeInProgress = true;
        }

        if (this.partitionDataBuffers[partitionBuffer.getBufferEnv().getCurrentBufferPartition()].get() == null) {
            return;
        }

        try {
            partitionBuffer.putData(value, timeUnit, metadata, metadataIndex);
        } finally {
            synchronized (movePartitionLock) {
                writeInProgress = false;
                movePartitionLock.notifyAll();
            }
        }
    }

    private Map<String, PartitionTimestamps> putData(PartitionDataBuffer partitionBuffer,
            Map<String, Map<Long, Map<String, String>>> value, TimeUnit timeUnit) throws BufferFullException {
        synchronized (movePartitionLock) {
            if (reset) return null;
            while (moveParitionInProgress) {
                try {
                    movePartitionLock.wait();
                } catch (InterruptedException e) {
                    // ignore
                }
            }
            writeInProgress = true;
        }

        if (this.partitionDataBuffers[partitionBuffer.getBufferEnv().getCurrentBufferPartition()].get() == null) {
            return null;
        }

        try {
            return partitionBuffer.putData(value, timeUnit);
        } finally {
            synchronized (movePartitionLock) {
                writeInProgress = false;
                movePartitionLock.notifyAll();
            }
        }
    }

    @Override
    public void putData(Map<String, Map<Long, Map<String, String>>> value, TimeUnit timeUnit, Runnable callback) throws BufferFullException {
        PERF_WRITE_LOGGER.debug("COD Putting data for {} feeds", value);
        synchronized (movePartitionLock) {
            if (reset) return;
        }

        final ElapsedTimer timer = new ElapsedTimer();
        timer.startInterval();

        int i = this.currentParition.getBufferEnv().getCurrentBufferPartition();
        int startPartition = i;
        do {
            PartitionDataBuffer partitionBuffer = this.partitionDataBuffers[i].get();
            if (partitionBuffer == null || !partitionBuffer.isActive()) {
                break;
            }
            LOGGER.debug("Putting in partition {}", i);
            putData(partitionBuffer, value, timeUnit, metaDataBuffer, i);
            timer.stopInterval();
            PERF_LOGGER.debug("Time to save data for {} feeds: {}", value.size(), timer.getIntervalInMillis());
            i = (i + 1) % currentParition.getBufferEnv().getNumOfBufferPartitions();
        } while (i != startPartition);

        if (callback != null) {
            callback.run();
        }
    }

    @Override
    public void putData(String feedID, TimeUnit timeUnit, long time, Map<String, String> value) throws BufferFullException {
        synchronized (movePartitionLock) {
            if (reset) return;
        }

        final ElapsedTimer timer = new ElapsedTimer();
        timer.startInterval();

        Map<Long, Map<String, String>> dataToPut = new HashMap<Long, Map<String, String>>();
        dataToPut.put(Long.valueOf(time), value);
        Map<String, Map<Long, Map<String, String>>> feedDataToPut = new HashMap<String, Map<Long, Map<String, String>>>();
        feedDataToPut.put(feedID, dataToPut);

        int i = this.currentParition.getBufferEnv().getCurrentBufferPartition();
        int startPartition = i;
        do {
            PartitionDataBuffer partitionBuffer = this.partitionDataBuffers[i].get();
            if (partitionBuffer == null || !partitionBuffer.isActive()) {
                break;
            }
            LOGGER.debug("Putting in partition {}", i);
            Map<String, PartitionTimestamps> timeStamps = putData(partitionBuffer, feedDataToPut, timeUnit);
            if (timeStamps != null) {
                metaDataBuffer.updatePartitionMetaData(partitionBuffer.getBufferEnv().getCurrentBufferPartition(), timeStamps);
            }
            i = (i + 1) % this.currentParition.getBufferEnv().getNumOfBufferPartitions();
        } while (i != startPartition);

        timer.stopInterval();
        PERF_LOGGER.debug("Time to save data for feed {}: {}", feedID, timer.getIntervalInMillis());
    }
}
false
timeSequenceFeedAggregator_src_main_java_gov_nasa_arc_mct_buffer_internal_CODataBuffer.java
51
private final static class FeedRequestContext {
    private final String feedID;
    private final boolean getLastDataIfNeeded;

    public FeedRequestContext(String feedID, boolean getLastDataIfNeeded) {
        this.feedID = feedID;
        this.getLastDataIfNeeded = getLastDataIfNeeded;
    }

    @Override
    public boolean equals(Object obj) {
        if (obj == null || !(obj instanceof FeedRequestContext)) {
            return false;
        }
        return feedID.equals(FeedRequestContext.class.cast(obj).feedID);
    }

    @Override
    public int hashCode() {
        return feedID.hashCode();
    }

    @Override
    public String toString() {
        return ("feedID: " + feedID + ", lastDataRequired: " + getLastDataIfNeeded);
    }
}
false
timeSequenceFeedAggregator_src_main_java_gov_nasa_arc_mct_buffer_internal_CODataBuffer.java
52
public class CODataBufferTest {
    private DataBuffer codataBuffer;
    private String testFeedID1 = "TestPui1";
    private String testFeedID2 = "TestPui2";
    private File bufferLocation;

    @BeforeMethod
    public void setup() throws IOException {
        Properties prop = new Properties();
        prop.load(ClassLoader.getSystemResourceAsStream("properties/testFeed.properties"));
        prop.put("buffer.partitions", "2");
        prop.put("buffer.time.millis", "-1");
        bufferLocation = File.createTempFile("mct-buffer", "");
        bufferLocation.delete();
        bufferLocation.mkdir();
        prop.put("buffer.disk.loc", bufferLocation.toString());
        DataBufferFactory.reset();
        codataBuffer = DataBufferFactory.getFastDiskDataBuffer(prop);
        if (codataBuffer.isDataBufferClose()) {
            codataBuffer.reset();
        }
    }

    @AfterMethod
    public void reset() {
        if (codataBuffer != null) {
            codataBuffer.closeBuffer();
        }
        DataBufferFactory.reset();
        delete(bufferLocation);
    }

    private void delete(File f) {
        if (f.isDirectory()) {
            for (File f2 : f.listFiles()) {
                delete(f2);
            }
        }
        f.delete();
    }

    @Test
    public void CODReadTest() throws Exception {
        int currentPartition = getCurrentBufferPartition(codataBuffer);
        Assert.assertEquals(currentPartition, 0);
        Map<String, String> value = new HashMap<String, String>();
        value.put("value", "1.3");
        value.put("status", "ok");
        long time = System.currentTimeMillis();
        long nanotime = TimeUnit.NANOSECONDS.convert(time, TimeUnit.MILLISECONDS);
        codataBuffer.putData(testFeedID1, TimeUnit.MILLISECONDS, time, value);

        codataBuffer.prepareForNextPartition();
        currentPartition = getCurrentBufferPartition(codataBuffer);
        Assert.assertEquals(currentPartition, 0);
        codataBuffer.moveToNextPartition();
        currentPartition = getCurrentBufferPartition(codataBuffer);
        Assert.assertEquals(currentPartition, 1);

        List<Map<String, String>> returnData = codataBuffer.getData(Collections.singleton(testFeedID1), TimeUnit.NANOSECONDS, nanotime + 1000, nanotime + 2000).get(testFeedID1);
        Assert.assertEquals(returnData.size(), 1);
        Map<String, String> returnValue = returnData.get(0);
        Assert.assertNotSame(returnValue, value);
        assertHasSameValue(returnValue, value);
    }

    @Test
    public void prevPointTest() throws Exception {
        int currentPartition = getCurrentBufferPartition(codataBuffer);
        Assert.assertEquals(currentPartition, 0);
        Map<String, String> value = new HashMap<String, String>();
        value.put("value", "1.3");
        value.put("status", "ok");
        long time = System.currentTimeMillis();
        long nanotime = TimeUnit.NANOSECONDS.convert(time, TimeUnit.MILLISECONDS);
        codataBuffer.putData(testFeedID1, TimeUnit.MILLISECONDS, time, value);
        Assert.assertEquals(getCurrentBufferPartition(codataBuffer), 0);

        Map<String, String> value2 = new HashMap<String, String>();
        value2.put("value", "1.4");
        value2.put("status", "ok");
        codataBuffer.putData(testFeedID1, TimeUnit.MILLISECONDS, time + 1, value2);
        Assert.assertEquals(getCurrentBufferPartition(codataBuffer), 0);

        List<Map<String, String>> returnData = codataBuffer.getData(Collections.singleton(testFeedID1), TimeUnit.NANOSECONDS, nanotime + 2000000, nanotime + 3000000).get(testFeedID1);
        Assert.assertEquals(returnData.size(), 1);
        Map<String, String> returnValue = returnData.get(0);
        Assert.assertNotSame(returnValue, value2);
        assertHasSameValue(returnValue, value2);
    }

    @Test
    public void noNextPointTest() throws Exception {
        int currentPartition = getCurrentBufferPartition(codataBuffer);
        Assert.assertEquals(currentPartition, 0);
        Map<String, String> value = new HashMap<String, String>();
        value.put("value", "1.3");
        value.put("status", "ok");
        long time = System.currentTimeMillis();
        long nanotime = TimeUnit.NANOSECONDS.convert(time, TimeUnit.MILLISECONDS);
        codataBuffer.putData(testFeedID1, TimeUnit.MILLISECONDS, time, value);
        List<Map<String, String>> returnData = codataBuffer.getData(Collections.singleton(testFeedID1), TimeUnit.NANOSECONDS, nanotime - 2000, nanotime - 1000).get(testFeedID1);
        Assert.assertNull(returnData);
    }

    @Test
    public void putDataTimeRangeTest() throws Exception {
        long time = System.currentTimeMillis();
        Map<String, String> value = new HashMap<String, String>();
        value.put("value", "1.3");
        value.put("status", "ok");
        Map<Long, Map<String, String>> feedData1 = new HashMap<Long, Map<String, String>>();
        feedData1.put(time, value);

        Map<String, String> value2 = new HashMap<String, String>();
        value2.put("value", "1.4");
        value2.put("status", "ok");
        Map<Long, Map<String, String>> feedData2 = new HashMap<Long, Map<String, String>>();
        feedData2.put(time + 100, value2);

        Map<String, Map<Long, Map<String, String>>> data = new HashMap<String, Map<Long, Map<String, String>>>();
        data.put(testFeedID1, feedData1);
        data.put(testFeedID2, feedData2);
        codataBuffer.putData(data, TimeUnit.MILLISECONDS, null);

        long nanotime = TimeUnit.NANOSECONDS.convert(time, TimeUnit.MILLISECONDS);
        long nanotime2 = TimeUnit.NANOSECONDS.convert(time + 100, TimeUnit.MILLISECONDS);
        Assert.assertEquals(codataBuffer.metaDataBuffer.getStartTimestamp(0, testFeedID1), nanotime);
        Assert.assertEquals(codataBuffer.metaDataBuffer.getEndTimestamp(0, testFeedID1), nanotime);
        Assert.assertEquals(codataBuffer.metaDataBuffer.getStartTimestamp(0, testFeedID2), nanotime2);
        Assert.assertEquals(codataBuffer.metaDataBuffer.getEndTimestamp(0, testFeedID2), nanotime2);
    }

    private void assertHasSameValue(Map<String, String> actualValue, Map<String, String> expectedValue) {
        Assert.assertEquals(actualValue.size(), expectedValue.size());
        for (String key : actualValue.keySet()) {
            Assert.assertEquals(actualValue.get(key), expectedValue.get(key));
        }
    }

    private int getCurrentBufferPartition(DataBuffer dataBuffer) throws Exception {
        Field f = DataBuffer.class.getDeclaredField("currentParition");
        f.setAccessible(true);
        PartitionDataBuffer currentPartitionBuffer = (PartitionDataBuffer) f.get(dataBuffer);
        return currentPartitionBuffer.getBufferEnv().getCurrentBufferPartition();
    }
}
false
timeSequenceFeedAggregator_src_test_java_gov_nasa_arc_mct_buffer_internal_CODataBufferTest.java
53
public class DataBuffer implements DataArchive, DataProvider {
    private static final Logger LOGGER = LoggerFactory.getLogger(DataBuffer.class);
    private static final Logger PERF_LOGGER = LoggerFactory.getLogger("gov.nasa.arc.mct.performance.buffer");

    protected final AtomicReference<PartitionDataBuffer>[] partitionDataBuffers;
    protected MetaDataBuffer metaDataBuffer;
    protected volatile PartitionDataBuffer currentParition;
    protected DataBufferEvictor evictor;
    protected final Object movePartitionLock = new Object();
    protected final Object resetLock = new Object();
    protected boolean readInProgress = false;
    protected boolean writeInProgress = false;
    protected boolean moveParitionInProgress = false;
    protected volatile boolean reset = false;
    protected volatile boolean prepareNewPartitionInProgress = false;
    protected final DataBufferHelper dataBufferHelper;

    @SuppressWarnings("unchecked")
    DataBuffer(DataBufferEnv env, DataBufferHelper partitionBufferFactory) {
        this.dataBufferHelper = partitionBufferFactory;
        if (env == null) {
            metaDataBuffer = partitionBufferFactory.newMetaDataBuffer(null);
        } else {
            metaDataBuffer = partitionBufferFactory.newMetaDataBuffer(partitionBufferFactory.newMetaDataBufferEnv(env.getConfigProperties()));
        }
        this.partitionDataBuffers = new AtomicReference[metaDataBuffer.getNumOfPartitions()];
        setupPartitionBuffers(env, partitionBufferFactory);
        startEvictor();
    }

    protected void setupPartitionBuffers(DataBufferEnv env, DataBufferHelper partitionBufferFactory) {
        PartitionDataBuffer partitionBuffer;
        if (env == null) {
            partitionBuffer = partitionBufferFactory.newPartitionBuffer(metaDataBuffer.getCurrentPartition());
        } else {
            partitionBuffer = partitionBufferFactory.newPartitionBuffer(env);
        }
        this.currentParition = partitionBuffer;
        DataBufferEnv currentEnv = currentParition.getBufferEnv();
        for (int i = 0; i < partitionDataBuffers.length; i++) {
            this.partitionDataBuffers[i] = new AtomicReference<PartitionDataBuffer>();
        }
        this.partitionDataBuffers[currentEnv.getCurrentBufferPartition()].set(currentParition);
    }

    private void startEvictor() {
        DataBufferEnv currentEnv = currentParition.getBufferEnv();
        if (currentEnv.getNumOfBufferPartitions() > 1 && currentEnv.getBufferTime() != -1) {
            this.evictor = new DataBufferEvictor(this, currentEnv.getBufferTime() - currentEnv.getBufferPartitionOverlap(), currentEnv.getBufferPartitionOverlap());
            evictor.schedule();
        }
    }

    @Override
    public boolean isFullyWithinTimeSpan(String feedID, long startTime, TimeUnit timeUnit) {
        int startPartition = this.currentParition.getBufferEnv().getCurrentBufferPartition();
        int i = startPartition;
        do {
            if (metaDataBuffer.isFullyWithinTimeSpan(i, feedID, timeUnit, startTime)) {
                return true;
            }
            i = this.currentParition.getBufferEnv().previousBufferPartition(i);
        } while (i != startPartition);
        return false;
    }

    /**
     * Returns true if the entire request can be satisfied for all feeds.
     * @param partition partition index
     * @param feedIDs feed IDs
     * @param startTime start time
     * @param timeUnit unit of time for startTime
     * @return true if the request for every feed is fully within the partition's time span
     */
    private boolean isFullyWithinTimeSpan(int partition, Set<String> feedIDs, long startTime, TimeUnit timeUnit) {
        for (String feedID : feedIDs) {
            if (!metaDataBuffer.isFullyWithinTimeSpan(partition, feedID, timeUnit, startTime)) {
                return false;
            }
        }
        return true;
    }

    private boolean isWithinTimeSpan(int partition, Set<String> feedIDs, TimeUnit timeUnit, long startTime, long endTime) {
        for (String feedID : feedIDs) {
            if (metaDataBuffer.isWithinTimeSpan(partition, feedID, timeUnit, startTime, endTime)) {
                return true;
            }
        }
        return false;
    }

    @Override
    public Map<String, SortedMap<Long, Map<String, String>>> getData(Set<String> feedIDs, long startTime, long endTime, TimeUnit timeUnit) {
        synchronized (movePartitionLock) {
            if (reset) return Collections.emptyMap();
        }

        Map<String, SortedMap<Long, Map<String, String>>> aggregateData = new HashMap<String, SortedMap<Long, Map<String, String>>>();
        int startPartition = this.currentParition.getBufferEnv().getCurrentBufferPartition();
        int i = startPartition;
        do {
            PartitionDataBuffer partitionBuffer = this.partitionDataBuffers[i].get();
            if (partitionBuffer != null && isWithinTimeSpan(i, feedIDs, timeUnit, startTime, endTime)) {
                Map<String, SortedMap<Long, Map<String, String>>> data = getData(partitionBuffer, feedIDs, timeUnit, startTime, endTime);
                for (Entry<String, SortedMap<Long, Map<String, String>>> entry : data.entrySet()) {
                    SortedMap<Long, Map<String, String>> cumulativeData = aggregateData.get(entry.getKey());
                    if (cumulativeData != null) {
                        cumulativeData.putAll(entry.getValue());
                    } else {
                        aggregateData.put(entry.getKey(), entry.getValue());
                    }
                }
                if (isFullyWithinTimeSpan(i, feedIDs, startTime, timeUnit)) {
                    break;
                }
            }
            i = this.currentParition.getBufferEnv().previousBufferPartition(i);
        } while (i != startPartition);
        return aggregateData;
    }

    @Override
    public Map<String, List<Map<String, String>>> getData(Set<String> feedIDs, TimeUnit timeUnit, long startTime, long endTime) {
        synchronized (movePartitionLock) {
            if (reset) return Collections.emptyMap();
        }

        Map<String, List<Map<String, String>>> aggregateData = new HashMap<String, List<Map<String, String>>>();
        int startPartition = this.currentParition.getBufferEnv().getCurrentBufferPartition();
        int i = startPartition;
        do {
            PartitionDataBuffer partitionBuffer = this.partitionDataBuffers[i].get();
            if (partitionBuffer != null && isWithinTimeSpan(i, feedIDs, timeUnit, startTime, endTime)) {
                Map<String, SortedMap<Long, Map<String, String>>> data = getData(partitionBuffer, feedIDs, timeUnit, startTime, endTime);
                for (Entry<String, SortedMap<Long, Map<String, String>>> entry : data.entrySet()) {
                    List<Map<String, String>> cumulativeData = aggregateData.get(entry.getKey());
                    if (cumulativeData != null) {
                        cumulativeData.addAll(0, entry.getValue().values());
                    } else {
                        aggregateData.put(entry.getKey(), new LinkedList<Map<String, String>>(entry.getValue().values()));
                    }
                }
                if (isFullyWithinTimeSpan(i, feedIDs, startTime, timeUnit)) {
                    break;
                }
            }
            i = this.currentParition.getBufferEnv().previousBufferPartition(i);
        } while (i != startPartition);
        return aggregateData;
    }

    private Map<String, SortedMap<Long, Map<String, String>>> getData(PartitionDataBuffer partitionDataBuffer,
            Set<String> feedIDs, TimeUnit timeUnit, long startTime, long endTime) {
        synchronized (movePartitionLock) {
            if (reset) return Collections.emptyMap();
            while (moveParitionInProgress) {
                try {
                    movePartitionLock.wait();
                } catch (InterruptedException e) {
                    // ignore
                }
            }
            readInProgress = true;
        }

        Set<String> searchFeedIDS = new HashSet<String>(feedIDs);
        try {
            final ElapsedTimer timer = new ElapsedTimer();
            timer.startInterval();
            Map<String, SortedMap<Long, Map<String, String>>> returnedData = partitionDataBuffer.getData(searchFeedIDS, timeUnit, startTime, endTime);
            timer.stopInterval();
            LOGGER.debug("time to get Data for feeds {}: {}", feedIDs, timer.getIntervalInMillis());
            return returnedData;
        } finally {
            synchronized (movePartitionLock) {
                readInProgress = false;
                movePartitionLock.notifyAll();
            }
        }
    }

    @Override
    public void putData(String feedID, TimeUnit timeUnit, Map<Long, Map<String, String>> entries) throws BufferFullException {
        synchronized (movePartitionLock) {
            if (reset) return;
        }

        final ElapsedTimer timer = new ElapsedTimer();
        timer.startInterval();

        Map<String, Map<Long, Map<String, String>>> feedDataToPut = new HashMap<String, Map<Long, Map<String, String>>>();
        feedDataToPut.put(feedID, entries);

        int i = this.currentParition.getBufferEnv().getCurrentBufferPartition();
        int startPartition = i;
        do {
            PartitionDataBuffer partitionBuffer = this.partitionDataBuffers[i].get();
            if (partitionBuffer == null || !partitionBuffer.isActive()) {
                break;
            }
            LOGGER.debug("Putting in partition {}", i);
            Map<String, PartitionTimestamps> timeStamps = putData(partitionBuffer, feedDataToPut, timeUnit);
            if (timeStamps != null) {
                metaDataBuffer.updatePartitionMetaData(partitionBuffer.getBufferEnv().getCurrentBufferPartition(), timeStamps);
            }
            i = (i + 1) % this.currentParition.getBufferEnv().getNumOfBufferPartitions();
        } while (i != startPartition);

        timer.stopInterval();
        PERF_LOGGER.debug("Time to save data for feed {}: {}", feedID, timer.getIntervalInMillis());
    }

    private Map<String, PartitionTimestamps> putData(PartitionDataBuffer partitionBuffer,
            Map<String, Map<Long, Map<String, String>>> value, TimeUnit timeUnit) throws BufferFullException {
        synchronized (movePartitionLock) {
            if (reset) return null;
            while (moveParitionInProgress) {
                try {
                    movePartitionLock.wait();
                } catch (InterruptedException e) {
                    // ignore
                }
            }
            writeInProgress = true;
        }

        if (this.partitionDataBuffers[partitionBuffer.getBufferEnv().getCurrentBufferPartition()].get() == null) {
            return null;
        }

        try {
            return partitionBuffer.putData(value, timeUnit);
        } finally {
            synchronized (movePartitionLock) {
                writeInProgress = false;
                movePartitionLock.notifyAll();
            }
        }
    }

    @Override
    public void putData(Map<String, Map<Long, Map<String, String>>> value, TimeUnit timeUnit, Runnable callback) throws BufferFullException {
        PERF_LOGGER.debug("Putting data for {} feeds", value.size());
        synchronized (movePartitionLock) {
            if (reset) return;
        }

        final ElapsedTimer timer = new ElapsedTimer();
        timer.startInterval();

        int i = this.currentParition.getBufferEnv().getCurrentBufferPartition();
        int startPartition = i;
        do {
            PartitionDataBuffer partitionBuffer = this.partitionDataBuffers[i].get();
            if (partitionBuffer == null || !partitionBuffer.isActive()) {
                break;
            }
            LOGGER.debug("Putting in partition {}", i);
            Map<String, PartitionTimestamps> timeStamps = putData(partitionBuffer, value, timeUnit);
            if (timeStamps != null) {
                metaDataBuffer.updatePartitionMetaData(partitionBuffer.getBufferEnv().getCurrentBufferPartition(), timeStamps);
            }
            timer.stopInterval();
            PERF_LOGGER.debug("Time to save data for {} feeds: {}", value.size(), timer.getIntervalInMillis());
            i = (i + 1) % currentParition.getBufferEnv().getNumOfBufferPartitions();
        } while (i != startPartition);

        if (callback != null) {
            callback.run();
        }
    }

    @Override
    public void putData(String feedID, TimeUnit timeUnit, long time, Map<String, String> value) throws BufferFullException {
        synchronized (movePartitionLock) {
            if (reset) return;
        }

        final ElapsedTimer timer = new ElapsedTimer();
        timer.startInterval();

        Map<Long, Map<String, String>> dataToPut = new HashMap<Long, Map<String, String>>();
        dataToPut.put(Long.valueOf(time), value);
        Map<String, Map<Long, Map<String, String>>> feedDataToPut = new HashMap<String, Map<Long, Map<String, String>>>();
        feedDataToPut.put(feedID, dataToPut);

        int i = this.currentParition.getBufferEnv().getCurrentBufferPartition();
        int startPartition = i;
        do {
            PartitionDataBuffer partitionBuffer = this.partitionDataBuffers[i].get();
            if (partitionBuffer == null || !partitionBuffer.isActive()) {
                break;
            }
            LOGGER.debug("Putting in partition {}", i);
            Map<String, PartitionTimestamps> timeStamps = putData(partitionBuffer, feedDataToPut, timeUnit);
            if (timeStamps != null) {
                metaDataBuffer.updatePartitionMetaData(partitionBuffer.getBufferEnv().getCurrentBufferPartition(), timeStamps);
            }
            i = (i + 1) % this.currentParition.getBufferEnv().getNumOfBufferPartitions();
        } while (i != startPartition);

        timer.stopInterval();
        PERF_LOGGER.debug("Time to save data for feed {}: {}", feedID, timer.getIntervalInMillis());
    }

    public int getConcurrencyDegree() {
        return this.currentParition.getBufferEnv().getConcurrencyDegree();
    }

    public int getBufferWriteThreadPoolSize() {
        return this.currentParition.getBufferEnv().getBufferWriteThreadPoolSize();
    }

    @Override
    public void reset() {
        synchronized (movePartitionLock) {
            while (moveParitionInProgress || writeInProgress || readInProgress) {
                try {
                    movePartitionLock.wait();
                } catch (InterruptedException e) {
                    // ignore
                }
            }
            reset = true;
        }
        synchronized (resetLock) {
            while (prepareNewPartitionInProgress) {
                try {
                    resetLock.wait();
                } catch (InterruptedException e) {
                    // ignore
                }
            }
        }
        try {
            if (evictor != null) {
                evictor.cancel();
            }
            DataBufferEnv env = currentParition.getBufferEnv();
            for (int j = 0; j < this.partitionDataBuffers.length; j++) {
                if (partitionDataBuffers[j].get() != null) {
                    partitionDataBuffers[j].get().removeBuffer();
                    partitionDataBuffers[j].set(null);
                }
            }
            if (this.metaDataBuffer != null) {
                this.metaDataBuffer.restart();
            }
            DataBufferEnv currentEnv = (DataBufferEnv) env.clone();
            assert currentEnv != null : "Current DataBufferEnv should not be null.";
            assert dataBufferHelper != null : "DataBufferHelper should not be null.";
            PartitionDataBuffer partitionBuffer = dataBufferHelper.newPartitionBuffer(currentEnv);
            this.currentParition = partitionBuffer;
            this.partitionDataBuffers[currentEnv.getCurrentBufferPartition()].set(currentParition);
            startEvictor();
        } finally {
            synchronized (movePartitionLock) {
                reset = false;
            }
        }
    }

    public void closeBuffer() {
        if (evictor != null) {
            evictor.cancel();
            evictor = null;
        }
        for (int j = 0; j < this.partitionDataBuffers.length; j++) {
            if (partitionDataBuffers[j].get() != null) {
                partitionDataBuffers[j].get().closeBuffer();
                partitionDataBuffers[j].set(null);
            }
        }
        if (this.metaDataBuffer != null) {
            this.metaDataBuffer.close();
        }
    }

    private synchronized void closeBuffer(PartitionDataBuffer partitionBuffer) {
        partitionBuffer.removeBuffer();
    }

    public boolean isDataBufferClose() {
        return this.currentParition.isClosed();
    }

    public boolean isAllDataBuffersClose() {
        for (int i = 0; i < this.partitionDataBuffers.length; i++) {
            PartitionDataBuffer partitionBuffer = partitionDataBuffers[i].get();
            if (partitionBuffer != null && !partitionBuffer.isClosed()) {
                return false;
            }
        }
        return true;
    }

    public void prepareForNextPartition() {
        synchronized (resetLock) {
            if (reset) {
                return;
            }
            prepareNewPartitionInProgress = true;
        }
        try {
            int newBufferPartition = this.currentParition.getBufferEnv().nextBufferPartition();
            PartitionDataBuffer toBeClosedBuffer = this.partitionDataBuffers[newBufferPartition].get();
            Map<String, SortedMap<Long, Map<String, String>>> rowOverData = null;
            if (toBeClosedBuffer != null) {
                Set<String> rowOverFeedIDs = metaDataBuffer.resetPartitionMetaData(newBufferPartition);
                if (!rowOverFeedIDs.isEmpty()) {
                    rowOverData = toBeClosedBuffer.getLastData(rowOverFeedIDs, TimeUnit.NANOSECONDS, 0, Long.MAX_VALUE);
                }
                closeBuffer(toBeClosedBuffer);
            }
            DataBufferEnv newBufferEnv = this.currentParition.getBufferEnv().advanceBufferPartition();
            PartitionDataBuffer newPartitionBuffer = dataBufferHelper.newPartitionBuffer(newBufferEnv);
            if (rowOverData != null) {
                Map<String, Map<Long, Map<String, String>>> data = new HashMap<String, Map<Long, Map<String, String>>>();
                for (Entry<String, SortedMap<Long, Map<String, String>>> entry : rowOverData.entrySet()) {
                    Map<Long, Map<String, String>> feedData = new HashMap<Long, Map<String, String>>(entry.getValue());
                    data.put(entry.getKey(), feedData);
                }
                try {
                    Map<String, PartitionTimestamps> timeStamps = putData(newPartitionBuffer, data, TimeUnit.NANOSECONDS);
                    if (timeStamps != null) {
                        metaDataBuffer.updatePartitionMetaData(newBufferPartition, timeStamps);
                    }
                } catch (BufferFullException e) {
                    LOGGER.error("Buffer full during prepareForNextPartition", e);
                }
            }
            this.partitionDataBuffers[newBufferEnv.getCurrentBufferPartition()].set(newPartitionBuffer);
        } finally {
            synchronized (resetLock) {
                prepareNewPartitionInProgress = false;
                resetLock.notifyAll();
            }
        }
    }

    public void moveToNextPartition() {
        int nextBufferPartition = this.currentParition.getBufferEnv().nextBufferPartition();
        int currentBufferPartition = this.currentParition.getBufferEnv().getCurrentBufferPartition();
        PartitionDataBuffer toBeInActiveBuffer = this.partitionDataBuffers[currentBufferPartition].get();
        metaDataBuffer.writeCurrentBufferPartition(nextBufferPartition);
        synchronized (movePartitionLock) {
            if (reset) {
                return;
            }
            while (readInProgress || writeInProgress) {
                try {
                    movePartitionLock.wait();
                } catch (InterruptedException e) {
                    // ignore
                }
            }
            moveParitionInProgress = true;
        }
        try {
            this.currentParition = this.partitionDataBuffers[nextBufferPartition].get();
        } finally {
            synchronized (movePartitionLock) {
                moveParitionInProgress = false;
                movePartitionLock.notifyAll();
            }
        }
        metaDataBuffer.writePartitionMetaData(currentBufferPartition);
        if (toBeInActiveBuffer != null) {
            toBeInActiveBuffer.getBufferEnv().flush();
            toBeInActiveBuffer.inactive();
        } else {
            LOGGER.warn("PartitionDataBuffer object should not be null!");
            LOGGER.warn("currentBufferPartition={}, nextBufferPartition={}", currentBufferPartition, nextBufferPartition);
        }
    }

    @Override
    public LOS getLOS() {
        return this.currentParition.getBufferEnv().getLOS();
    }

    DataBufferEvictor getEvictor() {
        return this.evictor;
    }
}
false
timeSequenceFeedAggregator_src_main_java_gov_nasa_arc_mct_buffer_internal_DataBuffer.java
54
final class DataBufferEvictor extends Timer {
    private static final Logger logger = LoggerFactory.getLogger(DataBufferEvictor.class);
    private final long evictMillis;
    private final long switchMillis;
    private final DataBuffer dataBuffer;

    DataBufferEvictor(DataBuffer dataBuffer, long evictMillis, long switchMillis) {
        super("DataBuffer Evictor", true);
        this.evictMillis = evictMillis;
        this.switchMillis = switchMillis;
        this.dataBuffer = dataBuffer;
    }

    void schedule() {
        super.schedule(newPrepareTask(), evictMillis);
    }

    private TimerTask newPrepareTask() {
        return new TimerTask() {
            @Override
            public void run() {
                try {
                    dataBuffer.prepareForNextPartition();
                    schedule(newMoveTask(), switchMillis);
                } catch (Exception e) {
                    logger.error(e.toString(), e);
                    schedule(newPrepareTask(), evictMillis);
                }
            }
        };
    }

    private TimerTask newMoveTask() {
        return new TimerTask() {
            @Override
            public void run() {
                try {
                    dataBuffer.moveToNextPartition();
                    schedule(newPrepareTask(), evictMillis);
                } catch (Exception e) {
                    logger.error(e.toString(), e);
                    schedule(newMoveTask(), switchMillis);
                }
            }
        };
    }
}
false
timeSequenceFeedAggregator_src_main_java_gov_nasa_arc_mct_buffer_internal_DataBufferEvictor.java
55
return new TimerTask() {
    @Override
    public void run() {
        try {
            dataBuffer.prepareForNextPartition();
            schedule(newMoveTask(), switchMillis);
        } catch (Exception e) {
            logger.error(e.toString(), e);
            schedule(newPrepareTask(), evictMillis);
        }
    }
};
false
timeSequenceFeedAggregator_src_main_java_gov_nasa_arc_mct_buffer_internal_DataBufferEvictor.java
56
return new TimerTask() {
    @Override
    public void run() {
        try {
            dataBuffer.moveToNextPartition();
            schedule(newPrepareTask(), evictMillis);
        } catch (Exception e) {
            logger.error(e.toString(), e);
            schedule(newMoveTask(), switchMillis);
        }
    }
};
false
timeSequenceFeedAggregator_src_main_java_gov_nasa_arc_mct_buffer_internal_DataBufferEvictor.java
57
public class DataBufferEvictorTest {
    private DataBuffer dataBuffer;
    private File bufferLocation;

    @BeforeMethod
    public void setup() throws IOException {
        DataBufferFactory.reset();
        Properties prop = new Properties();
        prop.load(ClassLoader.getSystemResourceAsStream("properties/testFeed.properties"));
        prop.put("buffer.partitions", "3");
        prop.put("buffer.time.millis", "12000");
        prop.put("buffer.partition.overlap.millis", "1000");
        bufferLocation = File.createTempFile("mct-buffer", "");
        bufferLocation.delete();
        bufferLocation.mkdir();
        prop.put("buffer.disk.loc", bufferLocation.toString());
        dataBuffer = DataBufferFactory.getFastDiskDataBuffer(prop);
        if (dataBuffer.isDataBufferClose()) {
            dataBuffer.reset();
        }
    }

    @AfterMethod
    public void reset() {
        if (dataBuffer != null) {
            dataBuffer.closeBuffer();
        }
        DataBufferFactory.reset();
        delete(bufferLocation);
    }

    private void delete(File f) {
        if (f.isDirectory()) {
            for (File f2 : f.listFiles()) {
                delete(f2);
            }
        }
        f.delete();
    }

    @Test
    public void switchPartitionsTest() throws Exception {
        int currentPartition = getCurrentBufferPartition();
        Assert.assertEquals(currentPartition, 0);
        Thread.sleep(5000);
        currentPartition = getCurrentBufferPartition();
        Assert.assertEquals(currentPartition, 1);
        Thread.sleep(5000);
        currentPartition = getCurrentBufferPartition();
        Assert.assertEquals(currentPartition, 2);
        Thread.sleep(5000);
        currentPartition = getCurrentBufferPartition();
        Assert.assertEquals(currentPartition, 0);
    }

    @Test
    public void testExceptions() {
        Properties prop = new Properties();
        prop.put("memory.buffer.partition", "2");
        prop.put("memory.buffer.time.millis", "100");
        final CountDownLatch latch = new CountDownLatch(2);
        DataBufferEnv env = new MemoryBufferEnv(prop);
        DataBufferHelper partitionBufferFactory = new MemoryDataBufferHelper();
        DataBuffer mockBuffer = new DataBuffer(env, partitionBufferFactory) {
            int prepareCount;
            int moveCount;

            @Override
            public void prepareForNextPartition() {
                prepareCount++;
                if (prepareCount == 1) {
                    throw new RuntimeException("This exception is normal.");
                } else if (prepareCount == 2) {
                    latch.countDown();
                }
            }

            @Override
            public void moveToNextPartition() {
                moveCount++;
                if (moveCount == 1) {
                    throw new RuntimeException("This exception is normal.");
                } else if (moveCount == 2) {
                    latch.countDown();
                }
            }
        };
        try {
            latch.await(1, TimeUnit.SECONDS);
        } catch (InterruptedException e) {
            Assert.fail("Evictor failed");
        }
        mockBuffer.closeBuffer();
    }

    private int getCurrentBufferPartition() throws Exception {
        Field f = DataBuffer.class.getDeclaredField("currentParition");
        f.setAccessible(true);
        PartitionFastDiskBuffer currentPartitionBuffer = (PartitionFastDiskBuffer) f.get(dataBuffer);
        return currentPartitionBuffer.getBufferEnv().getCurrentBufferPartition();
    }
}
false
timeSequenceFeedAggregator_src_test_java_gov_nasa_arc_mct_buffer_internal_DataBufferEvictorTest.java

Dataset Card for "mct_1_7b1"

More Information needed
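Each record in this dump pairs a Java function with a boolean label and the path of the source file it came from. As a minimal sketch of how a dataset with this layout is typically consumed — hedged, since this card does not name the hosting namespace (the repo id below is hypothetical) and a single "train" split is assumed — it can be loaded with the Hugging Face `datasets` Python library:

    from datasets import load_dataset

    # Hypothetical repo id: substitute the namespace that actually hosts this dataset.
    ds = load_dataset("<user>/mct_1_7b1")

    # A single "train" split is assumed; adjust if the dataset is partitioned differently.
    for row in ds["train"].select(range(3)):
        # func: Java source text, target: boolean label, project: original file path
        print(row["project"], row["target"], str(row["func"])[:80])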
