content
stringlengths
7
2.61M
def _expect_vars(vs=None): if vs is None: return list() elif isinstance(vs, Variable): return [vs] else: checked = list() for v in vs: if isinstance(v, Variable): checked.append(v) else: fstr = "expected Variable, got {0.__name__}" raise TypeError(fstr.format(type(v))) return checked
package com.xjj;

import java.util.regex.Matcher;
import java.util.regex.Pattern;

/**
 * Created by cheng on 2017/8/15.
 */
public class Came {

    /**
     * Convert an underscore-separated string to camel case.
     *
     * @param line       the source string
     * @param smallCamel true for lowerCamelCase, false for UpperCamelCase
     * @return the converted string; "" when the input is null or empty
     */
    public static String underline2Camel(String line, boolean smallCamel) {
        if (line == null || "".equals(line)) {
            return "";
        }
        StringBuffer camel = new StringBuffer();
        // Each match is one alphanumeric token, optionally followed by its '_'.
        Matcher tokens = Pattern.compile("([A-Za-z\\d]+)(_)?").matcher(line);
        while (tokens.find()) {
            String token = tokens.group();
            char head = token.charAt(0);
            // Only the very first token may stay lower-case (small camel).
            camel.append(smallCamel && tokens.start() == 0
                    ? Character.toLowerCase(head)
                    : Character.toUpperCase(head));
            int underscoreAt = token.lastIndexOf('_');
            if (underscoreAt > 0) {
                camel.append(token.substring(1, underscoreAt).toLowerCase());
            } else {
                camel.append(token.substring(1).toLowerCase());
            }
        }
        return camel.toString();
    }

    public static void main(String[] args) {
        System.out.println(underline2Camel("rule_name", true));
    }
}
"""Plotting and analysis helpers for RCPSP solutions.

Provides resource-consumption computation, resource/Gantt plots, a greedy
per-individual resource schedule builder, solution-similarity measures, and
a precedence-graph builder for an RCPSPModel.
"""
from skdecide.builders.discrete_optimization.generic_tools.graph_api import Graph
from skdecide.builders.discrete_optimization.rcpsp.rcpsp_model import RCPSPSolution, RCPSPModel, RCPSPModelCalendar
from typing import List, Union
import matplotlib.pyplot as plt
from copy import deepcopy
import numpy as np
from shapely.geometry import Polygon
from matplotlib.patches import Polygon as pp
from matplotlib.collections import PatchCollection
import matplotlib.cm
import scipy.stats


def compute_resource_consumption(rcpsp_model: RCPSPModel, rcpsp_sol: RCPSPSolution,
                                 list_resources: List[Union[int, str]]=None,
                                 future_view=True):
    """Accumulate per-resource consumption of the schedule over time.

    Returns a tuple ``(consumptions, times)`` where ``consumptions`` has one
    row per resource and one column per time step (0..makespan), and
    ``times`` is ``np.arange(0, makespan + 1)``.

    ``future_view=True`` shifts each activity's usage window one step to the
    right ([start+1, end]) instead of [start, end).
    """
    # Pad the mode list with a mode-1 dummy for the source and sink activities.
    # NOTE(review): indexing modes_extended[act_id - 1] assumes activity ids
    # are consecutive integers starting at 1 — confirm against RCPSPModel.
    modes_extended = deepcopy(rcpsp_sol.rcpsp_modes)
    modes_extended.insert(0, 1)
    modes_extended.append(1)
    # The makespan is read from the activity with the largest id,
    # presumably the sink activity — verify with the model convention.
    last_activity = max(rcpsp_sol.rcpsp_schedule)
    makespan = rcpsp_sol.rcpsp_schedule[last_activity]['end_time']
    if list_resources is None:
        list_resources = rcpsp_model.resources_list
    consumptions = np.zeros((len(list_resources), makespan + 1))
    for act_id in rcpsp_sol.rcpsp_schedule:
        for ir in range(len(list_resources)):
            use_ir = rcpsp_model.mode_details[act_id][modes_extended[act_id - 1]][list_resources[ir]]
            if future_view:
                consumptions[ir, rcpsp_sol.rcpsp_schedule[act_id]["start_time"] + 1:rcpsp_sol.rcpsp_schedule[act_id]["end_time"] + 1] += use_ir
            else:
                consumptions[ir, rcpsp_sol.rcpsp_schedule[act_id]["start_time"]:rcpsp_sol.rcpsp_schedule[act_id]["end_time"]] += use_ir
    return consumptions, np.arange(0, makespan + 1, 1)


def compute_nice_resource_consumption(rcpsp_model: RCPSPModel, rcpsp_sol: RCPSPSolution,
                                      list_resources: List[Union[int, str]] = None):
    """Build step-plot-friendly consumption curves per resource.

    Each time point is duplicated, paired once with the "future" value and
    once with the "past" value, so plotting the arrays yields a staircase.
    Returns ``(merged_times, merged_cons)``, both dicts keyed by the
    resource *index* in ``list_resources``.
    """
    if list_resources is None:
        list_resources = rcpsp_model.resources_list
    c_future, times = compute_resource_consumption(rcpsp_model, rcpsp_sol,
                                                   list_resources=list_resources,
                                                   future_view=True)
    c_past, times = compute_resource_consumption(rcpsp_model, rcpsp_sol,
                                                 list_resources=list_resources,
                                                 future_view=False)
    merged_times = {i: [] for i in range(len(list_resources))}
    merged_cons = {i: [] for i in range(len(list_resources))}
    for r in range(len(list_resources)):
        for index_t in range(len(times)):
            # Duplicate each abscissa: (t, future) then (t, past).
            merged_times[r] += [times[index_t], times[index_t]]
            merged_cons[r] += [c_future[r, index_t], c_past[r, index_t]]
    for r in merged_times:
        merged_times[r] = np.array(merged_times[r])
        merged_cons[r] = np.array(merged_cons[r])
    return merged_times, merged_cons


def plot_ressource_view(rcpsp_model: RCPSPModel, rcpsp_sol: RCPSPSolution,
                        list_resource: List[Union[int, str]]=None,
                        title_figure="", fig=None, ax=None):
    """Plot, per resource, the stacked activity rectangles plus the
    consumption curve and the capacity limit.

    Returns the matplotlib figure. ``ax`` is expected to be an array of
    axes (one per resource) when provided.
    """
    # Same mode padding / 1-based id assumption as compute_resource_consumption.
    modes_extended = deepcopy(rcpsp_sol.rcpsp_modes)
    modes_extended.insert(0, 1)
    modes_extended.append(1)
    # Calendar models store per-time capacities (lists) instead of scalars.
    with_calendar = isinstance(rcpsp_model, RCPSPModelCalendar)
    if list_resource is None:
        list_resource = rcpsp_model.resources_list
    if ax is None:
        fig, ax = plt.subplots(nrows=len(list_resource), figsize=(10, 5), sharex=True)
        fig.suptitle(title_figure)
    polygons_ax = {i: [] for i in range(len(list_resource))}
    # labels_ax records which activity owns each polygon; it is filled but
    # not used for drawing below.
    labels_ax = {i: [] for i in range(len(list_resource))}
    sorted_activities = sorted(rcpsp_sol.rcpsp_schedule,
                               key=lambda x: rcpsp_sol.rcpsp_schedule[x]["start_time"])
    for j in sorted_activities:
        time_start = rcpsp_sol.rcpsp_schedule[j]["start_time"]
        time_end = rcpsp_sol.rcpsp_schedule[j]["end_time"]
        for i in range(len(list_resource)):
            cons = rcpsp_model.mode_details[j][modes_extended[j-1]][list_resource[i]]
            if cons == 0:
                continue
            bound = rcpsp_model.resources[list_resource[i]] if not with_calendar \
                else max(rcpsp_model.resources[list_resource[i]])
            # Stack the activity's rectangle at the lowest level k where it
            # does not overlap a previously placed rectangle.
            for k in range(0, bound):
                polygon = Polygon([(time_start, k), (time_end, k),
                                   (time_end, k+cons), (time_start, k+cons),
                                   (time_start, k)])
                areas = [p.intersection(polygon).area for p in polygons_ax[i]]
                if len(areas) == 0 or max(areas) == 0:
                    polygons_ax[i].append(polygon)
                    labels_ax[i].append(j)
                    break
    for i in range(len(list_resource)):
        patches = []
        for polygon in polygons_ax[i]:
            x, y = polygon.exterior.xy
            ax[i].plot(x, y, zorder=-1, color="b")
            patches.append(pp(xy=polygon.exterior.coords))
        p = PatchCollection(patches,
                            cmap=matplotlib.cm.get_cmap('Blues'),
                            alpha=0.4)
        ax[i].add_collection(p)
    merged_times, merged_cons = compute_nice_resource_consumption(rcpsp_model, rcpsp_sol,
                                                                  list_resources=list_resource)
    for i in range(len(list_resource)):
        ax[i].plot(merged_times[i], merged_cons[i], color="r", linewidth=2,
                   label="Consumption "+str(list_resource[i]), zorder=1)
        if not with_calendar:
            # Constant capacity: a single horizontal limit line.
            ax[i].axhline(y=rcpsp_model.resources[list_resource[i]], linestyle="--",
                          label="Limit : "+str(list_resource[i]), zorder=0)
        else:
            # Time-varying capacity: sample the calendar at each plotted time.
            ax[i].plot(merged_times[i],
                       [rcpsp_model.resources[list_resource[i]][m] for m in merged_times[i]],
                       linestyle="--", label="Limit : " + str(list_resource[i]), zorder=0)
        ax[i].legend(fontsize=5)
        lims = ax[i].get_xlim()
        ax[i].set_xlim([lims[0], 1.*lims[1]])
    return fig


def plot_task_gantt(rcpsp_model: RCPSPModel, rcpsp_sol: RCPSPSolution,
                    fig=None, ax=None, current_t=None):
    """Draw a Gantt chart with one horizontal bar per task.

    Returns the matplotlib figure. ``current_t`` is accepted but unused here.
    """
    if fig is None or ax is None:
        fig, ax = plt.subplots(1, figsize=(10, 5))
        ax.set_title("Gantt Task")
    tasks = sorted(rcpsp_model.mode_details.keys())
    nb_task = len(tasks)
    # The 100000 factor sorts primarily by time, breaking ties by task id.
    sorted_task_by_start = sorted(rcpsp_sol.rcpsp_schedule,
                                  key=lambda x: 100000 * rcpsp_sol.rcpsp_schedule[x]["start_time"] + x)
    sorted_task_by_end = sorted(rcpsp_sol.rcpsp_schedule,
                                key=lambda x: 100000 * rcpsp_sol.rcpsp_schedule[x]["end_time"] + x)
    max_time = rcpsp_sol.rcpsp_schedule[sorted_task_by_end[-1]]["end_time"]
    min_time = rcpsp_sol.rcpsp_schedule[sorted_task_by_end[0]]["start_time"]
    patches = []
    for j in range(nb_task):
        nb_colors = len(tasks)//2
        colors = plt.cm.get_cmap("hsv", nb_colors)
        # Box corners as (row, time); swapped to (time, row) when building
        # the polygon so time runs along the x axis.
        box = [(j-0.25, rcpsp_sol.rcpsp_schedule[tasks[j]]["start_time"]),
               (j-0.25, rcpsp_sol.rcpsp_schedule[tasks[j]]["end_time"]),
               (j+0.25, rcpsp_sol.rcpsp_schedule[tasks[j]]["end_time"]),
               (j+0.25, rcpsp_sol.rcpsp_schedule[tasks[j]]["start_time"]),
               (j-0.25, rcpsp_sol.rcpsp_schedule[tasks[j]]["start_time"])]
        polygon = Polygon([(b[1], b[0]) for b in box])
        x, y = polygon.exterior.xy
        ax.plot(x, y, zorder=-1, color="b")
        patches.append(pp(xy=polygon.exterior.coords,
                          facecolor=colors((j - 1) % nb_colors)))
    p = PatchCollection(patches, match_original=True,
                        #cmap=matplotlib.cm.get_cmap('Blues'),
                        alpha=0.4)
    ax.add_collection(p)
    ax.set_xlim((min_time, max_time))
    ax.set_ylim((-0.5, nb_task))
    ax.set_yticks(range(nb_task))
    ax.set_yticklabels(tuple(["Task "+str(tasks[j]) for j in range(nb_task)]),
                       fontdict={"size": 7})
    return fig


def compute_schedule_per_resource_individual(rcpsp_model: RCPSPModel, rcpsp_sol: RCPSPSolution,
                                             resource_types_to_consider: List[str]=None,
                                             verbose=False):
    """Greedily assign activities to individual resource units.

    For each resource type, units are picked in increasing order of total
    work done so far (load balancing). Returns a dict keyed by resource
    name with arrays "activity", "binary_activity", "total_activity",
    "activity_last_n_hours" and the plot-oriented list "boxes_time".
    """
    nb_ressources = len(rcpsp_model.resources_list)  # unused; kept as-is
    # Same mode padding / 1-based id assumption as compute_resource_consumption.
    modes_extended = deepcopy(rcpsp_sol.rcpsp_modes)
    modes_extended.insert(0, 1)
    modes_extended.append(1)
    if resource_types_to_consider is None:
        resources = rcpsp_model.resources_list
    else:
        resources = resource_types_to_consider
    sorted_task_by_start = sorted(rcpsp_sol.rcpsp_schedule,
                                  key=lambda x: 100000*rcpsp_sol.rcpsp_schedule[x]["start_time"]+x)
    sorted_task_by_end = sorted(rcpsp_sol.rcpsp_schedule,
                                key=lambda x: 100000*rcpsp_sol.rcpsp_schedule[x]["end_time"]+x)
    max_time = rcpsp_sol.rcpsp_schedule[sorted_task_by_end[-1]]["end_time"]
    min_time = rcpsp_sol.rcpsp_schedule[sorted_task_by_end[0]]["start_time"]
    print("Min time ", min_time)
    print("Max time ", max_time)
    with_calendar = isinstance(rcpsp_model, RCPSPModelCalendar)
    # One (time x units) matrix per resource; for calendar models the unit
    # count is the calendar's maximum capacity.
    array_ressource_usage = {resources[i]: {"activity": np.zeros((max_time-min_time+1,
                                                                  max(rcpsp_model.resources[resources[i]])
                                                                  if with_calendar else rcpsp_model.resources[resources[i]])),
                                            "binary_activity": np.zeros((max_time - min_time + 1,
                                                                         max(rcpsp_model.resources[resources[i]])
                                                                         if with_calendar else rcpsp_model.resources[resources[i]])),
                                            "total_activity": np.zeros(max(rcpsp_model.resources[resources[i]])
                                                                       if with_calendar else rcpsp_model.resources[resources[i]]),
                                            "activity_last_n_hours": np.zeros((max_time-min_time+1,
                                                                               max(rcpsp_model.resources[resources[i]])
                                                                               if with_calendar else rcpsp_model.resources[resources[i]])),
                                            "boxes_time": []
                                            }
                             for i in range(len(resources))}
    total_time = max_time-min_time+1
    # Window size for the rolling-activity convolution below.
    nhour = int(min(8, total_time/2-1))
    index_to_time = {i: min_time+i for i in range(max_time-min_time+1)}
    time_to_index = {index_to_time[i]: i for i in index_to_time}
    for activity in sorted_task_by_start:
        mode = modes_extended[activity-1]
        start_time = rcpsp_sol.rcpsp_schedule[activity]["start_time"]
        end_time = rcpsp_sol.rcpsp_schedule[activity]["end_time"]
        if end_time == start_time:
            # Zero-duration activity consumes nothing.
            continue
        resources_needed = {r: rcpsp_model.mode_details[activity][mode][r] for r in resources}
        for r in resources_needed:
            if r not in array_ressource_usage:
                continue
            rneeded = resources_needed[r]
            if not with_calendar:
                range_interest = range(array_ressource_usage[r]["activity"].shape[1])
            else:
                # try:
                #    range_interest = [x for x in range(len(rcpsp_model.calendar_details[r])) if
                #                      rcpsp_model.calendar_details[r][x][time_to_index[start_time]] == 1]
                # except:
                # Only the units available at the activity's start time.
                range_interest = range(rcpsp_model.resources[r][time_to_index[start_time]])
            while rneeded > 0:
                # availables_people_r = [i for i in range(array_ressource_usage[r]["activity"].shape[1])
                #                        if array_ressource_usage[r]["activity"][time_to_index[start_time], i] == 0]
                availables_people_r = [i for i in range_interest
                                       if array_ressource_usage[r]["activity"][time_to_index[start_time], i] == 0]
                if verbose:
                    print(len(availables_people_r), " people available : ")
                if len(availables_people_r) > 0:
                    resource = min(availables_people_r,
                                   key=lambda x: array_ressource_usage[r]["total_activity"][x])
                    # greedy choice,
                    # the one who worked the less until now.
                    array_ressource_usage[r]["activity"][time_to_index[start_time]:time_to_index[end_time], resource] \
                        = activity
                    array_ressource_usage[r]["binary_activity"][time_to_index[start_time]:time_to_index[end_time], resource] \
                        = 1
                    array_ressource_usage[r]["total_activity"][resource] += (end_time-start_time)
                    array_ressource_usage[r]["activity_last_n_hours"][:, resource] = np.convolve(array_ressource_usage[r]["binary_activity"][:, resource],
                                                                                                np.array([1]*nhour+[0]+[0]*nhour),
                                                                                                mode="same")
                    array_ressource_usage[r]["boxes_time"] += [[(resource-0.25, start_time+0.01, activity),
                                                                (resource-0.25, end_time-0.01, activity),
                                                                (resource+0.25, end_time-0.01, activity),
                                                                (resource+0.25, start_time+0.01, activity),
                                                                (resource-0.25, start_time+0.01, activity)]]
                    # for plot purposes.
                    rneeded -= 1
                else:
                    # No free unit at this start time: report and give up on
                    # the remaining demand instead of raising.
                    print("r_needed ", rneeded)
                    print("Ressource needed : ", resources_needed)
                    print("ressource : ", r)
                    print("activity : ", activity)
                    print("Problem, can't build schedule")
                    print(array_ressource_usage[r]["activity"])
                    rneeded = 0
    return array_ressource_usage


def plot_resource_individual_gantt(rcpsp_model: RCPSPModel, rcpsp_sol: RCPSPSolution,
                                   resource_types_to_consider: List[str]=None,
                                   title_figure="", fig=None, ax=None,
                                   current_t=None):
    """Plot one Gantt chart per resource type, with one row per resource
    unit, based on compute_schedule_per_resource_individual.

    Returns the matplotlib figure; draws a vertical marker at ``current_t``
    when given.
    """
    array_ressource_usage = compute_schedule_per_resource_individual(rcpsp_model,
                                                                     rcpsp_sol,
                                                                     resource_types_to_consider=resource_types_to_consider)
    sorted_task_by_start = sorted(rcpsp_sol.rcpsp_schedule,
                                  key=lambda x: 100000 * rcpsp_sol.rcpsp_schedule[x]["start_time"] + x)
    sorted_task_by_end = sorted(rcpsp_sol.rcpsp_schedule,
                                key=lambda x: 100000 * rcpsp_sol.rcpsp_schedule[x]["end_time"] + x)
    max_time = rcpsp_sol.rcpsp_schedule[sorted_task_by_end[-1]]["end_time"]
    min_time = rcpsp_sol.rcpsp_schedule[sorted_task_by_end[0]]["start_time"]
    # Drop resource types that were never used.
    for key in list(array_ressource_usage.keys()):
        if np.sum(array_ressource_usage[key]["total_activity"]) == 0:
            array_ressource_usage.pop(key)
    resources_list = list(array_ressource_usage.keys())
    # fig, ax = plt.subplots(len(array_ressource_usage),
    #                        figsize=(10, 5))
    # for i in range(len(array_ressource_usage)):
    #     ax[i].imshow(array_ressource_usage[resources_list[i]]["binary_activity"].T)
    if fig is None or ax is None:
        fig, ax = plt.subplots(len(array_ressource_usage), figsize=(10, 5))
        fig.suptitle(title_figure)
    if len(array_ressource_usage) == 1:
        # plt.subplots(1) returns a bare Axes; normalize to a list.
        ax = [ax]
    for i in range(len(resources_list)):
        patches = []
        nb_colors = len(sorted_task_by_start)//2
        colors = plt.cm.get_cmap("hsv", nb_colors)
        for boxe in array_ressource_usage[resources_list[i]]["boxes_time"]:
            polygon = Polygon([(b[1], b[0]) for b in boxe])
            activity = boxe[0][2]
            x, y = polygon.exterior.xy
            ax[i].plot(x, y, zorder=-1, color="b")
            patches.append(pp(xy=polygon.exterior.coords,
                              facecolor=colors((activity-1) % nb_colors)))
        p = PatchCollection(patches, match_original=True,
                            #cmap=matplotlib.cm.get_cmap('Blues'),
                            alpha=0.4)
        ax[i].add_collection(p)
        ax[i].set_title(resources_list[i])
        ax[i].set_xlim((min_time, max_time))
        # Scalar capacity works directly; a calendar (list) capacity raises
        # and falls back to its maximum. NOTE(review): bare except kept as-is.
        try:
            ax[i].set_ylim((-0.5, rcpsp_model.resources[resources_list[i]]))
            ax[i].set_yticks(range(rcpsp_model.resources[resources_list[i]]))
            ax[i].set_yticklabels(tuple([j for j in range(rcpsp_model.resources[resources_list[i]])]),
                                  fontdict={"size": 7})
        except:
            m = max(rcpsp_model.resources[resources_list[i]])
            ax[i].set_ylim((-0.5, m))
            ax[i].set_yticks(range(m))
            ax[i].set_yticklabels(tuple([j for j in range(m)]), fontdict={"size": 7})
        ax[i].grid(True)
        if current_t is not None:
            ax[i].axvline(x=current_t, label='pyplot vertical line', color='r', ls='--')
    return fig


# TODO: Check if the scipy version of KTD is the most meaningful for what we want to use it for (ktd between -1 and 1)
def kendall_tau_similarity(rcpsp_sols: (RCPSPSolution, RCPSPSolution)):
    """Kendall tau correlation between the activity permutations of two
    solutions (in [-1, 1]); the p-value is discarded."""
    sol1 = rcpsp_sols[0]
    sol2 = rcpsp_sols[1]
    perm1 = sol1.generate_permutation_from_schedule()
    perm2 = sol2.generate_permutation_from_schedule()
    ktd, p_value = scipy.stats.kendalltau(perm1, perm2)
    return ktd


def all_diff_start_time(rcpsp_sols: (RCPSPSolution, RCPSPSolution)):
    """Per-activity start-time difference (sol1 - sol2), as a dict keyed by
    activity id. Assumes both solutions schedule the same activities."""
    sol1 = rcpsp_sols[0]
    sol2 = rcpsp_sols[1]
    diffs = {}
    for act_id in sol1.rcpsp_schedule.keys():
        diff = sol1.rcpsp_schedule[act_id]['start_time'] - sol2.rcpsp_schedule[act_id]['start_time']
        diffs[act_id] = diff
    return diffs


def compute_graph_rcpsp(rcpsp_model: RCPSPModel):
    """Build the precedence Graph of the model: one node per activity
    (annotated with per-mode durations) and one edge per successor link
    (annotated with duration aggregates and their negations)."""
    # NOTE(review): node ids span range(1, n_jobs + 3), presumably including
    # the source and sink activities — confirm with RCPSPModel.
    nodes = [(n, {mode: rcpsp_model.mode_details[n][mode]["duration"]
                  for mode in rcpsp_model.mode_details[n]})
             for n in range(1, rcpsp_model.n_jobs + 3)]
    edges = []
    for n in rcpsp_model.successors:
        for succ in rcpsp_model.successors[n]:
            dict_transition = {mode: rcpsp_model.mode_details[n][mode]["duration"]
                               for mode in rcpsp_model.mode_details[n]}
            min_duration = min(dict_transition.values())
            max_duration = max(dict_transition.values())
            dict_transition["min_duration"] = min_duration
            dict_transition["max_duration"] = max_duration
            dict_transition["minus_min_duration"] = -min_duration
            dict_transition["minus_max_duration"] = -max_duration
            dict_transition["link"] = 1
            edges += [(n, succ, dict_transition)]
    # False: the precedence graph is directed... or undirected? The flag's
    # meaning is defined by Graph — see graph_api.
    return Graph(nodes, edges, False)
/**
 * Content provider for templates. Provides all the enabled templates
 * defined for this editor.
 */
private final class TemplatesContentProvider implements ITreeContentProvider {

	/*
	 * @see org.eclipse.jface.viewers.ITreeContentProvider#getChildren(java.lang.Object)
	 */
	public Object[] getChildren(Object parentElement) {
		// Templates are leaves; context types expand to their templates.
		if (parentElement instanceof TemplatePersistenceData)
			return new Object[0];
		else if (parentElement instanceof TemplateContextType) {
			TemplateContextType contextType= (TemplateContextType) parentElement;
			return getTemplates(contextType.getId());
		}
		return null;
	}

	/**
	 * Returns the enabled templates registered for the given context id.
	 */
	private TemplatePersistenceData[] getTemplates(String contextId) {
		List templateList= new ArrayList();
		TemplatePersistenceData[] datas= getTemplateStore().getTemplateData(false);
		for (int i= 0; i < datas.length; i++) {
			if (datas[i].isEnabled() && datas[i].getTemplate().getContextTypeId().equals(contextId))
				templateList.add(datas[i]);
		}
		return (TemplatePersistenceData[]) templateList.toArray(new TemplatePersistenceData[templateList.size()]);
	}

	/*
	 * @see org.eclipse.jface.viewers.ITreeContentProvider#getParent(java.lang.Object)
	 */
	public Object getParent(Object element) {
		// A template's parent is its context type, resolved via the registry.
		if (element instanceof TemplatePersistenceData) {
			TemplatePersistenceData templateData= (TemplatePersistenceData) element;
			return getContextTypeRegistry().getContextType(templateData.getTemplate().getContextTypeId());
		}
		return null;
	}

	/*
	 * @see org.eclipse.jface.viewers.ITreeContentProvider#hasChildren(java.lang.Object)
	 */
	public boolean hasChildren(Object parentElement) {
		if (parentElement instanceof TemplatePersistenceData)
			return false;
		else if (parentElement instanceof TemplateContextType) {
			String contextId= ((TemplateContextType) parentElement).getId();
			TemplatePersistenceData[] datas= getTemplateStore().getTemplateData(false);
			if (datas.length <= 0)
				return false;
			// True as soon as one enabled template matches this context.
			for (int i= 0; i < datas.length; i++) {
				if (datas[i].isEnabled() && datas[i].getTemplate().getContextTypeId().equals(contextId))
					return true;
			}
			return false;
		}
		return false;
	}

	/*
	 * @see org.eclipse.jface.viewers.IStructuredContentProvider#getElements(java.lang.Object)
	 */
	public Object[] getElements(Object inputElement) {
		// Root elements: all context types, optionally filtered down to the
		// active ones when "link with editor" is enabled.
		List contextTypes= new ArrayList();
		for (Iterator iterator= getContextTypeRegistry().contextTypes(); iterator.hasNext();) {
			TemplateContextType contextType= (TemplateContextType) iterator.next();
			if (!fLinkWithEditorAction.isChecked() || isActiveContext(contextType))
				contextTypes.add(contextType);
		}
		return contextTypes.toArray(new TemplateContextType[contextTypes.size()]);
	}

	// A context is active when no filter is set or its id is in fActiveTypes.
	private boolean isActiveContext(TemplateContextType contextType) {
		return fActiveTypes == null || fActiveTypes.contains(contextType.getId());
	}

	/*
	 * @see org.eclipse.jface.viewers.IContentProvider#dispose()
	 */
	public void dispose() {
	}

	/*
	 * @see org.eclipse.jface.viewers.IContentProvider#inputChanged(org.eclipse.jface.viewers.Viewer, java.lang.Object, java.lang.Object)
	 */
	public void inputChanged(Viewer viewer, Object oldInput, Object newInput) {
	}
}
def add_feed(self, feedlike, **kwargs):
    """Register a feed on this object and persist it.

    *feedlike* may be a ``bFeed`` (converted into a ``Feed``, with any
    ``munging`` keyword overriding its munging entries key by key) or an
    existing ``Feed`` (used as-is). An optional ``fnum`` keyword is
    forwarded to the Feed constructor.

    Raises:
        Exception: if *feedlike* is neither a bFeed nor a Feed.
    """
    fnum = kwargs.pop('fnum', None)
    if isinstance(feedlike, bFeed):
        munging = feedlike.munging
        if 'munging' in kwargs:
            # Explicit munging entries win over the bFeed defaults.
            overrides = kwargs['munging'].as_odict
            for name in overrides:
                munging[name] = overrides[name]
        fed = Feed(self, feedlike.ftype, feedlike.sourcing,
                   munging, feedlike.meta, fnum)
    elif isinstance(feedlike, Feed):
        fed = feedlike
    else:
        raise Exception("Invalid Feed {}".format(repr(feedlike)))
    self.feeds.append(fed)
    session = object_session(self)
    session.add(fed)
    session.commit()
The 87Sr/86Sr and 143Nd/144Nd disequilibrium between Polynesian hot spot lavas and the clinopyroxenes they host: Evidence complementing isotopic disequilibrium in melt inclusions We report 87Sr/86Sr and 143Nd/144Nd data on clinopyroxenes recovered from 10 ocean island lavas from three different hot spots (Samoa, Society, and Cook–Austral island chains). The clinopyroxenes recovered from eight of the 10 lavas analyzed in this study exhibit 87Sr/86Sr disequilibrium with respect to the host lava. The 87Sr/86Sr ratios in clinopyroxene separates are 95–3146 ppm (0.0095–0.31%) different from their respective host whole rocks. Clinopyroxenes in three lavas have 143Nd/144Nd ratios that are 70–160 ppm (0.007–0.016%) different from the host lavas. The 87Sr/86Sr and 143Nd/144Nd disequilibrium in one lava (the oldest lava considered in this study, Mangaia sample MGA-B-47) can be attributed to post-eruptive radiogenic ingrowth, but the isotope disequilibrium in the other, younger lavas cannot be explained by this mechanism. In five of the lava samples, two populations of clinopyroxene were isolated (black and green, separated by color). In four out of five of these samples, the 87Sr/86Sr ratios of the two clinopyroxene populations are isotopically different from each other. In addition to 87Sr/86Sr disequilibrium, the two clinopyroxene populations in one of the lavas (Tahaa sample TAA-B-26) have 143Nd/144Nd ratios that are ∼100 ppm different from each other. Given the resilience of clinopyroxene to seawater alteration and the likelihood that the Sr and Nd isotope composition of fresh clinopyroxene separates provides a faithful record of primary magmatic compositions, the clinopyroxene–clinopyroxene isotope disequilibrium in these four lavas provides strong evidence that a mechanism other than seawater alteration has generated the observed isotopic disequilibrium. This study confirms the isotopic diversity in ocean island lavas previously observed in olivine-hosted melt inclusions. 
For example, the Sr isotopic variability previously observed in olivine-hosted melt inclusions is mirrored by the isotopic diversity in clinopyroxenes isolated from many of the same Samoan lavas. The isotopic data from melt inclusions and clinopyroxenes are not consistent with shallow assimilation of sediment or with entrainment of xenocrystic clinopyroxene from the oceanic crust or upper mantle. Instead, the data are interpreted as reflecting isotopic heterogeneity in the mantle sources of the lavas. The isotopic diversity in clinopyroxenes and melt inclusions suggests that a single lava can host components derived from isotopically diverse source regions.
package de.baswil.spring.proxy.proxy;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.net.MalformedURLException;
import java.net.URL;

/**
 * Analyze the Environment variable for proxies (http or https)
 *
 * @author <NAME>
 */
public abstract class AbstractUrlProxySettingsParser {
    private static final Logger LOGGER = LoggerFactory.getLogger(AbstractUrlProxySettingsParser.class);

    /**
     * Get the value (url) of the environment variable.
     *
     * @return url
     */
    public abstract String getUrl();

    /**
     * Analyze the url of the environment variable and save the result in the
     * {@link ProxySettings} object. Malformed urls and urls without a host
     * are logged and ignored; port and credentials are only set when present.
     *
     * @param proxySettings The settings object for the result of the analyze.
     */
    public void readProxySettingsFromUrl(ProxySettings proxySettings) {
        final String rawUrl = getUrl();
        if (rawUrl == null) {
            return;
        }

        final URL parsedUrl;
        try {
            parsedUrl = new URL(rawUrl);
        } catch (MalformedURLException e) {
            LOGGER.warn("Wrong proxy url format. Ignore url for proxy settings.", e);
            return;
        }

        final String host = parsedUrl.getHost();
        if (host.trim().isEmpty()) {
            LOGGER.warn("Proxy url has no hostname. Ignore url for proxy settings.");
            return;
        }
        proxySettings.setHost(host);

        final int port = parsedUrl.getPort();
        if (port != -1) {
            proxySettings.setPort(port);
        }

        final String userInfo = parsedUrl.getUserInfo();
        if (userInfo != null) {
            // "user" or "user:password" — split on the first ':' only.
            final String[] credentials = userInfo.split(":", 2);
            proxySettings.setUser(credentials[0]);
            if (credentials.length > 1) {
                proxySettings.setPassword(credentials[1]);
            }
        }
    }
}
Optimization Design of Shaper Mechanism Based on Nonlinear Programming Model Due to the lack of the optimization algorithm or model which could be used to optimize the complicated plane linkage mechanism (like a shaper mechanism), a nonlinear programming model which could solve this problem was proposed in this work, whose objective function was the minimum initial velocity of the cutting tool in the shaper mechanism, and the length of the linkages in the shaper mechanism was regarded as the constraint conditions. Subsequently, a typical shaper mechanism was chosen as the example to be optimized via this nonlinear programming model. The optimization results revealed that this model could acquire an appropriate optimization scheme effectively and the optimization scheme was reasonable. Introduction With the advancement of computer science and programming language, an increasing number of intelligent algorithms or models had been applied in the machinery industry field, especially for the optimization design of a mechanism, which had aroused the attention of a host of scholars in recent years actually. According to the available literatures, a large quantity of optimization algorithms and models which were utilized to optimize the size of the components in a certain mechanism had been reported, such as multi-objective programming model (MOOM), differential evolution, Monte Carlo method, particle swarm optimization (PSO) and so on, all of which had been proved to be effective and efficient to carry out an appropriate optimization scheme for some simple plane linkage mechanisms like slider-crank mechanism and four-bar linkage mechanism. Nevertheless, the investigations on the optimization algorithms and models which had the capacity to be used to propose a proper optimization scheme for complex linkage mechanisms, such as the actuator of a shaper mechanism, were inadequate. 
Therefore, an optimization model which could be used to optimize the length of the linkages in a six-bar shaper mechanism was established via utilizing a nonlinear programming model, which was the main contribution of this work. Rules of establishing optimization model While optimizing the design of a shaper mechanism, several critical issues needed to be considered: 1) it was widely acknowledged that the cutting tool of the shaper mechanism needed to move smoothly during the process of cutting metal. Furthermore, the impact load which would occur when the cutting tool contacted with the metal should be limited to a lower level as much as possible, which revealed that the initial velocity of the cutting tool should be as low as possible; 2) due to some special limitations of working conditions, the length of the linkages in the shaper mechanism should be controlled within a certain range. According to the analysis which had been mentioned above, some of the constraints couldn't be described as a linear function, and they were nonlinear functions actually. Thus, a nonlinear programming model should be presented to solve this problem. Establishing objective function Assuming that the initial velocity of the cutting tool was v 0, and there were n independent optimization variables which may exert an effect on the initial velocity of the cutting tool, all of which could be written as a column vector as Equation 1. Where l i was the independent variable, and the length of the linkages in this shaper mechanism would always been regarded as the independent variable. Therefore, the initial velocity of the cutting tool v 0 could use a function like Equation 2 to express. Where l i was the element of vector L. The main purpose of this optimization model was to make v 0 as low as possible, thus, Equation 2 could be regarded as the objective function of this nonlinear optimization model. 
Establishing constraint conditions As a matter of fact, there were a large number of factors which could exert an influence on the length of the linkages in the shaper mechanism, such as the value of pressure angle and the condition of forming an oscillating guard bar mechanism, which would limit the length of the linkages into a certain range. Providing that the minimum value of a linkage was l_imin, and the maximum value of a linkage was l_imax, the constraint conditions of this nonlinear optimization model could be written as Equation 3. Optimization problem definitions This work chose a shaper mechanism like Figure 1 as the example to illustrate how to use the nonlinear programming model to optimize this shaper mechanism. As shown in Figure 1, Components 1, 3 and 4 were linkages, and Component 2 was a slider. Besides, Component 5 represented the cutting tool of this shaper mechanism. Component 1 was the original moving part which would rotate counterclockwise, whose angular velocity ω1 was a constant. At the beginning of the motion, the angle φ1 was equal to 0. The range of the size of the linkages in Figure 1 would be introduced in Section 3.2. We intended to know the value of the length of the linkages in Figure 1 when the initial velocity of Component 5 was the lowest. Determining constraint conditions Taking the conditions of forming this mechanism and the value of pressure angle into consideration, the range of the size of the linkages in Figure 1 was determined. Determining objective function The initial velocity of Component 5 could be solved using the graphical method of vector equation. Since the graphical method of vector equation was a mature mechanism kinematic analysis theory, the expression of the initial velocity of Component 5, v0, would be given directly and the process of acquiring this expression was omitted, which could be written as Equation 4. Where ω1 was a constant and tan θ could be written as Equation 5. 
Solutions & Results Using Python programming language to program the code which could solve this nonlinear programming model and running these codes in PyCharm, the optimization results were attained. The comparison results between the original design and the optimization design which was acquired via using this nonlinear programming model were shown in Table 1. As shown in Table 1, this nonlinear programming model presented an optimization design scheme. In this scheme, the initial velocity of the cutting tool was 0. It was an ideal circumstance, because the impact load which would occur when the cutting tool contacted with the metal would be the lowest. Conclusion This work proposed an optimization design model for optimizing the shaper mechanism based on nonlinear programming model. The optimization results of the example demonstrated that this model could acquire the optimization design scheme effectively. Furthermore, the design scheme of the shaper mechanism which was calculated via this model was better than the original design scheme, which provided a new solution for the similar optimization problems in the machinery industry field.
On March 29th, Rob Elliot won his fourth Ireland cap, against Slovakia in a pre-Euros friendly in Dublin. Elliot had hopes of not just making Martin O’Neill’s squad for France, but of being in the starting XI. He was the only Irish goalkeeper playing in the Premier League every week and, although it was at struggling Newcastle United, Elliot says he was “growing”. At 29, season 2014/’15 was turning into the best of his career. Then, 16 minutes in, Elliot dived to his left and heard his right knee “pop”. His season, his Euros, his career, stopped there and then. He left Lansdowne Road in an ambulance. It’s said 2016 will go down as an unforgettable, landscape-shifting year: here’s Rob Elliot’s. JANUARY The first game of the year was a 1-0 defeat at Arsenal. Steve McClaren had been in charge for six months but was under pressure. On arrival he and the Newcastle board told Elliot he could leave – Tim Krul was first choice and Karl Darlow had been signed. “Then [October 2015] I was away with Ireland and found out Tim ruptured his cruciate,” Elliot says. “Karl was injured, so I was the only fit ’keeper at the club. I remember thinking that no-one really wanted me here a few months ago, my future wasn’t here, but now I was playing for Newcastle again.” By January 2016 Elliot was re-established as Newcastle’s number one. “After Arsenal we drew with Man United and I felt we were on the cusp of developing something. We signed Jonjo [Shelvey] and Andros Townsend. We beat West Ham at home and we’d a lift. “Then we lost at Watford, who were big and strong. Maybe we didn’t have the character to stand up to that. If it was just a football game – like at Arsenal – we could hold our own because we’d good, technical players. “It was a strange time. Alan Rickman died. I’m a movie buff.” FEBRUARY A 3-0 defeat at Everton kept Newcastle in the relegation zone. “After the game some harsh words were said by Steve McClaren. I’d made some good saves, but it was a kick in the balls. 
I wanted to make saves that won us points, not just keep the score down. “Then we beat West Brom. Again I thought: ‘Maybe this is the turning point.’” MARCH “Seamus [McDonagh] was in touch loads. There were also messages from the gaffer [O’Neill] or Roy [Keane]. “I knew the friendly games were coming up, then the Euros, I was looking forward. I felt it was between me and Darren [Randolph] to have a real go at it.” Newcastle lost 3-1 at home to Bournemouth. McClaren was sacked. “It felt like the end. The fans had lost patience with us, the manager and the club. The whole thing had turned sour. “Then we heard about Rafa [Benítez] and we thought: ‘This can’t be right!’ “We met him here in the canteen, talking like school kids. It was almost like the club changed instantly. The fans changed. We went to Leicester in his first game and did well. “Then we drew with Sunderland. That was my last Newcastle game. I went to the airport with John O’Shea, had a beer, flew over to Dublin. “We had Switzerland on the Friday night – Darren played in that one and did well. We’d Saturday off. Me and Daz went into town, watched a film. We grew up together at Charlton, we’ve known each other since we were 15, became pros together. We room together. “Brian Kerr called me up for the under-17s. People were suddenly asking me: ‘Are you Irish?’ I said yeah – for me football was always Ireland. That was the passion. “I’d go to my nan’s in Cork. My Mum’s family are English, from Greenwich. My granddad’s actually Scottish but he met my nan in Ireland. I’m a proper mongrel. “They moved over in the 50s, as you did. I grew up as Irish, my best friend on our street, Liam, his family were all from Tipperary. In London in the 80s that was a bit tough. They’d been the immigrants, ‘the problem’. It all changes, doesn’t it? “USA 94, I remember. Then 2002, I was watching Shay Given. We’d Mark Kinsella and Matt Holland at Charlton. “Slovakia? 
I remember the ball being played across and I moved to my right. The fella’s had a shot and I moved to my left. As I dived I just felt a crack and pop. I knew it was my ACL [anterior cruciate ligament]. There’s a picture of me in the air and I’m holding my knee even before I hit the floor. Wincing. “The pain for the first 10-15 seconds was really bad, then it disappeared. I thought: ‘Have I just made a big deal of this?’ Then I felt my knee and I knew. “The doc tested it. I was crying. I knew my season was over, the Euros were gone. I got wound up. “I went to the hotel and waited for the lads. They all came up to me, it was nice. Martin came and sat with me. He just said he couldn’t believe it. He said nice things, that I’d be back. Roy did the same. He talked about when he’d done his. I was thinking: ‘God, it’s Roy Keane.’ I know he’s our assistant manager but when I was growing up he was one of the best players in the world. “I flew home the next morning.” APRIL “I went down to London, saw surgeon Andy Williams. April 5th. It was a horrible time but I was so well looked after. “It was so painful. The physio, she was asking me to move my knee and it was so hard. I was sweating. You’re thinking: ‘Jesus, am I going to play again?’ It makes you realise how serious it was. “I was on crutches for six, seven weeks. Our physio, Sean Beech, was magnificent. “From January to March our results weren’t great but my performances were getting better. I was growing. I felt in a really good place, really comfortable. My mentality was really good. The one thing that’s gutting is that I’ve lost that momentum. “I’d my 30th birthday. That was probably the toughest day. We played Crystal Palace. My little boy Max was supposed to be mascot and I was to lead him out. It was something I could remember for ever and ever, something I could show him. “I was in pain that day, I probably shouldn’t have gone to the game, my knee was killing me. It was s**t. Selfishly those are the milestones. 
Other than when I cried when I first did it, that was the only other day when I’ve been really down.” MAY Newcastle are relegated. “It was probably waiting to happen for three years. It was solemn. “But then the whole club turned. It was like the closing of a chapter. “Then the questions: ‘Will Rafa stay?’ ‘Are things going to change the way we do things at the club?’ Luckily those things have happened.” On the last day of the season, relegated Newcastle beat Tottenham 5-1. “If you needed a game to convince a manager, it was the Tottenham game, it showed what the club could be. The best I’ve ever seen this club was the day we got relegated. “The mentality of the group changed. It became more collective. Maybe players had come to do well at Newcastle in order to move on. No disrespect to them, but as a club Newcastle United shouldn’t accept that. We should be the pinnacle. “Rafa sent me a message. I was having a thigh operation, they cut the tendon off completely. “I got a text. ‘Hi Rob, hope injury is well.’ He asked me which players I ‘like for the Championship’. I was half drugged-up, I thought I might put ‘Keegan’. “I thought: ‘Wow, he’s staying and he’s asking my opinion.’ It gave me such a massive lift.” JUNE Euro 2016: “During the Slovakia game friends had been calling. They knew. On the group chat they put up a picture of a Eurostar ticket for me for the Italy game. That was nice. “I’d got over it [missing out]. I remember texting Darren because I was buzzing to watch him – he’s my friend. And of course I wanted to watch the lads. “We went into Lille the day before the game – the Eurostar, my mate had got an Airbnb. Eight, nine of us. “On the day of the game we camped ourselves in this bar, had lunch, a few drinks. There was the Brexit thing. We were sitting there in Lille saying it’s not going to happen, no chance. All the polls said no chance. Then you wake up and it’s happened. You go: ‘Okay!’ I would have stayed had it been me – well, I did vote to stay. 
“The biggest thing I’ve noticed this year is getting away from PC – Brexit, Trump. To be a good leader you need to have made mistakes, got things wrong and learned from them. You need life experience. It’s been a strange year. “In the stadium I was in line where Robbie [Brady] scored. When he scored it was just mental, I thought I’d done my knee again. “I was walking by then. Even though I didn’t get to play, it was just great to go.” JULY “My wife, Robyn, was pregnant, we managed to get a few days away. I started driving again and was doing some work outside. I could cycle without wincing.” AUGUST On the opening day of the season, Newcastle lost 1-0 at Fulham. “I went, I love going. We were poor. It was a wake-up call for the lads – how big Newcastle are in the Championship. “In the dressing room after, I was surprised, the lads were all chatting about what could be done better. I don’t know if it would have been like that last season. There was a group trust developing. “Then we lost to Huddersfield at home. Again, I think that was good. It showed us how teams were going to play at St James’. We’d lost six points, I remember saying that there were over 130 points left to play for: ‘Don’t stress.’” SEPTEMBER Elliot signed a new contract. “I love living up here, we’ve really settled. I wanted to sign a new contract that showed I was part of the club again and part of its future. “It means something – if you don’t feel part of where you are, you can drift. I’m signed to 2020 with a two-year option. I was really happy.” OCTOBER “I started going outside with Simon Smith, our goalie coach. The repetition made my knee better and better. It gave all the work I’d done in the gym a meaning. October was a big month for me.” NOVEMBER “Towards the end I joined in a 5-a-side, which I shouldn’t have done because you’re twisting and turning. Everything I’d done before that was controlled. But I did it and I felt amazing.” DECEMBER “I’d a reserve game at St James’ v Aston Villa. 
I was captain. I looked at their team and didn’t really recognise anyone, it was a young team. I was hoping one of them wouldn’t get excited and smash into me. You do think that – the one thing you can’t control are collisions. “The game kicked off and I conceded immediately. But after that, fine, loads of kicking. “Carrie Fisher died. The big thing for me this year – this injury – is learning to appreciate what you’ve got. “We’ve a new baby [daughter Oa]. I’ve ruptured my knee but I’ve signed a new contract. I’m moving to the coast, to a house I never thought I could live in. As you get older you have to appreciate how lucky you are, to have this lifestyle. “Hopefully I’ll get back in the squad, back on the bench, back with the lads. I still speak to Seamus. It’d be great to be back involved for March, when we play the next qualifier. There’s still loads in front of me.”
What you can do now to prepare for ICD-10. The United States is moving toward adoption of the 10th version of the World Health Organization's International Classification of Diseases (ICD) codes. Because the change will have a significant impact on electronic health record and billing systems, ICD-10 is being rolled out in phases over the next couple of years. Physicians will need to begin using the new diagnosis codes starting in October 2013. This article describes the differences between ICD-9 and ICD-10 and the steps physicians and clinics can take now to prepare for the implementation.
Incidence of maternal Toxoplasma infections in pregnancy in Upper Austria, 2000-2007 Background Despite three decades of prenatal screening program for toxoplasmosis in Austria, population-based estimates for the incidence of maternal infections with Toxoplasma gondii during pregnancy are lacking. We studied the incidence of primary maternal infections during pregnancy in the Federal State of Upper Austria. Methods Screening tests for 63,416 women and over 90,000 pregnancies (more than 84.5% of pregnancies in the studied region) in the time period between 01.01.2000 and 31.12.2007 were analysed. The incidence of toxoplasmosis was estimated indirectly by binomial and directly by interval censored regression. Results During the studied period, 66 acute infections (risk of 0.07% per pregnancy) were detected, but only 29.8% of seronegative women were tested at least three times during their pregnancies. The seroprevalence of Toxoplasma antibodies among all tested women was 31%. Indirectly estimated incidence (from differences in prevalence by age) was 0.5% per pregnancy, while directly estimated incidence (interval censored regression) was 0.17% per pregnancy (95% confidence interval: 0.13-0.21%). Conclusions Calculating incidence from observed infections results in severe underreporting due to many missed tests and potential diagnostic problems. Using statistical modelling, we estimated primary toxoplasmosis to occur in 0.17% (0.13-0.21%) of all pregnancies in Upper Austria. Background Congenital toxoplasmosis is among the infections associated with a high risk of complications, but fortunately acute infections during pregnancy are relatively rare. Due to the potential to cause life-long disability, the burden of disease of congenital toxoplasmosis is considerable. 
In order to prevent foetal infections and complications of toxoplasmosis, screening programs during pregnancy and a subsequent treatment of identified maternal primoinfections were introduced in a few countries [1,2]. Austria was the first country to start with population-wide free screening and treatment of maternal infections in 1975, soon followed by France. Nonetheless, little is known about the incidence of these infections from these countries despite their long tradition of toxoplasmosis prevention. We used data from a screening laboratory that covers most of the population of one federal state in Austria in an attempt to determine the incidence in this region. Sample We retrospectively analysed serological data of all pregnant women aged 15-45 years insured by the OGKK ("Oberösterreichische Gebietskrankenkasse": Upper Austrian Regional Health Insurance) and with place of residence in Upper Austria. The OGKK is the largest statutory health insurance company in Upper Austria. Based on a special agreement with the health insurance company, all serological tests for Toxoplasma-specific IgG and IgM antibodies were conducted in one single laboratory (analyse BioLab GmbH, Linz). Information on gestational week when the screening was performed and the date of delivery was not available. We included only women for whom it could be assumed that their last test in a given pregnancy was conducted in the period from 01.01.2000 to 31.12.2007. Tests were classified as belonging to the same pregnancy when they were performed within a time window of 200 days (the analysis was also repeated using 300 days as a time window). According to the regulations in Austria, screening has to be performed before the sixteenth week of gestation and repeated in seronegative women in the fifth and eighth pregnancy month. Austrian experts recommended the application of shorter, eight-week screening intervals in 2005. Diagnostic tools The diagnostic algorithm is presented in Figure 1.
All tests with an IIFT titer of 1:16 or higher were defined as seropositive. A suspected acute infection in pregnancy was defined by the following findings: anti-Toxoplasma-specific IgM-antibodies positive (>0.65) and low (<0.2) Toxoplasma-specific IgG-avidity. A suspected infection was considered as proven (and classified as certain infection in our analysis) when there was a more than fourfold antibody-titre rise. Given the difficulties of assessing the threshold in the IIFT when seroconversions occurred in a short time period, but were not accompanied by a positive IgM or a low avidity they were considered false positive and were excluded. Data flow and data protection Data was extracted from the laboratory software Basu-Lab (Berger Analysen und Informationstechnik GmbH, Puchenau, Austria) and imported into STATA, version 8.2 (Statacorp, College Station, TX, USA) for all subsequent analyses (STATA-log-file available from the corresponding author on request). To ensure data protection and to meet the obligations of the Austrian data protection law ( § 46 2 and § 46 Datenschutzgesetz 2000), personal identifiers were replaced by unique pseudonyms. Furthermore, the place of residence and its postal code were replaced by the corresponding NUTS-3 regions (AT311: "Innviertel", AT312: "Linz-Wels", AT313: "Muehlviertel", AT314: "Steyr-Kirchdorf", AT315: "Traunviertel" ) and an indicator variable for the three big cities of Linz, Wels or Steyr (the former two being part of region AT312 and the latter part of AT314). The study was reviewed and approved by the ethics committee of the Elisabethinen Hospital Linz, Austria. Statistical analysis Firstly, we estimated the crude incidence from observed primoinfections during pregnancy. As testing did not cover the whole pregnancy for many seronegative women, we expected to miss a lot of infections and to underestimate the incidence. 
We therefore used further indirect and direct methods to estimate the true infection rate in pregnancy. From a binomial regression model, we estimated the increase in the seroprevalence per year of age and calculated the increase corresponding to the pregnancy duration of 268 days to obtain incidence under the assumption that differences in prevalence by age reflect new infections (indirect method). Since diagnosing seroprevalence is less error prone than correctly assessing the very rare event of acute infection, this method was robust against diagnostic errors. We subsequently analysed the incidence of Toxoplasma infections during pregnancy in seronegative women by means of interval censored regression (direct method). Interval censored regression allows one to account for the fact that in the case of a positive test it was only known that the infection occurred in the preceding time interval since the last negative test. Again, the estimate was recalculated to the period of 268 days. In order to obtain the incidence in relation to all pregnant women (as typically reported in other studies), the result was multiplied by (1-seropositive fraction). Since this analysis was based only on time during pregnancy, we were able to use information about IgM and avidity to rule out false positive results of the IIFT test. Within a pregnancy, screening tests were usually only about 3 or 4 months apart, and IgM remains positive and avidity low in this time span after an acute infection. Seroprevalence of Toxoplasma infections among pregnant women There were 275,842 test results in the database in total ( Figure 2). Inclusion criteria for the study population were met by 63,416 women in the dataset. These women contributed 92,365 pregnancies, based on the 200 days estimate. This number only slightly decreased when a more conservative estimate of 300 days was used. The total population for the studied region is around 1.4 million. 
In Upper Austria, there were 109,327 live births in total in the years 2000-2007. The total number of pregnancies including spontaneous and induced abortions and stillbirths was certainly substantially higher, but most of the spontaneous and induced abortions will happen before the seroprevalence testing, leaving only the stillbirths (<0.1% of live births) which are unaccounted for. Dividing 92,365 pregnancies included in our study by 109,327 live births in the region, we concluded that our data covered more than 84.5% of all live births in Upper Austria during this period. At their first examination in the study period, women had a median age of 28.3 years (interquartile range (IR) 24.3 to 32.2 years). The seroprevalence at the first examination was 30.6%. At their latest examination, women were on average 1.3 years older and the seroprevalence was slightly higher (31.7%). The seroprevalence increased in a linear manner with age (p < 0.01 for trend, Figure 3) and was significantly lower in cities (Table 1) than in the larger regions (p < 0.01, regardless of whether prevalence at first or latest examination was studied). Suspected and certain Toxoplasma primoinfections detected during pregnancy The case definition for a suspected primoinfection during pregnancy was met by 222 women. Their median age was 27.8 years (IR 24.6-32.2 years). Of those 222 cases, 66 (29.7%) were classified as certain (i.e., these women had at least two tests with discordant results during the same pregnancy). Table 1 shows the distribution of the cases by regions and the corresponding incidence rates, with lower rates in cities than in regions including rural areas. The rural-urban difference was significant for suspected infections (p < 0.01), but not for certain infections (p = 0.18). The yearly numbers of cases ranged from 16 to 41 (mean: 26.5) for suspected infections and from 5 to 12 (mean: 8.3) for certain infections. No clear trend over time was observed (data not shown).
Estimated incidence of acute Toxoplasma infections during pregnancy The results of indirectly estimating incidence rates from age-related differences in seroprevalence are presented in Table 2 (first two columns). Consistent with estimates based on observed cases, the incidence rates appeared to be lower in the cities than in other regions, but the difference is not significant. The interval censored regression yielded a substantially lower estimate for incidence of toxoplasmosis in all pregnancies with 0.17% (0.13-0.21%) ( Table 2, columns 3 and 4). The results were virtually unchanged when 300 days were used instead of 200 days to define tests belonging to one pregnancy. Similarly to the binomial regression model, the estimated incidence rates were slightly lower in the three biggest cities than in overall Upper Austria, but again the difference was statistically not significant. A model including calendar years did not show a significant change over time. Based on the findings from interval censored regression, we estimated that there were 152 (95% confidence interval: 118-196) acute Toxoplasma infections during pregnancy in the years 2000-2007 in the study sample (based on 92,365 pregnancies in the same period). Coverage of pregnancy with screening in seronegative women In the study population, 38,576 women had their latest screening (based on the 200 days time window) and were seronegative in this examination. When only the latest pregnancy for each woman was included, we Table 1 Seroprevalence and observed (suspected and certain) primoinfections by region Discussion Our study estimated prevalence and incidence of toxoplasmosis and coverage with screening in pregnant women in Austria. The estimated seroprevalence of about 31% in pregnant women is in line with findings from other countries in Europe. As expected, seroprevalence was higher in rural areas than in cities. 
The three recommended screening tests were conducted in only about 29.8% of seronegative women, despite the fact that about 95% of OGKK members attended all the check-ups of the Austrian maternal care program in pregnancy. A recent study from a region in southeast France reported similar problems: Only 40% of pregnant women had all seven or more recommended tests. Poor compliance to a complete screening program jeopardizes a direct analysis of the incidence of Toxoplasma infections in pregnancy. Consequently, incidence based on observed cases only resulted in severe underestimation if only certain diagnoses (0.07%) were considered. A certain diagnosis requires more than one test in pregnancy and, therefore, misses infections that occurred in early pregnancy before the first test. In addition, the period between the latest examination and birth is not included in the analysis. If only a single test result was available, infection could be only suspected, since high IgM and low avidity do not rule out a past infection. Therefore, incidence based on observed suspected infections suffers from both an underestimation due to cases which were not observed, and an overestimation caused by false positive IgM and avidity tests. Statistical methods are therefore necessary to derive estimates of true incidence. We used an indirect approach : the age-specific seroprevalence suggested a linear association between age and seroprevalence (Figure 3), as also observed by others. The estimates derived for incidence using this approach were higher than those obtained from observed suspected cases (0.5% per 100 pregnancies). While false test results are unlikely to cause a substantial overestimation in this method, differences in age-specific prevalence can be subject to age cohort effects, with a share of infections taking place in younger years of life but decreasing over time. 
A decrease in the seroprevalence of Toxoplasma infections over time that may lead to overestimation in the indirect estimate has been observed in several European countries. Consistently, a seroprevalence of 41% reported for 1995/96 in Upper Austria was considerably higher than our findings for 2000-2007. The reliability of the data for 1995/96 was questioned, but other reports from Austria also suggested a decreasing seroprevalence in the region, not only in humans but also in animals that are important for the transmission of disease to humans. A decreasing trend is also in line with findings in The Netherlands comparing 1995/1996 and 2006/2007. Furthermore, the seroprevalence estimate is mostly based on the non-pregnant time. Women during pregnancy might be more conscious about avoiding potential sources of infection, such as eating undercooked meat and contact with contaminated soil. Therefore, incidence of Toxoplasma infections during pregnancy in the same age group could be lower than in non-pregnant women. This effect might be partly compensated by an opposite bias, as pregnancy has been shown to be a risk factor for Toxoplasma infection in an epidemiological study from Brazil. The authors assumed changes in lymphocyte functions during late pregnancy, which led to some level of immunosuppression towards protozoal infections and to explain this increased susceptibility. As late stages of pregnancy were underrepresented in our study due to the poor adherence to the screening scheme, changes in immunity might not play a major role. Overall, we conclude that estimating incidence from age-specific prevalence might not provide valid results for the true incidence. The interval censored regression directly assessing incidence during pregnancy, appears to be the most appropriate approach to estimate the true incidence. However, the method is based directly on the rare event of acute infections and is therefore more affected by an imperfect specificity of testing. 
Interval censored regression depended on clear cut IIFT tests distinguishing seronegative from seropositive results and on IgM and avidity test results. We identified the following information regarding test characteristics: in the laboratory of analyse Biolab GmbH, 1,039 sera tested by IIFT were compared to the AxSYM and ARCHITECT test kits for anti-Toxoplasma gondii-IgG (Abbott Laboratories, Abbott Park, Illinois), with two investigators reading the IIFT. Sensitivity and specificity were 99.7% and 97.2% for the first investigator and 96.8% and 99.4% for the second investigator for AxSYM, and 99.7% and 98.3%/ 96.6% and 99.2% for ARCHITECT, respectively . According to the manufacturer's product information regarding sera from pregnant women, sensitivity of VIDAS IgM is 96.0% (95% confidence interval: 91.4-98.2%) and 100% of pregnant women with an acute infection not more than 4 months old show a low IgG antibody avidity (95% confidence interval: 98.1-100.0%). False positive results can be ruled out in the subsequent avidity testing, while false negative tests escape further diagnostics. Fortunately, sensitivity is particularly high, resulting in a marginal underestimation only. However, there is a potential mechanism which could cause a more substantial underestimation: using only times between tests during pregnancy excludes early pregnancy in which women might not be aware of being pregnant and thus be less careful in avoiding the exposure to toxoplasmosis. The contribution of this mechanism depends on the fraction of unplanned pregnancies and consciousness in avoiding sources of infection during early pregnancy. Strengths and limitations The strength of our study is that we were able to analyse more than 84.5% of pregnancies leading to life births in Upper Austria. OGKK covers all social classes, the catchment area was clearly defined and only pregnant women were included. 
In most regions in Austria, screening is performed in several laboratories and it is difficult to assemble their screening data. Analysis of subsequent tests requires personal identifiers and exchange of this information between several institutes is complicated by personal data protection requirements. The use of routine data on toxoplasmosis testing in most other countries in the world (including the USA) is hampered by the fact that usually only privileged groups have access to screening. Due to the missing information on parity, we could not provide separate estimates by parity. As seroprevalence increases with age, rates are also typically lower in primipara than in multipara. Unfortunately, we did not have any information about the gestational week at the time of infection. This information is important if complications of the infection should be studied. However, it is beyond the scope of this analysis to provide information about maternal-foetal transmission rates and the rate of children with clinical sequels in cases of congenital toxoplasmosis. Various studies gave heterogeneous information about these rates and were questioned with regard to their data quality. We did not have information to study individual risk factors affecting incidence beyond place of residence. In an earlier analysis using the same data, a seasonal trend with a slight increase of diagnoses in winter (probably reflecting more infections in the fall) has been described. Another problem is the clear allocation of patients to the study period. A pregnancy with several serological checks is not a time point but a time span. We used the last examination per pregnancy to decide on its allocation. In addition, we investigated a large, eight-year study period to reduce the number of pregnancies crossing the start or the end of the study period. Conclusions Using statistical models, we estimated the incidence of maternal Toxoplasma primoinfections in pregnancy in Upper Austria, 2000 -2007. 
All approaches to determine the incidence of Toxoplasma infections in pregnancy suffered from limitations. We consider the proportion of observed certain cases only (0.07%) the low bound and the estimate based on age-specific seroprevalence (0.5%) the high bound, and propose the interval censored regression model (0.17%) as the best estimate.
<reponame>ShowKa/HanbaiKanri<filename>src/main/java/com/showka/service/query/u05/UriageKeijoQueryImpl.java
package com.showka.service.query.u05;

import java.util.Date;
import java.util.List;
import java.util.Optional;
import java.util.stream.Collectors;

import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;

import com.showka.domain.builder.BushoUriageBuilder;
import com.showka.domain.u05.Uriage;
import com.showka.domain.u05.UriageRireki;
import com.showka.domain.u17.BushoUriage;
import com.showka.domain.z00.Busho;
import com.showka.entity.RUriage;
import com.showka.entity.RUriageKeijo;
import com.showka.entity.RUriageKeijoTeisei;
import com.showka.entity.RUriagePK;
import com.showka.repository.i.RUriageKeijoRepository;
import com.showka.repository.i.RUriageKeijoTeiseiRepository;
import com.showka.repository.i.RUriageRepository;
import com.showka.service.query.u05.i.UriageKeijoQuery;
import com.showka.service.query.u05.i.UriageRirekiQuery;
import com.showka.value.EigyoDate;
import com.showka.value.Kakaku;

/**
 * Query service for sales posting (売上計上).
 *
 * <p>
 * Aggregates posted sales amounts per department and business date, keeping
 * ordinary postings and correction (訂正) postings separate.
 * </p>
 */
@Service
public class UriageKeijoQueryImpl implements UriageKeijoQuery {

	@Autowired
	private RUriageKeijoRepository repo;

	@Autowired
	private RUriageKeijoTeiseiRepository repoTeisei;

	@Autowired
	private RUriageRepository rUriageRepository;

	@Autowired
	private UriageRirekiQuery uriageRirekiQuery;

	@Override
	public BushoUriage getBushoUriage(Busho busho, EigyoDate date) {
		// aggregate posting amount (corrections excluded) and correction amount
		int postedAmount = this.getKeijoKingaku(busho, date);
		int correctionAmount = this.getTeiseiKingaku(busho, date);
		// assemble the department-level sales summary
		BushoUriageBuilder builder = new BushoUriageBuilder();
		builder.withBusho(busho);
		builder.withKeijoDate(date);
		builder.withKeijoKingaku(postedAmount);
		builder.withTeiseiKingaku(correctionAmount);
		return builder.build();
	}

	@Override
	public boolean hasDone(Uriage uriage) {
		// look up the sales history record matching this sale and posting date
		RUriagePK pk = new RUriagePK();
		pk.setKeijoDate(uriage.getKeijoDate().toDate());
		pk.setUriageId(uriage.getRecordId());
		RUriage historyRecord = rUriageRepository.getOne(pk);
		// posted if a posting entity exists for that history record
		return repo.existsById(historyRecord.getRecordId());
	}

	/**
	 * Get the department's sales postings for the given posting date.
	 *
	 * @param busho department
	 * @param date  posting date
	 * @return sales posting entities
	 */
	List<RUriageKeijo> get(Busho busho, EigyoDate date) {
		// sales history records eligible for posting on that date
		List<RUriage> historyRecords = uriageRirekiQuery.getEntityList(busho, date);
		// their record ids key the posting entities
		Iterable<String> historyRecordIds = historyRecords.stream()
				.map(RUriage::getRecordId)
				.collect(Collectors.toList());
		return repo.findAllById(historyRecordIds);
	}

	/**
	 * Total the department's posted sales amount for the given posting date.
	 *
	 * <pre>
	 * Correction (訂正) amounts are excluded.
	 * </pre>
	 *
	 * @param busho department
	 * @param date  posting date
	 * @return total posted amount (tax excluded)
	 */
	int getKeijoKingaku(Busho busho, EigyoDate date) {
		List<RUriageKeijo> postings = this.get(busho, date);
		return postings.stream().mapToInt(posting -> {
			UriageRireki history = uriageRirekiQuery.get(posting.getUriageId());
			Optional<Uriage> sale = history.getUriageOf(date);
			// A missing sale for this date means inconsistent data;
			// failing fast here (NoSuchElementException) is intentional.
			Kakaku total = sale.get().getUriageGokeiKakaku();
			return total.getZeinuki().intValue();
		}).sum();
	}

	/**
	 * Total the department's sales-correction amount for the given posting date.
	 *
	 * <pre>
	 * Corrections are aggregated as negative yen.
	 * </pre>
	 *
	 * @param busho department
	 * @param date  posting date
	 * @return total correction amount (negative, tax excluded)
	 */
	int getTeiseiKingaku(Busho busho, EigyoDate date) {
		List<RUriageKeijo> postings = this.get(busho, date);
		// correction entities are keyed by the posting record ids
		Iterable<String> postingIds = postings.stream()
				.map(RUriageKeijo::getRecordId)
				.collect(Collectors.toList());
		List<RUriageKeijoTeisei> corrections = repoTeisei.findAllById(postingIds);
		return corrections.stream().mapToInt(correction -> {
			UriageRireki history = uriageRirekiQuery.get(correction.getUriageId());
			Date pastKeijoDate = correction.getTeiseiUriageRirekiKeijoDate();
			Optional<Uriage> sale = history.getUriageOf(new EigyoDate(pastKeijoDate));
			// A missing sale for this date means inconsistent data;
			// failing fast here is intentional.
			Kakaku total = sale.get().getUriageGokeiKakaku();
			// corrections count as negative amounts
			return total.getZeinuki().intValue() * -1;
		}).sum();
	}
}
Rumor Mill: A Baby for Beyoncé? It was only four days ago that Beyoncé was celebrating her marriage to long-time boyfriend Jay-Z at a party in his lavish NYC penthouse, and now OK! has learned that the couple could soon have a baby on the way. “I’ve heard that Beyoncé is pregnant from at least two people,” a source close to the couple tells OK!. In past interviews, the Crazy in Love singer has talked cautiously about her desire to start a family. "You can’t rush a man into anything, whether it’s a relationship, marriage or having children," Beyoncé has said. "When he’s ready he’ll let you know." Even friend Vivica A. Fox can’t wait for the happy couple to start a family. "I wish them many, many years of happiness and some babies," she tells OK!. "I think she will be really pretty pregnant, just like J. Lo and Halle Berry." Keep a lookout for a growing baby bump if the rumors hold true.
Patella resection and patellectomy for comminuted fractures The work is based on the analysis of the results of surgical treatment of 106 patients with multiple fractures of the patella, of which 92 underwent patella resection and 14 underwent patellectomy. The indications for these operations are substantiated, and the details of the surgical technique are described. To eliminate tension on the tendon-bone contact line (for resection) and tendon-tendon (for patellectomy), a locking wire loop was used. Original methods have been developed for replacing extensor defects after resection of the lower third of the patella with autografts from the rectus muscle tendon and from the patellar ligament. Long-term results studied in 76 patients confirm the advantages of the proposed patellar resection and patellectomy techniques.
The pathophysiology and clinical aspects of hypercalcemic disorders. For the purposes of this review, the vast and increasingly complex subject of hypercalcemic disorders can be broken down into the following categories: Physiochemical state of calcium in circulation. Pathophysiological basis of hypercalcemia. Causes of hypercalcemia encountered in clinical practice: causes indicated by experience at the University of California, Los Angeles; neoplasia; hyperparathyroidism; nonparathyroid endocrinopathies; pharmacological agents; possible increased sensitivity to vitamin D; miscellaneous causes. Clinical manifestations and diagnostic considerations of hypercalcemic disorders. The management of hypercalcemic disorders: general measures; measures for lowering serum calcium concentration; measures for correcting primary causes—the management of asymptomatic hyperparathyroidism.
Evaluation of permethrin-treated military uniforms for personal protection against malaria in northeastern Thailand. A trial to compare the effect of military clothing treated by high-pressure spray with permethrin or placebo on the incidence of malaria in Royal Thai Army troops was conducted in northeastern Thailand. Bioassays of treated clothing using laboratory-reared Anopheles dirus females showed permethrin remained in the treated fabric for up to 90 days. Both permethrin- and placebo-treated uniform shirts provided > 84% protection from biting An. dirus in laboratory bioassays for the duration of the study. In laboratory tests, knockdown of An. dirus exposed to permethrin-treated cloth fell to < 20% after 3 hand washes, despite the presence of 28.7-59.9% of the original dose of permethrin. The use of permethrin-treated uniforms without adjunct application of topical repellents did not reduce malaria in Thai troops in an operational setting where incidence during 6 months was as high as 412 cases/1,000 in spite of chemoprophylaxis and use of untreated bednets.
// Copyright 2018 The MATRIX Authors as well as Copyright 2014-2017 The go-ethereum Authors
// This file is consisted of the MATRIX library and part of the go-ethereum library.
//
// The MATRIX-ethereum library is free software: you can redistribute it and/or modify it under the terms of the MIT License.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"),
// to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense,
//and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject tothe following conditions:
//
//The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
//
//THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
//FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
//WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISINGFROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
//OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

package liner

import "unicode"

// These character classes are mostly zero width (when combined).
// A few might not be, depending on the user's font. Fixing this
// is non-trivial, given that some terminals don't support
// ANSI DSR/CPR

// zeroWidth: rune ranges treated as occupying no terminal column —
// combining marks (Mn, Me) and control/format characters (Cc, Cf).
var zeroWidth = []*unicode.RangeTable{
	unicode.Mn,
	unicode.Me,
	unicode.Cc,
	unicode.Cf,
}

// doubleWidth: CJK script ranges whose runes occupy two terminal columns.
var doubleWidth = []*unicode.RangeTable{
	unicode.Han,
	unicode.Hangul,
	unicode.Hiragana,
	unicode.Katakana,
}

// countGlyphs considers zero-width characters to be zero glyphs wide,
// and members of Chinese, Japanese, and Korean scripts to be 2 glyphs wide.
func countGlyphs(s []rune) int { n := 0 for _, r := range s { // speed up the common case if r < 127 { n++ continue } switch { case unicode.IsOneOf(zeroWidth, r): case unicode.IsOneOf(doubleWidth, r): n += 2 default: n++ } } return n } func countMultiLineGlyphs(s []rune, columns int, start int) int { n := start for _, r := range s { if r < 127 { n++ continue } switch { case unicode.IsOneOf(zeroWidth, r): case unicode.IsOneOf(doubleWidth, r): n += 2 // no room for a 2-glyphs-wide char in the ending // so skip a column and display it at the beginning if n%columns == 1 { n++ } default: n++ } } return n } func getPrefixGlyphs(s []rune, num int) []rune { p := 0 for n := 0; n < num && p < len(s); p++ { // speed up the common case if s[p] < 127 { n++ continue } if !unicode.IsOneOf(zeroWidth, s[p]) { n++ } } for p < len(s) && unicode.IsOneOf(zeroWidth, s[p]) { p++ } return s[:p] } func getSuffixGlyphs(s []rune, num int) []rune { p := len(s) for n := 0; n < num && p > 0; p-- { // speed up the common case if s[p-1] < 127 { n++ continue } if !unicode.IsOneOf(zeroWidth, s[p-1]) { n++ } } return s[p:] }
<filename>src/ui/components/AnswerView.tsx import React from 'react'; import {ColorValue, StyleSheet, Text} from 'react-native'; import fonts from '../../styles/fonts'; import {Question} from '../../types/model-types'; import Card from './Card'; type Props = { index: number; question: Question; }; /** * This component is used to present how the user has answered a particular question. * <br><br> * If the answer is correct, the background would be in green color. * If the answer is incorrect, the background will be in crimson color. * If the user hasn't answered the question, the background will be in dark orange color. * * @param index The question number. Will be incremented by 1 upon display. * @param question The question to be displayed. */ export default function AnswerView({index, question}: Props) { let backgroundColor: ColorValue; if (question.given_answer === question.correct_answer) { backgroundColor = 'green'; } else if (question.given_answer === '') { backgroundColor = 'darkorange'; } else { backgroundColor = 'crimson'; } return ( <Card style={[styles.container, {backgroundColor: backgroundColor}]}> <Text style={styles.questionNumber}>Question no. {index + 1}</Text> <Text style={styles.question}>{question.text}</Text> <Text style={styles.answerLabel}>Correct answer</Text> <Text style={styles.answerText}>{question.correct_answer}</Text> <Text style={styles.answerLabel}>Your answer</Text> <Text style={styles.answerText}> {question.given_answer.length > 0 ? 
question.given_answer : '-'} </Text> </Card> ); } const styles = StyleSheet.create({ container: { marginHorizontal: 16, marginBottom: 24, }, questionNumber: { fontFamily: fonts.light, fontSize: 12, color: 'whitesmoke', }, question: { paddingTop: 4, paddingBottom: 8, fontFamily: fonts.medium, fontSize: 24, color: 'white', }, answerLabel: { fontFamily: fonts.medium, fontSize: 12, color: 'lavender', paddingTop: 16, }, answerText: { fontFamily: fonts.medium, fontSize: 16, color: 'white', paddingTop: 4, }, });
Law, Religion and Theology This is a valuable study, developed from a Harvard doctorate. The author provides an overview of biblical sources (legal and narrative) and consistently compares them with sources from the ANE (going well beyond the law collections). She sees the biblical traditions as largely independent and stresses the diversity to be found also within the ANE. Biblical law in this context is based upon family solidarity (the blood feud being a decentralized form of legal regulation), as contrasted with the greater central control found in Mesopotamia. She is sceptical of developmental interpretations of the relationship between the principal homicide passages in the Covenant Code (Exod. 21.12-14), Deuteronomy 19 and Numbers 35, and stresses the differences in theological outlook, particularly between the latter two. She considers, in particular, their respective accounts of places of refuge, the role of pollution, and distinctions as regards the mental state of the offender. An opening chapter sets the scene by discussing the case of Cain and Abel, and the book concludes with chapters devoted to the lex talionis and homicide of a foreign citizen (as found in ANE documents). On a number of points, this reviewer takes a different view (see further in the Zeitschrift für altorientalische und biblische Rechtsgeschichte 2006). Future students of this topic will, however, profit from this presentation. B.S. JACKSON
THE MAGNITUDE OF PRESCRIBED ANTIBIOTICS IN PEDIATRIC EMERGENCY DEPARTMENT IN BASRA HOSPITAL FOR MATERNITY AND CHILDREN Introduction: Antimicrobial agents are common employ in paediatric patients. Emergency department make good place for known prescribing pattern of antibiotics with frequent use for disease that interfere in treatment between private pharmacy & hospital. The irrational and overuse of antibiotic in last decades did not follow the international guidelines and infectious strategy which if not controlled or minimize will lead to higher rates of mortality in human societies. The aim was to find the magnitude of antibiotics prescribing in children internal emergency department at Basra hospital for Maternity and Children & showing relationship with misuse of it Methods: The study was performed on 560 paediatrics patients aged (1 month -13 years) that seen in the (Basra Hospital for Maternity and Children) emergency department during 4 months from December 2017 to March 2018. These cases were dividing according to containment antibiotic and several parameters such as patient diagnosis and number of antibiotics prescribed. Results: A total of 61% (n=342) patients were males. The average number of antibiotics per patient was 1.45 Prescription did not contain antibiotics 28.57% (n=160) Prescription contain antibiotics 71.42% (n =400) of all patient from total prescribed. Most cases diagnosis for prescribing antibiotics are (24%) Gastroenteritis (16.25%) bronchiolitis and (11.75%) pneumonia. Conclusion: The dispensing of antibiotics is not following constant or international guidelines which will cause problems like resistance and economic side also. Nearly almost the admitted patients received antibiotics regardless the culture results.
The IT industry has never been as popular as it is today, given that our day-to-day lives involve computers, mobile phones, tablets and gaming consoles to keep our schedules on track, complete work or study assignments or just to keep us busy for a few minutes. The number of certified IT professionals who have undertaken Microsoft MCSA online courses has increased by an astounding amount over the last 15 years, but there are still those that deem it unnecessary to undertake studies in IT before applying for a position, as they expect training to be delivered via their company. Consider then that, after studying source code and writing payroll programs, Microsoft founder, Bill Gates, started a small shop called “Traf-O-Data” which designed a computer used by the city of Seattle to count traffic flow. Currently, Microsoft’s latest operating system, Windows 10, has been installed on over 110 million devices. This speaks volumes for the Microsoft MCSA online courses and gaining the necessary knowledge when attempting to enter a trade. Microsoft Certified Solutions Associate (MCSA) overview Microsoft Certified Solutions Associate is a group of Microsoft MCSA online courses specifically designed to guide individuals that are just stepping into the IT field towards a meaningful and reliable certification and is a prerequisite to gaining your MCSE (Microsoft Certified Solutions Expert) certification. The Microsoft MCSA online courses have helped many IT professionals get their careers underway and continue to do so to this day. The mere fact that you have an MCSA certification on your CV will boost your chances to be considered for a position in IT, given that it a well-known and proven certification which is awarded by the biggest IT organisation in the world and will show that you truly have the passion that potential employers look for in a candidate. 
List of Microsoft MCSA Online Courses Below you will find a complete list of all the Microsoft MCSA online courses available and their accompanying exams. MCSA: Windows Server 2012 The MCSA Windows Server 2012 course will teach students a firm understanding of databases, servers and networking using Windows server 2012 operating system. The exams that need to be passed to gain this qualification are: • 70-410: Installing and Configuring Windows Server 2012 • 70-411: Administering Windows Server 2012 • 70-412: Configuring Advanced Windows Server 2012 Services Gaining this qualification will ensure that you have the necessary skills to confidently apply for positions like Computer Systems Administrator, Computer Network Specialist, Systems Engineer or IT Support Analyst. MCSA: Windows Server 2008 Designed to increase the reliability and flexibility of server infrastructure, this course is designed to teach you how to apply and oversee the Windows Server 2008 operating system. The Windows MCSA: Windows server 2008 will ready you to apply for positions such as It Technician, Network Administrator, Desktop Support Technician or Network Manager. You will need to pass the following exams in order to complete your online MCSA certification: • 70-640: Windows Server 2008 Network Infrastructure, Configuring • 70-642: Windows Server 2008 Active Directory, Configuring • 70-646: Windows Server 2008 Server Administrator MCSA: Windows 10 This certification will teach you to install, configure and further manage the Windows 10 certification. Upon completion of this course and its exam, you will qualify for positions such as Computer Support Specialist, Technical Support Engineer and Desktop Support Analyst. 
For this course, the following exam must be passed to become MCSA certified: • 70-697: Configuring Windows Devices MCSA: Windows 8 Anyone with an interest in becoming a Computer Support Specialist, It Manager or Support Specialist will find much to gain from the MCSA: Windows 8 course. It will teach you how to install Windows 8, resolve any issues that may occur and troubleshoot problems with network connections. The two required exams are as follows: • 70-687: Configuring Windows 8.1 • 70-688: Supporting Windows 8.1 MCSA: SQL Server 2012 This course will perfectly suit aspiring Database Developers or Database Analysts, as it teaches the installation, configuration and maintenance of SQL (Structured Query Language) server services as well as the management and configuration of databases and their security. To complete this Microsoft MCSA online course, the following exams must be undertaken and passed: • 70-461: Querying Microsoft SQL Server 2012 • 70-462: Administering Microsoft SQL Server 2012 Databases • 70-463: Implementing a Data Warehouse with Microsoft SQL Server 2012 MCSA: Office 365 Software as a Service (SaaS) Administrator, Cloud Applications Administrator or Software Administrator are some of the positions you will be able to apply for upon completion of the MCSA: Office 365 course. The MCSA: Office 365 exams are: • 70-346: Managing Office 365 Identities and Requirements • 70-347: Enabling Office 365 Services MCSA: Linux on Azure To gain the MCSA: Linux on Azure qualification, students will need to pass the accompanying exams, namely: • 70-533: Implementing Microsoft Azure Infrastructure Solutions • LFCS: Linux Foundation Certified System Administrator After completion of this course, you will be able to design cloud-based Linux solutions using features offered by Microsoft Azure, as well as proving your capabilities in Linux system administration, opening doors to careers like Linux System Administrator or IT Cloud Solutions Consultant. 
Writing my MCSA certification exam Once you have completed your Microsoft MCSA online courses and passed the relevant exams, you will become officially MCSA certified and well on your way to an exciting career in the field of IT. The Microsoft MCSA exams can, however, only be booked and taken through Pearson Vue or Prometric who will help with the scheduling of your exam, as well as providing any other information you may need regarding the writing of your MCSA exams. After finishing your Microsoft MCSA online courses and gaining your certification, you will have the option of upgrading to a MCSE (Microsoft Certified Solutions Expert) certification which focuses more on the creation, implementation and security of networks and preparing students for roles like Systems Engineer, whereas the Microsoft MCSA online courses deal with the maintenance of those networks once they are in use. This process is explained in our article explaining MCSE courses.
Author Matt Elliot has taken a slice of Whanganui history and created a gripping tale for young readers. Matt Elliot has taken a small slice of Whanganui history and skilfully turned it in to a book for young readers. When ocean liner RMS Lusitania sank off the Irish coast in May 1915 after it was torpedoed by a German U-boat, it triggered anti-German sentiment around the world. The deaths of 1198 British, Canadian and American civilians provoked anger in Whanganui and triggered an incident that made newspaper headlines. A mob attacked the shop of Whanganui pork butcher Conrad Heinold on May 14, 1915, taking exception to his nationality even though he had lived in Whanganui for many years. A young boy was wrongly accused of starting the riot and Elliot has taken details from the newspaper reports of the day as the basis for his book Night of the Riot. Elliot's central character is 12-year-old "Snow" Goodison who works for Mr Schmidt the butcher. This adult reader was highly impressed with Elliot's attention to detail and his ability to re-create the Whanganui of 100 years ago. But what does the target audience think? "Night of the Riot is very interesting and it definitely makes for a different reading experience when you know the area in which it's set well. "Also discovering it was based on a true story gave me a shock as I never knew that the sinking of the Lusitania had quite such an effect on Whanganui and its people. "I particularly enjoyed learning some of the history involved and whenever I go down Victoria Ave I try to guess where Mr Schmidt's shop stood. "In all, I really liked Night of the Riot and I would absolutely recommend it." The addition of a beautifully drawn map of central Whanganui circa 1915 by Melissa Elliot at the front of the book is a real bonus for the reader. Elliot is the author of more than a dozen books and was the 2012 NZ Post Children's Book Awards Book of the Year winner with Nice Day for a War: Adventures of a Kiwi soldier in WWI.
Fifty years after the signing of the landmark Immigration and Naturalization Act, a total of 59 million people have migrated to the United States, according to a new report. Before 1965, immigrants coming to American shores had been primarily European. The legislation, also called the Hart-Celler Act, ended the former system of placing quotas on immigrants by national origin, instead prioritizing skilled workers and family members. Today, one in five immigrants in the world reside in the United States, according to the Pew Research Center report released Monday. Those immigrants and their children have contributed an estimated 55% of the country’s population growth during that time; the U.S. population currently stands at almost 322 million. By 2065, nearly 20% of people in the country will have been born outside of American borders. In 1965, 84% of Americans were non-Hispanic whites, 4% were Hispanic, and less than 1% were Asian. In 2015, the numbers are astonishingly different: 62% of Americans are white, 18% of Americans are Hispanic, and Asians count as 6% of the populace. The most striking transformation in immigrant makeup has been within the Hispanic community, which has seen a drop in unskilled Mexican immigrants. While 35% of the 59 million immigrants in the past 50 years has come from Mexico, South and Central American immigrant populations are now booming in the U.S. Much of this has to do with the Great Recession, says Mark Hugo Lopez, director of Hispanic research at the Pew Research Center. “Many Mexican immigrants are unskilled laborers,” he says. “Think of where that would work best: construction and the other parts of the housing market.” Lopez points to the mushrooming Mexican communities of Atlanta and Las Vegas, both cities with strong housing markets. Since 2005, the U.S. has seen a downward slide in immigrant arrivals. “It’s partially because of the recession,” Lopez says. “It’s harder to cross the Mexican border itself. 
But it’s also because there are more people entering legally”—whether they come from countries like Venezuela, which has the highest-educated Hispanic population in the U.S., or the Asian triumvirate of China, India, and the Philippines. In other words, the 1965 act is doing its job: enticing highly skilled workers to come to America. “Newly arrived immigrants aren’t coming in illegally because they don’t have to, and that’s a big economic change,” Lopez says. “We see Chinese people coming to pursue higher education, Indians in tech, and Filipinos [filling] medical careers.” And while that might seem like a stereotype, Lopez says that it all comes back to the 1965 law’s favoring highly educated immigrant populations. Americans, however, can’t seem to make up their mind about what exactly they think about immigration. On the one hand, 45% say that immigration has made American society better, with 54% saying the immigration system in the U.S. needs to be addressed and an additional 28% going so far as to say it’s a broken system in need of total restructuring. But 37% of respondents say immigration has made American society worse. A plurality see European and Asian immigration positively (44% and 47%, respectively). But Americans are a lot less enthusiastic about Latin American and Middle Eastern populations (with 37% and 39% of those surveyed expressing negativity); 50% of Americans are neutral when it comes to African immigrants. Lopez thinks the one-two punch of a post-9/11 environment combined with a recession marked Hispanics and Middle Eastern groups for negative perceptions. Regardless, Lopez stresses that the act itself was not the origin of modern immigration—a number of factors worked to create the America we know today. “We use the 1965 law to start analysis, but it’s unclear whether the law itself [is the reason for change in American immigration patterns],” he says.
Living From the Divine Ground: Meister Eckhart's Praxis of Detachment Meister Eckhart's notion of detachment constitutes a dynamic and vital key concept that lies at the heart of and unlocks Eckhart's richly textured mysticism. Eckhart makes a valuable contribution to the contemporary discourse on mysticism by emphasizing the dialectical and unbreakable connection between "interiority" and "exteriority" and highlighting the transformative nature of detachment. Detachment, for Eckhart, is not a static concept, but is rather a dynamic apophatic, kenotic, and dialectical activity. Eckhart's notion of detachment, disclosing the "this-worldly" and egalitarian dimensions of his mysticism, teaches us what it means to be truly and authentically human vis--vis self, other, community, and the transcendent.
<filename>iree/compiler/Dialect/Util/Analysis/DFX/Solver.h // Copyright 2021 The IREE Authors // // Licensed under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception #ifndef IREE_COMPILER_DIALECT_UTIL_ANALYSIS_DFX_SOLVER_H_ #define IREE_COMPILER_DIALECT_UTIL_ANALYSIS_DFX_SOLVER_H_ #include "iree/compiler/Dialect/Util/Analysis/DFX/DepGraph.h" #include "iree/compiler/Dialect/Util/Analysis/DFX/Element.h" #include "iree/compiler/Dialect/Util/Analysis/DFX/State.h" #include "iree/compiler/Dialect/Util/Analysis/Explorer.h" #include "llvm/Support/Allocator.h" #include "llvm/Support/raw_ostream.h" #include "mlir/IR/AsmState.h" #include "mlir/Support/LLVM.h" namespace mlir { namespace iree_compiler { namespace DFX { // Fixed point iteration solver ("monotone framework"). // http://symbolaris.com/course/Compilers11/27-monframework.pdf // // Forked from the LLVM Attributor: llvm/Transforms/IPO/Attributor.h // The attributor is an elegant and flexible piece of infra that is tied quite // tightly to LLVM IR. Here we extract it and generalize it to work with MLIR's // concepts of positional values, operations, and blocks. Unlike the Attributor // the solver is only for performing analysis and does no manifestation. We may // want to extend this to integrate into the MLIR folding framework, though. // // Good talks describing how the system works: // https://www.youtube.com/watch?v=I4Iv-HefknA // https://www.youtube.com/watch?v=CzWkc_JcfS0 // // This initial fork is to unblock work that requires this framework. Ideally // we'd upstream this into MLIR proper but there are some missing core // interfaces that keeps it locked here for the moment: in particular we need // tied operands (generalized view-like op interface), globals, and reference // types. 
We also need a lot of tests :) // // NOTE: the solver state - like Explorer - assumes that IR will not be modified // while it is in-use. Modifying the IR invalidates the state and may lead to // crashes as pointer references into the IR structure are retained. class Solver { public: // Creates a solver that uses |explorer| for walking the IR tree and // |allocator| for transient allocations of abstract elements. explicit Solver(Explorer &explorer, llvm::BumpPtrAllocator &allocator) : explorer(explorer), asmState(explorer.getAsmState()), allocator(allocator), depGraph(explorer.getAsmState()) {} // Initialized explorer for walking the IR. Explorer &getExplorer() { return explorer; } // Shared AsmState that can be used to efficiently print MLIR Values. // If this is not used the entire module may need to be walked in order to // get the name of a value each time it's printed. Nothing in this framework // should do that. AsmState &getAsmState() { return asmState; } // An allocator whose lifetime is at least as long as the solver. llvm::BumpPtrAllocator &getAllocator() { return allocator; } // Returns the element of |ElementT| for |pos| and adds a dependency from // |queryingElement| to the returned element with the given |resolution|. template <typename ElementT> const ElementT &getElementFor(const AbstractElement &queryingElement, const Position &pos, Resolution resolution) { return getOrCreateElementFor<ElementT>(pos, &queryingElement, resolution, /*forceUpdate=*/false); } // Returns the element of |ElementT| for |pos| and adds a dependency from // |queryingElement| to the returned element with the given |resolution|. // If the element already exists and the solver is in the UPDATE phase it will // be updated prior to returning as if another iteration had been performed. 
// Returns the element of |ElementT| for |pos|, forcing a re-evaluation of its
// state, and adds a dependency from |queryingElement| to the returned element
// with the given |resolution|.
template <typename ElementT>
const ElementT &getAndUpdateElementFor(const AbstractElement &queryingElement,
                                       const Position &pos,
                                       Resolution resolution) {
  return getOrCreateElementFor<ElementT>(pos, &queryingElement, resolution,
                                         /*forceUpdate=*/true);
}

// Returns the element of |ElementT| for |pos| and optionally adds a
// dependency from |queryingElement| to the returned element with the given
// |resolution|.
//
// Using this after the solver started running is restricted to only the
// solver itself. Initial seeding of elements can be done via this function.
//
// NOTE: |forceUpdate| is ignored in any stage other than the update stage.
template <typename ElementT>
const ElementT &getOrCreateElementFor(Position pos,
                                      const AbstractElement *queryingElement,
                                      Resolution resolution,
                                      bool forceUpdate = false,
                                      bool updateAfterInit = true) {
  // Fast path: reuse an existing element (even one in an invalid state),
  // optionally forcing an update while the solver is in the update phase.
  if (auto *elementPtr =
          lookupElementFor<ElementT>(pos, queryingElement, resolution,
                                     /*allowInvalidState=*/true)) {
    if (forceUpdate && phase == Phase::UPDATE) {
      updateElement(*elementPtr);
    }
    return *elementPtr;
  }

  // No matching element found: create one.
  auto &element = ElementT::createForPosition(pos, *this);
  registerElement(element);

  // Avoid too many nested initializations to prevent a stack overflow.
  static const int maxInitializationChainLength = 1024;
  if (initializationChainLength > maxInitializationChainLength) {
    element.getState().indicatePessimisticFixpoint();
    return element;
  }

  // Bootstrap the new element with an initial update to propagate info.
  {
    ++initializationChainLength;
    element.initialize(*this);
    --initializationChainLength;
  }

  // If this is queried after we've performed iteration we force the element
  // to indicate pessimistic fixpoint immediately.
  if (phase == Phase::DONE) {
    element.getState().indicatePessimisticFixpoint();
    return element;
  }

  // Allow seeded elements to declare dependencies that are preserved for
  // use during fixed point iteration. Phase is temporarily switched to
  // UPDATE so dependence recording in updateElement() behaves normally.
  if (updateAfterInit) {
    auto oldPhase = phase;
    phase = Phase::UPDATE;
    updateElement(element);
    phase = oldPhase;
  }

  // Only record a dependence for the caller if the new element reached a
  // valid state.
  if (queryingElement && element.getState().isValidState()) {
    recordDependence(element, const_cast<AbstractElement &>(*queryingElement),
                     resolution);
  }

  return element;
}

// Returns the element of |ElementT| for |pos|, creating and initializing it
// if it does not exist yet. No dependency is recorded for the caller.
template <typename ElementT>
const ElementT &getOrCreateElementFor(const Position &pos) {
  return getOrCreateElementFor<ElementT>(pos, /*queryingElement=*/nullptr,
                                         Resolution::NONE);
}

// Returns the element of |ElementT| for |pos| if existing and valid.
// |queryingElement| can be nullptr to allow for lookups from outside of the
// solver system.
template <typename ElementT>
ElementT *lookupElementFor(const Position &pos,
                           const AbstractElement *queryingElement = nullptr,
                           Resolution resolution = Resolution::OPTIONAL,
                           bool allowInvalidState = false) {
  static_assert(std::is_base_of<AbstractElement, ElementT>::value,
                "cannot query an element with a type not derived from "
                "'AbstractElement'");

  // Lookup the abstract element of type ElementT and if found return it after
  // registering a dependence of queryingElement on the one returned element.
  auto *elementPtr = elementMap.lookup({&ElementT::ID, pos});
  if (!elementPtr) return nullptr;
  auto *element = static_cast<ElementT *>(elementPtr);

  // Do not register a dependence on an element with an invalid state.
  if (resolution != Resolution::NONE && queryingElement &&
      element->getState().isValidState()) {
    recordDependence(*element, const_cast<AbstractElement &>(*queryingElement),
                     resolution);
  }

  // Return nullptr if this element has an invalid state.
  if (!allowInvalidState && !element->getState().isValidState()) {
    return nullptr;
  }

  return element;
}

// Explicitly record a dependence from |fromElement| to |toElement|,
// indicating that if |fromElement| changes |toElement| should be updated as
// well.
//
// This method should be used in conjunction with the `getElementFor` method
// and with the resolution enum passed to the method set to NONE. This can be
// beneficial to avoid false dependencies but it requires the users of
// `getElementFor` to explicitly record true dependencies through this method.
// The |resolution| flag indicates if the dependence is strictly necessary.
// That means for required dependences if |fromElement| changes to an invalid
// state |toElement| can be moved to a pessimistic fixpoint because it
// required information from |fromElement| but none are available anymore.
void recordDependence(const AbstractElement &fromElement,
                      const AbstractElement &toElement, Resolution resolution);

// Introduces a new abstract element into the fixpoint analysis.
//
// Note that ownership of the element is given to the solver and the solver
// will invoke delete on destruction of the solver.
//
// Elements are identified by their IR position (ElementT::getPosition())
// and the address of their static member (see ElementT::ID).
template <typename ElementT>
ElementT &registerElement(ElementT &element) {
  static_assert(std::is_base_of<AbstractElement, ElementT>::value,
                "cannot register an element with a type not derived from "
                "'AbstractElement'!");

  // Put the element in the lookup map structure and the container we use to
  // keep track of all attributes.
  const auto &pos = element.getPosition();
  AbstractElement *&elementPtr = elementMap[{&ElementT::ID, pos}];
  assert(!elementPtr && "element already in map!");
  elementPtr = &element;

  // Register element with the synthetic root only before we are done.
  if (phase == Phase::SEEDING || phase == Phase::UPDATE) {
    depGraph.syntheticRoot.deps.push_back(
        DepGraphNode::DepTy(&element, unsigned(Resolution::REQUIRED)));
  }

  return element;
}

// Runs the solver until either it converges to a fixed point or exceeds the
// maximum iteration count. Returns success() if it converges in time.
LogicalResult run();

// Prints the constraint dependency graph to |os|.
void print(llvm::raw_ostream &os);

// Dumps a .dot of the constraint dependency graph to a file.
void dumpGraph();

protected:
friend DepGraph;

Explorer &explorer;
AsmState &asmState;
llvm::BumpPtrAllocator &allocator;

// This method will do fixpoint iteration until a fixpoint or the maximum
// iteration count is reached.
//
// If the maximum iteration count is reached this method will
// indicate pessimistic fixpoint on elements that transitively depend on
// elements that were still scheduled for an update.
LogicalResult runTillFixpoint();

// Runs update on |element| and tracks the dependencies queried while doing
// so. Also adjusts the state if we know further updates are not necessary.
ChangeStatus updateElement(AbstractElement &element);

// Remembers the dependences on the top of the dependence stack such that they
// may trigger further updates.
void rememberDependences();

// Maximum number of fixed point iterations or None for default.
Optional<unsigned> maxFixpointIterations;

// A flag that indicates which stage of the process we are in.
enum class Phase {
  // Initial elements are being registered to seed the graph.
  SEEDING,
  // Fixed point iteration is running.
  UPDATE,
  // Iteration has completed; does not indicate whether it converged.
  DONE,
} phase = Phase::SEEDING;

// The current initialization chain length. Tracked to avoid stack overflows
// during recursive initialization.
unsigned initializationChainLength = 0;

// Elements are keyed by their static type ID (&ElementT::ID) plus position.
using ElementMapKeyTy = std::pair<const char *, Position>;
DenseMap<ElementMapKeyTy, AbstractElement *> elementMap;

// Element dependency graph indicating the resolution constraints across
// elements.
DepGraph depGraph;

// Information about a dependence:
// If fromElement is changed toElement needs to be updated as well.
struct DepInfo {
  const AbstractElement *fromElement;
  const AbstractElement *toElement;
  Resolution resolution;
};

// The dependence stack is used to track dependences during an
// `AbstractElement::update` call. As `AbstractElement::update` can be
// recursive we might have multiple vectors of dependences in here. The stack
// size should be adjusted according to the expected recursion depth and the
// inner dependence vector size to the expected number of dependences per
// abstract element. Since the inner vectors are actually allocated on the
// stack we can be generous with their size.
using DependenceVector = SmallVector<DepInfo, 8>;
SmallVector<DependenceVector *, 16> dependenceStack;
};

} // namespace DFX
} // namespace iree_compiler
} // namespace mlir

#endif // IREE_COMPILER_DIALECT_UTIL_ANALYSIS_DFX_SOLVER_H_
import React, { ReactNode } from 'react' import { FaFacebook, FaInstagram, FaLinkedin, FaTwitter, FaYoutube } from 'react-icons/fa' import { ComapnyLogo, DeveloperLink, FooterContainer, FooterLink, FooterLinksContainer, FooterLinksItems, FooterLinksTitle, FooterLinksWrapper, FooterWrapper, SocialIconLink, SocialMediaWrapper, SociaMedia, SocilaIcons, WebsiteDeveloper, WebsiteRights } from './styles' interface FooterProps { children?: ReactNode } function Footer({ children }: FooterProps) { return ( <FooterContainer> <FooterWrapper> <FooterLinksContainer> <FooterLinksWrapper> <FooterLinksItems> <FooterLinksTitle>About Us</FooterLinksTitle> <FooterLink to={'home'} smooth={true} duration={500} spy={true} offset={-80} > How it works </FooterLink> <FooterLink to={'home'} smooth={true} duration={500} spy={true} offset={-80} > Testimonial </FooterLink> <FooterLink to={'home'} smooth={true} duration={500} spy={true} offset={-80} > Careers </FooterLink> <FooterLink to={'home'} smooth={true} duration={500} spy={true} offset={-80} > Investors </FooterLink> <FooterLink to={'home'} smooth={true} duration={500} spy={true} offset={-80} > Terms of service </FooterLink> </FooterLinksItems> <FooterLinksItems> <FooterLinksTitle>Contact Us</FooterLinksTitle> <FooterLink to={'home'} smooth={true} duration={500} spy={true} offset={-80} > Contact </FooterLink> <FooterLink to={'home'} smooth={true} duration={500} spy={true} offset={-80} > Address </FooterLink> <FooterLink to={'home'} smooth={true} duration={500} spy={true} offset={-80} > Support </FooterLink> <FooterLink to={'home'} smooth={true} duration={500} spy={true} offset={-80} > Destination </FooterLink> <FooterLink to={'home'} smooth={true} duration={500} spy={true} offset={-80} > Sporsorships </FooterLink> </FooterLinksItems> </FooterLinksWrapper> {/* Second Column */} <FooterLinksWrapper> <FooterLinksItems> <FooterLinksTitle>Social</FooterLinksTitle> <FooterLink to={'home'} smooth={true} duration={500} spy={true} offset={-80} > 
Youtube </FooterLink> <FooterLink to={'home'} smooth={true} duration={500} spy={true} offset={-80} > Facebook </FooterLink> <FooterLink to={'home'} smooth={true} duration={500} spy={true} offset={-80} > Instagram </FooterLink> <FooterLink to={'home'} smooth={true} duration={500} spy={true} offset={-80} > Linked In </FooterLink> <FooterLink to={'home'} smooth={true} duration={500} spy={true} offset={-80} > Twitter </FooterLink> </FooterLinksItems> <FooterLinksItems> <FooterLinksTitle>Services</FooterLinksTitle> <FooterLink to={'home'} smooth={true} duration={500} spy={true} offset={-80} > Service 1 </FooterLink> <FooterLink to={'home'} smooth={true} duration={500} spy={true} offset={-80} > Service 2 </FooterLink> <FooterLink to={'home'} smooth={true} duration={500} spy={true} offset={-80} > Service 3 </FooterLink> <FooterLink to={'home'} smooth={true} duration={500} spy={true} offset={-80} > Service 4 </FooterLink> <FooterLink to={'home'} smooth={true} duration={500} spy={true} offset={-80} > Terms of service with some long text, but not long enaought; </FooterLink> </FooterLinksItems> </FooterLinksWrapper> </FooterLinksContainer> <SociaMedia> <SocialMediaWrapper> {/* Create logic to handle to redirect to home in diferent paths - use a commun anchor instead; */} <ComapnyLogo to="home" smooth={true} duration={500} spy={true} offset={-80} > ConsoliDados </ComapnyLogo> <WebsiteRights>ConsoliDados © {new Date().getFullYear()} All Rights Reserved </WebsiteRights> <SocilaIcons> <SocialIconLink href="/#" target="_blank" area-label="Facebook" > <FaFacebook /> </SocialIconLink> <SocialIconLink href="/#" target="_blank" area-label="Instagram" > <FaInstagram /> </SocialIconLink> <SocialIconLink href="/#" target="_blank" area-label="YouTube" > <FaYoutube /> </SocialIconLink> <SocialIconLink href="/#" target="_blank" area-label="Twitter" > <FaTwitter /> </SocialIconLink> <SocialIconLink href="/#" target="_blank" area-label="LinkedIn" > <FaLinkedin /> </SocialIconLink> 
</SocilaIcons > </SocialMediaWrapper> <WebsiteDeveloper> Made with  💚 by: <DeveloperLink href="https://www.johnnycarreiro.com" target="_blank" area-label="JohnnyCarreiro"> <NAME> </DeveloperLink> </WebsiteDeveloper> </SociaMedia> </FooterWrapper> </FooterContainer> ) } export default Footer
RCMP are investigating human remains that were found on the shoreline of Lake Winnipegosis near Shoal River. Police say they were discovered Tuesday at around 5 p.m. RCMP confirmed the presence of a deceased person and transported the body to Winnipeg for an autopsy. “We are not in a position to confirm the identity of the remains at this time, pending further forensic examination and other investigative steps to ensure certainty,” said Sgt. Bert Paquet of the Manitoba RCMP. The investigation is continuing and no further details have been released.
<reponame>normalscene/PSS<filename>purenessscopeserver/FrameCore/CppUnit/Unit_MakePacket.cpp #include "Unit_MakePacket.h" #ifdef _CPPUNIT_TEST CUnit_MakePacket::CUnit_MakePacket() { } void CUnit_MakePacket::setUp(void) { m_pMakePacket = new CMakePacket(); } void CUnit_MakePacket::tearDown(void) { delete m_pMakePacket; m_pMakePacket = NULL; } void CUnit_MakePacket::Test_MakePacket(void) { bool blRet = false; uint32 u4ConnectID = 1; ACE_Time_Value tvNow = ACE_OS::gettimeofday(); ACE_Message_Block* pmb = App_MessageBlockManager::instance()->Create(10); if (false == m_pMakePacket->PutSendErrorMessage(u4ConnectID, pmb, tvNow)) { OUR_DEBUG((LM_INFO, "[Test_ControlListen]m_pMakePacket->PutSendErrorMessage() Error.\n")); CPPUNIT_ASSERT_MESSAGE("[Test_ControlListen]m_pMakePacket->PutSendErrorMessage() Error.", true == blRet); } } #endif
Coping in adult cystic fibrosis patients: Association with anxiety and depression Objective: Cystic fibrosis (CF) is an inherited and chronic disease. Coping with the disease gets more important with increased life span. In this study, the relationship between coping strategies and anxiety/depression risk in adult CF patients is examined. Method: 30 adult CF patients (17 female, 13 male; mean age: 24±4) completed the Hospital Anxiety/Depression Scale and Brief COPE Scale. 14 coping scores were calculated. Results: Acceptance (6.80±1.21) was the most preferred strategy and substance use (2.53±1.55) was the least. 4 patients had increased risk of anxiety, 4 had increased risk of depression, and 4 had increased risk of both. Patients with anxiety and depression risks used behavioural disengagement more than the non-risk group. Active coping was significantly higher in patients without depression risk. The anxiety risk group had significantly higher coping with venting (Table I). Coping with instrumental support was significantly higher in the employed than in students and unemployed patients (p:0.041). Discussion: Psychological state affects the preferred coping method. Encouraging use of adaptive coping strategies in adult CF patients is important.
World Bank president Paul Wolfowitz broke the rules and engaged in an actual conflict of interest when in 2005 he arranged for a rather generous salary boost for his girlfriend, Shaha Riza, a communications official at the Bank. That’s the conclusion of a special panel of the Bank’s board of directors, which on Monday released its report on the Wolfowitz matter. This judgment was no surprise; the basics had been leaked days earlier. But the report presented more information that places Wolfowitz in a tough spot–for it suggests that he and Riza brazenly took advantage of the situation created by his appointment to the Bank to guarantee her a promotion and pay rise she had failed to obtain previously. And the question of the moment is the obvious one: can he survive? According to Mr. [Xavier] Coll [vice president of human resources], he met with Mr. Wolfowitz and Ms. [Robin] Cleveland, Counselor to the President, on August 10, 2005, in preparation for a meeting on August 11 with Ms. Riza. During that meeting, Mr. Coll was told to stop consulting with the Bank’s General Counsel on this matter. In retrospect, it’s clear there was the need for more legal advice, not less, about what to do about Riza, who could not continue to work at the Bank in a position under the supervision of Wolfowitz. Yet Wolfowitz kept the circle small. He has claimed it would have been a conflict of interest to involve the Bank’s general counsel–a contention rejected by the special panel. But even if Wolfowitz had been right about that, he could have sought another way for the human relations department to obtain appropriate legal guidance. He did not. If this is so–if the Bank’s board believes Mr. Coll–it’s end of story. Had Wolfowitz indeed proceeded with a deal after he was warned it was “outside the rules”–a deal that was rather lucrative for his girlfriend–that ought to be a firing offense. According to Mr. Coll, after he received the written August 11 instructions from Mr. 
Wolfowitz [dictating the terms of the Riza deal], he asked again whether he could consult with the Bank’s General Counsel, or anyone in the Bank’s Legal Department, and was told he could not. This explains it. Riza was angry. She was mad (as the report notes) that she had to leave the Bank because her romantic partner was taking over. But she also harbored a grudge, believing, rightly or wrongly, that she had been the victim of discrimination at the Bank. (In a previous article, I explained how she was turned down for a promotion to a job for which she did not meet the minimum qualifications.) According to the panel’s report, it was Riza who came up with the specific terms of her reassignment. It seems she was trying to turn lemons into champagne–that is, using the opportunity to settle old scores and award herself the money she believed she deserved. And Wolfowitz went along with his gal-pal. The report is clear: “The salary increase granted to Ms. Riza far exceeded an increase that would have been granted in accordance with the applicable Staff Rule.” The report notes that even had she received a promotion at that time, she could have expected a boost in her annual salary of between $5000 and $20,000–not the $47,000 Wolfowitz awarded her. The report also says that the agreement Wolfowitz arranged called for an annual salary increase more than twice the customary rate and that the automatic promotions awarded Riza in the deal violated the Bank’s rules. The board of directors was scheduled to discuss the report with Wolfowitz on Tuesday evening. The issue is, what will the board do in response to the report? It can vote to reprimand or remove Wolfowitz. A reprimand might not be enough for many board members. But the board may not want to pull the trigger. It can issue a vote of no confidence, hoping Wolfowitz will resign. But does Wolfowitz want to put up a fight? 
Is the White House willing to stick with him, as it has done (so far) with Attorney General Alberto Gonzales? George W. Bush can be a stubborn fellow. The report is a strong indictment of Wolfowitz. It shows he and his girlfriend tried to game the system in a way that could bring her (over the course of his tenure and beyond, thanks to a generous pension) millions of extra dollars. If Wolfowitz manages to stay on after the release of the report, it will be quite an accomplishment for the accountability’s-not-us Bush administration.
Large-scale timing-driven rectilinear steiner tree construction in presence of obstacles In the paper, we provide a timing-driven rectilinear routing tree algorithm which applies top-down partitioning followed by the bottom-up routing tree construction in the presence of the obstacles. The objective is to simultaneously minimize the source-to-terminal delay and the total wirelength. First, a top-down partitioning method is used to divide the chip into four sub-regions according to the position of the source. Then, the terminals in each sub-region are connected by a fast sequential routing tree algorithm. The major steps of the routing algorithm include minimal spanning tree constructing, invalid edges pushing and routing. It shows experimentally that the maximum source-to-terminal delay of the routing tree is improved by 74%. Compared to previous results, total wirelength is significantly reduced by 34.7%.
The present invention relates to combustion systems, and more particularly relates to heat exchangers for combustion systems. Combustion systems, such as combustion furnaces, generate sounds which, depending on the use and environment of the combustion system, may be unacceptable or unpleasant. The sound level generated by a particular combustion system generally depends on the turbulence of the combustion fluids at the source of combustion. In addition, these sounds may interact with structural components of the combustion system which acoustically amplify the sound. Normally, in combustion furnaces this sound level is reduced to an acceptable level by adjusting the flow of the combustion fluids to maintain a substantially non-turbulent flow at the combustion source, and by arranging the heat exchanger assembly, furnace cabinet, and other such components to minimize acoustic amplification. However, in certain situations it is not feasible or desirable to reduce the sound level by using these conventional techniques. Also, it may be desirable to reduce the sound level to a level lower than that which may be attained by using these conventional techniques. For example, in an induced draft combustion furnace having compact, side by side heat exchangers with monoport, inshot burners, it is not desirable to make burner modifications which may decrease the efficiency of the furnace and it is not desirable to make other furnace component modifications which may increase the size and/or bulkiness of the furnace.
/** * Removes final modifier from a Field object. * * @param field Field to remove modifier from. */ public static void removeFinal(Field field) { try { Field modifier = Field.class.getDeclaredField("modifiers"); modifier.setAccessible(true); modifier.set(field, field.getModifiers() & ~Modifier.FINAL); } catch (NoSuchFieldException | IllegalAccessException e) { e.printStackTrace(); } }
A general method to synthesize and sinter bulk ceramics in seconds Speedy ceramic sintering Synthesizing ceramics can require heating for long times at high temperatures, making the screening of high-through-put materials challenging. C. Wang et al. developed a new ceramic-sintering technique that uses resistive heating of thin carbon strips to ramp up and ramp down temperature quickly. This method allows for the quick synthesis of a wide variety of ceramics while mitigating the loss of volatile elements. Ultrafast sintering is ideal for synthesizing many compositions to screen for ideal properties for a variety of applications, including the development of new solid-state electrolytes. Science, this issue p. 521 A resistive heating method can sinter ceramics in seconds, allowing for high-throughput materials screening. Ceramics are an important class of materials with widespread applications because of their high thermal, mechanical, and chemical stability. Computational predictions based on first principles methods can be a valuable tool in accelerating materials discovery to develop improved ceramics. It is essential to experimentally confirm the material properties of such predictions. However, materials screening rates are limited by the long processing times and the poor compositional control from volatile element loss in conventional ceramic sintering techniques. To overcome these limitations, we developed an ultrafast high-temperature sintering (UHS) process for the fabrication of ceramic materials by radiative heating under an inert atmosphere. We provide several examples of the UHS process to demonstrate its potential utility and applications, including advancements in solid-state electrolytes, multicomponent structures, and high-throughput materials screening.
How Do Spatial Learning and Memory Occur in the Brain? Coordinated Learning of Entorhinal Grid Cells and Hippocampal Place Cells Spatial learning and memory are important for navigation and formation of episodic memories. The hippocampus and medial entorhinal cortex (MEC) are key brain areas for spatial learning and memory. Place cells in hippocampus fire whenever an animal is located in a specific region in the environment. Grid cells in the superficial layers of MEC provide inputs to place cells and exhibit remarkable regular hexagonal spatial firing patterns. They also exhibit a gradient of spatial scales along the dorsoventral axis of the MEC, with neighboring cells at a given dorsoventral location having different spatial phases. A neural model shows how a hierarchy of self-organizing maps, each obeying the same laws, responds to realistic rat trajectories by learning grid cells with hexagonal grid firing fields of multiple spatial scales and place cells with unimodal firing fields that fit neurophysiological data about their development in juvenile rats. The hippocampal place fields represent much larger spaces than the grid cells to support navigational behaviors. Both the entorhinal and hippocampal self-organizing maps amplify and learn to categorize the most energetic and frequent co-occurrences of their inputs. Topdown attentional mechanisms from hippocampus to MEC help to dynamically stabilize these spatial memories in both the model and neurophysiological data. Spatial learning through MEC to hippocampus occurs in parallel with temporal learning through lateral entorhinal cortex to hippocampus. These homologous spatial and temporal representations illustrate a kind of neural relativity that may provide a substrate for episodic learning and memory.
The role of muscle tissue in the pathogenesis of chronic heart failure the potential of exposure (FORMA study) Aim. To determine whether the skeletal muscle of patients with chronic heart failure (CHF) retains the ability to regenerate and grow; to compare the effectiveness of long aerobic trainings, calculated by an individualized method, and con-ventionally calculated trainings (VO 2 peak values), in relation to the severity of heart failure, exercise tolerance (ET), and ergoreflex activity (ERGO). Material and methods. The study included 297 patients with stable III functional class (FC) CHF, receiving optimal therapy. The presence of heart failure was found in all patients at least 6 months before the start of the study (age 18-65 years, body mass index (BMI) 19-28 kg/height, m 2. Initially, the study performed a cardiorespiratory test (CRT) with an assessment of gas composition, acid-base balance of the blood and ERGO activity. Patients were randomized into 2 groups: experimental (EG) and control (CG). For EG, based on the determination of the lactate threshold (LT), after 1 and 3 months the CRT was repeated and the training walking mode was dynam-ically recounted according to the new LT level. For CG, the training walking mode was calculated based on the VO 2 peak values. All patients trained for 6 months. At the end of the training, diagnostic CRT was performed, and the activity of EGO was evaluated. Eleven patients with CHF and 3 healthy donors before the start of the training underwent a biopsy of the gastrocnemius muscle. Results. It was shown that the potential for muscle differentiation of satellite skeletal muscle precursor cells obtained from patients with CHF with a reduced ejection fraction (HFrEF) does not differ in vitro from the potential of satellite cells of healthy donors. 
After 6 months of training, the severity of CHF decreased to FC II in 75% of EG patients, and among CG patients in 44%; the main indicators of the stages of compensatory mechanisms activation during physical exertion (VO 2 LT and VO 2 peak) in EG increased more than in the CG (10,8±0,4, 18,7±0,7 ml/min/kg and 9,5±0,8, 15,3±0,9 ml/ min/kg, with p 1 <0,01, p 2 <0,05, p 3 <0,01, respectively). Conclusion. In vitro, the potential for muscle differentiation, regeneration and growth of satellite skeletal muscle precursor cells obtained from patients with HFrEF does not differ from the potential of satellite cells of healthy donors. Aerobic training in patients with III FC chronic heart failure calculated by definition of LT, relating to safety is not worse than the results calculated by the level of VO 2 peak. Aerobic training in patients with III FC chronic heart failure calculated by definition of LT, compared with the usual mode of training walking, significantly reduce the activity of ergoreflex, increase ET, reduce the severity of CHF. In patients with III FC CHF, training walking for more than 1,5 hours/day determined by the level of LT, contributes to the development of physiological reverse myocardial remodeling to a greater extent than aerobic training calculated by the conventional method. Conflicts of nothing. Initially, the study performed a cardiorespiratory test (CRT) with an assessment of gas composition, acid-base balance of the blood and ERGO activity. Patients were randomized into 2 groups: experimental (EG) and control (CG). For EG, based on the determination of the lactate threshold (LT), after 1 and 3 months the CRT was repeated and the training walking mode was dynamically recounted according to the new LT level. For CG, the training walking mode was calculated based on the VO 2 peak values. All patients trained for 6 months. At the end of the training, diagnostic CRT was performed, and the activity of EGO was evaluated. 
Eleven patients with CHF and 3 healthy donors before the start of the training underwent a biopsy of the gastrocnemius muscle. Results. It was shown that the potential for muscle differentiation of satellite skeletal muscle precursor cells obtained from patients with CHF with a reduced ejection fraction (HFrEF) does not differ in vitro from the potential of satellite cells of healthy donors. After 6 months of training, the severity of CHF decreased to FC II in 75% of EG patients, and among CG patients -in 44%; the main indicators of the stages of compensatory mechanisms activation during physical exertion (VO 2 LT and VO 2 peak) in EG increased more than in the CG (10,8±0,4, 18,7±0,7 ml/min/kg and 9,5±0,8, 15,3±0,9 ml/ min/kg, with p 1 <0,01, p 2 <0,05, p 3 <0,01, respectively). Conclusion. In vitro, the potential for muscle differentiation, regeneration and growth of satellite skeletal muscle precursor cells obtained from patients with HFrEF does not differ from the potential of satellite cells of healthy donors. Aerobic training in patients with III FC chronic heart failure calculated by definition of LT, relating to safety is not worse than the results calculated by the level of VO 2 peak. Aerobic training in patients with III FC chronic heart failure calculated by definition of LT, compared with the usual mode of training walking, significantly reduce the activity of ergoreflex, increase ET, reduce the severity of CHF. In patients with III FC CHF, training walking for more than 1,5 hours/day determined by the level of LT, contributes to the development of physiological reverse myocardial remodeling to a greater extent than aerobic training calculated by the conventional method. The prevalence of heart failure (HF) in the Russian Federation has reached the epidemic. 
By evidence-based medicine, effective methods to combat this pathology were developed, including basic medications: angiotensin-converting enzyme inhibitors (ACE inhibitors), angiotensin II receptor antagonists (ARA II), angiotensin receptor-neprilysin inhibitors (ARNi), beta-blockers (BB), mineralocorticoid receptor antagonists (MCRA). However, to date, it has not been possible to stop the rapid increase in the number of rehospitalizations due to decompensated HF, which significantly burdens the economies of the countries. Despite the inhibitory effect of BB, ACE inhibitors, ARA II, ARNi, MCRA, neurohumoral activation in HF is increased due to continuous peripheral afferent stimulation (enhanced ergoreflex activity). One of the possible points of application for HF stabilization is striated muscle tissue. Stimulation of molecular mechanisms for skeletal muscle regeneration, including physical rehabilitation, is a promising strategy to reduce muscle dysfunctions. Therefore, it seems relevant to determine whether the skeletal muscles in HF patients retain their ability to regenerate and grow. Data on such studies was not found. As any organ or tissue in HF, skeletal muscles suffer from a lack of oxygen and nutrients. There are following differences: muscle tissue is the largest organ by mass in human -40-45% of body weight; muscles have a special feedback system called "ergoreflex". Between the skeletal muscles on the one hand and the vasomotor and respiratory centers on the other hand, there are neurogenic connections that are mediated by ergoreceptors. Ergoreceptors are myelinated and non-myelinated afferent nerve fibers in the skeletal muscles, sensitive to all mechanical and metabolic changes in muscle fibers. Ergoreceptors play a major role in feedback control to maintain a balance between muscle load intensity and energy for this. 
Ergoreflex is a defensive mechanism of the body in response to metabolite accumulation in muscle fiber, aimed at removing metabolites and enhancing aerobic oxidation. In response to the muscle meta-bolic state, ergoreceptors modulate the intensity of muscle perfusion and the cardiorespiratory response to physical activity in order to meet the metabolic needs of contracting muscles. So, there is an increase in ventilation and a number of circulatory changes due to an enhanced sympathetic nervous system (SNS) activity -increase of heart rate and blood pressure (BP), contraction of the resistance vessels ( Fig. 1). Thus, skeletal muscle is not only the largest organ by mass in the human body, but also an organ that controls the activity of the cardiovascular and pulmonary systems by means of ergoreflex ( Fig. 1). However, data on effective influencing methods is currently contradictory. The only and most physiological way to reduce the ergoreflex activity is exercise training (ET). Physical therapy (PT) in HF patients should be used to improve exercise tolerance and quality of life, reduce the number of hospitalizations for decompensated HF. Currently, individual selection of the type, duration and intensity of physical activity in HF patients is an urgent problem. There were following aims of the study: 1) to determine whether the skeletal muscle in HF patients retains the ability to regenerate and grow; 2) to compare the effectiveness of individualized and conventional (based on VO 2 peak) approaches to selecting exercise mode, in relation to the severity of HF, exercise tolerance, and ergoreflex activity. Materials and methods Gastrocnemius muscle biopsy and assessment of muscle-resident cells. Eleven HF patients (mean age 54±12,5 years, body mass index (BMI) -26,5±6,4 kg/m 2, left ventricular ejection fraction (LVEF) 26,4±1,4%) and 3 healthy donors underwent gastrocnemius muscle biopsy. 
The preparation of primary muscle-resident cell cultures enriched in satellite cells was performed according to the standard methods. Preparing Geltrex-coated (Invitrogen, USA) culture dishes was performed for 1,5 h in a CO 2 incubator at +37°C in a Dulbecco's Modified Eagle's medium (DMEM) in a ratio of 1:100. The culture medium was changed every other day. Myogenic differentiation of cells was performed according to the standard methods when cultured in a differentiation medium consisting of a basic culture medium (-MEM) (PanEco, Russia) with the addition of 1% L-glutamine (Invitrogen, USA), 1% Penicillin-Streptomycin (Invitrogen, USA) and 2% horse serum (Gibco, USA). The primary medium was replaced with a differentiation one when subconfluent state of the culture was observed. During immunocytochemistry, the cells were washed with phosphate buffered saline (PBS) and fixed with 4% paraformaldehyde at +4°C for 10-15 minutes, washed with PBS, incubated with 0,2% TRITONx100 for 5 minutes, washed with PBS, blocked with 15% fetal calf serum for 30 minutes (Gibco, USA) in PBS. Incubation with primary and secondary antibodies were perfomed according to the manufacturer's instructions (MF20 antibodies to the myosin heavy chain (MHC MF20), myogenic factor 5 (Myf5), mitofusin-1 (Mfn1), PAX transcription factors, R&D BioSystems, USA). Immunophenotyping was performed by CytoFLEX flow cytometer (Beckman Coulter). Data was analyzed using CytExpert 2.0 software (Beckman Coulter). Isolation of ribonucleic acid (RNA), synthesis of complementary deoxynucleic acid (cDNA) and realtime polymerase chain reaction (PCR). Total RNA was isolated using ExtractRNA reagent (Evrogen, cat.no BC032, Russia). cDNA was synthesized from 500 ng total RNA using a reverse transcription kit (Molove, SK021, Russia). Quantitative gene expression was performed using qPCR-HS SYBR + ROX (Evrogen, cat.no. PK156, Russia). 
Data of qPCR are presented as arbitrary units of mRNA expression normalized to GAPDH expression and expression levels in a reference sample. Statistical analysis was performed using Graph-PadPrism7 software. All data were analyzed by at least three biological replicates and presented as mean±SEM. Safety and effectiveness of different exercise methods was assessed as part of the FORMA study. A prospective, randomized study was performed in accordance with Good Clinical Practice guidelines and the principles of Declaration of Helsinki; the study protocol was approved by the ethics committee of the Almazov National Medical Research Center. There were following inclusion criteria: symptoms of class III HF; stable clinical status for at least 2 weeks before inclusion in the study; age -18-65 years; body mass index (BMI) -19-28 kg/m 2 ; completed informed consent; the ability to perform cardiorespiratory test (CRT); LVEF <45%; administration of ACE inhibitors/ARA II/ARNi, BB, MCRA, diuretics; patient education during hospitalization at Almazov National Medical Research Center; follow-up monitoring of HF patients by a cardiologist. Exclusion criteria were moderate and severe chronic obstructive pulmonary disease (COPD), myocardial infarction (MI), pulmonary embolism (PE), surgeries over the past 6 months, severe cognitive disorders, low adherence treatment. The endpoints of the study were changes in the HF severity, exercise tolerance (VO 2 peak), ergoreflex, and myocardial contractile function (LVEF, LV end-diastolic dimension (LV EDD), LV end-systolic dimension (LV ESD)). Clinical characteristics of patients. The study included 297 patients with stable class III HF, which was established at least 6 months before the study. Patients were randomized into two groups: the experimental group (EG) -237 patients with class III HF (age 18-65 years, BMI 19-28 kg/m 2 ) and control group (CG) -60 patients with HF (age 18-65 years, BMI -19-28 kg/m 2 ). 
After 4-6 weeks of exercise, 55 EG patients on their own initiative gradually increased the duration of daily walk to 1,5-2 hours; this subgroup of patients (EGlong) was allocated for additional analysis (Table 1). Therapy did not differ significantly between groups. Results of clinical and instrumental examinations are presented in Table 1. The study progress is presented in Table 2. Initially, the subjects underwent a submaximal CRT with a simultaneous assessment of gas composition and acid-base status of the blood (Table 2). For each EG patient, the exercise mode of walk was estimated according to the CRT results based on the lactate threshold (LT) determination; after 1 and 3 months the CRT was repeated and on the basis of the newly obtained LP values, the mode was re-estimated (walking speed was 95% of the LT speed). Patients trained for 6 months. At the end of the exercise, a diagnostic CRT was performed. CG patients performed walking at the level of 55% VO 2 peak 3 times/week. Echocardiography was conducted using Philips iE-33. We used one-and twodimensional scanning modes, by which the transverse dimension of the left atrium (LA), EDD, ESD, and LVEF were assessed. The CRT was performed using treadmill (GE Medical Systems Information Technologies) and Oxycon Pro system (Jeger, Germany). Venous blood lactate concentration at rest and during physical exertion. Before the CRT, the catheter was inserted into the ulnar vein. Blood sampling was carried out initially and every minute during the physical exertion. Venous blood lactate concentration was evaluated by i-STAT Portable Clinical Analyzer (Abbott, USA) using CG4 cartridge kits. 
LT was recorded at the time of the beginning of Abbreviations: BMI -body mass index, AF -atrial fibrillation, COPD -chronic obstructive pulmonary disease, CRT -cardiac resynchronization therapy, CABG -coronary artery bypass grafting, LVEF -left ventricle ejection fraction, CG -control group, EGexperimental group, EGpres -EG subgroup with preserved load during physical rehabilitation, EGlong -EG subgroup with long-lasting exercise. blood lactate concentration increase. The assessment of ergoreflex was carried out by postexercise regional circulatory occlusion (PE-RCO). During the test, diastolic blood pressure (DBP) was measured; ventilation and gas exchange rates were recorded. The difference between DBP, carbon dioxide ventilatory equivalent (VE/VCO 2 ), minute ventilation (VE) after a three-minute occlusion (+PE-RCO) and the recovery period without occlusion (-PE-RCO) was calculated; percentage ratio of these values was estimated. Statistical analysis was performed using Statistica 6.0 software. All data were analyzed by at least three biological replicates and presented as mean±SEM. Comparison of mean values was performed using nonparametric statistics (Mann-Whitney U-test). The chi-squared test and the F-test were used to identify confidence in contingency tables. The significance level was p<0,05. Results Examination of stem cell population obtained by skeletal muscle biopsy. After isolation of cells and several days of in vitro expansion, we analyzed the expression of surface markers: CD56, CD105, CD166, CD146, CD73, CD140a, CD140b; CD45 was used as a negative control (Fig. 2). We showed that the vast majority of the isolated cells were CD56-positive (marker of satellite cells) and CD45-negative (marker of hematopoietic cells). We also found that a significant fraction of cells expressed stromal markers CD105, CD166 and CD73, and only a small fraction of cells was positive for markers CD146, CD140a and CD140b. 
The high level of expression of stromal markers in the population was most likely associated with contamination of the satellite cell fraction with the stromal cell fraction of muscle tissue. Therefore, an immunocytochemical analysis of the obtained samples was carried out, which confirmed the expression of the satellite cell markers Pax7 and Myf5 (Fig. 3A). The results of a quantitative analysis of immunocytochemical staining and expression of mRNA markers of satellite cells and myoblasts are shown in Fig. 3. The level of mRNA expression of both Myf5 and Pax7 was high and did not differ significantly between samples of healthy donors and patients with HF. The percentage of Myf5+ and Pax7+ cells also did not differ significantly in the samples. The results of the stimulation of differentiation showed that cells obtained from both healthy donors and HF patients have a similar potential for muscle differentiation in vitro. Fig. 4 shows the myotubes obtained after stimulation of muscle differentiation of satellite cell samples in vitro. The fusion coefficient did not differ significantly between the groups and amounted to 19±7% and 23±5% in the samples of healthy donors and HF patients, respectively. Comparison of safety and effectiveness of conventional and individualized approaches to selecting exercise mode. Of 297 patients, 25 people discontinued participation in the trial: 8 EG patients, 17 -CG (p<0,05); there were following reasons: unwillingness to continue exercise (n=10), heart transplantation (n=6), non-HF hospitalization (n=4), and hospitalization due to decompensated HF after URTI (n=3). Thus, 229 EG and 43 CG patients completed the study. After exercise, in EG patients there was a more pronounced decrease in the ergoreflex activity compared to CG patients: in EG, DBP decreased by 40%, VE by 53%, VE/VCO 2 by 38%, and in CG -by 21%, 23% and 15%, respectively (p<0,05) ( Table 3). 
Table 4 presents echocardiography changes in the studied patients before and after physical rehabilitation. In the EG, LV EDD, LV ESD, LVEF and left atrium dimension were significantly improved. In the CG, there was a significant increase in LVEF; LV EDD, LV ESD, and left atrium dimension were not significantly improved. Against the background of long-lasting aerobic exercise, patients from the EGlong subgroup showed a significant decrease in the end-systolic and end-diastolic volumes of the LV and LA, as well as a more pronounced LVEF increase than in the EG with preserved load (EGpres) and CG (Table 4). Discussion In HF, systemic metabolic changes are accompanied by muscular wasting, which in turn causes deterioration in physical performance and quality of life [1, 2]. The aim of the first part of this project was to determine whether the skeletal muscle in HF patients retains the ability to regenerate and grow. The results of the study demonstrated that striated muscle cells of patients with class III HF do not have significant differences with cells obtained from healthy donors. They have similar potential for muscle differentiation in vitro and show a high potential for restoration of muscle precursor cells. Thus, the skeletal muscle satellite cells under favorable conditions can contribute to the restoration of muscles injured due to HF. The exact molecular mechanisms of skeletal muscle restoration in HF patients have to be investigated. It is obvious that novel therapeutic strategies should be aimed at activating the regeneration potential of satellite cells, which may be partially realized by physical exercise. The results of applying different exercise modes are reflected in the second part of this study. In 2017, Russian recommendations for the appointment of physical training for patients with chronic heart failure were published. 
It was proposed to select the regime of physical rehabilitation empirically, based on the six-minute walk test (6MWT) or VO 2 peak. Nevertheless, the 6MWT results largely depend on the motivation of patient and doctor, concomitant pathology and many other factors. Therefore, a physical rehabilitation program estimated by 6MWT can be not accurate. VO 2 peak is also highly specified by the patient's motivation. Some aspects in determining the exercise regimen for HF patients remain open: there are no uniform principles for controlling the adaptation to physical activity; principles for planning the effective, safe and personalized exercise has not been fully developed. In 2012, we proposed the selection of the walking training mode based on the LT determination. The advantage of this approach is to increase the accuracy of determining the reserves of adaptation to physical activity. This method, in comparison with the previous ones, allows developing physical rehabilitation programs for any cardiovascular patients.. This study demonstrated the safety and effectiveness of present approach in class III HF patients. Its using allows to avoid the fatigue and, therefore, to prescribe a longer physical exercise. Described method makes it possible to softly increase the load based on the LT re-determination. As a result, there is a greater decrease in the ergoref lex activity in the EG, followed by decrease in neurohumoral activation. Also longer exercise duration can increase the number of mitochondria and exercise tolerance compared with conventional approaches where the time and load are strictly fixed. This is confirmed by the fact that in patients with LT-dependent exercise load, the tolerance increased more significantly, and in patients with >1,5 hours/day exercise, reverse myocardial remodeling was observed. Limitations: a relatively small number of patients in the group of long-lasting exercise and multicenter design. 
1) In vitro, the potential for muscle differentiation, regeneration and growth of satellite skeletal muscle precursor cells obtained from patients with HF with reduced EF does not differ from the potential of satellite cells of healthy donors. 2) Safety of aerobic exercise in patients with class III HF estimated by LT definition is equal with exercise estimated by the level of VO 2 peak; 3) Aerobic exercise in patients with class III HF estimated by LT definition, compared with the conventional approach, significantly reduce the activity of ergoreflex, increase exercise tolerance and reduce the HF severity. 4) In patients with class III HF, walking training >1,5 hours/day estimated by the LT level, contributes to the development of physiological reverse myocardial remodeling to a greater extent than aerobic exercise selected by the conventional method. Funding. This study was partially supported by the Russian Science Foundation grant № 16-15-1017 Conflicts of Interest: nothing to declare.
# Module author metadata; "<NAME>" is a redaction/template placeholder.
__author__ = '<NAME> (Little Fish Solutions LTD)'
"""
Example: eager mode integrand

Demonstrates how to run a non-tensorflow integrand using VegasFlow.
The integrand mixes numpy/scipy calls with the TensorFlow input tensor,
which is only possible when VegasFlow runs in eager mode.
"""
from vegasflow import run_eager, vegas_wrapper

import time

import numpy as np
from scipy.special import expit
import tensorflow as tf

# Enable eager mode so the integrand may call .numpy() on its input tensor.
run_eager(True)

# MC integration setup
dim = 4
ncalls = np.int32(1e5)
n_iter = 5


@tf.function
def symgauss_sigmoid(xarr, **kwargs):
    """Symmetric-Gaussian test integrand times a sigmoid of the first dim.

    Parameters
    ----------
    xarr: tensor of shape (n_events, n_dim) with points in the unit hypercube.
    kwargs: extra arguments passed by the integrator (unused).

    Returns a 1-d array of integrand values, one per event.
    """
    n_dim = xarr.shape[-1]
    a = 0.1  # width of the Gaussian peaks
    # Normalization prefactor so the Gaussian part integrates to ~1 per dim.
    pref = pow(1.0 / a / np.sqrt(np.pi), n_dim)
    coef = np.sum(np.arange(1, 101))
    # TensorFlow tensors are cast down by the numpy ops below; the raw values
    # are accessed explicitly with .numpy() where needed.
    # NOTE(review): @tf.function plus .numpy() only works because
    # run_eager(True) makes tf.functions execute eagerly — confirm against
    # the installed vegasflow version.
    xarr_sq = np.square((xarr - 1.0 / 2.0) / a)
    coef += np.sum(xarr_sq, axis=1)
    coef -= 100.0 * 101.0 / 2.0
    return expit(xarr[:, 0].numpy()) * (pref * np.exp(-coef))


if __name__ == "__main__":
    # Scale the number of calls *before* reporting it, so the printed value
    # matches what is actually passed to the integrator (the original printed
    # the un-scaled value and then silently multiplied it by 10).
    ncalls = 10 * ncalls
    print(f"VEGAS MC, ncalls={ncalls}:")
    start = time.time()
    r = vegas_wrapper(symgauss_sigmoid, dim, n_iter, ncalls, compilable=True)
    end = time.time()
    print(f"Vegas took: time (s): {end-start}")
Heavy metals such as cadmium, lead and mercury are non-essential elements for plants and are even hazardous to the growth of plants. They are deemed as heavy-metal pollutants for plant growth and food safety. The excessive accumulation of these hazardous heavy metals in food will enable them to enter into the food chain, and even threaten human health. According to the results of a quality and safety survey of rice in various regions of China in 2002 and 2003 made by the Quality Inspection and Supervision Center of Rice and Rice Products, Ministry of Agriculture, one of the quality and safety issues of rice is the over-the-limit content of heavy metals such as cadmium and lead. The over-the-limit rate is more than 10%. Three industrial wastes, non-ferrous metal mining and sewage irrigation are causes that may lead to an excessive amount of toxic heavy metals in the soil and excessive absorption by the plant, being the main source of the accumulation of heavy metals in plants or crops. Therefore, strict limiting criteria for heavy metals in soil and foods (or grains) have been established in various countries. For example, in China, the limit of cadmium in cereals is 0.2 mg/kg, that of lead 0.2 mg/kg and that of mercury 0.02 mg/kg. Cadmium, lead and mercury that are accumulated in plant foods, such as rice, barley and wheat, are mainly absorbed by the roots from the soil and finally accumulated in the harvest parts after flowing to the top upon transpiration. Research shows that the contents of cadmium, lead and mercury in the soil, especially the effective content (namely absorption by the roots) are the key factors when the roots absorb cadmium, lead and mercury from the soil. Hence, to reduce and control the effective cadmium, lead and mercury content in the soil by various agronomic means is always a key study subject at home and abroad. 
For instance, lime is applied on acidic and slightly acidic soil to raise the pH value of the soil, which obviously decreases the effectiveness of cadmium, lead and mercury in the soil and finally reduces the absorption of such heavy metals by the roots. However, the application of lime has also a lot of side effects. For example, it may cause the rise of the pH value in the soil, leading to the reduction of the required contents of multiple microelements like iron, manganese and zinc while decreasing such heavy metals, easily resulting in nutrient deficiency in the crops and their failure to thrive. On the other hand, the considerable difference of pH values and pH buffer capacities for different regions and soils gives rise to a great difficulty in accurately controlling the lime application amount. As to paddy rice, the inundation can facilitate the increase of soil reducibility, and the increase of elements such as ferrous iron in the soil and the promotion of the deposit of cadmium, lead and mercury in the form of sulfide, on one hand, decreases the soil effectiveness of these heavy metals. On the other hand, a decrease in the roots' absorption capacity for these elements, greatly reduces the roots' absorption and accumulation of toxic heavy metals from the soil. But for paddy rice itself, long-term inundation is not conducive to its growth and is apt to cause an increase of arsenic absorption in rice. It is a key orientation of studies around the world to add a curing agent or adsorbent in the soil to combine and fix or absorb heavy metals in the soil like cadmium, lead and mercury. The curing agent or adsorbent reported mainly includes zeolite, kieselguhr, sepiolite, bentonite and limestone, and even alkaline cinder, but there is still no curing agent or adsorbent for heavy metals in the soil that is produced and applied broadly. The mainly reasons lie in their being non-environmentally-friendly, high cost and no popularization efforts as to their use value.
Rep. Francis Rooney Laurence (Francis) Francis Rooney13 House Republicans who bucked Trump on emergency declaration House votes to overturn Trump's emergency declaration Whip List: Where Republicans stand on emergency declaration vote MORE (R-Fla.) on Wednesday defended his calls for a “purge” at the FBI and claims that some agents are part of “the deep state.” “It might be a pretty strong word. I’m not, maybe, the most nuanced political person in the world coming from a career in business, but I’m pretty frustrated by all the things that have come out,” Rooney said on CNN after video was shown of his earlier remarks. Rooney on Tuesday called out the FBI, Department of Justice (DOJ) and special counsel Robert Mueller's investigation into possible collusion between the Trump campaign and Russia. ADVERTISEMENT He was specifically critical of Peter Strzok, an FBI agent who worked on the investigation into Hillary Clinton Hillary Diane Rodham ClintonREAD: Cohen testimony alleges Trump knew Stone talked with WikiLeaks about DNC emails County GOP in Minnesota shares image comparing Sanders to Hitler Holder: 'Time to make the Electoral College a vestige of the past' MORE’s use of a private email server during her time as secretary of State and was a member of Mueller’s team. Strzok was dismissed from Mueller’s team after it was revealed he had sent anti-Trump text messages. Rooney said Wednesday Strzok’s messages indicate a “lack of impartiality” and make the Florida lawmaker “nervous” as an American citizen. “I think that’s going beyond just having political views. I hold the FBI and the Department of Justice in very high esteem,” Rooney said. “I’m not saying it necessarily influenced the investigation but they certainly were trying to work to impede Donald Trump, that’s what the guy said in those emails,” he continued. 
“And I just don’t know that someone in the FBI and DOJ ought to be doing that kind of stuff with all the power and authority they have over American citizens.” Republican lawmakers and President Trump Donald John TrumpREAD: Cohen testimony alleges Trump knew Stone talked with WikiLeaks about DNC emails Trump urges North Korea to denuclearize ahead of summit Venezuela's Maduro says he fears 'bad' people around Trump MORE have in recent weeks targeted the FBI over claims of bias, specifically targeting both Strzok and FBI Deputy Director Andrew McCabe. This past weekend, Trump lashed out at FBI leadership, going after McCabe over donations that Democrats made to his wife’s political campaign. Trump's attack Saturday came after a report that McCabe is retiring from the FBI amid mounting criticism from Republicans in Congress.
# -*- coding: utf-8 -*-
"""
Created on Mon Jul 15 12:39:08 2019

@author: johnc

Loader/aggregator for the Singapore wet-bulb-temperature data set.
"""

import pandas as pd


class WBTSG:
    """Wraps the wet-bulb-temperature CSV: loading, date expansion, grouping."""

    def __init__(self, file_path):
        self.file_path = file_path
        self.data = None  # populated by data_load()

    def data_load(self):
        """Load the CSV and derive year/month/day columns from wbt_date."""
        with open(self.file_path, 'r') as f:
            data = pd.read_csv(f)
        data['wbt_date'] = pd.to_datetime(data['wbt_date'])
        data['wbt_year'] = pd.DatetimeIndex(data['wbt_date']).year
        data['wbt_month'] = pd.DatetimeIndex(data['wbt_date']).month
        data['wbt_day'] = pd.DatetimeIndex(data['wbt_date']).day
        self.data = data

    def data_groupby(self, groups=('wbt_year', 'wbt_month'), axis=0):
        """Collapse self.data to the per-group maximum of every other column.

        groups may be a single column name or an iterable of column names.
        (The original default was a mutable list, and the body wrapped it in
        another list — groupby([['a', 'b']]) — which breaks for the default
        case; both issues are fixed here.)

        axis is unused; kept for backward compatibility with existing callers.
        """
        if isinstance(groups, str):
            keys = [groups]
        else:
            keys = list(groups)
        self.data = self.data.groupby(keys).max().reset_index()


if __name__ == "__main__":
    # Plotting is only needed when run as a script; importing matplotlib at
    # module level made the module unimportable in headless environments and
    # triggered a file read on import.
    import matplotlib.pyplot as plt
    from pandas.plotting import register_matplotlib_converters

    # Register converter for date time
    register_matplotlib_converters()

    data = WBTSG("../../../../../Data/singapore/wbt/main.csv")
    data.data_load()
    data.data_groupby()
    wbt = data.data
    plt.plot(data.data['wbt_date'][1:100],
             data.data['wet_bulb_temperature'][1:100])
Mountain uplift explains differences in Palaeogene patterns of mammalian evolution and extinction between North America and Europe Patterns of late Palaeogene mammalian evolution appear to be very different between Eurasia and North America. Around the Eocene–Oligocene (E–O) transition global temperatures in the Northern Hemisphere plummet: following this, European mammal faunas undergo a profound extinction event (the Grande Coupure), while in North America they appear to pass through this temperature event unscathed. Here, we investigate the contribution of surface uplift to environmental change and mammalian evolution through the Palaeogene (66–23 Ma). Palaeogene regional surface uplift in North America caused large-scale reorganization of precipitation patterns, particularly in the continental interior, in accord with our combined stable isotope and ecometric data. Changes in mammalian faunas reflect that these were dry and high-elevation palaeoenvironments. The scenario of Middle to Late Eocene (50–37 Ma) surface uplift, together with decreasing precipitation in higher-altitude regions of western North America, explains the enigma of the apparent lack of the large-scale mammal faunal change around the E–O transition that characterized western Europe. We suggest that North American mammalian faunas were already pre-adapted to cooler and drier conditions preceding the E–O boundary, resulting from the effects of a protracted history of surface uplift.
// NewByVersionIDPayload builds a resource service ByVersionId endpoint payload. func NewByVersionIDPayload(versionID uint) *resource.ByVersionIDPayload { v := &resource.ByVersionIDPayload{} v.VersionID = versionID return v }
package io.github.coolcrabs.fernutil;

import java.net.URL;
import java.net.URLClassLoader;
import java.util.HashMap;

import org.tinylog.Logger;

// Jank: isolating classloader that resolves classes from the supplied
// classpath URLs only, while *sharing* a fixed set of API classes with the
// host side so their instances can cross the loader boundary.
class FUClassLoader extends URLClassLoader {
    // Classes served from the host loader (same Class objects), keyed by
    // fully-qualified name.
    static final HashMap<String, Class<?>> yeet = new HashMap<>();

    // Register a class for sharing with code loaded by this loader.
    static void c(Class<?> c) {
        yeet.put(c.getName(), c);
    }

    static {
        // FernUtil callback/API types plus the tinylog Logger facade.
        c(FernUtil.class);
        c(FernUtil.LineNumbers.class);
        c(FernUtil.JavadocProvider.class);
        c(Logger.class);
    }

    // Parent is the system loader's parent (platform/bootstrap), so nothing
    // from the application classpath leaks in besides the shared set above.
    FUClassLoader(URL[] classpath) {
        super(classpath, ClassLoader.getSystemClassLoader().getParent());
    }

    @Override
    protected Class<?> findClass(String name) throws ClassNotFoundException {
        // Serve shared classes first; everything else resolves from the
        // classpath URLs given to the constructor.
        Class<?> c = yeet.get(name);
        if (c != null) {
            return c;
        }
        return super.findClass(name);
    }
}
def cql_type_to_gemini(cql_type, is_frozen=False):
    """Translate a parsed CQL type token stream into a gemini schema fragment.

    ``cql_type`` is either a plain string (a primitive CQL type name,
    returned unchanged) or a token list produced by the CQL type parser.
    Token lists are consumed destructively via ``pop``, matching the
    original implementation.
    """
    # Primitive type name: nothing to translate.
    if isinstance(cql_type, str):
        return cql_type
    # A single-element list just wraps the final type name.
    if len(cql_type) == 1:
        return cql_type[0]

    frozen = is_frozen
    translated = {}
    head = cql_type.pop(0)

    if isinstance(head, (list, tuple)):
        # Nested token group: descend into it directly.
        return cql_type_to_gemini(head, frozen)
    if head == 'frozen':
        # 'frozen' modifies the type that follows it.
        return cql_type_to_gemini(cql_type.pop(0), True)

    if head == 'map':
        key_and_value = cql_type.pop(0)
        translated['key_type'] = cql_type_to_gemini(key_and_value[0], frozen)
        translated['value_type'] = cql_type_to_gemini(key_and_value[1], frozen)
    elif head in ('list', 'set'):
        translated['kind'] = head
        translated['type'] = cql_type_to_gemini(cql_type.pop(0)[0], frozen)
    elif head == 'tuple':
        translated['types'] = cql_type.pop(0)
    translated['frozen'] = frozen
    return translated
// Copyright 2020 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "chrome/browser/android/context_menu/chrome_context_menu_populator.h"

#include "base/android/callback_android.h"
#include "base/android/jni_android.h"
#include "base/android/jni_array.h"
#include "base/android/jni_string.h"
#include "base/bind.h"
#include "base/bind_helpers.h"
#include "base/strings/string_util.h"
#include "chrome/android/chrome_jni_headers/ChromeContextMenuPopulator_jni.h"
#include "chrome/browser/download/android/download_controller_base.h"
#include "chrome/browser/image_decoder/image_decoder.h"
#include "chrome/browser/ui/tab_contents/core_tab_helper.h"
#include "components/embedder_support/android/contextmenu/context_menu_builder.h"
#include "content/public/browser/context_menu_params.h"
#include "content/public/browser/render_frame_host.h"
#include "content/public/browser/web_contents.h"
#include "third_party/blink/public/common/associated_interfaces/associated_interface_provider.h"
#include "ui/gfx/android/java_bitmap.h"
#include "ui/gfx/geometry/size.h"

using base::android::JavaParamRef;
using base::android::JavaRef;

namespace {

// Self-deleting decode request: feeds raw image bytes to ImageDecoder and,
// when done, hands a Java Bitmap (or null on failure) to the Java callback.
class ContextMenuPopulatorImageRequest : public ImageDecoder::ImageRequest {
 public:
  // Allocates a request and kicks off the asynchronous decode. The object
  // deletes itself from the decode callbacks below.
  static void Start(const JavaRef<jobject>& jcallback,
                    const std::vector<uint8_t>& thumbnail_data) {
    auto* request = new ContextMenuPopulatorImageRequest(jcallback);
    ImageDecoder::Start(request, thumbnail_data);
  }

 protected:
  // Success path: convert to a Java Bitmap and run the callback with it.
  void OnImageDecoded(const SkBitmap& decoded_image) override {
    base::android::RunObjectCallbackAndroid(
        jcallback_, gfx::ConvertToJavaBitmap(&decoded_image));
    delete this;
  }

  // Failure path: run the callback with a null Java object.
  void OnDecodeImageFailed() override {
    base::android::ScopedJavaLocalRef<jobject> j_bitmap;
    base::android::RunObjectCallbackAndroid(jcallback_, j_bitmap);
    delete this;
  }

 private:
  explicit ContextMenuPopulatorImageRequest(const JavaRef<jobject>& jcallback)
      : jcallback_(jcallback) {}

  // Global ref keeps the Java callback alive across the async decode.
  const base::android::ScopedJavaGlobalRef<jobject> jcallback_;

  DISALLOW_IMPLICIT_CONSTRUCTORS(ContextMenuPopulatorImageRequest);
};

// Maps the Java-side ContextMenuImageFormat int to the mojom enum used to
// talk to the renderer.
chrome::mojom::ImageFormat ToChromeMojomImageFormat(int image_format) {
  auto format = static_cast<ContextMenuImageFormat>(image_format);
  switch (format) {
    case ContextMenuImageFormat::JPEG:
      return chrome::mojom::ImageFormat::JPEG;
    case ContextMenuImageFormat::PNG:
      return chrome::mojom::ImageFormat::PNG;
    case ContextMenuImageFormat::ORIGINAL:
      return chrome::mojom::ImageFormat::ORIGINAL;
  }
  NOTREACHED();
  return chrome::mojom::ImageFormat::JPEG;
}

// Share path: package the raw encoded bytes plus the file extension into a
// Java result object and hand it to the callback (no decode needed).
void OnRetrieveImageForShare(
    mojo::AssociatedRemote<chrome::mojom::ChromeRenderFrame>
        chrome_render_frame,
    const JavaRef<jobject>& jcallback,
    const std::vector<uint8_t>& thumbnail_data,
    const gfx::Size& original_size,
    const std::string& image_extension) {
  JNIEnv* env = base::android::AttachCurrentThread();
  auto j_data = base::android::ToJavaByteArray(env, thumbnail_data);
  auto j_extension =
      base::android::ConvertUTF8ToJavaString(env, image_extension);
  base::android::RunObjectCallbackAndroid(
      jcallback, Java_ChromeContextMenuPopulator_createImageCallbackResult(
                     env, j_data, j_extension));
}

// Context-menu path: the menu needs an actual Bitmap, so decode the bytes
// via the self-deleting request above.
void OnRetrieveImageForContextMenu(
    mojo::AssociatedRemote<chrome::mojom::ChromeRenderFrame>
        chrome_render_frame,
    const JavaRef<jobject>& jcallback,
    const std::vector<uint8_t>& thumbnail_data,
    const gfx::Size& original_size,
    const std::string& filename_extension) {
  ContextMenuPopulatorImageRequest::Start(jcallback, thumbnail_data);
}

}  // namespace

ChromeContextMenuPopulator::ChromeContextMenuPopulator(
    content::WebContents* web_contents)
    : web_contents_(web_contents) {}

// JNI entry: starts a download for the link/image under the context menu.
void ChromeContextMenuPopulator::OnStartDownload(
    JNIEnv* env,
    const JavaParamRef<jobject>& obj,
    const JavaParamRef<jobject>& jcontext_menu_params,
    jboolean jis_link) {
  std::string headers;
  auto* context_menu_params =
      context_menu::ContextMenuParamsFromJavaObject(jcontext_menu_params);
  DownloadControllerBase::Get()->StartContextMenuDownload(
      *context_menu_params, web_contents_, jis_link, headers);
}

// JNI entry: reverse-image-search the tapped image in a new tab.
void ChromeContextMenuPopulator::SearchForImage(
    JNIEnv* env,
    const JavaParamRef<jobject>& obj,
    const JavaParamRef<jobject>& jrender_frame_host,
    const JavaParamRef<jobject>& jcontext_menu_params) {
  auto* render_frame_host =
      content::RenderFrameHost::FromJavaRenderFrameHost(jrender_frame_host);
  if (!render_frame_host)
    return;
  auto* context_menu_params =
      context_menu::ContextMenuParamsFromJavaObject(jcontext_menu_params);
  CoreTabHelper::FromWebContents(web_contents_)
      ->SearchByImageInNewTab(render_frame_host, context_menu_params->src_url);
}

// JNI entry: fetch the tapped image's encoded bytes for the share sheet.
void ChromeContextMenuPopulator::RetrieveImageForShare(
    JNIEnv* env,
    const JavaParamRef<jobject>& obj,
    const JavaParamRef<jobject>& jrender_frame_host,
    const JavaParamRef<jobject>& jcallback,
    jint max_width_px,
    jint max_height_px,
    jint j_image_format) {
  RetrieveImageInternal(env, base::BindOnce(&OnRetrieveImageForShare),
                        jrender_frame_host, jcallback, max_width_px,
                        max_height_px,
                        ToChromeMojomImageFormat(j_image_format));
}

// JNI entry: fetch the tapped image as a Bitmap for the menu header.
void ChromeContextMenuPopulator::RetrieveImageForContextMenu(
    JNIEnv* env,
    const JavaParamRef<jobject>& obj,
    const JavaParamRef<jobject>& jrender_frame_host,
    const JavaParamRef<jobject>& jcallback,
    jint max_width_px,
    jint max_height_px) {
  // For context menu, Image needs to be PNG for receiving transparency pixels.
  RetrieveImageInternal(env, base::BindOnce(&OnRetrieveImageForContextMenu),
                        jrender_frame_host, jcallback, max_width_px,
                        max_height_px, chrome::mojom::ImageFormat::PNG);
}

// Shared plumbing for both image-retrieval entry points: asks the renderer
// (over the ChromeRenderFrame mojo interface) for the image under the
// context node, then forwards the reply to |retrieve_callback|.
void ChromeContextMenuPopulator::RetrieveImageInternal(
    JNIEnv* env,
    ImageRetrieveCallback retrieve_callback,
    const JavaParamRef<jobject>& jrender_frame_host,
    const JavaParamRef<jobject>& jcallback,
    jint max_width_px,
    jint max_height_px,
    chrome::mojom::ImageFormat image_format) {
  auto* render_frame_host =
      content::RenderFrameHost::FromJavaRenderFrameHost(jrender_frame_host);
  if (!render_frame_host)
    return;
  mojo::AssociatedRemote<chrome::mojom::ChromeRenderFrame> chrome_render_frame;
  render_frame_host->GetRemoteAssociatedInterfaces()->GetInterface(
      &chrome_render_frame);
  // Bind the InterfacePtr into the callback so that it's kept alive
  // until there's either a connection error or a response.
  auto* thumbnail_capturer_proxy = chrome_render_frame.get();
  // NOTE(review): the first argument looks like a maximum pixel-area budget
  // (width * height) — confirm against the ChromeRenderFrame mojom.
  thumbnail_capturer_proxy->RequestImageForContextNode(
      max_width_px * max_height_px, gfx::Size(max_width_px, max_height_px),
      image_format,
      base::BindOnce(
          std::move(retrieve_callback), base::Passed(&chrome_render_frame),
          base::android::ScopedJavaGlobalRef<jobject>(env, jcallback)));
}

// JNI factory: wraps the WebContents in a native populator and returns the
// pointer to Java as a long (0 if the WebContents is null).
static jlong JNI_ChromeContextMenuPopulator_Init(
    JNIEnv* env,
    const JavaParamRef<jobject>& jweb_contents) {
  if (jweb_contents.is_null())
    return reinterpret_cast<intptr_t>(nullptr);
  auto* web_contents = content::WebContents::FromJavaWebContents(jweb_contents);
  DCHECK(web_contents);
  return reinterpret_cast<intptr_t>(
      new ChromeContextMenuPopulator(web_contents));
}
def _get_number(s, init=0): return _get_number(s[1:], init * 10 + int(s[0])) if len(s) > 0 and s[0].isdigit() else (s, init)
/******************************************************************************
 *                      Code generated with sympy 0.7.6                       *
 *                                                                            *
 *              See http://www.sympy.org/ for more information.               *
 *                                                                            *
 *                       This file is part of 'project'                       *
 ******************************************************************************/
#include "index_dist_thumb_inter_side_0.h"
#include <math.h>

/* Sympy reduced this distance expression to the constant zero, so the
 * function simply returns it directly. */
double index_dist_thumb_inter_side_0() {
  return 0;
}
Evidence that a sequence similar to TAR is important for induction of the JC virus late promoter by human immunodeficiency virus type 1 Tat A specific RNA sequence located in the leader of all human immunodeficiency virus type 1 (HIV-1) mRNAs termed the transactivation response element, or TAR, is a primary target for induction of HIV-1 long terminal repeat activity by the HIV-1-derived trans-regulatory protein, Tat. Human neurotropic virus, JC virus (JCV), a causative agent of the degenerative demyelinating disease progressive multifocal leukoencephalopathy, contains sequences in the 5' end of the late RNA species with an extensive homology to HIV-1 TAR. In this study, we examined the possible role of the JCV-derived TAR-homologous sequence in Tat-mediated activation of the JCV late promoter (Tada et al., Proc. Natl. Acad. Sci. USA 87:3479-3483, 1990). Results from site-directed mutagenesis revealed that critical G residues required for the function of HIV-1 TAR that are conserved in the JCV TAR homolog play an important role in Tat activation of the JCV promoter. In addition, in vivo competition studies suggest that shared regulatory components mediate Tat activation of the JCV late and HIV-1 long terminal repeat promoters. Furthermore, we showed that the JCV-derived TAR sequence behaves in the same way as HIV-1 TAR in response to two distinct Tat mutants, one of which that has no ability to bind to HIV-1 TAR and another that lacks transcriptional activity on a responsive promoter. These results suggest that the TAR homolog of the JCV late promoter is responsive to HIV-1 Tat induction and thus may participate in the overall activation of the JCV late promoter mediated by this transactivation.
Biomarkers of Chronic Inflammatory State in Uremia and Cardiovascular Disease Cardiovascular disease is the leading cause of death in the general population; traditional risk factors seem inadequate to explain completely the remarkable prevalence of cardiovascular mortality and morbidity observed in the uremic population. A role for chronic inflammation has been well established in the development of atherosclerotic disease, and, on the basis of these observations, atherosclerosis might be considered an inflammatory disease. Inflammation has been implicated in the etiology of coronary artery disease in the general population, and traditional inflammatory biomarkers such as C-reactive protein (CRP) and interleukin-6 (IL-6) have been shown to predict cardiovascular events in both symptomatic and asymptomatic individuals as well as those in the uremic population. Later on, new nontraditional markers were related to the risk of cardiovascular morbidity and mortality in general and in uremic population. As a consequence of the expanding research base and availability of assays, the number of inflammatory marker tests ordered by clinicians for cardiovascular disease (CVD) risk prediction has grown rapidly and several commercial assays have become available. So, up to now we can consider that several new nontraditional markers as CD40-CD40 ligand system and pentraxin-3 seem to be significant features of cardiovascular disease in general and in ESRD population. Introduction Patients with end-stage kidney disease undergoing chronic hemodialysis (HD) present higher mortality rates compared with the general population. Once patients are on HD, the risk of cardiovascular death is approximately 30 times higher than that in the general population and remains 10-20 times higher after stratification for age, gender, and the presence of diabetes. 
About half of the deaths of patients on dialysis are attributed to cardiovascular causes including coronary heart disease, cerebrovascular disease, peripheral vascular disease, and heart failure. End-stage renal disease (ESRD) patients suffer from a state of chronic inflammation leading to cardiovascular complications, progressive malnutrition, and death. Inflammation is subclinical, and chronic disorders of the cytokine system or acute-phase proteins may be observed as the sole evidence of a proinflammatory disorder. According to this hypothesis traditional inflammatory biomarkers such as tumor necrosis factor-alfa (TNF-alfa), C-reactive protein (CRP), and interleukin-6 (IL-6) have been shown to predict cardiovascular events in both symptomatic and asymptomatic individuals as well as those in the uremic population. More recently, several new nontraditional biomarkers have been introduced in the clinical practice. Traditional Biomarkers of Chronic Inflammation Low-grade chronic inflammation, as indicated by levels of high-sensitivity C-reactive protein (hs-CRP), prospectively defines the risk of atherosclerotic complications, adding to the prognostic information provided by traditional risk factors. The study of Ridker et al. provides convincing evidence that, in apparently healthy subjects, baseline serum levels of hs-CRP are predictive of future myocardial infarction and ischemic stroke. Subsequent meta-analysis of prospective population-based studies has compared patients in the lower tertile of hs-CRP with those in the upper tertile. With a good consistency between studies, a higher risk for major coronary events was observed for the upper tertile with the lowest tertile used as a reference. In general population most studies showed a dose-response relationship between the level of hs-CRP and risk of incident coronary disease. Recent papers also suggest association with incidence of sudden death and peripheral arterial disease. 
Through stratification or multivariable statistical adjustment, hs-CRP retains an independent association with incident coronary events after adjusting for age, total cholesterol, HDL cholesterol, smoking, body mass index, diabetes, history of hypertension, exercise level, and family history of coronary disease. In terms of prediction of recurrent CVD events and death, the strongest association with prognosis has been with hs-CRP; hs-CRP consistently predicts new coronary events in patients with unstable angina and acute myocardial infarction. As elevated serum levels of hs-CRP have been shown to be such a strong predictor of cardiovascular mortality in the general population, available data suggest that the association between inflammation and atherosclerosis is particularly strong in uremic patients. Zimmermann et al. reported that chronic inflammation enhances cardiovascular risk and mortality; a few years later Ikizler et al. in a prospective study assessed the importance of hs-CRP values as an independent determinant of hospitalization in chronic hemodialysis (HD) patients. Recently, it has been shown that proinflammatory cytokines such as IL-6 may exert a direct inflammatory effect on the heart and peripheral circulation. In a previously published paper, we investigated the joint predictive power of CRP and IL-6, in order to ascertain what prognostic information each index carries independently of the other. To this aim, IL-6 and CRP plasma levels were measured in a cohort of 218 ESRD patients from different centres over a 4-year followup. Main outcomes were cardiovascular and total mortality. This study showed that plasma IL-6 rather than CRP better predicts outcome in ESRD patients. Various possible explanations may underlie the advantage of IL-6 over CRP as an outcome predictor.
One possibility is that, being located upstream in the cascade of events which lead to the synthesis of many acute-phase reactants, IL-6 is a better marker of the inflammatory burden affecting the development of cardiovascular disease. Another possibility is that levels of IL-6 vary less than those of CRP, leading to a more accurate classification of patients at risk when one single sample is taken. Finally, the toxic effects of IL-6 on the heart and peripheral vasculature might be stronger than those of CRP. This study provides some important implications. First, it gives further support to the hypothesis about the role of inflammatory mediators in the genesis of cardiovascular disease in dialysis patients. Secondly, it provides evidence suggesting the use of IL-6 in addition to, or even in place of, CRP for the identification of patients at risk. Zhang et al. reported that there was no association between CRP haplotypes and cardiovascular outcome in dialysis patients; this study argues against CRP as a cardiovascular risk factor. On the other hand, because variations within the IL-6 gene were shown to affect the risk for CVD in a multiethnic dialysis cohort, this suggests that IL-6 should be the target for interventional studies. According to these data, we suggest that all traditional risk factors for death should be measured accurately in uremic patients. Clinical events should be identified prospectively, and, whenever possible, IL-6 levels should be measured repeatedly during the course of followup. TNF-alfa, a proinflammatory cytokine (17 kDa) originally associated with killing of tumor cells, has a pivotal role in regulating both pro-and anti-inflammatory mediators. TNF-alfa has been regarded a "master regulator" of the cytokine cascade that provides a rapid form of host defense against infection but is fatal in excess. TNF-alfa is highly multifunctional with effects on lipid metabolism, coagulation, insulin resistance, and endothelial dysfunction. 
The major cellular origin of TNF-alfa, previously known as cachectin, is activated macrophages. It should be noted that, whereas IL-6 is strongly associated with CRP and other inflammatory biomarkers, the association between TNF-alfa and CRP is rather weak. This suggests that circulating levels may be influenced by a number of different factors and that circulating TNF-alfa levels may not reflect biologic activity at the tissue levels. Although available evidence suggests upregulated TNF-alfa system activity in ESRD patients, data linking elevated circulating TNF-alfa levels to CVD and mortality have not been as clear as for IL-6. Nontraditional Biomarkers of Chronic Inflammation It is now generally accepted that CD40-CD40 ligand interaction is a main determinant of the proatherogenic phenotype. Originally identified in B and T lymphocytes as being involved in T-cell-dependent B-cell activation and differentiation, the CD40-CD40 ligand system has been implicated in the pathophysiology of several chronic inflammatory diseases including risk factor-related vascular damage. CD40, a 50 kDa integral membrane protein of the tumor necrosis factor receptor family, and its cognate agonist CD40 ligand also known as CD154, a transmembrane 39-kDalton protein structurally related to tumor necrosis factor-alpha, are coexpressed by several cells of the vasculature, including endothelial cells, smooth muscle cells, and macrophages. CD40 ligand also occurs in a soluble form (sCD40L) that is considered to possess a full biological activity. Increased sCD40L levels have been described in obesity, hypercholesterolemia, diabetes, and unstable angina. Furthermore, it has been recently reported that circulating sCD40L has a strong independent prognostic value among apparently healthy individuals and patients with acute coronary syndromes and represents an independent predictor of restenosis after percutaneous transluminal angioplasty. 
Thus, the clinical association between soluble CD40L and cardiovascular events suggests that soluble CD40L function spans the time interval from early atherogenesis to late thrombotic complications. According to this, Hocher et al. recently demonstrated during a follow-up period of 52 months that sCD40L is an independent predictor of atherothrombotic events in patients on HD. More recently we expanded on this topic demonstrating that the prognostic value of sCD40L is evident also in over 200 chronic HD patients from the RISCAVID population at 24-month followup (RISCAVID, "risk cardiovascular in dialysis" is a prospective observational study performed on a large HD population in the northwestern region of Tuscany, Italy) ( Figure 1). In this paper we were able to demonstrate that this prognostic value of sCD40L is already evident at 24 months followup thus reinforcing the strong link between sCD40L and clinical outcomes in patients in HD and suggesting a possible clinical use of this new promising biomarker to better define cardiovascular prognosis in these patients. The striking prognostic impact of sCD40L on the clinical course in patients in HD raises questions about the origin of this biomarker. Platelets represent the main source of circulating sCD40L in patients with acute coronary syndrome and in hypercholesterolemia. Accordingly, plasma levels of sCD40L correlate closely with markers of platelet activation in these patient populations. Thus, increased circulating levels of sCD40L might reflect an enhanced platelet activation in HD. According to this, it has been demonstrated that circulating activated platelets (P-selectin/CD63-positive platelets) are higher in HD patients than in controls and further increase during HD sessions. Potential causes of such activation include possible stimulation of platelets by proinflammatory cytokines that have been reported to be increased in patients with end-stage renal disease. 
Furthermore, the increased lipid peroxidation that has been found in patients with chronic renal failure might also participate in activating platelets. On the other hand, the lack of any correlation between circulating levels of sCD40L and CRP seems to exclude a role for this plateletactivating inflammatory biomarker in the enhanced sCD40L signaling observed in our study population. Pentraxin is a family of proteins considered to be markers of the acute-phase inflammation (Figure 2). Currently, the pentraxin protein family is divided into two subfamilies based on size: the classical "short" pentraxin (25 kDa) and the "long" pentraxin. Pentraxin 3 (PTX3) is a "long" pentraxin that is highly expressed in the heart, whereas C-reactive protein (CRP) is a "short" pentraxin and is produced from the liver. PTX-3 expression occurs in a variety of cell types, including endothelial cells, mononuclear phagocytes, dendritic cells, smooth muscle cells, fibroblasts, adipocytes, and epithelial cells in response to inflammatory cytokines and Toll-like receptor engagement. In several recent studies PTX3 appeared to be not only an early indicator of irreversible myocyte injury but also a prognostic marker in patients with acute myocardial infarction. Latini et al. reported the acute-phase protein PTX3 as a predictor of 3-month mortality after adjustment for major risk factors and other acutephase prognostic markers. In a recently published paper of Barbui et al., the role of PTX3 as a prognostic biomarker was shown by an increased serum PTX3 that was closely related to death due to MI, in-hospital or to 6 months, in ACS patients, including STEMI, NSTEMI, and UAP groups. More recently, Suliman et al. analyzed plasma PTX-3 concentrations in relation to comorbidities (Davies score), protein-energy wasting (PEW), and inflammation markers in 200 prevalent HD patients, aged 64 ± 14 years, who had been on HD treatment for a median period of 36 months. 
Survival (42 months) was analyzed in relation to PTX-3 levels (high PTX-3 tertile versus two lower tertiles). This study shows that high levels of PTX-3 were found in prevalent HD patients with CVD and PEW; furthermore, a powerful association of PTX-3 with comorbidities was found. As PTX-3 predicts mortality independently of age and comorbidities in prevalent HD patients, further well-designed studies addressing the clinical implications and pathogenic mechanisms of this long pentraxin are warranted. Conclusions Although the successful introduction of dialysis in the 1960s has increased life expectancy in patients with ESRD, the mortality rate is still unacceptably high, due primarily to a process of inflammation-associated accelerated atherosclerosis. The accelerated atherosclerotic process of ESRD may involve several interrelated processes, such as oxidative stress, endothelial dysfunction, vascular calcification, and inflammation. The explosion of new knowledge on the central role of a dysregulated cytokine and Th system activity has opened new and exciting opportunities for nephrologists to manage and prevent CVD and wasting in this diseased patient group. The use of several traditional and new biomarkers of inflammatory and cardiovascular risk is of great utility in this high-risk population.
Orthogonal Neighborhood Preserving Projections: A Projection-Based Dimensionality Reduction Technique This paper considers the problem of dimensionality reduction by orthogonal projection techniques. The main feature of the proposed techniques is that they attempt to preserve both the intrinsic neighborhood geometry of the data samples and the global geometry. In particular, we propose a method, named orthogonal neighborhood preserving projections, which works by first building an "affinity" graph for the data in a way that is similar to the method of locally linear embedding (LLE). However, in contrast with the standard LLE where the mapping between the input and the reduced spaces is implicit, ONPP employs an explicit linear mapping between the two. As a result, handling new data samples becomes straightforward, as this amounts to a simple linear transformation. We show how we can define kernel variants of ONPP, as well as how we can apply the method in a supervised setting. Numerical experiments are reported to illustrate the performance of ONPP and to compare it with a few competing methods.
class Animal:
    """Simple animal record with a size sanity check."""

    # Accepted values for the ``size`` attribute.
    _VALID_SIZES = ("small", "medium", "large")

    def __init__(self, size, species):
        self.size = size        # expected: "small", "medium" or "large"
        self.species = species  # e.g. "dog", "cat"

    def sizeCheck(self):
        """Return the stored size, or an error prompt if it is invalid.

        A membership test replaces the original triple-nested ``!=``
        chain; the behavior is unchanged.
        """
        if self.size not in self._VALID_SIZES:
            return "Enter either: small, medium, large"
        return self.size

    def amdog(self):
        """Return a bark if this animal is a dog, a refusal otherwise."""
        if self.species == "dog":
            return "Woof"
        return "you are not a dog"
Are DMI+QoI Fungicide Premixes During flowering Worthwhile for Fusarium head blight Control in Wheat? A Meta-analysis. Fusarium head blight (FHB), caused mainly by Fusarium graminearum, is best controlled with demethylation inhibitor (DMI) fungicides during flowering. However, the use of premixes of DMI and quinone outside inhibitor (QoI) fungicides to control FHB has increased in Brazil. Data on FHB severity and wheat yields measured in field experiments conducted in Brazil were gathered from both peer- and non-peer-reviewed sources published from 2000 to 2018. After applying selection criteria, 73 field trials from 35 bibliographic sources were identified, among which 50% of the data were obtained from cooperative network trials conducted after 2011. To be included in the analysis, a DMI+QoI premixes or tebuconazole (TEB) were tested in at least 14 trials and three years. Four premixes met the criteria. Estimates of percent control (and respective 95% confidence interval) by a network model fitted to the log of the treatment means ranged from 44.1% (pyraclostrobin + metconazole applied once; 32.4 to 53.7) to 64.3% (pyraclostrobin + metconazole; 58.4 to 69.3); the latter not differing from TEB (59.9%, 53.6 to 65.3). Yield response was statistically similar for pyraclostrobin + metconazole (532.1 kg/ha, 441 to 623) and trifloxystrobin + prothioconazole (494.9 kg/ha, 385 to 551), and both differed statistically from a group composed of TEB (448.2 kg/ha, 342 to 554), trifloxystrobin + TEB (468.2 kg/ha, 385 to 551), azoxystrobin + TEB (462.4 kg/ha, 366 to 558) and pyraclostrobin + metconazole applied once (413.7 kg/ha, 308 to 518). The two categories of FHB index (7% cut off) and yield (3,000 kg/ha cut off), both in the non-treated check, did not explain the heterogeneity in the estimates. 
Two sequential sprays of TEB or one spray of pyraclostrobin + metconazole as management choices are likely more profitable than DMI+QoI premixes sprayed twice during flowering, considering only the fungicide effects on yield.
<gh_stars>10-100
import { Status } from "allure-js-commons";
import { expect } from "chai";
import { suite, test } from "@testdeck/mocha";
import { findParameter, runTests } from "../utils";

// Verifies that the Allure writer records the "inputs" parameter produced by
// the bundled "testData" fixture suite.
@suite
class ParameterSuite {
  @test
  async shouldHaveParameter() {
    // Run the fixture and capture the stubbed Allure writer output.
    const writerStub = await runTests("testData");
    // The fixture suite itself must have been reported.
    expect(writerStub.groups.find((suite) => suite.name === "TestData")).not.eq(undefined);
    // Its test must exist and have passed.
    const test = writerStub.getTestByName("shouldCallTestUserDataOnTest");
    expect(test).not.eq(undefined);
    expect(test.status).eq(Status.PASSED);
    // The recorded "inputs" parameter holds the JSON-serialized test data.
    expect(findParameter(test, "inputs").value).eq(
      JSON.stringify({
        firstName: "Test",
        lastName: "User",
      }),
    );
  }
}
/* * Copyright 2011 Google Inc. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.google.devtools.j2objc.gen; import com.google.common.base.Function; import com.google.common.base.Predicate; import com.google.common.collect.Iterables; import com.google.devtools.j2objc.Options; import com.google.devtools.j2objc.ast.AbstractTypeDeclaration; import com.google.devtools.j2objc.ast.Annotation; import com.google.devtools.j2objc.ast.AnnotationTypeDeclaration; import com.google.devtools.j2objc.ast.BodyDeclaration; import com.google.devtools.j2objc.ast.CompilationUnit; import com.google.devtools.j2objc.ast.EnumDeclaration; import com.google.devtools.j2objc.ast.FieldDeclaration; import com.google.devtools.j2objc.ast.FunctionDeclaration; import com.google.devtools.j2objc.ast.Javadoc; import com.google.devtools.j2objc.ast.MethodDeclaration; import com.google.devtools.j2objc.ast.Name; import com.google.devtools.j2objc.ast.NativeDeclaration; import com.google.devtools.j2objc.ast.SingleVariableDeclaration; import com.google.devtools.j2objc.ast.TagElement; import com.google.devtools.j2objc.ast.TextElement; import com.google.devtools.j2objc.ast.TreeNode; import com.google.devtools.j2objc.ast.TreeNode.Kind; import com.google.devtools.j2objc.ast.TreeUtil; import com.google.devtools.j2objc.ast.TypeDeclaration; import com.google.devtools.j2objc.ast.VariableDeclaration; import com.google.devtools.j2objc.ast.VariableDeclarationFragment; import 
com.google.devtools.j2objc.types.IOSMethod;
import com.google.devtools.j2objc.types.IOSMethodBinding;
import com.google.devtools.j2objc.types.IOSParameter;
import com.google.devtools.j2objc.util.BindingUtil;
import com.google.devtools.j2objc.util.NameTable;

import org.eclipse.jdt.core.dom.IMethodBinding;
import org.eclipse.jdt.core.dom.ITypeBinding;
import org.eclipse.jdt.core.dom.IVariableBinding;
import org.eclipse.jdt.core.dom.Modifier;

import java.text.BreakIterator;
import java.util.Iterator;
import java.util.List;
import java.util.Locale;

/**
 * Generates source files from AST types. This class handles common actions
 * shared by the header and implementation generators.
 *
 * @author <NAME>
 */
public abstract class ObjectiveCSourceFileGenerator extends SourceFileGenerator {

  /**
   * Create a new generator.
   *
   * @param unit The AST of the source to generate
   * @param emitLineDirectives if true, generate CPP line directives
   */
  protected ObjectiveCSourceFileGenerator(CompilationUnit unit, boolean emitLineDirectives) {
    super(unit, emitLineDirectives);
  }

  /**
   * Generate an output source file from the specified type declaration.
   */
  public void generate(AbstractTypeDeclaration node) {
    // Dispatch on the concrete declaration kind; subclasses implement each case.
    if (node instanceof TypeDeclaration) {
      generate((TypeDeclaration) node);
    } else if (node instanceof EnumDeclaration) {
      generate((EnumDeclaration) node);
    } else if (node instanceof AnnotationTypeDeclaration) {
      generate((AnnotationTypeDeclaration) node);
    }
  }

  protected abstract void generate(TypeDeclaration node);

  protected abstract void generate(EnumDeclaration node);

  protected abstract void generate(AnnotationTypeDeclaration node);

  // Writes the generated output to the unit's output file.
  public void save(CompilationUnit node) {
    save(getOutputFileName(node));
  }

  // Maps a variable declaration to its compiler binding.
  private static final Function<VariableDeclaration, IVariableBinding> GET_VARIABLE_BINDING_FUNC =
      new Function<VariableDeclaration, IVariableBinding>() {
    public IVariableBinding apply(VariableDeclaration node) {
      return node.getVariableBinding();
    }
  };

  // True for fields declared static.
  private static final Predicate<VariableDeclaration> IS_STATIC_VARIABLE_PRED =
      new Predicate<VariableDeclaration>() {
    public boolean apply(VariableDeclaration node) {
      return BindingUtil.isStatic(node.getVariableBinding());
    }
  };

  // A static field needs runtime initialization unless it is a primitive constant.
  private static final Predicate<VariableDeclarationFragment> NEEDS_INITIALIZATION_PRED =
      new Predicate<VariableDeclarationFragment>() {
    public boolean apply(VariableDeclarationFragment frag) {
      IVariableBinding binding = frag.getVariableBinding();
      return BindingUtil.isStatic(binding) && !BindingUtil.isPrimitiveConstant(binding);
    }
  };

  // Clang attribute emitted for deprecated declarations.
  protected static final String DEPRECATED_ATTRIBUTE = "__attribute__((deprecated))";

  // Returns bindings for all static fields that require generated accessors.
  protected Iterable<IVariableBinding> getStaticFieldsNeedingAccessors(
      AbstractTypeDeclaration node) {
    return Iterables.transform(
        Iterables.filter(TreeUtil.getAllFields(node), IS_STATIC_VARIABLE_PRED),
        GET_VARIABLE_BINDING_FUNC);
  }

  /**
   * Excludes primitive constants which will not have variables declared for them.
   */
  protected Iterable<VariableDeclarationFragment> getStaticFieldsNeedingInitialization(
      AbstractTypeDeclaration node) {
    return Iterables.filter(TreeUtil.getAllFields(node), NEEDS_INITIALIZATION_PRED);
  }

  protected boolean hasInitializeMethod(AbstractTypeDeclaration node) {
    return !node.getClassInitStatements().isEmpty();
  }

  /**
   * Print a list of methods.
   */
  protected void printMethods(List<MethodDeclaration> methods) {
    for (MethodDeclaration m : methods) {
      printMethod(m);
    }
  }

  // Routes a method to the mapped/constructor/normal printer as appropriate.
  protected void printMethod(MethodDeclaration m) {
    IMethodBinding binding = m.getMethodBinding();
    IOSMethod iosMethod = IOSMethodBinding.getIOSMethod(binding);
    if (iosMethod != null) {
      printMappedMethodDeclaration(m, iosMethod);
    } else if (m.isConstructor()) {
      printConstructor(m);
    } else {
      printNormalMethod(m);
    }
  }

  protected abstract void printFunction(FunctionDeclaration declaration);

  protected abstract void printNativeDeclaration(NativeDeclaration declaration);

  private void printDeclaration(BodyDeclaration declaration) {
    switch (declaration.getKind()) {
      case METHOD_DECLARATION:
        printMethod((MethodDeclaration) declaration);
        return;
      case NATIVE_DECLARATION:
        printNativeDeclaration((NativeDeclaration) declaration);
        return;
      default:
        break;
    }
  }

  protected void printDeclarations(Iterable<BodyDeclaration> declarations) {
    for (BodyDeclaration declaration : declarations) {
      printDeclaration(declaration);
    }
  }

  // Prints only the function declarations from the given set.
  protected void printFunctions(Iterable<BodyDeclaration> declarations) {
    for (BodyDeclaration declaration : declarations) {
      if (declaration.getKind() == Kind.FUNCTION_DECLARATION) {
        printFunction((FunctionDeclaration) declaration);
      }
    }
  }

  protected abstract void printNormalMethod(MethodDeclaration m);

  protected abstract void printConstructor(MethodDeclaration m);

  protected abstract void printMappedMethodDeclaration(MethodDeclaration m, IOSMethod mappedMethod);

  /**
   * Create an Objective-C method or constructor declaration string for an
   * inlined method.
   */
  protected String mappedMethodDeclaration(MethodDeclaration method, IOSMethod mappedMethod) {
    StringBuffer sb = new StringBuffer();

    // Explicitly test hashCode() because of NSObject's hash return value.
    String baseDeclaration;
    if (mappedMethod.getName().equals("hash")) {
      baseDeclaration = "- (NSUInteger)hash";
    } else {
      String returnType = method.isConstructor() ? "instancetype"
          : NameTable.getObjCType(method.getReturnType().getTypeBinding());
      baseDeclaration = String.format("%c (%s)%s",
          Modifier.isStatic(method.getModifiers()) ? '+' : '-',
          returnType, mappedMethod.getName());
    }

    sb.append(baseDeclaration);
    Iterator<IOSParameter> iosParameters = mappedMethod.getParameters().iterator();
    if (iosParameters.hasNext()) {
      List<SingleVariableDeclaration> parameters = method.getParameters();
      IOSParameter first = iosParameters.next();
      SingleVariableDeclaration var = parameters.get(first.getIndex());
      addTypeAndName(first, var, sb);
      while (iosParameters.hasNext()) {
        sb.append(mappedMethod.isVarArgs() ? ", " : " ");
        IOSParameter next = iosParameters.next();
        sb.append(next.getParameterName());
        var = parameters.get(next.getIndex());
        addTypeAndName(next, var, sb);
      }
    }
    return sb.toString();
  }

  // Appends ":(<type>)<name>" for a single mapped parameter.
  private void addTypeAndName(IOSParameter iosParameter, SingleVariableDeclaration var,
      StringBuffer sb) {
    sb.append(":(");
    sb.append(iosParameter.getType());
    sb.append(')');
    sb.append(var.getName().getIdentifier());
  }

  /**
   * Create an Objective-C method declaration string.
   */
  protected String methodDeclaration(MethodDeclaration m) {
    assert !m.isConstructor();
    StringBuffer sb = new StringBuffer();
    boolean isStatic = Modifier.isStatic(m.getModifiers());
    IMethodBinding binding = m.getMethodBinding();
    String methodName = NameTable.getName(binding);
    String baseDeclaration = String.format("%c (%s)%s", isStatic ? '+' : '-',
        NameTable.getObjCType(binding.getReturnType()), methodName);
    sb.append(baseDeclaration);
    parametersDeclaration(binding, m.getParameters(), baseDeclaration, sb);
    return sb.toString();
  }

  /**
   * Create an Objective-C constructor declaration string.
   */
  protected String constructorDeclaration(MethodDeclaration m) {
    return constructorDeclaration(m, /* isInner */ false);
  }

  protected String constructorDeclaration(MethodDeclaration m, boolean isInner) {
    assert m.isConstructor();
    StringBuffer sb = new StringBuffer();
    IMethodBinding binding = m.getMethodBinding();
    String baseDeclaration = "- (instancetype)init";
    if (isInner) {
      // Inner-class constructors get the full type name appended to "init".
      baseDeclaration += NameTable.getFullName(binding.getDeclaringClass());
    }
    sb.append(baseDeclaration);
    parametersDeclaration(binding, m.getParameters(), baseDeclaration, sb);
    return sb.toString();
  }

  /**
   * Create an Objective-C constructor from a list of annotation member
   * declarations.
   */
  protected String annotationConstructorDeclaration(ITypeBinding annotation) {
    StringBuffer sb = new StringBuffer();
    sb.append("- (instancetype)init");
    IMethodBinding[] members = BindingUtil.getSortedAnnotationMembers(annotation);
    for (int i = 0; i < members.length; i++) {
      // First selector piece reads "initWithFoo:", later ones " withBar:".
      if (i == 0) {
        sb.append("With");
      } else {
        sb.append(" with");
      }
      IMethodBinding member = members[i];
      sb.append(NameTable.capitalize(member.getName()));
      sb.append(":(");
      sb.append(NameTable.getSpecificObjCType(member.getReturnType()));
      sb.append(')');
      sb.append(member.getName());
      sb.append('_');
    }
    return sb.toString();
  }

  // Appends the keyword:(type)name pieces for each parameter, aligning
  // continuation lines under the base declaration.
  private void parametersDeclaration(IMethodBinding method, List<SingleVariableDeclaration> params,
      String baseDeclaration, StringBuffer sb) throws AssertionError {
    method = BindingUtil.getOriginalMethodBinding(method);
    if (!params.isEmpty()) {
      ITypeBinding[] parameterTypes = method.getParameterTypes();
      boolean first = true;
      int nParams = params.size();
      for (int i = 0; i < nParams; i++) {
        SingleVariableDeclaration param = params.get(i);
        ITypeBinding typeBinding = parameterTypes[i];
        String keyword = NameTable.parameterKeyword(typeBinding);
        if (first) {
          sb.append(NameTable.capitalize(keyword));
          baseDeclaration += keyword;
          first = false;
        } else {
          sb.append(pad(baseDeclaration.length() - keyword.length()));
          sb.append(keyword);
        }
        IVariableBinding var = param.getVariableBinding();
        sb.append(String.format(":(%s)%s", NameTable.getSpecificObjCType(var.getType()),
            NameTable.getName(var)));
        if (i + 1 < nParams) {
          sb.append('\n');
        }
      }
    }
  }

  /** Ignores deprecation warnings. Deprecation warnings should be visible for human authored code,
   * not transpiled code. This method should be paired with popIgnoreDeprecatedDeclarationsPragma.
   */
  protected void pushIgnoreDeprecatedDeclarationsPragma() {
    if (Options.generateDeprecatedDeclarations()) {
      printf("#pragma clang diagnostic push\n");
      printf("#pragma GCC diagnostic ignored \"-Wdeprecated-declarations\"\n");
    }
  }

  /** Restores deprecation warnings after a call to pushIgnoreDeprecatedDeclarationsPragma. */
  protected void popIgnoreDeprecatedDeclarationsPragma() {
    if (Options.generateDeprecatedDeclarations()) {
      printf("#pragma clang diagnostic pop\n");
    }
  }

  // Emits a Javadoc comment as an Objective-C doc comment, promoting the first
  // sentence of the description to an @brief tag.
  protected void printDocComment(Javadoc javadoc) {
    if (javadoc != null) {
      printIndent();
      println("/**");
      List<TagElement> tags = javadoc.getTags();
      for (TagElement tag : tags) {
        if (tag.getTagName() == null) {
          // Description section.
          String description = printTagFragments(tag.getFragments());

          // Extract first sentence from description.
          BreakIterator iterator = BreakIterator.getSentenceInstance(Locale.US);
          iterator.setText(description.toString());
          int start = iterator.first();
          int end = iterator.next();
          if (end != BreakIterator.DONE) {
            // Print brief tag first, since Quick Help shows it first. This makes the
            // generated source easier to review.
            printDocLine(String.format("@brief %s", description.substring(start, end).trim()));
            String remainder = description.substring(end).trim();
            if (!remainder.isEmpty()) {
              printDocLine(remainder);
            }
          } else {
            printDocLine(description.trim());
          }
        } else {
          String doc = printJavadocTag(tag);
          if (!doc.isEmpty()) {
            printDocLine(doc);
          }
        }
      }
      printIndent();
      println(" */");
    }
  }

  private void printDocLine(String line) {
    printIndent();
    print(' ');
    println(line);
  }

  // Translates a Javadoc block tag into its Xcode-compatible equivalent, or
  // an empty string for tags that are dropped.
  private String printJavadocTag(TagElement tag) {
    String tagName = tag.getTagName();
    // Xcode 5 compatible tags.
    if (tagName.equals(TagElement.TAG_AUTHOR)
        || tagName.equals(TagElement.TAG_EXCEPTION)
        || tagName.equals(TagElement.TAG_PARAM)
        || tagName.equals(TagElement.TAG_RETURN)
        || tagName.equals(TagElement.TAG_SINCE)
        || tagName.equals(TagElement.TAG_THROWS)
        || tagName.equals(TagElement.TAG_VERSION)) {
      return String.format("%s %s", tagName, printTagFragments(tag.getFragments()));
    }
    if (tagName.equals(TagElement.TAG_DEPRECATED)) {
      // Deprecated annotation translated instead.
      return "";
    }
    if (tagName.equals(TagElement.TAG_SEE)) {
      // TODO(tball): implement @see when Xcode quick help links are documented.
      return "";
    }
    if (tagName.equals(TagElement.TAG_CODE)) {
      return String.format("<code>%s</code>", printTagFragments(tag.getFragments()));
    }
    // Remove tag, but return any text it has.
return printTagFragments(tag.getFragments()); } private String printTagFragments(List<TreeNode> fragments) { StringBuilder sb = new StringBuilder(); for (TreeNode fragment : fragments) { sb.append(' '); if (fragment instanceof TextElement) { String text = escapeDocText(((TextElement) fragment).getText()); sb.append(text.trim()); } else if (fragment instanceof TagElement) { sb.append(printJavadocTag((TagElement) fragment)); } else { sb.append(escapeDocText(fragment.toString()).trim()); } } return sb.toString().trim(); } private String escapeDocText(String text) { return text.replace("@", "@@").replace("/*", "/\\*"); } @Override protected String getOutputFileName(CompilationUnit node) { String result = super.getOutputFileName(node); if (node.getMainTypeName().equals(NameTable.PACKAGE_INFO_MAIN_TYPE)) { return result.replace(NameTable.PACKAGE_INFO_MAIN_TYPE, NameTable.PACKAGE_INFO_FILE_NAME); } return result; } /** * Prints the list of instance variables in a type. * * @param node the type to examine * @param privateVars if true, only print private vars, otherwise print all but private vars */ protected void printInstanceVariables(AbstractTypeDeclaration node, boolean privateVars) { indent(); boolean first = true; boolean printAllVars = !Options.hidePrivateMembers() && !privateVars; for (FieldDeclaration field : TreeUtil.getFieldDeclarations(node)) { int modifiers = field.getModifiers(); if (!Modifier.isStatic(field.getModifiers()) && (printAllVars || (privateVars == isPrivateOrSynthetic(modifiers)))) { List<VariableDeclarationFragment> vars = field.getFragments(); assert !vars.isEmpty(); IVariableBinding varBinding = vars.get(0).getVariableBinding(); ITypeBinding varType = varBinding.getType(); // Need direct access to fields possibly from inner classes that are // promoted to top level classes, so must make all visible fields public. 
if (first) { println(" @public"); first = false; } printDocComment(field.getJavadoc()); printIndent(); if (BindingUtil.isWeakReference(varBinding)) { // We must add this even without -use-arc because the header may be // included by a file compiled with ARC. print("__weak "); } String objcType = NameTable.getSpecificObjCType(varType); boolean needsAsterisk = !varType.isPrimitive() && !objcType.matches("id|id<.*>|Class"); if (needsAsterisk && objcType.endsWith(" *")) { // Strip pointer from type, as it will be added when appending fragment. // This is necessary to create "Foo *one, *two;" declarations. objcType = objcType.substring(0, objcType.length() - 2); } print(objcType); print(' '); for (Iterator<VariableDeclarationFragment> it = field.getFragments().iterator(); it.hasNext(); ) { VariableDeclarationFragment f = it.next(); if (needsAsterisk) { print('*'); } String name = NameTable.getName(f.getName().getBinding()); print(NameTable.javaFieldToObjC(name)); if (it.hasNext()) { print(", "); } } println(";"); } } unindent(); } protected boolean isPrivateOrSynthetic(int modifiers) { return Modifier.isPrivate(modifiers) || BindingUtil.isSynthetic(modifiers); } protected void printNormalMethodDeclaration(MethodDeclaration m) { newline(); printDocComment(m.getJavadoc()); print(this.methodDeclaration(m)); String methodName = NameTable.getName(m.getMethodBinding()); if (needsObjcMethodFamilyNoneAttribute(methodName)) { // Getting around a clang warning. // clang assumes that methods with names starting with new, alloc or copy // return objects of the same type as the receiving class, regardless of // the actual declared return type. This attribute tells clang to not do // that, please. 
// See http://clang.llvm.org/docs/AutomaticReferenceCounting.html // Sections 5.1 (Explicit method family control) // and 5.2.2 (Related result types) print(" OBJC_METHOD_FAMILY_NONE"); } if (needsDeprecatedAttribute(m.getAnnotations())) { print(" " + DEPRECATED_ATTRIBUTE); } println(";"); } protected boolean needsObjcMethodFamilyNoneAttribute(String name) { return name.startsWith("new") || name.startsWith("copy") || name.startsWith("alloc") || name.startsWith("init") || name.startsWith("mutableCopy"); } protected boolean needsDeprecatedAttribute(List<Annotation> annotations) { return Options.generateDeprecatedDeclarations() && hasDeprecated(annotations); } private boolean hasDeprecated(List<Annotation> annotations) { for (Annotation annotation : annotations) { Name annotationTypeName = annotation.getTypeName(); String expectedTypeName = annotationTypeName.isQualifiedName() ? "java.lang.Deprecated" : "Deprecated"; if (expectedTypeName.equals(annotationTypeName.getFullyQualifiedName())) { return true; } } return false; } protected void printFieldSetters(AbstractTypeDeclaration node, boolean privateVars) { ITypeBinding declaringType = node.getTypeBinding(); boolean newlinePrinted = false; boolean printAllVars = !Options.hidePrivateMembers() && !privateVars; for (FieldDeclaration field : TreeUtil.getFieldDeclarations(node)) { ITypeBinding type = field.getType().getTypeBinding(); int modifiers = field.getModifiers(); if (Modifier.isStatic(modifiers) || type.isPrimitive() || (!printAllVars && isPrivateOrSynthetic(modifiers) != privateVars)) { continue; } String typeStr = NameTable.getObjCType(type); String declaringClassName = NameTable.getFullName(declaringType); for (VariableDeclarationFragment var : field.getFragments()) { if (BindingUtil.isWeakReference(var.getVariableBinding())) { continue; } String fieldName = NameTable.javaFieldToObjC(NameTable.getName(var.getName().getBinding())); if (!newlinePrinted) { newlinePrinted = true; newline(); } 
println(String.format("J2OBJC_FIELD_SETTER(%s, %s, %s)", declaringClassName, fieldName, typeStr)); } } } protected String getFunctionSignature(FunctionDeclaration function) { StringBuilder sb = new StringBuilder(); String returnType = NameTable.getObjCType(function.getReturnType().getTypeBinding()); returnType += returnType.endsWith("*") ? "" : " "; sb.append(returnType).append(function.getName()).append('('); for (Iterator<SingleVariableDeclaration> iter = function.getParameters().iterator(); iter.hasNext(); ) { IVariableBinding var = iter.next().getVariableBinding(); String paramType = NameTable.getObjCType(var.getType()); paramType += (paramType.endsWith("*") ? "" : " "); sb.append(paramType + NameTable.getName(var)); if (iter.hasNext()) { sb.append(", "); } } sb.append(')'); return sb.toString(); } }
Holiday spirit filled the air at the 11th annual lighting of the Children’s Museum building Wednesday in San José. Spectators packed the museum grounds for this year’s event. Christmas songs by Costa Rican singer Arnoldo Castillo accompanied by The Tico Jazz Band began the 90-minute performance. Costa Rican musician Bernardo Quesada composed the music for the show. The festivities continued with a presentation of artistic ensembles followed by a performance of Lucierganos, a troupe of glowing characters. At one point, more than 40 characters, including two giant marionettes, were dancing on stage. Organizers expected about 10,000 people to attend the event this year. Kiany Villalobos, 8, and Raichel Rodriguez, 7, of San José were among those attending and had arrived early with family members to secure a good viewing area. It was the girls’ first time at the annual performance, and they were excited to see the spectacle. The finale came at 7:30 p.m. A countdown followed by the lighting of the building and a fireworks display involving more than 7,000 explosions from 40 different locations above the museum delighted children and adults alike.
//
//  LCXUIInit.h
//  LCXApp
//
//  Created by leichunxiang on 2019/11/5.
//  Copyright © 2019 lcx. All rights reserved.
//
//  NOTE: removed a stray "<filename>..." scraper artifact that preceded this
//  header comment; it is not Objective-C and would break compilation.

#import <UIKit/UIKit.h>

NS_ASSUME_NONNULL_BEGIN

/// UI-initialization helper. No public API is declared yet; members are
/// presumably added in the implementation or a category — TODO confirm.
@interface LCXUIInit : NSObject

@end

NS_ASSUME_NONNULL_END
The Impact of BIPM Amendments in the Romanian Mass Dissemination From January 2014 to January 2015 an extraordinary calibration using the IPK was carried out at the BIPM. After this calibration campaign, it was concluded that the results obtained for the set of working standards indicate the existence of an offset from the IPK over 22 years of 35 µg. Therefore, the Consultative Committee for Mass and Related Quantities (CCM) recommended that all mass calibrations of national prototypes and of mass standards issued by the BIPM during the years 2003-2013 need to be amended with this value. During this period, the Romanian National Prototype of the Kilogram (NPK) together with the stainless steel kilogram Ni81 was calibrated twice at the BIPM: in 2005 and 2013. After receiving the BIPM amendments, the first measure taken by the Mass laboratory was to perform the comparison between the NPK and stainless steel reference standards, which represents the main step in the dissemination of the mass unit in Romania. The paper describes the results obtained from this comparison, the impact of these new values on Romanian mass dissemination and the actions taken in consequence.
As with many people that populate the inner ring of Trumpland, Lewandowski has virtually no cachet independent from his relationship to the president. He has spent his career as a journeyman political operative, a two-time failed candidate for office (in Massachusetts and New Hampshire) and a police officer. Yet where the Clintons were surrounded by a vast horde of ‘‘friends,’’ Trump ran a family business with a small network of flag-wavers. His campaign was a tiny operation, and Lewandowski got in early, stuck around and stayed loyal. Never mind that he was bounced as campaign manager in June 2016, one reason being that he had run afoul of Jared Kushner and Ivanka Trump; Bloomberg reported last month of a détente between Lewandowski and Kushner, consummated on a ‘‘stroll through the White House Rose Garden,’’ ending in a hug. Being one of ‘‘Trump’s guys’’ positions Lewandowski at the apex of Washington influence in 2017. He is clearly close to the president. He can decipher his moods, know which buttons to push and when to stay away. Rather than ‘‘going in’’ (local shorthand for ‘‘going in to an administration’’), Lewandowski leveraged his big-fish status in Trump­land to open a lucrative Washington consultancy to help corporations and clients ‘‘navigate’’ the new administration. Unlike Spicer’s position, this inside-out role enables Lewandowski to pick his spots and steer clear of the West Wing infighting and fiefs and, maybe most of all, the day-to-day dramas inherent in being too close to Donald J. Trump. Lewandowski warned me to keep my voice down, as he suspected that the guy sitting on the couch behind him was an eavesdropping reporter — a familiar hazard at the Trump Hotel. After a few minutes, we were joined by Anthony Scaramucci, the Long Island-born financier, fund-raiser and Trump acolyte — known as ‘‘the Mooch’’ to fellow Trumpians, New York tabloid-headline writers as well as his long-ago Little League teammates. 
Scaramucci, whose sculpted jaw, hair and form-fitting suit give the impression of an infomercial host, had been having trouble landing a top White House job. He had apparently been up for a role as an adviser and public liaison to government agencies and businesses, but that stalled because of complications related to the sale of his company, SkyBridge Capital, to foreign buyers. ‘‘Thanks for that thing at the White House today,’’ Scaramucci told Lewandowski, the first of four times he would thank him in the five minutes that we were together. The men locked eyes and nodded simultaneously: Gratitude acknowledged, accepted. Lewandowski had to rush off to catch a flight home to New Hampshire, where he lives with his wife and four children. The Mooch thanked him again. I found out later that Scaramucci was now in line to be the Paris-based ambassador to the Organization for Economic Cooperation and Development, pending Senate confirmation. In the interim, Scaramucci has been appointed chief strategy officer of the United States Export-Import Bank, an institution that Trump derided during the campaign as ‘‘excess baggage.’’ But the Mooch says the plan is now to keep the bank open. The excess baggage, in other words, has been claimed. Trump was elected in part by portraying and revealing politicians to be feckless weenies — and many of them went out and reinforced this view by displaying their willingness to be rolled by Trump in the campaign and unwillingness to stand up to him in office. This gets to one ethic of This Town that has endured and that Trump has reinforced: The interests of self-perpetuation drive nearly everything. Much of the Republican base still loves Trump, and few Republicans in Congress can afford to alienate these voters by defying him too forcefully, even though many of them — particularly senators — plainly hold the president in low regard. 
There have been exceptions, but by far the most vocal critics of Trump on the right have been the columnists, political consultants and former Republican officeholders who don’t need to face voters. Trump ‘‘has been a complete disaster’’ in office beyond foreign policy, said John Boehner, the former Republican speaker — the key word being ‘‘former.’’ (He was speaking at a private event in Houston; he later tried to walk the comment back.) You encounter many elected Republicans these days who struggle to calibrate their reactions to the president by what the Republican media consultant Rick Wilson refers to as ‘‘F.O.M.T.’’ — ‘‘Fear of Mean Tweets.’’ ‘‘It’s the great dichotomy of my life right now,’’ says Wilson, an outspoken anti-Trump voice who speaks often to clients and friends who are Republican officeholders. ‘‘I have guys call me literally on the verge of tears some days, like, ‘This guy is going to get us killed,’ ’’ Wilson told me. ‘‘And then they go out the next day, and they can’t wait to build the wall, they want to ‘make America great again’ all day long.’’
Headlines for May 02, 2017 | Democracy Now! Around the world, millions of workers took to the streets Monday for May Day, also known as International Workers’ Day. May Day protests were held worldwide, including in France, Kenya, Indonesia, South Korea, Russia and in Turkey, where more than 70 people were arrested in Istanbul. In California, tens of thousands of people marched in the Bay Area, as immigrant workers refused to go to work and students walked out of class. In Milwaukee, Wisconsin, more than 30,000 people marched to demand the governor fire Milwaukee County Sheriff Dave Clarke, block anti-immigrant legislation and return driver’s licenses to undocumented immigrants. At least 140 businesses were shut down across Milwaukee. In Texas, two dozen people were arrested, including Austin City Councilmember Gregorio Casar, after an 8-hour sit-in at the office of Texas Governor Greg Abbott, in protest of anti-immigrant bill SB 4. This is Julie Ann Nitsch. In many cities and rural areas, some immigrants launched a one-day work strike, including in Homestead, Florida, where farm workers refused to work and instead marched to City Hall. Monday’s immigrant-led protests came as newly released data from Immigration and Customs Enforcement, the agency known as ICE, shows nearly half of the 675 immigrants arrested in ICE raids in February had either low-level driving convictions or no criminal record at all. In Portland, Oregon, police arrested at least 25 protesters on Monday, as some demonstrators hurled paint and Pepsi cans at the police. The soda cans were a reference to a Pepsi ad, featuring Kendall Jenner, in which she’s portrayed as a hero after defusing tensions between protesters and police by offering a cop a can of Pepsi. Meanwhile, in Puerto Rico, thousands of protesters blocked traffic and marched downtown to protest austerity measures imposed by the federal fiscal control board.
We’ll have more voices from the May Day protests in the streets of New York City after headlines. In news on Syria, Human Rights Watch says Syrian government forces have used chemical nerve agents, such as sarin gas, in attacks at least four times in recent months, including in the April attack on a town that killed 86 people, including dozens of children. Human Rights Watch also says new evidence, including photos and videos of weapon remnants, suggests the April attack came from a Soviet-made, air-dropped chemical bomb specifically designed to deliver sarin. One of the other attacks, on December 12, reportedly killed 64 people. The Syrian government denies using chemical weapons, including in the April attack. Meanwhile, U.S.-led coalition airstrikes continue in Syria. The journalistic monitoring group Airwars says these airstrikes reportedly killed at least two dozen civilians in the final week of April in and around Raqqa. U.S.-led coalition airstrikes also continue in Mosul, Iraq, where dozens of civilians were killed by airstrikes launched by the coalition or the U.S.-backed Iraqi Army in the final week of April. In Afghanistan, more civilians died last year amid the ongoing war than at any time since the United Nations began keeping records in 2009. At least 11,418 civilians died in 2016. Another 660,000 Afghans fled their homes—the highest number of displacements on record. This comes as the White House is considering a plan to deploy an additional 5,000 troops to Afghanistan. CIA Director Mike Pompeo visited Seoul, South Korea, Monday amid rising tensions between the U.S. and North Korea. His visit comes as the THAAD missile defense system installed by the U.S. in South Korea is now operational. On Monday, President Trump said he’d be “honored” to meet North Korean President Kim Jong-un. 
White House Press Secretary Sean Spicer later tried to walk back the president’s comments, saying, “Clearly, the conditions are not there right now.” Meanwhile, Philippines President Rodrigo Duterte has said he might be too busy to accept President Trump’s invitation to the White House. Human rights activists criticized the invitation, saying it condones the thousands of extrajudicial killings in the Philippines since Duterte launched his so-called war on drugs. The White House is pushing for a vote on a new Republican plan to repeal and replace the Affordable Care Act—even as widespread questions about the plan remain, including whether President Trump understands it. On Monday, President Trump reiterated his claim that the Republican bill would guarantee coverage for people with pre-existing conditions. In fact, on Saturday, Republican lawmakers struck a deal that would not protect people with pre-existing conditions, instead allowing insurers to charge them significantly higher premiums. The White House is pushing for a vote as early as Wednesday, even though a number of Republicans came out against the legislation Monday. In one of his latest interviews, President Trump sparked confusion—and ridicule—by questioning why, exactly, the Civil War was fought, and suggesting former President Andrew Jackson could have averted the war itself—had he not been dead for 16 years by the time it began. Fox News has ousted co-president Bill Shine as part of the continued fallout over revelations about sexual harassment at the network. Shine worked closely with former Chair Roger Ailes, who was ousted over the summer after more than 20 women accused him of sexual harassment and professional retaliation. Fox’s former top anchor, Bill O’Reilly, has also been ousted amid sexual harassment accusations. On Monday, however, Fox promoted longtime executive Suzanne Scott, who has been accused in multiple lawsuits of working to cover up Ailes’ sexual harassment. 
In Houston, Texas, a federal judge has ordered Harris County to stop imprisoning people on misdemeanor charges because they can’t pay bail, after ruling the bail system was unconstitutional and discriminated against poor defendants. Harris County is the third largest county jail system in the United States. In Wisconsin, a jury has recommended bringing criminal charges against seven Milwaukee County jailers who denied 38-year-old Terrill Thomas water for seven days in a solitary confinement jail cell. Thomas died from extreme dehydration on April 24, 2016. In Minneapolis, a man has been sentenced to 15 years in prison, after shooting and wounding five Black Lives Matter protesters in 2015 at an occupation at a police precinct over the police killing of Jamar Clark. Prosecutors say 25-year-old Allen Scarsella sent a series of racist texts to friends in the months leading up to his decision to drive over to the protest camp, put on a mask and shoot five of the demonstrators. Meanwhile, in Texas, family and friends are mourning the death of 15-year-old African-American teenager Jordan Edwards, who was shot in the head by a police officer in a suburb of Dallas on Saturday. The Balch Springs Police Department had initially claimed the officers opened fire while the car carrying the high school freshman was reversing toward the police car. But, in fact, on Monday, the police chief admitted the police officer opened fire while the car was actually moving forward, away from the police. And in New York City, author and editor Jean Stein has died, after she took her own life on Sunday. She was 83 years old. The former editor of The Paris Review, Stein was well known for writing best-selling oral histories—her latest, “West of Eden: An American Place,” about MCA, the Music Corporation of America, which is now NBCUniversal. MCA was founded by her father, Jules Stein. 
Stein is survived by her daughters, Katrina vanden Heuvel, publisher of The Nation, and actress Wendy vanden Heuvel.
import { Injectable } from '@angular/core';
import { Router, ActivatedRoute } from '@angular/router';
import { ToastController, Platform, NavController, LoadingController, ModalController } from '@ionic/angular';
import { BehaviorSubject, Observable } from 'rxjs';
import { HttpHeaders, HttpClient } from '@angular/common/http';
import { environment } from 'src/environments/environment'
import { JwtHelperService } from '@auth0/angular-jwt';
import { NotificationService } from './notification.service';

/**
 * Session-based authentication service: logs users in/out against the
 * backend API, stores the JWT and user/company details in sessionStorage,
 * and broadcasts the logged-in state through `authState`.
 */
@Injectable({
  providedIn: 'root'
})
export class AuthenticationService {

  // Path fragments for which the startup redirect-to-login is skipped.
  disabledRedirectFor: string[] = ['not_found', 'public'];
  // Current logged-in state; subscribe via getUserLoggedIn().
  authState = new BehaviorSubject(false);
  URL_API: string = environment.API;

  constructor(
    private router: Router,
    private navCtrl: NavController,
    private platform: Platform,
    public toastController: ToastController,
    private http: HttpClient,
    public route: ActivatedRoute,
    public jwtHelper: JwtHelperService,
    private notification: NotificationService
  ) {
    // On startup: redirect unauthenticated users to /login unless the current
    // path is exempt; on exempt paths, send already-authenticated users home.
    this.platform.ready().then(() => {
      if (location && !this.disabledRedirectFor.find(path => { return location.pathname.includes(path) })) {
        if (!this.isAuthenticatedv2()) {
          this.navCtrl.navigateRoot('/login');
        }
      } else {
        if (this.isAuthenticated()) {
          this.navCtrl.navigateRoot('/home');
        }
      }
    });
  }

  /**
   * Fetches the current user's profile from the API.
   * NOTE(review): on HTTP error the catch only logs, so this resolves to
   * `undefined` — callers must handle that case.
   */
  async GetProfile() {
    const res = await this.http.get(this.URL_API + '/account/getProfile', { responseType: 'json' }).toPromise()
      .catch(err => { console.log(err); });
    return res;
  }

  /** Sends updated profile data to the API; resolves to `undefined` on error (see GetProfile). */
  async UpdateProfile(data) {
    const res = await this.http.post(this.URL_API + '/account/updateProfile', data, { responseType: 'json' }).toPromise()
      .catch(err => { console.log(err); });
    return res;
  }

  /** Requests a password change for the current user. */
  async resetpassword(data: any) {
    const headers = new HttpHeaders().set('Content-Type', 'application/json');
    const res = await this.http.post(this.URL_API + '/account/changePassword', JSON.stringify(data), { headers, responseType: 'json' }).toPromise()
      .catch(err => { console.log(err); });
    return res;
  }

  /**
   * Logs in with the given credentials. On success (API code 0) stores the
   * token and user/company details in sessionStorage, flips authState to
   * true and navigates home; returns true/false accordingly.
   * NOTE(review): if the HTTP call itself fails, `res` is `undefined` and
   * `res['code']` throws — presumably unreachable in practice, but verify.
   */
  async login(data: any) {
    const headers = new HttpHeaders().set('Content-Type', 'application/json');
    const res = await this.http.post(this.URL_API + '/account/login', JSON.stringify(data), { headers, responseType: 'json' }).toPromise()
      .catch(err => { console.log(err); });
    if (res['code'] == 0) {
      sessionStorage.setItem('user-token', res['data'][0].token);
      sessionStorage.setItem('userFullName', res['data'][0].userFullName);
      sessionStorage.setItem('usertype', res['data'][0].type);
      sessionStorage.setItem('companyId', res['data'][0].company.id);
      sessionStorage.setItem('companyName', res['data'][0].company.name);
      sessionStorage.setItem('companyLogo', res['data'][0].company.logo);
      this.authState.next(true);
      this.navCtrl.navigateRoot('/home');
      return true;
    } else {
      this.authState.next(false);
      return false;
    }
  }

  /** Clears the whole session and returns to the login page. */
  logout() {
    sessionStorage.clear();
    this.authState.next(false);
    this.navCtrl.navigateRoot('/login');
  }

  /** Returns true if a non-expired token is stored; also pushes true to authState. */
  isAuthenticated() {
    const token = sessionStorage.getItem('user-token');
    if (token) {
      if (this.jwtHelper.isTokenExpired(token)) {
        return false;
      } else {
        this.authState.next(true);
        return true;
      }
    } else {
      return false;
    }
  }

  /** Same check as isAuthenticated() but without touching authState. */
  isAuthenticatedv2() { //used for refresh dom after login
    const token = sessionStorage.getItem('user-token');
    if (token) {
      if (this.jwtHelper.isTokenExpired(token)) {
        return false;
      } else {
        return true;
      }
    } else {
      return false;
    }
  }

  /** Observable stream of the logged-in state. */
  getUserLoggedIn(): Observable<boolean> {
    return this.authState.asObservable();
  }

  /**
   * Exchanges the current token for a fresh one; logs out on any failure.
   * NOTE(review): the catch returns `false`, so `res['code']` below can run
   * on a boolean — presumably the API never fails here, but verify.
   */
  async refreshToken() {
    const res = await this.http.get(this.URL_API + '/account/refreshToken', { responseType: 'json' }).toPromise()
      .catch(err => {
        console.log(err);
        this.logout();
        return false;
      });
    if (res['code'] == 0) {
      sessionStorage.setItem('user-token', res['data'][0].newToken);
      this.authState.next(true);
      return true;
    } else {
      this.logout();
      return false;
    }
  }
}
/**
 * Helper function, returns true if we (finally) found the EXIT command and we
 * can break the loop.
 *
 * <p>NOTE(review): the original doc claimed "EXIT / END", but the code only
 * compares against EXIT — confirm whether END should also terminate.
 *
 * @param message
 *            the message we want to check the content of
 * @return true if our message's data is equal to EXIT, false if not
 */
private boolean foundExit(Message message) {
    // EXIT.equals(...) is null-safe (returns false for null data) and replaces
    // the manual if/return-true/return-false chain.
    return EXIT.equals(message.getData());
}
// // buffer_size.hpp // ~~~~~~~~~~~~~~~ // // Copyright (c) 2021 <NAME> (tsingyat at outlook dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef GPCL_BUFFER_SIZE_HPP #define GPCL_BUFFER_SIZE_HPP #include <gpcl/buffer.hpp> #include <gpcl/buffers_range_ref.hpp> namespace gpcl { namespace detail { inline std::size_t buffer_size(const_buffer b) noexcept { return b.size(); } template <typename BufferSequence> std::size_t buffer_size(const BufferSequence &bs) noexcept { std::size_t total_bytes = 0; for (auto b : gpcl::buffers_range_ref(bs)) { total_bytes += b.size(); } return total_bytes; } struct buffer_size_impl { template <typename BufferSequence> constexpr std::size_t operator()(const BufferSequence &bs) const noexcept { return buffer_size(bs); } }; } // namespace detail inline constexpr detail::buffer_size_impl buffer_size{}; } // namespace gpcl #endif
Amy Schumer: 'The Met Gala felt like punishment' | HELLO! August 24, 2016 - 15:16 BST hellomagazine.com Uh oh! So it's fair to say Amy Schumer is not a fan of the Met Gala! The comedian has opened up on her experience of attending this year's event, saying that it "felt like a punishment". "I left earlier than I should have been allowed," she said during an interview with Howard Stern. "I got to meet Beyoncé and she was like, 'Is this your first Met Gala?' And I was like, 'It's my last.' It felt like a punishment. It's not me and I don't like it." Amy looked stunning at the gala, showing off her curves in a tangerine-hued gown by Alexander Wang. But she didn't enjoy the process of dressing up. She told Howard Stern that it "felt like a punishment" "We're dressed up like a bunch of fu**ing a**holes," she said. "I have no interest in fashion. I like the idea of coming up with a way to dress that's more comfortable that looks cool. Other than that, I just don't care." Amy went on to explain that mingling with celebrities at parties really isn't her thing in general. "It's just so fake," she said. "It's people doing an impression of having a conversation... I like talking to people one on one where you can be comfortable and really have a conversation. I don't like the farce."
def start(vm_name):
    """Start the VM resource ``vm_name`` on the Pacemaker cluster.

    A no-op (apart from logging) if the resource is already started.

    Raises:
        Exception: if ``vm_name`` is not a resource known to the cluster.
    """
    with Pacemaker(vm_name) as pacemaker:
        # Guard clause: unknown resources are an error, not a silent no-op.
        if vm_name not in pacemaker.list_resources():
            raise Exception("VM " + vm_name + " is not on the cluster")
        if pacemaker.show() == "Started":
            logger.info("VM " + vm_name + " is already started")
            return
        logger.info("Start " + vm_name)
        pacemaker.start()
        # Block until Pacemaker reports the resource as running.
        pacemaker.wait_for("Started")
        logger.info("VM " + vm_name + " started")
Ethanol induced microcephaly in the neonatal rat: occurrence without withdrawal. Neonatal rats exposed to ethanol with an artificial rearing technique on postnatal days 4--8 have been found to have up to 20% decrease in brain weight when examined on postnatal day 18. Following the four day ethanol exposure these animals went through a moderate to severe abstinence syndrome. Since the appearance of any detectable brain growth differences were not found until after the withdrawal period, it was possible that the microcephaly was a result of withdrawal and not ethanol exposure. To test this hypothesis, neonatal rats were exposed to ethanol for either the four day exposure period used in the previous work, or until determination of brain growth impairment at day 11. This last group of animals were administered a daily dose of ethanol such that they did not have an observable abstinence syndrome. Examination of brain weights on day 11 revealed no differences in the extent of the observed microcephaly between the ethanol exposure conditions, suggesting that withdrawal per se was not responsible for the production of the brain growth retardation.
# -*- coding: utf-8 -*- # Generated by Django 1.11.16 on 2018-10-14 01:11 from __future__ import unicode_literals from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies = [ ('base', '0017_auto_20181014_0105'), ] operations = [ migrations.AlterField( model_name='stylesnippet', name='style_file', field=models.ForeignKey(help_text='Only filenames ending in .css, .scss or .sass will be processed.', on_delete=django.db.models.deletion.CASCADE, related_name='+', to='wagtaildocs.Document', verbose_name='style file'), ), ]
// Disconnect local node from remote LND node func (c *lndClient) Disconnect(address string) error { addrParts := strings.Split(address, "@") if len(addrParts) != 2 || addrParts[0] == "" || addrParts[1] == "" { return fmt.Errorf("Invalid address format: %s", address) } pubKey := addrParts[0] ctx, cancel := context.WithTimeout(context.Background(), defaultGRPCTimeout) defer cancel() _, err := c.client.DisconnectPeer(ctx, &lnrpc.DisconnectPeerRequest{ PubKey: pubKey, }) return err }
<reponame>zhuyuqing/TVStore
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.apache.iotdb.tsfile.read.query.timegenerator;

import java.io.IOException;
import org.apache.iotdb.tsfile.read.common.Path;

/**
 * All SingleSeriesExpression involved in a IExpression will be transferred to a TimeGenerator tree
 * whose leaf nodes are all SeriesReaders, The TimeGenerator tree can generate the next timestamp
 * that satisfies the filter condition. Then we use this timestamp to get values in other series
 * that are not included in IExpression
 */
public interface TimeGenerator {

  /**
   * @return whether another timestamp satisfying the filter is available
   * @throws IOException on underlying reader failure
   */
  boolean hasNext() throws IOException;

  /**
   * @return the next timestamp that satisfies the filter condition
   * @throws IOException on underlying reader failure
   */
  long next() throws IOException;

  /**
   * Reads the value of the series identified by {@code path} at the given
   * timestamp (used for series not covered by the IExpression itself).
   *
   * @param path series identifier
   * @param time timestamp produced by {@link #next()}
   * @return the value at that timestamp
   * @throws IOException on underlying reader failure
   */
  Object getValue(Path path, long time) throws IOException;
}
#pragma once #include <memory> #include "worldSize.h" #include "ICamera.h" #include "ICameraControllerInput.h" #include "IChunk.h" #include "IChunkFactory.h" #include "IChunkManager.h" #include "ILightSource.h" #include "ISprite.h" #include "ISpriteManager.h" struct GLFWwindow; class SpriteManager; typedef std::function<void(float timeDelta)> tickFunction; class VoxelEngine { GLFWwindow *_window; std::shared_ptr<IChunkManager> _chunkManager; std::shared_ptr<ICamera> _camera; std::shared_ptr<ICameraControllerInput> _cameraInputController; std::shared_ptr<ILightSource> _lightSource; std::shared_ptr<SpriteManager> _spriteManager; double _lastTickTime; public: VoxelEngine(GLFWwindow* window, std::string shaderPath, const worldSize& worldSize, const std::shared_ptr<IChunkFactory>& chunkFactory, const cameraConfiguration& cameraConfiguration, const std::shared_ptr<ICameraControllerInput>& cameraInputController, const std::shared_ptr<ILightSource>& lightSource); ~VoxelEngine(); float tick(tickFunction updateCompleteFunc=nullptr, tickFunction tickCompleteFunc=nullptr); worldSize getWorldSize() const { return worldSize(_chunkManager->getWidth() * IChunk::Width, _chunkManager->getHeight() * IChunk::Height, _chunkManager->getDepth() * IChunk::Depth); } std::shared_ptr<ISpriteManager> getSpriteManager() const; };
// SetReadWriteTimeout sets the connection read and write timeout. // The timeout is implemented using net.Conn.SetDeadline. func SetReadWriteTimeout(timeout time.Duration) Option { return func(t *Transport) { t.readTimeout = timeout t.writeTimeout = timeout } }
<gh_stars>10-100
package me.saiintbrisson.minecraft;

import com.google.common.collect.Sets;
import org.bukkit.Bukkit;
import org.bukkit.entity.Player;
import org.bukkit.inventory.Inventory;
import org.bukkit.inventory.InventoryHolder;

import java.io.Closeable;
import java.util.HashMap;
import java.util.Map;
import java.util.WeakHashMap;
import java.util.function.Supplier;
import java.util.stream.Collectors;

/**
 * An inventory "view" shared by many players: holds the static configuration
 * (title, row count, cancel-* flags), one live {@link ViewContext} per viewing
 * player, and an arbitrary per-player data map. Subclasses customise behaviour
 * by overriding the protected on* hooks (onOpen, onRender, onClick, ...).
 */
public class View extends VirtualView implements InventoryHolder, Closeable {

    /** Number of slots per inventory row. */
    public static final int INVENTORY_ROW_SIZE = 9;

    /** Sentinel meaning "no slot assigned". */
    public static final int UNSET_SLOT = -1;

    /** Row count used when a view is constructed with 0 rows. */
    public static final int INVENTORY_ROW_COUNT = 3;

    private final String title;
    private final int rows;

    // Live contexts keyed by player name.
    // NOTE(review): WeakHashMap with String keys — entries can be dropped
    // whenever the key string becomes weakly reachable; explicit removal
    // already happens in remove(...). Confirm a weak map is intended here.
    private final Map<String, ViewContext> contexts;

    private ViewFrame frame;

    // Event-cancellation flags; all except cancelOnClick and
    // clearCursorOnClose are initialised to true in the main constructor.
    private boolean cancelOnClick, cancelOnPickup, cancelOnDrop, cancelOnDrag, cancelOnClone;
    private boolean cancelOnMoveOut, cancelOnShiftClick, clearCursorOnClose;

    // Arbitrary per-player data; entries vanish once the Player is collected.
    private final Map<Player, Map<String, Object>> data;

    /** Creates a view with the default row count and an empty title. */
    public View() {
        this(0);
    }

    public View(final int rows) {
        this(null, rows, "");
    }

    public View(final int rows, final String title) {
        this(null, rows, title);
    }

    public View(final ViewFrame frame, final int rows, final String title) {
        // NOTE(review): by operator precedence this reads as
        // (INVENTORY_ROW_SIZE * rows == 0) ? INVENTORY_ROW_COUNT : rows,
        // i.e. the backing item array has `rows` entries (or 3 when rows is
        // 0), not rows * 9 — confirm whether rows * 9 slots was intended.
        super(new ViewItem[INVENTORY_ROW_SIZE * rows == 0 ? INVENTORY_ROW_COUNT : rows]);
        this.rows = rows;
        this.frame = frame;
        this.title = title;
        contexts = new WeakHashMap<>();
        data = new WeakHashMap<>();
        // Everything except plain clicks is cancelled by default.
        cancelOnPickup = true;
        cancelOnDrop = true;
        cancelOnDrag = true;
        cancelOnClone = true;
        cancelOnMoveOut = true;
        cancelOnShiftClick = true;
    }

    /** Index of the last usable slot (rows * 9 - 1). */
    @Override
    public int getLastSlot() {
        return INVENTORY_ROW_SIZE * rows - 1;
    }

    /**
     * Snapshot of the active contexts keyed by their Player, resolved via
     * Bukkit.getPlayerExact on every call.
     */
    public Map<Player, ViewContext> getContexts() {
        return contexts.entrySet().stream().collect(Collectors.toMap(e ->
                Bukkit.getPlayerExact(e.getKey()), Map.Entry::getValue));
    }

    /** Context for the given player, or null if this view is not open for them. */
    public ViewContext getContext(final Player player) {
        return contexts.get(player.getName());
    }

    public ViewFrame getFrame() {
        return frame;
    }

    void setFrame(final ViewFrame frame) {
        this.frame = frame;
    }

    /**
     * Use ViewContext#getRows() instead of this method if you're using dynamic rows.
     * @return the number of rows in this view
     */
    @Deprecated
    public int getRows() {
        return rows;
    }

    public String getTitle() {
        return title;
    }

    /** Factory hook — subclasses may return a specialised context type. */
    protected ViewContext createContext(final View view, final Player player, final Inventory inventory) {
        return new ViewContext(view, player, inventory);
    }

    public void open(final Player player) {
        open(player, null);
    }

    /**
     * Opens this view for a player: invalidates any previous context, fires
     * onOpen (which may cancel or resize), creates the Bukkit inventory,
     * renders it and finally shows it to the player.
     *
     * @param data initial per-player data (copied), or null to start clean
     */
    public void open(final Player player, final Map<String, Object> data) {
        // Drop a stale context from a previous open, if any.
        contexts.computeIfPresent(player.getName(), ($, context) -> {
            context.invalidate();
            return null;
        });
        final OpenViewContext preOpenContext = new OpenViewContext(this, player);
        if (data != null) setData(player, new HashMap<>(data));
        else {
            // ensure non-transitive data on view switch
            clearData(player);
        }
        onOpen(preOpenContext);
        if (preOpenContext.isCancelled()) {
            clearData(player);
            return;
        }
        // onOpen may have requested a different inventory size.
        int inventorySize = preOpenContext.getInventorySize();
        if (inventorySize != items.length) {
            this.expandItemsArray(inventorySize);
        }
        final Inventory inventory = getInventory(preOpenContext.getInventoryTitle(), inventorySize);
        final ViewContext context = createContext(this, player, inventory);
        contexts.put(player.getName(), context);
        onRender(context);
        render(context);
        player.openInventory(inventory);
    }

    /** Re-renders the view for every player currently viewing it. */
    public void update() {
        for (final ViewContext ctx : contexts.values())
            ctx.update();
    }

    @Override
    public void update(final ViewContext context) {
        frame.debug("[context]: update");
        onUpdate(context);
        super.update(context);
    }

    @Override
    public void update(final ViewContext context, final int slot) {
        frame.debug("[slot " + slot + "]: update");
        super.update(context, slot);
    }

    @Override
    public void render(final ViewContext context) {
        frame.debug("[context]: render");
        super.render(context);
    }

    @Override
    public void render(final ViewContext context, final int slot) {
        frame.debug("[slot " + slot + "]: render");
        super.render(context, slot);
    }

    @Override
    public void render(final ViewContext context, final ViewItem item, final int slot) {
        frame.debug("[slot " + slot + "]: render with item");
        super.render(context, item, slot);
    }

    @Override
    ViewItem resolve(final ViewContext context, final int slot) {
        frame.debug("[slot " + slot + "]: resolve item");
        return super.resolve(context, slot);
    }

    /** Removes and invalidates the player's context; returns it (may be null). */
    ViewContext remove(final Player player) {
        frame.debug("[context]: remove");
        final ViewContext context = contexts.remove(player.getName());
        if (context != null) {
            context.invalidate();
            frame.debug("[context]: invalidate");
        }
        return context;
    }

    // NOTE(review): this overload synchronizes on `contexts` while
    // remove(Player) and open(...) mutate the same map without the lock —
    // confirm which accesses actually need synchronization.
    void remove(final ViewContext context) {
        context.invalidate();
        frame.debug("[context]: invalidate");
        synchronized (contexts) {
            contexts.remove(context.getPlayer().getName());
            frame.debug("[context]: remove");
        }
    }

    /** Closes this view for every current viewer (iterates over a copy). */
    public void close() {
        for (final Player player : Sets.newHashSet(getContexts().keySet())) {
            player.closeInventory();
        }
    }

    public boolean isCancelOnClick() {
        return cancelOnClick;
    }

    public void setCancelOnClick(final boolean cancelOnClick) {
        this.cancelOnClick = cancelOnClick;
    }

    public boolean isCancelOnPickup() {
        return cancelOnPickup;
    }

    public void setCancelOnPickup(final boolean cancelOnPickup) {
        this.cancelOnPickup = cancelOnPickup;
    }

    public boolean isCancelOnDrop() {
        return cancelOnDrop;
    }

    public void setCancelOnDrop(final boolean cancelOnDrop) {
        this.cancelOnDrop = cancelOnDrop;
    }

    public boolean isCancelOnDrag() {
        return cancelOnDrag;
    }

    public void setCancelOnDrag(final boolean cancelOnDrag) {
        this.cancelOnDrag = cancelOnDrag;
    }

    public boolean isCancelOnClone() {
        return cancelOnClone;
    }

    public void setCancelOnClone(final boolean cancelOnClone) {
        this.cancelOnClone = cancelOnClone;
    }

    public boolean isCancelOnMoveOut() {
        return cancelOnMoveOut;
    }

    public void setCancelOnMoveOut(boolean cancelOnMoveOut) {
        this.cancelOnMoveOut = cancelOnMoveOut;
    }

    public boolean isCancelOnShiftClick() {
        return cancelOnShiftClick;
    }

    public void setCancelOnShiftClick(boolean cancelOnShiftClick) {
        this.cancelOnShiftClick = cancelOnShiftClick;
    }

    public void setClearCursorOnClose(boolean clearCursorOnClose) {
        this.clearCursorOnClose = clearCursorOnClose;
    }

    public boolean isClearCursorOnClose() {
        return clearCursorOnClose;
    }

    /** InventoryHolder contract is intentionally unsupported — views are per-player. */
    @Override
    public Inventory getInventory() {
        throw new UnsupportedOperationException();
    }

    // Creates the backing Bukkit inventory; falls back to the view's own
    // title when the open hook did not supply one.
    private Inventory getInventory(final String title, final int size) {
        return Bukkit.createInventory(this, size, title == null ? this.title : title);
    }

    public void clearData(final Player player) {
        data.remove(player);
    }

    public void clearData(final Player player, final String key) {
        if (!data.containsKey(player)) return;
        data.get(player).remove(key);
    }

    public Map<String, Object> getData(final Player player) {
        return data.get(player);
    }

    @SuppressWarnings("unchecked")
    public <T> T getData(final Player player, final String key) {
        if (!data.containsKey(player)) return null;
        return (T) data.get(player).get(key);
    }

    /** As getData(player, key), but computing a fallback when absent. */
    @SuppressWarnings("unchecked")
    public <T> T getData(final Player player, final String key, final Supplier<T> defaultValue) {
        if (!data.containsKey(player) || !data.get(player).containsKey(key))
            return defaultValue.get();
        return (T) data.get(player).get(key);
    }

    public void setData(final Player player, final Map<String, Object> data) {
        this.data.put(player, data);
    }

    public void setData(final Player player, final String key, final Object value) {
        data.computeIfAbsent(player, $ -> new HashMap<>()).put(key, value);
    }

    public boolean hasData(final Player player, final String key) {
        if (!data.containsKey(player)) return false;
        return data.get(player).containsKey(key);
    }

    // --- subclass hooks (intentionally empty) -------------------------------

    /** Called before the inventory is created; may cancel the open or resize. */
    protected void onOpen(final OpenViewContext context) {
    }

    /** Called once per open, right before the first render. */
    protected void onRender(final ViewContext context) {
    }

    protected void onClose(final ViewContext context) {
    }

    protected void onClick(final ViewSlotContext context) {
    }

    protected void onUpdate(final ViewContext context) {
    }

    protected void onMoveOut(final ViewSlotMoveContext context) {
    }

    protected void onItemHold(final ViewSlotContext context) {
    }

    protected void onItemRelease(final ViewSlotContext from, final ViewSlotContext to) {
    }

    // Grows the backing item array, preserving existing entries.
    private void expandItemsArray(int newLength) {
        ViewItem[] newItems = new ViewItem[newLength];
        System.arraycopy(items, 0, newItems, 0, items.length);
        items = newItems;
    }

    @Override
    public String toString() {
        return "View{" + "title='" + title + '\'' + ", rows=" + rows + "} " + super.toString();
    }
}
Invading the Yeast Nucleus: a Nuclear Localization Signal at the C Terminus of Ty1 Integrase Is Required for Transposition In Vivo ABSTRACT Retrotransposon Ty1 faces a formidable cell barrier during transpositionthe yeast nuclear membrane which remains intact throughout the cell cycle. We investigated the mechanism by which transposition intermediates are transported from the cytoplasm (the presumed site of Ty1 DNA synthesis) to the nucleus, where they are integrated into the genome. Ty1 integrase has a nuclear localization signal (NLS) at its C terminus. Both full-length integrase and a C-terminal fragment localize to the nucleus. C-terminal deletion mutants in Ty1 integrase were used to map the putative NLS to the last 74 amino acid residues of integrase. Mutations in basic segments within this region decreased retrotransposition at least 50-fold in vivo. Furthermore, these mutant integrase proteins failed to localize to the nucleus. Production of virus-like particles, reverse transcriptase activity, and complete in vitro Ty1 integration resembled wild-type levels, consistent with failure of the mutant integrases to enter the nucleus.
Co-disposal Research of Red Mud and Aluminum Electrolysis Solid Waste Based on External Thermal Reduction and Smelting Separation In large aluminum smelting enterprises, the solid wastes of red mud and waste cathodes satisfy the basic elemental requirements of solid–solid direct reduction technology. Thus, a solution is proposed to use waste cathodes and red mud under indirect heat supply to produce pellets and to utilize them in situ. The reduction mechanism of red mud and waste cathode, the influence of the carbon-blending method on pellet reduction, and the smelting separation and product quality have been studied. Experiments show that the iron in red mud can be successfully reduced by waste cathode and anode carbon powder at high temperature. The metallization rate of iron in red mud can reach more than 95%, and the quality of the pellets is more suitable with the external carbon-blending mode. In order to carry out effective melting and separation of slag and iron, the red mud pellets must be briquetted, or a molten pool must be reserved, before charging into the melting furnace. After separation, the contents of iron and carbon in the steel can reach about 98.85% and 0.13% respectively, the yield of iron can reach more than 96%, and the harmful components flowing into the slag can be effectively solidified. The alumina content of the slag reaches about 37%, which can be economically recycled to the alumina purification process as a blending material. Background At present, large amounts of solid waste and hazardous waste are produced in large-scale aluminum smelting enterprises, among which the typical wastes are the red mud produced in the alumina production process and the waste cathode and anode carbon powder produced in aluminum reduction cells during the electrolytic aluminum process. About 70 million tons of red mud are produced every year in the world, of which about 30 million tons are produced every year in China.
Many important advances have been achieved in the research of comprehensive utilization of red mud at home and abroad. But on the whole, current technology are still expensive, complex and uneconomic. Compared with the discharge capacity of red mud, current treatment capacity is quite small which the main reason that large-scale utilization of red mud has not been realized in the world so far. At present, about 95% of red mud is still disposed of by open storage. Comprehensive utilization of red mud is still a worldwide and popular problem. With the progress of iron enrichment technology of red mud, the Fe content of iron concentrate enriched from red mud, which produced by the Bayer process, can reach more than 45%. The concentrate can be used for iron and steel plants as ore blending to produce hot metal, but other elements, especially aluminum, have not been effectively utilized. It is still the main development direction of red mud to realize the reclamation, minimization and harmlessness to the maximum extent by means of recovering valuable metals and manufacturing civil materials. Solid wastes produced from aluminum cells, such as waste cathode and anode toner, contain a large amount of hazardous electrolyte components, among them there is quantities of soluble uoride and cyanide. It is di cult to deal with, poor economy, the utilization value is low, At present, only a small amount of uoride is recover for electrolyte extraction or carbon in spent cathode is used as fuel, A large number of waste cathodes are still treated mainly by land ll and stockpiling, The soluble uoride and cyanide contained in it will be transferred or volatilized into the atmosphere by wind, sun and rain, or mixed with rainwater into rivers, linking people underground to pollute soil and groundwater, causing great damage to animals and human body, destroying the ecological environment, affecting the agricultural ecological balance, and reducing crop production. 
If harmless treatment is not carried out in time, the harm will be long-term. At present, factories are facing great environmental-protection pressure from the Chinese government, which has begun to levy an environmental protection tax on solid waste treatment. This article intends to use external-heating solid–solid direct reduction technology: iron oxide in red mud is reduced by the carbon in waste cathode and anode carbon powder to obtain iron, and the fluoride in the waste cathode is volatilized and then recovered from the flue gas. Thereby, highly metallized red mud pellets and detoxified cathode and anode carbon powder are obtained. Through melting separation, the iron in the pellets is melted into molten iron, while high-melting-point oxides such as aluminum compounds are enriched in the slag to obtain high-alumina slag — i.e., "external-heating synergistic reduction + melting separation" — thus enabling red mud to be efficiently utilized. Raw Material Page 3/10 1) Red mud powder In this experiment, Guinea bauxite (TFe = ~25%) imported by a domestic aluminum-making enterprise was used; after alumina was extracted by the Bayer process, a large amount of red mud (TFe > 30%) containing iron oxide was produced. The red mud can be iron-enriched through a preliminary magnetic separation step to obtain refined red mud powder, whose total iron grade can reach more than 45%. The main components and compositions are shown in Tables 1 and 2: both iron and aluminum have definite recycling value. 2) Waste cathode and anode carbon powder The aluminum reduction cell is the key equipment in the production of aluminum ingots; when its lining is damaged or periodically replaced, a large amount of waste cathode material soaked in electrolyte is generated.
The main components are shown in Table 3: During the use of the electrolytic cell, not only the waste cathode will be generated, but also a lot of anode carbon powder will be generated due to carbon particle shedding caused by uneven combustion of carbon anode (Table 4), and the amount of anode carbon powder generated can reach 40~50% of the waste cathode amount. 3) Limestone Limestone was used to remove the sulfur from the raw material and Table 5 showed its composition as follows: Table 5 Composition of Limestone (wt %) Solid-solid direct reduction technology is a process in which metal oxides are reduced to form elemental metal by using carbon elements in the solid reducing agents. Heating methods in the reduction process are generally divided into direct internal heating by open ame and indirect external heating by isolated the ame. Heat transfer speed of internal heating is fast but the reduction atmosphere control is poor, while the external heating is slow but the reduction atmosphere is controllable and the product quality is better. Thus, the external heating method was selected in this paper. The speci c implementation process is shows in Figure 1: The red mud powder was briquetting into green balls with additives, which were further mixed with waste cathode and anode carbon powder. Under the condition of external heating (Figure 2), heat is provided. After being heated to a certain temperature and kept for a time, the red mud pellets are reduced to metalized pellets with high metallization rate. Then the pellets and residual carbon materials were cooled and separated, the pellets were briquetted and charged into the electric furnace in order to separate the iron and aluminum. Based upon above process, nally the solid wastes were comprehensively utilized through the cooperative disposal. 
According to the analysis in above and take the thermodynamic point into account, the experimental parameters are set with high temperature direct reduction method (Table 6). The chemical composition of red mud pellets with high metallization rate obtained from the experiment is shown in Table 7. 3 Results And Discussion Reduction Mechanism In Table 7, it can be seen that the reduction effect of iron oxide in pellets was better and the reduction rate was reached more than 95%. The main mineral components in red mud are goethite, hematite, gibbsite, kaolinite, colloidal silica, quartz, diaspora and so on. During the reduction process, the reduced materials can be regarded as a system composed of monomer oxides such as Fe 2 O 3, A1 2 O 3 and SiO 2. In the reduction process of these materials, not only the reduction phase transition of ironcontaining oxides, but also solid-state reaction among oxides. In the reduction process, the main reactions between carbon and iron oxides in waste cathode and anode carbon powder are as follows : Waste cathode and anode carbon powder were used as reducing agents, and carbon gasi cation reaction would occur if the carbon amount was excessive. Therefore, the direct reduction of iron oxide was carried out with the participation of CO. During the whole reaction, the solid-solid reaction ~ between green balls and reducing agent was quite small, while the reaction mostly was gas-solid reaction ~, that is, the reducing agent rst occurred oxidation reaction to generate CO, which reacted with iron oxide. Finally, the equilibrium control of reduction atmosphere was realized through disproportionation reaction. Compounds such as calcium-iron olivine and iron spinel were generated during the reduction process. These compounds could form a series of low-melting mixtures, which would make the reduction process more complicated and di cult. It has been proved that adopting a high reduction temperature of 1180℃ was bene cial to the reduction. 
The total iron content in the reduced pellets reached about 55~58% which was about 47% in the raw materials. The metallization rate is above 95% (Figure 3), that is, almost all iron were reduced. According to the weight balance, it showed that only the iron oxide has been reduced to metallic iron. All the results showed that the reduction effect was ideal. The other components in red mud and waste cathode were relatively stable even at high temperature. There was no vapor volatilization, which further veri ed the rationality and stability of the cooperative treatment of red mud, waste cathode and anode carbon by pyrometallurgy. The technical route is suitable for the treatment of hazardous wastes, such as waste cathode and anode carbon powder, which contains uoride and other harmful substances. Blending Method of Carbon The reduction effect showed that waste cathode and anode carbon powder can be directly used as reducing agent at high temperature. Carbon plays the main role of reducing and has good reactivity at high temperature, which was bene cial to the reduction of iron oxides. The content of C and S in the green ball has a great in uence on the quality of subsequent products. Fig.3 showed the comparison of the content of C and S after reduction of different carbon blending method. The internal carbon blending was one kind of green ball made by directly blending red mud with anode carbon with a weight charging ratio of 20%. Fig.4 showed after reduction the contents of C and S were 8.8% and 0.89% respectively, corresponding to external blending, which blending red mud ball with anode carbon were 0.266% and 0.021% respectively. The content of C and S in the metalized pellets was quite different that internal blending was about 40 times higher than external blending. In the internal blending sample, it showed that anode carbon powder was directly added to the pellet and was the main reason which caused the higher of sulfur content. 
Since petroleum coke were the main raw material for producing cathode and anode carbon brick, which were basically treating at a high temperature of 1200℃. Thus, in the cathode and anode most of the residue inorganic sulfur were stable and would not volatilize and affect the product quality. After undergoing high temperature reduction, sulfur still stayed in the metallized pellets. So the quality of the internal blending carbon pellets was badly affected the cost of the subsequent process. However, for the external blending, due to the natural physical space isolation, sulfur was still remained in a separate reducing agent, so the S content in the metallized pellets affected by the carbon blending method was relatively low, which did not affect the use by the subsequent process. This also showed that the sulfur in all raw materials was mainly inorganic sulfur, which has been solidi ed in the raw materials, i.e., there was unnecessary to add desulfurizer for gas desulfurization. In addition, although the metallization rate of internal blending was relatively higher, it was badly affected the iron content of about 5% lower than external blending due to the entrainment of ash and other unbene ced components in the anode carbon. Therefore, considering comprehensively, pellet quality reduced by the external blending was obviously better than the internal blending. It is more reasonable to choose the method of external blending to treat the red mud without additional desulfurizer for solidi cation and desulfurization. Melting Separation Affected by Iron Content Considering the green pellets, since it has the disadvantage of lower iron content and small bulk density, it was uneasy to conduct electricity and heat in the experimental scale furnace. Therefore, pellets need to be briquetted. Firstly, no extra ux iron block was charging in the melting process. After melting, the block was relatively loose, and the slag and iron were basically not separated (S1). 
In fact, it was a mixture of slag and iron. The main reason was that the quantity of molten iron was too small form a big molten bath and the quantity of slag was relatively large. Under laboratory conditions, the slag volume was quite large that made it di cult for slag to oat up and separate naturally. Therefore, it was necessary to improve the molten pool conditions. A pure iron block was successfully obtained with 30% mass charging of the iron block (S2). The separation effect of slag and iron was good and there was an obvious slag-iron separation interface. The iron block was compact and the slag block was basically compact, which indicated that the slag phase oated more fully during melting process and achieved the purpose of slag-iron separation ( Figure 5). The composition of the molten iron block was shown in Table 8. From the component analysis, the iron content reached 98.85% and the carbon content is 0.13%. It was actually the composition of steel, that is, molten steel was obtained. In the high metallization rate pellets after direct reduction, it showed that only iron was simple substance, while other components were mainly oxides with high melting point. During melting process, most of iron melted and entered into the molten iron, a few numbers of oxides such as Si and Mn were reduced and melted into molten iron by the carbon brought from the pellets, and other oxides basically entered into the slag. The quality of the molten steel was so good that various target steel products can be obtained after controlling the content of S, P and other components through appropriate re ning process, thus greatly simplifying the production process from comprehensive utilization of red mud to end products. The composition of the slag after melting and separation was shown in Table 9. The iron content in the slag was less than 3%. It was estimated that the iron recovery rate in sample S2 was 96.5% which indicated the iron loss was few. 
A large amount of the alkali metals and fluoride originating from the raw materials has been solidified in the slag; that is, the raw materials are stable and safe after being processed by the pyrometallurgical route. In addition, the aluminum compound content of the slag is as high as about 37%, which has good economic recycling value: the slag can be returned to the alumina extraction process through blending, thus realizing full utilization via self-circulation and zero emission within the enterprise. 4 Conclusion 1) Through the route of "external-heating reduction and melting separation", co-disposal of red mud, waste cathode and anode carbon powder is completely feasible. 2) Through direct reduction at high temperature with external heating, the reduction rate of iron in red mud is high and can reach more than 95%. 3) The external carbon-blending method is beneficial to the quality of the metallized pellets. The pellets have low contents of C, S and impurities, and there is no need to add a desulfurizer separately. 4) The reduced pellets must be melted either mixed with iron blocks or in a furnace with a reserved molten pool. 5) Molten steel of high quality can be obtained: the iron content is 98.85%, the carbon content is 0.13%, and the iron recovery rate can reach more than 96%. 6) The aluminum compound content of the slag can reach more than 37%, so the slag can be directly blended into the alumina extraction process. Figure 2 Schematic diagram of external heating Figure 3 Page 10/10 Comparison of reduction effects of red mud pellets Quality comparison between internal carbon blending and external carbon blending Melting and separating products
The potential role of HGF-MET signaling and autophagy in the war of Alectinib versus Crizotinib against ALK-positive NSCLC Non-small-cell lung cancer (NSCLC) is currently the leading cause of cancer-related death. Accumulating evidences suggest that overcoming the therapeutic resistance in NSCLC is a big challenge. Recently, the outcomes of two independent phase 3 trials regarding Alectinib versus Crizotinib in ALK-positive NSCLC are encouraging. However, given the potential relevance of HGF-MET signaling and especially autophagy to the war against ALK-positive NSCLC between Alectinib and Crizotinib, its too early to reach a convincing conclusion. Therefore, to further improve the therapeutic efficacy of ALK-positive NSCLC, this commentary highlights the negligence in design of relevant clinical trials, emphasizes the importance of molecular characteristics investigation, and discusses the prospect of combination therapy. Background Non-small-cell lung cancer (NSCLC) is currently one of the leading cause of cancer-related death. Accumulating evidences suggest that overcoming the therapeutic resistance in NSCLC is a big challenge. Recently, two independent phase 3 trials regarding Alectinib versus Crizotinib in ALK-positive NSCLC, have been individually reported by Solange Peters et al. in NEJM and Toyoaki Hida et al. in Lancet. The outcomes are encouraging, and I appreciate the concerted effort for improving therapeutic efficacy in NSCLC. However, after careful analysis and consideration, I think it's too early to reach a conclusion, because the both trials utterly neglect several key points and challenges that are highlighted as follows. Main text First, Crizotinib is actually an ALK/MET doubletargeted small molecule inhibitor. Hepatocyte growth factor (HGF) and its physiological receptor tyrosine kinase MET (HGFR) have been reported involved in almost all the aspects of cancer progression, including cancer development, growth, invasion and metastasis. 
HGF-MET signaling activates AKT- and ERK-mediated cancer cell survival, cycle, proliferation, motility and transformation. The significance of HGF-MET signaling makes them critical targets in cancer therapy. Unexpectedly, besides cancer cells themselves, MET also regulates the cancer immune microenvironment. For instance, MET is required for recruitment and infiltration of anti-tumor neutrophils to abate metastasis. The paradoxical effects of MET ought to force us to re-investigate whether it is really an applicable target in cancer therapy. Therefore, the states of MET and its ligand HGF need to be taken into consideration in the design and analysis of clinical trials. Alectinib combined with a MET-targeted inhibitor versus Crizotinib is a more reasonable comparison. Second, Crizotinib is able to induce autophagy in NSCLC. Autophagy is a protective mechanism that is usually linked to cancer metabolic stress and therapeutic resistance, relying mainly on "self-eating" to recycle obsolete components to maintain necessary biogenesis for cancer survival. In fact, combined treatment with an ALK inhibitor and an autophagy blocker, like Chloroquine (CQ) or 3-Methyladenine (3-MA), can dramatically suppress cancer cell viability and colony formation in many kinds of NSCLC. This indicates autophagy plays a pivotal role in ALK-driven onco-signaling, and autophagic state is likely to determine ALK-targeted therapeutic efficacy in NSCLC. Furthermore, ALK contains the typical LC3-interacting region (LIR) motif. The LIR motif is the principal structural foundation for autophagy machinery. By virtue of bioinformatics analysis, seventeen various types of LIR motifs are identified in MAM1, MAM2, TM, PTK domains or other regions of ALK (Table 1), which means there may be a direct interplay between ALK and autophagy. Of note, some of the LIR motifs are located in the kinase activity center of ALK. 
Given that de-phosphorylation in the LIR motif has been proven capable of enhancing its interaction with LC3, a crucial issue arises: should we just focus on inhibiting the kinase activation in ALK-targeted therapy for NSCLC? Because if only targeting kinase activity, ALK may immediately turn to induce autophagy for resisting cancer therapeutic stress via its inactivation-caused de-phosphorylated LIR motif. It seems like "when one door shuts, another opens", and will be a big challenge for all the ALK-targeted therapies (including but not limited to NSCLC). In addition, as one coin has two sides, emerging evidence suggests that autophagy has mutually contradictory effects on cancer. For example, cancer cell autonomous autophagy is required for chemotherapy-induced immune surveillance by releasing ATP to activate immunocytes, whereas surrounding normal cell-generated micro-environmental autophagy promotes tumor growth by supplementing abundant extra metabolites. Moreover, autophagy has distinct phase-dependent functions in cancer progression, for example, preventing malignant transformation, but favoring growth and metastasis. Thus, there is a dire need to divide ALK-positive NSCLC into molecular characteristics of HGF-MET signaling or / and autophagy-based sub-types for recruitment of the most eligible patients. Conclusions Taken together, even though both trials provide strong evidence supporting the superiority and safety of Alectinib in comparison with Crizotinib in ALK-positive NSCLC, I still strongly suggest the authors further investigate potential roles of HGF-MET signaling and autophagy in determining sensitivity or resistance to ALK-targeted therapy for NSCLC. On the other hand, since the anti-cancer effects and safety of autophagy blockers have been verified in several cancer therapies, an ALK-autophagy double-targeted strategy may be a simple and practicable method to improve NSCLC therapeutic efficiency in the future. 
Availability of data and materials All data generated or analyzed during this study are included in the current article. Further information is available from the corresponding author on reasonable request. Authors' contributions XH conceived the commentary, performed the bioinformatics analysis and found the LIR motif in ALK, and wrote the manuscript. The author read and approved the final manuscript. Ethics approval and consent to participate Not applicable.
UAB media guides usually include a section called ``Proud Past, Bright Future.'' It's a timeline of the most important moments in the school's athletic history. Time to add one more. June 20, 2010: Graeme McDowell wins the U.S. Open at Pebble Beach. How big was that victory for the former UAB golfer from Northern Ireland? It earned him a guest spot Monday night on NBC's ``Tonight Show.'' That's a big deal, even if Jay Leno still isn't funny the second time around. 1. Summer, 1977: UAB hires Gene Bartow as head basketball coach and AD from UCLA. Everything good that's happened for the Blazers has come, directly or indirectly, from that moment. To put it into perspective for younger generations, imagine if South Alabama, in starting its football program, had named, as its first coach, Nick Saban. 2. March, 1982: UAB beats No. 1 seed Virginia and national player of the year Ralph Sampson in the Sweet 16 in the BJCC. Are you kidding me? Five years after buying its first basketball, UAB reaches the Elite Eight? That's a story that may never be duplicated. If only the Blazers had managed to get by Louisville in the next game to reach the Final Four. 3. March, 1993: UAB 58, Alabama 56 in the NIT in Tuscaloosa. Sure, these were far from the best teams in either school's distinguished basketball history, but this game was about meeting Big Brother for the first time on the hardwood -- and beating him in his house. Alabama has yet to ask for a rematch. 4. February, 2002: Vonetta Flowers wins gold in the two-woman bobsled at the Winter Olympics. She made history on so many levels. First African-American to win gold at the Winter Games. First Alabamian to win gold at the Winter Games. And to think that she switched sports to do it. She'd been a track All-American at UAB. 5. June, 2010: Graeme McDowell wins the U.S. Open. Only one man does it every year. And consider the list of men who've done it at Pebble Beach: Jack Nicklaus, Tom Watson, Tom Kite, Tiger Woods and McDowell. 
That's elite. Quick prediction: This won't be McDowell's last major victory.
<gh_stars>0 //------------------------------------------------------------------------------ /// @file indexer.hpp /// @author <NAME> /// /// @brief Header file providing declaration & definition of std::indexer<> class template, as well as specializations std::index<> and std::series<>. /// /// std::indexer<> is a pseudo-container class template i.e. container adapter (as per STL definition), that implements high-level indexing & sorting functionality /// to STL's sequence containers, while also providing a pulic interface matching STL's *Container* and *SequenceContainer* named requirements. This means /// by itself it can be treated as a container and will work with any of STL's tools (e.g. <utility>), while being versatile enough /// /// While STL already provides associative containers in std:map<> and std::set<>, the major advantage of std::indexer<> is that it leverages the performance edge /// of different sequence containers (std::array, std::vector, std::deque, etc), makes no assumption on the ordering/sorting of the underlying data, can be locked /// to prevent dynamic resizing operations, maintains efficient random access to elements, is extensible, and thus is versatile enough to fit differerent usage scenarios. /// //------------------------------------------------------------------------------ #ifndef TRIGNOCLIENT_INCLUDE_TRIGNOCLIENT_STD_INDEXER_HPP_ #define TRIGNOCLIENT_INCLUDE_TRIGNOCLIENT_STD_INDEXER_HPP_ #include <vector> #include <type_traits> #include <cassert> #include <utility> #include <exception> #include <initializer_list> #include <algorithm> #include "type_check.hpp" // std::can_apply<> #include "tagged.hpp" // std::tagged<> #include "cast_iterator.hpp" // std::cast_iterator<> #include "range_iterator.hpp" // std::range_iterator<> namespace std { //------------------------------------------------------------------------------ /// @brief Templated static SFINAE check for indexable types. It employs std::can_apply<> (cf. 
type_check.hpp) while testing specifically for the validity of a ::id member. /// /// @note At this stage, std::indexer<> requires: /// 1) A Container type that meets STL's *Container* & *SequenceContainer* mandatory requirements holding objects of /// 2) An 'indexable' type that provide a valid public non-const 'id' member. /// /// @note With generic use in mind, std::indexer makes minimal assumptions on data type. Any STL sequence container can be used with any class/struct that provides an 'id' member. /// However, it was extensively tested using std::vector<> as the underlying container and std::tagged<> as the indexable type (that itself wraps around any other type). /// template < class T > using id_member = decltype(declval<T>().key); template < class T > using id_function = decltype(declval<T>().key()); template < class T > using position_accessor = decltype(declval<T>().operator[](0)); template < class T > using front_accessor = decltype(declval<T>().front()); template < class T > using back_accessor = decltype(declval<T>().back()); template < class T > using front_pusher = decltype(declval<T>().push_front()); template < class T > using back_pusher = decltype(declval<T>().push_back()); template < class T > using front_emplacer = decltype(declval<T>().emplace_front()); template < class T > using back_emplacer = decltype(declval<T>().emplace_back()); template < class T > using front_popper = decltype(declval<T>().pop_front()); template < class T > using back_popper = decltype(declval<T>().pop_back()); /// template < class T > using has_id = can_apply< id_member, T >; template < class T > using has_id_function = can_apply< id_function, T >; template < class T > using has_position_accessor = can_apply< position_accessor, T >; template < class T > using has_front_accessor = can_apply< front_accessor, T >; template < class T > using has_back_accessor = can_apply< back_accessor, T >; template < class T > using has_front_pusher = can_apply< front_pusher, T >; 
template < class T > using has_back_pusher = can_apply< back_pusher, T >; template < class T > using has_front_emplacer = can_apply< front_emplacer, T >; template < class T > using has_back_emplacer = can_apply< back_emplacer, T >; template < class T > using has_front_popper = can_apply< front_popper, T >; template < class T > using has_back_popper = can_apply< back_popper, T >; /// template < typename T > constexpr bool can_be_indexed() { return (has_id< T >() && !has_id_function< T >()); } template < typename T, typename U > constexpr bool can_be_replaced_by() { return (is_convertible< U, T >() && is_convertible< T, U >() /* whether T=U and U=T are valid */); } template < typename T > constexpr bool is_position_accessible() { return (has_position_accessor< T >()); } template < typename T > constexpr bool has_front() { return (has_front_accessor< T >()); } template < typename T > constexpr bool has_back() { return (has_back_accessor< T >()); } template < typename T > constexpr bool is_front_pushable() { return (has_front_pusher< T >()); } template < typename T > constexpr bool is_back_pushable() { return (has_back_pusher< T >()); } template < typename T > constexpr bool is_front_emplaceable() { return (has_front_emplacer< T >()); } template < typename T > constexpr bool is_back_emplaceable() { return (has_back_emplacer< T >()); } template < typename T > constexpr bool is_front_poppable() { return (has_front_popper< T >()); } template < typename T > constexpr bool is_back_poppable() { return (has_back_popper< T >()); } //------------------------------------------------------------------------------ /// @brief Simple extensible generic container adapter that provides high-level indexing/key functionality to *Container*, while maintaining a customizable public interface /// of a container of *T*, in line with STL's *SequenceContainer* named requirements. /// Represents a pseudo-container type for specialized "indexable" data types i.e. 
with a public identifier *key* member. /// Provides associative features of std::set and std::map in sequence containers (std::array, std::vector, std::deque and std::list), leveraging perfomance advantages of these types. /// /// @tparam Container Generic container of key/tagged type instances. /// Must fit STL's Container & SequenceContainer (partial) requirements. /// @tparam T Public/interface type. /// @tparam Locked Whether runtime size manipulation is allowed after initialization. /// Useful when size of the container does not change but elements do. /// /// @note Container element type must be a 'key' type that has a public 'id' member with identifier object (asserted @ compile time). /// /// @note Provides a minimal key/identifier interface, while exposing only a subset of container modifiers for a more generic scope. /// /// @note Designed to match public interface of other container adapters in the STL (std::stack, std::queue, etc), /// while also fitting Container requirements (https://en.cppreference.com/w/cpp/named_req/Container) and partially SequenceContainer, /// by wrapping around underlying container. /// /// @todo Rename to indexed_list? /// template < typename Container, typename T = typename Container::value_type, bool Locked = false > class indexer { public: //-------------------------------------------------------------------------- /// @brief Static assertions on data types/template arguments. Required in order to avoid ill-formed expressions within class definition. 
/// static_assert(std::is_class< Container >(), "CONTAINER TYPE MUST BE A CLASS (NON-UNION)!"); static_assert(std::is_position_accessible< Container >(), "CONTAINER TYPE MUST BE RANDOM POSITION ACCESSIBLE ([])!"); static_assert(std::can_be_indexed< typename Container::value_type >(), "UNDERLYING CONTAINER ELEMENT TYPE [Container::value_type] MUST BE INDEXABLE!"); static_assert(std::can_be_replaced_by< typename Container::value_type, T >(), "UNDERLYING CONTAINER ELEMENT TYPE [Container::value_type] MUST BE IMPLICITLY CONVERTIBLE!"); //-------------------------------------------------------------------------- /// @brief Value key wrapper type (== container value type). Provided to fit with STL 'Container' requirements. /// using value_type = T; // typename Container::value_type; //-------------------------------------------------------------------------- /// @brief Container type. Provided to fit with STL 'Container' requirements. /// using container_type = Container; //-------------------------------------------------------------------------- /// @brief Container element type. Must be an indexable type implicitly convertible to & assignable from T. /// using element_type = typename Container::value_type; //-------------------------------------------------------------------------- /// @brief Container element reference type. /// using element_reference = element_type&; //-------------------------------------------------------------------------- /// @brief Container element cons reference type. /// using element_const_reference = const element_type&; //-------------------------------------------------------------------------- /// @brief Descriptor type. /// using key_type = decltype(declval< element_type >().key); //-------------------------------------------------------------------------- /// @brief Size/index type. Provided to fit with STL 'Container' requirements. 
/// using size_type = typename Container::size_type; //-------------------------------------------------------------------------- /// @brief Value reference type. Provided to fit with STL 'Container' requirements. /// using reference = T&; // typename Container::reference; //-------------------------------------------------------------------------- /// @brief Value const reference type. Provided to fit with STL 'Container' requirements. /// using const_reference = const T&; // typename Container::const_reference; //-------------------------------------------------------------------------- /// @brief Iterator type. Provided to fit with STL 'Container' requirements. /// /// @note cast_iterator auto-converts container elements to different data type on dereferencing operation. /// using iterator = cast_iterator< Container, T >; //-------------------------------------------------------------------------- /// @brief Const iterator type. Provided to fit with STL 'Container' requirements. /// /// @note cast_iterator auto-converts container elements to different data type on dereferencing operation. /// using const_iterator = cast_iterator< const Container, const T >; //-------------------------------------------------------------------------- /// @brief Constructs a new instance, passing the arguments to the Container constructor (Container::Container(...)) /// /// @param args Container constructor arguments. /// /// @tparam Args Variadic parameter pack describing types of constructor arguments. /// /// @note Discards the need to explicitly declare default, copy and move constructors, required to meet STL 'Container' requirements. /// /// @note std::indexer<> makes no assumptions about how *element_type* is constructed, as such it may not be possible to assign labels @ construction /// (e.g. if *element_type* does not allow it, as is the case of std::tagged<>, or if keys are static/const values). 
/// This is by design, as that is considered to be the responsability of the underlying indexable type, and out of the scope of the indexer class. /// The constructor simply redirects its arguments to underlying container constructor, whcih add versatility. /// Nonetheless, for convenience, std::make_indexer<>() named constructor is provided that accepts key list as arguments. /// template < typename... Args, typename = typename enable_if< is_constructible< Container, Args... >::value >::type > indexer(Args&&... args); //-------------------------------------------------------------------------- /// @brief Constructs a new (copied) instance. /// /// @param[in] other Instance to copy/move. /// /// @note Default copy assignment operator limits input arguments to same template specialization, but it is useful to bypass Locked value. /// template < bool oLocked > indexer(const indexer< Container, T, oLocked >& other); //-------------------------------------------------------------------------- /// @brief Constructs a new (moved) instance. /// /// @param[in] other Instance to copy/move. /// /// @note Default copy assignment operator limits input arguments to same template specialization, but it is useful to bypass Locked value. /// template < bool oLocked > indexer(indexer< Container, T, oLocked >&& other); //-------------------------------------------------------------------------- /// @brief Constructs a new instance, from a braced initializer list of *T* elements. /// /// @param[in] data List of elements to assign to underlying container. /// /// @note Useful when *T* != *Container::value_type* (public interface different from underltying element type). /// Generic variadic parameter pack constructor indexer(Args&&...) captures underlaying Container initializer list constructor, /// but it will notfit if a list of elements of type *T* is passed. 
/// indexer(std::initializer_list< T > data); //-------------------------------------------------------------------------- /// @brief Destroys the object. /// /// @note Marked virtual to allow deriving from indexer class. /// /// @note Default (implicit) destructor already meets STL 'Container' requirements. /// virtual ~indexer() = default; //-------------------------------------------------------------------------- /// @brief Copy assignment operator. /// /// @note Default copy assignment operator limits input arguments to same template specialization, but it is useful to bypass Locked value. /// /// @note Provided to fit with STL 'Container' requirements. /// template < bool oLocked > indexer< Container, T, Locked >& operator=(const indexer< Container, T, oLocked >& other); //-------------------------------------------------------------------------- /// @brief Move assignment operator. /// /// @note Default move assignment operator limits input arguments to same template specialization, but it is useful to bypass Locked value. /// /// @note Provided to fit with STL 'Container' requirements. /// template < bool oLocked > indexer< Container, T, Locked >& operator=(indexer< Container, T, oLocked >&& other); //-------------------------------------------------------------------------- /// @brief Access container element. /// /// @param[in] pos Element position in container. /// /// @tparam Arg Dummy template argument to force SFINAE check over key_type. /// @tparam <unnamed> SFINAE check that auto-disables operator overload if key_type and size_type are implicitely convertible (e.g. numerical keys). /// /// @return Reference to T instance. /// /// @throw std::out_of_range if pos is invalid. /// /// @note Only available if key_type is not implicitly convertible to size_type, in order to avoid ambiguity. 
/// template < typename Arg = key_type, typename = typename enable_if< !is_convertible< Arg, size_type >::value >::type > reference operator()(size_type pos); //-------------------------------------------------------------------------- /// @brief Access container element (const overload). /// /// @param[in] pos Element position in container. /// /// @tparam Arg Dummy template argument to force SFINAE check over key_type. /// @tparam <unnamed> SFINAE check that auto-disables operator overload if key_type and size_type are implicitely convertible (e.g. numerical keys). /// /// @return Reference to T instance. /// /// @throw std::out_of_range if pos is invalid. /// /// @note Only available if key_type is not implicitly convertible to size_type, in order to avoid ambiguity. /// template < typename Arg = key_type, typename = typename enable_if< !is_convertible< Arg, size_type >::value >::type > const_reference operator()(size_type pos) const; //-------------------------------------------------------------------------- /// @brief Access container element. /// /// @param[in] key Element key/identifier. /// /// @return Reference to T instance. /// /// @throw std::invalid_argument if identifier is not found. /// reference operator()(const key_type& key); //-------------------------------------------------------------------------- /// @brief Access container element (const overload). /// /// @param[in] key Element key/identifier. /// /// @return Reference to T instance. /// /// @throw std::invalid_argument if identifier is not found. /// const_reference operator()(const key_type& key) const; //-------------------------------------------------------------------------- /// @brief Access container element. /// /// @param[in] pos Element position in container. /// /// @return Reference to T instance. /// /// @throw std::out_of_range if pos is invalid. /// /// @note Only available if key_type is not implicitly convertible to size_type, in order to avoid ambiguity. 
/// In that case, only *key* overloads are accepted. /// reference operator[](size_type pos); //-------------------------------------------------------------------------- /// @brief Access container element (const overload). /// /// @param[in] pos Element position in container. /// /// @return Reference to T instance. /// /// @throw std::out_of_range if pos is invalid. /// const_reference operator[](size_type pos) const; //-------------------------------------------------------------------------- /// @brief Access container element. /// /// @param[in] key Element key/identifier. /// /// @tparam Arg Dummy template argument to force SFINAE check over key_type. /// @tparam <unnamed> SFINAE check that auto-disables operator overload if key_type and size_type are implicitely convertible (e.g. numerical keys). /// /// @return Reference to T instance. /// /// @throw std::invalid_argument if identifier is not found. /// /// @note Only available if key_type is not implicitly convertible to size_type, in order to avoid ambiguity. /// In that case, only *position* [] operator overloads can be used (STL container interface). /// template < typename Arg = key_type, typename = typename enable_if< !is_convertible< Arg, size_type >::value >::type > reference operator[](const key_type& key); //-------------------------------------------------------------------------- /// @brief Access container element (const overload). /// /// @param[in] key Element key/identifier. /// /// @tparam Arg Dummy template argument to force SFINAE check over key_type. /// @tparam <unnamed> SFINAE check that auto-disables operator overload if key_type and size_type are implicitely convertible (e.g. numerical keys). /// /// @return Reference to T instance. /// /// @throw std::invalid_argument if identifier is not found. /// /// @note Only available if key_type is not implicitly convertible to size_type, in order to avoid ambiguity. 
/// In that case, only *position* [] operator overloads can be used (STL container interface). /// template < typename Arg = key_type, typename = typename enable_if< !is_convertible< Arg, size_type >::value >::type > const_reference operator[](const key_type& key) const; //-------------------------------------------------------------------------- /// @brief Access container element. /// /// @param[in] pos Element position in container. /// /// @tparam oT Desired output type. Underlying element type *must* be convertible to this type. /// /// @return Reference to T instance. /// /// @throw std::out_of_range if pos is invalid. /// /// @note Templated return type allows explictly requesting either value_type or element_type references @ *pos*. /// template < typename oT = value_type > oT& at(size_type pos); //-------------------------------------------------------------------------- /// @brief Access container element (const overload). /// /// @param[in] pos Element position in container. /// /// @tparam oT Desired output type. Underlying element type *must* be convertible to this type. /// /// @return Reference to T instance. /// /// @throw std::out_of_range if pos is invalid. /// /// @note Templated return type allows explictly requesting either value_type or element_type references @ *pos*. /// template < typename oT = value_type > const oT& at(size_type pos) const; //-------------------------------------------------------------------------- /// @brief Access container element. /// /// @param[in] key Element key/identifier. /// /// @tparam oT Desired output type. Underlying element type *must* be convertible to this type. /// @tparam Arg Dummy template argument to force SFINAE check over key_type. /// @tparam <unnamed> SFINAE check that auto-disables operator overload if key_type and size_type are implicitely convertible (e.g. numerical keys). /// /// @return Reference to T instance. /// /// @throw std::invalid_argument if identifier is not found. 
/// /// @note Only available if key_type is not implicitly convertible to size_type, in order to avoid ambiguity. /// In that case, only *position* [] operator overloads can be used (STL container interface). /// /// @note Templated return type allows explictly requesting either value_type or element_type references @ *pos*. /// template < typename oT = value_type, typename Arg = key_type, typename = typename enable_if< !is_convertible< Arg, size_type >::value >::type > oT& at(const key_type& key); //-------------------------------------------------------------------------- /// @brief Access container element (const overload). /// /// @param[in] key Element key/identifier. /// /// @tparam oT Desired output type. Underlying element type *must* be convertible to this type. /// @tparam Arg Dummy template argument to force SFINAE check over key_type. /// @tparam <unnamed> SFINAE check that auto-disables operator overload if key_type and size_type are implicitely convertible (e.g. numerical keys). /// /// @return Reference to T instance. /// /// @throw std::invalid_argument if identifier is not found. /// /// @note Only available if key_type is not implicitly convertible to size_type, in order to avoid ambiguity. /// In that case, only *position* [] operator overloads can be used (STL container interface). /// /// @note Templated return type allows explictly requesting either value_type or element_type references @ *pos*. /// template < typename oT = value_type, typename Arg = key_type, typename = typename enable_if< !is_convertible< Arg, size_type >::value >::type > const oT& at(const key_type& key) const; //-------------------------------------------------------------------------- /// @brief Get element position from its identifier. /// /// @param[in] id Element key/identifier. /// /// @return Position in container if element is found, -1 otherwise. 
/// virtual size_type find(const key_type& key) const noexcept; //-------------------------------------------------------------------------- /// @brief Access element key/identifier. /// /// @param[in] pos Element position in container. /// /// @return Reference to key object. /// /// @throw std::out_of_range if pos is invalid. /// /// @note Usdeful to set element key/identifiers. /// key_type& key(size_type pos); //-------------------------------------------------------------------------- /// @brief Access element key/identifier (const overload). /// /// @param[in] pos Element position in container. /// /// @return Reference to key object. /// /// @throw std::out_of_range if pos is invalid. /// const key_type& key(size_type pos) const; //-------------------------------------------------------------------------- /// @brief Gets a list of the ids describing the content of the underlying container. /// /// @return Container with id values. /// /// @tparam OutputContainer Container type. Must be a SequenceContainer with ::push_back() defined (e.g. std::basic_string, std::deque, std::vector) /// of type compatible with key_type . /// template < typename OutputContainer > OutputContainer get_keys() const; //-------------------------------------------------------------------------- /// @brief Set key/identifiers into container elements. /// /// @param[in] ids Element keys/identifiers /// /// @tparam InputContainer Container type. Must be compatible with range-based loops (e.g. STL containers), and be of a type compatible /// template < typename InputContainer > void set_keys(const InputContainer& id); //-------------------------------------------------------------------------- /// @brief Checks if container is empty. /// /// @return True if undelying container is empty, false otherwise. /// /// @note Provided to fit with STL 'Container' requirements. 
/// bool empty() const noexcept; //-------------------------------------------------------------------------- /// @brief Number of elements in the underlying container. /// /// @note Provided to fit with STL 'Container' requirements. /// size_type size() const noexcept; //-------------------------------------------------------------------------- /// @brief Maximum number of elements in the underlying container. /// /// @note Provided to fit with STL 'Container' requirements. /// size_type max_size() const noexcept; //-------------------------------------------------------------------------- /// @brief Clears the contents of the underlying container. /// /// @note Only available if Locked is false, otherwise it is not compilable. /// /// @note Provided to fit with STL 'Container' requirements. /// void clear() noexcept; //-------------------------------------------------------------------------- /// @brief Get an iterator to the beginning. /// /// @note Provided to fit with STL 'Container' requirements. /// iterator begin() noexcept; //-------------------------------------------------------------------------- /// @brief Get an iterator to the end. /// /// @note Provided to fit with STL 'Container' requirements. /// iterator end() noexcept; //-------------------------------------------------------------------------- /// @brief Get an iterator to the beginning (const overload). /// /// @note Provided to fit with STL 'Container' requirements. /// const_iterator begin() const noexcept; //-------------------------------------------------------------------------- /// @brief Get an iterator to the end (const overload). /// /// @note Provided to fit with STL 'Container' requirements. /// const_iterator end() const noexcept; //-------------------------------------------------------------------------- /// @brief Get an const iterator to the beginning. /// /// @note Provided to fit with STL 'Container' requirements. 
/// const_iterator cbegin() const noexcept; //-------------------------------------------------------------------------- /// @brief Get an const iterator to the end. /// /// @note Provided to fit with STL 'Container' requirements. /// const_iterator cend() const noexcept; //-------------------------------------------------------------------------- /// @brief Reference to the underlying container. /// /// @note Useful to manage objects directly, in cases where T != Container::value_type (i.e. iterators return different types when dereferencing). /// Container& elements() noexcept; //-------------------------------------------------------------------------- /// @brief Reference to the underlying container (const overload). /// /// @note Useful to manage objects directly, in cases where T != Container::value_type (i.e. iterators return different types when dereferencing). /// const Container& elements() const noexcept; //-------------------------------------------------------------------------- /// @brief Get a reference to element at given position. Bypasses public type interface to access underlying indexable/descriptor type. /// /// @param[in] pos Element position in underlying container. /// element_reference element(size_type pos); //-------------------------------------------------------------------------- /// @brief Get a const reference to element at given position. Bypasses public type interface to access underlying indexable/descriptor type. /// /// @param[in] pos Element position in underlying container. /// element_const_reference element(size_type pos) const; //-------------------------------------------------------------------------- /// @brief Swaps the contents of the underlying container. /// /// @param[in] other Other locked/unlocked indexer instance. /// /// @tparam oLocked Lock status of the input indexer. /// /// @note Provided to fit with STL 'Container' requirements. 
/// template < bool oLocked > void swap(const indexer< Container, T, oLocked >& other) noexcept; void assign(size_type count, const T& value); template< class InputIt > void assign(InputIt first, InputIt last); void assign(std::initializer_list<T> ilist); template< typename InputIt, typename = typename enable_if< is_convertible< InputIt, typename container_type::iterator >::value >::type, typename U = T, typename... Args, typename = typename enable_if< is_constructible< U, Args... >::value >::type > void emplace(InputIt pos, Args... args); template< class InputIt, typename = typename enable_if< is_convertible< InputIt, typename container_type::iterator >::value >::type > void insert(InputIt pos, const T& value); template< class InputIt, typename = typename enable_if< is_convertible< InputIt, typename container_type::iterator >::value >::type > void insert(InputIt pos, T&& value); template< class InputIt, typename = typename enable_if< is_convertible< InputIt, typename container_type::iterator >::value >::type > void insert(InputIt pos, size_type count, const T& value); template< typename InputIt, typename SourceIt, typename = typename enable_if< is_convertible< InputIt, typename container_type::iterator >::value >::type > void insert(InputIt pos, SourceIt first, SourceIt last); template< class InputIt, typename = typename enable_if< is_convertible< InputIt, typename container_type::iterator >::value >::type > void insert(InputIt pos, std::initializer_list< T > ilist); //-------------------------------------------------------------------------- /// @brief Erases element @ given *pos*. /// /// @param[in] pos Iterator to element to erase. /// template< class InputIt, typename = typename enable_if< is_convertible< InputIt, typename container_type::iterator >::value >::type > void erase(InputIt pos); //-------------------------------------------------------------------------- /// @brief Erases elements @ range between *first* and *last*. 
@param[in] first Iterator to the first element to erase. /// @param[in] last Iterator past the last element to erase.
is not declared/defined /// template < typename C = Container, typename = typename enable_if< is_front_emplaceable< C >() >::type, typename U = T, typename... Args, typename = typename enable_if< is_constructible< U, Args... >::value >::type > void emplace_front(Args&&... args); //-------------------------------------------------------------------------- /// @brief Creates a new element in-place @ front of container, w/ indexer key. /// /// @param key Key to assign to new element. /// @param args Arguments passed to the element constructor. /// /// @tparam Args Variadic parameter pack (implicitely deduced) describing type list of constructor arguments. /// @tparam <unnamed> SFINAE check that shadows/disables constructor overload if *T* can't be constructed with *Args* /// i.e. T(Args..) is not declared/defined /// template < typename C = Container, typename = typename enable_if< is_front_emplaceable< C >() >::type, typename U = T, typename... Args, typename = typename enable_if< is_constructible< U, Args... >::value >::type > void emplace_front(const key_type& key, Args&&... args); //-------------------------------------------------------------------------- /// @brief Creates a new element in-place @ back of container. /// /// @param args Arguments passed to the element constructor. /// /// @tparam Args Variadic parameter pack (implicitely deduced) describing type list of constructor arguments. /// @tparam <unnamed> SFINAE check that shadows/disables constructor overload if *T* can't be constructed with *Args* /// i.e. T(Args..) is not declared/defined /// template < typename C = Container, typename = typename enable_if< is_back_emplaceable< C >() >::type, typename U = T, typename... Args, typename = typename enable_if< is_constructible< U, Args... >::value >::type > void emplace_back(Args&&... args); //-------------------------------------------------------------------------- /// @brief Creates a new element in-place @ back of container, w/ indexer key. 
/// /// @param key Key to assign to new element. /// @param args Arguments passed to the element constructor. /// /// @tparam Args Variadic parameter pack (implicitely deduced) describing type list of constructor arguments. /// @tparam <unnamed> SFINAE check that shadows/disables constructor overload if *T* can't be constructed with *Args* /// i.e. T(Args..) is not declared/defined /// template < typename C = Container, typename = typename enable_if< is_back_emplaceable< C >() >::type, typename U = T, typename... Args, typename = typename enable_if< is_constructible< U, Args... >::value >::type > void emplace_back(const key_type& key, Args&&... args); //-------------------------------------------------------------------------- /// @brief Copies given *value* to @ front of container. /// /// @param value Value to add to the underlying container. /// template < typename C = Container, typename = typename enable_if< is_front_pushable< C >() >::type > void push_front(value_type&& value); //-------------------------------------------------------------------------- /// @brief Copies given *value* to @ front of container, w/ indexer key. /// /// @param key Key to assign to new element. /// @param value Value to add to the underlying container. /// template < typename C = Container, typename = typename enable_if< is_front_pushable< C >() >::type > void push_front(const key_type& key, value_type&& value); //-------------------------------------------------------------------------- /// @brief Copies given *value* to @ back of container. /// /// @param value Value to add to the underlying container. /// template < typename C = Container, typename = typename enable_if< is_back_pushable< C >() >::type > void push_back(value_type&& value); //-------------------------------------------------------------------------- /// @brief Copies given *value* to @ back of container, w/ indexer key. /// /// @param key Key to assign to new element. 
/// @param value Value to add to the underlying container. /// template < typename C = Container, typename = typename enable_if< is_back_pushable< C >() >::type > void push_back(const key_type& key, value_type&& value); //-------------------------------------------------------------------------- /// @brief Deletes/destroys element @ front of container /// template < typename C = Container, typename = typename enable_if< is_front_poppable< C >() >::type > void pop_front(); //-------------------------------------------------------------------------- /// @brief Deletes/destroys element @ back of container. /// template < typename C = Container, typename = typename enable_if< is_back_poppable< C >() >::type > void pop_back(); protected: //-------------------------------------------------------------------------- /// @brief Underlying data container. /// Container _data; }; //-------------------------------------------------------------------------- /// @cond template < typename Container, typename T, bool Locked > template < typename... Args, typename > indexer< Container, T, Locked >::indexer(Args&&... args) : _data(std::forward<Args>(args)...) { /* ... */ assert(_data.size() || !Locked && ("LOCKED CONTAINER CAN'T BE DEFAULT/EMPTY-INITIALIZED!")); } template < typename Container, typename T, bool Locked > template < bool oLocked > indexer< Container, T, Locked >::indexer(const indexer< Container, T, oLocked >& other) { /* ... */ assert(other.size() || !Locked && ("LOCKED CONTAINER CAN'T BE DEFAULT/EMPTY-INITIALIZED!")); _data = other._data; } template < typename Container, typename T, bool Locked > template < bool oLocked > indexer< Container, T, Locked >::indexer(indexer< Container, T, oLocked >&& other) { /* ... 
*/ assert(other.size() || !Locked && ("LOCKED CONTAINER CAN'T BE DEFAULT/EMPTY-INITIALIZED!")); _data = std::move(other._data); } template < typename Container, typename T, bool Locked > indexer< Container, T, Locked >::indexer(std::initializer_list< T > data) { /* ... */ // _data.reserve(data.size()); // requires Container to be std::vector! for (auto& element : data) { // since element_type and T are implicitely convertible/constructible // calls element_type(const T&) copy constructor overload _data.emplace_back(std::move(element)); } } // template < typename Container, typename T, bool Locked > // indexer< Container, T, Locked >::~indexer() { // /* ... */ // } template < typename Container, typename T, bool Locked > template < bool oLocked > indexer< Container, T, Locked >& indexer< Container, T, Locked >::operator=(const indexer< Container, T, oLocked >& other) { /* ... */ assert(_data.size() == other.size() || !Locked && ("LOCKED CONTAINER CAN'T BE RESIZED!")); _data = other._data; /* ... */ return (*this); } template < typename Container, typename T, bool Locked > template < bool oLocked > indexer< Container, T, Locked >& indexer< Container, T, Locked >::operator=(indexer< Container, T, oLocked >&& other) { /* ... */ assert(_data.size() == other.size() || !Locked && ("LOCKED CONTAINER CAN'T BE RESIZED!")); _data = other._data; /* ... 
*/ return (*this); } template < typename Container, typename T, bool Locked > template < typename, typename > typename indexer< Container, T, Locked >::reference indexer< Container, T, Locked >::operator()(typename indexer< Container, T, Locked >::size_type pos) { if (pos >= 0 && pos < _data.size()) { return _data[pos]; } throw std::out_of_range("[" + std::string(__func__) + "] Invalid position!"); } template < typename Container, typename T, bool Locked > template < typename, typename > typename indexer< Container, T, Locked >::const_reference indexer< Container, T, Locked >::operator()(typename indexer< Container, T, Locked >::size_type pos) const { if (pos >= 0 && pos < _data.size()) { return _data[pos]; } throw std::out_of_range("[" + std::string(__func__) + "] Invalid position!"); } template < typename Container, typename T, bool Locked > typename indexer< Container, T, Locked >::reference indexer< Container, T, Locked >::operator()(const typename indexer< Container, T, Locked >::key_type& key) { return _data[find(key)]; } template < typename Container, typename T, bool Locked > typename indexer< Container, T, Locked >::const_reference indexer< Container, T, Locked >::operator()(const typename indexer< Container, T, Locked >::key_type& key) const { return _data[find(key)]; } template < typename Container, typename T, bool Locked > typename indexer< Container, T, Locked >::reference indexer< Container, T, Locked >::operator[](typename indexer< Container, T, Locked >::size_type pos) { return at(pos); } template < typename Container, typename T, bool Locked > typename indexer< Container, T, Locked >::const_reference indexer< Container, T, Locked >::operator[](typename indexer< Container, T, Locked >::size_type pos) const { return at(pos); } template < typename Container, typename T, bool Locked > template < typename, typename > typename indexer< Container, T, Locked >::reference indexer< Container, T, Locked >::operator[](const typename indexer< Container, T, 
Locked >::key_type& key) { return at(key); } template < typename Container, typename T, bool Locked > template < typename, typename > typename indexer< Container, T, Locked >::const_reference indexer< Container, T, Locked >::operator[](const typename indexer< Container, T, Locked >::key_type& key) const { return at(key); } template < typename Container, typename T, bool Locked > template < typename oT > oT& indexer< Container, T, Locked >::at(typename indexer< Container, T, Locked >::size_type pos) { static_assert(is_convertible< element_type, oT >(), "[element_type] MUST BE CONVERTIBLE TO OUTPUT TYPE [oT]"); // if (pos >= 0 && pos < _data.size()) { return _data[pos]; } throw std::out_of_range("[" + std::string(__func__) + "] Invalid position!"); } template < typename Container, typename T, bool Locked > template < typename oT > const oT& indexer< Container, T, Locked >::at(typename indexer< Container, T, Locked >::size_type pos) const { static_assert(is_convertible< element_type, oT >(), "[element_type] MUST BE CONVERTIBLE TO OUTPUT TYPE [oT]"); // if (pos >= 0 && pos < _data.size()) { return _data[pos]; } throw std::out_of_range("[" + std::string(__func__) + "] Invalid position!"); } template < typename Container, typename T, bool Locked > template < typename oT, typename, typename > oT& indexer< Container, T, Locked >::at(const typename indexer< Container, T, Locked >::key_type& key) { return _data[find(key)]; } template < typename Container, typename T, bool Locked > template < typename oT, typename, typename > const oT& indexer< Container, T, Locked >::at(const typename indexer< Container, T, Locked >::key_type& key) const { return _data[find(key)]; } template < typename Container, typename T, bool Locked > typename indexer< Container, T, Locked >::size_type indexer< Container, T, Locked >::find(const indexer< Container, T, Locked >::key_type& identifier) const noexcept { for (int idx = 0; idx < _data.size(); idx++) { if (_data[idx].key == identifier) { return 
idx; } } return -1; } template < typename Container, typename T, bool Locked > typename indexer< Container, T, Locked >::key_type& indexer< Container, T, Locked >::key(indexer< Container, T, Locked >::size_type pos) { if (pos >= 0 && pos < _data.size()) { return _data[pos].key; } throw std::out_of_range("[" + std::string(__func__) + "] Invalid position!"); } template < typename Container, typename T, bool Locked > const typename indexer< Container, T, Locked >::key_type& indexer< Container, T, Locked >::key(indexer< Container, T, Locked >::size_type pos) const { if (pos >= 0 && pos < _data.size()) { return _data[pos].key; } throw std::out_of_range("[" + std::string(__func__) + "] Invalid position!"); } template < typename Container, typename T, bool Locked > template < typename OutputContainer > OutputContainer indexer< Container, T, Locked >::get_keys() const { OutputContainer ids; for (const auto& element : _data) { ids.push_back(element.key); } return ids; } template < typename Container, typename T, bool Locked > template < typename InputContainer > void indexer< Container, T, Locked >::set_keys(const InputContainer& identifiers) { int i = 0; for (const auto& element : _data) { element.key = identifiers[i++]; } } template < typename Container, typename T, bool Locked > bool indexer< Container, T, Locked >::empty() const noexcept { return _data.empty(); } template < typename Container, typename T, bool Locked > typename indexer< Container, T, Locked >::size_type indexer< Container, T, Locked >::size() const noexcept { return _data.size(); } template < typename Container, typename T, bool Locked > typename indexer< Container, T, Locked >::size_type indexer< Container, T, Locked >::max_size() const noexcept { return _data.max_size(); } template < typename Container, typename T, bool Locked > void indexer< Container, T, Locked >::clear() noexcept { static_assert(!Locked, "LOCKED CONTAINER; RESIZE OPERATIONS FORBIDDEN"); _data.clear(); } template < typename 
Container, typename T, bool Locked > typename indexer< Container, T, Locked >::iterator indexer< Container, T, Locked >::begin() noexcept { return iterator(&_data, 0); } template < typename Container, typename T, bool Locked > typename indexer< Container, T, Locked >::iterator indexer< Container, T, Locked >::end() noexcept { return iterator(&_data, _data.size()); } template < typename Container, typename T, bool Locked > typename indexer< Container, T, Locked >::const_iterator indexer< Container, T, Locked >::begin() const noexcept { return const_iterator(&_data, 0); } template < typename Container, typename T, bool Locked > typename indexer< Container, T, Locked >::const_iterator indexer< Container, T, Locked >::end() const noexcept { return const_iterator(&_data, _data.size()); } template < typename Container, typename T, bool Locked > typename indexer< Container, T, Locked >::const_iterator indexer< Container, T, Locked >::cbegin() const noexcept { return const_iterator(&_data, 0); } template < typename Container, typename T, bool Locked > typename indexer< Container, T, Locked >::const_iterator indexer< Container, T, Locked >::cend() const noexcept { return const_iterator(&_data, _data.size()); } template < typename Container, typename T, bool Locked > Container& indexer< Container, T, Locked >::elements() noexcept { return _data; } template < typename Container, typename T, bool Locked > const Container& indexer< Container, T, Locked >::elements() const noexcept { return _data; } template < typename Container, typename T, bool Locked > typename indexer< Container, T, Locked >::element_reference indexer< Container, T, Locked >::element(indexer< Container, T, Locked >::size_type pos) { return _data[pos]; // return _data.at< element_reference >(pos); } template < typename Container, typename T, bool Locked > typename indexer< Container, T, Locked >::element_const_reference indexer< Container, T, Locked >::element(indexer< Container, T, Locked >::size_type pos) 
const { return _data[pos]; // return _data.at< element_reference >(pos); } template < typename Container, typename T, bool Locked > template < bool oLocked > void indexer< Container, T, Locked >::swap(const indexer< Container, T, oLocked >& other) noexcept { _data.swap(other._data); } template < typename Container, typename T, bool Locked > void indexer< Container, T, Locked >::assign(indexer< Container, T, Locked >::size_type count, const T& value) { _data.assign(count, value); } template < typename Container, typename T, bool Locked > template< class InputIt > void indexer< Container, T, Locked >::assign(InputIt first, InputIt last) { _data.assign(first, last); } template < typename Container, typename T, bool Locked > void indexer< Container, T, Locked >::assign(std::initializer_list< T > ilist) { _data.assign(ilist); } template < typename Container, typename T, bool Locked > template < typename InputIt, typename, typename, typename... Args, typename > void indexer< Container, T, Locked >::emplace(InputIt pos, Args... 
args) { return _data.emplace(pos, std::forward<Args>(args)...); } template < typename Container, typename T, bool Locked > template < typename InputIt, typename > void indexer< Container, T, Locked >::insert(InputIt pos, const T& value) { // parse input iterator _data.insert(pos, value); } template < typename Container, typename T, bool Locked > template < typename InputIt, typename > void indexer< Container, T, Locked >::insert(InputIt pos, T&& value) { _data.insert(pos, value); } template < typename Container, typename T, bool Locked > template < typename InputIt, typename > void indexer< Container, T, Locked >::insert(InputIt pos, typename indexer< Container, T, Locked >::size_type count, const T& value) { _data.insert(pos, count, value); } template < typename Container, typename T, bool Locked > template< class InputIt, typename SourceIt, typename > void indexer< Container, T, Locked >::insert(InputIt pos, SourceIt first, SourceIt last) { _data.insert(pos, first, last); } template < typename Container, typename T, bool Locked > template < typename InputIt, typename > void indexer< Container, T, Locked >::insert(InputIt pos, std::initializer_list< T > ilist) { _data.insert(pos, ilist); } template < typename Container, typename T, bool Locked > template< class InputIt, typename > void indexer< Container, T, Locked >::erase(InputIt pos) { // @note cast_iterator< container_type > is implicitly convertible to container_type::iterator, we can just call underlying erase() overload _data.erase(pos); } template < typename Container, typename T, bool Locked > template< class InputIt, typename > void indexer< Container, T, Locked >::erase(InputIt first, InputIt last) { // @note cast_iterator< container_type > is implicitly convertible to container_type::iterator, we can just call underlying erase() overload _data.erase(first, last); } template < typename Container, typename T, bool Locked > template < typename oT, typename, typename > oT& indexer< Container, T, Locked 
>::front() { return _data.front(); } template < typename Container, typename T, bool Locked > template < typename oT, typename, typename > const oT& indexer< Container, T, Locked >::front() const { return _data.front(); } template < typename Container, typename T, bool Locked > template < typename oT, typename, typename > oT& indexer< Container, T, Locked >::back() { return _data.back(); } template < typename Container, typename T, bool Locked > template < typename oT, typename, typename > const oT& indexer< Container, T, Locked >::back() const { return _data.back(); } template < typename Container, typename T, bool Locked > template < typename, typename, typename, typename... Args, typename > void indexer< Container, T, Locked >::emplace_front(Args&&... args) { _data.emplace_front(std::forward<Args>(args)...); // for return type, compile with C++17!! } template < typename Container, typename T, bool Locked > template < typename, typename, typename, typename... Args, typename > void indexer< Container, T, Locked >::emplace_front(const typename indexer< Container, T, Locked >::key_type& key, Args&&... args) { _data.emplace_front(key, std::forward<Args>(args)...); // for return type, compile with C++17!! } template < typename Container, typename T, bool Locked > template < typename, typename, typename, typename... Args, typename > void indexer< Container, T, Locked >::emplace_back(Args&&... args) { _data.emplace_back(std::forward<Args>(args)...); // for return type, compile with C++17!! } template < typename Container, typename T, bool Locked > template < typename, typename, typename, typename... Args, typename > void indexer< Container, T, Locked >::emplace_back(const typename indexer< Container, T, Locked >::key_type& key, Args&&... args) { _data.emplace_back(key, std::forward<Args>(args)...); // for return type, compile with C++17!! 
} template < typename Container, typename T, bool Locked > template < typename, typename > void indexer< Container, T, Locked >::push_front(typename indexer< Container, T, Locked >::value_type&& value) { _data.push_front(std::forward< value_type >(value)); } template < typename Container, typename T, bool Locked > template < typename, typename > void indexer< Container, T, Locked >::push_front(const indexer< Container, T, Locked >::key_type& key, typename indexer< Container, T, Locked >::value_type&& value) { _data.push_front(std::forward< value_type >(value)); _data.front().key = key; } template < typename Container, typename T, bool Locked > template < typename, typename > void indexer< Container, T, Locked >::push_back(typename indexer< Container, T, Locked >::value_type&& value) { _data.push_back(std::forward< value_type >(value)); } template < typename Container, typename T, bool Locked > template < typename, typename > void indexer< Container, T, Locked >::push_back(const indexer< Container, T, Locked >::key_type& key, typename indexer< Container, T, Locked >::value_type&& value) { _data.push_back(std::forward< value_type >(value)); _data.back().key = key; } template < typename Container, typename T, bool Locked > template < typename, typename > void indexer< Container, T, Locked >::pop_front() { _data.pop_front(); } template < typename Container, typename T, bool Locked > template < typename, typename > void indexer< Container, T, Locked >::pop_back() { _data.pop_back(); } /// @endcond //------------------------------------------------------------------------------ /// @brief Utility constructor that allows inline initialization of an indexer object and its key set. /// /// @param[in] keys Key values to be assigned to elements of resulting indexer<>. /// @param[in] args Argument list to be passed to the constructor. /// /// @tparam Container Generic container of key/tagged type instances. 
/// Must fit STL's Container & SequenceContainer (partial) requirements. /// @tparam T Public/interface type. /// @tparam Locked Whether runtime size manipulation is allowed after initialization. /// Useful when size of the container does not change but elements do. /// @tparam Args Variadic parameter pack describing types of arguments passed to constructor. /// /// @return indexer<> instance with given *keys*. /// template <typename Container, typename T = typename Container::value_type, bool Locked = false, typename... Args > indexer< Container, T, Locked > make_indexer(const initializer_list< typename indexer< Container, T, Locked >::key_type > keys, Args... args) { auto obj = indexer< Container, T, Locked >(args...); obj.set_keys(keys); return obj; } //------------------------------------------------------------------------------ /// @brief Public std::indexer specialization to std::vector with text keys, and generalizing over given type T through std::tagged<> indexable wrapper. /// /// @note Practical/intuitive specialization, exploiting std::indexer interface to provide a pseudo-container with associative properties. /// /// @tparam T Data type (*not* the container data type!) /// @tparam Locked Whether container is resizeable after initialization. Defaults to false (expandable container). /// @tparam Id Type of key/identifier. Defautls to string. /// template < typename T, typename Key = string, bool Locked = false> using index = indexer< std::vector< std::tagged< T, Key > >, T, Locked >; } // namespace std #endif // TRIGNOCLIENT_INCLUDE_TRIGNOCLIENT_STD_REGISTER_HPP_
Retailers were ecstatic about the level of online shopping and mostly pleased with traffic at their brick-and-mortar stores this holiday season. Retailers got what they were looking for this holiday season in soaring online sales and sufficient store traffic — but they paid dearly for it. Expenses associated with advertising, markdowns, shipping and increased service will bolster the top line but bring down the bottom line, industry experts told WWD.
#pragma once

#include <string>  // fix: std::string was used without including <string>

namespace sqlite_orm {

    namespace internal {

        /**
         *  Base serialization context: flags controlling how AST nodes are
         *  rendered to SQL text, with a schema-less column-name fallback.
         */
        struct serializator_context_base {
            bool replace_bindable_with_question = false;  //  render bindable values as '?' placeholders
            bool skip_table_name = true;                  //  omit table-name qualification of column names
            bool use_parentheses = true;                  //  wrap sub-expressions in parentheses

            /**
             *  Fallback lookup: the base context has no storage/schema attached,
             *  so it cannot resolve a member pointer — returns an empty string.
             */
            template<class O, class F>
            std::string column_name(F O::*) const {
                return {};
            }
        };

        /**
         *  Serialization context bound to a storage implementation `I`;
         *  resolves column names through that implementation.
         */
        template<class I>
        struct serializator_context : serializator_context_base {
            using impl_type = I;

            const impl_type &impl;

            serializator_context(const impl_type &impl_) : impl(impl_) {}

            template<class O, class F>
            std::string column_name(F O::*m) const {
                return this->impl.column_name(m);
            }
        };

        /**
         *  Factory: builds a serializator_context from a storage object's impl.
         */
        template<class S>
        struct serializator_context_builder {
            using storage_type = S;
            using impl_type = typename storage_type::impl_type;

            serializator_context_builder(const storage_type &storage_) : storage(storage_) {}

            serializator_context<impl_type> operator()() const {
                return {this->storage.impl};
            }

            const storage_type &storage;
        };

    }
}
Authorities in southern Arizona have suspended an unsuccessful weeklong search for an 82-year-old Ohio man who a sheriff says is now presumed dead after getting lost while hiking. NOGALES, Ariz. – Authorities in southern Arizona have suspended an unsuccessful weeklong search for an Ohio man who a sheriff says is now presumed dead after getting lost while hiking in rugged terrain in the Santa Rita Mountains near Tucson. Santa Cruz County Sheriff Tony Estrada said Tuesday that searchers did everything they could to first try to rescue Harold “Joe” Smallwood and then locate and recover the body of the 82-year-old Springboro, Ohio, resident who had been staying in Tucson. The National Weather Service says there was snowfall early the next morning in the Santa Rita Mountains where temperatures dropped as low as 17 degrees (-8.33 Celsius).
def layers(self, rev=True):
    """Return this image's layers wrapped as ``PodmanImage`` objects.

    Each id reported by ``get_layer_ids()`` becomes a ``PodmanImage`` with
    pull policy ``NEVER``. With ``rev`` falsy the list order is inverted.
    """
    wrapped = []
    for layer_id in self.get_layer_ids():
        wrapped.append(
            PodmanImage(None, identifier=layer_id, pull_policy=PodmanImagePullPolicy.NEVER)
        )
    return wrapped if rev else wrapped[::-1]
Scope-guided implantation of a stented elephant trunk in acute aortic dissection. A reliable guide is essential for implanting a stented graft safely into a recently dissected, fragile aorta. In 4 patients with acute aortic dissection, the implantation of a stented elephant trunk was done safely using an endoscope for direct visualization. In all patients, the operation went well. The placement of a stent appears to enhance the benefit of the elephant trunk, which itself reduces the complications of an arch replacement in acute dissection.
Getting the privacy calculus right: Analyzing the relations between privacy concerns, expected benefits, and self-disclosure using response surface analysis Rational models of privacy self-management such as privacy calculus assume that sharing personal information online can be explained by individuals perceptions of risks and benefits. Previous research tested this assumption by conducting conventional multivariate procedures, including path analysis or structural equation modeling. However, these analytical approaches cannot account for the potential conjoint effects of risk and benefit perceptions. In this paper, we use a novel analytical approach called polynomial regressions with response surface analysis (RSA) to investigate potential non-linear and conjoint effects based on three data sets (N1 = 344, N2 = 561, N3 = 1.131). In all three datasets, we find that people self-disclose more when gratifications exceed concerns. In two datasets, we also find that self-disclosure increases when both risk and benefit perceptions are on higher rather than lower levels, suggesting that gratifications play an important role in determining whether and how risk considerations will factor into the decision to disclose information.
package br.study.model; import org.junit.Assert; import org.junit.Before; import org.junit.Test; import java.time.Duration; import java.time.Instant; import java.util.ArrayList; import java.util.List; public class TestRoundRobinScheduler { private final List<Process> contiguousProcessList = new ArrayList<>(); private final List<CpuTask> expectedScheduleOfProcessesOnContiguousProcessList = new ArrayList<>(); @Before public void initializeContiguousProcessListAndItsExpectedSchedule() { Process process1 = new Process(1, Instant.ofEpochMilli(0), Duration.ofMillis(24)); Process process2 = new Process(2, Instant.ofEpochMilli(1), Duration.ofMillis(3)); Process process3 = new Process(3, Instant.ofEpochMilli(2), Duration.ofMillis(3)); contiguousProcessList.add(process1); contiguousProcessList.add(process2); contiguousProcessList.add(process3); CpuTask cpuTask1 = new CpuTask(process1, Instant.ofEpochMilli(0), Instant.ofEpochMilli(4)); CpuTask cpuTask2 = new CpuTask(process2, Instant.ofEpochMilli(4), Instant.ofEpochMilli(7)); CpuTask cpuTask3 = new CpuTask(process3, Instant.ofEpochMilli(7), Instant.ofEpochMilli(10)); CpuTask cpuTask4 = new CpuTask(process1, Instant.ofEpochMilli(10), Instant.ofEpochMilli(30)); expectedScheduleOfProcessesOnContiguousProcessList.add(cpuTask1); expectedScheduleOfProcessesOnContiguousProcessList.add(cpuTask2); expectedScheduleOfProcessesOnContiguousProcessList.add(cpuTask3); expectedScheduleOfProcessesOnContiguousProcessList.add(cpuTask4); } @Test public void schedule() { List<CpuTask> processesScheduledByTheAlgorithm = new RoundRobinScheduler(Duration.ofMillis(4)).schedule(contiguousProcessList); for (int i = 0; i < processesScheduledByTheAlgorithm.size(); i++) { Assert.assertEquals(processesScheduledByTheAlgorithm.get(i), expectedScheduleOfProcessesOnContiguousProcessList.get(i)); } } }
Synthesizing embedded software with safety wrappers through polyhedral analysis in a polychronous framework Polychrony, a model of computation, allows us to statically analyze safety properties from formal specifications and synthesize deterministic software for safety-critical cyber physical systems. Currently, the analysis is performed on the formal specifications through Boolean abstractions. Even though it is a sound abstraction, for more precise analysis we might have to refine the abstraction. Refining the abstraction level from pure Boolean to a theory of Integers can lead to more precise decisions. In this paper, we first show how integrating a Satisfiability Modulo Theory (SMT) solver to POLYCHRONY compiler can enhance its decision making capabilities. Further, we show, how a polyhedral analysis library integrated to the compiler, can compute safe operational boundaries, and filter unsafe input combinations to keep the system safe. We enhanced the POLYCHRONY compiler's ability to make more accurate decisions and to accept and characterize the safe input range for specifications where safety may be violated for a relatively small region of a large input space. The enhancement also allows the user to consider the severity of the violation with respect to entire space of inputs, and either reject a specification or synthesize a wrapped software with guaranteed safe operation.
/* * Copyright (c) 2005, 2014, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 only, as * published by the Free Software Foundation. Oracle designates this * particular file as subject to the "Classpath" exception as provided * by Oracle in the LICENSE file that accompanied this code. * * This code is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * version 2 for more details (a copy is included in the LICENSE file that * accompanied this code). * * You should have received a copy of the GNU General Public License version * 2 along with this work; if not, write to the Free Software Foundation, * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. * * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA * or visit www.oracle.com if you need additional information or have any * questions. 
 */
package org.openjdk.jmh.generators.core;

import org.openjdk.jmh.annotations.CompilerControl;
import org.openjdk.jmh.runner.CompilerHints;

import java.io.IOException;
import java.io.OutputStreamWriter;
import java.io.PrintWriter;
import java.io.Writer;
import java.nio.charset.StandardCharsets;
import java.util.Comparator;
import java.util.Set;
import java.util.SortedSet;
import java.util.TreeSet;

/**
 * Collects {@link CompilerControl} directives discovered during benchmark
 * generation and writes them out as the compiler-hints resource file that the
 * JMH runner later feeds to the JVM ({@code CompilerHints.LIST}).
 *
 * Each output line has the form {@code <command>,<class>.<method>}; lines are
 * kept in a sorted set so the generated file is deterministic and free of
 * duplicates.
 */
class CompilerControlPlugin {

    // Final "command,target" lines for the hints file, sorted for stable output.
    private final SortedSet<String> lines = new TreeSet<>();

    // Methods to force-inline by default unless explicitly annotated otherwise;
    // ordered by qualified name so iteration is deterministic.
    private final Set<MethodInfo> defaultForceInlineMethods = new TreeSet<>(Comparator.comparing(MethodInfo::getQualifiedName));

    // Fully-qualified "class.method" targets that must never be inlined.
    private final Set<String> alwaysDontInlineMethods = new TreeSet<>();

    /** Registers a method to be force-inlined unless an annotation overrides it. */
    public void defaultForceInline(MethodInfo methodInfo) {
        defaultForceInlineMethods.add(methodInfo);
    }

    /** Registers a method (by class and name) that must never be inlined. */
    public void alwaysDontInline(String className, String methodName) {
        alwaysDontInlineMethods.add(getName(className, methodName));
    }

    /**
     * Scans the source for {@link CompilerControl} annotations and merges them
     * with the defaults registered above into {@link #lines}.
     *
     * Precedence: explicit method/class annotations win over the
     * "default force inline" registrations, which are skipped when either the
     * method or its declaring class carries its own annotation.
     * Any failure is reported through the destination rather than propagated,
     * so a broken plugin does not abort the whole generation run.
     */
    public void process(GeneratorSource source, GeneratorDestination destination) {
        try {
            // Explicitly annotated methods: take the mode straight from the annotation.
            for (MethodInfo element : BenchmarkGeneratorUtils.getMethodsAnnotatedWith(source, CompilerControl.class)) {
                CompilerControl ann = element.getAnnotation(CompilerControl.class);
                if (ann == null) {
                    throw new IllegalStateException("No annotation");
                }
                CompilerControl.Mode command = ann.value();
                lines.add(command.command() + "," + getName(element));
            }
            for (MethodInfo element : defaultForceInlineMethods) {
                // Skip methods annotated explicitly
                if (element.getAnnotation(CompilerControl.class) != null) continue;
                // Skip methods in classes that are annotated explicitly
                if (element.getDeclaringClass().getAnnotation(CompilerControl.class) != null) continue;
                lines.add(CompilerControl.Mode.INLINE.command() + "," + getName(element));
            }
            for (String element : alwaysDontInlineMethods) {
                lines.add(CompilerControl.Mode.DONT_INLINE.command() + "," + element);
            }
            // Class-level annotations apply to every method: target is "class.*".
            for (ClassInfo element : BenchmarkGeneratorUtils.getClassesAnnotatedWith(source, CompilerControl.class)) {
                CompilerControl ann = element.getAnnotation(CompilerControl.class);
                if (ann == null) {
                    throw new IllegalStateException("No annotation");
                }
                CompilerControl.Mode command = ann.value();
                lines.add(command.command() + "," + getName(element));
            }
        } catch (Throwable t) {
            destination.printError("Compiler control generators had thrown the unexpected exception", t);
        }
    }

    /**
     * Writes the accumulated hint lines to the compiler-hints resource.
     * CompilerHints.LIST starts with a '/'; substring(1) makes it a relative
     * resource path for the destination.
     */
    public void finish(GeneratorSource source, GeneratorDestination destination) {
        try (Writer w = new OutputStreamWriter(destination.newResource(CompilerHints.LIST.substring(1)), StandardCharsets.UTF_8)){
            PrintWriter writer = new PrintWriter(w);
            for (String line : lines) {
                writer.println(line);
            }
            writer.close();
        } catch (IOException ex) {
            destination.printError("Error writing compiler hint list ", ex);
        } catch (Throwable t) {
            destination.printError("Compiler control generators had thrown the unexpected exception", t);
        }
    }

    // Builds the "pkg/Class.method" form the JVM's CompileCommand file expects
    // (dots in the package become slashes).
    private static String getName(String className, String methodName) {
        return className.replaceAll("\\.", "/") + "." + methodName;
    }

    private static String getName(MethodInfo mi) {
        return getName(getClassName(mi.getDeclaringClass()), mi.getName());
    }

    // Class-level target: wildcard over all of the class's methods.
    private static String getName(ClassInfo ci) {
        return getName(getClassName(ci), "*");
    }

    private static String getClassName(ClassInfo ci) {
        return ci.getPackageName() + "." + BenchmarkGeneratorUtils.getNestedNames(ci);
    }
}
People aged sixty-five years or older account for 23% of the population in Japan.
/**
 * Thin client for the remote "HelloWorld" service.
 *
 * <p>
 * When booting up, Spring Cloud will try and find a service named
 * "HelloWorld" (see the FeignClient below) under the available service
 * registry (e.g. a ZooKeeper instance) and generate an HTTP proxy for it.
 * </p>
 *
 */
@Configuration
@EnableFeignClients
@EnableDiscoveryClient
public class HelloWorldClient {

    // Feign-generated proxy for the interface below, injected by Spring.
    @Autowired
    private TheClient theClient;

    /** Declarative binding: GET /helloworld on the service named "HelloWorld". */
    @FeignClient(name = "HelloWorld")
    interface TheClient {
        @RequestMapping(path = "/helloworld", method = RequestMethod.GET)
        @ResponseBody
        String HelloWorld();
    }

    /** Invokes the remote /helloworld endpoint and returns its response body. */
    public String HelloWorld() {
        return theClient.HelloWorld();
    }
}
<reponame>commercionetwork/dsb package environment import ( "os" "reflect" "strconv" ) // Get reads the environment and returns a populated, verified instance of Variables. func Get() (Variables, error) { v := Variables{} for envVar, fieldName := range evMapping { value := os.Getenv(envVar) field := reflect.ValueOf(&v).Elem().FieldByName(fieldName) switch field.Type().Kind().String() { case "string": field.SetString(value) case "bool": field.SetBool(parseBool(value)) case "int": if value == "" { field.SetInt(0) continue } t, e := strconv.Atoi(value) if e != nil { panic(e) } field.SetInt(int64(t)) } } if err := v.Validate(); err != nil { return Variables{}, err } return v, nil } func parseBool(str string) bool { b, e := strconv.ParseBool(str) if e != nil { return false } return b }
# Copyright 2021 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from connector import channel
from google3.cloud.graphite.mmv2.services.google.compute import backend_bucket_pb2
from google3.cloud.graphite.mmv2.services.google.compute import backend_bucket_pb2_grpc

from typing import List


class BackendBucket(object):
    """Declarative client for the Compute BackendBucket resource.

    Mirrors the proto fields of ComputeBackendBucket; apply() creates or
    updates the remote resource and refreshes local fields from the server
    response, delete() removes it, list() enumerates buckets in a project.
    """

    def __init__(
        self,
        bucket_name: str = None,
        cdn_policy: dict = None,
        description: str = None,
        enable_cdn: bool = None,
        name: str = None,
        project: str = None,
        self_link: str = None,
        service_account_file: str = "",
    ):
        channel.initialize()
        self.bucket_name = bucket_name
        self.cdn_policy = cdn_policy
        self.description = description
        self.enable_cdn = enable_cdn
        self.name = name
        self.project = project
        # Fix: the constructor accepted self_link but never stored it, so the
        # attribute did not exist until apply() populated it from the server.
        self.self_link = self_link
        self.service_account_file = service_account_file

    def apply(self):
        """Creates/updates the resource and syncs local fields from the response."""
        stub = backend_bucket_pb2_grpc.ComputeBackendBucketServiceStub(
            channel.Channel()
        )
        request = backend_bucket_pb2.ApplyComputeBackendBucketRequest()
        if Primitive.to_proto(self.bucket_name):
            request.resource.bucket_name = Primitive.to_proto(self.bucket_name)

        if BackendBucketCdnPolicy.to_proto(self.cdn_policy):
            request.resource.cdn_policy.CopyFrom(
                BackendBucketCdnPolicy.to_proto(self.cdn_policy)
            )
        else:
            # Explicitly clear the message field when no policy is set.
            request.resource.ClearField("cdn_policy")
        if Primitive.to_proto(self.description):
            request.resource.description = Primitive.to_proto(self.description)

        if Primitive.to_proto(self.enable_cdn):
            request.resource.enable_cdn = Primitive.to_proto(self.enable_cdn)

        if Primitive.to_proto(self.name):
            request.resource.name = Primitive.to_proto(self.name)

        if Primitive.to_proto(self.project):
            request.resource.project = Primitive.to_proto(self.project)

        request.service_account_file = self.service_account_file

        response = stub.ApplyComputeBackendBucket(request)
        self.bucket_name = Primitive.from_proto(response.bucket_name)
        self.cdn_policy = BackendBucketCdnPolicy.from_proto(response.cdn_policy)
        self.description = Primitive.from_proto(response.description)
        self.enable_cdn = Primitive.from_proto(response.enable_cdn)
        self.name = Primitive.from_proto(response.name)
        self.project = Primitive.from_proto(response.project)
        self.self_link = Primitive.from_proto(response.self_link)

    def delete(self):
        """Deletes the remote resource identified by this object's fields."""
        stub = backend_bucket_pb2_grpc.ComputeBackendBucketServiceStub(
            channel.Channel()
        )
        request = backend_bucket_pb2.DeleteComputeBackendBucketRequest()
        request.service_account_file = self.service_account_file
        if Primitive.to_proto(self.bucket_name):
            request.resource.bucket_name = Primitive.to_proto(self.bucket_name)

        if BackendBucketCdnPolicy.to_proto(self.cdn_policy):
            request.resource.cdn_policy.CopyFrom(
                BackendBucketCdnPolicy.to_proto(self.cdn_policy)
            )
        else:
            request.resource.ClearField("cdn_policy")
        if Primitive.to_proto(self.description):
            request.resource.description = Primitive.to_proto(self.description)

        if Primitive.to_proto(self.enable_cdn):
            request.resource.enable_cdn = Primitive.to_proto(self.enable_cdn)

        if Primitive.to_proto(self.name):
            request.resource.name = Primitive.to_proto(self.name)

        if Primitive.to_proto(self.project):
            request.resource.project = Primitive.to_proto(self.project)

        response = stub.DeleteComputeBackendBucket(request)

    @classmethod
    def list(self, project, service_account_file=""):
        """Returns all backend buckets in *project*."""
        stub = backend_bucket_pb2_grpc.ComputeBackendBucketServiceStub(
            channel.Channel()
        )
        request = backend_bucket_pb2.ListComputeBackendBucketRequest()
        request.service_account_file = service_account_file
        # NOTE(review): capitalized field name 'Project' — confirm it matches
        # the generated request proto.
        request.Project = project

        return stub.ListComputeBackendBucket(request).items

    def to_proto(self):
        """Serializes this object into a ComputeBackendBucket message."""
        resource = backend_bucket_pb2.ComputeBackendBucket()
        if Primitive.to_proto(self.bucket_name):
            resource.bucket_name = Primitive.to_proto(self.bucket_name)
        if BackendBucketCdnPolicy.to_proto(self.cdn_policy):
            resource.cdn_policy.CopyFrom(
                BackendBucketCdnPolicy.to_proto(self.cdn_policy)
            )
        else:
            resource.ClearField("cdn_policy")
        if Primitive.to_proto(self.description):
            resource.description = Primitive.to_proto(self.description)
        if Primitive.to_proto(self.enable_cdn):
            resource.enable_cdn = Primitive.to_proto(self.enable_cdn)
        if Primitive.to_proto(self.name):
            resource.name = Primitive.to_proto(self.name)
        if Primitive.to_proto(self.project):
            resource.project = Primitive.to_proto(self.project)
        return resource


class BackendBucketCdnPolicy(object):
    """Value object for the bucket's CDN policy sub-message."""

    def __init__(
        self,
        signed_url_key_names: list = None,
        signed_url_cache_max_age_sec: int = None,
    ):
        self.signed_url_key_names = signed_url_key_names
        self.signed_url_cache_max_age_sec = signed_url_cache_max_age_sec

    @classmethod
    def to_proto(self, resource):
        """Converts a BackendBucketCdnPolicy (or None) into its proto message."""
        if not resource:
            return None

        res = backend_bucket_pb2.ComputeBackendBucketCdnPolicy()
        if Primitive.to_proto(resource.signed_url_key_names):
            res.signed_url_key_names.extend(
                Primitive.to_proto(resource.signed_url_key_names)
            )
        if Primitive.to_proto(resource.signed_url_cache_max_age_sec):
            res.signed_url_cache_max_age_sec = Primitive.to_proto(
                resource.signed_url_cache_max_age_sec
            )
        return res

    @classmethod
    def from_proto(self, resource):
        """Converts a proto message (or None) back into a BackendBucketCdnPolicy."""
        if not resource:
            return None

        return BackendBucketCdnPolicy(
            signed_url_key_names=Primitive.from_proto(resource.signed_url_key_names),
            signed_url_cache_max_age_sec=Primitive.from_proto(
                resource.signed_url_cache_max_age_sec
            ),
        )


class BackendBucketCdnPolicyArray(object):
    """Element-wise proto conversion for lists of BackendBucketCdnPolicy."""

    @classmethod
    def to_proto(self, resources):
        if not resources:
            return resources
        return [BackendBucketCdnPolicy.to_proto(i) for i in resources]

    @classmethod
    def from_proto(self, resources):
        return [BackendBucketCdnPolicy.from_proto(i) for i in resources]


class Primitive(object):
    """Scalar conversion helpers; proto scalars cannot be None, so falsy -> ""."""

    @classmethod
    def to_proto(self, s):
        if not s:
            return ""
        return s

    @classmethod
    def from_proto(self, s):
        return s
#include <iostream>
#include <cstring>
#include <cstdio>
#include <algorithm>
#include <cmath>
#include <vector>
#include <stack>
#include <map>
#include <set>
#include <queue>
#include <iomanip>
#include <string>
#include <ctime>
#include <list>
typedef unsigned char byte;
#define pb push_back
#define input_fast std::ios::sync_with_stdio(false);std::cin.tie(0)
#define local freopen("in.txt","r",stdin)
#define pi acos(-1)
using namespace std;

// Counts subsequences of length 3 that form a geometric progression with
// common ratio k (Codeforces 567C style):
//   c2[v] = occurrences of value v seen so far
//   c1[v] = pairs (x, v) with v == x*k seen so far
map<long long, long long> c1, c2;
long long n, k;

int main()
{
    long long ans = 0;
    // Fix: %I64d is an MSVC-only format specifier; %lld is the standard
    // (C99/C++11) conversion for long long and works on all platforms.
    scanf("%lld %lld", &n, &k);
    for (long long i = 0; i < n; i++) {
        long long temp;
        scanf("%lld", &temp);
        // temp can close a triple (x, x*k, temp) only if divisible by k*k.
        if (temp % (k * k) == 0)
            ans += c1[temp / k];
        // temp extends a lone element into a pair only if divisible by k.
        if (temp % k == 0)
            c1[temp] += c2[temp / k];
        c2[temp]++;
    }
    printf("%lld\n", ans);
    return 0;
}
import {
    autoCacheMode,
    connect,
    dependencies,
    dependencyVersions,
    disconnect,
    finalize,
    mark,
    observers,
} from '../symbols';
import {
    isRecordingObservations,
    Observer,
    recordObservation,
    startRecordingObservations,
    stopRecordingObservations,
    TrackedObservable,
    TrackedObserver,
} from './tracking';

// Unit tests for the observation-recording machinery: dependency capture,
// re-recording (dependency diffing + disconnect of dropped deps), nesting,
// and cycle detection.
describe('tracking/tracking', () => {
    it('should throw when trying to stop recording when no recording is active', () => {
        expect(() => stopRecordingObservations()).toThrowError();
    });

    describe('(recording observations)', () => {
        let observer: TrackedObserver;
        let observables: Array<TrackedObservable & Observer>;

        // Fresh observer stub per test; symbol-keyed slots mirror the real shape.
        beforeEach(() => {
            observer = {
                id: 0,
                [dependencies]: [],
                [dependencyVersions]: {},
                [mark]: jest.fn(),
            };
        });

        // Three observable stubs (ids 1..3), all connected, none finalized.
        beforeEach(() => {
            observables = [1, 2, 3].map(id => ({
                id,
                version: 1,
                [observers]: [],
                [autoCacheMode]: false,
                connected: true,
                [connect]: jest.fn(),
                [disconnect]: jest.fn(),
                [mark]: jest.fn(),
                finalized: false,
                [finalize]: jest.fn(),
            }));
        });

        // Every test must leave the recording stack empty.
        afterEach(() => {
            expect(isRecordingObservations()).toBe(false);
        });

        it('should report that we are in a recording', () => {
            expect(isRecordingObservations()).toBe(false);
            startRecordingObservations(observer);
            expect(isRecordingObservations()).toBe(true);
            stopRecordingObservations();
            expect(isRecordingObservations()).toBe(false);
        });

        describe('after one recording', () => {
            // First pass: observe all three observables.
            beforeEach(() => {
                startRecordingObservations(observer);
                observables.forEach(obs => recordObservation(obs, false));
                stopRecordingObservations();
            });

            it('should have recorded all dependencies', () => {
                expect(observer[dependencies]).toEqual(observables);
                expect(observer[dependencyVersions]).toEqual({ 1: 1, 2: 1, 3: 1 });
                observables.forEach(obs => expect(obs[observers]).toEqual([observer]));
            });

            describe('and a second recording', () => {
                // Second pass observes only #1 and #0 (in that order); #2 is dropped.
                beforeEach(() => {
                    startRecordingObservations(observer);
                    recordObservation(observables[1], false);
                    recordObservation(observables[0], false);
                    // Observables should be recorded only once in the dependencies array...
                    recordObservation(observables[1], false);
                    stopRecordingObservations();
                });

                it('should have updated the dependencies in the order in which they were observed', () => {
                    expect(observer[dependencies]).toEqual([observables[1], observables[0]]);
                    expect(observables[2][observers]).toHaveLength(0);
                    observables.slice(0, 2).forEach(obs => expect(obs[observers]).toEqual([observer]));
                });

                it('should have called disconnect on disconnectable dependencies that are no longer needed', () => {
                    expect(observables[0][disconnect]).not.toHaveBeenCalled();
                    expect(observables[1][disconnect]).not.toHaveBeenCalled();
                    expect(observables[2][disconnect]).toHaveBeenCalledTimes(1);
                });
            });
        });

        it('should support nested recordings', () => {
            const secondObserver: TrackedObserver = {
                id: 4,
                [dependencies]: [],
                [dependencyVersions]: {},
                [mark]: jest.fn(),
            };

            startRecordingObservations(observer);

            // Inner recording: observations go to secondObserver, not observer.
            startRecordingObservations(secondObserver);
            recordObservation(observables[1], false);
            recordObservation(observables[2], false);
            stopRecordingObservations();

            expect(observer[dependencies]).toHaveLength(0);
            expect(secondObserver[dependencies]).toEqual(observables.slice(1));
            expect(observables[0][observers]).toHaveLength(0);
            expect(observables[1][observers]).toEqual([secondObserver]);
            expect(observables[2][observers]).toEqual([secondObserver]);

            // Back in the outer recording: observations go to observer again.
            recordObservation(observables[0], false);
            recordObservation(observables[1], false);
            stopRecordingObservations();

            expect(observer[dependencies]).toEqual(observables.slice(0, 2));
            expect(secondObserver[dependencies]).toEqual(observables.slice(1));
            expect(observables[0][observers]).toEqual([observer]);
            expect(observables[1][observers]).toEqual([secondObserver, observer]);
            expect(observables[2][observers]).toEqual([secondObserver]);
        });

        it('should fail when trying to record a cyclic dependency of derivables', () => {
            const secondObserver: TrackedObserver = {
                id: 4,
                [dependencies]: [],
                [dependencyVersions]: {},
                [mark]: jest.fn(),
            };

            startRecordingObservations(observer);
            startRecordingObservations(secondObserver);

            // Re-entering a recording for an observer already on the stack is a cycle.
            expect(() => startRecordingObservations(observer)).toThrowError(
                'cyclic dependency between derivables detected',
            );

            stopRecordingObservations();
            stopRecordingObservations();
        });
    });
});
Two volunteers who talked to folks at the Women's March in San Francisco about KnockEveryDoor and signed them up to get involved. In the wake of Trump's election, Becky Bond says, people began to ask her what they could do. Beyond just donating money to campaigns or showing up to protests, she says, people wanted to be active in their communities, talking to their neighbors. She and some of her Sanders campaign colleagues decided to create a platform for people to do just that, and thus KnockEveryDoor was born. The big data strategy is where essentially you hire a bunch of data consultants to run a bunch of models to find out “What is the smallest number of people you can talk to and win? Who are those people and what do they care about?” We need to talk to everybody. When you talk to a small group of people, they may not reflect back what the campaign needs to hear and about what is really going on with most of the constituents in that race. I think that campaigns need to hear from the majority of the people how policies are affecting their lives. Then, that could really change what politicians decide to talk about and fight for. One of the things that I really learned from talking to people across the country is that the people that are not participating in elections, the so-called “low information voters,” it is not that they are ignorant people at all. In fact, time and time again, when I talk to them I come away feeling like they have a very sophisticated political analysis and they are choosing not to participate in politics. Not because they don’t know, but because their liberation is not on the ballot or they don’t see how voting is actually going to materially change anything in their lives. I think that reestablishing the feedback loop of talking voters is doing an important thing. 
Those concerns from the people who are not participating can then become something that politicians take into account — not just the views of the narrow slice of voters who they think will put them over the top.
package main

import (
	"../util"
	"fmt"
	"html/template"
	"io"
	"log"
	"net"
	"net/http"
	"net/url"
	"regexp"
	"sort"
	"strings"
)

// Parsed HTML templates for all admin pages, loaded once at server start.
var templates *template.Template

// httpServer parses the page templates and serves the nsqadmin UI on the
// given listener until the listener is closed.
func httpServer(listener net.Listener) {
	var err error
	templates, err = template.ParseGlob(fmt.Sprintf("%s/*.html", *templateDir))
	if err != nil {
		log.Printf("ERROR: %s", err.Error())
	}

	log.Printf("HTTP: listening on %s", listener.Addr().String())

	handler := http.NewServeMux()
	handler.HandleFunc("/ping", pingHandler)
	handler.HandleFunc("/", indexHandler)
	handler.HandleFunc("/nodes", nodesHandler)
	handler.HandleFunc("/topic/", topicHandler)
	handler.HandleFunc("/delete_topic", deleteTopicHandler)
	handler.HandleFunc("/delete_channel", deleteChannelHandler)
	handler.HandleFunc("/empty_channel", emptyChannelHandler)

	server := &http.Server{
		Handler: handler,
	}
	err = server.Serve(listener)
	// theres no direct way to detect this error because it is not exposed
	if err != nil && !strings.Contains(err.Error(), "use of closed network connection") {
		log.Printf("ERROR: http.Serve() - %s", err.Error())
	}

	log.Printf("HTTP: closing %s", listener.Addr().String())
}

// pingHandler is a liveness probe: always responds "OK".
func pingHandler(w http.ResponseWriter, req *http.Request) {
	w.Header().Set("Content-Length", "2")
	io.WriteString(w, "OK")
}

// indexHandler renders the topic list, preferring lookupd as the source of
// truth and falling back to querying nsqd instances directly.
func indexHandler(w http.ResponseWriter, req *http.Request) {
	var topics []string
	if len(lookupdHTTPAddrs) != 0 {
		topics, _ = getLookupdTopics(lookupdHTTPAddrs)
	} else {
		topics, _ = getNSQDTopics(nsqdHTTPAddrs)
	}
	sort.Strings(topics)

	p := struct {
		Title   string
		Topics  []string
		Version string
	}{
		Title:   "NSQ",
		Topics:  topics,
		Version: VERSION,
	}
	err := templates.ExecuteTemplate(w, "index.html", p)
	if err != nil {
		log.Printf("Template Error %s", err.Error())
		http.Error(w, "Template Error", 500)
	}
}

// topicHandler serves /topic/<topic> and delegates /topic/<topic>/<channel>
// to channelHandler. The URL is parsed with a regex since ServeMux only
// matches the /topic/ prefix.
func topicHandler(w http.ResponseWriter, req *http.Request) {
	var urlRegex = regexp.MustCompile(`^/topic/([a-zA-Z0-9_-]+)(/([-_a-zA-Z0-9]+(#ephemeral)?))?$`)
	matches := urlRegex.FindStringSubmatch(req.URL.Path)
	if len(matches) == 0 {
		http.NotFound(w, req)
		return
	}
	topic := matches[1]
	// Submatch 3 is the optional channel segment.
	if len(matches) >= 4 && len(matches[3]) > 0 {
		channel := matches[3]
		channelHandler(w, req, topic, channel)
		return
	}

	var producers []string
	if len(lookupdHTTPAddrs) != 0 {
		producers, _ = getLookupdTopicProducers(topic, lookupdHTTPAddrs)
	} else {
		producers, _ = getNsqdTopicProducers(topic, nsqdHTTPAddrs)
	}
	topicHostStats, channelStats, _ := getNSQDStats(producers, topic)

	// Aggregate per-host stats into a synthetic "Total" row.
	globalTopicStats := &TopicHostStats{HostAddress: "Total"}
	for _, t := range topicHostStats {
		globalTopicStats.AddHostStats(t)
	}

	p := struct {
		Title            string
		Version          string
		Topic            string
		TopicProducers   []string
		TopicHostStats   []*TopicHostStats
		GlobalTopicStats *TopicHostStats
		ChannelStats     map[string]*ChannelStats
	}{
		Title:            fmt.Sprintf("NSQ %s", topic),
		Version:          VERSION,
		Topic:            topic,
		TopicProducers:   producers,
		TopicHostStats:   topicHostStats,
		GlobalTopicStats: globalTopicStats,
		ChannelStats:     channelStats,
	}
	err := templates.ExecuteTemplate(w, "topic.html", p)
	if err != nil {
		log.Printf("Template Error %s", err.Error())
		http.Error(w, "Template Error", 500)
	}
}

// channelHandler renders the stats page for a single channel of a topic.
func channelHandler(w http.ResponseWriter, req *http.Request, topic string, channel string) {
	var producers []string
	if len(lookupdHTTPAddrs) != 0 {
		producers, _ = getLookupdTopicProducers(topic, lookupdHTTPAddrs)
	} else {
		producers, _ = getNsqdTopicProducers(topic, nsqdHTTPAddrs)
	}
	_, allChannelStats, _ := getNSQDStats(producers, topic)
	channelStats := allChannelStats[channel]

	p := struct {
		Title          string
		Version        string
		Topic          string
		Channel        string
		TopicProducers []string
		ChannelStats   *ChannelStats
	}{
		Title:          fmt.Sprintf("NSQ %s / %s", topic, channel),
		Version:        VERSION,
		Topic:          topic,
		Channel:        channel,
		TopicProducers: producers,
		ChannelStats:   channelStats,
	}
	err := templates.ExecuteTemplate(w, "channel.html", p)
	if err != nil {
		log.Printf("Template Error %s", err.Error())
		http.Error(w, "Template Error", 500)
	}
}

// deleteTopicHandler removes a topic from every lookupd and every nsqd that
// produces it, then redirects back to the index. Per-host failures are
// logged and skipped (best effort).
func deleteTopicHandler(w http.ResponseWriter, req *http.Request) {
	reqParams, err := util.NewReqParams(req)
	if err != nil {
		log.Printf("ERROR: failed to parse request params - %s", err.Error())
		http.Error(w, "INVALID_REQUEST", 500)
		return
	}

	topicName, err := reqParams.Query("topic")
	if err != nil {
		http.Error(w, "MISSING_ARG_TOPIC", 500)
		return
	}

	// for topic removal, you need to get all the producers *first*
	producers, _ := getLookupdTopicProducers(topicName, lookupdHTTPAddrs)

	// remove the topic from all the lookupds
	for _, addr := range lookupdHTTPAddrs {
		endpoint := fmt.Sprintf("http://%s/delete_topic?topic=%s", addr, url.QueryEscape(topicName))
		log.Printf("LOOKUPD: querying %s", endpoint)

		_, err := util.ApiRequest(endpoint)
		if err != nil {
			log.Printf("ERROR: lookupd %s - %s", endpoint, err.Error())
			continue
		}
	}

	// now remove the topic from all the producers
	for _, addr := range producers {
		endpoint := fmt.Sprintf("http://%s/delete_topic?topic=%s", addr, url.QueryEscape(topicName))
		log.Printf("NSQD: querying %s", endpoint)

		_, err := util.ApiRequest(endpoint)
		if err != nil {
			log.Printf("ERROR: nsqd %s - %s", endpoint, err.Error())
			continue
		}
	}

	http.Redirect(w, req, "/", 302)
}

// deleteChannelHandler removes a channel from every lookupd and every nsqd
// that produces its topic, then redirects back to the topic page.
func deleteChannelHandler(w http.ResponseWriter, req *http.Request) {
	reqParams, err := util.NewReqParams(req)
	if err != nil {
		log.Printf("ERROR: failed to parse request params - %s", err.Error())
		http.Error(w, "INVALID_REQUEST", 500)
		return
	}

	topicName, channelName, err := util.GetTopicChannelArgs(reqParams)
	if err != nil {
		http.Error(w, err.Error(), 500)
		return
	}

	for _, addr := range lookupdHTTPAddrs {
		endpoint := fmt.Sprintf("http://%s/delete_channel?topic=%s&channel=%s",
			addr, url.QueryEscape(topicName), url.QueryEscape(channelName))
		log.Printf("LOOKUPD: querying %s", endpoint)

		_, err := util.ApiRequest(endpoint)
		if err != nil {
			log.Printf("ERROR: lookupd %s - %s", endpoint, err.Error())
			continue
		}
	}

	producers, _ := getLookupdTopicProducers(topicName, lookupdHTTPAddrs)
	for _, addr := range producers {
		endpoint := fmt.Sprintf("http://%s/delete_channel?topic=%s&channel=%s",
			addr, url.QueryEscape(topicName), url.QueryEscape(channelName))
		log.Printf("NSQD: querying %s", endpoint)

		_, err := util.ApiRequest(endpoint)
		if err != nil {
			log.Printf("ERROR: nsqd %s - %s", endpoint, err.Error())
			continue
		}
	}

	http.Redirect(w, req, fmt.Sprintf("/topic/%s", url.QueryEscape(topicName)), 302)
}

// emptyChannelHandler drains a channel's queue on every producing nsqd,
// then redirects back to the topic page.
func emptyChannelHandler(w http.ResponseWriter, req *http.Request) {
	reqParams, err := util.NewReqParams(req)
	if err != nil {
		log.Printf("ERROR: failed to parse request params - %s", err.Error())
		http.Error(w, "INVALID_REQUEST", 500)
		return
	}

	topicName, channelName, err := util.GetTopicChannelArgs(reqParams)
	if err != nil {
		http.Error(w, err.Error(), 500)
		return
	}

	producers, _ := getLookupdTopicProducers(topicName, lookupdHTTPAddrs)
	for _, addr := range producers {
		endpoint := fmt.Sprintf("http://%s/empty_channel?topic=%s&channel=%s",
			addr, url.QueryEscape(topicName), url.QueryEscape(channelName))
		log.Printf("NSQD: calling %s", endpoint)

		_, err := util.ApiRequest(endpoint)
		if err != nil {
			log.Printf("ERROR: nsqd %s - %s", endpoint, err.Error())
			continue
		}
	}

	http.Redirect(w, req, fmt.Sprintf("/topic/%s", url.QueryEscape(topicName)), 302)
}

// nodesHandler renders the list of nsqd nodes known to the lookupds.
func nodesHandler(w http.ResponseWriter, req *http.Request) {
	producers, _ := getLookupdProducers(lookupdHTTPAddrs)

	p := struct {
		Title     string
		Version   string
		Producers []*Producer
	}{
		Title:     "NSQD Hosts",
		Version:   VERSION,
		Producers: producers,
	}
	err := templates.ExecuteTemplate(w, "nodes.html", p)
	if err != nil {
		log.Printf("Template Error %s", err.Error())
		http.Error(w, "Template Error", 500)
	}
}
/**
 * Cache of build outputs, keyed by an arbitrary cache key.
 *
 * Each entry may carry a `src` and/or a `lib` value; both are optional
 * strings (presumably built file contents or paths — confirm at the
 * call sites that populate this cache).
 */
export interface IBuildsCache {
  [key: string]: {
    src?: string;
    lib?: string;
  };
}
Intelligent Condition Diagnosis Method for Rotating Machinery Based on Probability Density and Discriminant Analyses This letter puts forward a method for intelligent condition diagnosis of rotating machinery using the probability density analysis and the canonical discriminant analysis (CDA) comprising the following steps. First, the noise is cancelled by statistics filter (SF), and the probability density functions (PDFs) of the vibration signals measured in each state are determined. Second, the segment values of the PDFs of the vibration signals are calculated and the integrated symptom parameters (ISPs) are combined using CDA. Third, Mahalanobis distances between the ISPs are introduced to identify the machine state. Moreover, the selecting discrimination index is optimized according to the accuracy rate of the identification. The efficacy of this novel method was confirmed by the results of the condition diagnosis for a centrifugal blower.
// Barrel file: re-exports the public API of the config and linter modules.
export * from './config';
export * from './linter';
<reponame>HJianFei/TCPServer package stickpacket; import java.io.IOException; import java.io.InputStream; import java.util.ArrayList; import java.util.Arrays; import java.util.List; import utils.ExceptionUtils; /** * 特定字符的粘包处理,首尾各一个Byte[], * 不可以同时为空, 如果其中一个为空, * 那么以不为空的作为分割标记 * 例:协议制定为 ^+数据+$,首就是^,尾是$ */ public class SpecifiedStickPackageHelper implements AbsStickPackageHelper { private byte[] head; private byte[] tail; private List<Byte> bytes; private int headLen, tailLen; public SpecifiedStickPackageHelper(byte[] head, byte[] tail) { this.head = head; this.tail = tail; if (head == null || tail == null) { ExceptionUtils.throwException(" head or tail ==null"); } if (head.length == 0 && tail.length == 0) { ExceptionUtils.throwException(" head and tail length==0"); } headLen = head.length; tailLen = tail.length; bytes = new ArrayList<>(); } private boolean endWith(Byte[] src, byte[] target) { if (src.length < target.length) { return false; } for (int i = 0; i < target.length; i++) {// 逆序比较 if (target[target.length - i - 1] != src[src.length - i - 1]) { return false; } } return true; } private byte[] getRangeBytes(List<Byte> list, int start, int end) { Byte[] temps = Arrays.copyOfRange(list.toArray(new Byte[0]), start, end); byte[] result = new byte[temps.length]; for (int i = 0; i < result.length; i++) { result[i] = temps[i]; } return result; } @Override public byte[] execute(InputStream is) { bytes.clear(); int len = -1; byte temp; int startIndex = -1; byte[] result = null; boolean isFindStart = false, isFindEnd = false; try { while ((len = is.read()) != -1) { temp = (byte) len; bytes.add(temp); Byte[] byteArray = bytes.toArray(new Byte[] {}); if (headLen == 0 || tailLen == 0) {// 只有头或尾标记 if (endWith(byteArray, head) || endWith(byteArray, tail)) { if (startIndex == -1) { startIndex = bytes.size() - headLen; } else {// 找到了 result = getRangeBytes(bytes, startIndex, bytes.size()); break; } } } else { if (!isFindStart) { if (endWith(byteArray, head)) { startIndex = 
bytes.size() - headLen; isFindStart = true; } } else if (!isFindEnd) { if (endWith(byteArray, tail)) { if (startIndex + headLen <= bytes.size() - tailLen) { isFindEnd = true; result = getRangeBytes(bytes, startIndex, bytes.size()); break; } } } } } if (len == -1) { return null; } } catch (IOException e) { e.printStackTrace(); return null; } return result; } }
A Learning Framework for Size and Type Independent Transient Stability Prediction of Power System Using Twin Convolutional Support Vector Machine Real-time transient stability assessment (TSA) of power systems is an important real-world problem in electrical energy engineering and in the field of pattern recognition. The definition of the most discriminative trajectory features and of a proper supervised trajectory-based classifier has remained a motivational challenge for scholars vis-à-vis real-time TSA. In addition, the increase in the consumption of electrical energy, along with constraints such as the amortization of network equipment, induces a risk of electric power system inadequacy. The retrieval of power system adequacy involves network expansion planning, such as installing new power plants. This policy affects the structure and electrical specification of the network significantly. Furthermore, due to sudden or scheduled tripping of network equipment stemming from the action of protection devices or maintenance procedures, the network must undergo shallow structural changes. The different levels of change in network specification are becoming a potential barrier for network analysis tools such as real-time TSA platforms. In fact, the lack of consideration of the incompatibility of the TSA tool with expansion planning affects the performance of a TSA learning model that is trained using the pre-expansion network. However, this paradoxical problem can be solved by generalized learning for power system size & type independent (PSs&tInd) real-time TSA. For this purpose, first, we used a set of PSs&tInd trajectory features. Next, we presented a trajectory-based deep neuro classifier to eliminate the weaknesses of kernel functions plugged into the hyperplane-based classifier. Finally, experimental comparisons were conducted to assess the efficacy of the proposed framework. The results showed that the proposed technique offered high generalization capacity on real-time TSA during network expansion.
WASHINGTON -- President Reagan, in a switch of position, will support legislation to force the states to raise their minimum drinking age to 21, Transportation Secretary Elizabeth Dole announced Wednesday. The legislation -- attached to a federal highway bill -- would give states two years to set drinking ages of 21. If states do not, 5 percent of their federal highway funds would be cut the first year and 10 percent would be cut the next year. The measure was opposed by the administration when it was approved by the House a week ago. But Mrs. Dole and Sen. Richard Lugar, R-Ind., said they persuaded Reagan to change his mind. With Reagan's support now, it is likely to be passed within the next few weeks by the Republican-led Senate. On Tuesday, Tennessee became the 24th state in the nation to increase its legal drinking age for all alcoholic beverages to 21. Ten states have a minimum drinking age of 21 for hard liquor, but permit younger persons to buy wine and beer. 'Today I am announcing administration support for legislation to withhold 5 percent of highway funds to states that do not raise the drinking age to 21,' Mrs. Dole told a news conference on the Capitol steps. She was joined by Lugar, Sen. Frank Lautenberg, D-N.J., Rep. James Howard, D-N.J., and leaders of Mothers Against Drunk Drivers. 'Because of the work of groups like MADD and the concern of hundreds of high school organizations called SADD (Students Against Driving Drunk), Secretary Dole and I were able to convince the president to back this legislation,' Lugar said. 'I will work as hard as I can to obtain a bipartisan majority in the Senate for this legislation. I am confident we will be successful,' Lugar said. Mrs. Dole noted that decisions such as the minimum drinking age 'traditionally have been left to the states.' While many states have raised their drinking age, 19 states failed this year to increase the minimum age to 21.
'We must increase the drinking age to 21 in all of our states to protect all of our people,' Mrs. Dole said. The Insurance Institute for Highway Safety estimates that at least 1,250 lives could be saved annually if all states raised the drinking age to 21. Candy Lightner, founder of MADD, said nearly 50 percent of all fatal highway crashes involving 18- to 20-year-old drivers are alcohol-related.