code_text
stringlengths
604
999k
repo_name
stringlengths
4
100
file_path
stringlengths
4
873
language
stringclasses
23 values
license
stringclasses
15 values
size
int32
1.02k
999k
// Package pagespeedonline provides access to the PageSpeed Insights API. // // See https://developers.google.com/speed/docs/insights/v1/getting_started // // Usage example: // // import "google.golang.org/api/pagespeedonline/v1" // ... // pagespeedonlineService, err := pagespeedonline.New(oauthHttpClient) package pagespeedonline // import "google.golang.org/api/pagespeedonline/v1" import ( "bytes" "encoding/json" "errors" "fmt" context "golang.org/x/net/context" ctxhttp "golang.org/x/net/context/ctxhttp" gensupport "google.golang.org/api/gensupport" googleapi "google.golang.org/api/googleapi" "io" "net/http" "net/url" "strconv" "strings" ) // Always reference these packages, just in case the auto-generated code // below doesn't. var _ = bytes.NewBuffer var _ = strconv.Itoa var _ = fmt.Sprintf var _ = json.NewDecoder var _ = io.Copy var _ = url.Parse var _ = gensupport.MarshalJSON var _ = googleapi.Version var _ = errors.New var _ = strings.Replace var _ = context.Canceled var _ = ctxhttp.Do const apiId = "pagespeedonline:v1" const apiName = "pagespeedonline" const apiVersion = "v1" const basePath = "https://www.googleapis.com/pagespeedonline/v1/" func New(client *http.Client) (*Service, error) { if client == nil { return nil, errors.New("client is nil") } s := &Service{client: client, BasePath: basePath} s.Pagespeedapi = NewPagespeedapiService(s) return s, nil } type Service struct { client *http.Client BasePath string // API endpoint base URL UserAgent string // optional additional User-Agent fragment Pagespeedapi *PagespeedapiService } func (s *Service) userAgent() string { if s.UserAgent == "" { return googleapi.UserAgent } return googleapi.UserAgent + " " + s.UserAgent } func NewPagespeedapiService(s *Service) *PagespeedapiService { rs := &PagespeedapiService{s: s} return rs } type PagespeedapiService struct { s *Service } type Result struct { // FormattedResults: Localized PageSpeed results. Contains a ruleResults // entry for each PageSpeed rule instantiated and run by the server. FormattedResults *ResultFormattedResults `json:"formattedResults,omitempty"` // Id: Canonicalized and final URL for the document, after following // page redirects (if any). Id string `json:"id,omitempty"` // InvalidRules: List of rules that were specified in the request, but // which the server did not know how to instantiate. InvalidRules []string `json:"invalidRules,omitempty"` // Kind: Kind of result. Kind string `json:"kind,omitempty"` // PageStats: Summary statistics for the page, such as number of // JavaScript bytes, number of HTML bytes, etc. PageStats *ResultPageStats `json:"pageStats,omitempty"` // ResponseCode: Response code for the document. 200 indicates a normal // page load. 4xx/5xx indicates an error. ResponseCode int64 `json:"responseCode,omitempty"` // Score: The PageSpeed Score (0-100), which indicates how much faster a // page could be. A high score indicates little room for improvement, // while a lower score indicates more room for improvement. Score int64 `json:"score,omitempty"` // Screenshot: Base64-encoded screenshot of the page that was analyzed. Screenshot *ResultScreenshot `json:"screenshot,omitempty"` // Title: Title of the page, as displayed in the browser's title bar. Title string `json:"title,omitempty"` // Version: The version of PageSpeed used to generate these results. Version *ResultVersion `json:"version,omitempty"` // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` // ForceSendFields is a list of field names (e.g. "FormattedResults") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "FormattedResults") to // include in API requests with the JSON null value. By default, fields // with empty values are omitted from API requests. However, any field // with an empty value appearing in NullFields will be sent to the // server as null. It is an error if a field in this list has a // non-empty value. This may be used to include null fields in Patch // requests. NullFields []string `json:"-"` } func (s *Result) MarshalJSON() ([]byte, error) { type noMethod Result raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // ResultFormattedResults: Localized PageSpeed results. Contains a // ruleResults entry for each PageSpeed rule instantiated and run by the // server. type ResultFormattedResults struct { // Locale: The locale of the formattedResults, e.g. "en_US". Locale string `json:"locale,omitempty"` // RuleResults: Dictionary of formatted rule results, with one entry for // each PageSpeed rule instantiated and run by the server. RuleResults map[string]ResultFormattedResultsRuleResults `json:"ruleResults,omitempty"` // ForceSendFields is a list of field names (e.g. "Locale") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "Locale") to include in API // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } func (s *ResultFormattedResults) MarshalJSON() ([]byte, error) { type noMethod ResultFormattedResults raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // ResultFormattedResultsRuleResults: The enum-like identifier for this // rule. For instance "EnableKeepAlive" or "AvoidCssImport". Not // localized. type ResultFormattedResultsRuleResults struct { // LocalizedRuleName: Localized name of the rule, intended for // presentation to a user. LocalizedRuleName string `json:"localizedRuleName,omitempty"` // RuleImpact: The impact (unbounded floating point value) that // implementing the suggestions for this rule would have on making the // page faster. Impact is comparable between rules to determine which // rule's suggestions would have a higher or lower impact on making a // page faster. For instance, if enabling compression would save 1MB, // while optimizing images would save 500kB, the enable compression rule // would have 2x the impact of the image optimization rule, all other // things being equal. RuleImpact float64 `json:"ruleImpact,omitempty"` // UrlBlocks: List of blocks of URLs. Each block may contain a heading // and a list of URLs. Each URL may optionally include additional // details. UrlBlocks []*ResultFormattedResultsRuleResultsUrlBlocks `json:"urlBlocks,omitempty"` // ForceSendFields is a list of field names (e.g. "LocalizedRuleName") // to unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "LocalizedRuleName") to // include in API requests with the JSON null value. By default, fields // with empty values are omitted from API requests. However, any field // with an empty value appearing in NullFields will be sent to the // server as null. It is an error if a field in this list has a // non-empty value. This may be used to include null fields in Patch // requests. NullFields []string `json:"-"` } func (s *ResultFormattedResultsRuleResults) MarshalJSON() ([]byte, error) { type noMethod ResultFormattedResultsRuleResults raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } func (s *ResultFormattedResultsRuleResults) UnmarshalJSON(data []byte) error { type noMethod ResultFormattedResultsRuleResults var s1 struct { RuleImpact gensupport.JSONFloat64 `json:"ruleImpact"` *noMethod } s1.noMethod = (*noMethod)(s) if err := json.Unmarshal(data, &s1); err != nil { return err } s.RuleImpact = float64(s1.RuleImpact) return nil } type ResultFormattedResultsRuleResultsUrlBlocks struct { // Header: Heading to be displayed with the list of URLs. Header *ResultFormattedResultsRuleResultsUrlBlocksHeader `json:"header,omitempty"` // Urls: List of entries that provide information about URLs in the url // block. Optional. Urls []*ResultFormattedResultsRuleResultsUrlBlocksUrls `json:"urls,omitempty"` // ForceSendFields is a list of field names (e.g. "Header") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "Header") to include in API // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } func (s *ResultFormattedResultsRuleResultsUrlBlocks) MarshalJSON() ([]byte, error) { type noMethod ResultFormattedResultsRuleResultsUrlBlocks raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // ResultFormattedResultsRuleResultsUrlBlocksHeader: Heading to be // displayed with the list of URLs. type ResultFormattedResultsRuleResultsUrlBlocksHeader struct { // Args: List of arguments for the format string. Args []*ResultFormattedResultsRuleResultsUrlBlocksHeaderArgs `json:"args,omitempty"` // Format: A localized format string with $N placeholders, where N is // the 1-indexed argument number, e.g. 'Minifying the following $1 // resources would save a total of $2 bytes'. Format string `json:"format,omitempty"` // ForceSendFields is a list of field names (e.g. "Args") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "Args") to include in API // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } func (s *ResultFormattedResultsRuleResultsUrlBlocksHeader) MarshalJSON() ([]byte, error) { type noMethod ResultFormattedResultsRuleResultsUrlBlocksHeader raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } type ResultFormattedResultsRuleResultsUrlBlocksHeaderArgs struct { // Type: Type of argument. One of URL, STRING_LITERAL, INT_LITERAL, // BYTES, or DURATION. Type string `json:"type,omitempty"` // Value: Argument value, as a localized string. Value string `json:"value,omitempty"` // ForceSendFields is a list of field names (e.g. "Type") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "Type") to include in API // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } func (s *ResultFormattedResultsRuleResultsUrlBlocksHeaderArgs) MarshalJSON() ([]byte, error) { type noMethod ResultFormattedResultsRuleResultsUrlBlocksHeaderArgs raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } type ResultFormattedResultsRuleResultsUrlBlocksUrls struct { // Details: List of entries that provide additional details about a // single URL. Optional. Details []*ResultFormattedResultsRuleResultsUrlBlocksUrlsDetails `json:"details,omitempty"` // Result: A format string that gives information about the URL, and a // list of arguments for that format string. Result *ResultFormattedResultsRuleResultsUrlBlocksUrlsResult `json:"result,omitempty"` // ForceSendFields is a list of field names (e.g. "Details") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "Details") to include in // API requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } func (s *ResultFormattedResultsRuleResultsUrlBlocksUrls) MarshalJSON() ([]byte, error) { type noMethod ResultFormattedResultsRuleResultsUrlBlocksUrls raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } type ResultFormattedResultsRuleResultsUrlBlocksUrlsDetails struct { // Args: List of arguments for the format string. Args []*ResultFormattedResultsRuleResultsUrlBlocksUrlsDetailsArgs `json:"args,omitempty"` // Format: A localized format string with $N placeholders, where N is // the 1-indexed argument number, e.g. 'Unnecessary metadata for this // resource adds an additional $1 bytes to its download size'. Format string `json:"format,omitempty"` // ForceSendFields is a list of field names (e.g. "Args") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "Args") to include in API // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } func (s *ResultFormattedResultsRuleResultsUrlBlocksUrlsDetails) MarshalJSON() ([]byte, error) { type noMethod ResultFormattedResultsRuleResultsUrlBlocksUrlsDetails raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } type ResultFormattedResultsRuleResultsUrlBlocksUrlsDetailsArgs struct { // Type: Type of argument. One of URL, STRING_LITERAL, INT_LITERAL, // BYTES, or DURATION. Type string `json:"type,omitempty"` // Value: Argument value, as a localized string. Value string `json:"value,omitempty"` // ForceSendFields is a list of field names (e.g. "Type") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "Type") to include in API // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } func (s *ResultFormattedResultsRuleResultsUrlBlocksUrlsDetailsArgs) MarshalJSON() ([]byte, error) { type noMethod ResultFormattedResultsRuleResultsUrlBlocksUrlsDetailsArgs raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // ResultFormattedResultsRuleResultsUrlBlocksUrlsResult: A format string // that gives information about the URL, and a list of arguments for // that format string. type ResultFormattedResultsRuleResultsUrlBlocksUrlsResult struct { // Args: List of arguments for the format string. Args []*ResultFormattedResultsRuleResultsUrlBlocksUrlsResultArgs `json:"args,omitempty"` // Format: A localized format string with $N placeholders, where N is // the 1-indexed argument number, e.g. 'Minifying the resource at URL $1 // can save $2 bytes'. Format string `json:"format,omitempty"` // ForceSendFields is a list of field names (e.g. "Args") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "Args") to include in API // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } func (s *ResultFormattedResultsRuleResultsUrlBlocksUrlsResult) MarshalJSON() ([]byte, error) { type noMethod ResultFormattedResultsRuleResultsUrlBlocksUrlsResult raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } type ResultFormattedResultsRuleResultsUrlBlocksUrlsResultArgs struct { // Type: Type of argument. One of URL, STRING_LITERAL, INT_LITERAL, // BYTES, or DURATION. Type string `json:"type,omitempty"` // Value: Argument value, as a localized string. Value string `json:"value,omitempty"` // ForceSendFields is a list of field names (e.g. "Type") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "Type") to include in API // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } func (s *ResultFormattedResultsRuleResultsUrlBlocksUrlsResultArgs) MarshalJSON() ([]byte, error) { type noMethod ResultFormattedResultsRuleResultsUrlBlocksUrlsResultArgs raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // ResultPageStats: Summary statistics for the page, such as number of // JavaScript bytes, number of HTML bytes, etc. type ResultPageStats struct { // CssResponseBytes: Number of uncompressed response bytes for CSS // resources on the page. CssResponseBytes int64 `json:"cssResponseBytes,omitempty,string"` // FlashResponseBytes: Number of response bytes for flash resources on // the page. FlashResponseBytes int64 `json:"flashResponseBytes,omitempty,string"` // HtmlResponseBytes: Number of uncompressed response bytes for the main // HTML document and all iframes on the page. HtmlResponseBytes int64 `json:"htmlResponseBytes,omitempty,string"` // ImageResponseBytes: Number of response bytes for image resources on // the page. ImageResponseBytes int64 `json:"imageResponseBytes,omitempty,string"` // JavascriptResponseBytes: Number of uncompressed response bytes for JS // resources on the page. JavascriptResponseBytes int64 `json:"javascriptResponseBytes,omitempty,string"` // NumberCssResources: Number of CSS resources referenced by the page. NumberCssResources int64 `json:"numberCssResources,omitempty"` // NumberHosts: Number of unique hosts referenced by the page. NumberHosts int64 `json:"numberHosts,omitempty"` // NumberJsResources: Number of JavaScript resources referenced by the // page. NumberJsResources int64 `json:"numberJsResources,omitempty"` // NumberResources: Number of HTTP resources loaded by the page. NumberResources int64 `json:"numberResources,omitempty"` // NumberStaticResources: Number of static (i.e. cacheable) resources on // the page. NumberStaticResources int64 `json:"numberStaticResources,omitempty"` // OtherResponseBytes: Number of response bytes for other resources on // the page. OtherResponseBytes int64 `json:"otherResponseBytes,omitempty,string"` // TextResponseBytes: Number of uncompressed response bytes for text // resources not covered by other statistics (i.e non-HTML, non-script, // non-CSS resources) on the page. TextResponseBytes int64 `json:"textResponseBytes,omitempty,string"` // TotalRequestBytes: Total size of all request bytes sent by the page. TotalRequestBytes int64 `json:"totalRequestBytes,omitempty,string"` // ForceSendFields is a list of field names (e.g. "CssResponseBytes") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "CssResponseBytes") to // include in API requests with the JSON null value. By default, fields // with empty values are omitted from API requests. However, any field // with an empty value appearing in NullFields will be sent to the // server as null. It is an error if a field in this list has a // non-empty value. This may be used to include null fields in Patch // requests. NullFields []string `json:"-"` } func (s *ResultPageStats) MarshalJSON() ([]byte, error) { type noMethod ResultPageStats raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // ResultScreenshot: Base64-encoded screenshot of the page that was // analyzed. type ResultScreenshot struct { // Data: Image data base64 encoded. Data string `json:"data,omitempty"` // Height: Height of screenshot in pixels. Height int64 `json:"height,omitempty"` // MimeType: Mime type of image data. E.g. "image/jpeg". MimeType string `json:"mime_type,omitempty"` // Width: Width of screenshot in pixels. Width int64 `json:"width,omitempty"` // ForceSendFields is a list of field names (e.g. "Data") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "Data") to include in API // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } func (s *ResultScreenshot) MarshalJSON() ([]byte, error) { type noMethod ResultScreenshot raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // ResultVersion: The version of PageSpeed used to generate these // results. type ResultVersion struct { // Major: The major version number of PageSpeed used to generate these // results. Major int64 `json:"major,omitempty"` // Minor: The minor version number of PageSpeed used to generate these // results. Minor int64 `json:"minor,omitempty"` // ForceSendFields is a list of field names (e.g. "Major") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "Major") to include in API // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } func (s *ResultVersion) MarshalJSON() ([]byte, error) { type noMethod ResultVersion raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // method id "pagespeedonline.pagespeedapi.runpagespeed": type PagespeedapiRunpagespeedCall struct { s *Service urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } // Runpagespeed: Runs PageSpeed analysis on the page at the specified // URL, and returns a PageSpeed score, a list of suggestions to make // that page faster, and other information. func (r *PagespeedapiService) Runpagespeed(url string) *PagespeedapiRunpagespeedCall { c := &PagespeedapiRunpagespeedCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.urlParams_.Set("url", url) return c } // FilterThirdPartyResources sets the optional parameter // "filter_third_party_resources": Indicates if third party resources // should be filtered out before PageSpeed analysis. func (c *PagespeedapiRunpagespeedCall) FilterThirdPartyResources(filterThirdPartyResources bool) *PagespeedapiRunpagespeedCall { c.urlParams_.Set("filter_third_party_resources", fmt.Sprint(filterThirdPartyResources)) return c } // Locale sets the optional parameter "locale": The locale used to // localize formatted results func (c *PagespeedapiRunpagespeedCall) Locale(locale string) *PagespeedapiRunpagespeedCall { c.urlParams_.Set("locale", locale) return c } // Rule sets the optional parameter "rule": A PageSpeed rule to run; if // none are given, all rules are run func (c *PagespeedapiRunpagespeedCall) Rule(rule ...string) *PagespeedapiRunpagespeedCall { c.urlParams_.SetMulti("rule", append([]string{}, rule...)) return c } // Screenshot sets the optional parameter "screenshot": Indicates if // binary data containing a screenshot should be included func (c *PagespeedapiRunpagespeedCall) Screenshot(screenshot bool) *PagespeedapiRunpagespeedCall { c.urlParams_.Set("screenshot", fmt.Sprint(screenshot)) return c } // Strategy sets the optional parameter "strategy": The analysis // strategy to use // // Possible values: // "desktop" - Fetch and analyze the URL for desktop browsers // "mobile" - Fetch and analyze the URL for mobile devices func (c *PagespeedapiRunpagespeedCall) Strategy(strategy string) *PagespeedapiRunpagespeedCall { c.urlParams_.Set("strategy", strategy) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. func (c *PagespeedapiRunpagespeedCall) Fields(s ...googleapi.Field) *PagespeedapiRunpagespeedCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } // IfNoneMatch sets the optional parameter which makes the operation // fail if the object's ETag matches the given value. This is useful for // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. func (c *PagespeedapiRunpagespeedCall) IfNoneMatch(entityTag string) *PagespeedapiRunpagespeedCall { c.ifNoneMatch_ = entityTag return c } // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. func (c *PagespeedapiRunpagespeedCall) Context(ctx context.Context) *PagespeedapiRunpagespeedCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. func (c *PagespeedapiRunpagespeedCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } func (c *PagespeedapiRunpagespeedCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "runPagespeed") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "pagespeedonline.pagespeedapi.runpagespeed" call. // Exactly one of *Result or error will be non-nil. Any non-2xx status // code is an error. Response headers are in either // *Result.ServerResponse.Header or (if a response was returned at all) // in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to // check whether the returned error was because http.StatusNotModified // was returned. func (c *PagespeedapiRunpagespeedCall) Do(opts ...googleapi.CallOption) (*Result, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } return nil, &googleapi.Error{ Code: res.StatusCode, Header: res.Header, } } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { return nil, err } ret := &Result{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, }, } target := &ret if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil // { // "description": "Runs PageSpeed analysis on the page at the specified URL, and returns a PageSpeed score, a list of suggestions to make that page faster, and other information.", // "httpMethod": "GET", // "id": "pagespeedonline.pagespeedapi.runpagespeed", // "parameterOrder": [ // "url" // ], // "parameters": { // "filter_third_party_resources": { // "default": "false", // "description": "Indicates if third party resources should be filtered out before PageSpeed analysis.", // "location": "query", // "type": "boolean" // }, // "locale": { // "description": "The locale used to localize formatted results", // "location": "query", // "pattern": "[a-zA-Z]+(_[a-zA-Z]+)?", // "type": "string" // }, // "rule": { // "description": "A PageSpeed rule to run; if none are given, all rules are run", // "location": "query", // "pattern": "[a-zA-Z]+", // "repeated": true, // "type": "string" // }, // "screenshot": { // "default": "false", // "description": "Indicates if binary data containing a screenshot should be included", // "location": "query", // "type": "boolean" // }, // "strategy": { // "description": "The analysis strategy to use", // "enum": [ // "desktop", // "mobile" // ], // "enumDescriptions": [ // "Fetch and analyze the URL for desktop browsers", // "Fetch and analyze the URL for mobile devices" // ], // "location": "query", // "type": "string" // }, // "url": { // "description": "The URL to fetch and analyze", // "location": "query", // "pattern": "(?i)http(s)?://.*", // "required": true, // "type": "string" // } // }, // "path": "runPagespeed", // "response": { // "$ref": "Result" // } // } }
cduchesne/rexray
vendor/google.golang.org/api/pagespeedonline/v1/pagespeedonline-gen.go
GO
apache-2.0
35,002
/* * Licensed to Elasticsearch under one or more contributor * license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright * ownership. Elasticsearch licenses this file to you under * the Apache License, Version 2.0 (the "License"); you may * not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package org.elasticsearch.index.analysis; import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.util.CharArraySet; import org.apache.lucene.analysis.util.ElisionFilter; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.inject.assistedinject.Assisted; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; import org.elasticsearch.index.Index; import org.elasticsearch.index.settings.IndexSettings; /** * */ public class ElisionTokenFilterFactory extends AbstractTokenFilterFactory { private final CharArraySet articles; @Inject public ElisionTokenFilterFactory(Index index, @IndexSettings Settings indexSettings, Environment env, @Assisted String name, @Assisted Settings settings) { super(index, indexSettings, name, settings); this.articles = Analysis.parseArticles(env, settings, version); } @Override public TokenStream create(TokenStream tokenStream) { return new ElisionFilter(tokenStream, articles); } }
dmiszkiewicz/elasticsearch
src/main/java/org/elasticsearch/index/analysis/ElisionTokenFilterFactory.java
Java
apache-2.0
1,854
/* * Copyright 2000-2015 JetBrains s.r.o. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.intellij.refactoring.rename; import com.intellij.featureStatistics.FeatureUsageTracker; import com.intellij.ide.scratch.ScratchFileType; import com.intellij.openapi.actionSystem.CommonDataKeys; import com.intellij.openapi.actionSystem.DataContext; import com.intellij.openapi.actionSystem.DataKey; import com.intellij.openapi.application.ApplicationManager; import com.intellij.openapi.diagnostic.Logger; import com.intellij.openapi.editor.Editor; import com.intellij.openapi.editor.ScrollType; import com.intellij.openapi.extensions.ExtensionPointName; import com.intellij.openapi.extensions.Extensions; import com.intellij.openapi.fileEditor.impl.NonProjectFileWritingAccessProvider; import com.intellij.openapi.project.Project; import com.intellij.openapi.ui.DialogWrapper; import com.intellij.openapi.ui.Messages; import com.intellij.openapi.util.Condition; import com.intellij.openapi.util.text.StringUtil; import com.intellij.openapi.vfs.VirtualFile; import com.intellij.psi.*; import com.intellij.psi.impl.source.tree.injected.InjectedLanguageUtil; import com.intellij.psi.meta.PsiMetaOwner; import com.intellij.psi.meta.PsiWritableMetaData; import com.intellij.psi.util.PsiUtilCore; import com.intellij.refactoring.RefactoringBundle; import com.intellij.refactoring.actions.BaseRefactoringAction; import com.intellij.refactoring.util.CommonRefactoringUtil; import com.intellij.usageView.UsageViewUtil; import org.jetbrains.annotations.NotNull; import org.jetbrains.annotations.Nullable; import java.util.Arrays; /** * created at Nov 13, 2001 * * @author Jeka, dsl */ public class PsiElementRenameHandler implements RenameHandler { private static final Logger LOG = Logger.getInstance("#com.intellij.refactoring.rename.PsiElementRenameHandler"); public static final ExtensionPointName<Condition<PsiElement>> VETO_RENAME_CONDITION_EP = ExtensionPointName.create("com.intellij.vetoRenameCondition"); public static DataKey<String> DEFAULT_NAME = DataKey.create("DEFAULT_NAME"); @Override public void invoke(@NotNull Project project, Editor editor, PsiFile file, DataContext dataContext) { PsiElement element = getElement(dataContext); if (element == null) { element = BaseRefactoringAction.getElementAtCaret(editor, file); } if (ApplicationManager.getApplication().isUnitTestMode()) { final String newName = DEFAULT_NAME.getData(dataContext); if (newName != null) { rename(element, project, element, editor, newName); return; } } editor.getScrollingModel().scrollToCaret(ScrollType.MAKE_VISIBLE); final PsiElement nameSuggestionContext = InjectedLanguageUtil.findElementAtNoCommit(file, editor.getCaretModel().getOffset()); invoke(element, project, nameSuggestionContext, editor); } @Override public void invoke(@NotNull Project project, @NotNull PsiElement[] elements, DataContext dataContext) { PsiElement element = elements.length == 1 ? elements[0] : null; if (element == null) element = getElement(dataContext); LOG.assertTrue(element != null); Editor editor = CommonDataKeys.EDITOR.getData(dataContext); if (ApplicationManager.getApplication().isUnitTestMode()) { final String newName = DEFAULT_NAME.getData(dataContext); LOG.assertTrue(newName != null); rename(element, project, element, editor, newName); } else { invoke(element, project, element, editor); } } public static void invoke(PsiElement element, Project project, PsiElement nameSuggestionContext, @Nullable Editor editor) { if (element != null && !canRename(project, editor, element)) { return; } VirtualFile contextFile = PsiUtilCore.getVirtualFile(nameSuggestionContext); if (nameSuggestionContext != null && nameSuggestionContext.isPhysical() && (contextFile == null || contextFile.getFileType() != ScratchFileType.INSTANCE) && !PsiManager.getInstance(project).isInProject(nameSuggestionContext)) { final String message = "Selected element is used from non-project files. These usages won't be renamed. Proceed anyway?"; if (ApplicationManager.getApplication().isUnitTestMode()) throw new CommonRefactoringUtil.RefactoringErrorHintException(message); if (Messages.showYesNoDialog(project, message, RefactoringBundle.getCannotRefactorMessage(null), Messages.getWarningIcon()) != Messages.YES) { return; } } FeatureUsageTracker.getInstance().triggerFeatureUsed("refactoring.rename"); rename(element, project, nameSuggestionContext, editor); } static boolean canRename(Project project, Editor editor, PsiElement element) throws CommonRefactoringUtil.RefactoringErrorHintException { String message = renameabilityStatus(project, element); if (StringUtil.isNotEmpty(message)) { showErrorMessage(project, editor, message); return false; } return true; } @Nullable static String renameabilityStatus(Project project, PsiElement element) { if (element == null) return ""; boolean hasRenameProcessor = RenamePsiElementProcessor.forElement(element) != RenamePsiElementProcessor.DEFAULT; boolean hasWritableMetaData = element instanceof PsiMetaOwner && ((PsiMetaOwner)element).getMetaData() instanceof PsiWritableMetaData; if (!hasRenameProcessor && !hasWritableMetaData && !(element instanceof PsiNamedElement)) { return RefactoringBundle.getCannotRefactorMessage(RefactoringBundle.message("error.wrong.caret.position.symbol.to.rename")); } if (!PsiManager.getInstance(project).isInProject(element)) { if (element.isPhysical()) { VirtualFile virtualFile = PsiUtilCore.getVirtualFile(element); if (!(virtualFile != null && NonProjectFileWritingAccessProvider.isWriteAccessAllowedExplicitly(virtualFile, project))) { String message = RefactoringBundle.message("error.out.of.project.element", UsageViewUtil.getType(element)); return RefactoringBundle.getCannotRefactorMessage(message); } } if (!element.isWritable()) { return RefactoringBundle.getCannotRefactorMessage(RefactoringBundle.message("error.cannot.be.renamed")); } } if (InjectedLanguageUtil.isInInjectedLanguagePrefixSuffix(element)) { final String message = RefactoringBundle.message("error.in.injected.lang.prefix.suffix", UsageViewUtil.getType(element)); return RefactoringBundle.getCannotRefactorMessage(message); } return null; } static void showErrorMessage(Project project, @Nullable Editor editor, String message) { CommonRefactoringUtil.showErrorHint(project, editor, message, RefactoringBundle.message("rename.title"), null); } public static void rename(PsiElement element, final Project project, PsiElement nameSuggestionContext, Editor editor) { rename(element, project, nameSuggestionContext, editor, null); } public static void rename(PsiElement element, final Project project, PsiElement nameSuggestionContext, Editor editor, String defaultName) { RenamePsiElementProcessor processor = RenamePsiElementProcessor.forElement(element); PsiElement substituted = processor.substituteElementToRename(element, editor); if (substituted == null || !canRename(project, editor, substituted)) return; RenameDialog dialog = processor.createRenameDialog(project, substituted, nameSuggestionContext, editor); if (defaultName == null && ApplicationManager.getApplication().isUnitTestMode()) { String[] strings = dialog.getSuggestedNames(); if (strings != null && strings.length > 0) { Arrays.sort(strings); defaultName = strings[0]; } else { defaultName = "undefined"; // need to avoid show dialog in test } } if (defaultName != null) { try { dialog.performRename(defaultName); } finally { dialog.close(DialogWrapper.CANCEL_EXIT_CODE); // to avoid dialog leak } } else { dialog.show(); } } @Override public boolean isAvailableOnDataContext(DataContext dataContext) { return !isVetoed(getElement(dataContext)); } public static boolean isVetoed(PsiElement element) { if (element == null || element instanceof SyntheticElement) return true; for(Condition<PsiElement> condition: Extensions.getExtensions(VETO_RENAME_CONDITION_EP)) { if (condition.value(element)) return true; } return false; } @Nullable public static PsiElement getElement(final DataContext dataContext) { PsiElement[] elementArray = BaseRefactoringAction.getPsiElementArray(dataContext); if (elementArray.length != 1) { return null; } return elementArray[0]; } @Override public boolean isRenaming(DataContext dataContext) { return isAvailableOnDataContext(dataContext); } }
fnouama/intellij-community
platform/lang-impl/src/com/intellij/refactoring/rename/PsiElementRenameHandler.java
Java
apache-2.0
9,486
// Copyright (C) 2014 Yasuhiro Matsumoto <mattn.jp@gmail.com>. // // Use of this source code is governed by an MIT-style // license that can be found in the LICENSE file. package sqlite3 /* #include <sqlite3-binding.h> #include <stdlib.h> */ import "C" import ( "runtime" "unsafe" ) type SQLiteBackup struct { b *C.sqlite3_backup } func (c *SQLiteConn) Backup(dest string, conn *SQLiteConn, src string) (*SQLiteBackup, error) { destptr := C.CString(dest) defer C.free(unsafe.Pointer(destptr)) srcptr := C.CString(src) defer C.free(unsafe.Pointer(srcptr)) if b := C.sqlite3_backup_init(c.db, destptr, conn.db, srcptr); b != nil { bb := &SQLiteBackup{b: b} runtime.SetFinalizer(bb, (*SQLiteBackup).Finish) return bb, nil } return nil, c.lastError() } // Backs up for one step. Calls the underlying `sqlite3_backup_step` function. // This function returns a boolean indicating if the backup is done and // an error signalling any other error. Done is returned if the underlying C // function returns SQLITE_DONE (Code 101) func (b *SQLiteBackup) Step(p int) (bool, error) { ret := C.sqlite3_backup_step(b.b, C.int(p)) if ret == C.SQLITE_DONE { return true, nil } else if ret != 0 && ret != C.SQLITE_LOCKED && ret != C.SQLITE_BUSY { return false, Error{Code: ErrNo(ret)} } return false, nil } func (b *SQLiteBackup) Remaining() int { return int(C.sqlite3_backup_remaining(b.b)) } func (b *SQLiteBackup) PageCount() int { return int(C.sqlite3_backup_pagecount(b.b)) } func (b *SQLiteBackup) Finish() error { return b.Close() } func (b *SQLiteBackup) Close() error { ret := C.sqlite3_backup_finish(b.b) if ret != 0 { return Error{Code: ErrNo(ret)} } b.b = nil runtime.SetFinalizer(b, nil) return nil }
DevOpsInvTech/doittypes
Godeps/_workspace/src/github.com/mattn/go-sqlite3/backup.go
GO
apache-2.0
1,742
/* * Copyright 2001-2013 Stephen Colebourne * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.joda.time.field; import org.joda.time.DateTimeFieldType; import org.joda.time.DurationField; /** * Precise datetime field, composed of two precise duration fields. * <p> * This DateTimeField is useful for defining DateTimeFields that are composed * of precise durations, like time of day fields. If either duration field is * imprecise, then an {@link ImpreciseDateTimeField} may be used instead. * <p> * PreciseDateTimeField is thread-safe and immutable. * * @author Brian S O'Neill * @author Stephen Colebourne * @since 1.0 * @see ImpreciseDateTimeField */ public class PreciseDateTimeField extends PreciseDurationDateTimeField { @SuppressWarnings("unused") private static final long serialVersionUID = -5586801265774496376L; /** The maximum range in the correct units */ private final int iRange; private final DurationField iRangeField; /** * Constructor. * * @param type the field type this field uses * @param unit precise unit duration, like "seconds()". * @param range precise range duration, preferably a multiple of the unit, * like "minutes()". * @throws IllegalArgumentException if either duration field is imprecise * @throws IllegalArgumentException if unit milliseconds is less than one * or effective value range is less than two. */ public PreciseDateTimeField(DateTimeFieldType type, DurationField unit, DurationField range) { super(type, unit); if (!range.isPrecise()) { throw new IllegalArgumentException("Range duration field must be precise"); } long rangeMillis = range.getUnitMillis(); iRange = (int)(rangeMillis / getUnitMillis()); if (iRange < 2) { throw new IllegalArgumentException("The effective range must be at least 2"); } iRangeField = range; } /** * Get the amount of fractional units from the specified time instant. * * @param instant the milliseconds from 1970-01-01T00:00:00Z to query * @return the amount of fractional units extracted from the input. */ public int get(long instant) { if (instant >= 0) { return (int) ((instant / getUnitMillis()) % iRange); } else { return iRange - 1 + (int) (((instant + 1) / getUnitMillis()) % iRange); } } /** * Add to the component of the specified time instant, wrapping around * within that component if necessary. * * @param instant the milliseconds from 1970-01-01T00:00:00Z to add to * @param amount the amount of units to add (can be negative). * @return the updated time instant. */ public long addWrapField(long instant, int amount) { int thisValue = get(instant); int wrappedValue = FieldUtils.getWrappedValue (thisValue, amount, getMinimumValue(), getMaximumValue()); // copy code from set() to avoid repeat call to get() return instant + (wrappedValue - thisValue) * getUnitMillis(); } /** * Set the specified amount of units to the specified time instant. * * @param instant the milliseconds from 1970-01-01T00:00:00Z to set in * @param value value of units to set. * @return the updated time instant. * @throws IllegalArgumentException if value is too large or too small. */ public long set(long instant, int value) { FieldUtils.verifyValueBounds(this, value, getMinimumValue(), getMaximumValue()); return instant + (value - get(instant)) * iUnitMillis; } /** * Returns the range duration of this field. For example, if this field * represents "minute of hour", then the range duration field is an hours. * * @return the range duration of this field, or null if field has no range */ public DurationField getRangeDurationField() { return iRangeField; } /** * Get the maximum value for the field. * * @return the maximum value */ public int getMaximumValue() { return iRange - 1; } /** * Returns the range of the field in the field's units. * <p> * For example, 60 for seconds per minute. The field is allowed values * from 0 to range - 1. * * @return unit range */ public int getRange() { return iRange; } }
hambroperks/j2objc
joda_time/sources/org/joda/time/field/PreciseDateTimeField.java
Java
apache-2.0
5,054
/* * Copyright 2012 GitHub Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.github.pockethub.ui.user; import org.eclipse.egit.github.core.User; /** * Interface to register and unregister a {@link OrganizationSelectionListener} */ public interface OrganizationSelectionProvider { /** * Add selection listener * * @param listener * @return the currently selected organization */ User addListener(OrganizationSelectionListener listener); /** * Remove selection listener * * @param listener * @return this selection provider */ OrganizationSelectionProvider removeListener( OrganizationSelectionListener listener); }
Bloody-Badboy/PocketHub
app/src/main/java/com/github/pockethub/ui/user/OrganizationSelectionProvider.java
Java
apache-2.0
1,225
/** * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with this * work for additional information regarding copyright ownership. The ASF * licenses this file to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations under * the License. */ package org.apache.hadoop.io.file.tfile; import java.io.DataInputStream; import java.io.DataOutputStream; import java.io.EOFException; import java.io.IOException; import java.util.Random; import org.junit.Assert; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.io.WritableUtils; import org.apache.hadoop.io.compress.zlib.ZlibFactory; import org.apache.hadoop.io.file.tfile.TFile.Reader; import org.apache.hadoop.io.file.tfile.TFile.Writer; import org.apache.hadoop.io.file.tfile.TFile.Reader.Location; import org.apache.hadoop.io.file.tfile.TFile.Reader.Scanner; import org.apache.hadoop.test.GenericTestUtils; import org.junit.After; import org.junit.Before; import org.junit.Test; /** * * Byte arrays test case class using GZ compression codec, base class of none * and LZO compression classes. * */ public class TestTFileByteArrays { private static String ROOT = GenericTestUtils.getTestDir().getAbsolutePath(); private final static int BLOCK_SIZE = 512; private final static int BUF_SIZE = 64; private final static int K = 1024; protected boolean skip = false; private static final String KEY = "key"; private static final String VALUE = "value"; private FileSystem fs; private Configuration conf = new Configuration(); private Path path; private FSDataOutputStream out; private Writer writer; private String compression = Compression.Algorithm.GZ.getName(); private String comparator = "memcmp"; private final String outputFile = getClass().getSimpleName(); /* * pre-sampled numbers of records in one block, based on the given the * generated key and value strings. This is slightly different based on * whether or not the native libs are present. */ private boolean usingNative = ZlibFactory.isNativeZlibLoaded(conf); private int records1stBlock = usingNative ? 5674 : 4480; private int records2ndBlock = usingNative ? 5574 : 4263; public void init(String compression, String comparator, int numRecords1stBlock, int numRecords2ndBlock) { init(compression, comparator); this.records1stBlock = numRecords1stBlock; this.records2ndBlock = numRecords2ndBlock; } public void init(String compression, String comparator) { this.compression = compression; this.comparator = comparator; } @Before public void setUp() throws IOException { path = new Path(ROOT, outputFile); fs = path.getFileSystem(conf); out = fs.create(path); writer = new Writer(out, BLOCK_SIZE, compression, comparator, conf); } @After public void tearDown() throws IOException { if (!skip) fs.delete(path, true); } @Test public void testNoDataEntry() throws IOException { if (skip) return; closeOutput(); Reader reader = new Reader(fs.open(path), fs.getFileStatus(path).getLen(), conf); Assert.assertTrue(reader.isSorted()); Scanner scanner = reader.createScanner(); Assert.assertTrue(scanner.atEnd()); scanner.close(); reader.close(); } @Test public void testOneDataEntry() throws IOException { if (skip) return; writeRecords(1); readRecords(1); checkBlockIndex(0, 0); readValueBeforeKey(0); readKeyWithoutValue(0); readValueWithoutKey(0); readKeyManyTimes(0); } @Test public void testTwoDataEntries() throws IOException { if (skip) return; writeRecords(2); readRecords(2); } /** * Fill up exactly one block. * * @throws IOException */ @Test public void testOneBlock() throws IOException { if (skip) return; // just under one block writeRecords(records1stBlock); readRecords(records1stBlock); // last key should be in the first block (block 0) checkBlockIndex(records1stBlock - 1, 0); } /** * One block plus one record. * * @throws IOException */ @Test public void testOneBlockPlusOneEntry() throws IOException { if (skip) return; writeRecords(records1stBlock + 1); readRecords(records1stBlock + 1); checkBlockIndex(records1stBlock - 1, 0); checkBlockIndex(records1stBlock, 1); } @Test public void testTwoBlocks() throws IOException { if (skip) return; writeRecords(records1stBlock + 5); readRecords(records1stBlock + 5); checkBlockIndex(records1stBlock + 4, 1); } @Test public void testThreeBlocks() throws IOException { if (skip) return; writeRecords(2 * records1stBlock + 5); readRecords(2 * records1stBlock + 5); checkBlockIndex(2 * records1stBlock + 4, 2); // 1st key in file readValueBeforeKey(0); readKeyWithoutValue(0); readValueWithoutKey(0); readKeyManyTimes(0); // last key in file readValueBeforeKey(2 * records1stBlock + 4); readKeyWithoutValue(2 * records1stBlock + 4); readValueWithoutKey(2 * records1stBlock + 4); readKeyManyTimes(2 * records1stBlock + 4); // 1st key in mid block, verify block indexes then read checkBlockIndex(records1stBlock - 1, 0); checkBlockIndex(records1stBlock, 1); readValueBeforeKey(records1stBlock); readKeyWithoutValue(records1stBlock); readValueWithoutKey(records1stBlock); readKeyManyTimes(records1stBlock); // last key in mid block, verify block indexes then read checkBlockIndex(records1stBlock + records2ndBlock - 1, 1); checkBlockIndex(records1stBlock + records2ndBlock, 2); readValueBeforeKey(records1stBlock + records2ndBlock - 1); readKeyWithoutValue(records1stBlock + records2ndBlock - 1); readValueWithoutKey(records1stBlock + records2ndBlock - 1); readKeyManyTimes(records1stBlock + records2ndBlock - 1); // mid in mid block readValueBeforeKey(records1stBlock + 10); readKeyWithoutValue(records1stBlock + 10); readValueWithoutKey(records1stBlock + 10); readKeyManyTimes(records1stBlock + 10); } Location locate(Scanner scanner, byte[] key) throws IOException { if (scanner.seekTo(key) == true) { return scanner.currentLocation; } return scanner.endLocation; } @Test public void testLocate() throws IOException { if (skip) return; writeRecords(3 * records1stBlock); Reader reader = new Reader(fs.open(path), fs.getFileStatus(path).getLen(), conf); Scanner scanner = reader.createScanner(); locate(scanner, composeSortedKey(KEY, 2).getBytes()); locate(scanner, composeSortedKey(KEY, records1stBlock - 1).getBytes()); locate(scanner, composeSortedKey(KEY, records1stBlock).getBytes()); Location locX = locate(scanner, "keyX".getBytes()); Assert.assertEquals(scanner.endLocation, locX); scanner.close(); reader.close(); } @Test public void testFailureWriterNotClosed() throws IOException { if (skip) return; Reader reader = null; try { reader = new Reader(fs.open(path), fs.getFileStatus(path).getLen(), conf); Assert.fail("Cannot read before closing the writer."); } catch (IOException e) { // noop, expecting exceptions } finally { if (reader != null) { reader.close(); } } } @Test public void testFailureWriteMetaBlocksWithSameName() throws IOException { if (skip) return; writer.append("keyX".getBytes(), "valueX".getBytes()); // create a new metablock DataOutputStream outMeta = writer.prepareMetaBlock("testX", Compression.Algorithm.GZ.getName()); outMeta.write(123); outMeta.write("foo".getBytes()); outMeta.close(); // add the same metablock try { writer.prepareMetaBlock("testX", Compression.Algorithm.GZ.getName()); Assert.fail("Cannot create metablocks with the same name."); } catch (Exception e) { // noop, expecting exceptions } closeOutput(); } @Test public void testFailureGetNonExistentMetaBlock() throws IOException { if (skip) return; writer.append("keyX".getBytes(), "valueX".getBytes()); // create a new metablock DataOutputStream outMeta = writer.prepareMetaBlock("testX", Compression.Algorithm.GZ.getName()); outMeta.write(123); outMeta.write("foo".getBytes()); outMeta.close(); closeOutput(); Reader reader = new Reader(fs.open(path), fs.getFileStatus(path).getLen(), conf); DataInputStream mb = reader.getMetaBlock("testX"); Assert.assertNotNull(mb); mb.close(); try { DataInputStream mbBad = reader.getMetaBlock("testY"); Assert.fail("Error on handling non-existent metablocks."); } catch (Exception e) { // noop, expecting exceptions } reader.close(); } @Test public void testFailureWriteRecordAfterMetaBlock() throws IOException { if (skip) return; // write a key/value first writer.append("keyX".getBytes(), "valueX".getBytes()); // create a new metablock DataOutputStream outMeta = writer.prepareMetaBlock("testX", Compression.Algorithm.GZ.getName()); outMeta.write(123); outMeta.write("dummy".getBytes()); outMeta.close(); // add more key/value try { writer.append("keyY".getBytes(), "valueY".getBytes()); Assert.fail("Cannot add key/value after start adding meta blocks."); } catch (Exception e) { // noop, expecting exceptions } closeOutput(); } @Test public void testFailureReadValueManyTimes() throws IOException { if (skip) return; writeRecords(5); Reader reader = new Reader(fs.open(path), fs.getFileStatus(path).getLen(), conf); Scanner scanner = reader.createScanner(); byte[] vbuf = new byte[BUF_SIZE]; int vlen = scanner.entry().getValueLength(); scanner.entry().getValue(vbuf); Assert.assertEquals(new String(vbuf, 0, vlen), VALUE + 0); try { scanner.entry().getValue(vbuf); Assert.fail("Cannot get the value mlutiple times."); } catch (Exception e) { // noop, expecting exceptions } scanner.close(); reader.close(); } @Test public void testFailureBadCompressionCodec() throws IOException { if (skip) return; closeOutput(); out = fs.create(path); try { writer = new Writer(out, BLOCK_SIZE, "BAD", comparator, conf); Assert.fail("Error on handling invalid compression codecs."); } catch (Exception e) { // noop, expecting exceptions // e.printStackTrace(); } } @Test public void testFailureOpenEmptyFile() throws IOException { if (skip) return; closeOutput(); // create an absolutely empty file path = new Path(fs.getWorkingDirectory(), outputFile); out = fs.create(path); out.close(); try { new Reader(fs.open(path), fs.getFileStatus(path).getLen(), conf); Assert.fail("Error on handling empty files."); } catch (EOFException e) { // noop, expecting exceptions } } @Test public void testFailureOpenRandomFile() throws IOException { if (skip) return; closeOutput(); // create an random file path = new Path(fs.getWorkingDirectory(), outputFile); out = fs.create(path); Random rand = new Random(); byte[] buf = new byte[K]; // fill with > 1MB data for (int nx = 0; nx < K + 2; nx++) { rand.nextBytes(buf); out.write(buf); } out.close(); try { new Reader(fs.open(path), fs.getFileStatus(path).getLen(), conf); Assert.fail("Error on handling random files."); } catch (IOException e) { // noop, expecting exceptions } } @Test public void testFailureKeyLongerThan64K() throws IOException { if (skip) return; byte[] buf = new byte[64 * K + 1]; Random rand = new Random(); rand.nextBytes(buf); try { writer.append(buf, "valueX".getBytes()); } catch (IndexOutOfBoundsException e) { // noop, expecting exceptions } closeOutput(); } @Test public void testFailureOutOfOrderKeys() throws IOException { if (skip) return; try { writer.append("keyM".getBytes(), "valueM".getBytes()); writer.append("keyA".getBytes(), "valueA".getBytes()); Assert.fail("Error on handling out of order keys."); } catch (Exception e) { // noop, expecting exceptions // e.printStackTrace(); } closeOutput(); } @Test public void testFailureNegativeOffset() throws IOException { if (skip) return; try { writer.append("keyX".getBytes(), -1, 4, "valueX".getBytes(), 0, 6); Assert.fail("Error on handling negative offset."); } catch (Exception e) { // noop, expecting exceptions } closeOutput(); } @Test public void testFailureNegativeOffset_2() throws IOException { if (skip) return; closeOutput(); Reader reader = new Reader(fs.open(path), fs.getFileStatus(path).getLen(), conf); Scanner scanner = reader.createScanner(); try { scanner.lowerBound("keyX".getBytes(), -1, 4); Assert.fail("Error on handling negative offset."); } catch (Exception e) { // noop, expecting exceptions } finally { reader.close(); scanner.close(); } closeOutput(); } @Test public void testFailureNegativeLength() throws IOException { if (skip) return; try { writer.append("keyX".getBytes(), 0, -1, "valueX".getBytes(), 0, 6); Assert.fail("Error on handling negative length."); } catch (Exception e) { // noop, expecting exceptions } closeOutput(); } @Test public void testFailureNegativeLength_2() throws IOException { if (skip) return; closeOutput(); Reader reader = new Reader(fs.open(path), fs.getFileStatus(path).getLen(), conf); Scanner scanner = reader.createScanner(); try { scanner.lowerBound("keyX".getBytes(), 0, -1); Assert.fail("Error on handling negative length."); } catch (Exception e) { // noop, expecting exceptions } finally { scanner.close(); reader.close(); } closeOutput(); } @Test public void testFailureNegativeLength_3() throws IOException { if (skip) return; writeRecords(3); Reader reader = new Reader(fs.open(path), fs.getFileStatus(path).getLen(), conf); Scanner scanner = reader.createScanner(); try { // test negative array offset try { scanner.seekTo("keyY".getBytes(), -1, 4); Assert.fail("Failed to handle negative offset."); } catch (Exception e) { // noop, expecting exceptions } // test negative array length try { scanner.seekTo("keyY".getBytes(), 0, -2); Assert.fail("Failed to handle negative key length."); } catch (Exception e) { // noop, expecting exceptions } } finally { reader.close(); scanner.close(); } } @Test public void testFailureCompressionNotWorking() throws IOException { if (skip) return; long rawDataSize = writeRecords(10 * records1stBlock, false); if (!compression.equalsIgnoreCase(Compression.Algorithm.NONE.getName())) { Assert.assertTrue(out.getPos() < rawDataSize); } closeOutput(); } @Test public void testFailureFileWriteNotAt0Position() throws IOException { if (skip) return; closeOutput(); out = fs.create(path); out.write(123); try { writer = new Writer(out, BLOCK_SIZE, compression, comparator, conf); Assert.fail("Failed to catch file write not at position 0."); } catch (Exception e) { // noop, expecting exceptions } closeOutput(); } private long writeRecords(int count) throws IOException { return writeRecords(count, true); } private long writeRecords(int count, boolean close) throws IOException { long rawDataSize = writeRecords(writer, count); if (close) { closeOutput(); } return rawDataSize; } static long writeRecords(Writer writer, int count) throws IOException { long rawDataSize = 0; int nx; for (nx = 0; nx < count; nx++) { byte[] key = composeSortedKey(KEY, nx).getBytes(); byte[] value = (VALUE + nx).getBytes(); writer.append(key, value); rawDataSize += WritableUtils.getVIntSize(key.length) + key.length + WritableUtils.getVIntSize(value.length) + value.length; } return rawDataSize; } /** * Insert some leading 0's in front of the value, to make the keys sorted. * * @param prefix * @param value * @return */ static String composeSortedKey(String prefix, int value) { return String.format("%s%010d", prefix, value); } private void readRecords(int count) throws IOException { readRecords(fs, path, count, conf); } static void readRecords(FileSystem fs, Path path, int count, Configuration conf) throws IOException { Reader reader = new Reader(fs.open(path), fs.getFileStatus(path).getLen(), conf); Scanner scanner = reader.createScanner(); try { for (int nx = 0; nx < count; nx++, scanner.advance()) { Assert.assertFalse(scanner.atEnd()); // Assert.assertTrue(scanner.next()); byte[] kbuf = new byte[BUF_SIZE]; int klen = scanner.entry().getKeyLength(); scanner.entry().getKey(kbuf); Assert.assertEquals(new String(kbuf, 0, klen), composeSortedKey(KEY, nx)); byte[] vbuf = new byte[BUF_SIZE]; int vlen = scanner.entry().getValueLength(); scanner.entry().getValue(vbuf); Assert.assertEquals(new String(vbuf, 0, vlen), VALUE + nx); } Assert.assertTrue(scanner.atEnd()); Assert.assertFalse(scanner.advance()); } finally { scanner.close(); reader.close(); } } private void checkBlockIndex(int recordIndex, int blockIndexExpected) throws IOException { Reader reader = new Reader(fs.open(path), fs.getFileStatus(path).getLen(), conf); Scanner scanner = reader.createScanner(); scanner.seekTo(composeSortedKey(KEY, recordIndex).getBytes()); Assert.assertEquals(blockIndexExpected, scanner.currentLocation .getBlockIndex()); scanner.close(); reader.close(); } private void readValueBeforeKey(int recordIndex) throws IOException { Reader reader = new Reader(fs.open(path), fs.getFileStatus(path).getLen(), conf); Scanner scanner = reader.createScannerByKey(composeSortedKey(KEY, recordIndex) .getBytes(), null); try { byte[] vbuf = new byte[BUF_SIZE]; int vlen = scanner.entry().getValueLength(); scanner.entry().getValue(vbuf); Assert.assertEquals(new String(vbuf, 0, vlen), VALUE + recordIndex); byte[] kbuf = new byte[BUF_SIZE]; int klen = scanner.entry().getKeyLength(); scanner.entry().getKey(kbuf); Assert.assertEquals(new String(kbuf, 0, klen), composeSortedKey(KEY, recordIndex)); } finally { scanner.close(); reader.close(); } } private void readKeyWithoutValue(int recordIndex) throws IOException { Reader reader = new Reader(fs.open(path), fs.getFileStatus(path).getLen(), conf); Scanner scanner = reader.createScannerByKey(composeSortedKey(KEY, recordIndex) .getBytes(), null); try { // read the indexed key byte[] kbuf1 = new byte[BUF_SIZE]; int klen1 = scanner.entry().getKeyLength(); scanner.entry().getKey(kbuf1); Assert.assertEquals(new String(kbuf1, 0, klen1), composeSortedKey(KEY, recordIndex)); if (scanner.advance() && !scanner.atEnd()) { // read the next key following the indexed byte[] kbuf2 = new byte[BUF_SIZE]; int klen2 = scanner.entry().getKeyLength(); scanner.entry().getKey(kbuf2); Assert.assertEquals(new String(kbuf2, 0, klen2), composeSortedKey(KEY, recordIndex + 1)); } } finally { scanner.close(); reader.close(); } } private void readValueWithoutKey(int recordIndex) throws IOException { Reader reader = new Reader(fs.open(path), fs.getFileStatus(path).getLen(), conf); Scanner scanner = reader.createScannerByKey(composeSortedKey(KEY, recordIndex) .getBytes(), null); byte[] vbuf1 = new byte[BUF_SIZE]; int vlen1 = scanner.entry().getValueLength(); scanner.entry().getValue(vbuf1); Assert.assertEquals(new String(vbuf1, 0, vlen1), VALUE + recordIndex); if (scanner.advance() && !scanner.atEnd()) { byte[] vbuf2 = new byte[BUF_SIZE]; int vlen2 = scanner.entry().getValueLength(); scanner.entry().getValue(vbuf2); Assert.assertEquals(new String(vbuf2, 0, vlen2), VALUE + (recordIndex + 1)); } scanner.close(); reader.close(); } private void readKeyManyTimes(int recordIndex) throws IOException { Reader reader = new Reader(fs.open(path), fs.getFileStatus(path).getLen(), conf); Scanner scanner = reader.createScannerByKey(composeSortedKey(KEY, recordIndex) .getBytes(), null); // read the indexed key byte[] kbuf1 = new byte[BUF_SIZE]; int klen1 = scanner.entry().getKeyLength(); scanner.entry().getKey(kbuf1); Assert.assertEquals(new String(kbuf1, 0, klen1), composeSortedKey(KEY, recordIndex)); klen1 = scanner.entry().getKeyLength(); scanner.entry().getKey(kbuf1); Assert.assertEquals(new String(kbuf1, 0, klen1), composeSortedKey(KEY, recordIndex)); klen1 = scanner.entry().getKeyLength(); scanner.entry().getKey(kbuf1); Assert.assertEquals(new String(kbuf1, 0, klen1), composeSortedKey(KEY, recordIndex)); scanner.close(); reader.close(); } private void closeOutput() throws IOException { if (writer != null) { writer.close(); writer = null; } if (out != null) { out.close(); out = null; } } }
GeLiXin/hadoop
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/file/tfile/TestTFileByteArrays.java
Java
apache-2.0
22,859
/* * * Copyright 2012 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package com.netflix.simianarmy.basic.janitor; import java.util.ArrayList; import java.util.Date; import java.util.List; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import com.netflix.simianarmy.Resource; import com.netflix.simianarmy.janitor.JanitorRuleEngine; import com.netflix.simianarmy.janitor.Rule; /** * Basic implementation of janitor rule engine that runs all containing rules to decide if a resource should be * a candidate of cleanup. */ public class BasicJanitorRuleEngine implements JanitorRuleEngine { /** The Constant LOGGER. */ private static final Logger LOGGER = LoggerFactory.getLogger(BasicJanitorRuleEngine.class); /** The rules to decide if a resource should be a candidate for cleanup. **/ private final List<Rule> rules; /** * The constructor of JanitorRuleEngine. */ public BasicJanitorRuleEngine() { rules = new ArrayList<Rule>(); } /** * Decides whether the resource should be a candidate of cleanup based on the underlying rules. If any rule in the * rule set thinks the resource should be a candidate of cleanup, the method returns false which indicates that the * resource should be marked for cleanup. If multiple rules think the resource should be cleaned up, the rule with * the nearest expected termination time fills the termination reason and expected termination time. * * @param resource * The resource * @return true if the resource is valid and should not be a candidate of cleanup based on the underlying rules, * false otherwise. */ @Override public boolean isValid(Resource resource) { LOGGER.debug(String.format("Checking if resource %s of type %s is a cleanup candidate against %d rules.", resource.getId(), resource.getResourceType(), rules.size())); // We create a clone of the resource each time when we try the rule. In the first iteration of the rules // we identify the rule with the nearest termination date if there is any rule considers the resource // as a cleanup candidate. Then the rule is applied to the original resource. Rule nearestRule = null; if (rules.size() == 1) { nearestRule = rules.get(0); } else { Date nearestTerminationTime = null; for (Rule rule : rules) { Resource clone = resource.cloneResource(); if (!rule.isValid(clone) && (nearestTerminationTime == null || nearestTerminationTime.after(clone.getExpectedTerminationTime()))) { nearestRule = rule; nearestTerminationTime = clone.getExpectedTerminationTime(); } } } if (nearestRule != null && !nearestRule.isValid(resource)) { LOGGER.info(String.format("Resource %s is marked as a cleanup candidate.", resource.getId())); return false; } else { LOGGER.info(String.format("Resource %s is not marked as a cleanup candidate.", resource.getId())); return true; } } /** {@inheritDoc} */ @Override public BasicJanitorRuleEngine addRule(Rule rule) { rules.add(rule); return this; } }
jnayegandhi/SimianArmy
src/main/java/com/netflix/simianarmy/basic/janitor/BasicJanitorRuleEngine.java
Java
apache-2.0
3,945
/* * Copyright 2012 GitHub Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.github.pockethub.core.code; import android.accounts.Account; import android.content.Context; import android.text.TextUtils; import android.util.Log; import com.github.pockethub.accounts.AuthenticatedUserTask; import com.github.pockethub.core.ref.RefUtils; import com.google.inject.Inject; import java.io.IOException; import org.eclipse.egit.github.core.Commit; import org.eclipse.egit.github.core.Reference; import org.eclipse.egit.github.core.Repository; import org.eclipse.egit.github.core.Tree; import org.eclipse.egit.github.core.service.DataService; import org.eclipse.egit.github.core.service.RepositoryService; /** * Task to load the tree for a repository's default branch */ public class RefreshTreeTask extends AuthenticatedUserTask<FullTree> { private static final String TAG = "RefreshTreeTask"; private final Repository repository; private final Reference reference; @Inject private RepositoryService repoService; @Inject private DataService dataService; /** * Create task to refresh repository's tree * * @param repository * @param reference * @param context */ public RefreshTreeTask(final Repository repository, final Reference reference, final Context context) { super(context); this.repository = repository; this.reference = reference; } private boolean isValidRef(Reference ref) { return ref != null && ref.getObject() != null && !TextUtils.isEmpty(ref.getObject().getSha()); } @Override protected FullTree run(Account account) throws Exception { Reference ref = reference; String branch = RefUtils.getPath(ref); if (branch == null) { branch = repository.getMasterBranch(); if (TextUtils.isEmpty(branch)) { branch = repoService.getRepository(repository) .getMasterBranch(); if (TextUtils.isEmpty(branch)) throw new IOException( "Repository does not have master branch"); } branch = "heads/" + branch; } if (!isValidRef(ref)) { ref = dataService.getReference(repository, branch); if (!isValidRef(ref)) throw new IOException( "Reference does not have associated commit SHA-1"); } Commit commit = dataService.getCommit(repository, ref.getObject() .getSha()); if (commit == null || commit.getTree() == null || TextUtils.isEmpty(commit.getTree().getSha())) throw new IOException("Commit does not have associated tree SHA-1"); Tree tree = dataService.getTree(repository, commit.getTree().getSha(), true); return new FullTree(tree, ref); } @Override protected void onException(Exception e) throws RuntimeException { super.onException(e); Log.d(TAG, "Exception loading tree", e); } }
yytang2012/PocketHub
app/src/main/java/com/github/pockethub/core/code/RefreshTreeTask.java
Java
apache-2.0
3,653
<?php class Swift_Plugins_LoggerPluginTest extends \SwiftMailerTestCase { public function testLoggerDelegatesAddingEntries() { $logger = $this->_createLogger(); $logger->expects($this->once()) ->method('add') ->with('foo'); $plugin = $this->_createPlugin($logger); $plugin->add('foo'); } public function testLoggerDelegatesDumpingEntries() { $logger = $this->_createLogger(); $logger->expects($this->once()) ->method('dump') ->will($this->returnValue('foobar')); $plugin = $this->_createPlugin($logger); $this->assertEquals('foobar', $plugin->dump()); } public function testLoggerDelegatesClearingEntries() { $logger = $this->_createLogger(); $logger->expects($this->once()) ->method('clear'); $plugin = $this->_createPlugin($logger); $plugin->clear(); } public function testCommandIsSentToLogger() { $evt = $this->_createCommandEvent("foo\r\n"); $logger = $this->_createLogger(); $logger->expects($this->once()) ->method('add') ->with($this->regExp('~foo\r\n~')); $plugin = $this->_createPlugin($logger); $plugin->commandSent($evt); } public function testResponseIsSentToLogger() { $evt = $this->_createResponseEvent("354 Go ahead\r\n"); $logger = $this->_createLogger(); $logger->expects($this->once()) ->method('add') ->with($this->regExp('~354 Go ahead\r\n~')); $plugin = $this->_createPlugin($logger); $plugin->responseReceived($evt); } public function testTransportBeforeStartChangeIsSentToLogger() { $evt = $this->_createTransportChangeEvent(); $logger = $this->_createLogger(); $logger->expects($this->once()) ->method('add') ->with($this->anything()); $plugin = $this->_createPlugin($logger); $plugin->beforeTransportStarted($evt); } public function testTransportStartChangeIsSentToLogger() { $evt = $this->_createTransportChangeEvent(); $logger = $this->_createLogger(); $logger->expects($this->once()) ->method('add') ->with($this->anything()); $plugin = $this->_createPlugin($logger); $plugin->transportStarted($evt); } public function testTransportStopChangeIsSentToLogger() { $evt = $this->_createTransportChangeEvent(); $logger = $this->_createLogger(); $logger->expects($this->once()) ->method('add') ->with($this->anything()); $plugin = $this->_createPlugin($logger); $plugin->transportStopped($evt); } public function testTransportBeforeStopChangeIsSentToLogger() { $evt = $this->_createTransportChangeEvent(); $logger = $this->_createLogger(); $logger->expects($this->once()) ->method('add') ->with($this->anything()); $plugin = $this->_createPlugin($logger); $plugin->beforeTransportStopped($evt); } public function testExceptionsArePassedToDelegateAndLeftToBubbleUp() { $transport = $this->_createTransport(); $evt = $this->_createTransportExceptionEvent(); $logger = $this->_createLogger(); $logger->expects($this->once()) ->method('add') ->with($this->anything()); $plugin = $this->_createPlugin($logger); try { $plugin->exceptionThrown($evt); $this->fail('Exception should bubble up.'); } catch (Swift_TransportException $ex) { } } private function _createLogger() { return $this->getMockBuilder('Swift_Plugins_Logger')->getMock(); } private function _createPlugin($logger) { return new Swift_Plugins_LoggerPlugin($logger); } private function _createCommandEvent($command) { $evt = $this->getMockBuilder('Swift_Events_CommandEvent') ->disableOriginalConstructor() ->getMock(); $evt->expects($this->any()) ->method('getCommand') ->will($this->returnValue($command)); return $evt; } private function _createResponseEvent($response) { $evt = $this->getMockBuilder('Swift_Events_ResponseEvent') ->disableOriginalConstructor() ->getMock(); $evt->expects($this->any()) ->method('getResponse') ->will($this->returnValue($response)); return $evt; } private function _createTransport() { return $this->getMockBuilder('Swift_Transport')->getMock(); } private function _createTransportChangeEvent() { $evt = $this->getMockBuilder('Swift_Events_TransportChangeEvent') ->disableOriginalConstructor() ->getMock(); $evt->expects($this->any()) ->method('getSource') ->will($this->returnValue($this->_createTransport())); return $evt; } public function _createTransportExceptionEvent() { $evt = $this->getMockBuilder('Swift_Events_TransportExceptionEvent') ->disableOriginalConstructor() ->getMock(); $evt->expects($this->any()) ->method('getException') ->will($this->returnValue(new Swift_TransportException(''))); return $evt; } }
jwjw2007/laravel54
vendor/swiftmailer/swiftmailer/tests/unit/Swift/Plugins/LoggerPluginTest.php
PHP
apache-2.0
5,660
/* ============================================================ * bootstrap-dropdown.js v2.3.2 * http://getbootstrap.com/2.3.2/javascript.html#dropdowns * ============================================================ * Copyright 2013 Twitter, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * ============================================================ */ !function ($) { "use strict"; // jshint ;_; /* DROPDOWN CLASS DEFINITION * ========================= */ var toggle = '[data-toggle=dropdown]' , Dropdown = function (element) { var $el = $(element).on('click.dropdown.data-api', this.toggle) $('html').on('click.dropdown.data-api', function () { $el.parent().removeClass('open') }) } Dropdown.prototype = { constructor: Dropdown , toggle: function (e) { var $this = $(this) , $parent , isActive if ($this.is('.disabled, :disabled')) return $parent = getParent($this) isActive = $parent.hasClass('open') clearMenus() if (!isActive) { if ('ontouchstart' in document.documentElement) { // if mobile we we use a backdrop because click events don't delegate $('<div class="dropdown-backdrop"/>').insertBefore($(this)).on('click', clearMenus) } $parent.toggleClass('open') } $this.focus() return false } , keydown: function (e) { var $this , $items , $active , $parent , isActive , index if (!/(38|40|27)/.test(e.keyCode)) return $this = $(this) e.preventDefault() e.stopPropagation() if ($this.is('.disabled, :disabled')) return $parent = getParent($this) isActive = $parent.hasClass('open') if (!isActive || (isActive && e.keyCode == 27)) { if (e.which == 27) $parent.find(toggle).focus() return $this.click() } $items = $('[role=menu] li:not(.divider):visible a', $parent) if (!$items.length) return index = $items.index($items.filter(':focus')) if (e.keyCode == 38 && index > 0) index-- // up if (e.keyCode == 40 && index < $items.length - 1) index++ // down if (!~index) index = 0 $items .eq(index) .focus() } } function clearMenus() { $('.dropdown-backdrop').remove() $(toggle).each(function () { getParent($(this)).removeClass('open') }) } function getParent($this) { var selector = $this.attr('data-target') , $parent if (!selector) { selector = $this.attr('href') selector = selector && /#/.test(selector) && selector.replace(/.*(?=#[^\s]*$)/, '') //strip for ie7 } $parent = selector && $(selector) if (!$parent || !$parent.length) $parent = $this.parent() return $parent } /* DROPDOWN PLUGIN DEFINITION * ========================== */ var old = $.fn.dropdown $.fn.dropdown = function (option) { return this.each(function () { var $this = $(this) , data = $this.data('dropdown') if (!data) $this.data('dropdown', (data = new Dropdown(this))) if (typeof option == 'string') data[option].call($this) }) } $.fn.dropdown.Constructor = Dropdown /* DROPDOWN NO CONFLICT * ==================== */ $.fn.dropdown.noConflict = function () { $.fn.dropdown = old return this } /* APPLY TO STANDARD DROPDOWN ELEMENTS * =================================== */ $(document) .on('click.dropdown.data-api', clearMenus) .on('click.dropdown.data-api', '.dropdown form', function (e) { e.stopPropagation() }) .on('click.dropdown.data-api' , toggle, Dropdown.prototype.toggle) .on('keydown.dropdown.data-api', toggle + ', [role=menu]' , Dropdown.prototype.keydown) }(window.jQuery);
rawbean/taiga-mt
public/ext/bootstrap-calendar/bootstrap/js/bootstrap-dropdown.js
JavaScript
apache-2.0
4,407
/*! * jQuery UI Button 1.10.3 * http://jqueryui.com * * Copyright 2013 jQuery Foundation and other contributors * Released under the MIT license. * http://jquery.org/license * * http://docs.jquery.com/UI/Button#theming */ .ui-button { display: inline-block; position: relative; padding: 0; line-height: normal; margin-right: .1em; cursor: pointer; vertical-align: middle; text-align: center; overflow: visible; /* removes extra width in IE */ } .ui-button, .ui-button:link, .ui-button:visited, .ui-button:hover, .ui-button:active { text-decoration: none; } /* to make room for the icon, a width needs to be set here */ .ui-button-icon-only { width: 2.2em; } /* button elements seem to need a little more width */ button.ui-button-icon-only { width: 2.4em; } .ui-button-icons-only { width: 3.4em; } button.ui-button-icons-only { width: 3.7em; } /* button text element */ .ui-button .ui-button-text { display: block; line-height: normal; } .ui-button-text-only .ui-button-text { padding: .4em 1em; } .ui-button-icon-only .ui-button-text, .ui-button-icons-only .ui-button-text { padding: .4em; text-indent: -9999999px; } .ui-button-text-icon-primary .ui-button-text, .ui-button-text-icons .ui-button-text { padding: .4em 1em .4em 2.1em; } .ui-button-text-icon-secondary .ui-button-text, .ui-button-text-icons .ui-button-text { padding: .4em 2.1em .4em 1em; } .ui-button-text-icons .ui-button-text { padding-left: 2.1em; padding-right: 2.1em; } /* no icon support for input elements, provide padding by default */ input.ui-button { padding: .4em 1em; } /* button icon element(s) */ .ui-button-icon-only .ui-icon, .ui-button-text-icon-primary .ui-icon, .ui-button-text-icon-secondary .ui-icon, .ui-button-text-icons .ui-icon, .ui-button-icons-only .ui-icon { position: absolute; top: 50%; margin-top: -8px; } .ui-button-icon-only .ui-icon { left: 50%; margin-left: -8px; } .ui-button-text-icon-primary .ui-button-icon-primary, .ui-button-text-icons .ui-button-icon-primary, .ui-button-icons-only .ui-button-icon-primary { left: .5em; } .ui-button-text-icon-secondary .ui-button-icon-secondary, .ui-button-text-icons .ui-button-icon-secondary, .ui-button-icons-only .ui-button-icon-secondary { right: .5em; } /* button sets */ .ui-buttonset { margin-right: 7px; } .ui-buttonset .ui-button { margin-left: 0; margin-right: -.3em; } /* workarounds */ /* reset extra padding in Firefox, see h5bp.com/l */ input.ui-button::-moz-focus-inner, button.ui-button::-moz-focus-inner { border: 0; padding: 0; }
pauldraper/piezo
admin/public/jquery-ui-1.10.3/core/themes/base/jquery.ui.button.css
CSS
apache-2.0
2,541
/** * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.camel.component.undertow; import org.apache.camel.Exchange; import org.apache.camel.builder.RouteBuilder; import org.junit.Test; public class UndertowHeaderTest extends BaseUndertowTest { @Test public void testHttpHeaders() throws Exception { getMockEndpoint("mock:input").expectedMessageCount(1); getMockEndpoint("mock:input").expectedHeaderReceived("param", "true"); getMockEndpoint("mock:input").expectedHeaderReceived(Exchange.HTTP_METHOD, "GET"); getMockEndpoint("mock:input").expectedHeaderReceived(Exchange.HTTP_URL, "http://localhost:" + getPort() + "/headers"); getMockEndpoint("mock:input").expectedHeaderReceived(Exchange.HTTP_URI, "/headers"); getMockEndpoint("mock:input").expectedHeaderReceived(Exchange.HTTP_QUERY, "param=true"); getMockEndpoint("mock:input").expectedHeaderReceived(Exchange.HTTP_PATH, "/headers"); String out = template.requestBody("http://localhost:" + getPort() + "/headers?param=true", null, String.class); assertEquals("Bye World", out); assertMockEndpointsSatisfied(); } @Test public void testHttpHeadersPost() throws Exception { getMockEndpoint("mock:input").expectedBodiesReceived("Hello World"); getMockEndpoint("mock:input").expectedHeaderReceived(Exchange.HTTP_METHOD, "POST"); getMockEndpoint("mock:input").expectedHeaderReceived(Exchange.HTTP_URL, "http://localhost:" + getPort() + "/headers"); getMockEndpoint("mock:input").expectedHeaderReceived(Exchange.HTTP_URI, "/headers"); getMockEndpoint("mock:input").expectedHeaderReceived(Exchange.HTTP_QUERY, ""); getMockEndpoint("mock:input").expectedHeaderReceived(Exchange.HTTP_PATH, "/headers"); String out = template.requestBody("http://localhost:" + getPort() + "/headers", "Hello World", String.class); assertEquals("Bye World", out); assertMockEndpointsSatisfied(); } @Override protected RouteBuilder createRouteBuilder() throws Exception { return new RouteBuilder() { @Override public void configure() throws Exception { from("undertow:http://localhost:{{port}}/headers") .to("mock:input") .transform().constant("Bye World"); } }; } }
davidwilliams1978/camel
components/camel-undertow/src/test/java/org/apache/camel/component/undertow/UndertowHeaderTest.java
Java
apache-2.0
3,163
(function (tree) { tree.mixin = {}; tree.mixin.Call = function (elements, args, index, filename, important) { this.selector = new(tree.Selector)(elements); this.arguments = args; this.index = index; this.filename = filename; this.important = important; }; tree.mixin.Call.prototype = { eval: function (env) { var mixins, args, rules = [], match = false; for (var i = 0; i < env.frames.length; i++) { if ((mixins = env.frames[i].find(this.selector)).length > 0) { args = this.arguments && this.arguments.map(function (a) { return a.eval(env) }); for (var m = 0; m < mixins.length; m++) { if (mixins[m].match(args, env)) { try { Array.prototype.push.apply( rules, mixins[m].eval(env, this.arguments, this.important).rules); match = true; } catch (e) { throw { message: e.message, index: this.index, filename: this.filename, stack: e.stack }; } } } if (match) { return rules; } else { throw { type: 'Runtime', message: 'No matching definition was found for `' + this.selector.toCSS().trim() + '(' + this.arguments.map(function (a) { return a.toCSS(); }).join(', ') + ")`", index: this.index, filename: this.filename }; } } } throw { type: 'Name', message: this.selector.toCSS().trim() + " is undefined", index: this.index, filename: this.filename }; } }; tree.mixin.Definition = function (name, params, rules, condition, variadic) { this.name = name; this.selectors = [new(tree.Selector)([new(tree.Element)(null, name)])]; this.params = params; this.condition = condition; this.variadic = variadic; this.arity = params.length; this.rules = rules; this._lookups = {}; this.required = params.reduce(function (count, p) { if (!p.name || (p.name && !p.value)) { return count + 1 } else { return count } }, 0); this.parent = tree.Ruleset.prototype; this.frames = []; }; tree.mixin.Definition.prototype = { toCSS: function () { return "" }, variable: function (name) { return this.parent.variable.call(this, name) }, variables: function () { return this.parent.variables.call(this) }, find: function () { return this.parent.find.apply(this, arguments) }, rulesets: function () { return this.parent.rulesets.apply(this) }, evalParams: function (env, args) { var frame = new(tree.Ruleset)(null, []), varargs; for (var i = 0, val, name; i < this.params.length; i++) { if (name = this.params[i].name) { if (this.params[i].variadic && args) { varargs = []; for (var j = i; j < args.length; j++) { varargs.push(args[j].eval(env)); } frame.rules.unshift(new(tree.Rule)(name, new(tree.Expression)(varargs).eval(env))); } else if (val = (args && args[i]) || this.params[i].value) { frame.rules.unshift(new(tree.Rule)(name, val.eval(env))); } else { throw { type: 'Runtime', message: "wrong number of arguments for " + this.name + ' (' + args.length + ' for ' + this.arity + ')' }; } } } return frame; }, eval: function (env, args, important) { var frame = this.evalParams(env, args), context, _arguments = [], rules, start; for (var i = 0; i < Math.max(this.params.length, args && args.length); i++) { _arguments.push(args[i] || this.params[i].value); } frame.rules.unshift(new(tree.Rule)('@arguments', new(tree.Expression)(_arguments).eval(env))); rules = important ? this.rules.map(function (r) { return new(tree.Rule)(r.name, r.value, '!important', r.index); }) : this.rules.slice(0); return new(tree.Ruleset)(null, rules).eval({ frames: [this, frame].concat(this.frames, env.frames) }); }, match: function (args, env) { var argsLength = (args && args.length) || 0, len, frame; if (! this.variadic) { if (argsLength < this.required) { return false } if (argsLength > this.params.length) { return false } if ((this.required > 0) && (argsLength > this.params.length)) { return false } } if (this.condition && !this.condition.eval({ frames: [this.evalParams(env, args)].concat(env.frames) })) { return false } len = Math.min(argsLength, this.arity); for (var i = 0; i < len; i++) { if (!this.params[i].name) { if (args[i].eval(env).toCSS() != this.params[i].value.eval(env).toCSS()) { return false; } } } return true; } }; })(require('../tree'));
souldreamer/less.js
lib/less/tree/mixin.js
JavaScript
apache-2.0
5,634
/* * Copyright 2000-2014 JetBrains s.r.o. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.intellij.openapi.project; import com.intellij.ide.DataManager; import com.intellij.openapi.actionSystem.CommonDataKeys; import com.intellij.openapi.actionSystem.DataContext; import com.intellij.openapi.fileEditor.UniqueVFilePathBuilder; import com.intellij.openapi.fileTypes.FileType; import com.intellij.openapi.fileTypes.FileTypeManager; import com.intellij.openapi.roots.ProjectRootManager; import com.intellij.openapi.util.io.FileUtil; import com.intellij.openapi.vfs.VirtualFile; import com.intellij.openapi.vfs.VirtualFilePathWrapper; import org.jetbrains.annotations.NotNull; import org.jetbrains.annotations.Nullable; import javax.swing.*; /** * @author max */ public class ProjectUtil { private ProjectUtil() { } @Nullable public static String getProjectLocationString(@NotNull final Project project) { return FileUtil.getLocationRelativeToUserHome(project.getBasePath()); } @NotNull public static String calcRelativeToProjectPath(@NotNull final VirtualFile file, @Nullable final Project project, final boolean includeFilePath) { return calcRelativeToProjectPath(file, project, includeFilePath, false, false); } @NotNull public static String calcRelativeToProjectPath(@NotNull final VirtualFile file, @Nullable final Project project, final boolean includeFilePath, final boolean includeUniqueFilePath, final boolean keepModuleAlwaysOnTheLeft) { if (file instanceof VirtualFilePathWrapper && ((VirtualFilePathWrapper)file).enforcePresentableName()) { return includeFilePath ? ((VirtualFilePathWrapper)file).getPresentablePath() : file.getName(); } String url; if (includeFilePath) { url = file.getPresentableUrl(); } else if (includeUniqueFilePath) { url = UniqueVFilePathBuilder.getInstance().getUniqueVirtualFilePath(project, file); } else { url = file.getName(); } if (project == null) { return url; } return ProjectUtilCore.displayUrlRelativeToProject(file, url, project, includeFilePath, keepModuleAlwaysOnTheLeft); } public static String calcRelativeToProjectPath(final VirtualFile file, final Project project) { return calcRelativeToProjectPath(file, project, true); } @Nullable public static Project guessProjectForFile(VirtualFile file) { return ProjectLocator.getInstance().guessProjectForFile(file); } @Nullable public static Project guessProjectForContentFile(@NotNull VirtualFile file) { return guessProjectForContentFile(file, file.getFileType()); } @Nullable /*** * guessProjectForFile works incorrectly - even if file is config (idea config file) first opened project will be returned */ public static Project guessProjectForContentFile(@NotNull VirtualFile file, @NotNull FileType fileType) { if (isProjectOrWorkspaceFile(file, fileType)) { return null; } for (Project project : ProjectManager.getInstance().getOpenProjects()) { if (!project.isDefault() && project.isInitialized() && !project.isDisposed() && ProjectRootManager.getInstance(project).getFileIndex().isInContent(file)) { return project; } } return null; } public static boolean isProjectOrWorkspaceFile(final VirtualFile file) { // do not use file.getFileType() to avoid autodetection by content loading for arbitrary files return isProjectOrWorkspaceFile(file, FileTypeManager.getInstance().getFileTypeByFileName(file.getName())); } public static boolean isProjectOrWorkspaceFile(@NotNull VirtualFile file, @Nullable FileType fileType) { return ProjectCoreUtil.isProjectOrWorkspaceFile(file, fileType); } @NotNull public static Project guessCurrentProject(@Nullable JComponent component) { Project project = null; if (component != null) { project = CommonDataKeys.PROJECT.getData(DataManager.getInstance().getDataContext(component)); } if (project == null) { Project[] openProjects = ProjectManager.getInstance().getOpenProjects(); if (openProjects.length > 0) project = openProjects[0]; if (project == null) { DataContext dataContext = DataManager.getInstance().getDataContext(); project = CommonDataKeys.PROJECT.getData(dataContext); } if (project == null) { project = ProjectManager.getInstance().getDefaultProject(); } } return project; } }
adedayo/intellij-community
platform/lang-api/src/com/intellij/openapi/project/ProjectUtil.java
Java
apache-2.0
5,271
package cldr import ( "fmt" "log" "reflect" "testing" ) func failOnError(err error) { if err != nil { log.Panic(err) } } func data() *CLDR { d := Decoder{} data, err := d.Decode(testLoader{}) failOnError(err) return data } type h struct { A string `xml:"ha,attr"` E string `xml:"he"` D string `xml:",chardata"` X string } type fieldTest struct { Common To string `xml:"to,attr"` Key string `xml:"key,attr"` E string `xml:"e"` D string `xml:",chardata"` X string h } var testStruct = fieldTest{ Common: Common{ name: "mapping", // exclude "type" as distinguishing attribute Type: "foo", Alt: "foo", }, To: "nyc", Key: "k", E: "E", D: "D", h: h{ A: "A", E: "E", D: "D", }, } func TestIter(t *testing.T) { tests := map[string]string{ "Type": "foo", "Alt": "foo", "To": "nyc", "A": "A", "Alias": "<nil>", } k := 0 for i := iter(reflect.ValueOf(testStruct)); !i.done(); i.next() { v := i.value() if v.Kind() == reflect.Ptr && v.Elem().Kind() == reflect.String { v = v.Elem() } name := i.field().Name if w, ok := tests[name]; ok { s := fmt.Sprint(v.Interface()) if w != s { t.Errorf("value: found %q; want %q", w, s) } delete(tests, name) } k++ } if len(tests) != 0 { t.Errorf("missing fields: %v", tests) } } func TestFindField(t *testing.T) { tests := []struct { name, val string exist bool }{ {"type", "foo", true}, {"alt", "foo", true}, {"to", "nyc", true}, {"he", "E", true}, {"q", "", false}, } vf := reflect.ValueOf(testStruct) for i, tt := range tests { v, err := findField(vf, tt.name) if (err == nil) != tt.exist { t.Errorf("%d: field %q present is %v; want %v", i, tt.name, err == nil, tt.exist) } else if tt.exist { if v.Kind() == reflect.Ptr { if v.IsNil() { continue } v = v.Elem() } if v.String() != tt.val { t.Errorf("%d: found value %q; want %q", i, v.String(), tt.val) } } } } var keyTests = []struct { exclude []string key string }{ {[]string{}, "alt=foo;key=k;to=nyc"}, {[]string{"type"}, "alt=foo;key=k;to=nyc"}, {[]string{"choice"}, "alt=foo;key=k;to=nyc"}, {[]string{"alt"}, "key=k;to=nyc"}, {[]string{"a"}, "alt=foo;key=k;to=nyc"}, {[]string{"to"}, "alt=foo;key=k"}, {[]string{"alt", "to"}, "key=k"}, {[]string{"alt", "to", "key"}, ""}, } func TestAttrKey(t *testing.T) { v := reflect.ValueOf(&testStruct) for i, tt := range keyTests { key := attrKey(v, tt.exclude...) if key != tt.key { t.Errorf("%d: found %q, want %q", i, key, tt.key) } } } func TestKey(t *testing.T) { for i, tt := range keyTests { key := Key(&testStruct, tt.exclude...) if key != tt.key { t.Errorf("%d: found %q, want %q", i, key, tt.key) } } } func testEnclosing(t *testing.T, x *LDML, name string) { eq := func(a, b Elem, i int) { for ; i > 0; i-- { b = b.enclosing() } if a != b { t.Errorf("%s: found path %q, want %q", name, getPath(a), getPath(b)) } } eq(x, x, 0) eq(x, x.Identity, 1) eq(x, x.Dates.Calendars, 2) eq(x, x.Dates.Calendars.Calendar[0], 3) eq(x, x.Dates.Calendars.Calendar[1], 3) //eq(x, x.Dates.Calendars.Calendar[0].Months, 4) eq(x, x.Dates.Calendars.Calendar[1].Months, 4) } func TestEnclosing(t *testing.T) { testEnclosing(t, data().RawLDML("de"), "enclosing-raw") de, _ := data().LDML("de") testEnclosing(t, de, "enclosing") } func TestDeepCopy(t *testing.T) { eq := func(have, want string) { if have != want { t.Errorf("found %q; want %q", have, want) } } x, _ := data().LDML("de") vc := deepCopy(reflect.ValueOf(x)) c := vc.Interface().(*LDML) linkEnclosing(nil, c) if x == c { t.Errorf("did not copy") } eq(c.name, "ldml") eq(c.Dates.name, "dates") testEnclosing(t, c, "deepCopy") } type getTest struct { loc string path string field string // used in combination with length data string altData string // used for buddhist calendar if value != "" typ string length int missing bool } const ( budMon = "dates/calendars/calendar[@type='buddhist']/months/" chnMon = "dates/calendars/calendar[@type='chinese']/months/" greMon = "dates/calendars/calendar[@type='gregorian']/months/" ) func monthVal(path, context, width string, month int) string { const format = "%s/monthContext[@type='%s']/monthWidth[@type='%s']/month[@type='%d']" return fmt.Sprintf(format, path, context, width, month) } var rootGetTests = []getTest{ {loc: "root", path: "identity/language", typ: "root"}, {loc: "root", path: "characters/moreInformation", data: "?"}, {loc: "root", path: "characters", field: "exemplarCharacters", length: 3}, {loc: "root", path: greMon, field: "monthContext", length: 2}, {loc: "root", path: greMon + "monthContext[@type='format']/monthWidth[@type='narrow']", field: "month", length: 4}, {loc: "root", path: greMon + "monthContext[@type='stand-alone']/monthWidth[@type='wide']", field: "month", length: 4}, // unescaping character data {loc: "root", path: "characters/exemplarCharacters[@type='punctuation']", data: `[\- โ€ โ€“ โ€” โ€ฆ ' โ€˜ โ€š " โ€œ โ€ž \& #]`}, // default resolution {loc: "root", path: "dates/calendars/calendar", typ: "gregorian"}, // alias resolution {loc: "root", path: budMon, field: "monthContext", length: 2}, // crossing but non-circular alias resolution {loc: "root", path: budMon + "monthContext[@type='format']/monthWidth[@type='narrow']", field: "month", length: 4}, {loc: "root", path: budMon + "monthContext[@type='stand-alone']/monthWidth[@type='wide']", field: "month", length: 4}, {loc: "root", path: monthVal(greMon, "format", "wide", 1), data: "11"}, {loc: "root", path: monthVal(greMon, "format", "narrow", 2), data: "2"}, {loc: "root", path: monthVal(greMon, "stand-alone", "wide", 3), data: "33"}, {loc: "root", path: monthVal(greMon, "stand-alone", "narrow", 4), data: "4"}, {loc: "root", path: monthVal(budMon, "format", "wide", 1), data: "11"}, {loc: "root", path: monthVal(budMon, "format", "narrow", 2), data: "2"}, {loc: "root", path: monthVal(budMon, "stand-alone", "wide", 3), data: "33"}, {loc: "root", path: monthVal(budMon, "stand-alone", "narrow", 4), data: "4"}, } // 19 var deGetTests = []getTest{ {loc: "de", path: "identity/language", typ: "de"}, {loc: "de", path: "posix", length: 2}, {loc: "de", path: "characters", field: "exemplarCharacters", length: 4}, {loc: "de", path: "characters/exemplarCharacters[@type='auxiliary']", data: `[รก ร  ฤƒ]`}, // identity is a blocking element, so de should not inherit generation from root. {loc: "de", path: "identity/generation", missing: true}, // default resolution {loc: "root", path: "dates/calendars/calendar", typ: "gregorian"}, // absolute path alias resolution {loc: "gsw", path: "posix", field: "messages", length: 1}, {loc: "gsw", path: "posix/messages/yesstr", data: "yes:y"}, } // 27(greMon) - 52(budMon) - 77(chnMon) func calGetTests(s string) []getTest { tests := []getTest{ {loc: "de", path: s, length: 2}, {loc: "de", path: s + "monthContext[@type='format']/monthWidth[@type='wide']", field: "month", length: 5}, {loc: "de", path: monthVal(s, "format", "wide", 1), data: "11"}, {loc: "de", path: monthVal(s, "format", "wide", 2), data: "22"}, {loc: "de", path: monthVal(s, "format", "wide", 3), data: "Maerz", altData: "bbb"}, {loc: "de", path: monthVal(s, "format", "wide", 4), data: "April"}, {loc: "de", path: monthVal(s, "format", "wide", 5), data: "Mai"}, {loc: "de", path: s + "monthContext[@type='format']/monthWidth[@type='narrow']", field: "month", length: 5}, {loc: "de", path: monthVal(s, "format", "narrow", 1), data: "1"}, {loc: "de", path: monthVal(s, "format", "narrow", 2), data: "2"}, {loc: "de", path: monthVal(s, "format", "narrow", 3), data: "M", altData: "BBB"}, {loc: "de", path: monthVal(s, "format", "narrow", 4), data: "A"}, {loc: "de", path: monthVal(s, "format", "narrow", 5), data: "m"}, {loc: "de", path: s + "monthContext[@type='stand-alone']/monthWidth[@type='wide']", field: "month", length: 5}, {loc: "de", path: monthVal(s, "stand-alone", "wide", 1), data: "11"}, {loc: "de", path: monthVal(s, "stand-alone", "wide", 2), data: "22"}, {loc: "de", path: monthVal(s, "stand-alone", "wide", 3), data: "Maerz", altData: "bbb"}, {loc: "de", path: monthVal(s, "stand-alone", "wide", 4), data: "april"}, {loc: "de", path: monthVal(s, "stand-alone", "wide", 5), data: "mai"}, {loc: "de", path: s + "monthContext[@type='stand-alone']/monthWidth[@type='narrow']", field: "month", length: 5}, {loc: "de", path: monthVal(s, "stand-alone", "narrow", 1), data: "1"}, {loc: "de", path: monthVal(s, "stand-alone", "narrow", 2), data: "2"}, {loc: "de", path: monthVal(s, "stand-alone", "narrow", 3), data: "m"}, {loc: "de", path: monthVal(s, "stand-alone", "narrow", 4), data: "4"}, {loc: "de", path: monthVal(s, "stand-alone", "narrow", 5), data: "m"}, } if s == budMon { for i, t := range tests { if t.altData != "" { tests[i].data = t.altData } } } return tests } var getTests = append(rootGetTests, append(deGetTests, append(calGetTests(greMon), append(calGetTests(budMon), calGetTests(chnMon)...)...)...)...) func TestPath(t *testing.T) { d := data() for i, tt := range getTests { x, _ := d.LDML(tt.loc) e, err := walkXPath(x, tt.path) if err != nil { if !tt.missing { t.Errorf("%d:error: %v %v", i, err, tt.missing) } continue } if tt.missing { t.Errorf("%d: missing is %v; want %v", i, e == nil, tt.missing) continue } if tt.data != "" && e.GetCommon().Data() != tt.data { t.Errorf("%d: data is %v; want %v", i, e.GetCommon().Data(), tt.data) continue } if tt.typ != "" && e.GetCommon().Type != tt.typ { t.Errorf("%d: type is %v; want %v", i, e.GetCommon().Type, tt.typ) continue } if tt.field != "" { slice, _ := findField(reflect.ValueOf(e), tt.field) if slice.Len() != tt.length { t.Errorf("%d: length is %v; want %v", i, slice.Len(), tt.length) continue } } } } func TestGet(t *testing.T) { d := data() for i, tt := range getTests { x, _ := d.LDML(tt.loc) e, err := Get(x, tt.path) if err != nil { if !tt.missing { t.Errorf("%d:error: %v %v", i, err, tt.missing) } continue } if tt.missing { t.Errorf("%d: missing is %v; want %v", i, e == nil, tt.missing) continue } if tt.data != "" && e.GetCommon().Data() != tt.data { t.Errorf("%d: data is %v; want %v", i, e.GetCommon().Data(), tt.data) continue } if tt.typ != "" && e.GetCommon().Type != tt.typ { t.Errorf("%d: type is %v; want %v", i, e.GetCommon().Type, tt.typ) continue } if tt.field != "" { slice, _ := findField(reflect.ValueOf(e), tt.field) if slice.Len() != tt.length { t.Errorf("%d: length is %v; want %v", i, slice.Len(), tt.length) continue } } } }
akutz/rexray
vendor/golang.org/x/text/unicode/cldr/resolve_test.go
GO
apache-2.0
10,866
๏ปฟ/* Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ namespace WPCordovaClassLib.Cordova.Commands { using System; using System.Linq; using System.Windows; using System.Windows.Controls; using System.Windows.Navigation; using Microsoft.Phone.Tasks; using Microsoft.Phone.UserData; using DeviceContacts = Microsoft.Phone.UserData.Contacts; /// <summary> /// Custom implemented class for picking single contact /// </summary> public partial class ContactPicker { #region Fields /// <summary> /// Result of ContactPicker call, represent contact returned. /// </summary> private ContactPickerTask.PickResult result; #endregion #region Constructors /// <summary> /// Initializes a new instance of the <see cref="ContactPicker"/> class. /// </summary> public ContactPicker() { InitializeComponent(); var cons = new DeviceContacts(); cons.SearchCompleted += this.OnSearchCompleted; cons.SearchAsync(string.Empty, FilterKind.None, string.Empty); } #endregion #region Callbacks /// <summary> /// Occurs when contact is selected or pick operation cancelled. /// </summary> public event EventHandler<ContactPickerTask.PickResult> Completed; #endregion /// <summary> /// The on navigated from. /// </summary> /// <param name="e"> /// The e. /// </param> protected override void OnNavigatedFrom(NavigationEventArgs e) { if (this.result == null) { this.Completed(this, new ContactPickerTask.PickResult(TaskResult.Cancel)); } base.OnNavigatedFrom(e); } /// <summary> /// Called when contacts retrieval completed. /// </summary> /// <param name="sender">The sender.</param> /// <param name="e">The <see cref="ContactsSearchEventArgs"/> instance containing the event data.</param> private void OnSearchCompleted(object sender, ContactsSearchEventArgs e) { if (e.Results.Count() != 0) { lstContacts.ItemsSource = e.Results.ToList(); lstContacts.Visibility = Visibility.Visible; NoContactsBlock.Visibility = Visibility.Collapsed; } else { lstContacts.Visibility = Visibility.Collapsed; NoContactsBlock.Visibility = Visibility.Visible; } } /// <summary> /// Called when any contact is selected. /// </summary> /// <param name="sender"> /// The sender. /// </param> /// <param name="e"> /// The e. /// </param> private void ContactsListSelectionChanged(object sender, SelectionChangedEventArgs e) { this.result = new ContactPickerTask.PickResult(TaskResult.OK) { Contact = e.AddedItems[0] as Contact }; this.Completed(this, this.result); if (NavigationService.CanGoBack) { NavigationService.GoBack(); } } } }
The-Best-Joke/SOS
plugins/cordova-plugin-contacts/src/wp/ContactPicker.xaml.cs
C#
apache-2.0
4,059
var path = require('path'); var fs = require('graceful-fs'); var zlib = require('zlib'); var DecompressZip = require('decompress-zip'); var tar = require('tar-fs'); var Q = require('q'); var mout = require('mout'); var junk = require('junk'); var createError = require('./createError'); // This forces the default chunk size to something small in an attempt // to avoid issue #314 zlib.Z_DEFAULT_CHUNK = 1024 * 8; var extractors; var extractorTypes; extractors = { '.zip': extractZip, '.tar': extractTar, '.tar.gz': extractTarGz, '.tgz': extractTarGz, '.gz': extractGz, 'application/zip': extractZip, 'application/x-zip': extractZip, 'application/x-zip-compressed': extractZip, 'application/x-tar': extractTar, 'application/x-tgz': extractTarGz, 'application/x-gzip': extractGz }; extractorTypes = Object.keys(extractors); function extractZip(archive, dst) { var deferred = Q.defer(); new DecompressZip(archive) .on('error', deferred.reject) .on('extract', deferred.resolve.bind(deferred, dst)) .extract({ path: dst, follow: false, // Do not follow symlinks (#699) filter: filterSymlinks // Filter symlink files }); return deferred.promise; } function extractTar(archive, dst) { var deferred = Q.defer(); fs.createReadStream(archive) .on('error', deferred.reject) .pipe(tar.extract(dst, { ignore: isSymlink // Filter symlink files })) .on('error', deferred.reject) .on('finish', deferred.resolve.bind(deferred, dst)); return deferred.promise; } function extractTarGz(archive, dst) { var deferred = Q.defer(); fs.createReadStream(archive) .on('error', deferred.reject) .pipe(zlib.createGunzip()) .on('error', deferred.reject) .pipe(tar.extract(dst, { ignore: isSymlink // Filter symlink files })) .on('error', deferred.reject) .on('finish', deferred.resolve.bind(deferred, dst)); return deferred.promise; } function extractGz(archive, dst) { var deferred = Q.defer(); fs.createReadStream(archive) .on('error', deferred.reject) .pipe(zlib.createGunzip()) .on('error', deferred.reject) .pipe(fs.createWriteStream(dst)) .on('error', deferred.reject) .on('close', deferred.resolve.bind(deferred, dst)); return deferred.promise; } function isSymlink(entry) { return entry.type === 'SymbolicLink'; } function filterSymlinks(entry) { return entry.type !== 'SymbolicLink'; } function getExtractor(archive) { // Make the archive lower case to match against the types // This ensures that upper-cased extensions work archive = archive.toLowerCase(); var type = mout.array.find(extractorTypes, function (type) { return mout.string.endsWith(archive, type); }); return type ? extractors[type] : null; } function isSingleDir(dir) { return Q.nfcall(fs.readdir, dir) .then(function (files) { var singleDir; // Remove any OS specific files from the files array // before checking its length files = files.filter(junk.isnt); if (files.length !== 1) { return false; } singleDir = path.join(dir, files[0]); return Q.nfcall(fs.stat, singleDir) .then(function (stat) { return stat.isDirectory() ? singleDir : false; }); }); } function moveSingleDirContents(dir) { var destDir = path.dirname(dir); return Q.nfcall(fs.readdir, dir) .then(function (files) { var promises; promises = files.map(function (file) { var src = path.join(dir, file); var dst = path.join(destDir, file); return Q.nfcall(fs.rename, src, dst); }); return Q.all(promises); }) .then(function () { return Q.nfcall(fs.rmdir, dir); }); } // ----------------------------- function canExtract(src, mimeType) { if (mimeType && mimeType !== 'application/octet-stream') { return !!getExtractor(mimeType); } return !!getExtractor(src); } // Available options: // - keepArchive: true to keep the archive afterwards (defaults to false) // - keepStructure: true to keep the extracted structure unchanged (defaults to false) function extract(src, dst, opts) { var extractor; var promise; opts = opts || {}; extractor = getExtractor(src); // Try to get extractor from mime type if (!extractor && opts.mimeType) { extractor = getExtractor(opts.mimeType); } // If extractor is null, then the archive type is unknown if (!extractor) { return Q.reject(createError('File ' + src + ' is not a known archive', 'ENOTARCHIVE')); } // Check archive file size promise = Q.nfcall(fs.stat, src) .then(function (stat) { if (stat.size <= 8) { throw createError('File ' + src + ' is an invalid archive', 'ENOTARCHIVE'); } // Extract archive return extractor(src, dst); }); // TODO: There's an issue here if the src and dst are the same and // The zip name is the same as some of the zip file contents // Maybe create a temp directory inside dst, unzip it there, // unlink zip and then move contents // Remove archive if (!opts.keepArchive) { promise = promise .then(function () { return Q.nfcall(fs.unlink, src); }); } // Move contents if a single directory was extracted if (!opts.keepStructure) { promise = promise .then(function () { return isSingleDir(dst); }) .then(function (singleDir) { return singleDir ? moveSingleDirContents(singleDir) : null; }); } // Resolve promise to the dst dir return promise.then(function () { return dst; }); } module.exports = extract; module.exports.canExtract = canExtract;
MatthieuCrouzet/Projet4A
test/node_modules/google-cdn/node_modules/bower/lib/util/extract.js
JavaScript
apache-2.0
5,976
๏ปฟ/* Copyright (c) 2003-2013, CKSource - Frederico Knabben. All rights reserved. For licensing, see LICENSE.html or http://ckeditor.com/license */ CKEDITOR.lang['et']={"editor":"Rikkalik tekstiredaktor","editorPanel":"Rich Text Editor panel","common":{"editorHelp":"Abi saamiseks vajuta ALT 0","browseServer":"Serveri sirvimine","url":"URL","protocol":"Protokoll","upload":"Laadi รผles","uploadSubmit":"Saada serverisse","image":"Pilt","flash":"Flash","form":"Vorm","checkbox":"Mรคrkeruut","radio":"Raadionupp","textField":"Tekstilahter","textarea":"Tekstiala","hiddenField":"Varjatud lahter","button":"Nupp","select":"Valiklahter","imageButton":"Piltnupp","notSet":"<mรครคramata>","id":"ID","name":"Nimi","langDir":"Keele suund","langDirLtr":"Vasakult paremale (LTR)","langDirRtl":"Paremalt vasakule (RTL)","langCode":"Keele kood","longDescr":"Pikk kirjeldus URL","cssClass":"Stiilistiku klassid","advisoryTitle":"Soovituslik pealkiri","cssStyle":"Laad","ok":"OK","cancel":"Loobu","close":"Sulge","preview":"Eelvaade","resize":"Suuruse muutmiseks lohista","generalTab":"รœldine","advancedTab":"Tรคpsemalt","validateNumberFailed":"See vรครคrtus pole number.","confirmNewPage":"Kรตik salvestamata muudatused lรคhevad kaotsi. Kas oled kindel, et tahad laadida uue lehe?","confirmCancel":"Mรตned valikud on muudetud. Kas oled kindel, et tahad dialoogi sulgeda?","options":"Valikud","target":"Sihtkoht","targetNew":"Uus aken (_blank)","targetTop":"Kรตige รผlemine aken (_top)","targetSelf":"Sama aken (_self)","targetParent":"Vanemaken (_parent)","langDirLTR":"Vasakult paremale (LTR)","langDirRTL":"Paremalt vasakule (RTL)","styles":"Stiili","cssClasses":"Stiililehe klassid","width":"Laius","height":"Kรตrgus","align":"Joondus","alignLeft":"Vasak","alignRight":"Paremale","alignCenter":"Kesk","alignTop":"รœles","alignMiddle":"Keskele","alignBottom":"Alla","invalidValue":"Vigane vรครคrtus.","invalidHeight":"Kรตrgus peab olema number.","invalidWidth":"Laius peab olema number.","invalidCssLength":"\"%1\" vรคlja jaoks mรครคratud vรครคrtus peab olema positiivne tรคisarv CSS รผhikuga (px, %, in, cm, mm, em, ex, pt vรตi pc) vรตi ilma.","invalidHtmlLength":"\"%1\" vรคlja jaoks mรครคratud vรครคrtus peab olema positiivne tรคisarv HTML รผhikuga (px vรตi %) vรตi ilma.","invalidInlineStyle":"Reasisese stiili mรครคrangud peavad koosnema paarisvรครคrtustest (tuples), mis on semikoolonitega eraldatult jรคrgnevas vormingus: \"nimi : vรครคrtus\".","cssLengthTooltip":"Sisesta vรครคrtus pikslites vรตi number koos sobiva CSS-i รผhikuga (px, %, in, cm, mm, em, ex, pt vรตi pc).","unavailable":"%1<span class=\"cke_accessibility\">, pole saadaval</span>"},"about":{"copy":"Copyright &copy; $1. Kรตik รตigused kaitstud.","dlgTitle":"CKEditorist","help":"Abi jaoks vaata $1.","moreInfo":"Litsentsi andmed leiab meie veebilehelt:","title":"CKEditorist","userGuide":"CKEditori kasutusjuhendit"},"basicstyles":{"bold":"Paks","italic":"Kursiiv","strike":"Lรคbijoonitud","subscript":"Allindeks","superscript":"รœlaindeks","underline":"Allajoonitud"},"blockquote":{"toolbar":"Blokktsitaat"},"clipboard":{"copy":"Kopeeri","copyError":"Sinu veebisirvija turvaseaded ei luba redaktoril automaatselt kopeerida. Palun kasutage selleks klaviatuuri klahvikombinatsiooni (Ctrl/Cmd+C).","cut":"Lรตika","cutError":"Sinu veebisirvija turvaseaded ei luba redaktoril automaatselt lรตigata. Palun kasutage selleks klaviatuuri klahvikombinatsiooni (Ctrl/Cmd+X).","paste":"Aseta","pasteArea":"Asetamise ala","pasteMsg":"Palun aseta tekst jรคrgnevasse kasti kasutades klaviatuuri klahvikombinatsiooni (<STRONG>Ctrl/Cmd+V</STRONG>) ja vajuta seejรคrel <STRONG>OK</STRONG>.","securityMsg":"Sinu veebisirvija turvaseadete tรตttu ei oma redaktor otsest ligipรครคsu lรตikelaua andmetele. Sa pead asetama need uuesti siia aknasse.","title":"Asetamine"},"contextmenu":{"options":"Kontekstimenรผรผ valikud"},"toolbar":{"toolbarCollapse":"Tรถรถriistariba peitmine","toolbarExpand":"Tรถรถriistariba nรคitamine","toolbarGroups":{"document":"Dokument","clipboard":"Lรตikelaud/tagasivรตtmine","editing":"Muutmine","forms":"Vormid","basicstyles":"Pรตhistiilid","paragraph":"Lรตik","links":"Lingid","insert":"Sisesta","styles":"Stiilid","colors":"Vรคrvid","tools":"Tรถรถriistad"},"toolbars":"Redaktori tรถรถriistaribad"},"elementspath":{"eleLabel":"Elementide asukoht","eleTitle":"%1 element"},"format":{"label":"Vorming","panelTitle":"Vorming","tag_address":"Aadress","tag_div":"Tavaline (DIV)","tag_h1":"Pealkiri 1","tag_h2":"Pealkiri 2","tag_h3":"Pealkiri 3","tag_h4":"Pealkiri 4","tag_h5":"Pealkiri 5","tag_h6":"Pealkiri 6","tag_p":"Tavaline","tag_pre":"Vormindatud"},"horizontalrule":{"toolbar":"Horisontaaljoone sisestamine"},"image":{"alertUrl":"Palun kirjuta pildi URL","alt":"Alternatiivne tekst","border":"Joon","btnUpload":"Saada serverisse","button2Img":"Kas tahad teisendada valitud pildiga nupu tavaliseks pildiks?","hSpace":"H. vaheruum","img2Button":"Kas tahad teisendada valitud tavalise pildi pildiga nupuks?","infoTab":"Pildi info","linkTab":"Link","lockRatio":"Lukusta kuvasuhe","menu":"Pildi omadused","resetSize":"Lรคhtesta suurus","title":"Pildi omadused","titleButton":"Piltnupu omadused","upload":"Lae รผles","urlMissing":"Pildi lรคhte-URL on puudu.","vSpace":"V. vaheruum","validateBorder":"ร„รคrise laius peab olema tรคisarv.","validateHSpace":"Horisontaalne vaheruum peab olema tรคisarv.","validateVSpace":"Vertikaalne vaheruum peab olema tรคisarv."},"indent":{"indent":"Taande suurendamine","outdent":"Taande vรคhendamine"},"fakeobjects":{"anchor":"Ankur","flash":"Flashi animatsioon","hiddenfield":"Varjatud vรคli","iframe":"IFrame","unknown":"Tundmatu objekt"},"link":{"acccessKey":"Juurdepรครคsu vรตti","advanced":"Tรคpsemalt","advisoryContentType":"Juhendava sisu tรผรผp","advisoryTitle":"Juhendav tiitel","anchor":{"toolbar":"Ankru sisestamine/muutmine","menu":"Ankru omadused","title":"Ankru omadused","name":"Ankru nimi","errorName":"Palun sisesta ankru nimi","remove":"Eemalda ankur"},"anchorId":"Elemendi id jรคrgi","anchorName":"Ankru nime jรคrgi","charset":"Lingitud ressursi mรคrgistik","cssClasses":"Stiilistiku klassid","emailAddress":"E-posti aadress","emailBody":"Sรตnumi tekst","emailSubject":"Sรตnumi teema","id":"ID","info":"Lingi info","langCode":"Keele suund","langDir":"Keele suund","langDirLTR":"Vasakult paremale (LTR)","langDirRTL":"Paremalt vasakule (RTL)","menu":"Muuda linki","name":"Nimi","noAnchors":"(Selles dokumendis pole ankruid)","noEmail":"Palun kirjuta e-posti aadress","noUrl":"Palun kirjuta lingi URL","other":"<muu>","popupDependent":"Sรตltuv (Netscape)","popupFeatures":"Hรผpikakna omadused","popupFullScreen":"Tรคisekraan (IE)","popupLeft":"Vasak asukoht","popupLocationBar":"Aadressiriba","popupMenuBar":"Menรผรผriba","popupResizable":"Suurust saab muuta","popupScrollBars":"Kerimisribad","popupStatusBar":"Olekuriba","popupToolbar":"Tรถรถriistariba","popupTop":"รœlemine asukoht","rel":"Suhe","selectAnchor":"Vali ankur","styles":"Laad","tabIndex":"Tab indeks","target":"Sihtkoht","targetFrame":"<raam>","targetFrameName":"Sihtmรคrk raami nimi","targetPopup":"<hรผpikaken>","targetPopupName":"Hรผpikakna nimi","title":"Link","toAnchor":"Ankur sellel lehel","toEmail":"E-post","toUrl":"URL","toolbar":"Lingi lisamine/muutmine","type":"Lingi liik","unlink":"Lingi eemaldamine","upload":"Lae รผles"},"list":{"bulletedlist":"Punktloend","numberedlist":"Numberloend"},"magicline":{"title":"Sisesta siia lรตigu tekst"},"maximize":{"maximize":"Maksimeerimine","minimize":"Minimeerimine"},"pastetext":{"button":"Asetamine tavalise tekstina","title":"Asetamine tavalise tekstina"},"pastefromword":{"confirmCleanup":"Tekst, mida tahad asetada nรคib pรคrinevat Wordist. Kas tahad selle enne asetamist puhastada?","error":"Asetatud andmete puhastamine ei olnud sisemise vea tรตttu vรตimalik","title":"Asetamine Wordist","toolbar":"Asetamine Wordist"},"removeformat":{"toolbar":"Vormingu eemaldamine"},"sourcearea":{"toolbar":"Lรคhtekood"},"specialchar":{"options":"Erimรคrkide valikud","title":"Erimรคrgi valimine","toolbar":"Erimรคrgi sisestamine"},"scayt":{"about":"SCAYT-ist lรคhemalt","aboutTab":"Lรคhemalt","addWord":"Lisa sรตna","allCaps":"Lรคbivate suurtรคhtedega sรตnade eiramine","dic_create":"Loo","dic_delete":"Kustuta","dic_field_name":"Sรตnaraamatu nimi","dic_info":"Alguses sรคilitatakse kasutaja sรตnaraamatut kรผpsises. Kรผpsise suurus on piiratud. Pรคrast sรตnaraamatu kasvamist nii suureks, et see kรผpsisesse ei mahu, vรตib sรตnaraamatut hoida meie serveris. Oma isikliku sรตnaraamatu hoidmiseks meie serveris pead andma sellele nime. Kui sa juba oled sรตnaraamatu salvestanud, sisesta selle nimi ja klรตpsa taastamise nupule.","dic_rename":"Nimeta รผmber","dic_restore":"Taasta","dictionariesTab":"Sรตnaraamatud","disable":"SCAYT keelatud","emptyDic":"Sรตnaraamatu nimi ei tohi olla tรผhi.","enable":"SCAYT lubatud","ignore":"Eira","ignoreAll":"Eira kรตiki","ignoreDomainNames":"Domeeninimede eiramine","langs":"Keeled","languagesTab":"Keeled","mixedCase":"Tavapรคratu tรตstuga sรตnade eiramine","mixedWithDigits":"Numbreid sisaldavate sรตnade eiramine","moreSuggestions":"Veel soovitusi","opera_title":"Operas pole toetatud","options":"Valikud","optionsTab":"Valikud","title":"ร•igekirjakontroll kirjutamise ajal","toggle":"SCAYT sisse/vรคlja lรผlitamine","noSuggestions":"No suggestion"},"stylescombo":{"label":"Stiil","panelTitle":"Vormindusstiilid","panelTitle1":"Blokkstiilid","panelTitle2":"Reasisesed stiilid","panelTitle3":"Objektistiilid"},"table":{"border":"Joone suurus","caption":"Tabeli tiitel","cell":{"menu":"Lahter","insertBefore":"Sisesta lahter enne","insertAfter":"Sisesta lahter peale","deleteCell":"Eemalda lahtrid","merge":"รœhenda lahtrid","mergeRight":"รœhenda paremale","mergeDown":"รœhenda alla","splitHorizontal":"Poolita lahter horisontaalselt","splitVertical":"Poolita lahter vertikaalselt","title":"Lahtri omadused","cellType":"Lahtri liik","rowSpan":"Ridade vahe","colSpan":"Tulpade vahe","wordWrap":"Sรตnade murdmine","hAlign":"Horisontaalne joondus","vAlign":"Vertikaalne joondus","alignBaseline":"Baasjoon","bgColor":"Tausta vรคrv","borderColor":"ร„รคrise vรคrv","data":"Andmed","header":"Pรคis","yes":"Jah","no":"Ei","invalidWidth":"Lahtri laius peab olema number.","invalidHeight":"Lahtri kรตrgus peab olema number.","invalidRowSpan":"Ridade vahe peab olema tรคisarv.","invalidColSpan":"Tulpade vahe peab olema tรคisarv.","chooseColor":"Vali"},"cellPad":"Lahtri tรคidis","cellSpace":"Lahtri vahe","column":{"menu":"Veerg","insertBefore":"Sisesta veerg enne","insertAfter":"Sisesta veerg peale","deleteColumn":"Eemalda veerud"},"columns":"Veerud","deleteTable":"Kustuta tabel","headers":"Pรคised","headersBoth":"Mรตlemad","headersColumn":"Esimene tulp","headersNone":"Puudub","headersRow":"Esimene rida","invalidBorder":"ร„รคrise suurus peab olema number.","invalidCellPadding":"Lahtrite polsterdus (padding) peab olema positiivne arv.","invalidCellSpacing":"Lahtrite vahe peab olema positiivne arv.","invalidCols":"Tulpade arv peab olema nullist suurem.","invalidHeight":"Tabeli kรตrgus peab olema number.","invalidRows":"Ridade arv peab olema nullist suurem.","invalidWidth":"Tabeli laius peab olema number.","menu":"Tabeli omadused","row":{"menu":"Rida","insertBefore":"Sisesta rida enne","insertAfter":"Sisesta rida peale","deleteRow":"Eemalda read"},"rows":"Read","summary":"Kokkuvรตte","title":"Tabeli omadused","toolbar":"Tabel","widthPc":"protsenti","widthPx":"pikslit","widthUnit":"laiuse รผhik"},"undo":{"redo":"Toimingu kordamine","undo":"Tagasivรตtmine"},"wsc":{"btnIgnore":"Ignoreeri","btnIgnoreAll":"Ignoreeri kรตiki","btnReplace":"Asenda","btnReplaceAll":"Asenda kรตik","btnUndo":"Vรตta tagasi","changeTo":"Muuda","errorLoading":"Viga rakenduse teenushosti laadimisel: %s.","ieSpellDownload":"ร•igekirja kontrollija ei ole paigaldatud. Soovid sa selle alla laadida?","manyChanges":"ร•igekirja kontroll sooritatud: %1 sรตna muudetud","noChanges":"ร•igekirja kontroll sooritatud: รผhtegi sรตna ei muudetud","noMispell":"ร•igekirja kontroll sooritatud: รตigekirjuvigu ei leitud","noSuggestions":"- Soovitused puuduvad -","notAvailable":"Kahjuks ei ole teenus praegu saadaval.","notInDic":"Puudub sรตnastikust","oneChange":"ร•igekirja kontroll sooritatud: รผks sรตna muudeti","progress":"Toimub รตigekirja kontroll...","title":"ร•igekirjakontroll","toolbar":"ร•igekirjakontroll"}};
SLIIT-FacultyOfComputing/Digital-Pulz-for-Hospitals
HIS_Latest_Project_29-07-2016/Pharmacy/Pharmacy/public/plugins/ckeditor/lang/et.js
JavaScript
apache-2.0
12,359
/* * Copyright 2003-2007 Dave Griffith, Bas Leijdekkers * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.siyeh.ig.javabeans; import com.intellij.psi.PsiClass; import com.intellij.psi.PsiField; import com.intellij.psi.PsiMethod; import com.intellij.psi.PsiModifier; import com.intellij.psi.util.PropertyUtil; import com.siyeh.InspectionGadgetsBundle; import com.siyeh.ig.BaseInspection; import com.siyeh.ig.BaseInspectionVisitor; import org.jetbrains.annotations.NotNull; public class FieldHasSetterButNoGetterInspection extends BaseInspection { @Override @NotNull public String getDisplayName() { return InspectionGadgetsBundle.message( "field.has.setter.but.no.getter.display.name"); } @Override @NotNull protected String buildErrorString(Object... infos) { return InspectionGadgetsBundle.message( "field.has.setter.but.no.getter.problem.descriptor"); } @Override public BaseInspectionVisitor buildVisitor() { return new FieldHasSetterButNoGetterVisitor(); } private static class FieldHasSetterButNoGetterVisitor extends BaseInspectionVisitor { @Override public void visitField(@NotNull PsiField field) { final String propertyName = PropertyUtil.suggestPropertyName(field); final boolean isStatic = field.hasModifierProperty(PsiModifier.STATIC); final PsiClass containingClass = field.getContainingClass(); final PsiMethod setter = PropertyUtil.findPropertySetter(containingClass, propertyName, isStatic, false); if (setter == null) { return; } final PsiMethod getter = PropertyUtil.findPropertyGetter(containingClass, propertyName, isStatic, false); if (getter != null) { return; } registerFieldError(field); } } }
MER-GROUP/intellij-community
plugins/InspectionGadgets/InspectionGadgetsAnalysis/src/com/siyeh/ig/javabeans/FieldHasSetterButNoGetterInspection.java
Java
apache-2.0
2,287
/////////////////////////////////////////////////////////////// // Copyright 2012 John Maddock. Distributed under the Boost // Software License, Version 1.0. (See accompanying file // LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_ //[safe_prime #include <boost/multiprecision/cpp_int.hpp> #include <boost/multiprecision/miller_rabin.hpp> #include <iostream> #include <iomanip> int main() { using namespace boost::random; using namespace boost::multiprecision; typedef cpp_int int_type; mt11213b base_gen(clock()); independent_bits_engine<mt11213b, 256, int_type> gen(base_gen); // // We must use a different generator for the tests and number generation, otherwise // we get false positives. // mt19937 gen2(clock()); for(unsigned i = 0; i < 100000; ++i) { int_type n = gen(); if(miller_rabin_test(n, 25, gen2)) { // Value n is probably prime, see if (n-1)/2 is also prime: std::cout << "We have a probable prime with value: " << std::hex << std::showbase << n << std::endl; if(miller_rabin_test((n-1)/2, 25, gen2)) { std::cout << "We have a safe prime with value: " << std::hex << std::showbase << n << std::endl; return 0; } } } std::cout << "Ooops, no safe primes were found" << std::endl; return 1; } //]
m0ppers/arangodb
3rdParty/boost/1.61.0/libs/multiprecision/example/safe_prime.cpp
C++
apache-2.0
1,368
/*bootstrap*/ .popover { max-width: none; } legend { margin-bottom: 10px; font-size: 18px; line-height: 30px; font-weight: bold; } legend.small { font-size: 14px; } label, input, button, select, textarea { font-size: 12px; line-height: 18px; } input[type="text"], input[type="email"], input[type="tel"], input[type="search"], input[type="url"], input[type="password"], .ui-autocomplete-input, textarea, .uneditable-input{ font-size: 12px; line-height: 16px; height: 16px; padding: 5px; } textarea { height: auto; } .input-append input, .input-prepend input, .input-append select, .input-prepend select, .input-append .uneditable-input, .input-prepend .uneditable-input, .input-append .dropdown-menu, .input-prepend .dropdown-menu, .input-append .popover, .input-prepend .popover { font-size: 12px; } .input-append .add-on, .input-prepend .add-on { height: 18px; font-size: 12px; line-height: 18px; } .input-append input, .input-append select, .input-append .uneditable-input{ -webkit-border-radius: 4px 0 0 4px; -moz-border-radius: 4px 0 0 4px; border-radius: 4px 0 0 4px; } input.search-query { padding-right: 12px; } .btn { font-size: 12px; padding: 5px 8px; } .btn [class^="icon-"], .nav [class^="icon-"], .btn [class*=" icon-"], .nav [class*=" icon-"] { font-size: 13px; } .btn-group > .btn, .btn-group > .dropdown-menu, .btn-group > .popover { font-size: 12px; } /*btn-group ๅตŒๅฅ— btn-groupๆ—ถ*/ .btn-group > .btn-group.last > .btn { border-bottom-left-radius: 0px; -webkit-border-top-left-radius: 0px; border-top-left-radius: 0px; -moz-border-radius-bottomleft: 0px; -moz-border-radius-topleft: 0px; border-left-width: 0px; } .btn-group > .btn-group.last:first-child > .btn { border-bottom-left-radius: 4px; -webkit-border-top-left-radius: 4px; border-top-left-radius: 4px; -moz-border-radius-bottomleft: 4px; -moz-border-radius-topleft: 4px; border-left-width: 1px; } .btn-group > .btn-group.first > .btn:first-child { border-bottom-right-radius: 0px; -webkit-border-top-right-radius: 0px; border-top-right-radius: 0px; -moz-border-radius-bottomright: 0px; -moz-border-radius-topright: 0px; border-right-width: 0px; } .btn-group > .btn-group.first:last-child > .btn:first-child { border-bottom-right-radius: 4px; -webkit-border-top-right-radius: 4px; border-top-right-radius: 4px; -moz-border-radius-bottomright: 4px; -moz-border-radius-topright: 4px; border-right-width: 1px; } .input-mini { width: 20px; } .input-small { width: 65px; } .input-medium { width: 140px; } .input-large { width: 180px; } .input-xlarge { width: 220px; } .input-xxlarge { width: 430px; } .popover-title { font-size: 12px; } .dropdown-menu .btn { text-align: left; background: none; border: none; } .dropdown-menu > li > a:hover, .dropdown-menu > li > a:focus, .dropdown-submenu:hover > a, .dropdown-submenu:focus > a { text-shadow: none; } .inline-radio label{ display: inline-block; padding-top: 5px; margin-bottom: 0; vertical-align: middle; margin-right: 5px!important; } .pagination { margin: 10px 0; } .pagination ul, .pagination div { float: left; } .ui-progressbar .ui-progressbar-value{background: #ddd;} .progress-label { float: left; margin-left: 50%; margin-top: 5px; font-weight: bold; text-shadow: 1px 1px 0 #fff; } .modal{ width: 400px; font-size: 14px; left: 60%; } .modal input { margin-bottom: 5px; } .modal.fade.in { top: 15%; } .modal,.modal-backdrop { z-index:999999998!important; } .modal-gallery { z-index:999999999!important; } .dropdown-menu.left { left: -95%; min-width: 80px; } .ui-dialog-titlebar { padding: 5px; } .ui-dialog-titlebar > .ui-dialog-title{ margin-top: 0px; padding: 5px 0 0 0px; } .ui-dialog-titlebar > .ui-button { top: 70%; } .ui-dialog-titlebar.no-title { border: 0px; } .ui-dialog-titlebar.no-title .ui-dialog-title { height: 0px; } .ui-dialog-titlebar.no-title > .ui-button { top: 100%; z-index: 1; } [class^="icon-"], [class*=" icon-"] { font-size: 13px; } .icon6[class^="icon-"], .icon6[class*=" icon-"] { font-size:280px; } .icon5[class^="icon-"], .icon5[class*=" icon-"] { font-size:140px; } .icon4[class^="icon-"], .icon4[class*=" icon-"] { font-size:123px; } .icon3[class^="icon-"], .icon3[class*=" icon-"] { font-size:61px; } .icon2[class^="icon-"], .icon2[class*=" icon-"] { font-size:30px; } .icon1[class^="icon-"], .icon1[class*=" icon-"] { font-size:13px; } /*bootstrap*/ html, body { font-size: 12px; width: 100%; height: 100%; padding: 0; margin: 0; /*overflow: auto; *//* when page gets too small */ } .index { /*overflow: hidden;*/ } .bg { background:#eee; } .ui-layout-resizer { background: #eee; } .ui-layout-toggler{ border: 0px; } .ui-layout-pane{ border: 1px solid #eee; } .index-panel { margin:0 auto; width: 1020px; height:99.7%; border: 1px solid #eee; background: #fff; } .panel { padding: 5px 5px 0 5px; } #index-panel .ui-layout-pane{ border: 0px!important; } .index-header { background:#1999d7; background: url(../images/bg-header.png) no-repeat; } #userinfo { font-size: 12px; font-weight: bold; position:absolute; top:0;right:0; margin: 5px 15px 0 0; z-index: 999; /*background-clip: padding-box;*/ /*background-color: #ffffff;*/ /*border: 1px solid #ddd;*/ /*border-radius: 6px 6px 6px 6px;*/ /*-webkit-box-shadow: 0 3px 7px #ddd;*/ /*-moz-box-shadow: 0 3px 7px #ddd;*/ /*box-shadow: 0 3px 7px #ddd;*/ /*outline: medium none;*/ /*border: 1px solid #f5f5f5;*/ } #userinfo a { color: #0099ff; } #userinfo .dropdown-menu a:hover { color: #fff; } #userinfo .dropdown-menu { min-width: 80px; cursor: pointer; } #userinfo .btn-message.unread { color:#b94a48; } #userinfo .btn-message > .icon-count{ font-size: 10px; } #user-navbar .btn * { text-shadow: none; } #user-navbar > ul > li { margin: 0; padding: 0; } #user-navbar > ul > li > a { padding: 3px 10px; } .notification-list { top: 26px; left: -17px; display: hidden; width: 320px; z-index: 0; } .notification-list a { cursor: pointer; } .notification-list.in { display: block; } .notification-list .popover-title, .notification-list .popover-title a { color:#b94a48!important; font-weight: bold; } .notification-list .popover-title .pre, .notification-list .popover-title .next { font-weight: normal; } .notification-list .popover-title a.none { color: #999!important; text-decoration: none; cursor: auto; } .notification-list .arrow { overflow: visible; } .notification-list .content { overflow: hidden; position: relative; display: block; } .notification-list .loading .popover-content,.notification-list .no-comment .popover-content { text-align: center; font-weight: normal; color: #999999; padding: 20px; } .notification-list .popover-content { padding: 0px; } .notification-list .menu .popover-content > li { cursor: pointer; } .notification-list .popover-content > li,.notification-list .popover-content .notification-detail { border-bottom: 1px solid #f3f3f3; line-height: 40px; padding: 0 20px 0 10px; color: #999; font-weight: normal; } .notification-list .popover-content > li:hover { background: #f8f8f8; } .notification-list .popover-content > li.unread { color:#b94a48!important; font-weight: bold; } .notification-list .popover-content .notification-detail { padding: 0px; } .notification-list .popover-content .notification-detail .title { color: #555; background: #f8f8f8; padding: 0 20px 0 10px; cursor: auto; } .notification-list .popover-content .notification-detail .content { padding: 10px 20px 10px 10px; line-height: 20px; } .ui-layout-west { z-index: 2!important; } .ui-layout-pane { padding: 1px 1px 1px 1px; } .index-panel > .ui-layout-resizer-north{ background: #0088cc; } iframe[tabs="true"] { border: 0px!important; /*margin:41px 0 0 0!important;*/ /*position: relative!important;*/ } .ui-dialog { font-size: 12px; } #menu .submenu { padding:0px; background: #f5f5f5; padding-bottom: 10px; } #menu ul{ list-style: none; font-size: 12px; font-weight: normal; margin:0px; } #menu li { padding-left: 15px; margin: 8px 0 8px 0; line-height: 22px; } #menu .li-wrapper { margin-left: -15px; padding-left: 15px; } #menu .li-wrapper:hover { background: #ddd; } #menu .li-wrapper.active, #menu .li-wrapper.active:hover{ background: #0088cc; color: white; } #menu li { color: #777; cursor: pointer; } #menu li a { color: #0088cc; font-weight: bold; text-decoration: none; } #menu .active a{ color: white!important; } #menu .menu-icon{ padding-right: 5px; color: #f89406; } #menu .ui-accordion-header{ /*height: 35px;*/ line-height: 20px; } #menu .ui-accordion-header a { color:#0088cc; } #menu h3 { padding-left: 15px; } #menu .menu-header-icon{ float: left; margin-top: 3px; margin-left: 8px; margin-right: 8px; color: #f89406; position: relative; left: 0; top: 0; } .tabs-bar{ background: none; } .tabs-bar.tabs-fix-top { position: relative; float: left; top: 62px; width: 100%; left: 217px; right: 0px; height: 40px; z-index: 1; } .tabs-bar ul { width: 100000px; } .tabs-bar .ul-wrapper { width: 801px; overflow: hidden; float: left; } .tabs-bar.ui-tabs .ui-tabs-nav { margin-bottom: 0px; border: none; } .tabs-bar.ui-tabs .ui-tabs-nav li a { color: #0088cc; border: 0px; cursor: pointer; padding: 10px 10px 10px 20px; } .tabs-bar.ui-tabs .ui-tabs-nav li{ font-size: 12px; background-color: #f5f5f5; background-repeat: repeat-x; background-image: -webkit-linear-gradient(top, #ffffff, #f5f5f5); background-image: -moz-linear-gradient(top, #ffffff, #f5f5f5); background-image: -o-linear-gradient(top, #ffffff, #f5f5f5); background-image: linear-gradient(top, #ffffff, #f5f5f5); filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffffff', endColorstr='#f5f5f5', GradientType=0); -webkit-border-radius: 3px; -moz-border-radius: 3px; border-radius: 3px; color: #6a6a6a; border: 1px solid #a1acb8; border-bottom: 0px; } .tabs-bar.ui-tabs .ui-tabs-nav li:hover { background-color: #e6e6e6; background-repeat: repeat-x; background-image: -webkit-linear-gradient(top, #ffffff, #e6e6e6); background-image: -moz-linear-gradient(top, #ffffff, #e6e6e6); background-image: -o-linear-gradient(top, #ffffff, #e6e6e6); background-image: linear-gradient(top, #ffffff, #e6e6e6); filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffffff', endColorstr='#e6e6e6', GradientType=0); } .tabs-bar.ui-tabs .ui-tabs-nav li.ui-tabs-active a { background: none; border: none; color: #fff; font-weight: bold; } .tabs-bar.ui-tabs .ui-tabs-nav li.ui-tabs-active,.tabs-bar.ui-tabs .ui-tabs-nav li.ui-tabs-active:hover{ font-weight: bold; background: #0088cc; background: -ms-linear-gradient(top,#0069D6,#0088cc); background: -o-linear-gradient(top,#0069D6,#0088cc); background: -moz-linear-gradient(top,#0069D6,#0088cc); background: -webkit-linear-gradient(top,#0069D6,#0088cc); background: linear-gradient(top,#0069D6,#0088cc); filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#0069D6', endColorstr='#0088cc', GradientType=0); border-color: #0069D6; color: #fff; border: 1px solid #135aa4; padding-right: 3px; } .tabs-bar.ui-tabs .ui-tabs-nav li .menu, .tabs-bar.ui-tabs .ui-tabs-nav li .menu:hover { font-size: 12px; cursor: pointer; color: #fff; } .tabs-bar .icon-chevron-left, .tabs-bar .icon-chevron-right { position: absolute; top: 0px; z-index: 999; height: 38px; line-height: 39px; padding: 0px 2px; cursor: pointer; border: 1px solid #ddd; float: left; color: #0069D6; background-color: #e6e6e6; background-repeat: repeat-x; background-image: -webkit-linear-gradient(top, #ffffff, #e6e6e6); background-image: -moz-linear-gradient(top, #ffffff, #e6e6e6); background-image: -o-linear-gradient(top, #ffffff, #e6e6e6); background-image: linear-gradient(top, #ffffff, #e6e6e6); filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffffff', endColorstr='#e6e6e6', GradientType=0); } .tabs-bar .icon-chevron-left { left: 0px; } .tabs-bar .icon-chevron-right { right: 0px; } .pagination ul > li > a, .pagination ul > li > span { padding: 4px 10px; } .page-input{ margin-left: 30px; } .page-input input { width: 30px; text-align: center; } .page-btn{ margin-bottom: 10px; } .page-info{ float: none!important; border: 0px!important; } .table { margin-bottom: 0; } .table .check{ text-align: center; } .table > tbody > tr.active, .table > tbody > tr.active > td{ background: #eee; } .table > tbody > tr.active * { text-shadow: none; } .popover { color: #333; } .sort-table .sort-th { cursor: pointer; } .sort-table .sort-title { float: left; } .sort-table .sort { float: right; } .sort-table .sort .sort-hover { color: #f89406!important; } .control-group { margin-bottom: 0px; } .control-group .input-append{ display:block; } .ui-layout-toggler .content { font: 12px bold Verdana, Verdana, Arial, Helvetica, sans-serif; } /*.ui-layout-toggler:hover .content {*/ /* mouse-over */ /*color: #0069D6;*/ /*}*/ .table .btn-edit { padding: 0px!important; } .confirm .title { color: #bd362f; } .moveable{ float: right; } .moveable .ui-icon { float: left; } .moveable .none{ width: 16px; height: 16px; float: left; } .moveable .btn-link { font-size: 12px!important; } .sortable-placeholder { height: 3em; line-height: 3em; } .ajax-upload-view .progress { width: 300px; height: 30px; float: left; } .ajax-upload-view .alert { width: 300px; } .tree.ui-layout-center , .tree.ui-layout-west { overflow: hidden; } .tree iframe { padding: 0 !important; /* iframes should not have padding */ overflow: auto !important; } .treeContent { margin-top: 2px; border: 1px solid #617775; background: #f0f6e4; width: 240px; height: 250px; overflow-y: scroll; overflow-x: auto; } .ztree li span.button.add { margin-left: 2px; margin-right: -1px; background-position: -144px 0; vertical-align: top; *vertical-align: middle; } .ztree li a input.rename{ height:16px!important; } .ztree li a.curSelectedNode_Edit { height: 19px!important; } .prettyprint { padding: 8px; background-color: #f7f7f9; border: 1px solid #e1e1e8; } .no-margin{ margin-bottom: 0px!important; border: 0px; } .margin-1{ padding: 0px 1px; } .no-padding{ padding: 0px; } .child-data > td,.child-data > td:hover { background: #E9F3FF!important; } /*้ป˜่ฎคไผšๅ็งป*/ .ui-spinner-input{ margin: 0px; } /*้ป˜่ฎคไผš้š่—ๆމerrorๆ็คบ*/ .ui-spinner{ overflow: visible; } .tool.ui-toolbar { width: auto; margin: 0 auto 1px 0; padding: 3px; } .search-form { float: right; margin-bottom: 0px; margin-right: 10px; } .search-form .accordion-inner { padding: 9px 0; border-top: 0px; } .file-input label { padding: 4px 12px; margin-bottom: 0px; } .file-input a { padding: 0px; } /*file upload*/ .file-input input[type="file"] { position: absolute; left: 0; top: 20000; opacity: 0; filter: alpha(opacity=0); z-index: 1; width: 0px; margin: 0px; height: 0px; line-height: 0px; } .file-input .name { margin-left: 8px; } .login { width: 400px; margin: 120px auto; background-clip: padding-box; background-color: #ffffff; border: 1px solid #ddd; border-radius: 6px 6px 6px 6px; -webkit-box-shadow: 0 3px 7px #ddd; -moz-box-shadow: 0 3px 7px #ddd; box-shadow: 0 3px 7px #ddd; outline: medium none; } .login .title { color: #fff; font-size: 25px; font-weight: bold; padding: 8px; background: #1999d7; background-clip: padding-box; border-radius: 6px 6px 0 0; line-height: 30px; } .login .form { padding: 30px 0 10px 30px; color: #777; } .login .remember { width: 180px; float: left; } .login .btn-login { padding: 5px 20px; } .jcaptcha-img { cursor: pointer; } /*.panel .nav-tabs {*/ /*margin-bottom: 10px;*/ /*}*/ /* Footer -------------------------------------------------- */ .footer { text-align: center; padding: 30px 0; margin-top: 70px; border-top: 1px solid #e5e5e5; background-color: #f5f5f5; } .footer p { margin-bottom: 0; color: #777; } .footer-links { margin: 10px 0; } .footer-links li { display: inline; padding: 0 2px; } .footer-links li:first-child { padding-left: 0; } .tree-search { margin-bottom: 0px; } .tree-search label { float: left; line-height: 28px; margin-right: 5px; margin-left: 10px; } .font-12 { font-size: 12px; font-weight: normal; } .font-14 { font-size: 14px; font-weight: normal; } .form-small .control-label { width: 60px; } .form-small .controls{ margin-left: 70px; } .form-medium .control-label { width: 90px; margin-right: 10px; } .form-medium .controls{ margin-left: 90px; } .hr{ /*color:#555555;*/ border-bottom: 1px solid #ddd; float: left; width: 100%; } .left-group{ width: 100%;float: left;padding: 20px 0; } .sys-icon-list{ width: 320px; height: 315px; position: absolute; background: #fff; -webkit-border-radius: 3px; -moz-border-radius: 3px; border-radius: 3px; border: 1px solid #a1acb8; padding: 3px 0 3px 0; } .bold { font-weight: bold; } .no-underline { text-decoration: none; } .no-underline:hover { text-decoration: none; } .message { margin: 0 20px; } .message .accordion-inner { border-top : 0px; } .message .accordion-heading > .accordion-toggle { color: #333; } /**็ณป็ปŸ็ฎก็† css start*/ .sys-icon-list .nav { margin: 0 0 5px 3px; } .sys-icon-list .tab-pane { width: 300px; height: 250px; padding: 10px; overflow: hidden; } .sys-icon-list .tab-pane i { margin: 5px; } .auth { float: left; margin: 10px; } .auth .left, .auth .right { float: left; width: 220px; height: 170px; } .auth .left .list,.auth .right .list { float: left; width: 220px; height: 150px; overflow: auto; -webkit-border-radius: 3px; -moz-border-radius: 3px; border-radius: 3px; border: 1px solid #a1acb8; } .auth .btns { float: left; margin: 60px 5px 5px 5px; text-align: center; vertical-align: middle; border: 0; } .auth .btns .btn { display: block; } .auth .btns .btn > [class^="icon-"] { font-size: 18px; } .auth ul { list-style-type: none; margin: 0 0 10px 0; padding: 0; min-height: 100px; } .auth li { margin: 5px; padding: 5px; cursor: move; } .auth .ui-state-highlight { height: 1.5em; line-height: 1.2em; } /**็ณป็ปŸ็ฎก็† css end*/ /*ๅŽป้™คIE ่™š็บฟๆก†*/ a,input,textarea{blr:expression(this.onFocus=this.close());} /* ๅชๆ”ฏๆŒIE๏ผŒ่ฟ‡ๅคšไฝฟ็”จๆ•ˆ็އไฝŽ */ a,input,textarea{blr:expression(this.onFocus=this.blur());} /* ๅชๆ”ฏๆŒIE๏ผŒ่ฟ‡ๅคšไฝฟ็”จๆ•ˆ็އไฝŽ */ a:focus { -moz-outline-style: none; } /* IEไธๆ”ฏๆŒ */ *:focus { outline: none; }
iacdingping/es
web/src/main/webapp/WEB-INF/static/css/application.css
CSS
apache-2.0
20,115
define( "dojox/editor/plugins/nls/hu/latinEntities", ({ /* These are already handled in the default RTE amp:"ampersand",lt:"less-than sign", gt:"greater-than sign", nbsp:"no-break space\nnon-breaking space", quot:"quote", */ iexcl:"fordรญtott felkiรกltรณjel", cent:"cent jel", pound:"font jel", curren:"pรฉnznem jel", yen:"jen jel\nyuan jel", brvbar:"megszakรญtott vonal\nmegszakรญtott fรผggล‘leges vonal", sect:"paragrafusjel", uml:"trรฉma\nkalapos trรฉma", copy:"copyright jel", ordf:"nล‘nemลฑ sorszรกmnรฉv jelzรฉse a felsล‘ indexben", laquo:"balra mutatรณ dupla hegyes idรฉzล‘jel\nbalra mutatรณ belsล‘ idรฉzล‘jel", not:"nem jel", shy:"lรกgy kรถtล‘jel\nfeltรฉteles kรถtล‘jel", reg:"vรฉdjegy jel\nbejegyzett vรฉdjegy jel", macr:"รฉkezet\nkalapos รฉkezet\nfelsล‘ vonal\nAPL felsล‘ csรญk", deg:"fok jel", plusmn:"plus-mรญnusz jel\nplusz-vagy-mรญnusz jel", sup2:"kettล‘ a felsล‘ indexben\nfelsล‘ indexbe รญrt kettes szรกmjegy\nnรฉgyzetre emelt", sup3:"hรกrom a felsล‘ indexben\nfelsล‘ indexbe รญrt hรกrmas szรกmjegy\nharmadik hatvรกnyra emelt", acute:"hegyes รฉkezet\nkalapos รฉkezet", micro:"mikro jel", para:"sorvรฉge jel\nbekezdรฉs jel", middot:"kรถzรฉpsล‘ pont\nGrรบz vesszล‘\nGรถrรถg kรถzรฉpsล‘ pont", cedil:"cรฉdille\nkalapos cรฉdille", sup1:"1 a felsล‘ indexben\nfelsล‘ indexbe รญrt egyes szรกmjegy", ordm:"hรญmnemลฑ sorszรกmnรฉv jelzรฉse a felsล‘ indexben", raquo:"jobbra mutatรณ dupla hegyes idรฉzล‘jel\njobbra mutatรณ belsล‘ idรฉzล‘jel", frac14:"kรถzรถnsรฉges egynegyed\ntรถrtnegyed", frac12:"kรถzรถnsรฉges fรฉl\ntรถrtfรฉl", frac34:"kรถzรถnsรฉges hรกromnegyed\ntรถrthรกromnegyed", iquest:"fordรญtott kรฉrdล‘jel\nmegfordรญtott kรฉrdล‘jel", Agrave:"Latin nagy A betลฑ tompa รฉkezettel\nTompa รฉkezetes latin nagy A betลฑ", Aacute:"Latin nagy A betลฑ รฉles รฉkezettel", Acirc:"Latin kalapos nagy A betลฑ", Atilde:"Latin hullรกmvonalas nagy A betลฑ", Auml:"Latin kรฉtpontos nagy A betลฑ", Aring:"Latin nagy A betลฑ felรผl kรถrrel\nLatin nagy A betลฑ felsล‘ kรถrrel", AElig:"Latin nagy AE\nLatin nagy AE ikerbetลฑ", Ccedil:"Latin nagy C betลฑ cedillel", Egrave:"Latin nagy E betลฑ tompa รฉkezettel", Eacute:"Latin nagy E betลฑ รฉles รฉkezettel", Ecirc:"Latin kalapos nagy E betลฑ", Euml:"Latin kรฉtpontos nagy E betลฑ", Igrave:"Latin nagy I betลฑ tompa รฉkezettel", Iacute:"Latin nagy I betลฑ รฉles รฉkezettel", Icirc:"Latin kalapos nagy I betลฑ", Iuml:"Latin kรฉtpontos nagy I betลฑ", ETH:"Latin nagy ETH betลฑ", Ntilde:"Latin hullรกmvonalas nagy N betลฑ", Ograve:"Latin nagy O betลฑ tompa รฉkezettel", Oacute:"Latin nagy O betลฑ รฉles รฉkezettel", Ocirc:"Latin kalapos nagy O betลฑ", Otilde:"Latin hullรกmvonalas nagy O betลฑ", Ouml:"Latin kรฉtpontos nagy O betลฑ", times:"szorzรกsjel", Oslash:"Latin รกthรบzott nagy O betลฑ\nLatin nagy O betลฑ osztรกsjellel", Ugrave:"Latin nagy U betลฑ tompa รฉkezettel", Uacute:"Latin nagy U betลฑ รฉles รฉkezettel", Ucirc:"Latin kalapos nagy U betลฑ", Uuml:"Latin kรฉtpontos nagy U betลฑ", Yacute:"Latin nagy Y betลฑ รฉles รฉkezettel", THORN:"Latin nagy THORN betลฑ", szlig:"Latin kis sharfes\nseszett", agrave:"Latin kis a betลฑ tompa รฉkezettel\nTompa รฉkezetes latin kis a betลฑ", aacute:"Latin kis a betลฑ รฉles รฉkezettel", acirc:"Latin kalapos kis a betลฑ", atilde:"Latin hullรกmvonalas kis a betลฑ", auml:"Latin kรฉtpontos kis a betลฑ", aring:"Latin kis a betลฑ felรผl kรถrrel\nLatin kis a betลฑ felsล‘ kรถrrel", aelig:"Latin kis ae betลฑ\nLatin kis ae ikerbetลฑ", ccedil:"Latin kis c betลฑ cedillel", egrave:"Latin kis e betลฑ tompa รฉkezettel", eacute:"Latin kis e betลฑ รฉles รฉkezettel", ecirc:"Latin kalapos kis e betลฑ", euml:"Latin kรฉtpontos kis e betลฑ", igrave:"Latin kis i betลฑ tompa รฉkezettel", iacute:"Latin kis i betลฑ รฉles รฉkezettel", icirc:"Latin kalapos kis i betลฑ", iuml:"Latin kรฉtpontos kis i betลฑ", eth:"Latin kis eth betลฑ", ntilde:"Latin hullรกmvonalas kis n betลฑ", ograve:"Latin kis o betลฑ tompa รฉkezettel", oacute:"Latin kis o betลฑ รฉles รฉkezettel", ocirc:"Latin kalapos kis o betลฑ", otilde:"Latin hullรกmvonalas kis o betลฑ", ouml:"Latin kรฉtpontos kis o betลฑ", divide:"osztรกsjel", oslash:"Latin รกthรบzott kis o betลฑ\nLatin kis o betลฑ osztรกsjellel", ugrave:"Latin kis u betลฑ tompa รฉkezettel", uacute:"Latin kis u betลฑ รฉles รฉkezettel", ucirc:"Latin kalapos kis u betลฑ", uuml:"Latin kรฉtpontos kis u betลฑ", yacute:"Latin kis y รฉles รฉkezettel", thorn:"Latin kis thorn betลฑ", yuml:"Latin kรฉtpontos kis y", // Greek Characters and Symbols fnof:"Latin kis f horoggal\nfรผggvรฉnyforint", Alpha:"Gรถrรถg nagy alfa betลฑ", Beta:"Gรถrรถg nagy bรฉta betลฑ", Gamma:"Gรถrรถg nagy gamma betลฑ", Delta:"Gรถrรถg nagy delta betลฑ", Epsilon:"Gรถrรถg nagy epszilon betลฑ", Zeta:"Gรถrรถg nagy dzรฉta betลฑ", Eta:"Gรถrรถg nagy รฉta betลฑ", Theta:"Gรถrรถg nagy thรฉta betลฑ", Iota:"Gรถrรถg nagy iota betลฑ", Kappa:"Gรถrรถg nagy kappa betลฑ", Lambda:"Gรถrรถg nagy lambda betลฑ", Mu:"Gรถrรถg nagy mลฑ betลฑ", Nu:"Gรถrรถg nagy nลฑ betลฑ", Xi:"Gรถrรถg nagy kszรญ betลฑ", Omicron:"Gรถrรถg nagy omikron betลฑ", Pi:"Gรถrรถg nagy pi betลฑ", Rho:"Gรถrรถg nagy rรณ betลฑ", Sigma:"Gรถrรถg nagy szigma betลฑ", Tau:"Gรถrรถg nagy tau betลฑ", Upsilon:"Gรถrรถg nagy รผpszilon betลฑ", Phi:"Gรถrรถg nagy fรญ betลฑ", Chi:"Gรถrรถg nagy khรญ betลฑ", Psi:"Gรถrรถg nagy pszรญ betลฑ", Omega:"Gรถrรถg nagy รณmega betลฑ", alpha:"Gรถrรถg kis alfa betลฑ", beta:"Gรถrรถg kis bรฉta betลฑ", gamma:"Gรถrรถg kis gamma betลฑ", delta:"Gรถrรถg kis delta betลฑ", epsilon:"Gรถrรถg kis epszilon betลฑ", zeta:"Gรถrรถg kis dzรฉta betลฑ", eta:"Gรถrรถg kis รฉta betลฑ", theta:"Gรถrรถg kis thรฉta betลฑ", iota:"Gรถrรถg kis iรณta betลฑ", kappa:"Gรถrรถg kis kappa betลฑ", lambda:"Gรถrรถg kis lambda betลฑ", mu:"Gรถrรถg kis mลฑ betลฑ", nu:"Gรถrรถg kis nลฑ betลฑ", xi:"Gรถrรถg kis kszรญ betลฑ", omicron:"Gรถrรถg kis omikron betลฑ", pi:"Gรถrรถg kis pรญ betลฑ", rho:"Gรถrรถg kis rรณ betลฑ", sigmaf:"Gรถrรถg kis szigma betลฑ utolsรณ helyen", sigma:"Gรถrรถg kis szigma betลฑ", tau:"Gรถrรถg kis taรบ betลฑ", upsilon:"Gรถrรถg kis รผpszilon betลฑ", phi:"Gรถrรถg kis fรญ betลฑ", chi:"Gรถrรถg kis khรญ betลฑ", psi:"Gรถrรถg kis pszรญ betลฑ", omega:"Gรถrรถg kis รณmega betลฑ", thetasym:"Gรถrรถg kis thรฉta betลฑ szimbรณlum", upsih:"Gรถrรถg รผpszilon horog szimbรณlummal", piv:"Gรถrรถg pรญ szimbรณlum", bull:"felsorolรกsjel\nkis fekete kรถr", hellip:"vรญzszintes kihagyรกs\nbevezetล‘ hรกrmas pont", prime:"prรญm\nperc\nlรกb", Prime:"dupla pontossรกgรบ prรญm\nmรกsodperc\nhรผvelyk", oline:"felsล‘vonal\nkalapos felsล‘ vonal", frasl:"tรถrt osztรกsjel", weierp:"indexbe รญrt nagy P\nhatvรกnykitevล‘\nWeierstrass p", image:"megtรถrt nagy I\nkรฉpzetes (imaginรกrius) rรฉsz", real:"megtรถrt nagy R\nvalรณs rรฉsz szimbรณlum", trade:"vรฉdjegy jel", alefsym:"รกlef jel\nelsล‘ kvรกzi vรฉgtelen pozitรญv egรฉsz", larr:"balra mutatรณ nyรญl", uarr:"felfelรฉ mutatรณ nyรญl", rarr:"jobbra mutatรณ nyรญl", darr:"lefelรฉ mutatรณ nyรญl", harr:"balra-jobbra mutatรณ nyรญl", crarr:"lefelรฉ mutatรณ nyรญl bal oldalon sarokkal\nkocsi vissza", lArr:"balra mutatรณ dupla nyรญl", uArr:"felfelรฉ mutatรณ dupla nyรญl", rArr:"jobbra mutatรณ dupla nyรญl", dArr:"lefelรฉ mutatรณ dupla nyรญl", hArr:"balra-jobbra mutatรณ dupla nyรญl", forall:"minden\nfordรญtott nagy A betลฑ", part:"rรฉszleges differenciรกl", exist:"lรฉtezik", empty:"รผres halmaz\nnull halmaz\nรกtmรฉrล‘", nabla:"nabla\nvisszamutatรณ eltรฉrรฉs", isin:"eleme", notin:"nem eleme", ni:"tagkรฉnt tartalmazza", prod:"n-tagรบ termรฉk\ntermรฉk jel", sum:"n-tagรบ รถsszegzรฉs", minus:"mรญnusz jel", lowast:"csillag operรกtor", radic:"nรฉgyzetgyรถk\nnรฉgyzetgyรถk jel", prop:"arรกnyos", infin:"vรฉgtelen", ang:"szรถg", and:"logikai รฉs\nfordรญtott V", or:"logikai vagy\nV", cap:"metszet\nkalap", cup:"uniรณ\nU jel","int":"integrรกl", there4:"ezรฉrt", sim:"hullรกm operรกtor\negyรผtt vรกltozik\nhasonlรณ", cong:"megkรถzelรญtล‘leg egyenlล‘", asymp:"majdnem egyenlล‘\naszimptotikus", ne:"nem egyenlล‘", equiv:"azonos", le:"kisebb vagy egyenlล‘", ge:"nagyobb vagy egyenlล‘", sub:"rรฉszhalmaza", sup:"bล‘vรญtett halmaza", nsub:"nem rรฉszhalmaza", sube:"rรฉszhalmaza vagy egyenlล‘", supe:"bล‘vรญtett halmaza vagy egyenlล‘", oplus:"plusz jel kรถrben\ndirekt รถsszegzรฉs", otimes:"szorzรกs jel kรถrben\nvektoriรกlis szorzat", perp:"falsum\nortogonรกlis\nmerล‘leges", sdot:"pont operรกtor", lceil:"jobb szรถgletes zรกrรณjel felsล‘ sarok\nAPL upstile", rceil:"jobb szรถgletes zรกrรณjel felsล‘ sarok", lfloor:"jobb szรถgletes zรกrรณjel alsรณ sarok\nAPL downstile", rfloor:"jobb szรถgletes zรกrรณjel alsรณ sarok", lang:"balra mutatรณ hegyes zรกrรณjel", rang:"jobbra mutatรณ hegyes zรกrรณjel", loz:"rombusz", spades:"fekete pikk kรกrtyajel", clubs:"fekete treff kรกrtyjel\nlรณhere", hearts:"fekete kรถr kรกrtyajel\nszรญvalak", diams:"fekete kรกrรณ kรกryajel", OElig:"Latin nagy OE ikerbetลฑ", oelig:"Latin kis oe ikerbetลฑ", Scaron:"Latin nagy S betลฑ csรณnakkal", scaron:"Latin kis s betลฑ csรณnakkal", Yuml:"Latin kรฉtpontos nagy Y betลฑ", circ:"betลฑt mรณdosรญtรณ kalap รฉkezet", tilde:"kis hullรกm", ensp:"n szรณkรถz", emsp:"m szรณkรถz", thinsp:"szลฑk szรณkรถz", zwnj:"tรถrhetล‘ รผres jel", zwj:"nem tรถrhetล‘ รผres jel", lrm:"balrรณl jobbra jel", rlm:"jobbrรณl balra jel", ndash:"n kรถtล‘jel", mdash:"m kรถtล‘jel", lsquo:"bal szimpla idรฉzล‘jel", rsquo:"jobb szimpla idรฉzล‘jel", sbquo:"alsรณ 9-es szimpla idรฉzล‘jel", ldquo:"bal dupla idรฉzล‘jel", rdquo:"jobb dupla idรฉzล‘jel", bdquo:"alsรณ 9-es dupla idรฉzล‘jel", dagger:"kereszt", Dagger:"dupla kereszt", permil:"ezrelรฉkjel", lsaquo:"szimpla balra mutatรณ hegyes idรฉzล‘jel", rsaquo:"szimpla jobbra mutatรณ hegyes idรฉzล‘jel", euro:"euro jel" }) );
kanarelo/dairy
dairy/static/js/libs/dojo_toolkit/dojox/editor/plugins/nls/hu/latinEntities.js.uncompressed.js
JavaScript
apache-2.0
9,798
/* * INET An implementation of the TCP/IP protocol suite for the LINUX * operating system. INET is implemented using the BSD Socket * interface as the means of communication with the user level. * * Routing netlink socket interface: protocol independent part. * * Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. * * Fixes: * Vitaly E. Lavrov RTA_OK arithmetics was wrong. */ #include <linux/errno.h> #include <linux/module.h> #include <linux/types.h> #include <linux/socket.h> #include <linux/kernel.h> #include <linux/timer.h> #include <linux/string.h> #include <linux/sockios.h> #include <linux/net.h> #include <linux/fcntl.h> #include <linux/mm.h> #include <linux/slab.h> #include <linux/interrupt.h> #include <linux/capability.h> #include <linux/skbuff.h> #include <linux/init.h> #include <linux/security.h> #include <linux/mutex.h> #include <linux/if_addr.h> #include <linux/if_bridge.h> #include <linux/pci.h> #include <linux/etherdevice.h> #include <asm/uaccess.h> #include <linux/inet.h> #include <linux/netdevice.h> #include <net/ip.h> #include <net/protocol.h> #include <net/arp.h> #include <net/route.h> #include <net/udp.h> #include <net/sock.h> #include <net/pkt_sched.h> #include <net/fib_rules.h> #include <net/rtnetlink.h> #include <net/net_namespace.h> struct rtnl_link { rtnl_doit_func doit; rtnl_dumpit_func dumpit; rtnl_calcit_func calcit; }; static DEFINE_MUTEX(rtnl_mutex); void rtnl_lock(void) { mutex_lock(&rtnl_mutex); } EXPORT_SYMBOL(rtnl_lock); void __rtnl_unlock(void) { mutex_unlock(&rtnl_mutex); } void rtnl_unlock(void) { /* This fellow will unlock it for us. */ netdev_run_todo(); } EXPORT_SYMBOL(rtnl_unlock); int rtnl_trylock(void) { return mutex_trylock(&rtnl_mutex); } EXPORT_SYMBOL(rtnl_trylock); int rtnl_is_locked(void) { return mutex_is_locked(&rtnl_mutex); } EXPORT_SYMBOL(rtnl_is_locked); #ifdef CONFIG_PROVE_LOCKING int lockdep_rtnl_is_held(void) { return lockdep_is_held(&rtnl_mutex); } EXPORT_SYMBOL(lockdep_rtnl_is_held); #endif /* #ifdef CONFIG_PROVE_LOCKING */ static struct rtnl_link *rtnl_msg_handlers[RTNL_FAMILY_MAX + 1]; static inline int rtm_msgindex(int msgtype) { int msgindex = msgtype - RTM_BASE; /* * msgindex < 0 implies someone tried to register a netlink * control code. msgindex >= RTM_NR_MSGTYPES may indicate that * the message type has not been added to linux/rtnetlink.h */ BUG_ON(msgindex < 0 || msgindex >= RTM_NR_MSGTYPES); return msgindex; } static rtnl_doit_func rtnl_get_doit(int protocol, int msgindex) { struct rtnl_link *tab; if (protocol <= RTNL_FAMILY_MAX) tab = rtnl_msg_handlers[protocol]; else tab = NULL; if (tab == NULL || tab[msgindex].doit == NULL) tab = rtnl_msg_handlers[PF_UNSPEC]; return tab[msgindex].doit; } static rtnl_dumpit_func rtnl_get_dumpit(int protocol, int msgindex) { struct rtnl_link *tab; if (protocol <= RTNL_FAMILY_MAX) tab = rtnl_msg_handlers[protocol]; else tab = NULL; if (tab == NULL || tab[msgindex].dumpit == NULL) tab = rtnl_msg_handlers[PF_UNSPEC]; return tab[msgindex].dumpit; } static rtnl_calcit_func rtnl_get_calcit(int protocol, int msgindex) { struct rtnl_link *tab; if (protocol <= RTNL_FAMILY_MAX) tab = rtnl_msg_handlers[protocol]; else tab = NULL; if (tab == NULL || tab[msgindex].calcit == NULL) tab = rtnl_msg_handlers[PF_UNSPEC]; return tab[msgindex].calcit; } /** * __rtnl_register - Register a rtnetlink message type * @protocol: Protocol family or PF_UNSPEC * @msgtype: rtnetlink message type * @doit: Function pointer called for each request message * @dumpit: Function pointer called for each dump request (NLM_F_DUMP) message * @calcit: Function pointer to calc size of dump message * * Registers the specified function pointers (at least one of them has * to be non-NULL) to be called whenever a request message for the * specified protocol family and message type is received. * * The special protocol family PF_UNSPEC may be used to define fallback * function pointers for the case when no entry for the specific protocol * family exists. * * Returns 0 on success or a negative error code. */ int __rtnl_register(int protocol, int msgtype, rtnl_doit_func doit, rtnl_dumpit_func dumpit, rtnl_calcit_func calcit) { struct rtnl_link *tab; int msgindex; BUG_ON(protocol < 0 || protocol > RTNL_FAMILY_MAX); msgindex = rtm_msgindex(msgtype); tab = rtnl_msg_handlers[protocol]; if (tab == NULL) { tab = kcalloc(RTM_NR_MSGTYPES, sizeof(*tab), GFP_KERNEL); if (tab == NULL) return -ENOBUFS; rtnl_msg_handlers[protocol] = tab; } if (doit) tab[msgindex].doit = doit; if (dumpit) tab[msgindex].dumpit = dumpit; if (calcit) tab[msgindex].calcit = calcit; return 0; } EXPORT_SYMBOL_GPL(__rtnl_register); /** * rtnl_register - Register a rtnetlink message type * * Identical to __rtnl_register() but panics on failure. This is useful * as failure of this function is very unlikely, it can only happen due * to lack of memory when allocating the chain to store all message * handlers for a protocol. Meant for use in init functions where lack * of memory implies no sense in continuing. */ void rtnl_register(int protocol, int msgtype, rtnl_doit_func doit, rtnl_dumpit_func dumpit, rtnl_calcit_func calcit) { if (__rtnl_register(protocol, msgtype, doit, dumpit, calcit) < 0) panic("Unable to register rtnetlink message handler, " "protocol = %d, message type = %d\n", protocol, msgtype); } EXPORT_SYMBOL_GPL(rtnl_register); /** * rtnl_unregister - Unregister a rtnetlink message type * @protocol: Protocol family or PF_UNSPEC * @msgtype: rtnetlink message type * * Returns 0 on success or a negative error code. */ int rtnl_unregister(int protocol, int msgtype) { int msgindex; BUG_ON(protocol < 0 || protocol > RTNL_FAMILY_MAX); msgindex = rtm_msgindex(msgtype); if (rtnl_msg_handlers[protocol] == NULL) return -ENOENT; rtnl_msg_handlers[protocol][msgindex].doit = NULL; rtnl_msg_handlers[protocol][msgindex].dumpit = NULL; return 0; } EXPORT_SYMBOL_GPL(rtnl_unregister); /** * rtnl_unregister_all - Unregister all rtnetlink message type of a protocol * @protocol : Protocol family or PF_UNSPEC * * Identical to calling rtnl_unregster() for all registered message types * of a certain protocol family. */ void rtnl_unregister_all(int protocol) { BUG_ON(protocol < 0 || protocol > RTNL_FAMILY_MAX); kfree(rtnl_msg_handlers[protocol]); rtnl_msg_handlers[protocol] = NULL; } EXPORT_SYMBOL_GPL(rtnl_unregister_all); static LIST_HEAD(link_ops); static const struct rtnl_link_ops *rtnl_link_ops_get(const char *kind) { const struct rtnl_link_ops *ops; list_for_each_entry(ops, &link_ops, list) { if (!strcmp(ops->kind, kind)) return ops; } return NULL; } /** * __rtnl_link_register - Register rtnl_link_ops with rtnetlink. * @ops: struct rtnl_link_ops * to register * * The caller must hold the rtnl_mutex. This function should be used * by drivers that create devices during module initialization. It * must be called before registering the devices. * * Returns 0 on success or a negative error code. */ int __rtnl_link_register(struct rtnl_link_ops *ops) { if (rtnl_link_ops_get(ops->kind)) return -EEXIST; if (!ops->dellink) ops->dellink = unregister_netdevice_queue; list_add_tail(&ops->list, &link_ops); return 0; } EXPORT_SYMBOL_GPL(__rtnl_link_register); /** * rtnl_link_register - Register rtnl_link_ops with rtnetlink. * @ops: struct rtnl_link_ops * to register * * Returns 0 on success or a negative error code. */ int rtnl_link_register(struct rtnl_link_ops *ops) { int err; rtnl_lock(); err = __rtnl_link_register(ops); rtnl_unlock(); return err; } EXPORT_SYMBOL_GPL(rtnl_link_register); static void __rtnl_kill_links(struct net *net, struct rtnl_link_ops *ops) { struct net_device *dev; LIST_HEAD(list_kill); for_each_netdev(net, dev) { if (dev->rtnl_link_ops == ops) ops->dellink(dev, &list_kill); } unregister_netdevice_many(&list_kill); } /** * __rtnl_link_unregister - Unregister rtnl_link_ops from rtnetlink. * @ops: struct rtnl_link_ops * to unregister * * The caller must hold the rtnl_mutex. */ void __rtnl_link_unregister(struct rtnl_link_ops *ops) { struct net *net; for_each_net(net) { __rtnl_kill_links(net, ops); } list_del(&ops->list); } EXPORT_SYMBOL_GPL(__rtnl_link_unregister); /** * rtnl_link_unregister - Unregister rtnl_link_ops from rtnetlink. * @ops: struct rtnl_link_ops * to unregister */ void rtnl_link_unregister(struct rtnl_link_ops *ops) { rtnl_lock(); __rtnl_link_unregister(ops); rtnl_unlock(); } EXPORT_SYMBOL_GPL(rtnl_link_unregister); static size_t rtnl_link_get_size(const struct net_device *dev) { const struct rtnl_link_ops *ops = dev->rtnl_link_ops; size_t size; if (!ops) return 0; size = nla_total_size(sizeof(struct nlattr)) + /* IFLA_LINKINFO */ nla_total_size(strlen(ops->kind) + 1); /* IFLA_INFO_KIND */ if (ops->get_size) /* IFLA_INFO_DATA + nested data */ size += nla_total_size(sizeof(struct nlattr)) + ops->get_size(dev); if (ops->get_xstats_size) /* IFLA_INFO_XSTATS */ size += nla_total_size(ops->get_xstats_size(dev)); return size; } static LIST_HEAD(rtnl_af_ops); static const struct rtnl_af_ops *rtnl_af_lookup(const int family) { const struct rtnl_af_ops *ops; list_for_each_entry(ops, &rtnl_af_ops, list) { if (ops->family == family) return ops; } return NULL; } /** * __rtnl_af_register - Register rtnl_af_ops with rtnetlink. * @ops: struct rtnl_af_ops * to register * * The caller must hold the rtnl_mutex. * * Returns 0 on success or a negative error code. */ int __rtnl_af_register(struct rtnl_af_ops *ops) { list_add_tail(&ops->list, &rtnl_af_ops); return 0; } EXPORT_SYMBOL_GPL(__rtnl_af_register); /** * rtnl_af_register - Register rtnl_af_ops with rtnetlink. * @ops: struct rtnl_af_ops * to register * * Returns 0 on success or a negative error code. */ int rtnl_af_register(struct rtnl_af_ops *ops) { int err; rtnl_lock(); err = __rtnl_af_register(ops); rtnl_unlock(); return err; } EXPORT_SYMBOL_GPL(rtnl_af_register); /** * __rtnl_af_unregister - Unregister rtnl_af_ops from rtnetlink. * @ops: struct rtnl_af_ops * to unregister * * The caller must hold the rtnl_mutex. */ void __rtnl_af_unregister(struct rtnl_af_ops *ops) { list_del(&ops->list); } EXPORT_SYMBOL_GPL(__rtnl_af_unregister); /** * rtnl_af_unregister - Unregister rtnl_af_ops from rtnetlink. * @ops: struct rtnl_af_ops * to unregister */ void rtnl_af_unregister(struct rtnl_af_ops *ops) { rtnl_lock(); __rtnl_af_unregister(ops); rtnl_unlock(); } EXPORT_SYMBOL_GPL(rtnl_af_unregister); static size_t rtnl_link_get_af_size(const struct net_device *dev) { struct rtnl_af_ops *af_ops; size_t size; /* IFLA_AF_SPEC */ size = nla_total_size(sizeof(struct nlattr)); list_for_each_entry(af_ops, &rtnl_af_ops, list) { if (af_ops->get_link_af_size) { /* AF_* + nested data */ size += nla_total_size(sizeof(struct nlattr)) + af_ops->get_link_af_size(dev); } } return size; } static int rtnl_link_fill(struct sk_buff *skb, const struct net_device *dev) { const struct rtnl_link_ops *ops = dev->rtnl_link_ops; struct nlattr *linkinfo, *data; int err = -EMSGSIZE; linkinfo = nla_nest_start(skb, IFLA_LINKINFO); if (linkinfo == NULL) goto out; if (nla_put_string(skb, IFLA_INFO_KIND, ops->kind) < 0) goto err_cancel_link; if (ops->fill_xstats) { err = ops->fill_xstats(skb, dev); if (err < 0) goto err_cancel_link; } if (ops->fill_info) { data = nla_nest_start(skb, IFLA_INFO_DATA); if (data == NULL) { err = -EMSGSIZE; goto err_cancel_link; } err = ops->fill_info(skb, dev); if (err < 0) goto err_cancel_data; nla_nest_end(skb, data); } nla_nest_end(skb, linkinfo); return 0; err_cancel_data: nla_nest_cancel(skb, data); err_cancel_link: nla_nest_cancel(skb, linkinfo); out: return err; } int rtnetlink_send(struct sk_buff *skb, struct net *net, u32 pid, unsigned int group, int echo) { struct sock *rtnl = net->rtnl; int err = 0; NETLINK_CB(skb).dst_group = group; if (echo) atomic_inc(&skb->users); netlink_broadcast(rtnl, skb, pid, group, GFP_KERNEL); if (echo) err = netlink_unicast(rtnl, skb, pid, MSG_DONTWAIT); return err; } int rtnl_unicast(struct sk_buff *skb, struct net *net, u32 pid) { struct sock *rtnl = net->rtnl; return nlmsg_unicast(rtnl, skb, pid); } EXPORT_SYMBOL(rtnl_unicast); void rtnl_notify(struct sk_buff *skb, struct net *net, u32 pid, u32 group, struct nlmsghdr *nlh, gfp_t flags) { struct sock *rtnl = net->rtnl; int report = 0; if (nlh) report = nlmsg_report(nlh); nlmsg_notify(rtnl, skb, pid, group, report, flags); } EXPORT_SYMBOL(rtnl_notify); void rtnl_set_sk_err(struct net *net, u32 group, int error) { struct sock *rtnl = net->rtnl; netlink_set_err(rtnl, 0, group, error); } EXPORT_SYMBOL(rtnl_set_sk_err); int rtnetlink_put_metrics(struct sk_buff *skb, u32 *metrics) { struct nlattr *mx; int i, valid = 0; mx = nla_nest_start(skb, RTA_METRICS); if (mx == NULL) return -ENOBUFS; for (i = 0; i < RTAX_MAX; i++) { if (metrics[i]) { valid++; if (nla_put_u32(skb, i+1, metrics[i])) goto nla_put_failure; } } if (!valid) { nla_nest_cancel(skb, mx); return 0; } return nla_nest_end(skb, mx); nla_put_failure: nla_nest_cancel(skb, mx); return -EMSGSIZE; } EXPORT_SYMBOL(rtnetlink_put_metrics); int rtnl_put_cacheinfo(struct sk_buff *skb, struct dst_entry *dst, u32 id, long expires, u32 error) { struct rta_cacheinfo ci = { .rta_lastuse = jiffies_delta_to_clock_t(jiffies - dst->lastuse), .rta_used = dst->__use, .rta_clntref = atomic_read(&(dst->__refcnt)), .rta_error = error, .rta_id = id, }; if (expires) { unsigned long clock; clock = jiffies_to_clock_t(abs(expires)); clock = min_t(unsigned long, clock, INT_MAX); ci.rta_expires = (expires > 0) ? clock : -clock; } return nla_put(skb, RTA_CACHEINFO, sizeof(ci), &ci); } EXPORT_SYMBOL_GPL(rtnl_put_cacheinfo); static void set_operstate(struct net_device *dev, unsigned char transition) { unsigned char operstate = dev->operstate; switch (transition) { case IF_OPER_UP: if ((operstate == IF_OPER_DORMANT || operstate == IF_OPER_UNKNOWN) && !netif_dormant(dev)) operstate = IF_OPER_UP; break; case IF_OPER_DORMANT: if (operstate == IF_OPER_UP || operstate == IF_OPER_UNKNOWN) operstate = IF_OPER_DORMANT; break; } if (dev->operstate != operstate) { write_lock_bh(&dev_base_lock); dev->operstate = operstate; write_unlock_bh(&dev_base_lock); netdev_state_change(dev); } } static unsigned int rtnl_dev_get_flags(const struct net_device *dev) { return (dev->flags & ~(IFF_PROMISC | IFF_ALLMULTI)) | (dev->gflags & (IFF_PROMISC | IFF_ALLMULTI)); } static unsigned int rtnl_dev_combine_flags(const struct net_device *dev, const struct ifinfomsg *ifm) { unsigned int flags = ifm->ifi_flags; /* bugwards compatibility: ifi_change == 0 is treated as ~0 */ if (ifm->ifi_change) flags = (flags & ifm->ifi_change) | (rtnl_dev_get_flags(dev) & ~ifm->ifi_change); return flags; } static void copy_rtnl_link_stats(struct rtnl_link_stats *a, const struct rtnl_link_stats64 *b) { a->rx_packets = b->rx_packets; a->tx_packets = b->tx_packets; a->rx_bytes = b->rx_bytes; a->tx_bytes = b->tx_bytes; a->rx_errors = b->rx_errors; a->tx_errors = b->tx_errors; a->rx_dropped = b->rx_dropped; a->tx_dropped = b->tx_dropped; a->multicast = b->multicast; a->collisions = b->collisions; a->rx_length_errors = b->rx_length_errors; a->rx_over_errors = b->rx_over_errors; a->rx_crc_errors = b->rx_crc_errors; a->rx_frame_errors = b->rx_frame_errors; a->rx_fifo_errors = b->rx_fifo_errors; a->rx_missed_errors = b->rx_missed_errors; a->tx_aborted_errors = b->tx_aborted_errors; a->tx_carrier_errors = b->tx_carrier_errors; a->tx_fifo_errors = b->tx_fifo_errors; a->tx_heartbeat_errors = b->tx_heartbeat_errors; a->tx_window_errors = b->tx_window_errors; a->rx_compressed = b->rx_compressed; a->tx_compressed = b->tx_compressed; } static void copy_rtnl_link_stats64(void *v, const struct rtnl_link_stats64 *b) { memcpy(v, b, sizeof(*b)); } /* All VF info */ static inline int rtnl_vfinfo_size(const struct net_device *dev, u32 ext_filter_mask) { if (dev->dev.parent && dev_is_pci(dev->dev.parent) && (ext_filter_mask & RTEXT_FILTER_VF)) { int num_vfs = dev_num_vf(dev->dev.parent); size_t size = nla_total_size(sizeof(struct nlattr)); size += nla_total_size(num_vfs * sizeof(struct nlattr)); size += num_vfs * (nla_total_size(sizeof(struct ifla_vf_mac)) + nla_total_size(sizeof(struct ifla_vf_vlan)) + nla_total_size(sizeof(struct ifla_vf_tx_rate)) + nla_total_size(sizeof(struct ifla_vf_spoofchk))); return size; } else return 0; } static size_t rtnl_port_size(const struct net_device *dev, u32 ext_filter_mask) { size_t port_size = nla_total_size(4) /* PORT_VF */ + nla_total_size(PORT_PROFILE_MAX) /* PORT_PROFILE */ + nla_total_size(sizeof(struct ifla_port_vsi)) /* PORT_VSI_TYPE */ + nla_total_size(PORT_UUID_MAX) /* PORT_INSTANCE_UUID */ + nla_total_size(PORT_UUID_MAX) /* PORT_HOST_UUID */ + nla_total_size(1) /* PROT_VDP_REQUEST */ + nla_total_size(2); /* PORT_VDP_RESPONSE */ size_t vf_ports_size = nla_total_size(sizeof(struct nlattr)); size_t vf_port_size = nla_total_size(sizeof(struct nlattr)) + port_size; size_t port_self_size = nla_total_size(sizeof(struct nlattr)) + port_size; if (!dev->netdev_ops->ndo_get_vf_port || !dev->dev.parent || !(ext_filter_mask & RTEXT_FILTER_VF)) return 0; if (dev_num_vf(dev->dev.parent)) return port_self_size + vf_ports_size + vf_port_size * dev_num_vf(dev->dev.parent); else return port_self_size; } static noinline size_t if_nlmsg_size(const struct net_device *dev, u32 ext_filter_mask) { return NLMSG_ALIGN(sizeof(struct ifinfomsg)) + nla_total_size(IFNAMSIZ) /* IFLA_IFNAME */ + nla_total_size(IFALIASZ) /* IFLA_IFALIAS */ + nla_total_size(IFNAMSIZ) /* IFLA_QDISC */ + nla_total_size(sizeof(struct rtnl_link_ifmap)) + nla_total_size(sizeof(struct rtnl_link_stats)) + nla_total_size(sizeof(struct rtnl_link_stats64)) + nla_total_size(MAX_ADDR_LEN) /* IFLA_ADDRESS */ + nla_total_size(MAX_ADDR_LEN) /* IFLA_BROADCAST */ + nla_total_size(4) /* IFLA_TXQLEN */ + nla_total_size(4) /* IFLA_WEIGHT */ + nla_total_size(4) /* IFLA_MTU */ + nla_total_size(4) /* IFLA_LINK */ + nla_total_size(4) /* IFLA_MASTER */ + nla_total_size(1) /* IFLA_CARRIER */ + nla_total_size(4) /* IFLA_PROMISCUITY */ + nla_total_size(4) /* IFLA_NUM_TX_QUEUES */ + nla_total_size(4) /* IFLA_NUM_RX_QUEUES */ + nla_total_size(1) /* IFLA_OPERSTATE */ + nla_total_size(1) /* IFLA_LINKMODE */ + nla_total_size(ext_filter_mask & RTEXT_FILTER_VF ? 4 : 0) /* IFLA_NUM_VF */ + rtnl_vfinfo_size(dev, ext_filter_mask) /* IFLA_VFINFO_LIST */ + rtnl_port_size(dev, ext_filter_mask) /* IFLA_VF_PORTS + IFLA_PORT_SELF */ + rtnl_link_get_size(dev) /* IFLA_LINKINFO */ + rtnl_link_get_af_size(dev); /* IFLA_AF_SPEC */ } static int rtnl_vf_ports_fill(struct sk_buff *skb, struct net_device *dev) { struct nlattr *vf_ports; struct nlattr *vf_port; int vf; int err; vf_ports = nla_nest_start(skb, IFLA_VF_PORTS); if (!vf_ports) return -EMSGSIZE; for (vf = 0; vf < dev_num_vf(dev->dev.parent); vf++) { vf_port = nla_nest_start(skb, IFLA_VF_PORT); if (!vf_port) goto nla_put_failure; if (nla_put_u32(skb, IFLA_PORT_VF, vf)) goto nla_put_failure; err = dev->netdev_ops->ndo_get_vf_port(dev, vf, skb); if (err == -EMSGSIZE) goto nla_put_failure; if (err) { nla_nest_cancel(skb, vf_port); continue; } nla_nest_end(skb, vf_port); } nla_nest_end(skb, vf_ports); return 0; nla_put_failure: nla_nest_cancel(skb, vf_ports); return -EMSGSIZE; } static int rtnl_port_self_fill(struct sk_buff *skb, struct net_device *dev) { struct nlattr *port_self; int err; port_self = nla_nest_start(skb, IFLA_PORT_SELF); if (!port_self) return -EMSGSIZE; err = dev->netdev_ops->ndo_get_vf_port(dev, PORT_SELF_VF, skb); if (err) { nla_nest_cancel(skb, port_self); return (err == -EMSGSIZE) ? err : 0; } nla_nest_end(skb, port_self); return 0; } static int rtnl_port_fill(struct sk_buff *skb, struct net_device *dev, u32 ext_filter_mask) { int err; if (!dev->netdev_ops->ndo_get_vf_port || !dev->dev.parent || !(ext_filter_mask & RTEXT_FILTER_VF)) return 0; err = rtnl_port_self_fill(skb, dev); if (err) return err; if (dev_num_vf(dev->dev.parent)) { err = rtnl_vf_ports_fill(skb, dev); if (err) return err; } return 0; } static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev, int type, u32 pid, u32 seq, u32 change, unsigned int flags, u32 ext_filter_mask) { struct ifinfomsg *ifm; struct nlmsghdr *nlh; struct rtnl_link_stats64 temp; const struct rtnl_link_stats64 *stats; struct nlattr *attr, *af_spec; struct rtnl_af_ops *af_ops; struct net_device *upper_dev = netdev_master_upper_dev_get(dev); ASSERT_RTNL(); nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ifm), flags); if (nlh == NULL) return -EMSGSIZE; ifm = nlmsg_data(nlh); ifm->ifi_family = AF_UNSPEC; ifm->__ifi_pad = 0; ifm->ifi_type = dev->type; ifm->ifi_index = dev->ifindex; ifm->ifi_flags = dev_get_flags(dev); ifm->ifi_change = change; if (nla_put_string(skb, IFLA_IFNAME, dev->name) || nla_put_u32(skb, IFLA_TXQLEN, dev->tx_queue_len) || nla_put_u8(skb, IFLA_OPERSTATE, netif_running(dev) ? dev->operstate : IF_OPER_DOWN) || nla_put_u8(skb, IFLA_LINKMODE, dev->link_mode) || nla_put_u32(skb, IFLA_MTU, dev->mtu) || nla_put_u32(skb, IFLA_GROUP, dev->group) || nla_put_u32(skb, IFLA_PROMISCUITY, dev->promiscuity) || nla_put_u32(skb, IFLA_NUM_TX_QUEUES, dev->num_tx_queues) || #ifdef CONFIG_RPS nla_put_u32(skb, IFLA_NUM_RX_QUEUES, dev->num_rx_queues) || #endif (dev->ifindex != dev->iflink && nla_put_u32(skb, IFLA_LINK, dev->iflink)) || (upper_dev && nla_put_u32(skb, IFLA_MASTER, upper_dev->ifindex)) || nla_put_u8(skb, IFLA_CARRIER, netif_carrier_ok(dev)) || (dev->qdisc && nla_put_string(skb, IFLA_QDISC, dev->qdisc->ops->id)) || (dev->ifalias && nla_put_string(skb, IFLA_IFALIAS, dev->ifalias))) goto nla_put_failure; if (1) { struct rtnl_link_ifmap map = { .mem_start = dev->mem_start, .mem_end = dev->mem_end, .base_addr = dev->base_addr, .irq = dev->irq, .dma = dev->dma, .port = dev->if_port, }; if (nla_put(skb, IFLA_MAP, sizeof(map), &map)) goto nla_put_failure; } if (dev->addr_len) { if (nla_put(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr) || nla_put(skb, IFLA_BROADCAST, dev->addr_len, dev->broadcast)) goto nla_put_failure; } attr = nla_reserve(skb, IFLA_STATS, sizeof(struct rtnl_link_stats)); if (attr == NULL) goto nla_put_failure; stats = dev_get_stats(dev, &temp); copy_rtnl_link_stats(nla_data(attr), stats); attr = nla_reserve(skb, IFLA_STATS64, sizeof(struct rtnl_link_stats64)); if (attr == NULL) goto nla_put_failure; copy_rtnl_link_stats64(nla_data(attr), stats); if (dev->dev.parent && (ext_filter_mask & RTEXT_FILTER_VF) && nla_put_u32(skb, IFLA_NUM_VF, dev_num_vf(dev->dev.parent))) goto nla_put_failure; if (dev->netdev_ops->ndo_get_vf_config && dev->dev.parent && (ext_filter_mask & RTEXT_FILTER_VF)) { int i; struct nlattr *vfinfo, *vf; int num_vfs = dev_num_vf(dev->dev.parent); vfinfo = nla_nest_start(skb, IFLA_VFINFO_LIST); if (!vfinfo) goto nla_put_failure; for (i = 0; i < num_vfs; i++) { struct ifla_vf_info ivi; struct ifla_vf_mac vf_mac; struct ifla_vf_vlan vf_vlan; struct ifla_vf_tx_rate vf_tx_rate; struct ifla_vf_spoofchk vf_spoofchk; /* * Not all SR-IOV capable drivers support the * spoofcheck query. Preset to -1 so the user * space tool can detect that the driver didn't * report anything. */ ivi.spoofchk = -1; memset(ivi.mac, 0, sizeof(ivi.mac)); if (dev->netdev_ops->ndo_get_vf_config(dev, i, &ivi)) break; vf_mac.vf = vf_vlan.vf = vf_tx_rate.vf = vf_spoofchk.vf = ivi.vf; memcpy(vf_mac.mac, ivi.mac, sizeof(ivi.mac)); vf_vlan.vlan = ivi.vlan; vf_vlan.qos = ivi.qos; vf_tx_rate.rate = ivi.tx_rate; vf_spoofchk.setting = ivi.spoofchk; vf = nla_nest_start(skb, IFLA_VF_INFO); if (!vf) { nla_nest_cancel(skb, vfinfo); goto nla_put_failure; } if (nla_put(skb, IFLA_VF_MAC, sizeof(vf_mac), &vf_mac) || nla_put(skb, IFLA_VF_VLAN, sizeof(vf_vlan), &vf_vlan) || nla_put(skb, IFLA_VF_TX_RATE, sizeof(vf_tx_rate), &vf_tx_rate) || nla_put(skb, IFLA_VF_SPOOFCHK, sizeof(vf_spoofchk), &vf_spoofchk)) goto nla_put_failure; nla_nest_end(skb, vf); } nla_nest_end(skb, vfinfo); } if (rtnl_port_fill(skb, dev, ext_filter_mask)) goto nla_put_failure; if (dev->rtnl_link_ops) { if (rtnl_link_fill(skb, dev) < 0) goto nla_put_failure; } if (!(af_spec = nla_nest_start(skb, IFLA_AF_SPEC))) goto nla_put_failure; list_for_each_entry(af_ops, &rtnl_af_ops, list) { if (af_ops->fill_link_af) { struct nlattr *af; int err; if (!(af = nla_nest_start(skb, af_ops->family))) goto nla_put_failure; err = af_ops->fill_link_af(skb, dev); /* * Caller may return ENODATA to indicate that there * was no data to be dumped. This is not an error, it * means we should trim the attribute header and * continue. */ if (err == -ENODATA) nla_nest_cancel(skb, af); else if (err < 0) goto nla_put_failure; nla_nest_end(skb, af); } } nla_nest_end(skb, af_spec); return nlmsg_end(skb, nlh); nla_put_failure: nlmsg_cancel(skb, nlh); return -EMSGSIZE; } static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb) { struct net *net = sock_net(skb->sk); int h, s_h; int idx = 0, s_idx; struct net_device *dev; struct hlist_head *head; struct nlattr *tb[IFLA_MAX+1]; u32 ext_filter_mask = 0; int err; int hdrlen; s_h = cb->args[0]; s_idx = cb->args[1]; rcu_read_lock(); cb->seq = net->dev_base_seq; /* A hack to preserve kernel<->userspace interface. * The correct header is ifinfomsg. It is consistent with rtnl_getlink. * However, before Linux v3.9 the code here assumed rtgenmsg and that's * what iproute2 < v3.9.0 used. * We can detect the old iproute2. Even including the IFLA_EXT_MASK * attribute, its netlink message is shorter than struct ifinfomsg. */ hdrlen = nlmsg_len(cb->nlh) < sizeof(struct ifinfomsg) ? sizeof(struct rtgenmsg) : sizeof(struct ifinfomsg); if (nlmsg_parse(cb->nlh, hdrlen, tb, IFLA_MAX, ifla_policy) >= 0) { if (tb[IFLA_EXT_MASK]) ext_filter_mask = nla_get_u32(tb[IFLA_EXT_MASK]); } for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) { idx = 0; head = &net->dev_index_head[h]; hlist_for_each_entry_rcu(dev, head, index_hlist) { if (idx < s_idx) goto cont; err = rtnl_fill_ifinfo(skb, dev, RTM_NEWLINK, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, 0, NLM_F_MULTI, ext_filter_mask); /* If we ran out of room on the first message, * we're in trouble */ WARN_ON((err == -EMSGSIZE) && (skb->len == 0)); if (err <= 0) goto out; nl_dump_check_consistent(cb, nlmsg_hdr(skb)); cont: idx++; } } out: rcu_read_unlock(); cb->args[1] = idx; cb->args[0] = h; return skb->len; } const struct nla_policy ifla_policy[IFLA_MAX+1] = { [IFLA_IFNAME] = { .type = NLA_STRING, .len = IFNAMSIZ-1 }, [IFLA_ADDRESS] = { .type = NLA_BINARY, .len = MAX_ADDR_LEN }, [IFLA_BROADCAST] = { .type = NLA_BINARY, .len = MAX_ADDR_LEN }, [IFLA_MAP] = { .len = sizeof(struct rtnl_link_ifmap) }, [IFLA_MTU] = { .type = NLA_U32 }, [IFLA_LINK] = { .type = NLA_U32 }, [IFLA_MASTER] = { .type = NLA_U32 }, [IFLA_CARRIER] = { .type = NLA_U8 }, [IFLA_TXQLEN] = { .type = NLA_U32 }, [IFLA_WEIGHT] = { .type = NLA_U32 }, [IFLA_OPERSTATE] = { .type = NLA_U8 }, [IFLA_LINKMODE] = { .type = NLA_U8 }, [IFLA_LINKINFO] = { .type = NLA_NESTED }, [IFLA_NET_NS_PID] = { .type = NLA_U32 }, [IFLA_NET_NS_FD] = { .type = NLA_U32 }, [IFLA_IFALIAS] = { .type = NLA_STRING, .len = IFALIASZ-1 }, [IFLA_VFINFO_LIST] = {. type = NLA_NESTED }, [IFLA_VF_PORTS] = { .type = NLA_NESTED }, [IFLA_PORT_SELF] = { .type = NLA_NESTED }, [IFLA_AF_SPEC] = { .type = NLA_NESTED }, [IFLA_EXT_MASK] = { .type = NLA_U32 }, [IFLA_PROMISCUITY] = { .type = NLA_U32 }, [IFLA_NUM_TX_QUEUES] = { .type = NLA_U32 }, [IFLA_NUM_RX_QUEUES] = { .type = NLA_U32 }, }; EXPORT_SYMBOL(ifla_policy); static const struct nla_policy ifla_info_policy[IFLA_INFO_MAX+1] = { [IFLA_INFO_KIND] = { .type = NLA_STRING }, [IFLA_INFO_DATA] = { .type = NLA_NESTED }, }; static const struct nla_policy ifla_vfinfo_policy[IFLA_VF_INFO_MAX+1] = { [IFLA_VF_INFO] = { .type = NLA_NESTED }, }; static const struct nla_policy ifla_vf_policy[IFLA_VF_MAX+1] = { [IFLA_VF_MAC] = { .type = NLA_BINARY, .len = sizeof(struct ifla_vf_mac) }, [IFLA_VF_VLAN] = { .type = NLA_BINARY, .len = sizeof(struct ifla_vf_vlan) }, [IFLA_VF_TX_RATE] = { .type = NLA_BINARY, .len = sizeof(struct ifla_vf_tx_rate) }, [IFLA_VF_SPOOFCHK] = { .type = NLA_BINARY, .len = sizeof(struct ifla_vf_spoofchk) }, }; static const struct nla_policy ifla_port_policy[IFLA_PORT_MAX+1] = { [IFLA_PORT_VF] = { .type = NLA_U32 }, [IFLA_PORT_PROFILE] = { .type = NLA_STRING, .len = PORT_PROFILE_MAX }, [IFLA_PORT_VSI_TYPE] = { .type = NLA_BINARY, .len = sizeof(struct ifla_port_vsi)}, [IFLA_PORT_INSTANCE_UUID] = { .type = NLA_BINARY, .len = PORT_UUID_MAX }, [IFLA_PORT_HOST_UUID] = { .type = NLA_STRING, .len = PORT_UUID_MAX }, [IFLA_PORT_REQUEST] = { .type = NLA_U8, }, [IFLA_PORT_RESPONSE] = { .type = NLA_U16, }, }; struct net *rtnl_link_get_net(struct net *src_net, struct nlattr *tb[]) { struct net *net; /* Examine the link attributes and figure out which * network namespace we are talking about. */ if (tb[IFLA_NET_NS_PID]) net = get_net_ns_by_pid(nla_get_u32(tb[IFLA_NET_NS_PID])); else if (tb[IFLA_NET_NS_FD]) net = get_net_ns_by_fd(nla_get_u32(tb[IFLA_NET_NS_FD])); else net = get_net(src_net); return net; } EXPORT_SYMBOL(rtnl_link_get_net); static int validate_linkmsg(struct net_device *dev, struct nlattr *tb[]) { if (dev) { if (tb[IFLA_ADDRESS] && nla_len(tb[IFLA_ADDRESS]) < dev->addr_len) return -EINVAL; if (tb[IFLA_BROADCAST] && nla_len(tb[IFLA_BROADCAST]) < dev->addr_len) return -EINVAL; } if (tb[IFLA_AF_SPEC]) { struct nlattr *af; int rem, err; nla_for_each_nested(af, tb[IFLA_AF_SPEC], rem) { const struct rtnl_af_ops *af_ops; if (!(af_ops = rtnl_af_lookup(nla_type(af)))) return -EAFNOSUPPORT; if (!af_ops->set_link_af) return -EOPNOTSUPP; if (af_ops->validate_link_af) { err = af_ops->validate_link_af(dev, af); if (err < 0) return err; } } } return 0; } static int do_setvfinfo(struct net_device *dev, struct nlattr *attr) { int rem, err = -EINVAL; struct nlattr *vf; const struct net_device_ops *ops = dev->netdev_ops; nla_for_each_nested(vf, attr, rem) { switch (nla_type(vf)) { case IFLA_VF_MAC: { struct ifla_vf_mac *ivm; ivm = nla_data(vf); err = -EOPNOTSUPP; if (ops->ndo_set_vf_mac) err = ops->ndo_set_vf_mac(dev, ivm->vf, ivm->mac); break; } case IFLA_VF_VLAN: { struct ifla_vf_vlan *ivv; ivv = nla_data(vf); err = -EOPNOTSUPP; if (ops->ndo_set_vf_vlan) err = ops->ndo_set_vf_vlan(dev, ivv->vf, ivv->vlan, ivv->qos); break; } case IFLA_VF_TX_RATE: { struct ifla_vf_tx_rate *ivt; ivt = nla_data(vf); err = -EOPNOTSUPP; if (ops->ndo_set_vf_tx_rate) err = ops->ndo_set_vf_tx_rate(dev, ivt->vf, ivt->rate); break; } case IFLA_VF_SPOOFCHK: { struct ifla_vf_spoofchk *ivs; ivs = nla_data(vf); err = -EOPNOTSUPP; if (ops->ndo_set_vf_spoofchk) err = ops->ndo_set_vf_spoofchk(dev, ivs->vf, ivs->setting); break; } default: err = -EINVAL; break; } if (err) break; } return err; } static int do_set_master(struct net_device *dev, int ifindex) { struct net_device *upper_dev = netdev_master_upper_dev_get(dev); const struct net_device_ops *ops; int err; if (upper_dev) { if (upper_dev->ifindex == ifindex) return 0; ops = upper_dev->netdev_ops; if (ops->ndo_del_slave) { err = ops->ndo_del_slave(upper_dev, dev); if (err) return err; } else { return -EOPNOTSUPP; } } if (ifindex) { upper_dev = __dev_get_by_index(dev_net(dev), ifindex); if (!upper_dev) return -EINVAL; ops = upper_dev->netdev_ops; if (ops->ndo_add_slave) { err = ops->ndo_add_slave(upper_dev, dev); if (err) return err; } else { return -EOPNOTSUPP; } } return 0; } static int do_setlink(const struct sk_buff *skb, struct net_device *dev, struct ifinfomsg *ifm, struct nlattr **tb, char *ifname, int modified) { const struct net_device_ops *ops = dev->netdev_ops; int err; if (tb[IFLA_NET_NS_PID] || tb[IFLA_NET_NS_FD]) { struct net *net = rtnl_link_get_net(dev_net(dev), tb); if (IS_ERR(net)) { err = PTR_ERR(net); goto errout; } if (!netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN)) { err = -EPERM; goto errout; } err = dev_change_net_namespace(dev, net, ifname); put_net(net); if (err) goto errout; modified = 1; } if (tb[IFLA_MAP]) { struct rtnl_link_ifmap *u_map; struct ifmap k_map; if (!ops->ndo_set_config) { err = -EOPNOTSUPP; goto errout; } if (!netif_device_present(dev)) { err = -ENODEV; goto errout; } u_map = nla_data(tb[IFLA_MAP]); k_map.mem_start = (unsigned long) u_map->mem_start; k_map.mem_end = (unsigned long) u_map->mem_end; k_map.base_addr = (unsigned short) u_map->base_addr; k_map.irq = (unsigned char) u_map->irq; k_map.dma = (unsigned char) u_map->dma; k_map.port = (unsigned char) u_map->port; err = ops->ndo_set_config(dev, &k_map); if (err < 0) goto errout; modified = 1; } if (tb[IFLA_ADDRESS]) { struct sockaddr *sa; int len; len = sizeof(sa_family_t) + dev->addr_len; sa = kmalloc(len, GFP_KERNEL); if (!sa) { err = -ENOMEM; goto errout; } sa->sa_family = dev->type; memcpy(sa->sa_data, nla_data(tb[IFLA_ADDRESS]), dev->addr_len); err = dev_set_mac_address(dev, sa); kfree(sa); if (err) goto errout; modified = 1; } if (tb[IFLA_MTU]) { err = dev_set_mtu(dev, nla_get_u32(tb[IFLA_MTU])); if (err < 0) goto errout; modified = 1; } if (tb[IFLA_GROUP]) { dev_set_group(dev, nla_get_u32(tb[IFLA_GROUP])); modified = 1; } /* * Interface selected by interface index but interface * name provided implies that a name change has been * requested. */ if (ifm->ifi_index > 0 && ifname[0]) { err = dev_change_name(dev, ifname); if (err < 0) goto errout; modified = 1; } if (tb[IFLA_IFALIAS]) { err = dev_set_alias(dev, nla_data(tb[IFLA_IFALIAS]), nla_len(tb[IFLA_IFALIAS])); if (err < 0) goto errout; modified = 1; } if (tb[IFLA_BROADCAST]) { nla_memcpy(dev->broadcast, tb[IFLA_BROADCAST], dev->addr_len); call_netdevice_notifiers(NETDEV_CHANGEADDR, dev); } if (ifm->ifi_flags || ifm->ifi_change) { err = dev_change_flags(dev, rtnl_dev_combine_flags(dev, ifm)); if (err < 0) goto errout; } if (tb[IFLA_MASTER]) { err = do_set_master(dev, nla_get_u32(tb[IFLA_MASTER])); if (err) goto errout; modified = 1; } if (tb[IFLA_CARRIER]) { err = dev_change_carrier(dev, nla_get_u8(tb[IFLA_CARRIER])); if (err) goto errout; modified = 1; } if (tb[IFLA_TXQLEN]) dev->tx_queue_len = nla_get_u32(tb[IFLA_TXQLEN]); if (tb[IFLA_OPERSTATE]) set_operstate(dev, nla_get_u8(tb[IFLA_OPERSTATE])); if (tb[IFLA_LINKMODE]) { write_lock_bh(&dev_base_lock); dev->link_mode = nla_get_u8(tb[IFLA_LINKMODE]); write_unlock_bh(&dev_base_lock); } if (tb[IFLA_VFINFO_LIST]) { struct nlattr *attr; int rem; nla_for_each_nested(attr, tb[IFLA_VFINFO_LIST], rem) { if (nla_type(attr) != IFLA_VF_INFO) { err = -EINVAL; goto errout; } err = do_setvfinfo(dev, attr); if (err < 0) goto errout; modified = 1; } } err = 0; if (tb[IFLA_VF_PORTS]) { struct nlattr *port[IFLA_PORT_MAX+1]; struct nlattr *attr; int vf; int rem; err = -EOPNOTSUPP; if (!ops->ndo_set_vf_port) goto errout; nla_for_each_nested(attr, tb[IFLA_VF_PORTS], rem) { if (nla_type(attr) != IFLA_VF_PORT) continue; err = nla_parse_nested(port, IFLA_PORT_MAX, attr, ifla_port_policy); if (err < 0) goto errout; if (!port[IFLA_PORT_VF]) { err = -EOPNOTSUPP; goto errout; } vf = nla_get_u32(port[IFLA_PORT_VF]); err = ops->ndo_set_vf_port(dev, vf, port); if (err < 0) goto errout; modified = 1; } } err = 0; if (tb[IFLA_PORT_SELF]) { struct nlattr *port[IFLA_PORT_MAX+1]; err = nla_parse_nested(port, IFLA_PORT_MAX, tb[IFLA_PORT_SELF], ifla_port_policy); if (err < 0) goto errout; err = -EOPNOTSUPP; if (ops->ndo_set_vf_port) err = ops->ndo_set_vf_port(dev, PORT_SELF_VF, port); if (err < 0) goto errout; modified = 1; } if (tb[IFLA_AF_SPEC]) { struct nlattr *af; int rem; nla_for_each_nested(af, tb[IFLA_AF_SPEC], rem) { const struct rtnl_af_ops *af_ops; if (!(af_ops = rtnl_af_lookup(nla_type(af)))) BUG(); err = af_ops->set_link_af(dev, af); if (err < 0) goto errout; modified = 1; } } err = 0; errout: if (err < 0 && modified) net_warn_ratelimited("A link change request failed with some changes committed already. Interface %s may have been left with an inconsistent configuration, please check.\n", dev->name); return err; } static int rtnl_setlink(struct sk_buff *skb, struct nlmsghdr *nlh) { struct net *net = sock_net(skb->sk); struct ifinfomsg *ifm; struct net_device *dev; int err; struct nlattr *tb[IFLA_MAX+1]; char ifname[IFNAMSIZ]; err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFLA_MAX, ifla_policy); if (err < 0) goto errout; if (tb[IFLA_IFNAME]) nla_strlcpy(ifname, tb[IFLA_IFNAME], IFNAMSIZ); else ifname[0] = '\0'; err = -EINVAL; ifm = nlmsg_data(nlh); if (ifm->ifi_index > 0) dev = __dev_get_by_index(net, ifm->ifi_index); else if (tb[IFLA_IFNAME]) dev = __dev_get_by_name(net, ifname); else goto errout; if (dev == NULL) { err = -ENODEV; goto errout; } err = validate_linkmsg(dev, tb); if (err < 0) goto errout; err = do_setlink(skb, dev, ifm, tb, ifname, 0); errout: return err; } static int rtnl_dellink(struct sk_buff *skb, struct nlmsghdr *nlh) { struct net *net = sock_net(skb->sk); const struct rtnl_link_ops *ops; struct net_device *dev; struct ifinfomsg *ifm; char ifname[IFNAMSIZ]; struct nlattr *tb[IFLA_MAX+1]; int err; LIST_HEAD(list_kill); err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFLA_MAX, ifla_policy); if (err < 0) return err; if (tb[IFLA_IFNAME]) nla_strlcpy(ifname, tb[IFLA_IFNAME], IFNAMSIZ); ifm = nlmsg_data(nlh); if (ifm->ifi_index > 0) dev = __dev_get_by_index(net, ifm->ifi_index); else if (tb[IFLA_IFNAME]) dev = __dev_get_by_name(net, ifname); else return -EINVAL; if (!dev) return -ENODEV; ops = dev->rtnl_link_ops; if (!ops) return -EOPNOTSUPP; ops->dellink(dev, &list_kill); unregister_netdevice_many(&list_kill); return 0; } int rtnl_configure_link(struct net_device *dev, const struct ifinfomsg *ifm) { unsigned int old_flags; int err; old_flags = dev->flags; if (ifm && (ifm->ifi_flags || ifm->ifi_change)) { err = __dev_change_flags(dev, rtnl_dev_combine_flags(dev, ifm)); if (err < 0) return err; } dev->rtnl_link_state = RTNL_LINK_INITIALIZED; rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U); __dev_notify_flags(dev, old_flags); return 0; } EXPORT_SYMBOL(rtnl_configure_link); struct net_device *rtnl_create_link(struct net *net, char *ifname, const struct rtnl_link_ops *ops, struct nlattr *tb[]) { int err; struct net_device *dev; unsigned int num_tx_queues = 1; unsigned int num_rx_queues = 1; if (tb[IFLA_NUM_TX_QUEUES]) num_tx_queues = nla_get_u32(tb[IFLA_NUM_TX_QUEUES]); else if (ops->get_num_tx_queues) num_tx_queues = ops->get_num_tx_queues(); if (tb[IFLA_NUM_RX_QUEUES]) num_rx_queues = nla_get_u32(tb[IFLA_NUM_RX_QUEUES]); else if (ops->get_num_rx_queues) num_rx_queues = ops->get_num_rx_queues(); err = -ENOMEM; dev = alloc_netdev_mqs(ops->priv_size, ifname, ops->setup, num_tx_queues, num_rx_queues); if (!dev) goto err; dev_net_set(dev, net); dev->rtnl_link_ops = ops; dev->rtnl_link_state = RTNL_LINK_INITIALIZING; if (tb[IFLA_MTU]) dev->mtu = nla_get_u32(tb[IFLA_MTU]); if (tb[IFLA_ADDRESS]) { memcpy(dev->dev_addr, nla_data(tb[IFLA_ADDRESS]), nla_len(tb[IFLA_ADDRESS])); dev->addr_assign_type = NET_ADDR_SET; } if (tb[IFLA_BROADCAST]) memcpy(dev->broadcast, nla_data(tb[IFLA_BROADCAST]), nla_len(tb[IFLA_BROADCAST])); if (tb[IFLA_TXQLEN]) dev->tx_queue_len = nla_get_u32(tb[IFLA_TXQLEN]); if (tb[IFLA_OPERSTATE]) set_operstate(dev, nla_get_u8(tb[IFLA_OPERSTATE])); if (tb[IFLA_LINKMODE]) dev->link_mode = nla_get_u8(tb[IFLA_LINKMODE]); if (tb[IFLA_GROUP]) dev_set_group(dev, nla_get_u32(tb[IFLA_GROUP])); return dev; err: return ERR_PTR(err); } EXPORT_SYMBOL(rtnl_create_link); static int rtnl_group_changelink(const struct sk_buff *skb, struct net *net, int group, struct ifinfomsg *ifm, struct nlattr **tb) { struct net_device *dev; int err; for_each_netdev(net, dev) { if (dev->group == group) { err = do_setlink(skb, dev, ifm, tb, NULL, 0); if (err < 0) return err; } } return 0; } static int rtnl_newlink(struct sk_buff *skb, struct nlmsghdr *nlh) { struct net *net = sock_net(skb->sk); const struct rtnl_link_ops *ops; struct net_device *dev; struct ifinfomsg *ifm; char kind[MODULE_NAME_LEN]; char ifname[IFNAMSIZ]; struct nlattr *tb[IFLA_MAX+1]; struct nlattr *linkinfo[IFLA_INFO_MAX+1]; int err; #ifdef CONFIG_MODULES replay: #endif err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFLA_MAX, ifla_policy); if (err < 0) return err; if (tb[IFLA_IFNAME]) nla_strlcpy(ifname, tb[IFLA_IFNAME], IFNAMSIZ); else ifname[0] = '\0'; ifm = nlmsg_data(nlh); if (ifm->ifi_index > 0) dev = __dev_get_by_index(net, ifm->ifi_index); else { if (ifname[0]) dev = __dev_get_by_name(net, ifname); else dev = NULL; } err = validate_linkmsg(dev, tb); if (err < 0) return err; if (tb[IFLA_LINKINFO]) { err = nla_parse_nested(linkinfo, IFLA_INFO_MAX, tb[IFLA_LINKINFO], ifla_info_policy); if (err < 0) return err; } else memset(linkinfo, 0, sizeof(linkinfo)); if (linkinfo[IFLA_INFO_KIND]) { nla_strlcpy(kind, linkinfo[IFLA_INFO_KIND], sizeof(kind)); ops = rtnl_link_ops_get(kind); } else { kind[0] = '\0'; ops = NULL; } if (1) { struct nlattr *attr[ops ? ops->maxtype + 1 : 0], **data = NULL; struct net *dest_net; if (ops) { if (ops->maxtype && linkinfo[IFLA_INFO_DATA]) { err = nla_parse_nested(attr, ops->maxtype, linkinfo[IFLA_INFO_DATA], ops->policy); if (err < 0) return err; data = attr; } if (ops->validate) { err = ops->validate(tb, data); if (err < 0) return err; } } if (dev) { int modified = 0; if (nlh->nlmsg_flags & NLM_F_EXCL) return -EEXIST; if (nlh->nlmsg_flags & NLM_F_REPLACE) return -EOPNOTSUPP; if (linkinfo[IFLA_INFO_DATA]) { if (!ops || ops != dev->rtnl_link_ops || !ops->changelink) return -EOPNOTSUPP; err = ops->changelink(dev, tb, data); if (err < 0) return err; modified = 1; } return do_setlink(skb, dev, ifm, tb, ifname, modified); } if (!(nlh->nlmsg_flags & NLM_F_CREATE)) { if (ifm->ifi_index == 0 && tb[IFLA_GROUP]) return rtnl_group_changelink(skb, net, nla_get_u32(tb[IFLA_GROUP]), ifm, tb); return -ENODEV; } if (tb[IFLA_MAP] || tb[IFLA_MASTER] || tb[IFLA_PROTINFO]) return -EOPNOTSUPP; if (!ops) { #ifdef CONFIG_MODULES if (kind[0]) { __rtnl_unlock(); request_module("rtnl-link-%s", kind); rtnl_lock(); ops = rtnl_link_ops_get(kind); if (ops) goto replay; } #endif return -EOPNOTSUPP; } if (!ifname[0]) snprintf(ifname, IFNAMSIZ, "%s%%d", ops->kind); dest_net = rtnl_link_get_net(net, tb); if (IS_ERR(dest_net)) return PTR_ERR(dest_net); dev = rtnl_create_link(dest_net, ifname, ops, tb); if (IS_ERR(dev)) { err = PTR_ERR(dev); goto out; } dev->ifindex = ifm->ifi_index; if (ops->newlink) err = ops->newlink(net, dev, tb, data); else err = register_netdevice(dev); if (err < 0 && !IS_ERR(dev)) free_netdev(dev); if (err < 0) goto out; err = rtnl_configure_link(dev, ifm); if (err < 0) unregister_netdevice(dev); out: put_net(dest_net); return err; } } static int rtnl_getlink(struct sk_buff *skb, struct nlmsghdr* nlh) { struct net *net = sock_net(skb->sk); struct ifinfomsg *ifm; char ifname[IFNAMSIZ]; struct nlattr *tb[IFLA_MAX+1]; struct net_device *dev = NULL; struct sk_buff *nskb; int err; u32 ext_filter_mask = 0; err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFLA_MAX, ifla_policy); if (err < 0) return err; if (tb[IFLA_IFNAME]) nla_strlcpy(ifname, tb[IFLA_IFNAME], IFNAMSIZ); if (tb[IFLA_EXT_MASK]) ext_filter_mask = nla_get_u32(tb[IFLA_EXT_MASK]); ifm = nlmsg_data(nlh); if (ifm->ifi_index > 0) dev = __dev_get_by_index(net, ifm->ifi_index); else if (tb[IFLA_IFNAME]) dev = __dev_get_by_name(net, ifname); else return -EINVAL; if (dev == NULL) return -ENODEV; nskb = nlmsg_new(if_nlmsg_size(dev, ext_filter_mask), GFP_KERNEL); if (nskb == NULL) return -ENOBUFS; err = rtnl_fill_ifinfo(nskb, dev, RTM_NEWLINK, NETLINK_CB(skb).portid, nlh->nlmsg_seq, 0, 0, ext_filter_mask); if (err < 0) { /* -EMSGSIZE implies BUG in if_nlmsg_size */ WARN_ON(err == -EMSGSIZE); kfree_skb(nskb); } else err = rtnl_unicast(nskb, net, NETLINK_CB(skb).portid); return err; } static u16 rtnl_calcit(struct sk_buff *skb, struct nlmsghdr *nlh) { struct net *net = sock_net(skb->sk); struct net_device *dev; struct nlattr *tb[IFLA_MAX+1]; u32 ext_filter_mask = 0; u16 min_ifinfo_dump_size = 0; int hdrlen; /* Same kernel<->userspace interface hack as in rtnl_dump_ifinfo. */ hdrlen = nlmsg_len(nlh) < sizeof(struct ifinfomsg) ? sizeof(struct rtgenmsg) : sizeof(struct ifinfomsg); if (nlmsg_parse(nlh, hdrlen, tb, IFLA_MAX, ifla_policy) >= 0) { if (tb[IFLA_EXT_MASK]) ext_filter_mask = nla_get_u32(tb[IFLA_EXT_MASK]); } if (!ext_filter_mask) return NLMSG_GOODSIZE; /* * traverse the list of net devices and compute the minimum * buffer size based upon the filter mask. */ list_for_each_entry(dev, &net->dev_base_head, dev_list) { min_ifinfo_dump_size = max_t(u16, min_ifinfo_dump_size, if_nlmsg_size(dev, ext_filter_mask)); } return min_ifinfo_dump_size; } static int rtnl_dump_all(struct sk_buff *skb, struct netlink_callback *cb) { int idx; int s_idx = cb->family; if (s_idx == 0) s_idx = 1; for (idx = 1; idx <= RTNL_FAMILY_MAX; idx++) { int type = cb->nlh->nlmsg_type-RTM_BASE; if (idx < s_idx || idx == PF_PACKET) continue; if (rtnl_msg_handlers[idx] == NULL || rtnl_msg_handlers[idx][type].dumpit == NULL) continue; if (idx > s_idx) { memset(&cb->args[0], 0, sizeof(cb->args)); cb->prev_seq = 0; cb->seq = 0; } if (rtnl_msg_handlers[idx][type].dumpit(skb, cb)) break; } cb->family = idx; return skb->len; } void rtmsg_ifinfo(int type, struct net_device *dev, unsigned int change) { struct net *net = dev_net(dev); struct sk_buff *skb; int err = -ENOBUFS; size_t if_info_size; pr_info("rtmsg : %s[%x]\n", dev->name, dev->flags); skb = nlmsg_new((if_info_size = if_nlmsg_size(dev, 0)), GFP_KERNEL); if (skb == NULL) goto errout; err = rtnl_fill_ifinfo(skb, dev, type, 0, 0, change, 0, 0); if (err < 0) { /* -EMSGSIZE implies BUG in if_nlmsg_size() */ WARN_ON(err == -EMSGSIZE); kfree_skb(skb); goto errout; } rtnl_notify(skb, net, 0, RTNLGRP_LINK, NULL, GFP_KERNEL); return; errout: if (err < 0) rtnl_set_sk_err(net, RTNLGRP_LINK, err); } EXPORT_SYMBOL(rtmsg_ifinfo); static int nlmsg_populate_fdb_fill(struct sk_buff *skb, struct net_device *dev, u8 *addr, u32 pid, u32 seq, int type, unsigned int flags, int nlflags) { struct nlmsghdr *nlh; struct ndmsg *ndm; nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndm), nlflags); if (!nlh) return -EMSGSIZE; ndm = nlmsg_data(nlh); ndm->ndm_family = AF_BRIDGE; ndm->ndm_pad1 = 0; ndm->ndm_pad2 = 0; ndm->ndm_flags = flags; ndm->ndm_type = 0; ndm->ndm_ifindex = dev->ifindex; ndm->ndm_state = NUD_PERMANENT; if (nla_put(skb, NDA_LLADDR, ETH_ALEN, addr)) goto nla_put_failure; return nlmsg_end(skb, nlh); nla_put_failure: nlmsg_cancel(skb, nlh); return -EMSGSIZE; } static inline size_t rtnl_fdb_nlmsg_size(void) { return NLMSG_ALIGN(sizeof(struct ndmsg)) + nla_total_size(ETH_ALEN); } static void rtnl_fdb_notify(struct net_device *dev, u8 *addr, int type) { struct net *net = dev_net(dev); struct sk_buff *skb; int err = -ENOBUFS; skb = nlmsg_new(rtnl_fdb_nlmsg_size(), GFP_ATOMIC); if (!skb) goto errout; err = nlmsg_populate_fdb_fill(skb, dev, addr, 0, 0, type, NTF_SELF, 0); if (err < 0) { kfree_skb(skb); goto errout; } rtnl_notify(skb, net, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC); return; errout: rtnl_set_sk_err(net, RTNLGRP_NEIGH, err); } /** * ndo_dflt_fdb_add - default netdevice operation to add an FDB entry */ int ndo_dflt_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], struct net_device *dev, const unsigned char *addr, u16 flags) { int err = -EINVAL; /* If aging addresses are supported device will need to * implement its own handler for this. */ if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) { pr_info("%s: FDB only supports static addresses\n", dev->name); return err; } if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr)) err = dev_uc_add_excl(dev, addr); else if (is_multicast_ether_addr(addr)) err = dev_mc_add_excl(dev, addr); /* Only return duplicate errors if NLM_F_EXCL is set */ if (err == -EEXIST && !(flags & NLM_F_EXCL)) err = 0; return err; } EXPORT_SYMBOL(ndo_dflt_fdb_add); static int rtnl_fdb_add(struct sk_buff *skb, struct nlmsghdr *nlh) { struct net *net = sock_net(skb->sk); struct ndmsg *ndm; struct nlattr *tb[NDA_MAX+1]; struct net_device *dev; u8 *addr; int err; err = nlmsg_parse(nlh, sizeof(*ndm), tb, NDA_MAX, NULL); if (err < 0) return err; ndm = nlmsg_data(nlh); if (ndm->ndm_ifindex == 0) { pr_info("PF_BRIDGE: RTM_NEWNEIGH with invalid ifindex\n"); return -EINVAL; } dev = __dev_get_by_index(net, ndm->ndm_ifindex); if (dev == NULL) { pr_info("PF_BRIDGE: RTM_NEWNEIGH with unknown ifindex\n"); return -ENODEV; } if (!tb[NDA_LLADDR] || nla_len(tb[NDA_LLADDR]) != ETH_ALEN) { pr_info("PF_BRIDGE: RTM_NEWNEIGH with invalid address\n"); return -EINVAL; } addr = nla_data(tb[NDA_LLADDR]); if (is_zero_ether_addr(addr)) { pr_info("PF_BRIDGE: RTM_NEWNEIGH with invalid ether address\n"); return -EINVAL; } err = -EOPNOTSUPP; /* Support fdb on master device the net/bridge default case */ if ((!ndm->ndm_flags || ndm->ndm_flags & NTF_MASTER) && (dev->priv_flags & IFF_BRIDGE_PORT)) { struct net_device *br_dev = netdev_master_upper_dev_get(dev); const struct net_device_ops *ops = br_dev->netdev_ops; err = ops->ndo_fdb_add(ndm, tb, dev, addr, nlh->nlmsg_flags); if (err) goto out; else ndm->ndm_flags &= ~NTF_MASTER; } /* Embedded bridge, macvlan, and any other device support */ if ((ndm->ndm_flags & NTF_SELF)) { if (dev->netdev_ops->ndo_fdb_add) err = dev->netdev_ops->ndo_fdb_add(ndm, tb, dev, addr, nlh->nlmsg_flags); else err = ndo_dflt_fdb_add(ndm, tb, dev, addr, nlh->nlmsg_flags); if (!err) { rtnl_fdb_notify(dev, addr, RTM_NEWNEIGH); ndm->ndm_flags &= ~NTF_SELF; } } out: return err; } /** * ndo_dflt_fdb_del - default netdevice operation to delete an FDB entry */ int ndo_dflt_fdb_del(struct ndmsg *ndm, struct nlattr *tb[], struct net_device *dev, const unsigned char *addr) { int err = -EOPNOTSUPP; /* If aging addresses are supported device will need to * implement its own handler for this. */ if (!(ndm->ndm_state & NUD_PERMANENT)) { pr_info("%s: FDB only supports static addresses\n", dev->name); return -EINVAL; } if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr)) err = dev_uc_del(dev, addr); else if (is_multicast_ether_addr(addr)) err = dev_mc_del(dev, addr); else err = -EINVAL; return err; } EXPORT_SYMBOL(ndo_dflt_fdb_del); static int rtnl_fdb_del(struct sk_buff *skb, struct nlmsghdr *nlh) { struct net *net = sock_net(skb->sk); struct ndmsg *ndm; struct nlattr *tb[NDA_MAX+1]; struct net_device *dev; int err = -EINVAL; __u8 *addr; if (!netlink_capable(skb, CAP_NET_ADMIN)) return -EPERM; err = nlmsg_parse(nlh, sizeof(*ndm), tb, NDA_MAX, NULL); if (err < 0) return err; ndm = nlmsg_data(nlh); if (ndm->ndm_ifindex == 0) { pr_info("PF_BRIDGE: RTM_DELNEIGH with invalid ifindex\n"); return -EINVAL; } dev = __dev_get_by_index(net, ndm->ndm_ifindex); if (dev == NULL) { pr_info("PF_BRIDGE: RTM_DELNEIGH with unknown ifindex\n"); return -ENODEV; } if (!tb[NDA_LLADDR] || nla_len(tb[NDA_LLADDR]) != ETH_ALEN) { pr_info("PF_BRIDGE: RTM_DELNEIGH with invalid address\n"); return -EINVAL; } addr = nla_data(tb[NDA_LLADDR]); if (is_zero_ether_addr(addr)) { pr_info("PF_BRIDGE: RTM_DELNEIGH with invalid ether address\n"); return -EINVAL; } err = -EOPNOTSUPP; /* Support fdb on master device the net/bridge default case */ if ((!ndm->ndm_flags || ndm->ndm_flags & NTF_MASTER) && (dev->priv_flags & IFF_BRIDGE_PORT)) { struct net_device *br_dev = netdev_master_upper_dev_get(dev); const struct net_device_ops *ops = br_dev->netdev_ops; if (ops->ndo_fdb_del) err = ops->ndo_fdb_del(ndm, tb, dev, addr); if (err) goto out; else ndm->ndm_flags &= ~NTF_MASTER; } /* Embedded bridge, macvlan, and any other device support */ if (ndm->ndm_flags & NTF_SELF) { if (dev->netdev_ops->ndo_fdb_del) err = dev->netdev_ops->ndo_fdb_del(ndm, tb, dev, addr); else err = ndo_dflt_fdb_del(ndm, tb, dev, addr); if (!err) { rtnl_fdb_notify(dev, addr, RTM_DELNEIGH); ndm->ndm_flags &= ~NTF_SELF; } } out: return err; } static int nlmsg_populate_fdb(struct sk_buff *skb, struct netlink_callback *cb, struct net_device *dev, int *idx, struct netdev_hw_addr_list *list) { struct netdev_hw_addr *ha; int err; u32 portid, seq; portid = NETLINK_CB(cb->skb).portid; seq = cb->nlh->nlmsg_seq; list_for_each_entry(ha, &list->list, list) { if (*idx < cb->args[0]) goto skip; err = nlmsg_populate_fdb_fill(skb, dev, ha->addr, portid, seq, RTM_NEWNEIGH, NTF_SELF, NLM_F_MULTI); if (err < 0) return err; skip: *idx += 1; } return 0; } /** * ndo_dflt_fdb_dump - default netdevice operation to dump an FDB table. * @nlh: netlink message header * @dev: netdevice * * Default netdevice operation to dump the existing unicast address list. * Returns number of addresses from list put in skb. */ int ndo_dflt_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb, struct net_device *dev, int idx) { int err; netif_addr_lock_bh(dev); err = nlmsg_populate_fdb(skb, cb, dev, &idx, &dev->uc); if (err) goto out; nlmsg_populate_fdb(skb, cb, dev, &idx, &dev->mc); out: netif_addr_unlock_bh(dev); return idx; } EXPORT_SYMBOL(ndo_dflt_fdb_dump); static int rtnl_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb) { int idx = 0; struct net *net = sock_net(skb->sk); struct net_device *dev; rcu_read_lock(); for_each_netdev_rcu(net, dev) { if (dev->priv_flags & IFF_BRIDGE_PORT) { struct net_device *br_dev; const struct net_device_ops *ops; br_dev = netdev_master_upper_dev_get(dev); ops = br_dev->netdev_ops; if (ops->ndo_fdb_dump) idx = ops->ndo_fdb_dump(skb, cb, dev, idx); } if (dev->netdev_ops->ndo_fdb_dump) idx = dev->netdev_ops->ndo_fdb_dump(skb, cb, dev, idx); else idx = ndo_dflt_fdb_dump(skb, cb, dev, idx); } rcu_read_unlock(); cb->args[0] = idx; return skb->len; } int ndo_dflt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, struct net_device *dev, u16 mode) { struct nlmsghdr *nlh; struct ifinfomsg *ifm; struct nlattr *br_afspec; u8 operstate = netif_running(dev) ? dev->operstate : IF_OPER_DOWN; struct net_device *br_dev = netdev_master_upper_dev_get(dev); nlh = nlmsg_put(skb, pid, seq, RTM_NEWLINK, sizeof(*ifm), NLM_F_MULTI); if (nlh == NULL) return -EMSGSIZE; ifm = nlmsg_data(nlh); ifm->ifi_family = AF_BRIDGE; ifm->__ifi_pad = 0; ifm->ifi_type = dev->type; ifm->ifi_index = dev->ifindex; ifm->ifi_flags = dev_get_flags(dev); ifm->ifi_change = 0; if (nla_put_string(skb, IFLA_IFNAME, dev->name) || nla_put_u32(skb, IFLA_MTU, dev->mtu) || nla_put_u8(skb, IFLA_OPERSTATE, operstate) || (br_dev && nla_put_u32(skb, IFLA_MASTER, br_dev->ifindex)) || (dev->addr_len && nla_put(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr)) || (dev->ifindex != dev->iflink && nla_put_u32(skb, IFLA_LINK, dev->iflink))) goto nla_put_failure; br_afspec = nla_nest_start(skb, IFLA_AF_SPEC); if (!br_afspec) goto nla_put_failure; if (nla_put_u16(skb, IFLA_BRIDGE_FLAGS, BRIDGE_FLAGS_SELF) || nla_put_u16(skb, IFLA_BRIDGE_MODE, mode)) { nla_nest_cancel(skb, br_afspec); goto nla_put_failure; } nla_nest_end(skb, br_afspec); return nlmsg_end(skb, nlh); nla_put_failure: nlmsg_cancel(skb, nlh); return -EMSGSIZE; } EXPORT_SYMBOL(ndo_dflt_bridge_getlink); static int rtnl_bridge_getlink(struct sk_buff *skb, struct netlink_callback *cb) { struct net *net = sock_net(skb->sk); struct net_device *dev; int idx = 0; u32 portid = NETLINK_CB(cb->skb).portid; u32 seq = cb->nlh->nlmsg_seq; struct nlattr *extfilt; u32 filter_mask = 0; extfilt = nlmsg_find_attr(cb->nlh, sizeof(struct ifinfomsg), IFLA_EXT_MASK); if (extfilt) filter_mask = nla_get_u32(extfilt); rcu_read_lock(); for_each_netdev_rcu(net, dev) { const struct net_device_ops *ops = dev->netdev_ops; struct net_device *br_dev = netdev_master_upper_dev_get(dev); if (br_dev && br_dev->netdev_ops->ndo_bridge_getlink) { if (idx >= cb->args[0] && br_dev->netdev_ops->ndo_bridge_getlink( skb, portid, seq, dev, filter_mask) < 0) break; idx++; } if (ops->ndo_bridge_getlink) { if (idx >= cb->args[0] && ops->ndo_bridge_getlink(skb, portid, seq, dev, filter_mask) < 0) break; idx++; } } rcu_read_unlock(); cb->args[0] = idx; return skb->len; } static inline size_t bridge_nlmsg_size(void) { return NLMSG_ALIGN(sizeof(struct ifinfomsg)) + nla_total_size(IFNAMSIZ) /* IFLA_IFNAME */ + nla_total_size(MAX_ADDR_LEN) /* IFLA_ADDRESS */ + nla_total_size(sizeof(u32)) /* IFLA_MASTER */ + nla_total_size(sizeof(u32)) /* IFLA_MTU */ + nla_total_size(sizeof(u32)) /* IFLA_LINK */ + nla_total_size(sizeof(u32)) /* IFLA_OPERSTATE */ + nla_total_size(sizeof(u8)) /* IFLA_PROTINFO */ + nla_total_size(sizeof(struct nlattr)) /* IFLA_AF_SPEC */ + nla_total_size(sizeof(u16)) /* IFLA_BRIDGE_FLAGS */ + nla_total_size(sizeof(u16)); /* IFLA_BRIDGE_MODE */ } static int rtnl_bridge_notify(struct net_device *dev, u16 flags) { struct net *net = dev_net(dev); struct net_device *br_dev = netdev_master_upper_dev_get(dev); struct sk_buff *skb; int err = -EOPNOTSUPP; skb = nlmsg_new(bridge_nlmsg_size(), GFP_ATOMIC); if (!skb) { err = -ENOMEM; goto errout; } if ((!flags || (flags & BRIDGE_FLAGS_MASTER)) && br_dev && br_dev->netdev_ops->ndo_bridge_getlink) { err = br_dev->netdev_ops->ndo_bridge_getlink(skb, 0, 0, dev, 0); if (err < 0) goto errout; } if ((flags & BRIDGE_FLAGS_SELF) && dev->netdev_ops->ndo_bridge_getlink) { err = dev->netdev_ops->ndo_bridge_getlink(skb, 0, 0, dev, 0); if (err < 0) goto errout; } rtnl_notify(skb, net, 0, RTNLGRP_LINK, NULL, GFP_ATOMIC); return 0; errout: WARN_ON(err == -EMSGSIZE); kfree_skb(skb); rtnl_set_sk_err(net, RTNLGRP_LINK, err); return err; } static int rtnl_bridge_setlink(struct sk_buff *skb, struct nlmsghdr *nlh) { struct net *net = sock_net(skb->sk); struct ifinfomsg *ifm; struct net_device *dev; struct nlattr *br_spec, *attr = NULL; int rem, err = -EOPNOTSUPP; u16 oflags, flags = 0; bool have_flags = false; if (nlmsg_len(nlh) < sizeof(*ifm)) return -EINVAL; ifm = nlmsg_data(nlh); if (ifm->ifi_family != AF_BRIDGE) return -EPFNOSUPPORT; dev = __dev_get_by_index(net, ifm->ifi_index); if (!dev) { pr_info("PF_BRIDGE: RTM_SETLINK with unknown ifindex\n"); return -ENODEV; } br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC); if (br_spec) { nla_for_each_nested(attr, br_spec, rem) { if (nla_type(attr) == IFLA_BRIDGE_FLAGS) { have_flags = true; flags = nla_get_u16(attr); break; } } } oflags = flags; if (!flags || (flags & BRIDGE_FLAGS_MASTER)) { struct net_device *br_dev = netdev_master_upper_dev_get(dev); if (!br_dev || !br_dev->netdev_ops->ndo_bridge_setlink) { err = -EOPNOTSUPP; goto out; } err = br_dev->netdev_ops->ndo_bridge_setlink(dev, nlh); if (err) goto out; flags &= ~BRIDGE_FLAGS_MASTER; } if ((flags & BRIDGE_FLAGS_SELF)) { if (!dev->netdev_ops->ndo_bridge_setlink) err = -EOPNOTSUPP; else err = dev->netdev_ops->ndo_bridge_setlink(dev, nlh); if (!err) flags &= ~BRIDGE_FLAGS_SELF; } if (have_flags) memcpy(nla_data(attr), &flags, sizeof(flags)); /* Generate event to notify upper layer of bridge change */ if (!err) err = rtnl_bridge_notify(dev, oflags); out: return err; } static int rtnl_bridge_dellink(struct sk_buff *skb, struct nlmsghdr *nlh) { struct net *net = sock_net(skb->sk); struct ifinfomsg *ifm; struct net_device *dev; struct nlattr *br_spec, *attr = NULL; int rem, err = -EOPNOTSUPP; u16 oflags, flags = 0; bool have_flags = false; if (nlmsg_len(nlh) < sizeof(*ifm)) return -EINVAL; ifm = nlmsg_data(nlh); if (ifm->ifi_family != AF_BRIDGE) return -EPFNOSUPPORT; dev = __dev_get_by_index(net, ifm->ifi_index); if (!dev) { pr_info("PF_BRIDGE: RTM_SETLINK with unknown ifindex\n"); return -ENODEV; } br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC); if (br_spec) { nla_for_each_nested(attr, br_spec, rem) { if (nla_type(attr) == IFLA_BRIDGE_FLAGS) { have_flags = true; flags = nla_get_u16(attr); break; } } } oflags = flags; if (!flags || (flags & BRIDGE_FLAGS_MASTER)) { struct net_device *br_dev = netdev_master_upper_dev_get(dev); if (!br_dev || !br_dev->netdev_ops->ndo_bridge_dellink) { err = -EOPNOTSUPP; goto out; } err = br_dev->netdev_ops->ndo_bridge_dellink(dev, nlh); if (err) goto out; flags &= ~BRIDGE_FLAGS_MASTER; } if ((flags & BRIDGE_FLAGS_SELF)) { if (!dev->netdev_ops->ndo_bridge_dellink) err = -EOPNOTSUPP; else err = dev->netdev_ops->ndo_bridge_dellink(dev, nlh); if (!err) flags &= ~BRIDGE_FLAGS_SELF; } if (have_flags) memcpy(nla_data(attr), &flags, sizeof(flags)); /* Generate event to notify upper layer of bridge change */ if (!err) err = rtnl_bridge_notify(dev, oflags); out: return err; } /* Process one rtnetlink message. */ static int rtnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh) { struct net *net = sock_net(skb->sk); rtnl_doit_func doit; int sz_idx, kind; int family; int type; int err; type = nlh->nlmsg_type; if (type > RTM_MAX) return -EOPNOTSUPP; type -= RTM_BASE; /* All the messages must have at least 1 byte length */ if (nlmsg_len(nlh) < sizeof(struct rtgenmsg)) return 0; family = ((struct rtgenmsg *)nlmsg_data(nlh))->rtgen_family; sz_idx = type>>2; kind = type&3; if (kind != 2 && !netlink_net_capable(skb, CAP_NET_ADMIN)) return -EPERM; if (kind == 2 && nlh->nlmsg_flags&NLM_F_DUMP) { struct sock *rtnl; rtnl_dumpit_func dumpit; rtnl_calcit_func calcit; u16 min_dump_alloc = 0; dumpit = rtnl_get_dumpit(family, type); if (dumpit == NULL) return -EOPNOTSUPP; calcit = rtnl_get_calcit(family, type); if (calcit) min_dump_alloc = calcit(skb, nlh); __rtnl_unlock(); rtnl = net->rtnl; { struct netlink_dump_control c = { .dump = dumpit, .min_dump_alloc = min_dump_alloc, }; err = netlink_dump_start(rtnl, skb, nlh, &c); } rtnl_lock(); return err; } doit = rtnl_get_doit(family, type); if (doit == NULL) return -EOPNOTSUPP; return doit(skb, nlh); } static void rtnetlink_rcv(struct sk_buff *skb) { rtnl_lock(); netlink_rcv_skb(skb, &rtnetlink_rcv_msg); rtnl_unlock(); } static int rtnetlink_event(struct notifier_block *this, unsigned long event, void *ptr) { struct net_device *dev = ptr; switch (event) { case NETDEV_UP: case NETDEV_DOWN: case NETDEV_PRE_UP: case NETDEV_POST_INIT: case NETDEV_REGISTER: case NETDEV_CHANGE: case NETDEV_PRE_TYPE_CHANGE: case NETDEV_GOING_DOWN: case NETDEV_UNREGISTER: case NETDEV_UNREGISTER_FINAL: case NETDEV_RELEASE: case NETDEV_JOIN: break; default: rtmsg_ifinfo(RTM_NEWLINK, dev, 0); break; } return NOTIFY_DONE; } static struct notifier_block rtnetlink_dev_notifier = { .notifier_call = rtnetlink_event, }; static int __net_init rtnetlink_net_init(struct net *net) { struct sock *sk; struct netlink_kernel_cfg cfg = { .groups = RTNLGRP_MAX, .input = rtnetlink_rcv, .cb_mutex = &rtnl_mutex, .flags = NL_CFG_F_NONROOT_RECV, }; sk = netlink_kernel_create(net, NETLINK_ROUTE, &cfg); if (!sk) return -ENOMEM; net->rtnl = sk; return 0; } static void __net_exit rtnetlink_net_exit(struct net *net) { netlink_kernel_release(net->rtnl); net->rtnl = NULL; } static struct pernet_operations rtnetlink_net_ops = { .init = rtnetlink_net_init, .exit = rtnetlink_net_exit, }; void __init rtnetlink_init(void) { if (register_pernet_subsys(&rtnetlink_net_ops)) panic("rtnetlink_init: cannot initialize rtnetlink\n"); register_netdevice_notifier(&rtnetlink_dev_notifier); rtnl_register(PF_UNSPEC, RTM_GETLINK, rtnl_getlink, rtnl_dump_ifinfo, rtnl_calcit); rtnl_register(PF_UNSPEC, RTM_SETLINK, rtnl_setlink, NULL, NULL); rtnl_register(PF_UNSPEC, RTM_NEWLINK, rtnl_newlink, NULL, NULL); rtnl_register(PF_UNSPEC, RTM_DELLINK, rtnl_dellink, NULL, NULL); rtnl_register(PF_UNSPEC, RTM_GETADDR, NULL, rtnl_dump_all, NULL); rtnl_register(PF_UNSPEC, RTM_GETROUTE, NULL, rtnl_dump_all, NULL); rtnl_register(PF_BRIDGE, RTM_NEWNEIGH, rtnl_fdb_add, NULL, NULL); rtnl_register(PF_BRIDGE, RTM_DELNEIGH, rtnl_fdb_del, NULL, NULL); rtnl_register(PF_BRIDGE, RTM_GETNEIGH, NULL, rtnl_fdb_dump, NULL); rtnl_register(PF_BRIDGE, RTM_GETLINK, NULL, rtnl_bridge_getlink, NULL); rtnl_register(PF_BRIDGE, RTM_DELLINK, rtnl_bridge_dellink, NULL, NULL); rtnl_register(PF_BRIDGE, RTM_SETLINK, rtnl_bridge_setlink, NULL, NULL); }
ghostkim-sc/SMG920T_profiling_enabled
net/core/rtnetlink.c
C
apache-2.0
68,407
// Copyright 2015 CoreOS, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package ec2 import ( "fmt" "net" "reflect" "testing" "github.com/coreos/coreos-cloudinit/datasource" "github.com/coreos/coreos-cloudinit/datasource/metadata" "github.com/coreos/coreos-cloudinit/datasource/metadata/test" "github.com/coreos/coreos-cloudinit/pkg" ) func TestType(t *testing.T) { want := "ec2-metadata-service" if kind := (metadataService{}).Type(); kind != want { t.Fatalf("bad type: want %q, got %q", want, kind) } } func TestFetchAttributes(t *testing.T) { for _, s := range []struct { resources map[string]string err error tests []struct { path string val []string } }{ { resources: map[string]string{ "/": "a\nb\nc/", "/c/": "d\ne/", "/c/e/": "f", "/a": "1", "/b": "2", "/c/d": "3", "/c/e/f": "4", }, tests: []struct { path string val []string }{ {"/", []string{"a", "b", "c/"}}, {"/b", []string{"2"}}, {"/c/d", []string{"3"}}, {"/c/e/", []string{"f"}}, }, }, { err: fmt.Errorf("test error"), tests: []struct { path string val []string }{ {"", nil}, }, }, } { service := metadataService{metadata.MetadataService{ Client: &test.HttpClient{Resources: s.resources, Err: s.err}, }} for _, tt := range s.tests { attrs, err := service.fetchAttributes(tt.path) if err != s.err { t.Fatalf("bad error for %q (%q): want %q, got %q", tt.path, s.resources, s.err, err) } if !reflect.DeepEqual(attrs, tt.val) { t.Fatalf("bad fetch for %q (%q): want %q, got %q", tt.path, s.resources, tt.val, attrs) } } } } func TestFetchAttribute(t *testing.T) { for _, s := range []struct { resources map[string]string err error tests []struct { path string val string } }{ { resources: map[string]string{ "/": "a\nb\nc/", "/c/": "d\ne/", "/c/e/": "f", "/a": "1", "/b": "2", "/c/d": "3", "/c/e/f": "4", }, tests: []struct { path string val string }{ {"/a", "1"}, {"/b", "2"}, {"/c/d", "3"}, {"/c/e/f", "4"}, }, }, { err: fmt.Errorf("test error"), tests: []struct { path string val string }{ {"", ""}, }, }, } { service := metadataService{metadata.MetadataService{ Client: &test.HttpClient{Resources: s.resources, Err: s.err}, }} for _, tt := range s.tests { attr, err := service.fetchAttribute(tt.path) if err != s.err { t.Fatalf("bad error for %q (%q): want %q, got %q", tt.path, s.resources, s.err, err) } if attr != tt.val { t.Fatalf("bad fetch for %q (%q): want %q, got %q", tt.path, s.resources, tt.val, attr) } } } } func TestFetchMetadata(t *testing.T) { for _, tt := range []struct { root string metadataPath string resources map[string]string expect datasource.Metadata clientErr error expectErr error }{ { root: "/", metadataPath: "2009-04-04/meta-data", resources: map[string]string{ "/2009-04-04/meta-data/public-keys": "bad\n", }, expectErr: fmt.Errorf("malformed public key: \"bad\""), }, { root: "/", metadataPath: "2009-04-04/meta-data", resources: map[string]string{ "/2009-04-04/meta-data/hostname": "host", "/2009-04-04/meta-data/local-ipv4": "1.2.3.4", "/2009-04-04/meta-data/public-ipv4": "5.6.7.8", "/2009-04-04/meta-data/public-keys": "0=test1\n", "/2009-04-04/meta-data/public-keys/0": "openssh-key", "/2009-04-04/meta-data/public-keys/0/openssh-key": "key", }, expect: datasource.Metadata{ Hostname: "host", PrivateIPv4: net.ParseIP("1.2.3.4"), PublicIPv4: net.ParseIP("5.6.7.8"), SSHPublicKeys: map[string]string{"test1": "key"}, }, }, { root: "/", metadataPath: "2009-04-04/meta-data", resources: map[string]string{ "/2009-04-04/meta-data/hostname": "host domain another_domain", "/2009-04-04/meta-data/local-ipv4": "1.2.3.4", "/2009-04-04/meta-data/public-ipv4": "5.6.7.8", "/2009-04-04/meta-data/public-keys": "0=test1\n", "/2009-04-04/meta-data/public-keys/0": "openssh-key", "/2009-04-04/meta-data/public-keys/0/openssh-key": "key", }, expect: datasource.Metadata{ Hostname: "host", PrivateIPv4: net.ParseIP("1.2.3.4"), PublicIPv4: net.ParseIP("5.6.7.8"), SSHPublicKeys: map[string]string{"test1": "key"}, }, }, { clientErr: pkg.ErrTimeout{Err: fmt.Errorf("test error")}, expectErr: pkg.ErrTimeout{Err: fmt.Errorf("test error")}, }, } { service := &metadataService{metadata.MetadataService{ Root: tt.root, Client: &test.HttpClient{Resources: tt.resources, Err: tt.clientErr}, MetadataPath: tt.metadataPath, }} metadata, err := service.FetchMetadata() if Error(err) != Error(tt.expectErr) { t.Fatalf("bad error (%q): want %q, got %q", tt.resources, tt.expectErr, err) } if !reflect.DeepEqual(tt.expect, metadata) { t.Fatalf("bad fetch (%q): want %#v, got %#v", tt.resources, tt.expect, metadata) } } } func Error(err error) string { if err != nil { return err.Error() } return "" }
packethost/coreos-cloudinit
datasource/metadata/ec2/metadata_test.go
GO
apache-2.0
5,910
/*! ****************************************************************************** * * Pentaho Data Integration * * Copyright (C) 2002-2013 by Pentaho : http://www.pentaho.com * ******************************************************************************* * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * ******************************************************************************/ package org.pentaho.di.starmodeler; import org.pentaho.di.core.gui.SpoonFactory; import org.pentaho.di.i18n.LanguageChoice; import org.pentaho.di.ui.core.dialog.ErrorDialog; import org.pentaho.di.ui.spoon.ISpoonMenuController; import org.pentaho.di.ui.spoon.Spoon; import org.pentaho.di.ui.spoon.SpoonPerspectiveManager; import org.pentaho.di.ui.spoon.TabMapEntry; import org.pentaho.metadata.model.concept.types.LocalizedString; import org.pentaho.ui.xul.dom.Document; import org.pentaho.ui.xul.impl.AbstractXulEventHandler; import org.pentaho.xul.swt.tab.TabItem; public class ModelerHelper extends AbstractXulEventHandler implements ISpoonMenuController { protected static Class<?> PKG = ModelerHelper.class; // for i18n public static final String MODELER_NAME = "Modeler"; private static ModelerHelper instance = null; private String defaultLocale = LanguageChoice.getInstance().getDefaultLocale().toString(); private ModelerHelper() { } public static ModelerHelper getInstance() { if( instance == null ) { instance = new ModelerHelper(); Spoon spoon = ((Spoon)SpoonFactory.getInstance()); spoon.addSpoonMenuController(instance); } return instance; } protected String getUniqueUntitledTabName(Spoon spoon, String title) { int num = 1; String tabName = title + " " + num; // TODO: Add new plugin object type to spoon TabItem tabItem = spoon.delegates.tabs.findTabMapEntry(tabName, TabMapEntry.ObjectType.BROWSER).getTabItem(); while (tabItem != null) { tabName = title + " " + (++num); // TODO: Add new plugin object type to spoon tabItem = spoon.delegates.tabs.findTabMapEntry(tabName, TabMapEntry.ObjectType.BROWSER).getTabItem(); } return tabName; } public String getName(){ return "starModeler"; } public void createEmptyModel() { try { StarDomain starDomain = new StarDomain(); starDomain.getDomain().setName(new LocalizedString(defaultLocale, "Star model domain")); starDomain.getDomain().setDescription(new LocalizedString(defaultLocale, "This star model domain contains multiple star models for the same subject domain")); StarModelerPerspective.getInstance().createTabForDomain(starDomain); SpoonPerspectiveManager.getInstance().activatePerspective(StarModelerPerspective.class); } catch (Exception e) { new ErrorDialog(((Spoon) SpoonFactory.getInstance()).getShell(), "Error", "Error creating visualization", e); } } public void updateMenu(Document doc) { // Nothing so far. } }
sajeetharan/pentaho-kettle
plugins/star-modeler/src/org/pentaho/di/starmodeler/ModelerHelper.java
Java
apache-2.0
3,455
<?php /** * @package Hello_Dolly * @version 1.6 */ /* Plugin Name: Hello Dolly Plugin URI: http://wordpress.org/plugins/hello-dolly/ Description: This is not just a plugin, it symbolizes the hope and enthusiasm of an entire generation summed up in two words sung most famously by Louis Armstrong: Hello, Dolly. When activated you will randomly see a lyric from <cite>Hello, Dolly</cite> in the upper right of your admin screen on every page. Author: Matt Mullenweg Version: 1.6 Author URI: http://ma.tt/ */ function hello_dolly_get_lyric() { /** These are the lyrics to Hello Dolly */ $lyrics = "Hello, Dolly Well, hello, Dolly It's so nice to have you back where you belong You're lookin' swell, Dolly I can tell, Dolly You're still glowin', you're still crowin' You're still goin' strong We feel the room swayin' While the band's playin' One of your old favourite songs from way back when So, take her wrap, fellas Find her an empty lap, fellas Dolly'll never go away again Hello, Dolly Well, hello, Dolly It's so nice to have you back where you belong You're lookin' swell, Dolly I can tell, Dolly You're still glowin', you're still crowin' You're still goin' strong We feel the room swayin' While the band's playin' One of your old favourite songs from way back when Golly, gee, fellas Find her a vacant knee, fellas Dolly'll never go away Dolly'll never go away Dolly'll never go away again"; // Here we split it into lines $lyrics = explode( "\n", $lyrics ); // And then randomly choose a line return wptexturize( $lyrics[ mt_rand( 0, count( $lyrics ) - 1 ) ] ); } // This just echoes the chosen line, we'll position it later function hello_dolly() { $chosen = hello_dolly_get_lyric(); echo "<p id='dolly'>$chosen</p>"; } // Now we set that function up to execute when the admin_notices action is called add_action( 'admin_notices', 'hello_dolly' ); // We need some CSS to position the paragraph function dolly_css() { // This makes sure that the positioning is also good for right-to-left languages $x = is_rtl() ? 'left' : 'right'; echo " <style type='text/css'> #dolly { float: $x; padding-$x: 15px; padding-top: 5px; margin: 0; font-size: 11px; } </style> "; } add_action( 'admin_head', 'dolly_css' ); ?>
mrinsss/Full-Repo
mydealfound/blog/wp-content/plugins/hello.php
PHP
apache-2.0
2,337
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package java.util; /** * An AbstractSet is an abstract implementation of the Set interface. This * implementation does not support adding. A subclass must implement the * abstract methods iterator() and size(). * * @since 1.2 */ public abstract class AbstractSet<E> extends AbstractCollection<E> implements Set<E> { /** * Constructs a new instance of this AbstractSet. */ protected AbstractSet() { } /** * Compares the specified object to this Set and returns true if they are * equal. The object must be an instance of Set and contain the same * objects. * * @param object * the object to compare with this set. * @return {@code true} if the specified object is equal to this set, * {@code false} otherwise * @see #hashCode */ @Override public boolean equals(Object object) { if (this == object) { return true; } if (object instanceof Set) { Set<?> s = (Set<?>) object; try { return size() == s.size() && containsAll(s); } catch (NullPointerException ignored) { return false; } catch (ClassCastException ignored) { return false; } } return false; } /** * Returns the hash code for this set. Two set which are equal must return * the same value. This implementation calculates the hash code by adding * each element's hash code. * * @return the hash code of this set. * @see #equals */ @Override public int hashCode() { int result = 0; Iterator<?> it = iterator(); while (it.hasNext()) { Object next = it.next(); result += next == null ? 0 : next.hashCode(); } return result; } /** * Removes all occurrences in this collection which are contained in the * specified collection. * * @param collection * the collection of objects to remove. * @return {@code true} if this collection was modified, {@code false} * otherwise. * @throws UnsupportedOperationException * if removing from this collection is not supported. */ @Override public boolean removeAll(Collection<?> collection) { boolean result = false; if (size() <= collection.size()) { Iterator<?> it = iterator(); while (it.hasNext()) { if (collection.contains(it.next())) { it.remove(); result = true; } } } else { Iterator<?> it = collection.iterator(); while (it.hasNext()) { result = remove(it.next()) || result; } } return result; } }
Buggaboo/j2objc
jre_emul/android/libcore/luni/src/main/java/java/util/AbstractSet.java
Java
apache-2.0
3,711
package pflag import ( "fmt" "strconv" ) // -- float64 Value type float64Value float64 func newFloat64Value(val float64, p *float64) *float64Value { *p = val return (*float64Value)(p) } func (f *float64Value) Set(s string) error { v, err := strconv.ParseFloat(s, 64) *f = float64Value(v) return err } func (f *float64Value) Type() string { return "float64" } func (f *float64Value) String() string { return fmt.Sprintf("%v", *f) } func float64Conv(sval string) (interface{}, error) { return strconv.ParseFloat(sval, 64) } // GetFloat64 return the float64 value of a flag with the given name func (f *FlagSet) GetFloat64(name string) (float64, error) { val, err := f.getFlagType(name, "float64", float64Conv) if err != nil { return 0, err } return val.(float64), nil } // Float64Var defines a float64 flag with specified name, default value, and usage string. // The argument p points to a float64 variable in which to store the value of the flag. func (f *FlagSet) Float64Var(p *float64, name string, value float64, usage string) { f.VarP(newFloat64Value(value, p), name, "", usage) } // Float64VarP is like Float64Var, but accepts a shorthand letter that can be used after a single dash. func (f *FlagSet) Float64VarP(p *float64, name, shorthand string, value float64, usage string) { f.VarP(newFloat64Value(value, p), name, shorthand, usage) } // Float64Var defines a float64 flag with specified name, default value, and usage string. // The argument p points to a float64 variable in which to store the value of the flag. func Float64Var(p *float64, name string, value float64, usage string) { CommandLine.VarP(newFloat64Value(value, p), name, "", usage) } // Float64VarP is like Float64Var, but accepts a shorthand letter that can be used after a single dash. func Float64VarP(p *float64, name, shorthand string, value float64, usage string) { CommandLine.VarP(newFloat64Value(value, p), name, shorthand, usage) } // Float64 defines a float64 flag with specified name, default value, and usage string. // The return value is the address of a float64 variable that stores the value of the flag. func (f *FlagSet) Float64(name string, value float64, usage string) *float64 { p := new(float64) f.Float64VarP(p, name, "", value, usage) return p } // Float64P is like Float64, but accepts a shorthand letter that can be used after a single dash. func (f *FlagSet) Float64P(name, shorthand string, value float64, usage string) *float64 { p := new(float64) f.Float64VarP(p, name, shorthand, value, usage) return p } // Float64 defines a float64 flag with specified name, default value, and usage string. // The return value is the address of a float64 variable that stores the value of the flag. func Float64(name string, value float64, usage string) *float64 { return CommandLine.Float64P(name, "", value, usage) } // Float64P is like Float64, but accepts a shorthand letter that can be used after a single dash. func Float64P(name, shorthand string, value float64, usage string) *float64 { return CommandLine.Float64P(name, shorthand, value, usage) }
yangxiangyu/kubernetes
Godeps/_workspace/src/github.com/spf13/pflag/float64.go
GO
apache-2.0
3,093
package pflag import ( "fmt" "strconv" ) // -- uint64 Value type uint64Value uint64 func newUint64Value(val uint64, p *uint64) *uint64Value { *p = val return (*uint64Value)(p) } func (i *uint64Value) Set(s string) error { v, err := strconv.ParseUint(s, 0, 64) *i = uint64Value(v) return err } func (i *uint64Value) Type() string { return "uint64" } func (i *uint64Value) String() string { return fmt.Sprintf("%v", *i) } func uint64Conv(sval string) (interface{}, error) { v, err := strconv.ParseUint(sval, 0, 64) if err != nil { return 0, err } return uint64(v), nil } // GetUint64 return the uint64 value of a flag with the given name func (f *FlagSet) GetUint64(name string) (uint64, error) { val, err := f.getFlagType(name, "uint64", uint64Conv) if err != nil { return 0, err } return val.(uint64), nil } // Uint64Var defines a uint64 flag with specified name, default value, and usage string. // The argument p points to a uint64 variable in which to store the value of the flag. func (f *FlagSet) Uint64Var(p *uint64, name string, value uint64, usage string) { f.VarP(newUint64Value(value, p), name, "", usage) } // Uint64VarP is like Uint64Var, but accepts a shorthand letter that can be used after a single dash. func (f *FlagSet) Uint64VarP(p *uint64, name, shorthand string, value uint64, usage string) { f.VarP(newUint64Value(value, p), name, shorthand, usage) } // Uint64Var defines a uint64 flag with specified name, default value, and usage string. // The argument p points to a uint64 variable in which to store the value of the flag. func Uint64Var(p *uint64, name string, value uint64, usage string) { CommandLine.VarP(newUint64Value(value, p), name, "", usage) } // Uint64VarP is like Uint64Var, but accepts a shorthand letter that can be used after a single dash. func Uint64VarP(p *uint64, name, shorthand string, value uint64, usage string) { CommandLine.VarP(newUint64Value(value, p), name, shorthand, usage) } // Uint64 defines a uint64 flag with specified name, default value, and usage string. // The return value is the address of a uint64 variable that stores the value of the flag. func (f *FlagSet) Uint64(name string, value uint64, usage string) *uint64 { p := new(uint64) f.Uint64VarP(p, name, "", value, usage) return p } // Uint64P is like Uint64, but accepts a shorthand letter that can be used after a single dash. func (f *FlagSet) Uint64P(name, shorthand string, value uint64, usage string) *uint64 { p := new(uint64) f.Uint64VarP(p, name, shorthand, value, usage) return p } // Uint64 defines a uint64 flag with specified name, default value, and usage string. // The return value is the address of a uint64 variable that stores the value of the flag. func Uint64(name string, value uint64, usage string) *uint64 { return CommandLine.Uint64P(name, "", value, usage) } // Uint64P is like Uint64, but accepts a shorthand letter that can be used after a single dash. func Uint64P(name, shorthand string, value uint64, usage string) *uint64 { return CommandLine.Uint64P(name, shorthand, value, usage) }
widgetpl/contrib
addon-resizer/vendor/github.com/spf13/pflag/uint64.go
GO
apache-2.0
3,084
/*************************GO-LICENSE-START********************************* * Copyright 2014 ThoughtWorks, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. *************************GO-LICENSE-END***********************************/ package com.thoughtworks.go.plugin.activation; import com.thoughtworks.go.plugin.api.annotation.Load; import com.thoughtworks.go.plugin.api.annotation.UnLoad; import com.thoughtworks.go.plugin.api.info.PluginContext; import com.thoughtworks.go.plugin.api.info.PluginDescriptor; import com.thoughtworks.go.plugin.api.info.PluginDescriptorAware; public class NonExtensionWithLoadUnloadAnnotation implements PluginDescriptorAware { public static int loadInvoked = 0; public static int unLoadInvoked = 0; @Load public void setupData(PluginContext context) { loadInvoked++; } @UnLoad public void tearDown(PluginContext context) { unLoadInvoked++; } @Override public void setPluginDescriptor(PluginDescriptor descriptor) { } }
mdaliejaz/gocd
plugin-infra/go-plugin-activator/test/com/thoughtworks/go/plugin/activation/NonExtensionWithLoadUnloadAnnotation.java
Java
apache-2.0
1,534
/* Copyright 2016 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package e2e import ( "fmt" "k8s.io/kubernetes/federation/client/clientset_generated/federation_release_1_4" "k8s.io/kubernetes/pkg/api/errors" "k8s.io/kubernetes/pkg/client/unversioned/clientcmd" clientcmdapi "k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api" "k8s.io/kubernetes/test/e2e/framework" . "github.com/onsi/ginkgo" . "github.com/onsi/gomega" ) var _ = framework.KubeDescribe("[Feature:Federation]", func() { f := framework.NewDefaultFederatedFramework("federation-apiserver-authn") var _ = Describe("Federation API server authentication", func() { BeforeEach(func() { framework.SkipUnlessFederated(f.Client) }) It("should accept cluster resources when the client has right authentication credentials", func() { framework.SkipUnlessFederated(f.Client) nsName := f.FederationNamespace.Name svc := createServiceOrFail(f.FederationClientset_1_4, nsName, FederatedServiceName) deleteServiceOrFail(f.FederationClientset_1_4, nsName, svc.Name) }) It("should not accept cluster resources when the client has invalid authentication credentials", func() { framework.SkipUnlessFederated(f.Client) contexts := f.GetUnderlyingFederatedContexts() // `contexts` is obtained by calling // `f.GetUnderlyingFederatedContexts()`. This function in turn // checks that the contexts it returns does not include the // federation API server context. So `contexts` is guaranteed to // contain only the underlying Kubernetes cluster contexts. fcs, err := invalidAuthFederationClientSet(contexts[0].User) framework.ExpectNoError(err) nsName := f.FederationNamespace.Name svc, err := createService(fcs, nsName, FederatedServiceName) Expect(errors.IsUnauthorized(err)).To(BeTrue()) if err == nil && svc != nil { deleteServiceOrFail(fcs, nsName, svc.Name) } }) It("should not accept cluster resources when the client has no authentication credentials", func() { framework.SkipUnlessFederated(f.Client) fcs, err := invalidAuthFederationClientSet(nil) ExpectNoError(err) nsName := f.FederationNamespace.Name svc, err := createService(fcs, nsName, FederatedServiceName) Expect(errors.IsUnauthorized(err)).To(BeTrue()) if err == nil && svc != nil { deleteServiceOrFail(fcs, nsName, svc.Name) } }) }) }) func invalidAuthFederationClientSet(user *framework.KubeUser) (*federation_release_1_4.Clientset, error) { overrides := &clientcmd.ConfigOverrides{} if user != nil { overrides = &clientcmd.ConfigOverrides{ AuthInfo: clientcmdapi.AuthInfo{ Token: user.User.Token, Username: user.User.Username, Password: user.User.Password, }, } } config, err := framework.LoadFederatedConfig(overrides) if err != nil { return nil, err } if user == nil { config.Password = "" config.BearerToken = "" config.Username = "" } c, err := federation_release_1_4.NewForConfig(config) if err != nil { return nil, fmt.Errorf("error creating federation clientset: %v", err) } // Set timeout for each client in the set. c.DiscoveryClient.Client.Timeout = framework.SingleCallTimeout c.FederationClient.Client.Timeout = framework.SingleCallTimeout c.CoreClient.Client.Timeout = framework.SingleCallTimeout c.ExtensionsClient.Client.Timeout = framework.SingleCallTimeout return c, nil }
ksshanam/kubernetes
test/e2e/federation-authn.go
GO
apache-2.0
3,901
/* * Copyright 2000-2009 JetBrains s.r.o. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /* * @author max */ package com.intellij.psi.impl.source.tree; import com.intellij.openapi.extensions.ExtensionPointName; import com.intellij.psi.PsiElement; import com.intellij.psi.PsiManager; import com.intellij.util.CharTable; import org.jetbrains.annotations.NotNull; import org.jetbrains.annotations.Nullable; public interface TreeGenerator { ExtensionPointName<TreeGenerator> EP_NAME = ExtensionPointName.create("com.intellij.treeGenerator"); @Nullable TreeElement generateTreeFor(@NotNull PsiElement original, @NotNull CharTable table, @NotNull final PsiManager manager); }
semonte/intellij-community
platform/core-impl/src/com/intellij/psi/impl/source/tree/TreeGenerator.java
Java
apache-2.0
1,195
/* * Copyright 2008 Web Cohesion * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.springframework.security.oauth.provider.token; import org.springframework.beans.factory.DisposableBean; import java.util.Iterator; import java.util.Map; import java.util.concurrent.Executors; import java.util.concurrent.ScheduledExecutorService; import java.util.concurrent.TimeUnit; /** * Implementation of TokenServices that stores tokens in memory. The token services will schedule a thread to do cleaning up of expired tokens. * * @author Ryan Heaton */ public class InMemorySelfCleaningProviderTokenServices extends InMemoryProviderTokenServices implements DisposableBean { private ScheduledExecutorService scheduler; private Integer cleanupIntervalSeconds; @Override public void afterPropertiesSet() throws Exception { super.afterPropertiesSet(); if (cleanupIntervalSeconds == null) { cleanupIntervalSeconds = 60 * 60; } if (cleanupIntervalSeconds > 0) { scheduler = Executors.newSingleThreadScheduledExecutor(); Runnable cleanupLogic = new Runnable() { public void run() { Iterator<Map.Entry<String, OAuthProviderTokenImpl>> entriesIt = tokenStore.entrySet().iterator(); while (entriesIt.hasNext()) { Map.Entry<String, OAuthProviderTokenImpl> entry = entriesIt.next(); OAuthProviderTokenImpl tokenImpl = entry.getValue(); if (isExpired(tokenImpl)) { //there's a race condition here, but we'll live with it for now. entriesIt.remove(); onTokenRemoved(tokenImpl); } } } }; scheduler.scheduleAtFixedRate(cleanupLogic, getAccessTokenValiditySeconds(), cleanupIntervalSeconds, TimeUnit.SECONDS); } } public void destroy() throws Exception { if (scheduler != null) { scheduler.shutdownNow(); } } /** * The interval at which to schedule cleanup. (&lt;= 0 for never). * * @return The interval at which to schedule cleanup. */ public Integer getCleanupIntervalSeconds() { return cleanupIntervalSeconds; } /** * The interval at which to schedule cleanup. * * @param cleanupIntervalSeconds The interval at which to schedule cleanup. */ public void setCleanupIntervalSeconds(Integer cleanupIntervalSeconds) { this.cleanupIntervalSeconds = cleanupIntervalSeconds; } }
ollie314/spring-security-oauth
spring-security-oauth/src/main/java/org/springframework/security/oauth/provider/token/InMemorySelfCleaningProviderTokenServices.java
Java
apache-2.0
2,940
/* * This file is part of "SnipSnap Radeox Rendering Engine". Copyright (c) 2002 * Stephan J. Schmidt, Matthias L. Jugel All Rights Reserved. Please visit * http://radeox.org/ for updates and contact. --LICENSE NOTICE-- Licensed under * the Apache License, Version 2.0 (the "License"); you may not use this file * except in compliance with the License. You may obtain a copy of the License * at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable * law or agreed to in writing, software distributed under the License is * distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the specific language * governing permissions and limitations under the License. --LICENSE NOTICE-- */ /* * package org.radeox.example; import org.picocontainer.PicoContainer; import * org.picocontainer.defaults.DefaultPicoContainer; import * org.radeox.api.engine.RenderEngine; import * org.radeox.api.engine.context.InitialRenderContext; import * org.radeox.api.engine.context.RenderContext; import * org.radeox.engine.BaseRenderEngine; import * org.radeox.engine.context.BaseInitialRenderContext; import * org.radeox.engine.context.BaseRenderContext; import java.util.Locale; /* * Example how to use BaseRenderEngine with Pico @author Stephan J. Schmidt * * @version $Id$ / * public class PicoExample { public static void main(String[] args) { * String test = "==SnipSnap== {link:Radeox|http://radeox.org}"; * DefaultPicoContainer c = new * org.picocontainer.defaults.DefaultPicoContainer(); try { * InitialRenderContext initialContext = new * BaseInitialRenderContext(); * initialContext.set(RenderContext.INPUT_LOCALE, new * Locale("otherwiki", "")); * c.registerComponentInstance(InitialRenderContext.class, * initialContext); * c.registerComponentImplementation(RenderEngine.class, * BaseRenderEngine.class); c.getComponentInstances(); } catch * (Exception e) { System.err.println("Could not register component: * "+e); } PicoContainer container = c; // no only work with container // * Only ask for RenderEngine, we automatically get an object // that * implements RenderEngine RenderEngine engine = (RenderEngine) * container.getComponentInstance(RenderEngine.class); RenderContext * context = new BaseRenderContext(); * System.out.println(engine.render(test, context)); } } */
hackbuteer59/sakai
rwiki/rwiki-util/radeox/src/java/org/radeox/example/PicoExample.java
Java
apache-2.0
2,571
<?php /** * Zend Framework * * LICENSE * * This source file is subject to the new BSD license that is bundled * with this package in the file LICENSE.txt. * It is also available through the world-wide-web at this URL: * http://framework.zend.com/license/new-bsd * If you did not receive a copy of the license and are unable to * obtain it through the world-wide-web, please send an email * to license@zend.com so we can send you a copy immediately. * * @category Zend * @package Zend_Gdata * @subpackage YouTube * @copyright Copyright (c) 2005-2014 Zend Technologies USA Inc. (http://www.zend.com) * @license http://framework.zend.com/license/new-bsd New BSD License * @version $Id$ */ /** * @see Zend_Gdata_Media */ require_once 'Zend/Gdata/Media.php'; /** * @see Zend_Gdata_YouTube_VideoEntry */ require_once 'Zend/Gdata/YouTube/VideoEntry.php'; /** * @see Zend_Gdata_YouTube_VideoFeed */ require_once 'Zend/Gdata/YouTube/VideoFeed.php'; /** * @see Zend_Gdata_YouTube_CommentFeed */ require_once 'Zend/Gdata/YouTube/CommentFeed.php'; /** * @see Zend_Gdata_YouTube_PlaylistListFeed */ require_once 'Zend/Gdata/YouTube/PlaylistListFeed.php'; /** * @see Zend_Gdata_YouTube_SubscriptionFeed */ require_once 'Zend/Gdata/YouTube/SubscriptionFeed.php'; /** * @see Zend_Gdata_YouTube_ContactFeed */ require_once 'Zend/Gdata/YouTube/ContactFeed.php'; /** * @see Zend_Gdata_YouTube_PlaylistVideoFeed */ require_once 'Zend/Gdata/YouTube/PlaylistVideoFeed.php'; /** * @see Zend_Gdata_YouTube_ActivityFeed */ require_once 'Zend/Gdata/YouTube/ActivityFeed.php'; /** * @see Zend_Gdata_YouTube_InboxFeed */ require_once 'Zend/Gdata/YouTube/InboxFeed.php'; /** @see Zend_Xml_Security */ require_once 'Zend/Xml/Security.php'; /** * Service class for interacting with the YouTube Data API. * @link http://code.google.com/apis/youtube/ * * @category Zend * @package Zend_Gdata * @subpackage YouTube * @copyright Copyright (c) 2005-2014 Zend Technologies USA Inc. (http://www.zend.com) * @license http://framework.zend.com/license/new-bsd New BSD License */ class Zend_Gdata_YouTube extends Zend_Gdata_Media { const AUTH_SERVICE_NAME = 'youtube'; const CLIENTLOGIN_URL = 'https://www.google.com/youtube/accounts/ClientLogin'; const STANDARD_TOP_RATED_URI = 'https://gdata.youtube.com/feeds/api/standardfeeds/top_rated'; const STANDARD_MOST_VIEWED_URI = 'https://gdata.youtube.com/feeds/api/standardfeeds/most_viewed'; const STANDARD_RECENTLY_FEATURED_URI = 'https://gdata.youtube.com/feeds/api/standardfeeds/recently_featured'; const STANDARD_WATCH_ON_MOBILE_URI = 'https://gdata.youtube.com/feeds/api/standardfeeds/watch_on_mobile'; const STANDARD_TOP_RATED_URI_V2 = 'https://gdata.youtube.com/feeds/api/standardfeeds/top_rated'; const STANDARD_MOST_VIEWED_URI_V2 = 'https://gdata.youtube.com/feeds/api/standardfeeds/most_viewed'; const STANDARD_RECENTLY_FEATURED_URI_V2 = 'https://gdata.youtube.com/feeds/api/standardfeeds/recently_featured'; const STANDARD_WATCH_ON_MOBILE_URI_V2 = 'https://gdata.youtube.com/feeds/api/standardfeeds/watch_on_mobile'; const USER_URI = 'https://gdata.youtube.com/feeds/api/users'; const VIDEO_URI = 'https://gdata.youtube.com/feeds/api/videos'; const PLAYLIST_REL = 'http://gdata.youtube.com/schemas/2007#playlist'; const USER_UPLOADS_REL = 'http://gdata.youtube.com/schemas/2007#user.uploads'; const USER_PLAYLISTS_REL = 'http://gdata.youtube.com/schemas/2007#user.playlists'; const USER_SUBSCRIPTIONS_REL = 'http://gdata.youtube.com/schemas/2007#user.subscriptions'; const USER_CONTACTS_REL = 'http://gdata.youtube.com/schemas/2007#user.contacts'; const USER_FAVORITES_REL = 'http://gdata.youtube.com/schemas/2007#user.favorites'; const VIDEO_RESPONSES_REL = 'http://gdata.youtube.com/schemas/2007#video.responses'; const VIDEO_RATINGS_REL = 'http://gdata.youtube.com/schemas/2007#video.ratings'; const VIDEO_COMPLAINTS_REL = 'http://gdata.youtube.com/schemas/2007#video.complaints'; const ACTIVITY_FEED_URI = 'https://gdata.youtube.com/feeds/api/events'; const FRIEND_ACTIVITY_FEED_URI = 'https://gdata.youtube.com/feeds/api/users/default/friendsactivity'; /** * The URI of the in-reply-to schema for comments in reply to * other comments. * * @var string */ const IN_REPLY_TO_SCHEME = 'http://gdata.youtube.com/schemas/2007#in-reply-to'; /** * The URI of the inbox feed for the currently authenticated user. * * @var string */ const INBOX_FEED_URI = 'https://gdata.youtube.com/feeds/api/users/default/inbox'; /** * The maximum number of users for which activity can be requested for, * as enforced by the API. * * @var integer */ const ACTIVITY_FEED_MAX_USERS = 20; /** * The suffix for a feed of favorites. * * @var string */ const FAVORITES_URI_SUFFIX = 'favorites'; /** * The suffix for the user's upload feed. * * @var string */ const UPLOADS_URI_SUFFIX = 'uploads'; /** * The suffix for a feed of video responses. * * @var string */ const RESPONSES_URI_SUFFIX = 'responses'; /** * The suffix for a feed of related videos. * * @var string */ const RELATED_URI_SUFFIX = 'related'; /** * The suffix for a feed of messages (inbox entries). * * @var string */ const INBOX_URI_SUFFIX = 'inbox'; /** * Namespaces used for Zend_Gdata_YouTube * * @var array */ public static $namespaces = array( array('yt', 'http://gdata.youtube.com/schemas/2007', 1, 0), array('georss', 'http://www.georss.org/georss', 1, 0), array('gml', 'http://www.opengis.net/gml', 1, 0), array('media', 'http://search.yahoo.com/mrss/', 1, 0) ); /** * Create Zend_Gdata_YouTube object * * @param Zend_Http_Client $client (optional) The HTTP client to use when * when communicating with the Google servers. * @param string $applicationId The identity of the app in the form of * Company-AppName-Version * @param string $clientId The clientId issued by the YouTube dashboard * @param string $developerKey The developerKey issued by the YouTube dashboard */ public function __construct($client = null, $applicationId = 'MyCompany-MyApp-1.0', $clientId = null, $developerKey = null) { $this->registerPackage('Zend_Gdata_YouTube'); $this->registerPackage('Zend_Gdata_YouTube_Extension'); $this->registerPackage('Zend_Gdata_Media'); $this->registerPackage('Zend_Gdata_Media_Extension'); // NOTE This constructor no longer calls the parent constructor $this->setHttpClient($client, $applicationId, $clientId, $developerKey); } /** * Set the Zend_Http_Client object used for communication * * @param Zend_Http_Client $client The client to use for communication * @throws Zend_Gdata_App_HttpException * @return Zend_Gdata_App Provides a fluent interface */ public function setHttpClient($client, $applicationId = 'MyCompany-MyApp-1.0', $clientId = null, $developerKey = null) { if ($client === null) { $client = new Zend_Http_Client(); } if (!$client instanceof Zend_Http_Client) { require_once 'Zend/Gdata/App/HttpException.php'; throw new Zend_Gdata_App_HttpException( 'Argument is not an instance of Zend_Http_Client.'); } if ($clientId != null) { $client->setHeaders('X-GData-Client', $clientId); } if ($developerKey != null) { $client->setHeaders('X-GData-Key', 'key='. $developerKey); } return parent::setHttpClient($client, $applicationId); } /** * Retrieves a feed of videos. * * @param mixed $location (optional) The URL to query or a * Zend_Gdata_Query object from which a URL can be determined * @return Zend_Gdata_YouTube_VideoFeed The feed of videos found at the * specified URL. */ public function getVideoFeed($location = null) { if ($location == null) { $uri = self::VIDEO_URI; } else if ($location instanceof Zend_Gdata_Query) { $uri = $location->getQueryUrl($this->getMajorProtocolVersion()); } else { $uri = $location; } return parent::getFeed($uri, 'Zend_Gdata_YouTube_VideoFeed'); } /** * Retrieves a specific video entry. * * @param mixed $videoId The ID of the video to retrieve. * @param mixed $location (optional) The URL to query or a * Zend_Gdata_Query object from which a URL can be determined. * @param boolean $fullEntry (optional) Retrieve the full metadata for the * entry. Only possible if entry belongs to currently authenticated * user. An exception will be thrown otherwise. * @throws Zend_Gdata_App_HttpException * @return Zend_Gdata_YouTube_VideoEntry The video entry found at the * specified URL. */ public function getVideoEntry($videoId = null, $location = null, $fullEntry = false) { if ($videoId !== null) { if ($fullEntry) { return $this->getFullVideoEntry($videoId); } else { $uri = self::VIDEO_URI . "/" . $videoId; } } else if ($location instanceof Zend_Gdata_Query) { $uri = $location->getQueryUrl($this->getMajorProtocolVersion()); } else { $uri = $location; } return parent::getEntry($uri, 'Zend_Gdata_YouTube_VideoEntry'); } /** * Retrieves a video entry from the user's upload feed. * * @param mixed $videoID The ID of the video to retrieve. * @throws Zend_Gdata_App_HttpException * @return Zend_Gdata_YouTube_VideoEntry|null The video entry to be * retrieved, or null if it was not found or the user requesting it * did not have the appropriate permissions. */ public function getFullVideoEntry($videoId) { $uri = self::USER_URI . "/default/" . self::UPLOADS_URI_SUFFIX . "/$videoId"; return parent::getEntry($uri, 'Zend_Gdata_YouTube_VideoEntry'); } /** * Retrieves a feed of videos related to the specified video ID. * * @param string $videoId The videoId of interest * @param mixed $location (optional) The URL to query or a * Zend_Gdata_Query object from which a URL can be determined * @return Zend_Gdata_YouTube_VideoFeed The feed of videos found at the * specified URL. */ public function getRelatedVideoFeed($videoId = null, $location = null) { if ($videoId !== null) { $uri = self::VIDEO_URI . "/" . $videoId . "/" . self::RELATED_URI_SUFFIX; } else if ($location instanceof Zend_Gdata_Query) { $uri = $location->getQueryUrl($this->getMajorProtocolVersion()); } else { $uri = $location; } return parent::getFeed($uri, 'Zend_Gdata_YouTube_VideoFeed'); } /** * Retrieves a feed of video responses related to the specified video ID. * * @param string $videoId The videoId of interest * @param mixed $location (optional) The URL to query or a * Zend_Gdata_Query object from which a URL can be determined * @return Zend_Gdata_YouTube_VideoFeed The feed of videos found at the * specified URL. */ public function getVideoResponseFeed($videoId = null, $location = null) { if ($videoId !== null) { $uri = self::VIDEO_URI . "/" . $videoId . "/" . self::RESPONSES_URI_SUFFIX; } else if ($location instanceof Zend_Gdata_Query) { $uri = $location->getQueryUrl($this->getMajorProtocolVersion()); } else { $uri = $location; } return parent::getFeed($uri, 'Zend_Gdata_YouTube_VideoFeed'); } /** * Retrieves a feed of comments related to the specified video ID. * * @param string $videoId The videoId of interest * @param mixed $location (optional) The URL to query or a * Zend_Gdata_Query object from which a URL can be determined * @return Zend_Gdata_YouTube_CommentFeed The feed of videos found at the * specified URL. */ public function getVideoCommentFeed($videoId = null, $location = null) { if ($videoId !== null) { $uri = self::VIDEO_URI . "/" . $videoId . "/comments"; } else if ($location instanceof Zend_Gdata_Query) { $uri = $location->getQueryUrl($this->getMajorProtocolVersion()); } else { $uri = $location; } return parent::getFeed($uri, 'Zend_Gdata_YouTube_CommentFeed'); } /** * Retrieves a feed of comments related to the specified video ID. * * @param mixed $location (optional) The URL to query or a * Zend_Gdata_Query object from which a URL can be determined * @return Zend_Gdata_YouTube_CommentFeed The feed of videos found at the * specified URL. */ public function getTopRatedVideoFeed($location = null) { $standardFeedUri = self::STANDARD_TOP_RATED_URI; if ($this->getMajorProtocolVersion() == 2) { $standardFeedUri = self::STANDARD_TOP_RATED_URI_V2; } if ($location == null) { $uri = $standardFeedUri; } else if ($location instanceof Zend_Gdata_Query) { if ($location instanceof Zend_Gdata_YouTube_VideoQuery) { if (!isset($location->url)) { $location->setFeedType('top rated'); } } $uri = $location->getQueryUrl($this->getMajorProtocolVersion()); } else { $uri = $location; } return parent::getFeed($uri, 'Zend_Gdata_YouTube_VideoFeed'); } /** * Retrieves a feed of the most viewed videos. * * @param mixed $location (optional) The URL to query or a * Zend_Gdata_Query object from which a URL can be determined * @return Zend_Gdata_YouTube_VideoFeed The feed of videos found at the * specified URL. */ public function getMostViewedVideoFeed($location = null) { $standardFeedUri = self::STANDARD_MOST_VIEWED_URI; if ($this->getMajorProtocolVersion() == 2) { $standardFeedUri = self::STANDARD_MOST_VIEWED_URI_V2; } if ($location == null) { $uri = $standardFeedUri; } else if ($location instanceof Zend_Gdata_Query) { if ($location instanceof Zend_Gdata_YouTube_VideoQuery) { if (!isset($location->url)) { $location->setFeedType('most viewed'); } } $uri = $location->getQueryUrl($this->getMajorProtocolVersion()); } else { $uri = $location; } return parent::getFeed($uri, 'Zend_Gdata_YouTube_VideoFeed'); } /** * Retrieves a feed of recently featured videos. * * @param mixed $location (optional) The URL to query or a * Zend_Gdata_Query object from which a URL can be determined * @return Zend_Gdata_YouTube_VideoFeed The feed of videos found at the * specified URL. */ public function getRecentlyFeaturedVideoFeed($location = null) { $standardFeedUri = self::STANDARD_RECENTLY_FEATURED_URI; if ($this->getMajorProtocolVersion() == 2) { $standardFeedUri = self::STANDARD_RECENTLY_FEATURED_URI_V2; } if ($location == null) { $uri = $standardFeedUri; } else if ($location instanceof Zend_Gdata_Query) { if ($location instanceof Zend_Gdata_YouTube_VideoQuery) { if (!isset($location->url)) { $location->setFeedType('recently featured'); } } $uri = $location->getQueryUrl($this->getMajorProtocolVersion()); } else { $uri = $location; } return parent::getFeed($uri, 'Zend_Gdata_YouTube_VideoFeed'); } /** * Retrieves a feed of videos recently featured for mobile devices. * These videos will have RTSP links in the $entry->mediaGroup->content * * @param mixed $location (optional) The URL to query or a * Zend_Gdata_Query object from which a URL can be determined * @return Zend_Gdata_YouTube_VideoFeed The feed of videos found at the * specified URL. */ public function getWatchOnMobileVideoFeed($location = null) { $standardFeedUri = self::STANDARD_WATCH_ON_MOBILE_URI; if ($this->getMajorProtocolVersion() == 2) { $standardFeedUri = self::STANDARD_WATCH_ON_MOBILE_URI_V2; } if ($location == null) { $uri = $standardFeedUri; } else if ($location instanceof Zend_Gdata_Query) { if ($location instanceof Zend_Gdata_YouTube_VideoQuery) { if (!isset($location->url)) { $location->setFeedType('watch on mobile'); } } $uri = $location->getQueryUrl($this->getMajorProtocolVersion()); } else { $uri = $location; } return parent::getFeed($uri, 'Zend_Gdata_YouTube_VideoFeed'); } /** * Retrieves a feed which lists a user's playlist * * @param string $user (optional) The username of interest * @param mixed $location (optional) The URL to query or a * Zend_Gdata_Query object from which a URL can be determined * @return Zend_Gdata_YouTube_PlaylistListFeed The feed of playlists */ public function getPlaylistListFeed($user = null, $location = null) { if ($user !== null) { $uri = self::USER_URI . '/' . $user . '/playlists'; } else if ($location instanceof Zend_Gdata_Query) { $uri = $location->getQueryUrl($this->getMajorProtocolVersion()); } else { $uri = $location; } return parent::getFeed($uri, 'Zend_Gdata_YouTube_PlaylistListFeed'); } /** * Retrieves a feed of videos in a particular playlist * * @param mixed $location (optional) The URL to query or a * Zend_Gdata_Query object from which a URL can be determined * @return Zend_Gdata_YouTube_PlaylistVideoFeed The feed of videos found at * the specified URL. */ public function getPlaylistVideoFeed($location) { if ($location instanceof Zend_Gdata_Query) { $uri = $location->getQueryUrl($this->getMajorProtocolVersion()); } else { $uri = $location; } return parent::getFeed($uri, 'Zend_Gdata_YouTube_PlaylistVideoFeed'); } /** * Retrieves a feed of a user's subscriptions * * @param string $user (optional) The username of interest * @param mixed $location (optional) The URL to query or a * Zend_Gdata_Query object from which a URL can be determined * @return Zend_Gdata_YouTube_SubscriptionListFeed The feed of subscriptions */ public function getSubscriptionFeed($user = null, $location = null) { if ($user !== null) { $uri = self::USER_URI . '/' . $user . '/subscriptions'; } else if ($location instanceof Zend_Gdata_Query) { $uri = $location->getQueryUrl($this->getMajorProtocolVersion()); } else { $uri = $location; } return parent::getFeed($uri, 'Zend_Gdata_YouTube_SubscriptionFeed'); } /** * Retrieves a feed of a user's contacts * * @param string $user (optional) The username of interest * @param mixed $location (optional) The URL to query or a * Zend_Gdata_Query object from which a URL can be determined * @return Zend_Gdata_YouTube_ContactFeed The feed of contacts */ public function getContactFeed($user = null, $location = null) { if ($user !== null) { $uri = self::USER_URI . '/' . $user . '/contacts'; } else if ($location instanceof Zend_Gdata_Query) { $uri = $location->getQueryUrl($this->getMajorProtocolVersion()); } else { $uri = $location; } return parent::getFeed($uri, 'Zend_Gdata_YouTube_ContactFeed'); } /** * Retrieves a user's uploads * * @param string $user (optional) The username of interest * @param mixed $location (optional) The URL to query or a * Zend_Gdata_Query object from which a URL can be determined * @return Zend_Gdata_YouTube_VideoFeed The videos uploaded by the user */ public function getUserUploads($user = null, $location = null) { if ($user !== null) { $uri = self::USER_URI . '/' . $user . '/' . self::UPLOADS_URI_SUFFIX; } else if ($location instanceof Zend_Gdata_Query) { $uri = $location->getQueryUrl($this->getMajorProtocolVersion()); } else { $uri = $location; } return parent::getFeed($uri, 'Zend_Gdata_YouTube_VideoFeed'); } /** * Retrieves a user's favorites * * @param string $user (optional) The username of interest * @param mixed $location (optional) The URL to query or a * Zend_Gdata_Query object from which a URL can be determined * @return Zend_Gdata_YouTube_VideoFeed The videos favorited by the user */ public function getUserFavorites($user = null, $location = null) { if ($user !== null) { $uri = self::USER_URI . '/' . $user . '/' . self::FAVORITES_URI_SUFFIX; } else if ($location instanceof Zend_Gdata_Query) { $uri = $location->getQueryUrl($this->getMajorProtocolVersion()); } else { $uri = $location; } return parent::getFeed($uri, 'Zend_Gdata_YouTube_VideoFeed'); } /** * Retrieves a user's profile as an entry * * @param string $user (optional) The username of interest * @param mixed $location (optional) The URL to query or a * Zend_Gdata_Query object from which a URL can be determined * @return Zend_Gdata_YouTube_UserProfileEntry The user profile entry */ public function getUserProfile($user = null, $location = null) { if ($user !== null) { $uri = self::USER_URI . '/' . $user; } else if ($location instanceof Zend_Gdata_Query) { $uri = $location->getQueryUrl($this->getMajorProtocolVersion()); } else { $uri = $location; } return parent::getEntry($uri, 'Zend_Gdata_YouTube_UserProfileEntry'); } /** * Helper function for parsing a YouTube token response * * @param string $response The service response * @throws Zend_Gdata_App_Exception * @return array An array containing the token and URL */ public static function parseFormUploadTokenResponse($response) { // Load the feed as an XML DOMDocument object @ini_set('track_errors', 1); $doc = new DOMDocument(); $doc = @Zend_Xml_Security::scan($response, $doc); @ini_restore('track_errors'); if (!$doc) { require_once 'Zend/Gdata/App/Exception.php'; throw new Zend_Gdata_App_Exception( "Zend_Gdata_YouTube::parseFormUploadTokenResponse - " . "DOMDocument cannot parse XML: $php_errormsg"); } $responseElement = $doc->getElementsByTagName('response')->item(0); $urlText = null; $tokenText = null; if ($responseElement != null) { $urlElement = $responseElement->getElementsByTagName('url')->item(0); $tokenElement = $responseElement->getElementsByTagName('token')->item(0); if ($urlElement && $urlElement->hasChildNodes() && $tokenElement && $tokenElement->hasChildNodes()) { $urlText = $urlElement->firstChild->nodeValue; $tokenText = $tokenElement->firstChild->nodeValue; } } if ($tokenText != null && $urlText != null) { return array('token' => $tokenText, 'url' => $urlText); } else { require_once 'Zend/Gdata/App/Exception.php'; throw new Zend_Gdata_App_Exception( 'Form upload token not found in response'); } } /** * Retrieves a YouTube token * * @param Zend_Gdata_YouTube_VideoEntry $videoEntry The video entry * @param string $url The location as a string URL * @throws Zend_Gdata_App_Exception * @return array An array containing a token and URL */ public function getFormUploadToken($videoEntry, $url='https://gdata.youtube.com/action/GetUploadToken') { if ($url != null && is_string($url)) { // $response is a Zend_Http_response object $response = $this->post($videoEntry, $url); return self::parseFormUploadTokenResponse($response->getBody()); } else { require_once 'Zend/Gdata/App/Exception.php'; throw new Zend_Gdata_App_Exception( 'Url must be provided as a string URL'); } } /** * Retrieves the activity feed for users * * @param mixed $usernames A string identifying the usernames for which to * retrieve activity for. This can also be a Zend_Gdata_Query * object from which a URL can be determined. * @throws Zend_Gdata_App_VersionException if using version less than 2. * @return Zend_Gdata_YouTube_ActivityFeed */ public function getActivityForUser($username) { if ($this->getMajorProtocolVersion() == 1) { require_once 'Zend/Gdata/App/VersionException.php'; throw new Zend_Gdata_App_VersionException('User activity feeds ' . 'are not available in API version 1.'); } $uri = null; if ($username instanceof Zend_Gdata_Query) { $uri = $username->getQueryUrl($this->getMajorProtocolVersion()); } else { if (count(explode(',', $username)) > self::ACTIVITY_FEED_MAX_USERS) { require_once 'Zend/Gdata/App/InvalidArgumentException.php'; throw new Zend_Gdata_App_InvalidArgumentException( 'Activity feed can only retrieve for activity for up to ' . self::ACTIVITY_FEED_MAX_USERS . ' users per request'); } $uri = self::ACTIVITY_FEED_URI . '?author=' . $username; } return parent::getFeed($uri, 'Zend_Gdata_YouTube_ActivityFeed'); } /** * Retrieve the activity of the currently authenticated users friend. * * @throws Zend_Gdata_App_Exception if not logged in. * @return Zend_Gdata_YouTube_ActivityFeed */ public function getFriendActivityForCurrentUser() { if (!$this->isAuthenticated()) { require_once 'Zend/Gdata/App/Exception.php'; throw new Zend_Gdata_App_Exception('You must be authenticated to ' . 'use the getFriendActivityForCurrentUser function in Zend_' . 'Gdata_YouTube.'); } return parent::getFeed(self::FRIEND_ACTIVITY_FEED_URI, 'Zend_Gdata_YouTube_ActivityFeed'); } /** * Retrieve a feed of messages in the currently authenticated user's inbox. * * @throws Zend_Gdata_App_Exception if not logged in. * @return Zend_Gdata_YouTube_InboxFeed|null */ public function getInboxFeedForCurrentUser() { if (!$this->isAuthenticated()) { require_once 'Zend/Gdata/App/Exception.php'; throw new Zend_Gdata_App_Exception('You must be authenticated to ' . 'use the getInboxFeedForCurrentUser function in Zend_' . 'Gdata_YouTube.'); } return parent::getFeed(self::INBOX_FEED_URI, 'Zend_Gdata_YouTube_InboxFeed'); } /** * Send a video message. * * Note: Either a Zend_Gdata_YouTube_VideoEntry or a valid video ID must * be provided. * * @param string $body The body of the message * @param Zend_Gdata_YouTube_VideoEntry (optional) The video entry to send * @param string $videoId The id of the video to send * @param string $recipientUserName The username of the recipient * @throws Zend_Gdata_App_InvalidArgumentException if no valid * Zend_Gdata_YouTube_VideoEntry or videoId were provided * @return Zend_Gdata_YouTube_InboxEntry|null The * Zend_Gdata_YouTube_Inbox_Entry representing the sent message. * */ public function sendVideoMessage($body, $videoEntry = null, $videoId = null, $recipientUserName) { if (!$videoId && !$videoEntry) { require_once 'Zend/Gdata/App/InvalidArgumentException.php'; throw new Zend_Gdata_App_InvalidArgumentException( 'Expecting either a valid videoID or a videoEntry object in ' . 'Zend_Gdata_YouTube->sendVideoMessage().'); } $messageEntry = new Zend_Gdata_YouTube_InboxEntry(); if ($this->getMajorProtocolVersion() == null || $this->getMajorProtocolVersion() == 1) { if (!$videoId) { $videoId = $videoEntry->getVideoId(); } elseif (strlen($videoId) < 12) { //Append the full URI $videoId = self::VIDEO_URI . '/' . $videoId; } $messageEntry->setId($this->newId($videoId)); // TODO there seems to be a bug where v1 inbox entries dont // retain their description... $messageEntry->setDescription( new Zend_Gdata_YouTube_Extension_Description($body)); } else { if (!$videoId) { $videoId = $videoEntry->getVideoId(); $videoId = substr($videoId, strrpos($videoId, ':')); } $messageEntry->setId($this->newId($videoId)); $messageEntry->setSummary($this->newSummary($body)); } $insertUrl = 'https://gdata.youtube.com/feeds/api/users/' . $recipientUserName . '/inbox'; $response = $this->insertEntry($messageEntry, $insertUrl, 'Zend_Gdata_YouTube_InboxEntry'); return $response; } /** * Post a comment in reply to an existing comment * * @param Zend_Gdata_YouTube_CommentEntry $commentEntry The comment entry * to reply to * @param string $commentText The text of the * comment to post * @return Zend_Gdata_YouTube_CommentEntry the posted comment */ public function replyToCommentEntry($commentEntry, $commentText) { $newComment = $this->newCommentEntry(); $newComment->content = $this->newContent()->setText($commentText); $commentId = $commentEntry->getId(); $commentIdArray = explode(':', $commentId); // create a new link element $inReplyToLinkHref = self::VIDEO_URI . '/' . $commentIdArray[3] . '/comments/' . $commentIdArray[5]; $inReplyToLink = $this->newLink($inReplyToLinkHref, self::IN_REPLY_TO_SCHEME, $type="application/atom+xml"); $links = $newComment->getLink(); $links[] = $inReplyToLink; $newComment->setLink($links); $commentFeedPostUrl = self::VIDEO_URI . '/' . $commentIdArray[3] . '/comments'; return $this->insertEntry($newComment, $commentFeedPostUrl, 'Zend_Gdata_YouTube_CommentEntry'); } }
mykhie/ePT-Repository
public/Zend/Gdata/YouTube.php
PHP
apache-2.0
32,397
// @author Bhavya Mehta package com.marshalchen.common.demoofui.listviewfilter.ui; import java.util.ArrayList; import android.content.Context; import android.graphics.Canvas; import android.graphics.Paint; import android.util.AttributeSet; import android.view.MotionEvent; import android.view.View; import com.marshalchen.common.uimodule.listviewfilter.IIndexBarFilter; import com.marshalchen.common.demoofui.R; // Represents right side index bar view with unique first latter of list view row text public class ListFilter extends View { // index bar margin float mIndexbarMargin; // user touched Y axis coordinate value float mSideIndexY; // flag used in touch events manipulations boolean mIsIndexing = false; // holds current section position selected by user int mCurrentSectionPosition = -1; // array list to store section positions public ArrayList<Integer> mListSections; // array list to store listView data ArrayList<String> mListItems; // paint object Paint mIndexPaint; // context object Context mContext; // interface object used as bridge between list view and index bar view for // filtering list view content on touch event IIndexBarFilter mIndexBarFilter; public ListFilter(Context context) { super(context); this.mContext = context; } public ListFilter(Context context, AttributeSet attrs) { super(context, attrs); this.mContext = context; } public ListFilter(Context context, AttributeSet attrs, int defStyle) { super(context, attrs, defStyle); this.mContext = context; } public void setData(PinnedHeaderListView listView, ArrayList<String> listItems,ArrayList<Integer> listSections) { this.mListItems = listItems; this.mListSections = listSections; // list view implements mIndexBarFilter interface mIndexBarFilter = listView; // set index bar margin from resources mIndexbarMargin = 40; // index bar item color and text size mIndexPaint = new Paint(); mIndexPaint.setColor(mContext.getResources().getColor(R.color.black)); mIndexPaint.setAntiAlias(true); mIndexPaint.setTextSize(20); } // draw view content on canvas using paint @Override protected void onDraw(Canvas canvas) { if (mListSections != null && mListSections.size() > 1) { float sectionHeight = (getMeasuredHeight() - 2 * mIndexbarMargin)/ mListSections.size(); float paddingTop = (sectionHeight - (mIndexPaint.descent() - mIndexPaint.ascent())) / 2; for (int i = 0; i < mListSections.size(); i++) { float paddingLeft = (getMeasuredWidth() - mIndexPaint.measureText(getSectionText(mListSections.get(i)))) / 2; canvas.drawText(getSectionText(mListSections.get(i)), paddingLeft, mIndexbarMargin + (sectionHeight * i) + paddingTop + mIndexPaint.descent(), mIndexPaint); } } super.onDraw(canvas); } public String getSectionText(int sectionPosition) { return mListItems.get(sectionPosition); } boolean contains(float x, float y) { // Determine if the point is in index bar region, which includes the // right margin of the bar return (x >= getLeft() && y >= getTop() && y <= getTop() + getMeasuredHeight()); } void filterListItem(float sideIndexY) { mSideIndexY = sideIndexY; // filter list items and get touched section position with in index bar mCurrentSectionPosition = (int) (((mSideIndexY) - getTop() - mIndexbarMargin) / ((getMeasuredHeight() - (2 * mIndexbarMargin)) / mListSections.size())); if (mCurrentSectionPosition >= 0 && mCurrentSectionPosition < mListSections.size()) { int position = mListSections.get(mCurrentSectionPosition); String previewText = mListItems.get(position); mIndexBarFilter.filterList(mSideIndexY, position, previewText); } } public boolean onTouchEvent(MotionEvent ev) { switch (ev.getAction()) { case MotionEvent.ACTION_DOWN: // If down event occurs inside index bar region, start indexing if (contains(ev.getX(), ev.getY())) { // It demonstrates that the motion event started from index // bar mIsIndexing = true; // Determine which section the point is in, and move the // list to // that section filterListItem(ev.getY()); return true; } else { mCurrentSectionPosition = -1; return false; } case MotionEvent.ACTION_MOVE: if (mIsIndexing) { // If this event moves inside index bar if (contains(ev.getX(), ev.getY())) { // Determine which section the point is in, and move the // list to that section filterListItem(ev.getY()); return true; } else { mCurrentSectionPosition = -1; return false; } } break; case MotionEvent.ACTION_UP: if (mIsIndexing) { mIsIndexing = false; mCurrentSectionPosition = -1; } break; } return false; } }
sitexa/UltimateAndroid
UltimateAndroidGradle/demoofui/src/main/java/com/marshalchen/common/demoofui/listviewfilter/ui/ListFilter.java
Java
apache-2.0
5,863
--- feature_name: Pixelated Image Rendering chrome_version: 41 feature_id: 5118058116939776 --- <p>The original image is a 2x2 pixel PNG. Squint or zoom into this dot: <img src="data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAIAAAACCAYAAABytg0kAAAAFElEQVQIHWP4z8DwHwyBNJDN8B8AQNEG+t5Ik2kAAAAASUVORK5CYII="></p> <p> When you blow the image up in size (for example, to 100x100 pixels) without editing the original source image, the browser will apply anti-aliasing. Like so: </p> <p><img src="data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAIAAAACCAYAAABytg0kAAAAFElEQVQIHWP4z8DwHwyBNJDN8B8AQNEG+t5Ik2kAAAAASUVORK5CYII=" width="100px" height="100px"></p> <p> To instead use a nearest-neighbor interpolation resulting in sharper edges, add <code>image-rendering: pixelated</code> to the image. This can be applied to image and canvas elements, along with any images loaded via <code>background-image</code> in CSS. </p> {% capture initial_output_content %} <img src="data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAIAAAACCAYAAABytg0kAAAAFElEQVQIHWP4z8DwHwyBNJDN8B8AQNEG+t5Ik2kAAAAASUVORK5CYII=" width="100px" height="100px" class="pixelated"> {% endcapture %} {% include output_helper.html initial_output_content=initial_output_content %} {% capture css %} .pixelated { image-rendering: pixelated; } {% endcapture %} {% include css_snippet.html css=css %}
beaufortfrancois/samples
image-rendering-pixelated/index.html
HTML
apache-2.0
1,371
/* * Copyright 2000-2014 JetBrains s.r.o. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.intellij.openapi.wm.impl.content; import com.intellij.openapi.ui.JBPopupMenu; import com.intellij.openapi.ui.popup.ListPopup; import com.intellij.ui.UIBundle; import com.intellij.ui.awt.RelativeRectangle; import com.intellij.ui.content.Content; import com.intellij.ui.content.ContentManager; import com.intellij.ui.content.ContentManagerEvent; import com.intellij.ui.content.TabbedContent; import com.intellij.util.ui.BaseButtonBehavior; import com.intellij.util.ui.UIUtil; import org.jetbrains.annotations.Nullable; import javax.swing.*; import javax.swing.event.PopupMenuEvent; import javax.swing.event.PopupMenuListener; import java.awt.*; import java.awt.event.ActionEvent; import java.awt.event.ActionListener; import java.awt.event.MouseEvent; import java.awt.image.BufferedImage; import java.util.ArrayList; import java.util.HashMap; import java.util.Map; class TabContentLayout extends ContentLayout { static final int MORE_ICON_BORDER = 6; LayoutData myLastLayout; JPopupMenu myPopup; final PopupMenuListener myPopupListener; ArrayList<ContentTabLabel> myTabs = new ArrayList<ContentTabLabel>(); final Map<Content, ContentTabLabel> myContent2Tabs = new HashMap<Content, ContentTabLabel>(); private Map<String, BufferedImage> myCached = new com.intellij.util.containers.HashMap<String, BufferedImage>(); private final MoreIcon myMoreIcon = new MoreIcon() { protected Rectangle getIconRec() { return myLastLayout.moreRect; } protected boolean isActive() { return myUi.myWindow.isActive(); } protected int getIconY(final Rectangle iconRec) { return iconRec.height / TAB_ARC - getIconHeight() / TAB_ARC; } }; TabContentLayout(ToolWindowContentUi ui) { super(ui); myPopupListener = new MyPopupListener(); new BaseButtonBehavior(myUi) { protected void execute(final MouseEvent e) { if (!myUi.isCurrent(TabContentLayout.this)) return; if (myLastLayout != null) { final Rectangle moreRect = myLastLayout.moreRect; if (moreRect != null && moreRect.contains(e.getPoint())) { showPopup(); } } } }; } @Override public void init() { reset(); myIdLabel = new BaseLabel(myUi, false) { @Override protected boolean allowEngravement() { return myUi.myWindow.isActive(); } }; for (int i = 0; i < myUi.myManager.getContentCount(); i++) { contentAdded(new ContentManagerEvent(this, myUi.myManager.getContent(i), i)); } } @Override public void reset() { myTabs.clear(); myContent2Tabs.clear(); myIdLabel = null; } private void showPopup() { myPopup = new JBPopupMenu(); myPopup.addPopupMenuListener(myPopupListener); ArrayList<ContentTabLabel> tabs = myTabs; for (final ContentTabLabel each : tabs) { final JCheckBoxMenuItem item = new JCheckBoxMenuItem(each.getText()); if (myUi.myManager.isSelected(each.myContent)) { item.setSelected(true); } item.addActionListener(new ActionListener() { public void actionPerformed(final ActionEvent e) { myUi.myManager.setSelectedContent(each.myContent, true); } }); myPopup.add(item); } myPopup.show(myUi, myLastLayout.moreRect.x, myLastLayout.moreRect.y); } private class MyPopupListener implements PopupMenuListener { public void popupMenuWillBecomeVisible(final PopupMenuEvent e) { } public void popupMenuWillBecomeInvisible(final PopupMenuEvent e) { if (myPopup != null) { myPopup.removePopupMenuListener(this); } myPopup = null; } public void popupMenuCanceled(final PopupMenuEvent e) { } } @Override public void layout() { Rectangle bounds = myUi.getBounds(); ContentManager manager = myUi.myManager; LayoutData data = new LayoutData(myUi); data.eachX = 2; data.eachY = 0; if (isIdVisible()) { myIdLabel.setBounds(data.eachX, data.eachY, myIdLabel.getPreferredSize().width, bounds.height); data.eachX += myIdLabel.getPreferredSize().width; } int tabsStart = data.eachX; if (manager.getContentCount() == 0) return; Content selected = manager.getSelectedContent(); if (selected == null) { selected = manager.getContents()[0]; } if (myLastLayout != null && myLastLayout.layoutSize.equals(bounds.getSize()) && myLastLayout.contentCount == manager.getContentCount()) { for (ContentTabLabel each : myTabs) { if (!each.isValid()) break; if (each.myContent == selected && each.getBounds().width != 0) { data = myLastLayout; data.fullLayout = false; } } } if (data.fullLayout) { for (ContentTabLabel eachTab : myTabs) { final Dimension eachSize = eachTab.getPreferredSize(); data.requiredWidth += eachSize.width; data.requiredWidth++; data.toLayout.add(eachTab); } data.moreRectWidth = myMoreIcon.getIconWidth() + MORE_ICON_BORDER * TAB_ARC; data.toFitWidth = bounds.getSize().width - data.eachX; final ContentTabLabel selectedTab = myContent2Tabs.get(selected); while (true) { if (data.requiredWidth <= data.toFitWidth) break; if (data.toLayout.size() <= 1) break; if (data.toLayout.get(0) != selectedTab) { dropTab(data, data.toLayout.remove(0)); } else if (data.toLayout.get(data.toLayout.size() - 1) != selectedTab) { dropTab(data, data.toLayout.remove(data.toLayout.size() - 1)); } else { break; } } boolean reachedBounds = false; data.moreRect = null; for (ContentTabLabel each : data.toLayout) { data.eachY = 0; final Dimension eachSize = each.getPreferredSize(); if (data.eachX + eachSize.width < data.toFitWidth + tabsStart) { each.setBounds(data.eachX, data.eachY, eachSize.width, bounds.height - data.eachY); data.eachX += eachSize.width; } else { if (!reachedBounds) { final int width = bounds.width - data.eachX - data.moreRectWidth; each.setBounds(data.eachX, data.eachY, width, bounds.height - data.eachY); data.eachX += width; } else { each.setBounds(0, 0, 0, 0); } reachedBounds = true; } } for (ContentTabLabel each : data.toDrop) { each.setBounds(0, 0, 0, 0); } } if (data.toDrop.size() > 0) { data.moreRect = new Rectangle(data.eachX + MORE_ICON_BORDER, 0, myMoreIcon.getIconWidth(), bounds.height); final int selectedIndex = manager.getIndexOfContent(manager.getSelectedContent()); if (selectedIndex == 0) { myMoreIcon.setPaintedIcons(false, true); } else if (selectedIndex == manager.getContentCount() - 1) { myMoreIcon.setPaintedIcons(true, false); } else { myMoreIcon.setPaintedIcons(true, true); } } else { data.moreRect = null; } myLastLayout = data; } @Override public int getMinimumWidth() { int result = 0; if (myIdLabel != null) { result += myIdLabel.getPreferredSize().width; Insets insets = myIdLabel.getInsets(); if (insets != null) { result += insets.left + insets.right; } } if (myLastLayout != null) { result += myLastLayout.moreRectWidth + myLastLayout.requiredWidth; result -= myLastLayout.toLayout.size() > 1 ? myLastLayout.moreRectWidth + 1 : -14; } return result; } static void dropTab(final LayoutData data, final ContentTabLabel toDropLabel) { data.requiredWidth -= (toDropLabel.getPreferredSize().width + 1); data.toDrop.add(toDropLabel); if (data.toDrop.size() == 1) { data.toFitWidth -= data.moreRectWidth; } } boolean isToDrawTabs() { return myTabs.size() > 1; } static class LayoutData { int toFitWidth; int requiredWidth; Dimension layoutSize; boolean fullLayout = true; int moreRectWidth; ArrayList<ContentTabLabel> toLayout = new ArrayList<ContentTabLabel>(); ArrayList<ContentTabLabel> toDrop = new ArrayList<ContentTabLabel>(); Rectangle moreRect; public int eachX; public int eachY; public int contentCount; LayoutData(ToolWindowContentUi ui) { layoutSize = ui.getSize(); contentCount = ui.myManager.getContentCount(); } } @Override public void paintComponent(Graphics g) { if (!isToDrawTabs()) return; boolean prevSelected = false; for (int i = 0; i < myTabs.size(); i++) { boolean last = (i == myTabs.size() - 1) || ((i + 1 < myTabs.size() && myTabs.get(i + 1).getBounds().width == 0)); ContentTabLabel each = myTabs.get(i); Rectangle r = each.getBounds(); StringBuilder key = new StringBuilder().append(i); if (each.isSelected()) key.append('s'); if (prevSelected) key.append('p'); if (last) key.append('l'); if (myUi.myWindow.isActive()) key.append('a'); BufferedImage image = myCached.get(key.toString()); if (image == null || image.getWidth() != r.width || image.getHeight() != r.height) { image = drawToBuffer(r, each.isSelected(), last, prevSelected, myUi.myWindow.isActive()); myCached.put(key.toString(), image); } if (image != null) { UIUtil.drawImage(g, image, isIdVisible() ? r.x : r.x - 2, r.y, null); } prevSelected = each.isSelected(); } } @Nullable private static BufferedImage drawToBuffer(Rectangle r, boolean selected, boolean last, boolean prevSelected, boolean active) { if (r.width <= 0 || r.height <= 0) return null; BufferedImage image = UIUtil.createImage(r.width, r.height, BufferedImage.TYPE_INT_ARGB); Graphics2D g2d = image.createGraphics(); g2d.setRenderingHint(RenderingHints.KEY_ANTIALIASING, RenderingHints.VALUE_ANTIALIAS_ON); if (selected) { if (!UIUtil.isUnderDarcula()) { g2d.setColor(active ? new Color(0, 0, 0, 70) : new Color(0, 0, 0, 90)); g2d.fillRect(0, 0, r.width, r.height); g2d.setColor(new Color(0, 0, 0, 140)); g2d.drawLine(0, 0, r.width - 1, 0); g2d.drawLine(0, 1, 0, r.height - 1); g2d.setColor(new Color(0, 0, 0, 20)); g2d.drawLine(1, 1, r.width - 1, 1); g2d.drawLine(1, 2, 1, r.height - 2); g2d.drawLine(1, r.height - 1, r.width - 1, r.height - 1); g2d.setColor(new Color(0, 0, 0, 60)); g2d.drawLine(r.width - 1, 1, r.width - 1, r.height - 2); } if (active) { g2d.setColor(new Color(100, 150, 230, 50)); g2d.fill(new Rectangle(0, 0, r.width, r.height)); } } else { g2d.setPaint(UIUtil.getGradientPaint(0, 0, new Color(0, 0, 0, 10), 0, r.height, new Color(0, 0, 0, 30))); g2d.fillRect(0, 0, r.width, r.height); final Color c = new Color(255, 255, 255, UIUtil.isUnderDarcula() ? 10 : 80); if (last) { if (prevSelected) { g2d.setColor(c); g2d.drawRect(0, 0, r.width - 2, r.height - 1); } else { g2d.setColor(c); g2d.drawRect(1, 0, r.width - 3, r.height - 1); g2d.setColor(new Color(0, 0, 0, 60)); g2d.drawLine(0, 0, 0, r.height); } g2d.setColor(new Color(0, 0, 0, 60)); g2d.drawLine(r.width - 1, 0, r.width - 1, r.height); } else { if (prevSelected) { g2d.setColor(c); g2d.drawRect(0, 0, r.width - 1, r.height - 1); } else { g2d.setColor(c); g2d.drawRect(1, 0, r.width - 2, r.height - 1); g2d.setColor(new Color(0, 0, 0, 60)); g2d.drawLine(0, 0, 0, r.height); } } } g2d.dispose(); return image; } @Override public void paintChildren(Graphics g) { if (!isToDrawTabs()) return; if (myLastLayout != null && myLastLayout.moreRect != null) { myMoreIcon.paintIcon(myUi, g); } } @Override public void update() { for (ContentTabLabel each : myTabs) { each.update(); } updateIdLabel(myIdLabel); } @Override public void rebuild() { myUi.removeAll(); myUi.add(myIdLabel); myUi.initMouseListeners(myIdLabel, myUi); for (ContentTabLabel each : myTabs) { myUi.add(each); myUi.initMouseListeners(each, myUi); } myCached.clear(); } @Override public void contentAdded(ContentManagerEvent event) { final Content content = event.getContent(); final ContentTabLabel tab; if (content instanceof TabbedContent) { tab = new TabbedContentTabLabel((TabbedContent)content, this); } else { tab = new ContentTabLabel(content, this); } myTabs.add(event.getIndex(), tab); myContent2Tabs.put(content, tab); myCached.clear(); } @Override public void contentRemoved(ContentManagerEvent event) { final ContentTabLabel tab = myContent2Tabs.get(event.getContent()); if (tab != null) { myTabs.remove(tab); myContent2Tabs.remove(event.getContent()); } myCached.clear(); } @Override public boolean shouldDrawDecorations() { return isToDrawTabs(); } @Override public void showContentPopup(ListPopup listPopup) { Content selected = myUi.myManager.getSelectedContent(); if (selected != null) { ContentTabLabel tab = myContent2Tabs.get(selected); listPopup.showUnderneathOf(tab); } else { listPopup.showUnderneathOf(myIdLabel); } } @Override public RelativeRectangle getRectangleFor(Content content) { ContentTabLabel label = myContent2Tabs.get(content); return new RelativeRectangle(label.getParent(), label.getBounds()); } public Component getComponentFor(Content content) { return myContent2Tabs.get(content); } @Override public String getCloseActionName() { return UIBundle.message("tabbed.pane.close.tab.action.name"); } @Override public String getCloseAllButThisActionName() { return UIBundle.message("tabbed.pane.close.all.tabs.but.this.action.name"); } @Override public String getPreviousContentActionName() { return "Select Previous Tab"; } @Override public String getNextContentActionName() { return "Select Next Tab"; } }
Lekanich/intellij-community
platform/platform-impl/src/com/intellij/openapi/wm/impl/content/TabContentLayout.java
Java
apache-2.0
15,036
/** * Copyright 2015 Google Inc. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ html, body { font-family: 'Roboto', 'Helvetica', sans-serif; margin: 0; padding: 0; } .mdl-demo .mdl-layout__header-row { padding-left: 40px; } .mdl-demo .mdl-layout.is-small-screen .mdl-layout__header-row h3 { font-size: inherit; } .mdl-demo .mdl-layout__tab-bar-button { display: none; } .mdl-demo .mdl-layout.is-small-screen .mdl-layout__tab-bar .mdl-button { display: none; } .mdl-demo .mdl-layout:not(.is-small-screen) .mdl-layout__tab-bar, .mdl-demo .mdl-layout:not(.is-small-screen) .mdl-layout__tab-bar-container { overflow: visible; } .mdl-demo .mdl-layout__tab-bar-container { height: 64px; } .mdl-demo .mdl-layout__tab-bar { padding: 0; padding-left: 16px; box-sizing: border-box; height: 100%; width: 100%; } .mdl-demo .mdl-layout__tab-bar .mdl-layout__tab { height: 64px; line-height: 64px; } .mdl-demo .mdl-layout__tab-bar .mdl-layout__tab.is-active::after { background-color: white; height: 4px; } .mdl-demo main > .mdl-layout__tab-panel { padding: 8px; padding-top: 48px; } .mdl-demo .mdl-card { height: auto; display: flex; flex-direction: column; } .mdl-demo .mdl-card > * { height: auto; } .mdl-demo .mdl-card .mdl-card__supporting-text { margin: 40px; flex-grow: 1; padding: 0; color: inherit; width: calc(100% - 80px); } .mdl-demo.mdl-demo .mdl-card__supporting-text h4 { margin-top: 0; margin-bottom: 20px; } .mdl-demo .mdl-card__actions { margin: 0; padding: 4px 40px; color: inherit; } .mdl-demo .mdl-card__actions a { color: #00BCD4; margin: 0; } .mdl-demo .mdl-card__actions a:hover, .mdl-demo .mdl-card__actions a:active { color: inherit; background-color: transparent; } .mdl-demo .mdl-card__supporting-text + .mdl-card__actions { border-top: 1px solid rgba(0, 0, 0, 0.12); } .mdl-demo #add { position: absolute; right: 40px; top: 36px; z-index: 999; } .mdl-demo .mdl-layout__content section:not(:last-of-type) { position: relative; margin-bottom: 48px; } .mdl-demo section.section--center { max-width: 860px; } .mdl-demo #features section.section--center { max-width: 620px; } .mdl-demo section > header{ display: flex; align-items: center; justify-content: center; } .mdl-demo section > .section__play-btn { min-height: 200px; } .mdl-demo section > header > .material-icons { font-size: 3rem; } .mdl-demo section > button { position: absolute; z-index: 99; top: 8px; right: 8px; } .mdl-demo section .section__circle { display: flex; align-items: center; justify-content: flex-start; flex-grow: 0; flex-shrink: 1; } .mdl-demo section .section__text { flex-grow: 1; flex-shrink: 0; padding-top: 8px; } .mdl-demo section .section__text h5 { font-size: inherit; margin: 0; margin-bottom: 0.5em; } .mdl-demo section .section__text a { text-decoration: none; } .mdl-demo section .section__circle-container > .section__circle-container__circle { width: 64px; height: 64px; border-radius: 32px; margin: 8px 0; } .mdl-demo section.section--footer .section__circle--big { width: 100px; height: 100px; border-radius: 50px; margin: 8px 32px; } .mdl-demo .is-small-screen section.section--footer .section__circle--big { width: 50px; height: 50px; border-radius: 25px; margin: 8px 16px; } .mdl-demo section.section--footer { padding: 64px 0; margin: 0 -8px -8px -8px; } .mdl-demo section.section--center .section__text:not(:last-child) { border-bottom: 1px solid rgba(0,0,0,.13); } .mdl-demo .mdl-card .mdl-card__supporting-text > h3:first-child { margin-bottom: 24px; } .mdl-demo .mdl-layout__tab-panel:not(#overview) { background-color: white; } .mdl-demo #features section { margin-bottom: 72px; } .mdl-demo #features h4, #features h5 { margin-bottom: 16px; } .mdl-demo .toc { border-left: 4px solid #C1EEF4; margin: 24px; padding: 0; padding-left: 8px; display: flex; flex-direction: column; } .mdl-demo .toc h4 { font-size: 0.9rem; margin-top: 0; } .mdl-demo .toc a { color: #4DD0E1; text-decoration: none; font-size: 16px; line-height: 28px; display: block; } .mdl-demo .mdl-menu__container { z-index: 99; }
puncoz/material-design-lite
templates/text-only/styles.css
CSS
apache-2.0
4,752
/* * Copyright 2000-2014 JetBrains s.r.o. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.intellij.openapi.editor; import com.intellij.openapi.application.Result; import com.intellij.openapi.application.WriteAction; import com.intellij.openapi.command.CommandProcessor; import com.intellij.openapi.command.impl.CurrentEditorProvider; import com.intellij.openapi.command.impl.UndoManagerImpl; import com.intellij.openapi.command.undo.UndoManager; import com.intellij.openapi.editor.ex.EditorEx; import com.intellij.openapi.editor.impl.AbstractEditorTest; import com.intellij.openapi.fileEditor.FileEditor; import com.intellij.openapi.fileEditor.TextEditor; import com.intellij.openapi.fileEditor.impl.text.TextEditorProvider; import com.intellij.testFramework.TestFileType; import org.jetbrains.annotations.NotNull; import java.io.IOException; public class EditorMultiCaretUndoRedoTest extends AbstractEditorTest { private CurrentEditorProvider mySavedCurrentEditorProvider; public void setUp() throws Exception { super.setUp(); mySavedCurrentEditorProvider = getUndoManager().getEditorProvider(); } public void tearDown() throws Exception { getUndoManager().setEditorProvider(mySavedCurrentEditorProvider); super.tearDown(); } @Override // disabling execution of tests in command protected void runTest() throws Throwable { new WriteAction<Void>() { @Override protected void run(@NotNull Result<Void> result) throws Throwable { doRunTest(); } }.execute(); } public void testUndoRedo() throws Exception { init("some<caret> text<caret>\n" + "some <selection><caret>other</selection> <selection>text<caret></selection>\n" + "<selection>ano<caret>ther</selection> line"); type('A'); executeAction("EditorDelete"); mouse().clickAt(0, 1); undo(); checkResult("someA<caret>textA<caret>some A<caret>A<caret>A<caret>line"); undo(); checkResult("someA<caret> textA<caret>\n" + "some A<caret> A<caret>\n" + "A<caret> line"); undo(); checkResult("some<caret> text<caret>\n" + "some <selection><caret>other</selection> <selection>text<caret></selection>\n" + "<selection>ano<caret>ther</selection> line"); redo(); checkResult("someA<caret> textA<caret>\n" + "some A<caret> A<caret>\n" + "A<caret> line"); } public void testBlockSelectionStateAfterUndo() throws Exception { init("a"); ((EditorEx)myEditor).setColumnMode(true); mouse().clickAt(0, 2); type('b'); undo(); executeAction("EditorRightWithSelection"); verifyCaretsAndSelections(0, 3, 2, 3); } public void testBlockSelectionStateAfterUndo2() throws Exception { init("a"); ((EditorEx)myEditor).setColumnMode(true); mouse().clickAt(0, 0).dragTo(0, 2).release(); type('b'); undo(); verifyCaretsAndSelections(0, 2, 0, 2); } public void testPrimaryCaretPositionAfterUndo() throws Exception { init("line1\n" + "line2"); mouse().alt().clickAt(1, 1).dragTo(0, 0).release(); type(' '); undo(); assertEquals(new LogicalPosition(0, 0), myEditor.getCaretModel().getPrimaryCaret().getLogicalPosition()); } private void checkResult(final String text) { CommandProcessor.getInstance().runUndoTransparentAction(new Runnable() { @Override public void run() { checkResultByText(text); } }); } private static void undo() { getUndoManager().undo(getTextEditor()); } private static void redo() { getUndoManager().redo(getTextEditor()); } private static UndoManagerImpl getUndoManager() { return (UndoManagerImpl) UndoManager.getInstance(ourProject); } private static TextEditor getTextEditor() { return TextEditorProvider.getInstance().getTextEditor(myEditor); } private void init(String text) throws IOException { init(text, TestFileType.TEXT); setEditorVisibleSize(1000, 1000); getUndoManager().setEditorProvider(new CurrentEditorProvider() { @Override public FileEditor getCurrentEditor() { return getTextEditor(); } }); } }
muntasirsyed/intellij-community
platform/platform-tests/testSrc/com/intellij/openapi/editor/EditorMultiCaretUndoRedoTest.java
Java
apache-2.0
4,770
/* * Copyright 2000-2014 JetBrains s.r.o. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.intellij.lang.xml; import com.intellij.codeInsight.daemon.Validator; import com.intellij.codeInsight.intention.IntentionAction; import com.intellij.lang.annotation.Annotation; import com.intellij.lang.annotation.AnnotationHolder; import com.intellij.lang.annotation.ExternalAnnotator; import com.intellij.openapi.util.Trinity; import com.intellij.psi.PsiElement; import com.intellij.psi.PsiFile; import com.intellij.psi.xml.XmlDocument; import com.intellij.psi.xml.XmlFile; import com.intellij.psi.xml.XmlTag; import com.intellij.psi.xml.XmlToken; import com.intellij.xml.XmlNSDescriptor; import com.intellij.xml.util.XmlTagUtil; import org.jetbrains.annotations.NotNull; import org.jetbrains.annotations.Nullable; import java.util.ArrayList; import java.util.List; /** * @author ven */ public class XMLExternalAnnotator extends ExternalAnnotator<XMLExternalAnnotator.MyHost, XMLExternalAnnotator.MyHost> { @Nullable @Override public MyHost collectInformation(@NotNull PsiFile file) { if (!(file instanceof XmlFile)) return null; final XmlDocument document = ((XmlFile)file).getDocument(); if (document == null) return null; XmlTag rootTag = document.getRootTag(); XmlNSDescriptor nsDescriptor = rootTag == null ? null : rootTag.getNSDescriptor(rootTag.getNamespace(), false); if (nsDescriptor instanceof Validator) { //noinspection unchecked MyHost host = new MyHost(); ((Validator<XmlDocument>)nsDescriptor).validate(document, host); return host; } return null; } @Nullable @Override public MyHost doAnnotate(MyHost collectedInfo) { return collectedInfo; } @Override public void apply(@NotNull PsiFile file, MyHost annotationResult, @NotNull AnnotationHolder holder) { annotationResult.apply(holder); } private static void appendFixes(final Annotation annotation, final IntentionAction... actions) { if (actions != null) { for (IntentionAction action : actions) annotation.registerFix(action); } } static class MyHost implements Validator.ValidationHost { private final List<Trinity<PsiElement, String, ErrorType>> messages = new ArrayList<Trinity<PsiElement, String, ErrorType>>(); @Override public void addMessage(PsiElement context, String message, int type) { throw new UnsupportedOperationException(); } @Override public void addMessage(PsiElement context, String message, @NotNull ErrorType type) { messages.add(Trinity.create(context, message, type)); } void apply (AnnotationHolder holder) { for (Trinity<PsiElement, String, ErrorType> message : messages) { addMessageWithFixes(message.first, message.second, message.third, holder); } } } public static void addMessageWithFixes(final PsiElement context, final String message, @NotNull final Validator.ValidationHost.ErrorType type, AnnotationHolder myHolder, @NotNull final IntentionAction... fixes) { if (message != null && !message.isEmpty()) { if (context instanceof XmlTag) { addMessagesForTag((XmlTag)context, message, type, myHolder, fixes); } else { if (type == Validator.ValidationHost.ErrorType.ERROR) { appendFixes(myHolder.createErrorAnnotation(context, message), fixes); } else { appendFixes(myHolder.createWarningAnnotation(context, message), fixes); } } } } private static void addMessagesForTag(XmlTag tag, String message, Validator.ValidationHost.ErrorType type, AnnotationHolder myHolder, IntentionAction... actions) { XmlToken childByRole = XmlTagUtil.getStartTagNameElement(tag); addMessagesForTreeChild(childByRole, type, message, myHolder, actions); childByRole = XmlTagUtil.getEndTagNameElement(tag); addMessagesForTreeChild(childByRole, type, message, myHolder, actions); } private static void addMessagesForTreeChild(final XmlToken childByRole, final Validator.ValidationHost.ErrorType type, final String message, AnnotationHolder myHolder, IntentionAction... actions) { if (childByRole != null) { Annotation annotation; if (type == Validator.ValidationHost.ErrorType.ERROR) { annotation = myHolder.createErrorAnnotation(childByRole, message); } else { annotation = myHolder.createWarningAnnotation(childByRole, message); } appendFixes(annotation, actions); } } }
diorcety/intellij-community
xml/xml-analysis-impl/src/com/intellij/lang/xml/XMLExternalAnnotator.java
Java
apache-2.0
5,343
/* * Copyright 2012 The Closure Compiler Authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /** * @fileoverview Externs for Angular 1.0.x * * TODO: Mocks. * TODO: Remaining Services: * $compileProvider * $cookies * $cookieStore * $document * $httpBackend * $interpolate * $locale * $resource * $rootElement * $rootScope * $rootScopeProvider * $routeParams * $sanitize * $templateCache * $window * * TODO: Resolve two issues with angular.$http * 1) angular.$http isn't declared as a * callable type. It should be declared as a function, and properties * added following the technique used by $timeout, $parse and * $interval. * 2) angular.$http.delete cannot be added as an extern * as it is a reserved keyword. * Its use is potentially not supported in IE. * It may be aliased as 'remove' in a future version. * * @see http://angularjs.org/ * @externs */ /** * @typedef {(Window|Document|Element|Array.<Element>|string|!angular.JQLite| * NodeList|{length: number})} */ var JQLiteSelector; /** * @type {Object} * @const */ var angular = {}; /** * @param {Object} self * @param {Function} fn * @param {...*} args * @return {Function} */ angular.bind = function(self, fn, args) {}; /** * @param {Element|HTMLDocument} element * @param {Array.<string|Function>=} opt_modules * @return {!angular.$injector} */ angular.bootstrap = function(element, opt_modules) {}; /** * @param {T} source * @param {(Object|Array)=} opt_dest * @return {T} * @template T */ angular.copy = function(source, opt_dest) {}; /** * @param {(JQLiteSelector|Object)} element * @param {(JQLiteSelector|Object)=} opt_context * @return {!angular.JQLite} */ angular.element = function(element, opt_context) {}; /** * @param {*} o1 * @param {*} o2 * @return {boolean} */ angular.equals = function(o1, o2) {}; /** * @param {Object} dest * @param {...Object} srcs */ angular.extend = function(dest, srcs) {}; /** * @param {Object|Array} obj * @param {Function} iterator * @param {Object=} opt_context * @return {Object|Array} */ angular.forEach = function(obj, iterator, opt_context) {}; /** * @param {string|T} json * @return {Object|Array|Date|T} * @template T */ angular.fromJson = function(json) {}; /** * @param {*} arg * @return {*} */ angular.identity = function(arg) {}; /** * @param {Array.<string|Function>} modules * @return {!angular.$injector} */ angular.injector = function(modules) {}; /** * @param {*} value * @return {boolean} */ angular.isArray = function(value) {}; /** * @param {*} value * @return {boolean} */ angular.isDate = function(value) {}; /** * @param {*} value * @return {boolean} */ angular.isDefined = function(value) {}; /** * @param {*} value * @return {boolean} */ angular.isElement = function(value) {}; /** * @param {*} value * @return {boolean} */ angular.isFunction = function(value) {}; /** * @param {*} value * @return {boolean} */ angular.isNumber = function(value) {}; /** * @param {*} value * @return {boolean} */ angular.isObject = function(value) {}; /** * @param {*} value * @return {boolean} */ angular.isString = function(value) {}; /** * @param {*} value * @return {boolean} */ angular.isUndefined = function(value) {}; /** * @param {string} s * @return {string} */ angular.lowercase = function(s) {}; angular.mock = {}; /** * @param {string} name * @param {Array.<string>=} opt_requires * @param {(Function|Array.<string|Function>)=} opt_configFn * @return {!angular.Module} */ angular.module = function(name, opt_requires, opt_configFn) {}; angular.noop = function() {}; /** * @param {Object|Array|Date|string|number} obj * @param {boolean=} opt_pretty * @return {string} */ angular.toJson = function(obj, opt_pretty) {}; /** * @param {string} s * @return {string} */ angular.uppercase = function(s) {}; /** * @typedef {{ * $attr: Object.<string,string>, * $normalize: function(string): string, * $observe: function(string, function(*)): function(*), * $set: function(string, ?(string|boolean), boolean=, string=) * }} */ angular.Attributes; /** * @param {string} name * @return {string} */ angular.Attributes.$normalize = function(name) {}; /** * @param {string} key * @param {function(*)} fn * @return {function(*)} */ angular.Attributes.$observe = function(key, fn) {}; /** * @param {string} key * @param {?(string|boolean)} value * @param {boolean=} opt_writeAttr * @param {string=} opt_attrName */ angular.Attributes.$set = function(key, value, opt_writeAttr, opt_attrName) {}; /** * @typedef {{ * pre: (function( * !angular.Scope=, angular.JQLite=, !angular.Attributes=, Object=)| * undefined), * post: (function( * !angular.Scope=, !angular.JQLite=, !angular.Attributes=, Object=)| * undefined) * }} */ angular.LinkingFunctions; /** * @param {!angular.Scope=} scope * @param {!angular.JQLite=} iElement * @param {!angular.Attributes=} iAttrs * @param {(Object|Array.<Object>)=} controller */ angular.LinkingFunctions.pre = function(scope, iElement, iAttrs, controller) {}; /** * @param {!angular.Scope=} scope * @param {!angular.JQLite=} iElement * @param {!angular.Attributes=} iAttrs * @param {(Object|Array.<Object>)=} controller */ angular.LinkingFunctions.post = function(scope, iElement, iAttrs, controller) { }; /** * @typedef {{ * compile: (function( * !angular.JQLite=, !angular.Attributes=, Function=)|undefined), * controller: (Function|undefined), * controllerAs: (string|undefined), * link: (function( * !angular.Scope=, !angular.JQLite=, !angular.Attributes=, * (Object|Array.<Object>)=)| * undefined), * name: (string|undefined), * priority: (number|undefined), * replace: (boolean|undefined), * require: (string|Array.<string>|undefined), * restrict: (string|undefined), * scope: (boolean|Object.<string, string>|undefined), * template: (string|undefined), * templateUrl: (string|undefined), * terminal: (boolean|undefined), * transclude: (boolean|string|undefined) * }} */ angular.Directive; /** * @param {!angular.JQLite=} tElement * @param {!angular.Attributes=} tAttrs * @param {Function=} transclude * @return {Function|angular.LinkingFunctions|undefined} */ angular.Directive.compile = function(tElement, tAttrs, transclude) {}; angular.Directive.controller = function() {}; /** * @type {string|undefined} */ angular.Directive.controllerAs; /** * @param {!angular.Scope=} scope * @param {!angular.JQLite=} iElement * @param {!angular.Attributes=} iAttrs * @param {(Object|Array.<Object>)=} controller */ angular.Directive.link = function(scope, iElement, iAttrs, controller) {}; /** * @type {(string|undefined)} */ angular.Directive.name; /** * @type {(number|undefined)} */ angular.Directive.priority; /** * @type {(boolean|undefined)} */ angular.Directive.replace; /** * @type {(string|Array.<string>|undefined)} */ angular.Directive.require; /** * @type {(string|undefined)} */ angular.Directive.restrict; /** * @type {(boolean|Object.<string, string>|undefined)} */ angular.Directive.scope; /** * @type {(string|undefined)} * TODO: This can also be a function which returns a string. */ angular.Directive.template; /** * @type {(string|undefined)} */ angular.Directive.templateUrl; /** * @type {(boolean|undefined)} */ angular.Directive.terminal; /** * @type {(boolean|string|undefined)} */ angular.Directive.transclude; /** * @typedef {{ * addClass: function(string): !angular.JQLite, * after: function(JQLiteSelector): !angular.JQLite, * append: function(JQLiteSelector): !angular.JQLite, * attr: function(string, (string|boolean)=): * (!angular.JQLite|string|boolean), * bind: function(string, Function): !angular.JQLite, * children: function(): !angular.JQLite, * clone: function(): !angular.JQLite, * contents: function(): !angular.JQLite, * controller: function(string=): Object, * css: function(string, string=): (!angular.JQLite|string), * data: function(string=, *=): *, * eq: function(number): !angular.JQLite, * find: function(string): !angular.JQLite, * hasClass: function(string): boolean, * html: function(string=): (!angular.JQLite|string), * inheritedData: function(string=, *=): *, * injector: function(): !angular.$injector, * length: number, * next: function(): !angular.JQLite, * parent: function(): !angular.JQLite, * prepend: function(JQLiteSelector): !angular.JQLite, * prop: function(string, *=): *, * ready: function(Function): !angular.JQLite, * remove: function(): !angular.JQLite, * removeAttr: function(string): !angular.JQLite, * removeClass: function(string): !angular.JQLite, * removeData: function(): !angular.JQLite, * replaceWith: function(JQLiteSelector): !angular.JQLite, * scope: function(): !angular.Scope, * text: function(string=): (!angular.JQLite|string), * toggleClass: function(string, boolean=): !angular.JQLite, * unbind: function(string=, Function=): !angular.JQLite, * val: function(string=): (!angular.JQLite|string), * wrap: function(JQLiteSelector): !angular.JQLite * }} */ angular.JQLite; /** * @param {string} name * @return {!angular.JQLite} */ angular.JQLite.addClass = function(name) {}; /** * @param {JQLiteSelector} element * @return {!angular.JQLite} */ angular.JQLite.after = function(element) {}; /** * @param {JQLiteSelector} element * @return {!angular.JQLite} */ angular.JQLite.append = function(element) {}; /** * @param {string} name * @param {(string|boolean)=} opt_value * @return {!angular.JQLite|string|boolean} */ angular.JQLite.attr = function(name, opt_value) {}; /** * @param {string} type * @param {Function} fn * @return {!angular.JQLite} */ angular.JQLite.bind = function(type, fn) {}; /** * @return {!angular.JQLite} */ angular.JQLite.children = function() {}; /** * @return {!angular.JQLite} */ angular.JQLite.clone = function() {}; /** * @return {!angular.JQLite} */ angular.JQLite.contents = function() {}; /** * @param {string=} opt_name * @return {Object} */ angular.JQLite.controller = function(opt_name) {}; /** * @param {string} name * @param {string=} opt_value * @return {!angular.JQLite|string} */ angular.JQLite.css = function(name, opt_value) {}; /** * @param {string=} opt_key * @param {*=} opt_value * @return {*} */ angular.JQLite.data = function(opt_key, opt_value) {}; /** * @param {number} index * @return {!angular.JQLite} */ angular.JQLite.eq = function(index) {}; /** * @param {string} selector * @return {!angular.JQLite} */ angular.JQLite.find = function(selector) {}; /** * @param {string} name * @return {boolean} */ angular.JQLite.hasClass = function(name) {}; /** * @param {string=} opt_value * @return {!angular.JQLite|string} */ angular.JQLite.html = function(opt_value) {}; /** * @param {string=} opt_key * @param {*=} opt_value * @return {*} */ angular.JQLite.inheritedData = function(opt_key, opt_value) {}; /** * @return {!angular.$injector} */ angular.JQLite.injector = function() {}; /** @type {number} */ angular.JQLite.length; /** * @return {!angular.JQLite} */ angular.JQLite.next = function() {}; /** * @return {!angular.JQLite} */ angular.JQLite.parent = function() {}; /** * @param {JQLiteSelector} element * @return {!angular.JQLite} */ angular.JQLite.prepend = function(element) {}; /** * @param {string} name * @param {*=} opt_value * @return {*} */ angular.JQLite.prop = function(name, opt_value) {}; /** * @param {Function} fn * @return {!angular.JQLite} */ angular.JQLite.ready = function(fn) {}; /** * @return {!angular.JQLite} */ angular.JQLite.remove = function() {}; /** * @param {string} name * @return {!angular.JQLite} */ angular.JQLite.removeAttr = function(name) {}; /** * @param {string} name * @return {!angular.JQLite} */ angular.JQLite.removeClass = function(name) {}; /** * @return {!angular.JQLite} */ angular.JQLite.removeData = function() {}; /** * @param {JQLiteSelector} element * @return {!angular.JQLite} */ angular.JQLite.replaceWith = function(element) {}; /** * @return {!angular.Scope} */ angular.JQLite.scope = function() {}; /** * @param {string=} opt_value * @return {!angular.JQLite|string} */ angular.JQLite.text = function(opt_value) {}; /** * @param {string} name * @param {boolean=} opt_condition * @return {!angular.JQLite} */ angular.JQLite.toggleClass = function(name, opt_condition) {}; /** * @param {string=} opt_type * @param {Function=} opt_fn * @return {!angular.JQLite} */ angular.JQLite.unbind = function(opt_type, opt_fn) {}; /** * @param {string=} opt_value * @return {!angular.JQLite|string} */ angular.JQLite.val = function(opt_value) {}; /** * @param {JQLiteSelector} element * @return {!angular.JQLite} */ angular.JQLite.wrap = function(element) {}; /** * @typedef {{ * config: function((Function|Array.<string|Function>)):!angular.Module, * constant: function(string, *):!angular.Module, * controller: * (function(string, (Function|Array.<string|Function>)):!angular.Module| * function(!Object.<(Function|Array.<string|Function>)>): * !angular.Module), * directive: * (function(string, (Function|Array.<string|Function>)):!angular.Module| * function(!Object.<(Function|Array.<string|Function>)>): * !angular.Module), * factory: * function(string, (Function|Array.<string|Function>)):!angular.Module, * filter: * function(string, (Function|Array.<string|Function>)):!angular.Module, * name: string, * provider: function(string, * (Object|Function|Array.<string|Function>)):!angular.Module, * requires: Array.<string>, * run: function((Function|Array.<string|Function>)):!angular.Module, * service: * function(string, (Function|Array.<string|Function>)):!angular.Module, * value: function(string, *):!angular.Module * }} */ angular.Module; /** * @param {Function|Array.<string|Function>} configFn * @return {!angular.Module} */ angular.Module.config = function(configFn) {}; /** * @param {string} name * @param {*} object * @return {!angular.Module} */ angular.Module.constant = function(name, object) {}; /** * @param {string} name * @param {Function|Array.<string|Function>} constructor * @return {!angular.Module} */ angular.Module.controller = function(name, constructor) {}; /** * @param {string} name * @param {Function|Array.<string|Function>} directiveFactory * @return {!angular.Module} */ angular.Module.directive = function(name, directiveFactory) {}; /** * @param {string} name * @param {Function|Array.<string|Function>} providerFunction * @return {!angular.Module} */ angular.Module.factory = function(name, providerFunction) {}; /** * @param {string} name * @param {Function|Array.<string|Function>} filterFactory * @return {!angular.Module} */ angular.Module.filter = function(name, filterFactory) {}; /** * @param {string} name * @param {Function|Array.<string|Function>} providerType * @return {!angular.Module} */ angular.Module.provider = function(name, providerType) {}; /** * @param {Function|Array.<string|Function>} initializationFn * @return {!angular.Module} */ angular.Module.run = function(initializationFn) {}; /** * @param {string} name * @param {Function|Array.<string|Function>} constructor * @return {!angular.Module} */ angular.Module.service = function(name, constructor) {}; /** * @param {string} name * @param {*} object * @return {!angular.Module} */ angular.Module.value = function(name, object) {}; /** * @type {string} */ angular.Module.name = ''; /** * @type {Array.<string>} */ angular.Module.requires; /** * @typedef {{ * $$phase: string, * $apply: function((string|function(angular.Scope))=):*, * $broadcast: function(string, ...*), * $destroy: function(), * $digest: function(), * $emit: function(string, ...*), * $eval: function((string|function(!angular.Scope))=, Object=):*, * $evalAsync: function((string|function())=), * $id: string, * $new: function(boolean=):!angular.Scope, * $on: function(string, function(!angular.Scope.Event, ...?)):function(), * $parent: !angular.Scope, * $root: !angular.Scope, * $watch: function( * (string|Function), (string|Function)=, boolean=):function() * }} */ angular.Scope; /** @type {string} */ angular.Scope.$$phase; /** * @param {(string|function(!angular.Scope))=} opt_exp * @return {*} */ angular.Scope.$apply = function(opt_exp) {}; /** * @param {string} name * @param {...*} args */ angular.Scope.$broadcast = function(name, args) {}; angular.Scope.$destroy = function() {}; angular.Scope.$digest = function() {}; /** * @param {string} name * @param {...*} args */ angular.Scope.$emit = function(name, args) {}; /** * @param {(string|function())=} opt_exp * @param {Object=} opt_locals * @return {*} */ angular.Scope.$eval = function(opt_exp, opt_locals) {}; /** * @param {(string|function())=} opt_exp */ angular.Scope.$evalAsync = function(opt_exp) {}; /** @type {string} */ angular.Scope.$id; /** * @param {boolean=} opt_isolate * @return {!angular.Scope} */ angular.Scope.$new = function(opt_isolate) {}; /** * @param {string} name * @param {function(!angular.Scope.Event, ...?)} listener * @return {function()} */ angular.Scope.$on = function(name, listener) {}; /** @type {!angular.Scope} */ angular.Scope.$parent; /** @type {!angular.Scope} */ angular.Scope.$root; /** * @param {string|Function} exp * @param {(string|Function)=} opt_listener * @param {boolean=} opt_objectEquality * @return {function()} */ angular.Scope.$watch = function(exp, opt_listener, opt_objectEquality) {}; /** * @typedef {{ * currentScope: !angular.Scope, * defaultPrevented: boolean, * name: string, * preventDefault: function(), * stopPropagation: function(), * targetScope: !angular.Scope * }} */ angular.Scope.Event; /** @type {!angular.Scope} */ angular.Scope.Event.currentScope; /** @type {boolean} */ angular.Scope.Event.defaultPrevented; /** @type {string} */ angular.Scope.Event.name; angular.Scope.Event.preventDefault = function() {}; angular.Scope.Event.stopPropagation = function() {}; /** @type {!angular.Scope} */ angular.Scope.Event.targetScope; /** * @type {Object} */ angular.version = {}; /** * @type {string} */ angular.version.full = ''; /** * @type {number} */ angular.version.major = 0; /** * @type {number} */ angular.version.minor = 0; /** * @type {number} */ angular.version.dot = 0; /** * @type {string} */ angular.version.codeName = ''; /****************************************************************************** * $anchorScroll Service *****************************************************************************/ /** * @typedef {function()} */ angular.$anchorScroll; /****************************************************************************** * $anchorScrollProvider Service *****************************************************************************/ /** * @typedef {{ * disableAutoScrolling: function() * }} */ angular.$anchorScrollProvider; /** * @type {function()} */ angular.$anchorScrollProvider.disableAutoScrolling = function() {}; /****************************************************************************** * $compile Service *****************************************************************************/ /** * @typedef { * function( * (JQLiteSelector|Object), * function(!angular.Scope, Function=)=, number=): * function(!angular.Scope, * function(Object, !angular.Scope=)=): Object} */ angular.$compile; /****************************************************************************** * $cacheFactory Service *****************************************************************************/ /** * @typedef { * function(string, angular.$cacheFactory.Options=): * !angular.$cacheFactory.Cache} */ angular.$cacheFactory; /** @typedef {{capacity: (number|undefined)}} */ angular.$cacheFactory.Options; /** * @typedef {{ * info: function():!angular.$cacheFactory.Cache.Info, * put: function(string, *), * get: function(string):*, * remove: function(string), * removeAll: function(), * destroy: function() * }} */ angular.$cacheFactory.Cache; /** * @typedef {{ * id: string, * size: number, * options: !angular.$cacheFactory.Options * }} */ angular.$cacheFactory.Cache.Info; /****************************************************************************** * $controller Service *****************************************************************************/ /** * @typedef {function((Function|string), Object):Object} */ angular.$controller; /****************************************************************************** * $controllerProvider Service *****************************************************************************/ /** * @typedef {{ * register: function((string|Object), (Function|Array)) * }} */ angular.$controllerProvider; /****************************************************************************** * $exceptionHandler Service *****************************************************************************/ /** * @typedef {function(Error, string=)} */ angular.$exceptionHandler; /****************************************************************************** * $filter Service *****************************************************************************/ /** * @typedef {function(string): !Function} */ angular.$filter; /** * The 'orderBy' filter is available through $filterProvider and AngularJS * injection; but is not accessed through a documented public API of AngularJS. * <p>In current AngularJS version the injection is satisfied by * angular.orderByFunction, where the implementation is found. * <p>See http://docs.angularjs.org/api/ng.filter:orderBy. * @typedef {function(Array, * (string|function(?):*|Array.<(string|function(?):*)>), * boolean=): Array} */ angular.$filter.orderBy; /****************************************************************************** * $filterProvider Service *****************************************************************************/ /** * @typedef {{ * register: function(string, (Function|Array.<string|Function>)) * }} */ angular.$filterProvider; /** * @param {string} name * @param {(Function|Array.<string|Function>)} fn */ angular.$filterProvider.register = function(name, fn) {}; /****************************************************************************** * $http Service *****************************************************************************/ /** * This is a typedef because the closure compiler does not allow * defining a type that is a function with properties. * If you are trying to use the $http service as a function, try * using one of the helper functions instead. * @typedef {{ * delete: function(string, angular.$http.Config=):!angular.$http.HttpPromise, * get: function(string, angular.$http.Config=):!angular.$http.HttpPromise, * head: function(string, angular.$http.Config=):!angular.$http.HttpPromise, * jsonp: function(string, angular.$http.Config=):!angular.$http.HttpPromise, * post: function(string, *, angular.$http.Config=): * !angular.$http.HttpPromise, * put: function(string, *, angular.$http.Config=):!angular.$http.HttpPromise, * defaults: angular.$http.Config, * pendingRequests: Array.<angular.$http.Config> * }} */ angular.$http; /** * @typedef {{ * cache: (boolean|!angular.$cacheFactory.Cache|undefined), * data: (string|Object|undefined), * headers: (Object|undefined), * method: (string|undefined), * params: (Object.<(string|Object)>|undefined), * timeout: (number|undefined), * transformRequest: * (function((string|Object), Object):(string|Object)| * Array.<function((string|Object), Object):(string|Object)>|undefined), * transformResponse: * (function((string|Object), Object):(string|Object)| * Array.<function((string|Object), Object):(string|Object)>|undefined), * url: (string|undefined), * withCredentials: (boolean|undefined) * }} */ angular.$http.Config; // /** // * This extern is currently incomplete as delete is a reserved word. // * To use delete, index $http. // * Example: $http['delete'](url, opt_config); // * @param {string} url // * @param {angular.$http.Config=} opt_config // * @return {!angular.$http.HttpPromise} // */ // angular.$http.delete = function(url, opt_config) {}; /** * @param {string} url * @param {angular.$http.Config=} opt_config * @return {!angular.$http.HttpPromise} */ angular.$http.get = function(url, opt_config) {}; /** * @param {string} url * @param {angular.$http.Config=} opt_config * @return {!angular.$http.HttpPromise} */ angular.$http.head = function(url, opt_config) {}; /** * @param {string} url * @param {angular.$http.Config=} opt_config * @return {!angular.$http.HttpPromise} */ angular.$http.jsonp = function(url, opt_config) {}; /** * @param {string} url * @param {*} data * @param {angular.$http.Config=} opt_config * @return {!angular.$http.HttpPromise} */ angular.$http.post = function(url, data, opt_config) {}; /** * @param {string} url * @param {*} data * @param {angular.$http.Config=} opt_config * @return {!angular.$http.HttpPromise} */ angular.$http.put = function(url, data, opt_config) {}; /** * @type {angular.$http.Config} */ angular.$http.defaults; /** * @type {Array.<angular.$http.Config>} * @const */ angular.$http.pendingRequests; /** * @typedef {function((string|Object), number, * function(string=): (string|Object|null), angular.$http.Config)} */ angular.HttpCallback; /** * @typedef {{ * then: function( * ?function(!angular.$http.Response), * ?function(!angular.$http.Response)=): !angular.$http.HttpPromise, * success: function(angular.HttpCallback): !angular.$http.HttpPromise, * error: function(angular.HttpCallback): !angular.$http.HttpPromise * }} */ angular.$http.HttpPromise; /** * @param {?function(!angular.$http.Response)} successCallback * @param {?function(!angular.$http.Response)=} opt_errorCallback * @return {!angular.$http.HttpPromise} */ angular.$http.HttpPromise.then = function( successCallback, opt_errorCallback) {}; /** * @param {angular.HttpCallback} callback * @return {!angular.$http.HttpPromise} Promise for chaining. */ angular.$http.HttpPromise.success = function(callback) {}; /** * @param {angular.HttpCallback} callback * @return {!angular.$http.HttpPromise} Promise for chaining. */ angular.$http.HttpPromise.error = function(callback) {}; /** * @typedef {{ * data: (string|Object), * status: number, * headers: function(string=): (string|Object), * config: !angular.$http.Config * }} */ angular.$http.Response; /****************************************************************************** * $injector Service *****************************************************************************/ /** * @typedef {{ * annotate: function((Function|Array.<string|Function>)):Array.<string>, * get: function(string):(?), * instantiate: function(Function, Object=):Object, * invoke: function( * (Function|Array.<string|Function>), Object=, Object=):(?) * }} */ angular.$injector; /** * @param {(Function|Array.<string|Function>)} fn * @return {Array.<string>} */ angular.$injector.annotate = function(fn) {}; /** * @param {string} name * @return {?} */ angular.$injector.get = function(name) {}; /** * @param {Function} type * @param {Object=} opt_locals * @return {Object} */ angular.$injector.instantiate = function(type, opt_locals) {}; /** * @param {(Function|Array.<string|Function>)} fn * @param {Object=} opt_self * @param {Object=} opt_locals * @return {?} */ angular.$injector.invoke = function(fn, opt_self, opt_locals) {}; /****************************************************************************** * $interpolateProvider Service *****************************************************************************/ /** * @typedef {{ * startSymbol: function(string), * endSymbol: function(string) * }} */ angular.$interpolateProvider; /** @type {function(string)} */ angular.$interpolateProvider.startSymbol; /** @type {function(string)} */ angular.$interpolateProvider.endSymbol; /****************************************************************************** * $interval Service *****************************************************************************/ /** * @typedef { * function(function(), number=, number=, boolean=):angular.$q.Promise * } */ angular.$interval; /** * Augment the angular.$interval type definition by reopening the type via an * artificial angular.$interval instance. * * This allows us to define methods on function objects which is something * that can't be expressed via typical type annotations. * * @type {angular.$interval} */ angular.$interval_; /** * @type {function(!angular.$q.Promise):boolean} */ angular.$interval_.cancel = function(promise) {}; /****************************************************************************** * $location Service *****************************************************************************/ /** * @typedef {{ * absUrl: function():string, * hash: function(string=):string, * host: function():string, * path: function(string=):(string|!angular.$location), * port: function():number, * protocol: function():string, * replace: function(), * search: function((string|Object.<string, string>)=, ?string=): * (string|!Object.<string, string>), * url: function(string=):string * }} */ angular.$location; /** * @return {string} */ angular.$location.absUrl = function() {}; /** * @param {string=} opt_hash * @return {string} */ angular.$location.hash = function(opt_hash) {}; /** * @return {string} */ angular.$location.host = function() {}; /** * @param {string=} opt_path * @return {string|!angular.$location} */ angular.$location.path = function(opt_path) {}; /** * @return {number} */ angular.$location.port = function() {}; /** * @return {string} */ angular.$location.protocol = function() {}; /** * @type {function()} */ angular.$location.replace = function() {}; /** * @param {(string|Object.<string, string>)=} opt_search * @param {?string=} opt_paramValue * @return {string|!Object.<string, string>} */ angular.$location.search = function(opt_search, opt_paramValue) {}; /** * @param {string=} opt_url * @return {string} */ angular.$location.url = function(opt_url) {}; /****************************************************************************** * $locationProvider Service *****************************************************************************/ /** * @typedef {{ * hashPrefix: * function(string=): (string|angular.$locationProvider), * html5Mode: * function(boolean=): (boolean|!angular.$locationProvider) * }} */ angular.$locationProvider; /** * @param {string=} opt_prefix * @return {string|!angular.$locationProvider} */ angular.$locationProvider.hashPrefix = function(opt_prefix) {}; /** * @param {boolean=} opt_enabled * @return {boolean|!angular.$locationProvider} */ angular.$locationProvider.html5Mode = function(opt_enabled) {}; /****************************************************************************** * $log Service *****************************************************************************/ /** * @typedef {{ * error: function(...*), * info: function(...*), * log: function(...*), * warn: function(...*) * }} */ angular.$log; /** * @param {...*} var_args */ angular.$log.error = function(var_args) {}; /** * @param {...*} var_args */ angular.$log.info = function(var_args) {}; /** * @param {...*} var_args */ angular.$log.log = function(var_args) {}; /** * @param {...*} var_args */ angular.$log.warn = function(var_args) {}; /****************************************************************************** * NgModelController *****************************************************************************/ /** * @constructor */ angular.NgModelController = function() {}; /** * @type {?} */ angular.NgModelController.prototype.$modelValue; /** * @type {boolean} */ angular.NgModelController.prototype.$dirty; /** * @type {!Object.<boolean>} */ angular.NgModelController.prototype.$error; /** * @type {!Array.<function(?):*>} */ angular.NgModelController.prototype.$formatters; /** * @type {boolean} */ angular.NgModelController.prototype.$invalid; /** * @type {!Array.<function(?):*>} */ angular.NgModelController.prototype.$parsers; /** * @type {boolean} */ angular.NgModelController.prototype.$pristine; angular.NgModelController.prototype.$render = function() {}; /** * @param {string} key * @param {boolean} isValid */ angular.NgModelController.prototype.$setValidity = function(key, isValid) {}; /** * @param {?} value */ angular.NgModelController.prototype.$setViewValue = function(value) {}; /** * @type {boolean} */ angular.NgModelController.prototype.$valid; /** * @type {!Array.<function()>} */ angular.NgModelController.prototype.$viewChangeListeners; /** * @type {?} */ angular.NgModelController.prototype.$viewValue; /****************************************************************************** * FormController *****************************************************************************/ /** * @constructor */ angular.FormController = function() {}; /** * @type {boolean} */ angular.FormController.prototype.$dirty; /** * @type {!Object.<boolean>} */ angular.FormController.prototype.$error; /** * @type {boolean} */ angular.FormController.prototype.$invalid; /** * @type {boolean} */ angular.FormController.prototype.$pristine; /** * @type {boolean} */ angular.FormController.prototype.$valid; /****************************************************************************** * $parse Service *****************************************************************************/ /** * @typedef {function(string):!angular.$parse.Expression} */ angular.$parse; /** * @typedef {function((!angular.Scope|!Object), Object=):*} */ angular.$parse.Expression; /** * Augment the angular.$parse.Expression type definition by reopening the type * via an artificial angular.$parse instance. * * This allows us to define methods on function objects which is something * that can't be expressed via typical type annotations. * * @type {angular.$parse.Expression} */ angular.$parse_; /** * @type {function((!angular.Scope|!Object), *)} */ angular.$parse_.assign = function(scope, newValue) {}; /****************************************************************************** * $provide Service *****************************************************************************/ /** * @typedef {{ * constant: function(string, *): Object, * decorator: function(string, (Function|Array.<string|Function>)), * factory: function(string, (Function|Array.<string|Function>)): Object, * provider: function(string, (Function|Array.<string|Function>)): Object, * service: function(string, (Function|Array.<string|Function>)): Object, * value: function(string, *): Object * }} */ angular.$provide; /** * @param {string} name * @param {*} object * @return {Object} */ angular.$provide.constant = function(name, object) {}; /** * @param {string} name * @param {Function|Array.<string|Function>} decorator */ angular.$provide.decorator = function(name, decorator) {}; /** * @param {string} name * @param {Function|Array.<string|Function>} providerFunction * @return {Object} */ angular.$provide.factory = function(name, providerFunction) {}; /** * @param {string} name * @param {Function|Array.<string|Function>} providerType * @return {Object} */ angular.$provide.provider = function(name, providerType) {}; /** * @param {string} name * @param {Function|Array.<string|Function>} constructor * @return {Object} */ angular.$provide.service = function(name, constructor) {}; /** * @param {string} name * @param {*} object * @return {Object} */ angular.$provide.value = function(name, object) {}; /****************************************************************************** * $q Service *****************************************************************************/ /** * @typedef {{ * all: function(!Array.<!angular.$q.Promise>): !angular.$q.Promise, * defer: function():!angular.$q.Deferred, * reject: function(*):!angular.$q.Promise, * when: function(*):!angular.$q.Promise * }} */ angular.$q; /** * @param {Array.<!angular.$q.Promise>} promises * @return {!angular.$q.Promise} */ angular.$q.all = function(promises) {}; /** * @return {!angular.$q.Deferred} */ angular.$q.defer = function() {}; /** * @param {*} reason * @return {!angular.$q.Promise} */ angular.$q.reject = function(reason) {}; /** * @param {*} value * @return {!angular.$q.Promise} */ angular.$q.when = function(value) {}; /** * @typedef {{ * resolve: function(*=), * reject: function(*=), * promise: !angular.$q.Promise * }} */ angular.$q.Deferred; /** @param {*=} opt_value */ angular.$q.Deferred.resolve = function(opt_value) {}; /** @param {*=} opt_reason */ angular.$q.Deferred.reject = function(opt_reason) {}; /** @type {!angular.$q.Promise} */ angular.$q.Deferred.promise; /** * @typedef {{then: function(?function(?), ?function(?)=): !angular.$q.Promise}} */ angular.$q.Promise; /** * @param {?function(?)} successCallback * @param {?function(?)=} opt_errorCallback * @return {!angular.$q.Promise} */ angular.$q.Promise.then = function(successCallback, opt_errorCallback) {}; /****************************************************************************** * $route Service *****************************************************************************/ /** * @typedef {{ * reload: function(), * current: angular.$route.Route, * routes: Array.<!angular.$route.Route> * }} */ angular.$route; /** @type {function()} */ angular.$route.reload = function() {}; /** @type {!angular.$route.Route} */ angular.$route.current; /** @type {Array.<!angular.$route.Route>} */ angular.$route.routes; /** * @typedef {{ * $route: angular.$routeProvider.Params, * locals: Object.<string, *>, * params: Object.<string, string>, * pathParams: Object.<string, string>, * scope: Object.<string, *> * }} */ angular.$route.Route; /** @type {angular.$routeProvider.Params} */ angular.$route.Route.$route; /** @type {Object.<string, *>} */ angular.$route.Route.locals; /** @type {Object.<string, string>} */ angular.$route.Route.params; /** @type {Object.<string, string>} */ angular.$route.Route.pathParams; /** @type {Object.<string, *>} */ angular.$route.Route.scope; /****************************************************************************** * $routeProvider Service *****************************************************************************/ /** * @typedef {{ * otherwise: * function(angular.$routeProvider.Params): !angular.$routeProvider, * when: * function( * string, angular.$routeProvider.Params): !angular.$routeProvider * }} */ angular.$routeProvider; /** * @param {angular.$routeProvider.Params} params * @return {!angular.$routeProvider} */ angular.$routeProvider.otherwise = function(params) {}; /** * @param {string} path * @param {angular.$routeProvider.Params} route * @return {!angular.$routeProvider} */ angular.$routeProvider.when = function(path, route) {}; /** * @typedef {{ * controller: (Function|Array.<string|Function>|string|undefined), * template: (string|undefined), * templateUrl: (string|undefined), * resolve: (Object.<string, ( * string|Function|Array.<string|Function>|!angular.$q.Promise * )>|undefined), * redirectTo: (string|function()|undefined), * reloadOnSearch: (boolean|undefined) * }} */ angular.$routeProvider.Params; /** @type {Function|Array.<string|Function>|string} */ angular.$routeProvider.Params.controller; /** @type {string} */ angular.$routeProvider.Params.template; /** @type {string} */ angular.$routeProvider.Params.templateUrl; /** * @type { * Object.<string, ( * string|Function|Array.<string|Function>|!angular.$q.Promise * )>} */ angular.$routeProvider.Params.resolve; /** @type {string|function()} */ angular.$routeProvider.Params.redirectTo; /** @type {boolean} */ angular.$routeProvider.Params.reloadOnSearch; /****************************************************************************** * $timeout Service *****************************************************************************/ /** * @typedef {function(function(), number=, boolean=):!angular.$q.Promise} */ angular.$timeout; /** * Augment the angular.$timeout type definition by reopening the type via an * artificial angular.$timeout instance. * * This allows us to define methods on function objects which is something * that can't be expressed via typical type annotations. * * @type {angular.$timeout} */ angular.$timeout_; /** * @type {function(!angular.$q.Promise):boolean} */ angular.$timeout_.cancel = function(promise) {};
neraliu/closure-compiler
contrib/externs/angular-1.0.js
JavaScript
apache-2.0
42,292
"""Concrete date/time and related types -- prototype implemented in Python. See http://www.zope.org/Members/fdrake/DateTimeWiki/FrontPage See also http://dir.yahoo.com/Reference/calendars/ For a primer on DST, including many current DST rules, see http://webexhibits.org/daylightsaving/ For more about DST than you ever wanted to know, see ftp://elsie.nci.nih.gov/pub/ Sources for time zone and DST data: http://www.twinsun.com/tz/tz-link.htm This was originally copied from the sandbox of the CPython CVS repository. Thanks to Tim Peters for suggesting using it. """ import time as _time import math as _math def _cmp(x, y): return 0 if x == y else 1 if x > y else -1 MINYEAR = 1 MAXYEAR = 9999 _MAXORDINAL = 3652059 # date.max.toordinal() # Utility functions, adapted from Python's Demo/classes/Dates.py, which # also assumes the current Gregorian calendar indefinitely extended in # both directions. Difference: Dates.py calls January 1 of year 0 day # number 1. The code here calls January 1 of year 1 day number 1. This is # to match the definition of the "proleptic Gregorian" calendar in Dershowitz # and Reingold's "Calendrical Calculations", where it's the base calendar # for all computations. See the book for algorithms for converting between # proleptic Gregorian ordinals and many other calendar systems. _DAYS_IN_MONTH = [None, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31] _DAYS_BEFORE_MONTH = [None] dbm = 0 for dim in _DAYS_IN_MONTH[1:]: _DAYS_BEFORE_MONTH.append(dbm) dbm += dim del dbm, dim def _is_leap(year): "year -> 1 if leap year, else 0." return year % 4 == 0 and (year % 100 != 0 or year % 400 == 0) def _days_before_year(year): "year -> number of days before January 1st of year." y = year - 1 return y*365 + y//4 - y//100 + y//400 def _days_in_month(year, month): "year, month -> number of days in that month in that year." assert 1 <= month <= 12, month if month == 2 and _is_leap(year): return 29 return _DAYS_IN_MONTH[month] def _days_before_month(year, month): "year, month -> number of days in year preceeding first day of month." assert 1 <= month <= 12, 'month must be in 1..12' return _DAYS_BEFORE_MONTH[month] + (month > 2 and _is_leap(year)) def _ymd2ord(year, month, day): "year, month, day -> ordinal, considering 01-Jan-0001 as day 1." assert 1 <= month <= 12, 'month must be in 1..12' dim = _days_in_month(year, month) assert 1 <= day <= dim, ('day must be in 1..%d' % dim) return (_days_before_year(year) + _days_before_month(year, month) + day) _DI400Y = _days_before_year(401) # number of days in 400 years _DI100Y = _days_before_year(101) # " " " " 100 " _DI4Y = _days_before_year(5) # " " " " 4 " # A 4-year cycle has an extra leap day over what we'd get from pasting # together 4 single years. assert _DI4Y == 4 * 365 + 1 # Similarly, a 400-year cycle has an extra leap day over what we'd get from # pasting together 4 100-year cycles. assert _DI400Y == 4 * _DI100Y + 1 # OTOH, a 100-year cycle has one fewer leap day than we'd get from # pasting together 25 4-year cycles. assert _DI100Y == 25 * _DI4Y - 1 def _ord2ymd(n): "ordinal -> (year, month, day), considering 01-Jan-0001 as day 1." # n is a 1-based index, starting at 1-Jan-1. The pattern of leap years # repeats exactly every 400 years. The basic strategy is to find the # closest 400-year boundary at or before n, then work with the offset # from that boundary to n. Life is much clearer if we subtract 1 from # n first -- then the values of n at 400-year boundaries are exactly # those divisible by _DI400Y: # # D M Y n n-1 # -- --- ---- ---------- ---------------- # 31 Dec -400 -_DI400Y -_DI400Y -1 # 1 Jan -399 -_DI400Y +1 -_DI400Y 400-year boundary # ... # 30 Dec 000 -1 -2 # 31 Dec 000 0 -1 # 1 Jan 001 1 0 400-year boundary # 2 Jan 001 2 1 # 3 Jan 001 3 2 # ... # 31 Dec 400 _DI400Y _DI400Y -1 # 1 Jan 401 _DI400Y +1 _DI400Y 400-year boundary n -= 1 n400, n = divmod(n, _DI400Y) year = n400 * 400 + 1 # ..., -399, 1, 401, ... # Now n is the (non-negative) offset, in days, from January 1 of year, to # the desired date. Now compute how many 100-year cycles precede n. # Note that it's possible for n100 to equal 4! In that case 4 full # 100-year cycles precede the desired day, which implies the desired # day is December 31 at the end of a 400-year cycle. n100, n = divmod(n, _DI100Y) # Now compute how many 4-year cycles precede it. n4, n = divmod(n, _DI4Y) # And now how many single years. Again n1 can be 4, and again meaning # that the desired day is December 31 at the end of the 4-year cycle. n1, n = divmod(n, 365) year += n100 * 100 + n4 * 4 + n1 if n1 == 4 or n100 == 4: assert n == 0 return year-1, 12, 31 # Now the year is correct, and n is the offset from January 1. We find # the month via an estimate that's either exact or one too large. leapyear = n1 == 3 and (n4 != 24 or n100 == 3) assert leapyear == _is_leap(year) month = (n + 50) >> 5 preceding = _DAYS_BEFORE_MONTH[month] + (month > 2 and leapyear) if preceding > n: # estimate is too large month -= 1 preceding -= _DAYS_IN_MONTH[month] + (month == 2 and leapyear) n -= preceding assert 0 <= n < _days_in_month(year, month) # Now the year and month are correct, and n is the offset from the # start of that month: we're done! return year, month, n+1 # Month and day names. For localized versions, see the calendar module. _MONTHNAMES = [None, "Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep", "Oct", "Nov", "Dec"] _DAYNAMES = [None, "Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun"] def _build_struct_time(y, m, d, hh, mm, ss, dstflag): wday = (_ymd2ord(y, m, d) + 6) % 7 dnum = _days_before_month(y, m) + d return _time.struct_time((y, m, d, hh, mm, ss, wday, dnum, dstflag)) def _format_time(hh, mm, ss, us): # Skip trailing microseconds when us==0. result = "%02d:%02d:%02d" % (hh, mm, ss) if us: result += ".%06d" % us return result # Correctly substitute for %z and %Z escapes in strftime formats. def _wrap_strftime(object, format, timetuple): year = timetuple[0] if year < 1000: raise ValueError("year=%d is before 1000; the datetime strftime() " "methods require year >= 1000" % year) # Don't call utcoffset() or tzname() unless actually needed. freplace = None # the string to use for %f zreplace = None # the string to use for %z Zreplace = None # the string to use for %Z # Scan format for %z and %Z escapes, replacing as needed. newformat = [] push = newformat.append i, n = 0, len(format) while i < n: ch = format[i] i += 1 if ch == '%': if i < n: ch = format[i] i += 1 if ch == 'f': if freplace is None: freplace = '%06d' % getattr(object, 'microsecond', 0) newformat.append(freplace) elif ch == 'z': if zreplace is None: zreplace = "" if hasattr(object, "utcoffset"): offset = object.utcoffset() if offset is not None: sign = '+' if offset.days < 0: offset = -offset sign = '-' h, m = divmod(offset, timedelta(hours=1)) assert not m % timedelta(minutes=1), "whole minute" m //= timedelta(minutes=1) zreplace = '%c%02d%02d' % (sign, h, m) assert '%' not in zreplace newformat.append(zreplace) elif ch == 'Z': if Zreplace is None: Zreplace = "" if hasattr(object, "tzname"): s = object.tzname() if s is not None: # strftime is going to have at this: escape % Zreplace = s.replace('%', '%%') newformat.append(Zreplace) else: push('%') push(ch) else: push('%') else: push(ch) newformat = "".join(newformat) return _time.strftime(newformat, timetuple) def _call_tzinfo_method(tzinfo, methname, tzinfoarg): if tzinfo is None: return None return getattr(tzinfo, methname)(tzinfoarg) # Just raise TypeError if the arg isn't None or a string. def _check_tzname(name): if name is not None and not isinstance(name, str): raise TypeError("tzinfo.tzname() must return None or string, " "not '%s'" % type(name)) # name is the offset-producing method, "utcoffset" or "dst". # offset is what it returned. # If offset isn't None or timedelta, raises TypeError. # If offset is None, returns None. # Else offset is checked for being in range, and a whole # of minutes. # If it is, its integer value is returned. Else ValueError is raised. def _check_utc_offset(name, offset): assert name in ("utcoffset", "dst") if offset is None: return if not isinstance(offset, timedelta): raise TypeError("tzinfo.%s() must return None " "or timedelta, not '%s'" % (name, type(offset))) if offset % timedelta(minutes=1) or offset.microseconds: raise ValueError("tzinfo.%s() must return a whole number " "of minutes, got %s" % (name, offset)) if not -timedelta(1) < offset < timedelta(1): raise ValueError("%s()=%s, must be must be strictly between" " -timedelta(hours=24) and timedelta(hours=24)" % (name, offset)) def _check_date_fields(year, month, day): if not isinstance(year, int): raise TypeError('int expected') if not MINYEAR <= year <= MAXYEAR: raise ValueError('year must be in %d..%d' % (MINYEAR, MAXYEAR), year) if not 1 <= month <= 12: raise ValueError('month must be in 1..12', month) dim = _days_in_month(year, month) if not 1 <= day <= dim: raise ValueError('day must be in 1..%d' % dim, day) def _check_time_fields(hour, minute, second, microsecond): if not isinstance(hour, int): raise TypeError('int expected') if not 0 <= hour <= 23: raise ValueError('hour must be in 0..23', hour) if not 0 <= minute <= 59: raise ValueError('minute must be in 0..59', minute) if not 0 <= second <= 59: raise ValueError('second must be in 0..59', second) if not 0 <= microsecond <= 999999: raise ValueError('microsecond must be in 0..999999', microsecond) def _check_tzinfo_arg(tz): if tz is not None and not isinstance(tz, tzinfo): raise TypeError("tzinfo argument must be None or of a tzinfo subclass") def _cmperror(x, y): raise TypeError("can't compare '%s' to '%s'" % ( type(x).__name__, type(y).__name__)) class timedelta: """Represent the difference between two datetime objects. Supported operators: - add, subtract timedelta - unary plus, minus, abs - compare to timedelta - multiply, divide by int/long In addition, datetime supports subtraction of two datetime objects returning a timedelta, and addition or subtraction of a datetime and a timedelta giving a datetime. Representation: (days, seconds, microseconds). Why? Because I felt like it. """ __slots__ = '_days', '_seconds', '_microseconds' def __new__(cls, days=0, seconds=0, microseconds=0, milliseconds=0, minutes=0, hours=0, weeks=0): # Doing this efficiently and accurately in C is going to be difficult # and error-prone, due to ubiquitous overflow possibilities, and that # C double doesn't have enough bits of precision to represent # microseconds over 10K years faithfully. The code here tries to make # explicit where go-fast assumptions can be relied on, in order to # guide the C implementation; it's way more convoluted than speed- # ignoring auto-overflow-to-long idiomatic Python could be. # XXX Check that all inputs are ints or floats. # Final values, all integer. # s and us fit in 32-bit signed ints; d isn't bounded. d = s = us = 0 # Normalize everything to days, seconds, microseconds. days += weeks*7 seconds += minutes*60 + hours*3600 microseconds += milliseconds*1000 # Get rid of all fractions, and normalize s and us. # Take a deep breath <wink>. if isinstance(days, float): dayfrac, days = _math.modf(days) daysecondsfrac, daysecondswhole = _math.modf(dayfrac * (24.*3600.)) assert daysecondswhole == int(daysecondswhole) # can't overflow s = int(daysecondswhole) assert days == int(days) d = int(days) else: daysecondsfrac = 0.0 d = days assert isinstance(daysecondsfrac, float) assert abs(daysecondsfrac) <= 1.0 assert isinstance(d, int) assert abs(s) <= 24 * 3600 # days isn't referenced again before redefinition if isinstance(seconds, float): secondsfrac, seconds = _math.modf(seconds) assert seconds == int(seconds) seconds = int(seconds) secondsfrac += daysecondsfrac assert abs(secondsfrac) <= 2.0 else: secondsfrac = daysecondsfrac # daysecondsfrac isn't referenced again assert isinstance(secondsfrac, float) assert abs(secondsfrac) <= 2.0 assert isinstance(seconds, int) days, seconds = divmod(seconds, 24*3600) d += days s += int(seconds) # can't overflow assert isinstance(s, int) assert abs(s) <= 2 * 24 * 3600 # seconds isn't referenced again before redefinition usdouble = secondsfrac * 1e6 assert abs(usdouble) < 2.1e6 # exact value not critical # secondsfrac isn't referenced again if isinstance(microseconds, float): microseconds += usdouble microseconds = round(microseconds, 0) seconds, microseconds = divmod(microseconds, 1e6) assert microseconds == int(microseconds) assert seconds == int(seconds) days, seconds = divmod(seconds, 24.*3600.) assert days == int(days) assert seconds == int(seconds) d += int(days) s += int(seconds) # can't overflow assert isinstance(s, int) assert abs(s) <= 3 * 24 * 3600 else: seconds, microseconds = divmod(microseconds, 1000000) days, seconds = divmod(seconds, 24*3600) d += days s += int(seconds) # can't overflow assert isinstance(s, int) assert abs(s) <= 3 * 24 * 3600 microseconds = float(microseconds) microseconds += usdouble microseconds = round(microseconds, 0) assert abs(s) <= 3 * 24 * 3600 assert abs(microseconds) < 3.1e6 # Just a little bit of carrying possible for microseconds and seconds. assert isinstance(microseconds, float) assert int(microseconds) == microseconds us = int(microseconds) seconds, us = divmod(us, 1000000) s += seconds # cant't overflow assert isinstance(s, int) days, s = divmod(s, 24*3600) d += days assert isinstance(d, int) assert isinstance(s, int) and 0 <= s < 24*3600 assert isinstance(us, int) and 0 <= us < 1000000 self = object.__new__(cls) self._days = d self._seconds = s self._microseconds = us if abs(d) > 999999999: raise OverflowError("timedelta # of days is too large: %d" % d) return self def __repr__(self): if self._microseconds: return "%s(%d, %d, %d)" % ('datetime.' + self.__class__.__name__, self._days, self._seconds, self._microseconds) if self._seconds: return "%s(%d, %d)" % ('datetime.' + self.__class__.__name__, self._days, self._seconds) return "%s(%d)" % ('datetime.' + self.__class__.__name__, self._days) def __str__(self): mm, ss = divmod(self._seconds, 60) hh, mm = divmod(mm, 60) s = "%d:%02d:%02d" % (hh, mm, ss) if self._days: def plural(n): return n, abs(n) != 1 and "s" or "" s = ("%d day%s, " % plural(self._days)) + s if self._microseconds: s = s + ".%06d" % self._microseconds return s def total_seconds(self): """Total seconds in the duration.""" return ((self.days * 86400 + self.seconds)*10**6 + self.microseconds) / 10**6 # Read-only field accessors @property def days(self): """days""" return self._days @property def seconds(self): """seconds""" return self._seconds @property def microseconds(self): """microseconds""" return self._microseconds def __add__(self, other): if isinstance(other, timedelta): # for CPython compatibility, we cannot use # our __class__ here, but need a real timedelta return timedelta(self._days + other._days, self._seconds + other._seconds, self._microseconds + other._microseconds) return NotImplemented __radd__ = __add__ def __sub__(self, other): if isinstance(other, timedelta): # for CPython compatibility, we cannot use # our __class__ here, but need a real timedelta return timedelta(self._days - other._days, self._seconds - other._seconds, self._microseconds - other._microseconds) return NotImplemented def __rsub__(self, other): if isinstance(other, timedelta): return -self + other return NotImplemented def __neg__(self): # for CPython compatibility, we cannot use # our __class__ here, but need a real timedelta return timedelta(-self._days, -self._seconds, -self._microseconds) def __pos__(self): return self def __abs__(self): if self._days < 0: return -self else: return self def __mul__(self, other): if isinstance(other, int): # for CPython compatibility, we cannot use # our __class__ here, but need a real timedelta return timedelta(self._days * other, self._seconds * other, self._microseconds * other) if isinstance(other, float): a, b = other.as_integer_ratio() return self * a / b return NotImplemented __rmul__ = __mul__ def _to_microseconds(self): return ((self._days * (24*3600) + self._seconds) * 1000000 + self._microseconds) def __floordiv__(self, other): if not isinstance(other, (int, timedelta)): return NotImplemented usec = self._to_microseconds() if isinstance(other, timedelta): return usec // other._to_microseconds() if isinstance(other, int): return timedelta(0, 0, usec // other) def __truediv__(self, other): if not isinstance(other, (int, float, timedelta)): return NotImplemented usec = self._to_microseconds() if isinstance(other, timedelta): return usec / other._to_microseconds() if isinstance(other, int): return timedelta(0, 0, usec / other) if isinstance(other, float): a, b = other.as_integer_ratio() return timedelta(0, 0, b * usec / a) def __mod__(self, other): if isinstance(other, timedelta): r = self._to_microseconds() % other._to_microseconds() return timedelta(0, 0, r) return NotImplemented def __divmod__(self, other): if isinstance(other, timedelta): q, r = divmod(self._to_microseconds(), other._to_microseconds()) return q, timedelta(0, 0, r) return NotImplemented # Comparisons of timedelta objects with other. def __eq__(self, other): if isinstance(other, timedelta): return self._cmp(other) == 0 else: return False def __ne__(self, other): if isinstance(other, timedelta): return self._cmp(other) != 0 else: return True def __le__(self, other): if isinstance(other, timedelta): return self._cmp(other) <= 0 else: _cmperror(self, other) def __lt__(self, other): if isinstance(other, timedelta): return self._cmp(other) < 0 else: _cmperror(self, other) def __ge__(self, other): if isinstance(other, timedelta): return self._cmp(other) >= 0 else: _cmperror(self, other) def __gt__(self, other): if isinstance(other, timedelta): return self._cmp(other) > 0 else: _cmperror(self, other) def _cmp(self, other): assert isinstance(other, timedelta) return _cmp(self._getstate(), other._getstate()) def __hash__(self): return hash(self._getstate()) def __bool__(self): return (self._days != 0 or self._seconds != 0 or self._microseconds != 0) # Pickle support. def _getstate(self): return (self._days, self._seconds, self._microseconds) def __reduce__(self): return (self.__class__, self._getstate()) timedelta.min = timedelta(-999999999) timedelta.max = timedelta(days=999999999, hours=23, minutes=59, seconds=59, microseconds=999999) timedelta.resolution = timedelta(microseconds=1) class date: """Concrete date type. Constructors: __new__() fromtimestamp() today() fromordinal() Operators: __repr__, __str__ __cmp__, __hash__ __add__, __radd__, __sub__ (add/radd only with timedelta arg) Methods: timetuple() toordinal() weekday() isoweekday(), isocalendar(), isoformat() ctime() strftime() Properties (readonly): year, month, day """ __slots__ = '_year', '_month', '_day' def __new__(cls, year, month=None, day=None): """Constructor. Arguments: year, month, day (required, base 1) """ if (isinstance(year, bytes) and len(year) == 4 and 1 <= year[2] <= 12 and month is None): # Month is sane # Pickle support self = object.__new__(cls) self.__setstate(year) return self _check_date_fields(year, month, day) self = object.__new__(cls) self._year = year self._month = month self._day = day return self # Additional constructors @classmethod def fromtimestamp(cls, t): "Construct a date from a POSIX timestamp (like time.time())." y, m, d, hh, mm, ss, weekday, jday, dst = _time.localtime(t) return cls(y, m, d) @classmethod def today(cls): "Construct a date from time.time()." t = _time.time() return cls.fromtimestamp(t) @classmethod def fromordinal(cls, n): """Contruct a date from a proleptic Gregorian ordinal. January 1 of year 1 is day 1. Only the year, month and day are non-zero in the result. """ y, m, d = _ord2ymd(n) return cls(y, m, d) # Conversions to string def __repr__(self): """Convert to formal string, for repr(). >>> dt = datetime(2010, 1, 1) >>> repr(dt) 'datetime.datetime(2010, 1, 1, 0, 0)' >>> dt = datetime(2010, 1, 1, tzinfo=timezone.utc) >>> repr(dt) 'datetime.datetime(2010, 1, 1, 0, 0, tzinfo=datetime.timezone.utc)' """ return "%s(%d, %d, %d)" % ('datetime.' + self.__class__.__name__, self._year, self._month, self._day) # XXX These shouldn't depend on time.localtime(), because that # clips the usable dates to [1970 .. 2038). At least ctime() is # easily done without using strftime() -- that's better too because # strftime("%c", ...) is locale specific. def ctime(self): "Return ctime() style string." weekday = self.toordinal() % 7 or 7 return "%s %s %2d 00:00:00 %04d" % ( _DAYNAMES[weekday], _MONTHNAMES[self._month], self._day, self._year) def strftime(self, fmt): "Format using strftime()." return _wrap_strftime(self, fmt, self.timetuple()) def __format__(self, fmt): if len(fmt) != 0: return self.strftime(fmt) return str(self) def isoformat(self): """Return the date formatted according to ISO. This is 'YYYY-MM-DD'. References: - http://www.w3.org/TR/NOTE-datetime - http://www.cl.cam.ac.uk/~mgk25/iso-time.html """ return "%04d-%02d-%02d" % (self._year, self._month, self._day) __str__ = isoformat # Read-only field accessors @property def year(self): """year (1-9999)""" return self._year @property def month(self): """month (1-12)""" return self._month @property def day(self): """day (1-31)""" return self._day # Standard conversions, __cmp__, __hash__ (and helpers) def timetuple(self): "Return local time tuple compatible with time.localtime()." return _build_struct_time(self._year, self._month, self._day, 0, 0, 0, -1) def toordinal(self): """Return proleptic Gregorian ordinal for the year, month and day. January 1 of year 1 is day 1. Only the year, month and day values contribute to the result. """ return _ymd2ord(self._year, self._month, self._day) def replace(self, year=None, month=None, day=None): """Return a new date with new values for the specified fields.""" if year is None: year = self._year if month is None: month = self._month if day is None: day = self._day _check_date_fields(year, month, day) return date(year, month, day) # Comparisons of date objects with other. def __eq__(self, other): if isinstance(other, date): return self._cmp(other) == 0 return NotImplemented def __ne__(self, other): if isinstance(other, date): return self._cmp(other) != 0 return NotImplemented def __le__(self, other): if isinstance(other, date): return self._cmp(other) <= 0 return NotImplemented def __lt__(self, other): if isinstance(other, date): return self._cmp(other) < 0 return NotImplemented def __ge__(self, other): if isinstance(other, date): return self._cmp(other) >= 0 return NotImplemented def __gt__(self, other): if isinstance(other, date): return self._cmp(other) > 0 return NotImplemented def _cmp(self, other): assert isinstance(other, date) y, m, d = self._year, self._month, self._day y2, m2, d2 = other._year, other._month, other._day return _cmp((y, m, d), (y2, m2, d2)) def __hash__(self): "Hash." return hash(self._getstate()) # Computations def __add__(self, other): "Add a date to a timedelta." if isinstance(other, timedelta): o = self.toordinal() + other.days if 0 < o <= _MAXORDINAL: return date.fromordinal(o) raise OverflowError("result out of range") return NotImplemented __radd__ = __add__ def __sub__(self, other): """Subtract two dates, or a date and a timedelta.""" if isinstance(other, timedelta): return self + timedelta(-other.days) if isinstance(other, date): days1 = self.toordinal() days2 = other.toordinal() return timedelta(days1 - days2) return NotImplemented def weekday(self): "Return day of the week, where Monday == 0 ... Sunday == 6." return (self.toordinal() + 6) % 7 # Day-of-the-week and week-of-the-year, according to ISO def isoweekday(self): "Return day of the week, where Monday == 1 ... Sunday == 7." # 1-Jan-0001 is a Monday return self.toordinal() % 7 or 7 def isocalendar(self): """Return a 3-tuple containing ISO year, week number, and weekday. The first ISO week of the year is the (Mon-Sun) week containing the year's first Thursday; everything else derives from that. The first week is 1; Monday is 1 ... Sunday is 7. ISO calendar algorithm taken from http://www.phys.uu.nl/~vgent/calendar/isocalendar.htm """ year = self._year week1monday = _isoweek1monday(year) today = _ymd2ord(self._year, self._month, self._day) # Internally, week and day have origin 0 week, day = divmod(today - week1monday, 7) if week < 0: year -= 1 week1monday = _isoweek1monday(year) week, day = divmod(today - week1monday, 7) elif week >= 52: if today >= _isoweek1monday(year+1): year += 1 week = 0 return year, week+1, day+1 # Pickle support. def _getstate(self): yhi, ylo = divmod(self._year, 256) return bytes([yhi, ylo, self._month, self._day]), def __setstate(self, string): if len(string) != 4 or not (1 <= string[2] <= 12): raise TypeError("not enough arguments") yhi, ylo, self._month, self._day = string self._year = yhi * 256 + ylo def __reduce__(self): return (self.__class__, self._getstate()) _date_class = date # so functions w/ args named "date" can get at the class date.min = date(1, 1, 1) date.max = date(9999, 12, 31) date.resolution = timedelta(days=1) class tzinfo: """Abstract base class for time zone info classes. Subclasses must override the name(), utcoffset() and dst() methods. """ __slots__ = () def tzname(self, dt): "datetime -> string name of time zone." raise NotImplementedError("tzinfo subclass must override tzname()") def utcoffset(self, dt): "datetime -> minutes east of UTC (negative for west of UTC)" raise NotImplementedError("tzinfo subclass must override utcoffset()") def dst(self, dt): """datetime -> DST offset in minutes east of UTC. Return 0 if DST not in effect. utcoffset() must include the DST offset. """ raise NotImplementedError("tzinfo subclass must override dst()") def fromutc(self, dt): "datetime in UTC -> datetime in local time." if not isinstance(dt, datetime): raise TypeError("fromutc() requires a datetime argument") if dt.tzinfo is not self: raise ValueError("dt.tzinfo is not self") dtoff = dt.utcoffset() if dtoff is None: raise ValueError("fromutc() requires a non-None utcoffset() " "result") # See the long comment block at the end of this file for an # explanation of this algorithm. dtdst = dt.dst() if dtdst is None: raise ValueError("fromutc() requires a non-None dst() result") delta = dtoff - dtdst if delta: dt += delta dtdst = dt.dst() if dtdst is None: raise ValueError("fromutc(): dt.dst gave inconsistent " "results; cannot convert") return dt + dtdst # Pickle support. def __reduce__(self): getinitargs = getattr(self, "__getinitargs__", None) if getinitargs: args = getinitargs() else: args = () getstate = getattr(self, "__getstate__", None) if getstate: state = getstate() else: state = getattr(self, "__dict__", None) or None if state is None: return (self.__class__, args) else: return (self.__class__, args, state) _tzinfo_class = tzinfo class time: """Time with time zone. Constructors: __new__() Operators: __repr__, __str__ __cmp__, __hash__ Methods: strftime() isoformat() utcoffset() tzname() dst() Properties (readonly): hour, minute, second, microsecond, tzinfo """ def __new__(cls, hour=0, minute=0, second=0, microsecond=0, tzinfo=None): """Constructor. Arguments: hour, minute (required) second, microsecond (default to zero) tzinfo (default to None) """ self = object.__new__(cls) if isinstance(hour, bytes) and len(hour) == 6: # Pickle support self.__setstate(hour, minute or None) return self _check_tzinfo_arg(tzinfo) _check_time_fields(hour, minute, second, microsecond) self._hour = hour self._minute = minute self._second = second self._microsecond = microsecond self._tzinfo = tzinfo return self # Read-only field accessors @property def hour(self): """hour (0-23)""" return self._hour @property def minute(self): """minute (0-59)""" return self._minute @property def second(self): """second (0-59)""" return self._second @property def microsecond(self): """microsecond (0-999999)""" return self._microsecond @property def tzinfo(self): """timezone info object""" return self._tzinfo # Standard conversions, __hash__ (and helpers) # Comparisons of time objects with other. def __eq__(self, other): if isinstance(other, time): return self._cmp(other) == 0 else: return False def __ne__(self, other): if isinstance(other, time): return self._cmp(other) != 0 else: return True def __le__(self, other): if isinstance(other, time): return self._cmp(other) <= 0 else: _cmperror(self, other) def __lt__(self, other): if isinstance(other, time): return self._cmp(other) < 0 else: _cmperror(self, other) def __ge__(self, other): if isinstance(other, time): return self._cmp(other) >= 0 else: _cmperror(self, other) def __gt__(self, other): if isinstance(other, time): return self._cmp(other) > 0 else: _cmperror(self, other) def _cmp(self, other): assert isinstance(other, time) mytz = self._tzinfo ottz = other._tzinfo myoff = otoff = None if mytz is ottz: base_compare = True else: myoff = self.utcoffset() otoff = other.utcoffset() base_compare = myoff == otoff if base_compare: return _cmp((self._hour, self._minute, self._second, self._microsecond), (other._hour, other._minute, other._second, other._microsecond)) if myoff is None or otoff is None: raise TypeError("cannot compare naive and aware times") myhhmm = self._hour * 60 + self._minute - myoff//timedelta(minutes=1) othhmm = other._hour * 60 + other._minute - otoff//timedelta(minutes=1) return _cmp((myhhmm, self._second, self._microsecond), (othhmm, other._second, other._microsecond)) def __hash__(self): """Hash.""" tzoff = self.utcoffset() if not tzoff: # zero or None return hash(self._getstate()[0]) h, m = divmod(timedelta(hours=self.hour, minutes=self.minute) - tzoff, timedelta(hours=1)) assert not m % timedelta(minutes=1), "whole minute" m //= timedelta(minutes=1) if 0 <= h < 24: return hash(time(h, m, self.second, self.microsecond)) return hash((h, m, self.second, self.microsecond)) # Conversion to string def _tzstr(self, sep=":"): """Return formatted timezone offset (+xx:xx) or None.""" off = self.utcoffset() if off is not None: if off.days < 0: sign = "-" off = -off else: sign = "+" hh, mm = divmod(off, timedelta(hours=1)) assert not mm % timedelta(minutes=1), "whole minute" mm //= timedelta(minutes=1) assert 0 <= hh < 24 off = "%s%02d%s%02d" % (sign, hh, sep, mm) return off def __repr__(self): """Convert to formal string, for repr().""" if self._microsecond != 0: s = ", %d, %d" % (self._second, self._microsecond) elif self._second != 0: s = ", %d" % self._second else: s = "" s= "%s(%d, %d%s)" % ('datetime.' + self.__class__.__name__, self._hour, self._minute, s) if self._tzinfo is not None: assert s[-1:] == ")" s = s[:-1] + ", tzinfo=%r" % self._tzinfo + ")" return s def isoformat(self): """Return the time formatted according to ISO. This is 'HH:MM:SS.mmmmmm+zz:zz', or 'HH:MM:SS+zz:zz' if self.microsecond == 0. """ s = _format_time(self._hour, self._minute, self._second, self._microsecond) tz = self._tzstr() if tz: s += tz return s __str__ = isoformat def strftime(self, fmt): """Format using strftime(). The date part of the timestamp passed to underlying strftime should not be used. """ # The year must be >= 1000 else Python's strftime implementation # can raise a bogus exception. timetuple = (1900, 1, 1, self._hour, self._minute, self._second, 0, 1, -1) return _wrap_strftime(self, fmt, timetuple) def __format__(self, fmt): if len(fmt) != 0: return self.strftime(fmt) return str(self) # Timezone functions def utcoffset(self): """Return the timezone offset in minutes east of UTC (negative west of UTC).""" if self._tzinfo is None: return None offset = self._tzinfo.utcoffset(None) _check_utc_offset("utcoffset", offset) return offset def tzname(self): """Return the timezone name. Note that the name is 100% informational -- there's no requirement that it mean anything in particular. For example, "GMT", "UTC", "-500", "-5:00", "EDT", "US/Eastern", "America/New York" are all valid replies. """ if self._tzinfo is None: return None name = self._tzinfo.tzname(None) _check_tzname(name) return name def dst(self): """Return 0 if DST is not in effect, or the DST offset (in minutes eastward) if DST is in effect. This is purely informational; the DST offset has already been added to the UTC offset returned by utcoffset() if applicable, so there's no need to consult dst() unless you're interested in displaying the DST info. """ if self._tzinfo is None: return None offset = self._tzinfo.dst(None) _check_utc_offset("dst", offset) return offset def replace(self, hour=None, minute=None, second=None, microsecond=None, tzinfo=True): """Return a new time with new values for the specified fields.""" if hour is None: hour = self.hour if minute is None: minute = self.minute if second is None: second = self.second if microsecond is None: microsecond = self.microsecond if tzinfo is True: tzinfo = self.tzinfo _check_time_fields(hour, minute, second, microsecond) _check_tzinfo_arg(tzinfo) return time(hour, minute, second, microsecond, tzinfo) def __bool__(self): if self.second or self.microsecond: return True offset = self.utcoffset() or timedelta(0) return timedelta(hours=self.hour, minutes=self.minute) != offset # Pickle support. def _getstate(self): us2, us3 = divmod(self._microsecond, 256) us1, us2 = divmod(us2, 256) basestate = bytes([self._hour, self._minute, self._second, us1, us2, us3]) if self._tzinfo is None: return (basestate,) else: return (basestate, self._tzinfo) def __setstate(self, string, tzinfo): if len(string) != 6 or string[0] >= 24: raise TypeError("an integer is required") (self._hour, self._minute, self._second, us1, us2, us3) = string self._microsecond = (((us1 << 8) | us2) << 8) | us3 if tzinfo is None or isinstance(tzinfo, _tzinfo_class): self._tzinfo = tzinfo else: raise TypeError("bad tzinfo state arg %r" % tzinfo) def __reduce__(self): return (time, self._getstate()) _time_class = time # so functions w/ args named "time" can get at the class time.min = time(0, 0, 0) time.max = time(23, 59, 59, 999999) time.resolution = timedelta(microseconds=1) class datetime(date): """datetime(year, month, day[, hour[, minute[, second[, microsecond[,tzinfo]]]]]) The year, month and day arguments are required. tzinfo may be None, or an instance of a tzinfo subclass. The remaining arguments may be ints or longs. """ __slots__ = date.__slots__ + ( '_hour', '_minute', '_second', '_microsecond', '_tzinfo') def __new__(cls, year, month=None, day=None, hour=0, minute=0, second=0, microsecond=0, tzinfo=None): if isinstance(year, bytes) and len(year) == 10: # Pickle support self = date.__new__(cls, year[:4]) self.__setstate(year, month) return self _check_tzinfo_arg(tzinfo) _check_time_fields(hour, minute, second, microsecond) self = date.__new__(cls, year, month, day) self._hour = hour self._minute = minute self._second = second self._microsecond = microsecond self._tzinfo = tzinfo return self # Read-only field accessors @property def hour(self): """hour (0-23)""" return self._hour @property def minute(self): """minute (0-59)""" return self._minute @property def second(self): """second (0-59)""" return self._second @property def microsecond(self): """microsecond (0-999999)""" return self._microsecond @property def tzinfo(self): """timezone info object""" return self._tzinfo @classmethod def fromtimestamp(cls, t, tz=None): """Construct a datetime from a POSIX timestamp (like time.time()). A timezone info object may be passed in as well. """ _check_tzinfo_arg(tz) converter = _time.localtime if tz is None else _time.gmtime t, frac = divmod(t, 1.0) us = round(frac * 1e6) # If timestamp is less than one microsecond smaller than a # full second, us can be rounded up to 1000000. In this case, # roll over to seconds, otherwise, ValueError is raised # by the constructor. if us == 1000000: t += 1 us = 0 y, m, d, hh, mm, ss, weekday, jday, dst = converter(t) ss = min(ss, 59) # clamp out leap seconds if the platform has them result = cls(y, m, d, hh, mm, ss, us, tz) if tz is not None: result = tz.fromutc(result) return result @classmethod def utcfromtimestamp(cls, t): "Construct a UTC datetime from a POSIX timestamp (like time.time())." t, frac = divmod(t, 1.0) us = round(frac * 1e6) # If timestamp is less than one microsecond smaller than a # full second, us can be rounded up to 1000000. In this case, # roll over to seconds, otherwise, ValueError is raised # by the constructor. if us == 1000000: t += 1 us = 0 y, m, d, hh, mm, ss, weekday, jday, dst = _time.gmtime(t) ss = min(ss, 59) # clamp out leap seconds if the platform has them return cls(y, m, d, hh, mm, ss, us) # XXX This is supposed to do better than we *can* do by using time.time(), # XXX if the platform supports a more accurate way. The C implementation # XXX uses gettimeofday on platforms that have it, but that isn't # XXX available from Python. So now() may return different results # XXX across the implementations. @classmethod def now(cls, tz=None): "Construct a datetime from time.time() and optional time zone info." t = _time.time() return cls.fromtimestamp(t, tz) @classmethod def utcnow(cls): "Construct a UTC datetime from time.time()." t = _time.time() return cls.utcfromtimestamp(t) @classmethod def combine(cls, date, time): "Construct a datetime from a given date and a given time." if not isinstance(date, _date_class): raise TypeError("date argument must be a date instance") if not isinstance(time, _time_class): raise TypeError("time argument must be a time instance") return cls(date.year, date.month, date.day, time.hour, time.minute, time.second, time.microsecond, time.tzinfo) def timetuple(self): "Return local time tuple compatible with time.localtime()." dst = self.dst() if dst is None: dst = -1 elif dst: dst = 1 else: dst = 0 return _build_struct_time(self.year, self.month, self.day, self.hour, self.minute, self.second, dst) def utctimetuple(self): "Return UTC time tuple compatible with time.gmtime()." offset = self.utcoffset() if offset: self -= offset y, m, d = self.year, self.month, self.day hh, mm, ss = self.hour, self.minute, self.second return _build_struct_time(y, m, d, hh, mm, ss, 0) def date(self): "Return the date part." return date(self._year, self._month, self._day) def time(self): "Return the time part, with tzinfo None." return time(self.hour, self.minute, self.second, self.microsecond) def timetz(self): "Return the time part, with same tzinfo." return time(self.hour, self.minute, self.second, self.microsecond, self._tzinfo) def replace(self, year=None, month=None, day=None, hour=None, minute=None, second=None, microsecond=None, tzinfo=True): """Return a new datetime with new values for the specified fields.""" if year is None: year = self.year if month is None: month = self.month if day is None: day = self.day if hour is None: hour = self.hour if minute is None: minute = self.minute if second is None: second = self.second if microsecond is None: microsecond = self.microsecond if tzinfo is True: tzinfo = self.tzinfo _check_date_fields(year, month, day) _check_time_fields(hour, minute, second, microsecond) _check_tzinfo_arg(tzinfo) return datetime(year, month, day, hour, minute, second, microsecond, tzinfo) def astimezone(self, tz): if not isinstance(tz, tzinfo): raise TypeError("tz argument must be an instance of tzinfo") mytz = self.tzinfo if mytz is None: raise ValueError("astimezone() requires an aware datetime") if tz is mytz: return self # Convert self to UTC, and attach the new time zone object. myoffset = self.utcoffset() if myoffset is None: raise ValueError("astimezone() requires an aware datetime") utc = (self - myoffset).replace(tzinfo=tz) # Convert from UTC to tz's local time. return tz.fromutc(utc) # Ways to produce a string. def ctime(self): "Return ctime() style string." weekday = self.toordinal() % 7 or 7 return "%s %s %2d %02d:%02d:%02d %04d" % ( _DAYNAMES[weekday], _MONTHNAMES[self._month], self._day, self._hour, self._minute, self._second, self._year) def isoformat(self, sep='T'): """Return the time formatted according to ISO. This is 'YYYY-MM-DD HH:MM:SS.mmmmmm', or 'YYYY-MM-DD HH:MM:SS' if self.microsecond == 0. If self.tzinfo is not None, the UTC offset is also attached, giving 'YYYY-MM-DD HH:MM:SS.mmmmmm+HH:MM' or 'YYYY-MM-DD HH:MM:SS+HH:MM'. Optional argument sep specifies the separator between date and time, default 'T'. """ s = ("%04d-%02d-%02d%c" % (self._year, self._month, self._day, sep) + _format_time(self._hour, self._minute, self._second, self._microsecond)) off = self.utcoffset() if off is not None: if off.days < 0: sign = "-" off = -off else: sign = "+" hh, mm = divmod(off, timedelta(hours=1)) assert not mm % timedelta(minutes=1), "whole minute" mm //= timedelta(minutes=1) s += "%s%02d:%02d" % (sign, hh, mm) return s def __repr__(self): """Convert to formal string, for repr().""" L = [self._year, self._month, self._day, # These are never zero self._hour, self._minute, self._second, self._microsecond] if L[-1] == 0: del L[-1] if L[-1] == 0: del L[-1] s = ", ".join(map(str, L)) s = "%s(%s)" % ('datetime.' + self.__class__.__name__, s) if self._tzinfo is not None: assert s[-1:] == ")" s = s[:-1] + ", tzinfo=%r" % self._tzinfo + ")" return s def __str__(self): "Convert to string, for str()." return self.isoformat(sep=' ') @classmethod def strptime(cls, date_string, format): 'string, format -> new datetime parsed from a string (like time.strptime()).' import _strptime return _strptime._strptime_datetime(cls, date_string, format) def utcoffset(self): """Return the timezone offset in minutes east of UTC (negative west of UTC).""" if self._tzinfo is None: return None offset = self._tzinfo.utcoffset(self) _check_utc_offset("utcoffset", offset) return offset def tzname(self): """Return the timezone name. Note that the name is 100% informational -- there's no requirement that it mean anything in particular. For example, "GMT", "UTC", "-500", "-5:00", "EDT", "US/Eastern", "America/New York" are all valid replies. """ name = _call_tzinfo_method(self._tzinfo, "tzname", self) _check_tzname(name) return name def dst(self): """Return 0 if DST is not in effect, or the DST offset (in minutes eastward) if DST is in effect. This is purely informational; the DST offset has already been added to the UTC offset returned by utcoffset() if applicable, so there's no need to consult dst() unless you're interested in displaying the DST info. """ if self._tzinfo is None: return None offset = self._tzinfo.dst(self) _check_utc_offset("dst", offset) return offset # Comparisons of datetime objects with other. def __eq__(self, other): if isinstance(other, datetime): return self._cmp(other) == 0 elif not isinstance(other, date): return NotImplemented else: return False def __ne__(self, other): if isinstance(other, datetime): return self._cmp(other) != 0 elif not isinstance(other, date): return NotImplemented else: return True def __le__(self, other): if isinstance(other, datetime): return self._cmp(other) <= 0 elif not isinstance(other, date): return NotImplemented else: _cmperror(self, other) def __lt__(self, other): if isinstance(other, datetime): return self._cmp(other) < 0 elif not isinstance(other, date): return NotImplemented else: _cmperror(self, other) def __ge__(self, other): if isinstance(other, datetime): return self._cmp(other) >= 0 elif not isinstance(other, date): return NotImplemented else: _cmperror(self, other) def __gt__(self, other): if isinstance(other, datetime): return self._cmp(other) > 0 elif not isinstance(other, date): return NotImplemented else: _cmperror(self, other) def _cmp(self, other): assert isinstance(other, datetime) mytz = self._tzinfo ottz = other._tzinfo myoff = otoff = None if mytz is ottz: base_compare = True else: if mytz is not None: myoff = self.utcoffset() if ottz is not None: otoff = other.utcoffset() base_compare = myoff == otoff if base_compare: return _cmp((self._year, self._month, self._day, self._hour, self._minute, self._second, self._microsecond), (other._year, other._month, other._day, other._hour, other._minute, other._second, other._microsecond)) if myoff is None or otoff is None: raise TypeError("cannot compare naive and aware datetimes") # XXX What follows could be done more efficiently... diff = self - other # this will take offsets into account if diff.days < 0: return -1 return diff and 1 or 0 def __add__(self, other): "Add a datetime and a timedelta." if not isinstance(other, timedelta): return NotImplemented delta = timedelta(self.toordinal(), hours=self._hour, minutes=self._minute, seconds=self._second, microseconds=self._microsecond) delta += other hour, rem = divmod(delta.seconds, 3600) minute, second = divmod(rem, 60) if 0 < delta.days <= _MAXORDINAL: return datetime.combine(date.fromordinal(delta.days), time(hour, minute, second, delta.microseconds, tzinfo=self._tzinfo)) raise OverflowError("result out of range") __radd__ = __add__ def __sub__(self, other): "Subtract two datetimes, or a datetime and a timedelta." if not isinstance(other, datetime): if isinstance(other, timedelta): return self + -other return NotImplemented days1 = self.toordinal() days2 = other.toordinal() secs1 = self._second + self._minute * 60 + self._hour * 3600 secs2 = other._second + other._minute * 60 + other._hour * 3600 base = timedelta(days1 - days2, secs1 - secs2, self._microsecond - other._microsecond) if self._tzinfo is other._tzinfo: return base myoff = self.utcoffset() otoff = other.utcoffset() if myoff == otoff: return base if myoff is None or otoff is None: raise TypeError("cannot mix naive and timezone-aware time") return base + otoff - myoff def __hash__(self): tzoff = self.utcoffset() if tzoff is None: return hash(self._getstate()[0]) days = _ymd2ord(self.year, self.month, self.day) seconds = self.hour * 3600 + self.minute * 60 + self.second return hash(timedelta(days, seconds, self.microsecond) - tzoff) # Pickle support. def _getstate(self): yhi, ylo = divmod(self._year, 256) us2, us3 = divmod(self._microsecond, 256) us1, us2 = divmod(us2, 256) basestate = bytes([yhi, ylo, self._month, self._day, self._hour, self._minute, self._second, us1, us2, us3]) if self._tzinfo is None: return (basestate,) else: return (basestate, self._tzinfo) def __setstate(self, string, tzinfo): (yhi, ylo, self._month, self._day, self._hour, self._minute, self._second, us1, us2, us3) = string self._year = yhi * 256 + ylo self._microsecond = (((us1 << 8) | us2) << 8) | us3 if tzinfo is None or isinstance(tzinfo, _tzinfo_class): self._tzinfo = tzinfo else: raise TypeError("bad tzinfo state arg %r" % tzinfo) def __reduce__(self): return (self.__class__, self._getstate()) datetime.min = datetime(1, 1, 1) datetime.max = datetime(9999, 12, 31, 23, 59, 59, 999999) datetime.resolution = timedelta(microseconds=1) def _isoweek1monday(year): # Helper to calculate the day number of the Monday starting week 1 # XXX This could be done more efficiently THURSDAY = 3 firstday = _ymd2ord(year, 1, 1) firstweekday = (firstday + 6) % 7 # See weekday() above week1monday = firstday - firstweekday if firstweekday > THURSDAY: week1monday += 7 return week1monday class timezone(tzinfo): __slots__ = '_offset', '_name' # Sentinel value to disallow None _Omitted = object() def __new__(cls, offset, name=_Omitted): if not isinstance(offset, timedelta): raise TypeError("offset must be a timedelta") if name is cls._Omitted: if not offset: return cls.utc name = None elif not isinstance(name, str): raise TypeError("name must be a string") if not cls._minoffset <= offset <= cls._maxoffset: raise ValueError("offset must be a timedelta" " strictly between -timedelta(hours=24) and" " timedelta(hours=24).") if (offset.microseconds != 0 or offset.seconds % 60 != 0): raise ValueError("offset must be a timedelta" " representing a whole number of minutes") return cls._create(offset, name) @classmethod def _create(cls, offset, name=None): self = tzinfo.__new__(cls) self._offset = offset self._name = name return self def __getinitargs__(self): """pickle support""" if self._name is None: return (self._offset,) return (self._offset, self._name) def __eq__(self, other): return self._offset == other._offset def __hash__(self): return hash(self._offset) def __repr__(self): """Convert to formal string, for repr(). >>> tz = timezone.utc >>> repr(tz) 'datetime.timezone.utc' >>> tz = timezone(timedelta(hours=-5), 'EST') >>> repr(tz) "datetime.timezone(datetime.timedelta(-1, 68400), 'EST')" """ if self is self.utc: return 'datetime.timezone.utc' if self._name is None: return "%s(%r)" % ('datetime.' + self.__class__.__name__, self._offset) return "%s(%r, %r)" % ('datetime.' + self.__class__.__name__, self._offset, self._name) def __str__(self): return self.tzname(None) def utcoffset(self, dt): if isinstance(dt, datetime) or dt is None: return self._offset raise TypeError("utcoffset() argument must be a datetime instance" " or None") def tzname(self, dt): if isinstance(dt, datetime) or dt is None: if self._name is None: return self._name_from_offset(self._offset) return self._name raise TypeError("tzname() argument must be a datetime instance" " or None") def dst(self, dt): if isinstance(dt, datetime) or dt is None: return None raise TypeError("dst() argument must be a datetime instance" " or None") def fromutc(self, dt): if isinstance(dt, datetime): if dt.tzinfo is not self: raise ValueError("fromutc: dt.tzinfo " "is not self") return dt + self._offset raise TypeError("fromutc() argument must be a datetime instance" " or None") _maxoffset = timedelta(hours=23, minutes=59) _minoffset = -_maxoffset @staticmethod def _name_from_offset(delta): if delta < timedelta(0): sign = '-' delta = -delta else: sign = '+' hours, rest = divmod(delta, timedelta(hours=1)) minutes = rest // timedelta(minutes=1) return 'UTC{}{:02d}:{:02d}'.format(sign, hours, minutes) timezone.utc = timezone._create(timedelta(0)) timezone.min = timezone._create(timezone._minoffset) timezone.max = timezone._create(timezone._maxoffset) """ Some time zone algebra. For a datetime x, let x.n = x stripped of its timezone -- its naive time. x.o = x.utcoffset(), and assuming that doesn't raise an exception or return None x.d = x.dst(), and assuming that doesn't raise an exception or return None x.s = x's standard offset, x.o - x.d Now some derived rules, where k is a duration (timedelta). 1. x.o = x.s + x.d This follows from the definition of x.s. 2. If x and y have the same tzinfo member, x.s = y.s. This is actually a requirement, an assumption we need to make about sane tzinfo classes. 3. The naive UTC time corresponding to x is x.n - x.o. This is again a requirement for a sane tzinfo class. 4. (x+k).s = x.s This follows from #2, and that datimetimetz+timedelta preserves tzinfo. 5. (x+k).n = x.n + k Again follows from how arithmetic is defined. Now we can explain tz.fromutc(x). Let's assume it's an interesting case (meaning that the various tzinfo methods exist, and don't blow up or return None when called). The function wants to return a datetime y with timezone tz, equivalent to x. x is already in UTC. By #3, we want y.n - y.o = x.n [1] The algorithm starts by attaching tz to x.n, and calling that y. So x.n = y.n at the start. Then it wants to add a duration k to y, so that [1] becomes true; in effect, we want to solve [2] for k: (y+k).n - (y+k).o = x.n [2] By #1, this is the same as (y+k).n - ((y+k).s + (y+k).d) = x.n [3] By #5, (y+k).n = y.n + k, which equals x.n + k because x.n=y.n at the start. Substituting that into [3], x.n + k - (y+k).s - (y+k).d = x.n; the x.n terms cancel, leaving k - (y+k).s - (y+k).d = 0; rearranging, k = (y+k).s - (y+k).d; by #4, (y+k).s == y.s, so k = y.s - (y+k).d On the RHS, (y+k).d can't be computed directly, but y.s can be, and we approximate k by ignoring the (y+k).d term at first. Note that k can't be very large, since all offset-returning methods return a duration of magnitude less than 24 hours. For that reason, if y is firmly in std time, (y+k).d must be 0, so ignoring it has no consequence then. In any case, the new value is z = y + y.s [4] It's helpful to step back at look at [4] from a higher level: it's simply mapping from UTC to tz's standard time. At this point, if z.n - z.o = x.n [5] we have an equivalent time, and are almost done. The insecurity here is at the start of daylight time. Picture US Eastern for concreteness. The wall time jumps from 1:59 to 3:00, and wall hours of the form 2:MM don't make good sense then. The docs ask that an Eastern tzinfo class consider such a time to be EDT (because it's "after 2"), which is a redundant spelling of 1:MM EST on the day DST starts. We want to return the 1:MM EST spelling because that's the only spelling that makes sense on the local wall clock. In fact, if [5] holds at this point, we do have the standard-time spelling, but that takes a bit of proof. We first prove a stronger result. What's the difference between the LHS and RHS of [5]? Let diff = x.n - (z.n - z.o) [6] Now z.n = by [4] (y + y.s).n = by #5 y.n + y.s = since y.n = x.n x.n + y.s = since z and y are have the same tzinfo member, y.s = z.s by #2 x.n + z.s Plugging that back into [6] gives diff = x.n - ((x.n + z.s) - z.o) = expanding x.n - x.n - z.s + z.o = cancelling - z.s + z.o = by #2 z.d So diff = z.d. If [5] is true now, diff = 0, so z.d = 0 too, and we have the standard-time spelling we wanted in the endcase described above. We're done. Contrarily, if z.d = 0, then we have a UTC equivalent, and are also done. If [5] is not true now, diff = z.d != 0, and z.d is the offset we need to add to z (in effect, z is in tz's standard time, and we need to shift the local clock into tz's daylight time). Let z' = z + z.d = z + diff [7] and we can again ask whether z'.n - z'.o = x.n [8] If so, we're done. If not, the tzinfo class is insane, according to the assumptions we've made. This also requires a bit of proof. As before, let's compute the difference between the LHS and RHS of [8] (and skipping some of the justifications for the kinds of substitutions we've done several times already): diff' = x.n - (z'.n - z'.o) = replacing z'.n via [7] x.n - (z.n + diff - z'.o) = replacing diff via [6] x.n - (z.n + x.n - (z.n - z.o) - z'.o) = x.n - z.n - x.n + z.n - z.o + z'.o = cancel x.n - z.n + z.n - z.o + z'.o = cancel z.n - z.o + z'.o = #1 twice -z.s - z.d + z'.s + z'.d = z and z' have same tzinfo z'.d - z.d So z' is UTC-equivalent to x iff z'.d = z.d at this point. If they are equal, we've found the UTC-equivalent so are done. In fact, we stop with [7] and return z', not bothering to compute z'.d. How could z.d and z'd differ? z' = z + z.d [7], so merely moving z' by a dst() offset, and starting *from* a time already in DST (we know z.d != 0), would have to change the result dst() returns: we start in DST, and moving a little further into it takes us out of DST. There isn't a sane case where this can happen. The closest it gets is at the end of DST, where there's an hour in UTC with no spelling in a hybrid tzinfo class. In US Eastern, that's 5:MM UTC = 0:MM EST = 1:MM EDT. During that hour, on an Eastern clock 1:MM is taken as being in standard time (6:MM UTC) because the docs insist on that, but 0:MM is taken as being in daylight time (4:MM UTC). There is no local time mapping to 5:MM UTC. The local clock jumps from 1:59 back to 1:00 again, and repeats the 1:MM hour in standard time. Since that's what the local clock *does*, we want to map both UTC hours 5:MM and 6:MM to 1:MM Eastern. The result is ambiguous in local time, but so it goes -- it's the way the local clock works. When x = 5:MM UTC is the input to this algorithm, x.o=0, y.o=-5 and y.d=0, so z=0:MM. z.d=60 (minutes) then, so [5] doesn't hold and we keep going. z' = z + z.d = 1:MM then, and z'.d=0, and z'.d - z.d = -60 != 0 so [8] (correctly) concludes that z' is not UTC-equivalent to x. Because we know z.d said z was in daylight time (else [5] would have held and we would have stopped then), and we know z.d != z'.d (else [8] would have held and we we have stopped then), and there are only 2 possible values dst() can return in Eastern, it follows that z'.d must be 0 (which it is in the example, but the reasoning doesn't depend on the example -- it depends on there being two possible dst() outcomes, one zero and the other non-zero). Therefore z' must be in standard time, and is the spelling we want in this case. Note again that z' is not UTC-equivalent as far as the hybrid tzinfo class is concerned (because it takes z' as being in standard time rather than the daylight time we intend here), but returning it gives the real-life "local clock repeats an hour" behavior when mapping the "unspellable" UTC hour into tz. When the input is 6:MM, z=1:MM and z.d=0, and we stop at once, again with the 1:MM standard time spelling we want. So how can this break? One of the assumptions must be violated. Two possibilities: 1) [2] effectively says that y.s is invariant across all y belong to a given time zone. This isn't true if, for political reasons or continental drift, a region decides to change its base offset from UTC. 2) There may be versions of "double daylight" time where the tail end of the analysis gives up a step too early. I haven't thought about that enough to say. In any case, it's clear that the default fromutc() is strong enough to handle "almost all" time zones: so long as the standard offset is invariant, it doesn't matter if daylight time transition points change from year to year, or if daylight time is skipped in some years; it doesn't matter how large or small dst() may get within its bounds; and it doesn't even matter if some perverse time zone returns a negative dst()). So a breaking case must be pretty bizarre, and a tzinfo subclass can override fromutc() if it is. """ try: from _datetime import * except ImportError: pass else: # Clean up unused names del (_DAYNAMES, _DAYS_BEFORE_MONTH, _DAYS_IN_MONTH, _DI100Y, _DI400Y, _DI4Y, _MAXORDINAL, _MONTHNAMES, _build_struct_time, _call_tzinfo_method, _check_date_fields, _check_time_fields, _check_tzinfo_arg, _check_tzname, _check_utc_offset, _cmp, _cmperror, _date_class, _days_before_month, _days_before_year, _days_in_month, _format_time, _is_leap, _isoweek1monday, _math, _ord2ymd, _time, _time_class, _tzinfo_class, _wrap_strftime, _ymd2ord) # XXX Since import * above excludes names that start with _, # docstring does not get overwritten. In the future, it may be # appropriate to maintain a single module level docstring and # remove the following line. from _datetime import __doc__
BeATz-UnKNoWN/python-for-android
python3-alpha/python3-src/Lib/datetime.py
Python
apache-2.0
73,767
var assert = require('assert'), error = require('../../../lib/error/index'), math = require('../../../index'), multinomial = math.multinomial, _ = require('underscore'); describe('multinomial', function() { it('should calculate the multinomial of an array of numbers', function() { assert.equal(multinomial([1,2,1]), 12); assert.equal(multinomial([4,2,1]), 105); assert.equal(multinomial([4,4]), 70); }); it('should calculate the multinomial of n items taken k at a time with BigNumbers', function() { assert.equal(_.isEqual(multinomial([math.bignumber(3), math.bignumber(4), math.bignumber(5)]), math.bignumber(27720)),true); assert.deepEqual(multinomial([math.bignumber(10), math.bignumber(1), math.bignumber(2)]), math.bignumber(858)); }); it('should not work with non-integer and negative input', function() { assert.throws(function() {multinomial([0.5,3])}, TypeError); assert.throws(function() {multinomial([math.bignumber(3), math.bignumber(0.5)])}, TypeError); assert.throws(function() {multinomial([math.bignumber(3.5), math.bignumber(-3)])}, TypeError); assert.throws(function() {multinomial([math.bignumber(3.5), 1/3])}, TypeError); }); it('should not work with the wrong number or type of arguments', function() { assert.throws(function() {multinomial(5, 3, 2)}); assert.throws(function() {multinomial(true, "hello world")}); }); });
luke-gumbley/mathjs
test/function/probability/multinomial.test.js
JavaScript
apache-2.0
1,424
๏ปฟusing System; using NUnit.Framework; using System.Threading; namespace Selenium.Tests { [TestFixture] public class TestElementPresent : SeleniumTestCaseBase { [Test] public void ShouldDetectElementPresent() { selenium.Open("../tests/html/test_element_present.html"); Assert.IsTrue(selenium.IsElementPresent("aLink")); selenium.Click("removeLinkAfterAWhile"); for (int second = 0; ; second++) { if (second >= 60) Assert.Fail("timeout"); try { if (!selenium.IsElementPresent("aLink")) break; } catch (Exception) { } Thread.Sleep(1000); } Assert.IsFalse(selenium.IsElementPresent("aLink")); selenium.Click("addLinkAfterAWhile"); for (int second = 0; ; second++) { if (second >= 60) Assert.Fail("timeout"); try { if (selenium.IsElementPresent("aLink")) break; } catch (Exception) { } Thread.Sleep(1000); } Assert.IsTrue(selenium.IsElementPresent("aLink")); } } }
chrisblock/selenium
dotnet/test/webdriverbackedselenium/TestElementPresent.cs
C#
apache-2.0
1,185
/* * Copyright 2000-2014 JetBrains s.r.o. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.intellij.ui.popup; import com.intellij.openapi.application.ApplicationManager; import com.intellij.openapi.project.Project; import com.intellij.openapi.ui.popup.*; import com.intellij.openapi.util.Computable; import com.intellij.openapi.util.Condition; import com.intellij.openapi.util.Disposer; import com.intellij.openapi.util.Pair; import com.intellij.ui.ActiveComponent; import com.intellij.util.BooleanFunction; import com.intellij.util.Processor; import com.intellij.util.ui.EmptyIcon; import org.jetbrains.annotations.NotNull; import org.jetbrains.annotations.Nullable; import javax.swing.*; import java.awt.*; import java.awt.event.ActionListener; import java.awt.event.KeyEvent; import java.util.*; import java.util.List; /** * @author anna * @since 15-Mar-2006 */ public class ComponentPopupBuilderImpl implements ComponentPopupBuilder { private String myTitle = ""; private boolean myResizable; private boolean myMovable; private final JComponent myComponent; private final JComponent myPreferredFocusedComponent; private boolean myRequestFocus; private String myDimensionServiceKey = null; private Computable<Boolean> myCallback = null; private Project myProject; private boolean myCancelOnClickOutside = true; private boolean myCancelOnWindowDeactivation = true; private final Set<JBPopupListener> myListeners = new LinkedHashSet<JBPopupListener>(); private boolean myUseDimServiceForXYLocation; private IconButton myCancelButton; private MouseChecker myCancelOnMouseOutCallback; private boolean myCancelOnWindow; private ActiveIcon myTitleIcon = new ActiveIcon(EmptyIcon.ICON_0); private boolean myCancelKeyEnabled = true; private boolean myLocateByContent = false; private boolean myPlaceWithinScreen = true; private Processor<JBPopup> myPinCallback = null; private Dimension myMinSize; private MaskProvider myMaskProvider; private float myAlpha; private List<Object> myUserData; private boolean myInStack = true; private boolean myModalContext = true; private Component[] myFocusOwners = new Component[0]; private String myAd; private boolean myShowShadow = true; private boolean myShowBorder = true; private boolean myFocusable = true; private ActiveComponent myCommandButton; private List<Pair<ActionListener, KeyStroke>> myKeyboardActions = Collections.emptyList(); private Component mySettingsButtons; private boolean myMayBeParent; private int myAdAlignment = SwingConstants.LEFT; private BooleanFunction<KeyEvent> myKeyEventHandler; public ComponentPopupBuilderImpl(@NotNull JComponent component, JComponent preferredFocusedComponent) { myComponent = component; myPreferredFocusedComponent = preferredFocusedComponent; } @Override @NotNull public ComponentPopupBuilder setMayBeParent(boolean mayBeParent) { myMayBeParent = mayBeParent; return this; } @Override @NotNull public ComponentPopupBuilder setTitle(String title) { myTitle = title; return this; } @Override @NotNull public ComponentPopupBuilder setResizable(final boolean resizable) { myResizable = resizable; return this; } @Override @NotNull public ComponentPopupBuilder setMovable(final boolean movable) { myMovable = movable; return this; } @Override @NotNull public ComponentPopupBuilder setCancelOnClickOutside(final boolean cancel) { myCancelOnClickOutside = cancel; return this; } @Override @NotNull public ComponentPopupBuilder setCancelOnMouseOutCallback(final MouseChecker shouldCancel) { myCancelOnMouseOutCallback = shouldCancel; return this; } @Override @NotNull public ComponentPopupBuilder addListener(final JBPopupListener listener) { myListeners.add(listener); return this; } @Override @NotNull public ComponentPopupBuilder setRequestFocus(final boolean requestFocus) { myRequestFocus = requestFocus; return this; } @Override @NotNull public ComponentPopupBuilder setFocusable(final boolean focusable) { myFocusable = focusable; return this; } @Override @NotNull public ComponentPopupBuilder setDimensionServiceKey(final Project project, final String key, final boolean useForXYLocation) { myDimensionServiceKey = key; myUseDimServiceForXYLocation = useForXYLocation; myProject = project; return this; } @Override @NotNull public ComponentPopupBuilder setCancelCallback(final Computable<Boolean> shouldProceed) { myCallback = shouldProceed; return this; } @Override @NotNull public ComponentPopupBuilder setCancelButton(@NotNull final IconButton cancelButton) { myCancelButton = cancelButton; return this; } @Override @NotNull public ComponentPopupBuilder setCommandButton(@NotNull ActiveComponent button) { myCommandButton = button; return this; } @Override @NotNull public ComponentPopupBuilder setCouldPin(@Nullable final Processor<JBPopup> callback) { myPinCallback = callback; return this; } @Override @NotNull public ComponentPopupBuilder setKeyboardActions(@NotNull List<Pair<ActionListener, KeyStroke>> keyboardActions) { myKeyboardActions = keyboardActions; return this; } @Override @NotNull public ComponentPopupBuilder setSettingButtons(@NotNull Component button) { mySettingsButtons = button; return this; } @Override @NotNull public ComponentPopupBuilder setCancelOnOtherWindowOpen(final boolean cancelOnWindow) { myCancelOnWindow = cancelOnWindow; return this; } @Override public ComponentPopupBuilder setCancelOnWindowDeactivation(boolean cancelOnWindowDeactivation) { myCancelOnWindowDeactivation = cancelOnWindowDeactivation; return this; } @NotNull @Override public ComponentPopupBuilder setKeyEventHandler(@NotNull BooleanFunction<KeyEvent> handler) { myKeyEventHandler = handler; return this; } @Override @NotNull public ComponentPopupBuilder setProject(Project project) { myProject = project; return this; } @Override @NotNull public JBPopup createPopup() { AbstractPopup popup = new AbstractPopup().init( myProject, myComponent, myPreferredFocusedComponent, myRequestFocus, myFocusable, myMovable, myDimensionServiceKey, myResizable, myTitle, myCallback, myCancelOnClickOutside, myListeners, myUseDimServiceForXYLocation, myCommandButton, myCancelButton, myCancelOnMouseOutCallback, myCancelOnWindow, myTitleIcon, myCancelKeyEnabled, myLocateByContent, myPlaceWithinScreen, myMinSize, myAlpha, myMaskProvider, myInStack, myModalContext, myFocusOwners, myAd, myAdAlignment, false, myKeyboardActions, mySettingsButtons, myPinCallback, myMayBeParent, myShowShadow, myShowBorder, myCancelOnWindowDeactivation, myKeyEventHandler ); if (myUserData != null) { popup.setUserData(myUserData); } Disposer.register(ApplicationManager.getApplication(), popup); return popup; } @Override @NotNull public ComponentPopupBuilder setRequestFocusCondition(Project project, Condition<Project> condition) { myRequestFocus = condition.value(project); return this; } @Override @NotNull public ComponentPopupBuilder setTitleIcon(@NotNull final ActiveIcon icon) { myTitleIcon = icon; return this; } @Override @NotNull public ComponentPopupBuilder setCancelKeyEnabled(final boolean enabled) { myCancelKeyEnabled = enabled; return this; } @Override @NotNull public ComponentPopupBuilder setLocateByContent(final boolean byContent) { myLocateByContent = byContent; return this; } @Override @NotNull public ComponentPopupBuilder setLocateWithinScreenBounds(final boolean within) { myPlaceWithinScreen = within; return this; } @Override @NotNull public ComponentPopupBuilder setMinSize(final Dimension minSize) { myMinSize = minSize; return this; } @Override @NotNull public ComponentPopupBuilder setMaskProvider(MaskProvider maskProvider) { myMaskProvider = maskProvider; return this; } @Override @NotNull public ComponentPopupBuilder setAlpha(final float alpha) { myAlpha = alpha; return this; } @Override @NotNull public ComponentPopupBuilder setBelongsToGlobalPopupStack(final boolean isInStack) { myInStack = isInStack; return this; } @Override @NotNull public ComponentPopupBuilder addUserData(final Object object) { if (myUserData == null) { myUserData = new ArrayList<Object>(); } myUserData.add(object); return this; } @Override @NotNull public ComponentPopupBuilder setModalContext(final boolean modal) { myModalContext = modal; return this; } @Override @NotNull public ComponentPopupBuilder setFocusOwners(@NotNull final Component[] focusOwners) { myFocusOwners = focusOwners; return this; } @Override @NotNull public ComponentPopupBuilder setAdText(@Nullable final String text) { return setAdText(text, SwingConstants.LEFT); } @NotNull @Override public ComponentPopupBuilder setAdText(@Nullable String text, int textAlignment) { myAd = text; myAdAlignment = textAlignment; return this; } @NotNull @Override public ComponentPopupBuilder setShowShadow(boolean show) { myShowShadow = show; return this; } @NotNull @Override public ComponentPopupBuilder setShowBorder(boolean show) { myShowBorder = show; return this; } }
akosyakov/intellij-community
platform/platform-impl/src/com/intellij/ui/popup/ComponentPopupBuilderImpl.java
Java
apache-2.0
10,139
/* Copyright 2015 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ /* oidc implements the authenticator.Token interface using the OpenID Connect protocol. config := oidc.OIDCOptions{ IssuerURL: "https://accounts.google.com", ClientID: os.Getenv("GOOGLE_CLIENT_ID"), UsernameClaim: "email", } tokenAuthenticator, err := oidc.New(config) */ package oidc import ( "crypto/tls" "crypto/x509" "errors" "fmt" "net/http" "net/url" "sync" "sync/atomic" "github.com/coreos/go-oidc/jose" "github.com/coreos/go-oidc/oidc" "github.com/golang/glog" "k8s.io/apimachinery/pkg/util/net" "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/apiserver/pkg/authentication/user" certutil "k8s.io/client-go/util/cert" ) type OIDCOptions struct { // IssuerURL is the URL the provider signs ID Tokens as. This will be the "iss" // field of all tokens produced by the provider and is used for configuration // discovery. // // The URL is usually the provider's URL without a path, for example // "https://accounts.google.com" or "https://login.salesforce.com". // // The provider must implement configuration discovery. // See: https://openid.net/specs/openid-connect-discovery-1_0.html#ProviderConfig IssuerURL string // ClientID the JWT must be issued for, the "sub" field. This plugin only trusts a single // client to ensure the plugin can be used with public providers. // // The plugin supports the "authorized party" OpenID Connect claim, which allows // specialized providers to issue tokens to a client for a different client. // See: https://openid.net/specs/openid-connect-core-1_0.html#IDToken ClientID string // Path to a PEM encoded root certificate of the provider. CAFile string // UsernameClaim is the JWT field to use as the user's username. UsernameClaim string // GroupsClaim, if specified, causes the OIDCAuthenticator to try to populate the user's // groups with an ID Token field. If the GrouppClaim field is present in an ID Token the value // must be a string or list of strings. GroupsClaim string } type OIDCAuthenticator struct { issuerURL string trustedClientID string usernameClaim string groupsClaim string httpClient *http.Client // Contains an *oidc.Client. Do not access directly. Use client() method. oidcClient atomic.Value // Guards the close method and is used to lock during initialization and closing. mu sync.Mutex close func() // May be nil } // New creates a token authenticator which validates OpenID Connect ID Tokens. func New(opts OIDCOptions) (*OIDCAuthenticator, error) { url, err := url.Parse(opts.IssuerURL) if err != nil { return nil, err } if url.Scheme != "https" { return nil, fmt.Errorf("'oidc-issuer-url' (%q) has invalid scheme (%q), require 'https'", opts.IssuerURL, url.Scheme) } if opts.UsernameClaim == "" { return nil, errors.New("no username claim provided") } var roots *x509.CertPool if opts.CAFile != "" { roots, err = certutil.NewPool(opts.CAFile) if err != nil { return nil, fmt.Errorf("Failed to read the CA file: %v", err) } } else { glog.Info("OIDC: No x509 certificates provided, will use host's root CA set") } // Copied from http.DefaultTransport. tr := net.SetTransportDefaults(&http.Transport{ // According to golang's doc, if RootCAs is nil, // TLS uses the host's root CA set. TLSClientConfig: &tls.Config{RootCAs: roots}, }) authenticator := &OIDCAuthenticator{ issuerURL: opts.IssuerURL, trustedClientID: opts.ClientID, usernameClaim: opts.UsernameClaim, groupsClaim: opts.GroupsClaim, httpClient: &http.Client{Transport: tr}, } // Attempt to initialize the authenticator asynchronously. // // Ignore errors instead of returning it since the OpenID Connect provider might not be // available yet, for instance if it's running on the cluster and needs the API server // to come up first. Errors will be logged within the client() method. go func() { defer runtime.HandleCrash() authenticator.client() }() return authenticator, nil } // Close stops all goroutines used by the authenticator. func (a *OIDCAuthenticator) Close() { a.mu.Lock() defer a.mu.Unlock() if a.close != nil { a.close() } return } func (a *OIDCAuthenticator) client() (*oidc.Client, error) { // Fast check to see if client has already been initialized. if client := a.oidcClient.Load(); client != nil { return client.(*oidc.Client), nil } // Acquire lock, then recheck initialization. a.mu.Lock() defer a.mu.Unlock() if client := a.oidcClient.Load(); client != nil { return client.(*oidc.Client), nil } // Try to initialize client. providerConfig, err := oidc.FetchProviderConfig(a.httpClient, a.issuerURL) if err != nil { glog.Errorf("oidc authenticator: failed to fetch provider discovery data: %v", err) return nil, fmt.Errorf("fetch provider config: %v", err) } clientConfig := oidc.ClientConfig{ HTTPClient: a.httpClient, Credentials: oidc.ClientCredentials{ID: a.trustedClientID}, ProviderConfig: providerConfig, } client, err := oidc.NewClient(clientConfig) if err != nil { glog.Errorf("oidc authenticator: failed to create client: %v", err) return nil, fmt.Errorf("create client: %v", err) } // SyncProviderConfig will start a goroutine to periodically synchronize the provider config. // The synchronization interval is set by the expiration length of the config, and has a minimum // and maximum threshold. stop := client.SyncProviderConfig(a.issuerURL) a.oidcClient.Store(client) a.close = func() { // This assumes the stop is an unbuffered channel. // So instead of closing the channel, we send am empty struct here. // This guarantees that when this function returns, there is no flying requests, // because a send to an unbuffered channel happens after the receive from the channel. stop <- struct{}{} } return client, nil } // AuthenticateToken decodes and verifies an ID Token using the OIDC client, if the verification succeeds, // then it will extract the user info from the JWT claims. func (a *OIDCAuthenticator) AuthenticateToken(value string) (user.Info, bool, error) { jwt, err := jose.ParseJWT(value) if err != nil { return nil, false, err } client, err := a.client() if err != nil { return nil, false, err } if err := client.VerifyJWT(jwt); err != nil { return nil, false, err } claims, err := jwt.Claims() if err != nil { return nil, false, err } claim, ok, err := claims.StringClaim(a.usernameClaim) if err != nil { return nil, false, err } if !ok { return nil, false, fmt.Errorf("cannot find %q in JWT claims", a.usernameClaim) } var username string switch a.usernameClaim { case "email": verified, ok := claims["email_verified"] if !ok { return nil, false, errors.New("'email_verified' claim not present") } emailVerified, ok := verified.(bool) if !ok { // OpenID Connect spec defines 'email_verified' as a boolean. For now, be a pain and error if // it's a different type. If there are enough misbehaving providers we can relax this latter. // // See: https://openid.net/specs/openid-connect-core-1_0.html#StandardClaims return nil, false, fmt.Errorf("malformed claim 'email_verified', expected boolean got %T", verified) } if !emailVerified { return nil, false, errors.New("email not verified") } username = claim default: // For all other cases, use issuerURL + claim as the user name. username = fmt.Sprintf("%s#%s", a.issuerURL, claim) } // TODO(yifan): Add UID, also populate the issuer to upper layer. info := &user.DefaultInfo{Name: username} if a.groupsClaim != "" { groups, found, err := claims.StringsClaim(a.groupsClaim) if err != nil { // Groups type is present but is not an array of strings, try to decode as a string. group, _, err := claims.StringClaim(a.groupsClaim) if err != nil { // Custom claim is present, but isn't an array of strings or a string. return nil, false, fmt.Errorf("custom group claim contains invalid type: %T", claims[a.groupsClaim]) } info.Groups = []string{group} } else if found { info.Groups = groups } } return info, true, nil }
elyscape/origin
cmd/cluster-capacity/go/src/github.com/kubernetes-incubator/cluster-capacity/vendor/k8s.io/apiserver/plugin/pkg/authenticator/token/oidc/oidc.go
GO
apache-2.0
8,698
<!-- @license Copyright (c) 2015 The Polymer Project Authors. All rights reserved. This code may only be used under the BSD style license found at http://polymer.github.io/LICENSE.txt The complete set of authors may be found at http://polymer.github.io/AUTHORS.txt The complete set of contributors may be found at http://polymer.github.io/CONTRIBUTORS.txt Code distributed by Google as part of the polymer project is also subject to an additional IP rights grant found at http://polymer.github.io/PATENTS.txt --> <link rel="import" href="../polymer/polymer.html"> <script> (function(){ /** `iron-signals` provides basic publish-subscribe functionality. Note: avoid using `iron-signals` whenever you can use a controller (parent element) to mediate communication instead. To send a signal, fire a custom event of type `iron-signal`, with a detail object containing `name` and `data` fields. this.fire('iron-signal', {name: 'hello', data: null}); To receive a signal, listen for `iron-signal-<name>` event on a `iron-signals` element. <iron-signals on-iron-signal-hello="{{helloSignal}}"> You can fire a signal event from anywhere, and all `iron-signals` elements will receive the event, regardless of where they are in DOM. @demo demo/index.html */ Polymer({ is: 'iron-signals', attached: function() { signals.push(this); }, detached: function() { var i = signals.indexOf(this); if (i >= 0) { signals.splice(i, 1); } } }); // private shared database var signals = []; // signal dispatcher function notify(name, data) { // convert generic-signal event to named-signal event var signal = new CustomEvent('iron-signal-' + name, { // if signals bubble, it's easy to get confusing duplicates // (1) listen on a container on behalf of local child // (2) some deep child ignores the event and it bubbles // up to said container // (3) local child event bubbles up to container // also, for performance, we avoid signals flying up the // tree from all over the place bubbles: false, detail: data }); // dispatch named-signal to all 'signals' instances, // only interested listeners will react signals.forEach(function(s) { s.dispatchEvent(signal); }); } // signal listener at document document.addEventListener('iron-signal', function(e) { notify(e.detail.name, e.detail.data); }); })(); </script>
frankyan/Genomic-Interactive-Visualization-Engine-1
give/html/components/bower_components/iron-signals/iron-signals.html
HTML
apache-2.0
2,476
<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd"> <html> <head> <META http-equiv="Content-Type" content="text/html; charset=UTF-8"> <meta content="Apache Forrest" name="Generator"> <meta name="Forrest-version" content="0.8"> <meta name="Forrest-skin-name" content="pelt"> <title>Hadoop้›†็พคๆญๅปบ</title> <link type="text/css" href="skin/basic.css" rel="stylesheet"> <link media="screen" type="text/css" href="skin/screen.css" rel="stylesheet"> <link media="print" type="text/css" href="skin/print.css" rel="stylesheet"> <link type="text/css" href="skin/profile.css" rel="stylesheet"> <script src="skin/getBlank.js" language="javascript" type="text/javascript"></script><script src="skin/getMenu.js" language="javascript" type="text/javascript"></script><script src="skin/fontsize.js" language="javascript" type="text/javascript"></script> <link rel="shortcut icon" href="images/favicon.ico"> </head> <body onload="init()"> <script type="text/javascript">ndeSetTextSize();</script> <div id="top"> <!--+ |breadtrail +--> <div class="breadtrail"> <a href="http://www.apache.org/">Apache</a> &gt; <a href="http://hadoop.apache.org/">Hadoop</a> &gt; <a href="http://hadoop.apache.org/core/">Core</a><script src="skin/breadcrumbs.js" language="JavaScript" type="text/javascript"></script> </div> <!--+ |header +--> <div class="header"> <!--+ |start group logo +--> <div class="grouplogo"> <a href="http://hadoop.apache.org/"><img class="logoImage" alt="Hadoop" src="images/hadoop-logo.jpg" title="Apache Hadoop"></a> </div> <!--+ |end group logo +--> <!--+ |start Project Logo +--> <div class="projectlogo"> <a href="http://hadoop.apache.org/core/"><img class="logoImage" alt="Hadoop" src="images/core-logo.gif" title="Scalable Computing Platform"></a> </div> <!--+ |end Project Logo +--> <!--+ |start Search +--> <div class="searchbox"> <form action="http://www.google.com/search" method="get" class="roundtopsmall"> <input value="hadoop.apache.org" name="sitesearch" type="hidden"><input onFocus="getBlank (this, 'Search the site with google');" size="25" name="q" id="query" type="text" value="Search the site with google">&nbsp; <input name="Search" value="Search" type="submit"> </form> </div> <!--+ |end search +--> <!--+ |start Tabs +--> <ul id="tabs"> <li> <a class="unselected" href="http://hadoop.apache.org/core/">้กน็›ฎ</a> </li> <li> <a class="unselected" href="http://wiki.apache.org/hadoop">็ปดๅŸบ</a> </li> <li class="current"> <a class="selected" href="index.html">Hadoop 0.18ๆ–‡ๆกฃ</a> </li> </ul> <!--+ |end Tabs +--> </div> </div> <div id="main"> <div id="publishedStrip"> <!--+ |start Subtabs +--> <div id="level2tabs"></div> <!--+ |end Endtabs +--> <script type="text/javascript"><!-- document.write("Last Published: " + document.lastModified); // --></script> </div> <!--+ |breadtrail +--> <div class="breadtrail"> &nbsp; </div> <!--+ |start Menu, mainarea +--> <!--+ |start Menu +--> <div id="menu"> <div onclick="SwitchMenu('menu_selected_1.1', 'skin/')" id="menu_selected_1.1Title" class="menutitle" style="background-image: url('skin/images/chapter_open.gif');">ๆ–‡ๆกฃ</div> <div id="menu_selected_1.1" class="selectedmenuitemgroup" style="display: block;"> <div class="menuitem"> <a href="index.html">ๆฆ‚่ฟฐ</a> </div> <div class="menuitem"> <a href="quickstart.html">ๅฟซ้€Ÿๅ…ฅ้—จ</a> </div> <div class="menupage"> <div class="menupagetitle">้›†็พคๆญๅปบ</div> </div> <div class="menuitem"> <a href="hdfs_design.html">HDFSๆž„ๆžถ่ฎพ่ฎก</a> </div> <div class="menuitem"> <a href="hdfs_user_guide.html">HDFSไฝฟ็”จๆŒ‡ๅ—</a> </div> <div class="menuitem"> <a href="hdfs_permissions_guide.html">HDFSๆƒ้™ๆŒ‡ๅ—</a> </div> <div class="menuitem"> <a href="hdfs_quota_admin_guide.html">HDFS้…้ข็ฎก็†ๆŒ‡ๅ—</a> </div> <div class="menuitem"> <a href="commands_manual.html">ๅ‘ฝไปคๆ‰‹ๅ†Œ</a> </div> <div class="menuitem"> <a href="hdfs_shell.html">FS Shellไฝฟ็”จๆŒ‡ๅ—</a> </div> <div class="menuitem"> <a href="distcp.html">DistCpไฝฟ็”จๆŒ‡ๅ—</a> </div> <div class="menuitem"> <a href="mapred_tutorial.html">Map-Reduceๆ•™็จ‹</a> </div> <div class="menuitem"> <a href="native_libraries.html">Hadoopๆœฌๅœฐๅบ“</a> </div> <div class="menuitem"> <a href="streaming.html">Streaming</a> </div> <div class="menuitem"> <a href="hadoop_archives.html">Hadoop Archives</a> </div> <div class="menuitem"> <a href="hod.html">Hadoop On Demand</a> </div> <div class="menuitem"> <a href="http://hadoop.apache.org/core/docs/r0.18.2/api/index.html">APIๅ‚่€ƒ</a> </div> <div class="menuitem"> <a href="http://hadoop.apache.org/core/docs/r0.18.2/jdiff/changes.html">API Changes</a> </div> <div class="menuitem"> <a href="http://wiki.apache.org/hadoop/">็ปดๅŸบ</a> </div> <div class="menuitem"> <a href="http://wiki.apache.org/hadoop/FAQ">ๅธธ่ง้—ฎ้ข˜</a> </div> <div class="menuitem"> <a href="http://hadoop.apache.org/core/mailing_lists.html">้‚ฎไปถๅˆ—่กจ</a> </div> <div class="menuitem"> <a href="http://hadoop.apache.org/core/docs/r0.18.2/releasenotes.html">ๅ‘่กŒ่ฏดๆ˜Ž</a> </div> <div class="menuitem"> <a href="http://hadoop.apache.org/core/docs/r0.18.2/changes.html">ๅ˜ๆ›ดๆ—ฅๅฟ—</a> </div> </div> <div id="credit"></div> <div id="roundbottom"> <img style="display: none" class="corner" height="15" width="15" alt="" src="skin/images/rc-b-l-15-1body-2menu-3menu.png"></div> <!--+ |alternative credits +--> <div id="credit2"></div> </div> <!--+ |end Menu +--> <!--+ |start content +--> <div id="content"> <div title="Portable Document Format" class="pdflink"> <a class="dida" href="cluster_setup.pdf"><img alt="PDF -icon" src="skin/images/pdfdoc.gif" class="skin"><br> PDF</a> </div> <h1>Hadoop้›†็พคๆญๅปบ</h1> <div id="minitoc-area"> <ul class="minitoc"> <li> <a href="#%E7%9B%AE%E7%9A%84">็›ฎ็š„</a> </li> <li> <a href="#%E5%85%88%E5%86%B3%E6%9D%A1%E4%BB%B6">ๅ…ˆๅ†ณๆกไปถ</a> </li> <li> <a href="#%E5%AE%89%E8%A3%85">ๅฎ‰่ฃ…</a> </li> <li> <a href="#%E9%85%8D%E7%BD%AE">้…็ฝฎ</a> <ul class="minitoc"> <li> <a href="#%E9%85%8D%E7%BD%AE%E6%96%87%E4%BB%B6">้…็ฝฎๆ–‡ไปถ</a> </li> <li> <a href="#%E9%9B%86%E7%BE%A4%E9%85%8D%E7%BD%AE">้›†็พค้…็ฝฎ</a> <ul class="minitoc"> <li> <a href="#%E9%85%8D%E7%BD%AEHadoop%E5%AE%88%E6%8A%A4%E8%BF%9B%E7%A8%8B%E7%9A%84%E8%BF%90%E8%A1%8C%E7%8E%AF%E5%A2%83">้…็ฝฎHadoopๅฎˆๆŠค่ฟ›็จ‹็š„่ฟ่กŒ็Žฏๅขƒ</a> </li> <li> <a href="#%E9%85%8D%E7%BD%AEHadoop%E5%AE%88%E6%8A%A4%E8%BF%9B%E7%A8%8B%E7%9A%84%E8%BF%90%E8%A1%8C%E5%8F%82%E6%95%B0">้…็ฝฎHadoopๅฎˆๆŠค่ฟ›็จ‹็š„่ฟ่กŒๅ‚ๆ•ฐ</a> </li> <li> <a href="#Slaves">Slaves</a> </li> <li> <a href="#%E6%97%A5%E5%BF%97">ๆ—ฅๅฟ—</a> </li> </ul> </li> </ul> </li> <li> <a href="#Hadoop%E7%9A%84%E6%9C%BA%E6%9E%B6%E6%84%9F%E7%9F%A5">Hadoop็š„ๆœบๆžถๆ„Ÿ็Ÿฅ</a> </li> <li> <a href="#%E5%90%AF%E5%8A%A8Hadoop">ๅฏๅŠจHadoop</a> </li> <li> <a href="#%E5%81%9C%E6%AD%A2Hadoop">ๅœๆญขHadoop</a> </li> </ul> </div> <a name="N1000D"></a><a name="%E7%9B%AE%E7%9A%84"></a> <h2 class="h3">็›ฎ็š„</h2> <div class="section"> <p>ๆœฌๆ–‡ๆ่ฟฐไบ†ๅฆ‚ไฝ•ๅฎ‰่ฃ…ใ€้…็ฝฎๅ’Œ็ฎก็†ๆœ‰ๅฎž้™…ๆ„ไน‰็š„Hadoop้›†็พค๏ผŒๅ…ถ่ง„ๆจกๅฏไปŽๅ‡ ไธช่Š‚็‚น็š„ๅฐ้›†็พคๅˆฐๅ‡ ๅƒไธช่Š‚็‚น็š„่ถ…ๅคง้›†็พคใ€‚</p> <p>ๅฆ‚ๆžœไฝ ๅธŒๆœ›ๅœจๅ•ๆœบไธŠๅฎ‰่ฃ…Hadoop็Žฉ็Žฉ๏ผŒไปŽ<a href="quickstart.html">่ฟ™้‡Œ</a>่ƒฝๆ‰พๅˆฐ็›ธๅ…ณ็ป†่Š‚ใ€‚</p> </div> <a name="N1001E"></a><a name="%E5%85%88%E5%86%B3%E6%9D%A1%E4%BB%B6"></a> <h2 class="h3">ๅ…ˆๅ†ณๆกไปถ</h2> <div class="section"> <ol> <li> ็กฎไฟๅœจไฝ ้›†็พคไธญ็š„ๆฏไธช่Š‚็‚นไธŠ้ƒฝๅฎ‰่ฃ…ไบ†ๆ‰€ๆœ‰<a href="quickstart.html#PreReqs">ๅฟ…้œ€</a>่ฝฏไปถใ€‚ </li> <li> <a href="quickstart.html#%E4%B8%8B%E8%BD%BD">่Žทๅ–</a>Hadoop่ฝฏไปถๅŒ…ใ€‚ </li> </ol> </div> <a name="N10036"></a><a name="%E5%AE%89%E8%A3%85"></a> <h2 class="h3">ๅฎ‰่ฃ…</h2> <div class="section"> <p>ๅฎ‰่ฃ…Hadoop้›†็พค้€šๅธธ่ฆๅฐ†ๅฎ‰่ฃ…่ฝฏไปถ่งฃๅŽ‹ๅˆฐ้›†็พคๅ†…็š„ๆ‰€ๆœ‰ๆœบๅ™จไธŠใ€‚</p> <p>้€šๅธธ๏ผŒ้›†็พค้‡Œ็š„ไธ€ๅฐๆœบๅ™จ่ขซๆŒ‡ๅฎšไธบ <span class="codefrag">NameNode</span>๏ผŒๅฆไธ€ๅฐไธๅŒ็š„ๆœบๅ™จ่ขซๆŒ‡ๅฎšไธบ<span class="codefrag">JobTracker</span>ใ€‚่ฟ™ไบ›ๆœบๅ™จๆ˜ฏ<em>masters</em>ใ€‚ไฝ™ไธ‹็š„ๆœบๅ™จๅณไฝœไธบ<span class="codefrag">DataNode</span><em>ไนŸ</em>ไฝœไธบ<span class="codefrag">TaskTracker</span>ใ€‚่ฟ™ไบ›ๆœบๅ™จๆ˜ฏ<em>slaves</em>ใ€‚</p> <p>ๆˆ‘ไปฌ็”จ<span class="codefrag">HADOOP_HOME</span>ๆŒ‡ไปฃๅฎ‰่ฃ…็š„ๆ น่ทฏๅพ„ใ€‚้€šๅธธ๏ผŒ้›†็พค้‡Œ็š„ๆ‰€ๆœ‰ๆœบๅ™จ็š„<span class="codefrag">HADOOP_HOME</span>่ทฏๅพ„็›ธๅŒใ€‚</p> </div> <a name="N10060"></a><a name="%E9%85%8D%E7%BD%AE"></a> <h2 class="h3">้…็ฝฎ</h2> <div class="section"> <p>ๆŽฅไธ‹ๆฅ็š„ๅ‡ ่Š‚ๆ่ฟฐไบ†ๅฆ‚ไฝ•้…็ฝฎHadoop้›†็พคใ€‚</p> <a name="N10069"></a><a name="%E9%85%8D%E7%BD%AE%E6%96%87%E4%BB%B6"></a> <h3 class="h4">้…็ฝฎๆ–‡ไปถ</h3> <p>ๅฏนHadoop็š„้…็ฝฎ้€š่ฟ‡<span class="codefrag">conf/</span>็›ฎๅฝ•ไธ‹็š„ไธคไธช้‡่ฆ้…็ฝฎๆ–‡ไปถๅฎŒๆˆ๏ผš</p> <ol> <li> <a href="http://hadoop.apache.org/core/docs/current/hadoop-default.html">hadoop-default.xml</a> - ๅช่ฏป็š„้ป˜่ฎค้…็ฝฎใ€‚ </li> <li> <em>hadoop-site.xml</em> - ้›†็พค็‰นๆœ‰็š„้…็ฝฎใ€‚ </li> </ol> <p>่ฆไบ†่งฃๆ›ดๅคšๅ…ณไบŽ่ฟ™ไบ›้…็ฝฎๆ–‡ไปถๅฆ‚ไฝ•ๅฝฑๅ“Hadoopๆก†ๆžถ็š„็ป†่Š‚๏ผŒ่ฏท็œ‹<a href="http://hadoop.apache.org/core/docs/r0.18.2/api/org/apache/hadoop/conf/Configuration.html">่ฟ™้‡Œ</a>ใ€‚</p> <p>ๆญคๅค–๏ผŒ้€š่ฟ‡่ฎพ็ฝฎ<span class="codefrag">conf/hadoop-env.sh</span>ไธญ็š„ๅ˜้‡ไธบ้›†็พค็‰นๆœ‰็š„ๅ€ผ๏ผŒไฝ ๅฏไปฅๅฏน<span class="codefrag">bin/</span>็›ฎๅฝ•ไธ‹็š„Hadoop่„šๆœฌ่ฟ›่กŒๆŽงๅˆถใ€‚</p> <a name="N10096"></a><a name="%E9%9B%86%E7%BE%A4%E9%85%8D%E7%BD%AE"></a> <h3 class="h4">้›†็พค้…็ฝฎ</h3> <p>่ฆ้…็ฝฎHadoop้›†็พค๏ผŒไฝ ้œ€่ฆ่ฎพ็ฝฎHadoopๅฎˆๆŠค่ฟ›็จ‹็š„<em>่ฟ่กŒ็Žฏๅขƒ</em>ๅ’ŒHadoopๅฎˆๆŠค่ฟ›็จ‹็š„<em>่ฟ่กŒๅ‚ๆ•ฐ</em>ใ€‚</p> <p>HadoopๅฎˆๆŠค่ฟ›็จ‹ๆŒ‡<span class="codefrag">NameNode</span>/<span class="codefrag">DataNode</span> ๅ’Œ<span class="codefrag">JobTracker</span>/<span class="codefrag">TaskTracker</span>ใ€‚</p> <a name="N100B4"></a><a name="%E9%85%8D%E7%BD%AEHadoop%E5%AE%88%E6%8A%A4%E8%BF%9B%E7%A8%8B%E7%9A%84%E8%BF%90%E8%A1%8C%E7%8E%AF%E5%A2%83"></a> <h4>้…็ฝฎHadoopๅฎˆๆŠค่ฟ›็จ‹็š„่ฟ่กŒ็Žฏๅขƒ</h4> <p>็ฎก็†ๅ‘˜ๅฏๅœจ<span class="codefrag">conf/hadoop-env.sh</span>่„šๆœฌๅ†…ๅฏนHadoopๅฎˆๆŠค่ฟ›็จ‹็š„่ฟ่กŒ็Žฏๅขƒๅš็‰นๅˆซๆŒ‡ๅฎšใ€‚</p> <p>่‡ณๅฐ‘๏ผŒไฝ ๅพ—่ฎพๅฎš<span class="codefrag">JAVA_HOME</span>ไฝฟไน‹ๅœจๆฏไธ€่ฟœ็ซฏ่Š‚็‚นไธŠ้ƒฝ่ขซๆญฃ็กฎ่ฎพ็ฝฎใ€‚</p> <p>็ฎก็†ๅ‘˜ๅฏไปฅ้€š่ฟ‡้…็ฝฎ้€‰้กน<span class="codefrag">HADOOP_*_OPTS</span>ๆฅๅˆ†ๅˆซ้…็ฝฎๅ„ไธชๅฎˆๆŠค่ฟ›็จ‹ใ€‚ ไธ‹่กจๆ˜ฏๅฏไปฅ้…็ฝฎ็š„้€‰้กนใ€‚ </p> <table class="ForrestTable" cellspacing="1" cellpadding="4"> <tr> <th colspan="1" rowspan="1">ๅฎˆๆŠค่ฟ›็จ‹</th><th colspan="1" rowspan="1">้…็ฝฎ้€‰้กน</th> </tr> <tr> <td colspan="1" rowspan="1">NameNode</td><td colspan="1" rowspan="1">HADOOP_NAMENODE_OPTS</td> </tr> <tr> <td colspan="1" rowspan="1">DataNode</td><td colspan="1" rowspan="1">HADOOP_DATANODE_OPTS</td> </tr> <tr> <td colspan="1" rowspan="1">SecondaryNamenode</td> <td colspan="1" rowspan="1">HADOOP_SECONDARYNAMENODE_OPTS</td> </tr> <tr> <td colspan="1" rowspan="1">JobTracker</td><td colspan="1" rowspan="1">HADOOP_JOBTRACKER_OPTS</td> </tr> <tr> <td colspan="1" rowspan="1">TaskTracker</td><td colspan="1" rowspan="1">HADOOP_TASKTRACKER_OPTS</td> </tr> </table> <p>ไพ‹ๅฆ‚๏ผŒ้…็ฝฎNamenodeๆ—ถ,ไธบไบ†ไฝฟๅ…ถ่ƒฝๅคŸๅนถ่กŒๅ›žๆ”ถๅžƒๅœพ๏ผˆparallelGC๏ผ‰๏ผŒ ่ฆๆŠŠไธ‹้ข็š„ไปฃ็ ๅŠ ๅ…ฅๅˆฐ<span class="codefrag">hadoop-env.sh</span> : <br> <span class="codefrag"> export HADOOP_NAMENODE_OPTS="-XX:+UseParallelGC ${HADOOP_NAMENODE_OPTS}" </span> <br> </p> <p>ๅ…ถๅฎƒๅฏๅฎšๅˆถ็š„ๅธธ็”จๅ‚ๆ•ฐ่ฟ˜ๅŒ…ๆ‹ฌ๏ผš</p> <ul> <li> <span class="codefrag">HADOOP_LOG_DIR</span> - ๅฎˆๆŠค่ฟ›็จ‹ๆ—ฅๅฟ—ๆ–‡ไปถ็š„ๅญ˜ๆ”พ็›ฎๅฝ•ใ€‚ๅฆ‚ๆžœไธๅญ˜ๅœจไผš่ขซ่‡ชๅŠจๅˆ›ๅปบใ€‚ </li> <li> <span class="codefrag">HADOOP_HEAPSIZE</span> - ๆœ€ๅคงๅฏ็”จ็š„ๅ †ๅคงๅฐ๏ผŒๅ•ไฝไธบMBใ€‚ๆฏ”ๅฆ‚๏ผŒ<span class="codefrag">1000MB</span>ใ€‚ ่ฟ™ไธชๅ‚ๆ•ฐ็”จไบŽ่ฎพ็ฝฎhadoopๅฎˆๆŠค่ฟ›็จ‹็š„ๅ †ๅคงๅฐใ€‚็ผบ็œๅคงๅฐๆ˜ฏ<span class="codefrag">1000MB</span>ใ€‚ </li> </ul> <a name="N1012F"></a><a name="%E9%85%8D%E7%BD%AEHadoop%E5%AE%88%E6%8A%A4%E8%BF%9B%E7%A8%8B%E7%9A%84%E8%BF%90%E8%A1%8C%E5%8F%82%E6%95%B0"></a> <h4>้…็ฝฎHadoopๅฎˆๆŠค่ฟ›็จ‹็š„่ฟ่กŒๅ‚ๆ•ฐ</h4> <p>่ฟ™้ƒจๅˆ†ๆถ‰ๅŠHadoop้›†็พค็š„้‡่ฆๅ‚ๆ•ฐ๏ผŒ่ฟ™ไบ›ๅ‚ๆ•ฐๅœจ<span class="codefrag">conf/hadoop-site.xml</span>ไธญๆŒ‡ๅฎšใ€‚</p> <table class="ForrestTable" cellspacing="1" cellpadding="4"> <tr> <th colspan="1" rowspan="1">ๅ‚ๆ•ฐ</th> <th colspan="1" rowspan="1">ๅ–ๅ€ผ</th> <th colspan="1" rowspan="1">ๅค‡ๆณจ</th> </tr> <tr> <td colspan="1" rowspan="1">fs.default.name</td> <td colspan="1" rowspan="1"><span class="codefrag">NameNode</span>็š„URIใ€‚</td> <td colspan="1" rowspan="1"><em>hdfs://ไธปๆœบๅ/</em></td> </tr> <tr> <td colspan="1" rowspan="1">mapred.job.tracker</td> <td colspan="1" rowspan="1"><span class="codefrag">JobTracker</span>็š„ไธปๆœบ๏ผˆๆˆ–่€…IP๏ผ‰ๅ’Œ็ซฏๅฃใ€‚</td> <td colspan="1" rowspan="1"><em>ไธปๆœบ:็ซฏๅฃ</em>ใ€‚</td> </tr> <tr> <td colspan="1" rowspan="1">dfs.name.dir</td> <td colspan="1" rowspan="1"> <span class="codefrag">NameNode</span>ๆŒไน…ๅญ˜ๅ‚จๅๅญ—็ฉบ้—ดๅŠไบ‹ๅŠกๆ—ฅๅฟ—็š„ๆœฌๅœฐๆ–‡ไปถ็ณป็ปŸ่ทฏๅพ„ใ€‚</td> <td colspan="1" rowspan="1">ๅฝ“่ฟ™ไธชๅ€ผๆ˜ฏไธ€ไธช้€—ๅทๅˆ†ๅ‰ฒ็š„็›ฎๅฝ•ๅˆ—่กจๆ—ถ๏ผŒnametableๆ•ฐๆฎๅฐ†ไผš่ขซๅคๅˆถๅˆฐๆ‰€ๆœ‰็›ฎๅฝ•ไธญๅšๅ†—ไฝ™ๅค‡ไปฝใ€‚ </td> </tr> <tr> <td colspan="1" rowspan="1">dfs.data.dir</td> <td colspan="1" rowspan="1"> <span class="codefrag">DataNode</span>ๅญ˜ๆ”พๅ—ๆ•ฐๆฎ็š„ๆœฌๅœฐๆ–‡ไปถ็ณป็ปŸ่ทฏๅพ„๏ผŒ้€—ๅทๅˆ†ๅ‰ฒ็š„ๅˆ—่กจใ€‚ </td> <td colspan="1" rowspan="1"> ๅฝ“่ฟ™ไธชๅ€ผๆ˜ฏ้€—ๅทๅˆ†ๅ‰ฒ็š„็›ฎๅฝ•ๅˆ—่กจๆ—ถ๏ผŒๆ•ฐๆฎๅฐ†่ขซๅญ˜ๅ‚จๅœจๆ‰€ๆœ‰็›ฎๅฝ•ไธ‹๏ผŒ้€šๅธธๅˆ†ๅธƒๅœจไธๅŒ่ฎพๅค‡ไธŠใ€‚ </td> </tr> <tr> <td colspan="1" rowspan="1">mapred.system.dir</td> <td colspan="1" rowspan="1">Map/Reduceๆก†ๆžถๅญ˜ๅ‚จ็ณป็ปŸๆ–‡ไปถ็š„HDFS่ทฏๅพ„ใ€‚ๆฏ”ๅฆ‚<span class="codefrag">/hadoop/mapred/system/</span>ใ€‚ </td> <td colspan="1" rowspan="1">่ฟ™ไธช่ทฏๅพ„ๆ˜ฏ้ป˜่ฎคๆ–‡ไปถ็ณป็ปŸ๏ผˆHDFS๏ผ‰ไธ‹็š„่ทฏๅพ„๏ผŒ ้กปไปŽๆœๅŠกๅ™จๅ’Œๅฎขๆˆท็ซฏไธŠๅ‡ๅฏ่ฎฟ้—ฎใ€‚ </td> </tr> <tr> <td colspan="1" rowspan="1">mapred.local.dir</td> <td colspan="1" rowspan="1">ๆœฌๅœฐๆ–‡ไปถ็ณป็ปŸไธ‹้€—ๅทๅˆ†ๅ‰ฒ็š„่ทฏๅพ„ๅˆ—่กจ๏ผŒMap/Reduceไธดๆ—ถๆ•ฐๆฎๅญ˜ๆ”พ็š„ๅœฐๆ–นใ€‚ </td> <td colspan="1" rowspan="1">ๅคš่ทฏๅพ„ๆœ‰ๅŠฉไบŽๅˆฉ็”จ็ฃ็›˜i/oใ€‚</td> </tr> <tr> <td colspan="1" rowspan="1">mapred.tasktracker.{map|reduce}.tasks.maximum</td> <td colspan="1" rowspan="1">ๆŸไธ€<span class="codefrag">TaskTracker</span>ไธŠๅฏ่ฟ่กŒ็š„ๆœ€ๅคงMap/ReduceไปปๅŠกๆ•ฐ๏ผŒ่ฟ™ไบ›ไปปๅŠกๅฐ†ๅŒๆ—ถๅ„่‡ช่ฟ่กŒใ€‚ </td> <td colspan="1" rowspan="1"> ้ป˜่ฎคไธบ2๏ผˆ2ไธชmapๅ’Œ2ไธชreduce๏ผ‰๏ผŒๅฏไพๆฎ็กฌไปถๆƒ…ๅ†ตๆ›ดๆ”นใ€‚ </td> </tr> <tr> <td colspan="1" rowspan="1">dfs.hosts/dfs.hosts.exclude</td> <td colspan="1" rowspan="1">่ฎธๅฏ/ๆ‹’็ปDataNodeๅˆ—่กจใ€‚</td> <td colspan="1" rowspan="1"> ๅฆ‚ๆœ‰ๅฟ…่ฆ๏ผŒ็”จ่ฟ™ไธชๆ–‡ไปถๆŽงๅˆถ่ฎธๅฏ็š„datanodeๅˆ—่กจใ€‚ </td> </tr> <tr> <td colspan="1" rowspan="1">mapred.hosts/mapred.hosts.exclude</td> <td colspan="1" rowspan="1">่ฎธๅฏ/ๆ‹’็ปTaskTrackerๅˆ—่กจใ€‚</td> <td colspan="1" rowspan="1"> ๅฆ‚ๆœ‰ๅฟ…่ฆ๏ผŒ็”จ่ฟ™ไธชๆ–‡ไปถๆŽงๅˆถ่ฎธๅฏ็š„TaskTrackerๅˆ—่กจใ€‚ </td> </tr> </table> <p>้€šๅธธ๏ผŒไธŠ่ฟฐๅ‚ๆ•ฐ่ขซๆ ‡่ฎฐไธบ <a href="http://hadoop.apache.org/core/docs/r0.18.2/api/org/apache/hadoop/conf/Configuration.html#FinalParams"> final</a> ไปฅ็กฎไฟๅฎƒไปฌไธ่ขซ็”จๆˆทๅบ”็”จๆ›ดๆ”นใ€‚ </p> <a name="N1020C"></a><a name="%E7%8E%B0%E5%AE%9E%E4%B8%96%E7%95%8C%E7%9A%84%E9%9B%86%E7%BE%A4%E9%85%8D%E7%BD%AE"></a> <h5>็Žฐๅฎžไธ–็•Œ็š„้›†็พค้…็ฝฎ</h5> <p>่ฟ™่Š‚็ฝ—ๅˆ—ๅœจๅคง่ง„ๆจก้›†็พคไธŠ่ฟ่กŒ<em>sort</em>ๅŸบๅ‡†ๆต‹่ฏ•(benchmark)ๆ—ถไฝฟ็”จๅˆฐ็š„ไธ€ไบ›้ž็ผบ็œ้…็ฝฎใ€‚</p> <ul> <li> <p>่ฟ่กŒsort900็š„ไธ€ไบ›้ž็ผบ็œ้…็ฝฎๅ€ผ๏ผŒsort900ๅณๅœจ900ไธช่Š‚็‚น็š„้›†็พคไธŠๅฏน9TB็š„ๆ•ฐๆฎ่ฟ›่กŒๆŽ’ๅบ๏ผš</p> <table class="ForrestTable" cellspacing="1" cellpadding="4"> <tr> <th colspan="1" rowspan="1">ๅ‚ๆ•ฐ</th> <th colspan="1" rowspan="1">ๅ–ๅ€ผ</th> <th colspan="1" rowspan="1">ๅค‡ๆณจ</th> </tr> <tr> <td colspan="1" rowspan="1">dfs.block.size</td> <td colspan="1" rowspan="1">134217728</td> <td colspan="1" rowspan="1">้’ˆๅฏนๅคงๆ–‡ไปถ็ณป็ปŸ๏ผŒHDFS็š„ๅ—ๅคงๅฐๅ–128MBใ€‚</td> </tr> <tr> <td colspan="1" rowspan="1">dfs.namenode.handler.count</td> <td colspan="1" rowspan="1">40</td> <td colspan="1" rowspan="1"> ๅฏๅŠจๆ›ดๅคš็š„NameNodeๆœๅŠก็บฟ็จ‹ๅŽปๅค„็†ๆฅ่‡ชๅคง้‡DataNode็š„RPC่ฏทๆฑ‚ใ€‚ </td> </tr> <tr> <td colspan="1" rowspan="1">mapred.reduce.parallel.copies</td> <td colspan="1" rowspan="1">20</td> <td colspan="1" rowspan="1"> reduceๅฏๅŠจๆ›ดๅคš็š„ๅนถ่กŒๆ‹ท่ดๅ™จไปฅ่Žทๅ–ๅคง้‡map็š„่พ“ๅ‡บใ€‚ </td> </tr> <tr> <td colspan="1" rowspan="1">mapred.child.java.opts</td> <td colspan="1" rowspan="1">-Xmx512M</td> <td colspan="1" rowspan="1"> ไธบmap/reduceๅญ่™šๆ‹Ÿๆœบไฝฟ็”จๆ›ดๅคง็š„ๅ †ใ€‚ </td> </tr> <tr> <td colspan="1" rowspan="1">fs.inmemory.size.mb</td> <td colspan="1" rowspan="1">200</td> <td colspan="1" rowspan="1"> ไธบreduce้˜ถๆฎตๅˆๅนถmap่พ“ๅ‡บๆ‰€้œ€็š„ๅ†…ๅญ˜ๆ–‡ไปถ็ณป็ปŸๅˆ†้…ๆ›ดๅคš็š„ๅ†…ๅญ˜ใ€‚ </td> </tr> <tr> <td colspan="1" rowspan="1">io.sort.factor</td> <td colspan="1" rowspan="1">100</td> <td colspan="1" rowspan="1">ๆ–‡ไปถๆŽ’ๅบๆ—ถๆ›ดๅคš็š„ๆตๅฐ†ๅŒๆ—ถ่ขซๅฝ’ๅนถใ€‚</td> </tr> <tr> <td colspan="1" rowspan="1">io.sort.mb</td> <td colspan="1" rowspan="1">200</td> <td colspan="1" rowspan="1">ๆ้ซ˜ๆŽ’ๅบๆ—ถ็š„ๅ†…ๅญ˜ไธŠ้™ใ€‚</td> </tr> <tr> <td colspan="1" rowspan="1">io.file.buffer.size</td> <td colspan="1" rowspan="1">131072</td> <td colspan="1" rowspan="1">SequenceFileไธญ็”จๅˆฐ็š„่ฏป/ๅ†™็ผ“ๅญ˜ๅคงๅฐใ€‚</td> </tr> </table> </li> <li> <p>่ฟ่กŒsort1400ๅ’Œsort2000ๆ—ถ้œ€่ฆๆ›ดๆ–ฐ็š„้…็ฝฎ๏ผŒๅณๅœจ1400ไธช่Š‚็‚นไธŠๅฏน14TB็š„ๆ•ฐๆฎ่ฟ›่กŒๆŽ’ๅบๅ’Œๅœจ2000ไธช่Š‚็‚นไธŠๅฏน20TB็š„ๆ•ฐๆฎ่ฟ›่กŒๆŽ’ๅบ๏ผš</p> <table class="ForrestTable" cellspacing="1" cellpadding="4"> <tr> <th colspan="1" rowspan="1">ๅ‚ๆ•ฐ</th> <th colspan="1" rowspan="1">ๅ–ๅ€ผ</th> <th colspan="1" rowspan="1">ๅค‡ๆณจ</th> </tr> <tr> <td colspan="1" rowspan="1">mapred.job.tracker.handler.count</td> <td colspan="1" rowspan="1">60</td> <td colspan="1" rowspan="1"> ๅฏ็”จๆ›ดๅคš็š„JobTrackerๆœๅŠก็บฟ็จ‹ๅŽปๅค„็†ๆฅ่‡ชๅคง้‡TaskTracker็š„RPC่ฏทๆฑ‚ใ€‚ </td> </tr> <tr> <td colspan="1" rowspan="1">mapred.reduce.parallel.copies</td> <td colspan="1" rowspan="1">50</td> <td colspan="1" rowspan="1"></td> </tr> <tr> <td colspan="1" rowspan="1">tasktracker.http.threads</td> <td colspan="1" rowspan="1">50</td> <td colspan="1" rowspan="1"> ไธบTaskTracker็š„HttpๆœๅŠกๅฏ็”จๆ›ดๅคš็š„ๅทฅไฝœ็บฟ็จ‹ใ€‚reduce้€š่ฟ‡HttpๆœๅŠก่Žทๅ–map็š„ไธญ้—ด่พ“ๅ‡บใ€‚ </td> </tr> <tr> <td colspan="1" rowspan="1">mapred.child.java.opts</td> <td colspan="1" rowspan="1">-Xmx1024M</td> <td colspan="1" rowspan="1">ไฝฟ็”จๆ›ดๅคง็š„ๅ †็”จไบŽmaps/reduces็š„ๅญ่™šๆ‹Ÿๆœบ</td> </tr> </table> </li> </ul> <a name="N1032A"></a><a name="Slaves"></a> <h4>Slaves</h4> <p>้€šๅธธ๏ผŒไฝ ้€‰ๆ‹ฉ้›†็พคไธญ็š„ไธ€ๅฐๆœบๅ™จไฝœไธบ<span class="codefrag">NameNode</span>๏ผŒๅฆๅค–ไธ€ๅฐไธๅŒ็š„ๆœบๅ™จไฝœไธบ<span class="codefrag">JobTracker</span>ใ€‚ไฝ™ไธ‹็š„ๆœบๅ™จๅณไฝœไธบ<span class="codefrag">DataNode</span>ๅˆไฝœไธบ<span class="codefrag">TaskTracker</span>๏ผŒ่ฟ™ไบ›่ขซ็งฐไน‹ไธบ<em>slaves</em>ใ€‚</p> <p>ๅœจ<span class="codefrag">conf/slaves</span>ๆ–‡ไปถไธญๅˆ—ๅ‡บๆ‰€ๆœ‰slave็š„ไธปๆœบๅๆˆ–่€…IPๅœฐๅ€๏ผŒไธ€่กŒไธ€ไธชใ€‚</p> <a name="N10349"></a><a name="%E6%97%A5%E5%BF%97"></a> <h4>ๆ—ฅๅฟ—</h4> <p>Hadoopไฝฟ็”จ<a href="http://logging.apache.org/log4j/">Apache log4j</a>ๆฅ่ฎฐๅฝ•ๆ—ฅๅฟ—๏ผŒๅฎƒ็”ฑ<a href="http://commons.apache.org/logging/">Apache Commons Logging</a>ๆก†ๆžถๆฅๅฎž็Žฐใ€‚็ผ–่พ‘<span class="codefrag">conf/log4j.properties</span>ๆ–‡ไปถๅฏไปฅๆ”นๅ˜HadoopๅฎˆๆŠค่ฟ›็จ‹็š„ๆ—ฅๅฟ—้…็ฝฎ๏ผˆๆ—ฅๅฟ—ๆ ผๅผ็ญ‰๏ผ‰ใ€‚</p> <a name="N1035D"></a><a name="%E5%8E%86%E5%8F%B2%E6%97%A5%E5%BF%97"></a> <h5>ๅކๅฒๆ—ฅๅฟ—</h5> <p>ไฝœไธš็š„ๅކๅฒๆ–‡ไปถ้›†ไธญๅญ˜ๆ”พๅœจ<span class="codefrag">hadoop.job.history.location</span>๏ผŒ่ฟ™ไธชไนŸๅฏไปฅๆ˜ฏๅœจๅˆ†ๅธƒๅผๆ–‡ไปถ็ณป็ปŸไธ‹็š„่ทฏๅพ„๏ผŒๅ…ถ้ป˜่ฎคๅ€ผไธบ<span class="codefrag">${HADOOP_LOG_DIR}/history</span>ใ€‚jobtracker็š„web UIไธŠๆœ‰ๅކๅฒๆ—ฅๅฟ—็š„web UI้“พๆŽฅใ€‚</p> <p>ๅކๅฒๆ–‡ไปถๅœจ็”จๆˆทๆŒ‡ๅฎš็š„็›ฎๅฝ•<span class="codefrag">hadoop.job.history.user.location</span>ไนŸไผš่ฎฐๅฝ•ไธ€ไปฝ๏ผŒ่ฟ™ไธช้…็ฝฎ็š„็ผบ็œๅ€ผไธบไฝœไธš็š„่พ“ๅ‡บ็›ฎๅฝ•ใ€‚่ฟ™ไบ›ๆ–‡ไปถ่ขซๅญ˜ๆ”พๅœจๆŒ‡ๅฎš่ทฏๅพ„ไธ‹็š„&ldquo;_logs/history/&rdquo;็›ฎๅฝ•ไธญใ€‚ๅ› ๆญค๏ผŒ้ป˜่ฎคๆƒ…ๅ†ตไธ‹ๆ—ฅๅฟ—ๆ–‡ไปถไผšๅœจ&ldquo;mapred.output.dir/_logs/history/&rdquo;ไธ‹ใ€‚ๅฆ‚ๆžœๅฐ†<span class="codefrag">hadoop.job.history.user.location</span>ๆŒ‡ๅฎšไธบๅ€ผ<span class="codefrag">none</span>๏ผŒ็ณป็ปŸๅฐ†ไธๅ†่ฎฐๅฝ•ๆญคๆ—ฅๅฟ—ใ€‚</p> <p>็”จๆˆทๅฏไฝฟ็”จไปฅไธ‹ๅ‘ฝไปคๅœจๆŒ‡ๅฎš่ทฏๅพ„ไธ‹ๆŸฅ็œ‹ๅކๅฒๆ—ฅๅฟ—ๆฑ‡ๆ€ป<br> <span class="codefrag">$ bin/hadoop job -history output-dir</span> <br> ่ฟ™ๆกๅ‘ฝไปคไผšๆ˜พ็คบไฝœไธš็š„็ป†่Š‚ไฟกๆฏ๏ผŒๅคฑ่ดฅๅ’Œ็ปˆๆญข็š„ไปปๅŠก็ป†่Š‚ใ€‚ <br> ๅ…ณไบŽไฝœไธš็š„ๆ›ดๅคš็ป†่Š‚๏ผŒๆฏ”ๅฆ‚ๆˆๅŠŸ็š„ไปปๅŠก๏ผŒไปฅๅŠๅฏนๆฏไธชไปปๅŠก็š„ๆ‰€ๅš็š„ๅฐ่ฏ•ๆฌกๆ•ฐ็ญ‰ๅฏไปฅ็”จไธ‹้ข็š„ๅ‘ฝไปคๆŸฅ็œ‹<br> <span class="codefrag">$ bin/hadoop job -history all output-dir</span> <br> </p> <p>ไธ€ไฝ†ๅ…จ้ƒจๅฟ…่ฆ็š„้…็ฝฎๅฎŒๆˆ๏ผŒๅฐ†่ฟ™ไบ›ๆ–‡ไปถๅˆ†ๅ‘ๅˆฐๆ‰€ๆœ‰ๆœบๅ™จ็š„<span class="codefrag">HADOOP_CONF_DIR</span>่ทฏๅพ„ไธ‹๏ผŒ้€šๅธธๆ˜ฏ<span class="codefrag">${HADOOP_HOME}/conf</span>ใ€‚</p> </div> <a name="N10395"></a><a name="Hadoop%E7%9A%84%E6%9C%BA%E6%9E%B6%E6%84%9F%E7%9F%A5"></a> <h2 class="h3">Hadoop็š„ๆœบๆžถๆ„Ÿ็Ÿฅ</h2> <div class="section"> <p>HDFSๅ’ŒMap/Reduce็š„็ป„ไปถๆ˜ฏ่ƒฝๅคŸๆ„Ÿ็Ÿฅๆœบๆžถ็š„ใ€‚</p> <p> <span class="codefrag">NameNode</span>ๅ’Œ<span class="codefrag">JobTracker</span>้€š่ฟ‡่ฐƒ็”จ็ฎก็†ๅ‘˜้…็ฝฎๆจกๅ—ไธญ็š„API<a href="http://hadoop.apache.org/core/docs/r0.18.2/api/org/apache/hadoop/net/DNSToSwitchMapping.html#resolve(java.util.List)">resolve</a>ๆฅ่Žทๅ–้›†็พค้‡Œๆฏไธชslave็š„<span class="codefrag">ๆœบๆžถid</span>ใ€‚่ฏฅAPIๅฐ†slave็š„DNSๅ็งฐ๏ผˆๆˆ–่€…IPๅœฐๅ€๏ผ‰่ฝฌๆขๆˆๆœบๆžถidใ€‚ไฝฟ็”จๅ“ชไธชๆจกๅ—ๆ˜ฏ้€š่ฟ‡้…็ฝฎ้กน<span class="codefrag">topology.node.switch.mapping.impl</span>ๆฅๆŒ‡ๅฎš็š„ใ€‚ๆจกๅ—็š„้ป˜่ฎคๅฎž็Žฐไผš่ฐƒ็”จ<span class="codefrag">topology.script.file.name</span>้…็ฝฎ้กนๆŒ‡ๅฎš็š„ไธ€ไธช็š„่„šๆœฌ/ๅ‘ฝไปคใ€‚ ๅฆ‚ๆžœtopology.script.file.nameๆœช่ขซ่ฎพ็ฝฎ๏ผŒๅฏนไบŽๆ‰€ๆœ‰ไผ ๅ…ฅ็š„IPๅœฐๅ€๏ผŒๆจกๅ—ไผš่ฟ”ๅ›ž<span class="codefrag">/default-rack</span>ไฝœไธบๆœบๆžถidใ€‚ๅœจMap/Reduce้ƒจๅˆ†่ฟ˜ๆœ‰ไธ€ไธช้ขๅค–็š„้…็ฝฎ้กน<span class="codefrag">mapred.cache.task.levels</span>๏ผŒ่ฏฅๅ‚ๆ•ฐๅ†ณๅฎšcache็š„็บงๆ•ฐ๏ผˆๅœจ็ฝ‘็ปœๆ‹“ๆ‰‘ไธญ๏ผ‰ใ€‚ไพ‹ๅฆ‚๏ผŒๅฆ‚ๆžœ้ป˜่ฎคๅ€ผๆ˜ฏ2๏ผŒไผšๅปบ็ซ‹ไธค็บง็š„cache๏ผ ไธ€็บง้’ˆๅฏนไธปๆœบ๏ผˆไธปๆœบ -&gt; ไปปๅŠก็š„ๆ˜ ๅฐ„๏ผ‰ๅฆไธ€็บง้’ˆๅฏนๆœบๆžถ๏ผˆๆœบๆžถ -&gt; ไปปๅŠก็š„ๆ˜ ๅฐ„๏ผ‰ใ€‚ </p> </div> <a name="N103BA"></a><a name="%E5%90%AF%E5%8A%A8Hadoop"></a> <h2 class="h3">ๅฏๅŠจHadoop</h2> <div class="section"> <p>ๅฏๅŠจHadoop้›†็พค้œ€่ฆๅฏๅŠจHDFS้›†็พคๅ’ŒMap/Reduce้›†็พคใ€‚</p> <p> ๆ ผๅผๅŒ–ไธ€ไธชๆ–ฐ็š„ๅˆ†ๅธƒๅผๆ–‡ไปถ็ณป็ปŸ๏ผš<br> <span class="codefrag">$ bin/hadoop namenode -format</span> </p> <p> ๅœจๅˆ†้…็š„<span class="codefrag">NameNode</span>ไธŠ๏ผŒ่ฟ่กŒไธ‹้ข็š„ๅ‘ฝไปคๅฏๅŠจHDFS๏ผš<br> <span class="codefrag">$ bin/start-dfs.sh</span> </p> <p> <span class="codefrag">bin/start-dfs.sh</span>่„šๆœฌไผšๅ‚็…ง<span class="codefrag">NameNode</span>ไธŠ<span class="codefrag">${HADOOP_CONF_DIR}/slaves</span>ๆ–‡ไปถ็š„ๅ†…ๅฎน๏ผŒๅœจๆ‰€ๆœ‰ๅˆ—ๅ‡บ็š„slaveไธŠๅฏๅŠจ<span class="codefrag">DataNode</span>ๅฎˆๆŠค่ฟ›็จ‹ใ€‚</p> <p> ๅœจๅˆ†้…็š„<span class="codefrag">JobTracker</span>ไธŠ๏ผŒ่ฟ่กŒไธ‹้ข็š„ๅ‘ฝไปคๅฏๅŠจMap/Reduce๏ผš<br> <span class="codefrag">$ bin/start-mapred.sh</span> </p> <p> <span class="codefrag">bin/start-mapred.sh</span>่„šๆœฌไผšๅ‚็…ง<span class="codefrag">JobTracker</span>ไธŠ<span class="codefrag">${HADOOP_CONF_DIR}/slaves</span>ๆ–‡ไปถ็š„ๅ†…ๅฎน๏ผŒๅœจๆ‰€ๆœ‰ๅˆ—ๅ‡บ็š„slaveไธŠๅฏๅŠจ<span class="codefrag">TaskTracker</span>ๅฎˆๆŠค่ฟ›็จ‹ใ€‚</p> </div> <a name="N103FE"></a><a name="%E5%81%9C%E6%AD%A2Hadoop"></a> <h2 class="h3">ๅœๆญขHadoop</h2> <div class="section"> <p> ๅœจๅˆ†้…็š„<span class="codefrag">NameNode</span>ไธŠ๏ผŒๆ‰ง่กŒไธ‹้ข็š„ๅ‘ฝไปคๅœๆญขHDFS๏ผš<br> <span class="codefrag">$ bin/stop-dfs.sh</span> </p> <p> <span class="codefrag">bin/stop-dfs.sh</span>่„šๆœฌไผšๅ‚็…ง<span class="codefrag">NameNode</span>ไธŠ<span class="codefrag">${HADOOP_CONF_DIR}/slaves</span>ๆ–‡ไปถ็š„ๅ†…ๅฎน๏ผŒๅœจๆ‰€ๆœ‰ๅˆ—ๅ‡บ็š„slaveไธŠๅœๆญข<span class="codefrag">DataNode</span>ๅฎˆๆŠค่ฟ›็จ‹ใ€‚</p> <p> ๅœจๅˆ†้…็š„<span class="codefrag">JobTracker</span>ไธŠ๏ผŒ่ฟ่กŒไธ‹้ข็š„ๅ‘ฝไปคๅœๆญขMap/Reduce๏ผš<br> <span class="codefrag">$ bin/stop-mapred.sh</span> <br> </p> <p> <span class="codefrag">bin/stop-mapred.sh</span>่„šๆœฌไผšๅ‚็…ง<span class="codefrag">JobTracker</span>ไธŠ<span class="codefrag">${HADOOP_CONF_DIR}/slaves</span>ๆ–‡ไปถ็š„ๅ†…ๅฎน๏ผŒๅœจๆ‰€ๆœ‰ๅˆ—ๅ‡บ็š„slaveไธŠๅœๆญข<span class="codefrag">TaskTracker</span>ๅฎˆๆŠค่ฟ›็จ‹ใ€‚</p> </div> </div> <!--+ |end content +--> <div class="clearboth">&nbsp;</div> </div> <div id="footer"> <!--+ |start bottomstrip +--> <div class="lastmodified"> <script type="text/javascript"><!-- document.write("Last Published: " + document.lastModified); // --></script> </div> <div class="copyright"> Copyright &copy; 2007 <a href="http://www.apache.org/licenses/">The Apache Software Foundation.</a> </div> <!--+ |end bottomstrip +--> </div> </body> </html>
totemtang/hadoop-RHJoin
docs/cn/cluster_setup.html
HTML
apache-2.0
28,095
<!DOCTYPE html> <html> <!-- Copyright 2008 The Closure Library Authors. All Rights Reserved. Use of this source code is governed by the Apache License, Version 2.0. See the COPYING file for details. --> <!-- --> <head> <meta http-equiv="X-UA-Compatible" content="IE=edge" /> <meta charset="UTF-8" /> <title> Closure Unit Tests - goog.ui.style.app.MenuButtonRenderer </title> <script src="../../../base.js"> </script> <script> goog.require('goog.ui.style.app.MenuButtonRendererTest'); </script> </head> <body> <div id="sandbox"></div> <div id="button" title="Click for Decorated"> Hello Decorated </div> <div id="button-2" title="Click for Decorated"> Hello Decorated </div> <!-- The component DOM must always be created without whitespace. --> <div id="button-box" title="Click for Decorated Box" class="goog-menu-button goog-button-base"><div class="goog-inline-block goog-button-base-outer-box"><div class="goog-inline-block goog-button-base-inner-box"><div class="goog-button-base-pos"><div class="goog-button-base-top-shadow">&nbsp;</div><div class="goog-button-base-content">Hello Decorated Box<div class="goog-menu-button-dropdown"> </div></div></div></div></div></div> <div id="button-with-dom-content" class="goog-menu-button"> <strong>Hello Strong</strong> <em>Box</em> </div> <div id="button-with-menu" class="goog-menu-button"> Button with Menu <div class="goog-menu"> <div class="goog-menuitem">Item 1</div> <div class="goog-menuitem">Item 2</div> </div> </div> </body> </html>
ikabirov/closure-library
closure/goog/ui/style/app/menubuttonrenderer_test.html
HTML
apache-2.0
1,583
๏ปฟ// // Copyright (c) Microsoft. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // using System; using Microsoft.Azure.Common.OData; using Microsoft.Azure.Insights.Models; namespace Microsoft.Azure.Insights { /// <summary> /// The parameters to get the events for a subscription /// </summary> public class ListEventsParameters { /// <summary> /// Gets or sets the start time /// </summary> [FilterParameter("eventTimestamp", "O")] public DateTime EventTimestamp { get; set; } /// <summary> /// Gets or sets the event channel /// </summary> [FilterParameter("eventChannels")] public EventChannels? EventChannels { get; set; } /// <summary> /// Gets or sets the status /// </summary> [FilterParameter("status")] public string Status { get; set; } /// <summary> /// Gets or sets the caller /// </summary> [FilterParameter("caller")] public string Caller { get; set; } } /// <summary> /// The parameters to get the events for a correlation id /// </summary> public class ListEventsForCorrelationIdParameters : ListEventsParameters { /// <summary> /// Gets or sets the correlation id /// </summary> [FilterParameter("correlationId")] public string CorrelationId { get; set; } } /// <summary> /// The parameters to get the events for an event source /// </summary> public class ListEventsForEventSourceParameters : ListEventsParameters { /// <summary> /// Gets or sets the event source /// </summary> [FilterParameter("eventSource")] public string EventSource { get; set; } } /// <summary> /// The parameters to get the events for a resource /// </summary> public class ListEventsForResourceParameters : ListEventsParameters { /// <summary> /// Get or set the resource uri /// </summary> [FilterParameter("resourceUri")] public string ResourceUri { get; set; } } /// <summary> /// The parameters to get the events for a resource group /// </summary> public class ListEventsForResourceGroupParameters : ListEventsParameters { /// <summary> /// Get or set the resource group name /// </summary> [FilterParameter("resourceGroupName")] public string ResourceGroupName { get; set; } } /// <summary> /// The parameters to get the events for a resource provider /// </summary> public class ListEventsForResourceProviderParameters : ListEventsParameters { /// <summary> /// Get or set the resource provider /// </summary> [FilterParameter("resourceProvider")] public string ResourceProvider { get; set; } } /// <summary> /// The parameters to get the events for a event id /// </summary> public class ListEventsForEventIdParameters { /// <summary> /// Get or set the id /// </summary> [FilterParameter("id")] public string Id { get; set; } } }
msfcolombo/azure-sdk-for-net
src/ResourceManagement/Insights/Insights.Tests/Customizations/InsightsClient.ListEventsParameter.Customization.cs
C#
apache-2.0
3,739
<?php final class AlmanacManagementUntrustKeyWorkflow extends AlmanacManagementWorkflow { protected function didConstruct() { $this ->setName('untrust-key') ->setSynopsis(pht('Revoke trust of a public key.')) ->setArguments( array( array( 'name' => 'id', 'param' => 'id', 'help' => pht('ID of the key to revoke trust for.'), ), )); } public function execute(PhutilArgumentParser $args) { $console = PhutilConsole::getConsole(); $id = $args->getArg('id'); if (!$id) { throw new PhutilArgumentUsageException( pht('Specify a public key to revoke trust for with --id.')); } $key = id(new PhabricatorAuthSSHKeyQuery()) ->setViewer($this->getViewer()) ->withIDs(array($id)) ->executeOne(); if (!$key) { throw new PhutilArgumentUsageException( pht('No public key exists with ID "%s".', $id)); } if (!$key->getIsTrusted()) { throw new PhutilArgumentUsageException( pht('Public key with ID %s is not trusted.', $id)); } $key->setIsTrusted(0); $key->save(); $console->writeOut( "**<bg:green> %s </bg>** %s\n", pht('TRUST REVOKED'), pht('Trust has been revoked for public key %s.', $id)); } }
jwdeitch/phabricator
src/applications/almanac/management/AlmanacManagementUntrustKeyWorkflow.php
PHP
apache-2.0
1,321
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.zookeeper.test; import java.io.IOException; import java.util.Date; import java.util.LinkedList; import java.util.List; import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; import org.apache.zookeeper.CreateMode; import org.apache.zookeeper.ZooKeeper; import org.apache.zookeeper.AsyncCallback.Children2Callback; import org.apache.zookeeper.AsyncCallback.ChildrenCallback; import org.apache.zookeeper.AsyncCallback.StringCallback; import org.apache.zookeeper.AsyncCallback.VoidCallback; import org.apache.zookeeper.ZooDefs.Ids; import org.apache.zookeeper.data.Stat; import org.junit.Assert; import org.junit.Test; public class SyncCallTest extends ClientBase implements ChildrenCallback, Children2Callback, StringCallback, VoidCallback { private CountDownLatch opsCount; List<Integer> results = new LinkedList<Integer>(); Integer limit = 100 + 1 + 100 + 100; @Test public void testSync() throws Exception { try { LOG.info("Starting ZK:" + (new Date()).toString()); opsCount = new CountDownLatch(limit); ZooKeeper zk = createClient(); LOG.info("Beginning test:" + (new Date()).toString()); for(int i = 0; i < 100; i++) zk.create("/test" + i, new byte[0], Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT, this, results); zk.sync("/test", this, results); for(int i = 0; i < 100; i++) zk.delete("/test" + i, 0, this, results); for(int i = 0; i < 100; i++) zk.getChildren("/", new NullWatcher(), (ChildrenCallback)this, results); for(int i = 0; i < 100; i++) zk.getChildren("/", new NullWatcher(), (Children2Callback)this, results); LOG.info("Submitted all operations:" + (new Date()).toString()); if(!opsCount.await(10000, TimeUnit.MILLISECONDS)) Assert.fail("Haven't received all confirmations" + opsCount.getCount()); for(int i = 0; i < limit ; i++){ Assert.assertEquals(0, (int) results.get(i)); } } catch (IOException e) { System.out.println(e.toString()); } } @SuppressWarnings("unchecked") public void processResult(int rc, String path, Object ctx, List<String> children) { ((List<Integer>)ctx).add(rc); opsCount.countDown(); } @SuppressWarnings("unchecked") public void processResult(int rc, String path, Object ctx, List<String> children, Stat stat) { ((List<Integer>)ctx).add(rc); opsCount.countDown(); } @SuppressWarnings("unchecked") public void processResult(int rc, String path, Object ctx, String name){ ((List<Integer>) ctx).add(rc); opsCount.countDown(); } @SuppressWarnings("unchecked") public void processResult(int rc, String path, Object ctx){ ((List<Integer>) ctx).add(rc); opsCount.countDown(); } }
yining0417/zookeeper
src/java/test/org/apache/zookeeper/test/SyncCallTest.java
Java
apache-2.0
3,966
/* * Copyright 2000-2014 JetBrains s.r.o. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.jetbrains.plugins.groovy.codeInspection.untypedUnresolvedAccess; import com.intellij.psi.PsiClassType; import com.intellij.psi.PsiElement; import com.intellij.psi.PsiPackage; import com.intellij.psi.PsiType; import org.jetbrains.annotations.Nls; import org.jetbrains.annotations.NotNull; import org.jetbrains.plugins.groovy.annotator.GrHighlightUtil; import org.jetbrains.plugins.groovy.codeInspection.BaseInspection; import org.jetbrains.plugins.groovy.codeInspection.BaseInspectionVisitor; import org.jetbrains.plugins.groovy.lang.psi.api.GroovyResolveResult; import org.jetbrains.plugins.groovy.lang.psi.api.statements.expressions.GrExpression; import org.jetbrains.plugins.groovy.lang.psi.api.statements.expressions.GrReferenceExpression; import org.jetbrains.plugins.groovy.lang.psi.util.PsiUtil; /** * @author Maxim.Medvedev */ public class GroovyUntypedAccessInspection extends BaseInspection { @Override @NotNull protected BaseInspectionVisitor buildVisitor() { return new BaseInspectionVisitor() { @Override public void visitReferenceExpression(GrReferenceExpression refExpr) { super.visitReferenceExpression(refExpr); if (PsiUtil.isThisOrSuperRef(refExpr)) return; GroovyResolveResult resolveResult = refExpr.advancedResolve(); PsiElement resolved = resolveResult.getElement(); if (resolved != null) { if (GrHighlightUtil.isDeclarationAssignment(refExpr) || resolved instanceof PsiPackage) return; } else { GrExpression qualifier = refExpr.getQualifierExpression(); if (qualifier == null && GrHighlightUtil.isDeclarationAssignment(refExpr)) return; } final PsiType refExprType = refExpr.getType(); if (refExprType == null) { if (resolved != null) { registerError(refExpr); } } else if (refExprType instanceof PsiClassType && ((PsiClassType)refExprType).resolve() == null) { registerError(refExpr); } } }; } @Override @Nls @NotNull public String getGroupDisplayName() { return PROBABLE_BUGS; } @Override @Nls @NotNull public String getDisplayName() { return "Access to untyped expression"; } @Override protected String buildErrorString(Object... args) { return "Cannot determine type of '#ref'"; } }
ryano144/intellij-community
plugins/groovy/groovy-psi/src/org/jetbrains/plugins/groovy/codeInspection/untypedUnresolvedAccess/GroovyUntypedAccessInspection.java
Java
apache-2.0
2,983
/* Copyright 2014 The Kubernetes Authors All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package predicates import ( "fmt" "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/client" "k8s.io/kubernetes/pkg/labels" "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm" "github.com/golang/glog" ) type NodeInfo interface { GetNodeInfo(nodeID string) (*api.Node, error) } type StaticNodeInfo struct { *api.NodeList } func (nodes StaticNodeInfo) GetNodeInfo(nodeID string) (*api.Node, error) { for ix := range nodes.Items { if nodes.Items[ix].Name == nodeID { return &nodes.Items[ix], nil } } return nil, fmt.Errorf("failed to find node: %s, %#v", nodeID, nodes) } type ClientNodeInfo struct { *client.Client } func (nodes ClientNodeInfo) GetNodeInfo(nodeID string) (*api.Node, error) { return nodes.Nodes().Get(nodeID) } func isVolumeConflict(volume api.Volume, pod *api.Pod) bool { if volume.GCEPersistentDisk != nil { disk := volume.GCEPersistentDisk manifest := &(pod.Spec) for ix := range manifest.Volumes { if manifest.Volumes[ix].GCEPersistentDisk != nil && manifest.Volumes[ix].GCEPersistentDisk.PDName == disk.PDName && !(manifest.Volumes[ix].GCEPersistentDisk.ReadOnly && disk.ReadOnly) { return true } } } if volume.AWSElasticBlockStore != nil { volumeID := volume.AWSElasticBlockStore.VolumeID manifest := &(pod.Spec) for ix := range manifest.Volumes { if manifest.Volumes[ix].AWSElasticBlockStore != nil && manifest.Volumes[ix].AWSElasticBlockStore.VolumeID == volumeID { return true } } } return false } // NoDiskConflict evaluates if a pod can fit due to the volumes it requests, and those that // are already mounted. Some times of volumes are mounted onto node machines. For now, these mounts // are exclusive so if there is already a volume mounted on that node, another pod can't schedule // there. This is GCE and Amazon EBS specific for now. // TODO: migrate this into some per-volume specific code? func NoDiskConflict(pod *api.Pod, existingPods []*api.Pod, node string) (bool, error) { manifest := &(pod.Spec) for ix := range manifest.Volumes { for podIx := range existingPods { if isVolumeConflict(manifest.Volumes[ix], existingPods[podIx]) { return false, nil } } } return true, nil } type ResourceFit struct { info NodeInfo } type resourceRequest struct { milliCPU int64 memory int64 } var FailedResourceType string func getResourceRequest(pod *api.Pod) resourceRequest { result := resourceRequest{} for _, container := range pod.Spec.Containers { requests := container.Resources.Requests result.memory += requests.Memory().Value() result.milliCPU += requests.Cpu().MilliValue() } return result } func CheckPodsExceedingFreeResources(pods []*api.Pod, capacity api.ResourceList) (fitting []*api.Pod, notFittingCPU, notFittingMemory []*api.Pod) { totalMilliCPU := capacity.Cpu().MilliValue() totalMemory := capacity.Memory().Value() milliCPURequested := int64(0) memoryRequested := int64(0) for _, pod := range pods { podRequest := getResourceRequest(pod) fitsCPU := totalMilliCPU == 0 || (totalMilliCPU-milliCPURequested) >= podRequest.milliCPU fitsMemory := totalMemory == 0 || (totalMemory-memoryRequested) >= podRequest.memory if !fitsCPU { // the pod doesn't fit due to CPU limit notFittingCPU = append(notFittingCPU, pod) continue } if !fitsMemory { // the pod doesn't fit due to Memory limit notFittingMemory = append(notFittingMemory, pod) continue } // the pod fits milliCPURequested += podRequest.milliCPU memoryRequested += podRequest.memory fitting = append(fitting, pod) } return } // PodFitsResources calculates fit based on requested, rather than used resources func (r *ResourceFit) PodFitsResources(pod *api.Pod, existingPods []*api.Pod, node string) (bool, error) { podRequest := getResourceRequest(pod) info, err := r.info.GetNodeInfo(node) if err != nil { return false, err } if podRequest.milliCPU == 0 && podRequest.memory == 0 { return int64(len(existingPods)) < info.Status.Capacity.Pods().Value(), nil } pods := []*api.Pod{} copy(pods, existingPods) pods = append(existingPods, pod) _, exceedingCPU, exceedingMemory := CheckPodsExceedingFreeResources(pods, info.Status.Capacity) if int64(len(pods)) > info.Status.Capacity.Pods().Value() { glog.V(4).Infof("Cannot schedule Pod %v, because Node %v is full, running %v out of %v Pods.", pod, node, len(pods)-1, info.Status.Capacity.Pods().Value()) FailedResourceType = "PodExceedsMaxPodNumber" return false, nil } if len(exceedingCPU) > 0 { glog.V(4).Infof("Cannot schedule Pod %v, because Node does not have sufficient CPU", pod) FailedResourceType = "PodExceedsFreeCPU" return false, nil } if len(exceedingMemory) > 0 { glog.V(4).Infof("Cannot schedule Pod %v, because Node does not have sufficient Memory", pod) FailedResourceType = "PodExceedsFreeMemory" return false, nil } glog.V(4).Infof("Schedule Pod %v on Node %v is allowed, Node is running only %v out of %v Pods.", pod, node, len(pods)-1, info.Status.Capacity.Pods().Value()) return true, nil } func NewResourceFitPredicate(info NodeInfo) algorithm.FitPredicate { fit := &ResourceFit{ info: info, } return fit.PodFitsResources } func NewSelectorMatchPredicate(info NodeInfo) algorithm.FitPredicate { selector := &NodeSelector{ info: info, } return selector.PodSelectorMatches } func PodMatchesNodeLabels(pod *api.Pod, node *api.Node) bool { if len(pod.Spec.NodeSelector) == 0 { return true } selector := labels.SelectorFromSet(pod.Spec.NodeSelector) return selector.Matches(labels.Set(node.Labels)) } type NodeSelector struct { info NodeInfo } func (n *NodeSelector) PodSelectorMatches(pod *api.Pod, existingPods []*api.Pod, node string) (bool, error) { minion, err := n.info.GetNodeInfo(node) if err != nil { return false, err } return PodMatchesNodeLabels(pod, minion), nil } func PodFitsHost(pod *api.Pod, existingPods []*api.Pod, node string) (bool, error) { if len(pod.Spec.NodeName) == 0 { return true, nil } return pod.Spec.NodeName == node, nil } type NodeLabelChecker struct { info NodeInfo labels []string presence bool } func NewNodeLabelPredicate(info NodeInfo, labels []string, presence bool) algorithm.FitPredicate { labelChecker := &NodeLabelChecker{ info: info, labels: labels, presence: presence, } return labelChecker.CheckNodeLabelPresence } // CheckNodeLabelPresence checks whether all of the specified labels exists on a minion or not, regardless of their value // If "presence" is false, then returns false if any of the requested labels matches any of the minion's labels, // otherwise returns true. // If "presence" is true, then returns false if any of the requested labels does not match any of the minion's labels, // otherwise returns true. // // Consider the cases where the minions are placed in regions/zones/racks and these are identified by labels // In some cases, it is required that only minions that are part of ANY of the defined regions/zones/racks be selected // // Alternately, eliminating minions that have a certain label, regardless of value, is also useful // A minion may have a label with "retiring" as key and the date as the value // and it may be desirable to avoid scheduling new pods on this minion func (n *NodeLabelChecker) CheckNodeLabelPresence(pod *api.Pod, existingPods []*api.Pod, node string) (bool, error) { var exists bool minion, err := n.info.GetNodeInfo(node) if err != nil { return false, err } minionLabels := labels.Set(minion.Labels) for _, label := range n.labels { exists = minionLabels.Has(label) if (exists && !n.presence) || (!exists && n.presence) { return false, nil } } return true, nil } type ServiceAffinity struct { podLister algorithm.PodLister serviceLister algorithm.ServiceLister nodeInfo NodeInfo labels []string } func NewServiceAffinityPredicate(podLister algorithm.PodLister, serviceLister algorithm.ServiceLister, nodeInfo NodeInfo, labels []string) algorithm.FitPredicate { affinity := &ServiceAffinity{ podLister: podLister, serviceLister: serviceLister, nodeInfo: nodeInfo, labels: labels, } return affinity.CheckServiceAffinity } // CheckServiceAffinity ensures that only the minions that match the specified labels are considered for scheduling. // The set of labels to be considered are provided to the struct (ServiceAffinity). // The pod is checked for the labels and any missing labels are then checked in the minion // that hosts the service pods (peers) for the given pod. // // We add an implicit selector requiring some particular value V for label L to a pod, if: // - L is listed in the ServiceAffinity object that is passed into the function // - the pod does not have any NodeSelector for L // - some other pod from the same service is already scheduled onto a minion that has value V for label L func (s *ServiceAffinity) CheckServiceAffinity(pod *api.Pod, existingPods []*api.Pod, node string) (bool, error) { var affinitySelector labels.Selector // check if the pod being scheduled has the affinity labels specified in its NodeSelector affinityLabels := map[string]string{} nodeSelector := labels.Set(pod.Spec.NodeSelector) labelsExist := true for _, l := range s.labels { if nodeSelector.Has(l) { affinityLabels[l] = nodeSelector.Get(l) } else { // the current pod does not specify all the labels, look in the existing service pods labelsExist = false } } // skip looking at other pods in the service if the current pod defines all the required affinity labels if !labelsExist { services, err := s.serviceLister.GetPodServices(pod) if err == nil { // just use the first service and get the other pods within the service // TODO: a separate predicate can be created that tries to handle all services for the pod selector := labels.SelectorFromSet(services[0].Spec.Selector) servicePods, err := s.podLister.List(selector) if err != nil { return false, err } // consider only the pods that belong to the same namespace nsServicePods := []*api.Pod{} for _, nsPod := range servicePods { if nsPod.Namespace == pod.Namespace { nsServicePods = append(nsServicePods, nsPod) } } if len(nsServicePods) > 0 { // consider any service pod and fetch the minion its hosted on otherMinion, err := s.nodeInfo.GetNodeInfo(nsServicePods[0].Spec.NodeName) if err != nil { return false, err } for _, l := range s.labels { // If the pod being scheduled has the label value specified, do not override it if _, exists := affinityLabels[l]; exists { continue } if labels.Set(otherMinion.Labels).Has(l) { affinityLabels[l] = labels.Set(otherMinion.Labels).Get(l) } } } } } // if there are no existing pods in the service, consider all minions if len(affinityLabels) == 0 { affinitySelector = labels.Everything() } else { affinitySelector = labels.Set(affinityLabels).AsSelector() } minion, err := s.nodeInfo.GetNodeInfo(node) if err != nil { return false, err } // check if the minion matches the selector return affinitySelector.Matches(labels.Set(minion.Labels)), nil } func PodFitsPorts(pod *api.Pod, existingPods []*api.Pod, node string) (bool, error) { existingPorts := getUsedPorts(existingPods...) wantPorts := getUsedPorts(pod) for wport := range wantPorts { if wport == 0 { continue } if existingPorts[wport] { return false, nil } } return true, nil } func getUsedPorts(pods ...*api.Pod) map[int]bool { ports := make(map[int]bool) for _, pod := range pods { for _, container := range pod.Spec.Containers { for _, podPort := range container.Ports { ports[podPort.HostPort] = true } } } return ports } func filterNonRunningPods(pods []*api.Pod) []*api.Pod { if len(pods) == 0 { return pods } result := []*api.Pod{} for _, pod := range pods { if pod.Status.Phase == api.PodSucceeded || pod.Status.Phase == api.PodFailed { continue } result = append(result, pod) } return result } // MapPodsToMachines obtains a list of pods and pivots that list into a map where the keys are host names // and the values are the list of pods running on that host. func MapPodsToMachines(lister algorithm.PodLister) (map[string][]*api.Pod, error) { machineToPods := map[string][]*api.Pod{} // TODO: perform more targeted query... pods, err := lister.List(labels.Everything()) if err != nil { return map[string][]*api.Pod{}, err } pods = filterNonRunningPods(pods) for _, scheduledPod := range pods { host := scheduledPod.Spec.NodeName machineToPods[host] = append(machineToPods[host], scheduledPod) } return machineToPods, nil }
okhomenko/kubernetes
plugin/pkg/scheduler/algorithm/predicates/predicates.go
GO
apache-2.0
13,337
var baseGet = require('../internal/baseGet'), baseSlice = require('../internal/baseSlice'), isFunction = require('../lang/isFunction'), isKey = require('../internal/isKey'), last = require('../array/last'), toPath = require('../internal/toPath'); /** * This method is like `_.get` except that if the resolved value is a function * it's invoked with the `this` binding of its parent object and its result * is returned. * * @static * @memberOf _ * @category Object * @param {Object} object The object to query. * @param {Array|string} path The path of the property to resolve. * @param {*} [defaultValue] The value returned if the resolved value is `undefined`. * @returns {*} Returns the resolved value. * @example * * var object = { 'a': [{ 'b': { 'c1': 3, 'c2': _.constant(4) } }] }; * * _.result(object, 'a[0].b.c1'); * // => 3 * * _.result(object, 'a[0].b.c2'); * // => 4 * * _.result(object, 'a.b.c', 'default'); * // => 'default' * * _.result(object, 'a.b.c', _.constant('default')); * // => 'default' */ function result(object, path, defaultValue) { var result = object == null ? undefined : object[path]; if (result === undefined) { if (object != null && !isKey(path, object)) { path = toPath(path); object = path.length == 1 ? object : baseGet(object, baseSlice(path, 0, -1)); result = object == null ? undefined : object[last(path)]; } result = result === undefined ? defaultValue : result; } return isFunction(result) ? result.call(object) : result; } module.exports = result;
magnumresearch/manatee
node_modules/opt-merger/node_modules/lodash/object/result.js
JavaScript
apache-2.0
1,575
/* Copyright 2017 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package integration import ( "reflect" "sort" "testing" "time" apiextensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" "k8s.io/apiextensions-apiserver/test/integration/testserver" "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/apimachinery/pkg/watch" "k8s.io/client-go/dynamic" ) func TestServerUp(t *testing.T) { stopCh, _, _, err := testserver.StartDefaultServer() if err != nil { t.Fatal(err) } defer close(stopCh) } func TestNamespaceScopedCRUD(t *testing.T) { stopCh, apiExtensionClient, clientPool, err := testserver.StartDefaultServer() if err != nil { t.Fatal(err) } defer close(stopCh) noxuDefinition := testserver.NewNoxuCustomResourceDefinition(apiextensionsv1beta1.NamespaceScoped) noxuVersionClient, err := testserver.CreateNewCustomResourceDefinition(noxuDefinition, apiExtensionClient, clientPool) if err != nil { t.Fatal(err) } ns := "not-the-default" testSimpleCRUD(t, ns, noxuDefinition, noxuVersionClient) } func TestClusterScopedCRUD(t *testing.T) { stopCh, apiExtensionClient, clientPool, err := testserver.StartDefaultServer() if err != nil { t.Fatal(err) } defer close(stopCh) noxuDefinition := testserver.NewNoxuCustomResourceDefinition(apiextensionsv1beta1.ClusterScoped) noxuVersionClient, err := testserver.CreateNewCustomResourceDefinition(noxuDefinition, apiExtensionClient, clientPool) if err != nil { t.Fatal(err) } ns := "" testSimpleCRUD(t, ns, noxuDefinition, noxuVersionClient) } func testSimpleCRUD(t *testing.T, ns string, noxuDefinition *apiextensionsv1beta1.CustomResourceDefinition, noxuVersionClient dynamic.Interface) { noxuResourceClient := NewNamespacedCustomResourceClient(ns, noxuVersionClient, noxuDefinition) initialList, err := noxuResourceClient.List(metav1.ListOptions{}) if err != nil { t.Fatal(err) } if e, a := 0, len(initialList.(*unstructured.UnstructuredList).Items); e != a { t.Errorf("expected %v, got %v", e, a) } initialListTypeMeta, err := meta.TypeAccessor(initialList) if err != nil { t.Fatal(err) } if e, a := noxuDefinition.Spec.Group+"/"+noxuDefinition.Spec.Version, initialListTypeMeta.GetAPIVersion(); e != a { t.Errorf("expected %v, got %v", e, a) } if e, a := noxuDefinition.Spec.Names.ListKind, initialListTypeMeta.GetKind(); e != a { t.Errorf("expected %v, got %v", e, a) } initialListListMeta, err := meta.ListAccessor(initialList) if err != nil { t.Fatal(err) } noxuWatch, err := noxuResourceClient.Watch(metav1.ListOptions{ResourceVersion: initialListListMeta.GetResourceVersion()}) if err != nil { t.Fatal(err) } defer noxuWatch.Stop() createdNoxuInstance, err := instantiateCustomResource(t, testserver.NewNoxuInstance(ns, "foo"), noxuResourceClient, noxuDefinition) if err != nil { t.Fatalf("unable to create noxu Instance:%v", err) } select { case watchEvent := <-noxuWatch.ResultChan(): if e, a := watch.Added, watchEvent.Type; e != a { t.Errorf("expected %v, got %v", e, a) break } createdObjectMeta, err := meta.Accessor(watchEvent.Object) if err != nil { t.Fatal(err) } // it should have a UUID if len(createdObjectMeta.GetUID()) == 0 { t.Errorf("missing uuid: %#v", watchEvent.Object) } if e, a := ns, createdObjectMeta.GetNamespace(); e != a { t.Errorf("expected %v, got %v", e, a) } createdTypeMeta, err := meta.TypeAccessor(watchEvent.Object) if err != nil { t.Fatal(err) } if e, a := noxuDefinition.Spec.Group+"/"+noxuDefinition.Spec.Version, createdTypeMeta.GetAPIVersion(); e != a { t.Errorf("expected %v, got %v", e, a) } if e, a := noxuDefinition.Spec.Names.Kind, createdTypeMeta.GetKind(); e != a { t.Errorf("expected %v, got %v", e, a) } case <-time.After(5 * time.Second): t.Errorf("missing watch event") } gottenNoxuInstance, err := noxuResourceClient.Get("foo", metav1.GetOptions{}) if err != nil { t.Fatal(err) } if e, a := createdNoxuInstance, gottenNoxuInstance; !reflect.DeepEqual(e, a) { t.Errorf("expected %v, got %v", e, a) } listWithItem, err := noxuResourceClient.List(metav1.ListOptions{}) if err != nil { t.Fatal(err) } if e, a := 1, len(listWithItem.(*unstructured.UnstructuredList).Items); e != a { t.Errorf("expected %v, got %v", e, a) } if e, a := *createdNoxuInstance, listWithItem.(*unstructured.UnstructuredList).Items[0]; !reflect.DeepEqual(e, a) { t.Errorf("expected %v, got %v", e, a) } if err := noxuResourceClient.Delete("foo", nil); err != nil { t.Fatal(err) } listWithoutItem, err := noxuResourceClient.List(metav1.ListOptions{}) if err != nil { t.Fatal(err) } if e, a := 0, len(listWithoutItem.(*unstructured.UnstructuredList).Items); e != a { t.Errorf("expected %v, got %v", e, a) } select { case watchEvent := <-noxuWatch.ResultChan(): if e, a := watch.Deleted, watchEvent.Type; e != a { t.Errorf("expected %v, got %v", e, a) break } deletedObjectMeta, err := meta.Accessor(watchEvent.Object) if err != nil { t.Fatal(err) } // it should have a UUID createdObjectMeta, err := meta.Accessor(createdNoxuInstance) if err != nil { t.Fatal(err) } if e, a := createdObjectMeta.GetUID(), deletedObjectMeta.GetUID(); e != a { t.Errorf("expected %v, got %v", e, a) } case <-time.After(5 * time.Second): t.Errorf("missing watch event") } } func TestDiscovery(t *testing.T) { group := "mygroup.example.com" version := "v1beta1" stopCh, apiExtensionClient, clientPool, err := testserver.StartDefaultServer() if err != nil { t.Fatal(err) } defer close(stopCh) scope := apiextensionsv1beta1.NamespaceScoped noxuDefinition := testserver.NewNoxuCustomResourceDefinition(scope) _, err = testserver.CreateNewCustomResourceDefinition(noxuDefinition, apiExtensionClient, clientPool) if err != nil { t.Fatal(err) } // check whether it shows up in discovery properly resources, err := apiExtensionClient.Discovery().ServerResourcesForGroupVersion(group + "/" + version) if err != nil { t.Fatal(err) } if len(resources.APIResources) != 1 { t.Fatalf("Expected exactly the resource \"noxus\" in group version %v/%v via discovery, got: %v", group, version, resources.APIResources) } r := resources.APIResources[0] if r.Name != "noxus" { t.Fatalf("Expected exactly the resource \"noxus\" in group version %v/%v via discovery, got: %v", group, version, r.Name) } if r.Kind != "WishIHadChosenNoxu" { t.Fatalf("Expected exactly the kind \"WishIHadChosenNoxu\" in group version %v/%v via discovery, got: %v", group, version, r.Kind) } s := []string{"foo", "bar", "abc", "def"} if !reflect.DeepEqual(r.ShortNames, s) { t.Fatalf("Expected exactly the shortnames `foo, bar, abc, def` in group version %v/%v via discovery, got: %v", group, version, r.ShortNames) } sort.Strings(r.Verbs) expectedVerbs := []string{"create", "delete", "deletecollection", "get", "list", "patch", "update", "watch"} if !reflect.DeepEqual([]string(r.Verbs), expectedVerbs) { t.Fatalf("Unexpected verbs for resource \"noxus\" in group version %v/%v via discovery: expected=%v got=%v", group, version, expectedVerbs, r.Verbs) } } func TestNoNamespaceReject(t *testing.T) { stopCh, apiExtensionClient, clientPool, err := testserver.StartDefaultServer() if err != nil { t.Fatal(err) } defer close(stopCh) noxuDefinition := testserver.NewNoxuCustomResourceDefinition(apiextensionsv1beta1.NamespaceScoped) noxuVersionClient, err := testserver.CreateNewCustomResourceDefinition(noxuDefinition, apiExtensionClient, clientPool) if err != nil { t.Fatal(err) } ns := "" noxuResourceClient := NewNamespacedCustomResourceClient(ns, noxuVersionClient, noxuDefinition) initialList, err := noxuResourceClient.List(metav1.ListOptions{}) if err != nil { t.Fatal(err) } if e, a := 0, len(initialList.(*unstructured.UnstructuredList).Items); e != a { t.Errorf("expected %v, got %v", e, a) } initialListTypeMeta, err := meta.TypeAccessor(initialList) if err != nil { t.Fatal(err) } if e, a := noxuDefinition.Spec.Group+"/"+noxuDefinition.Spec.Version, initialListTypeMeta.GetAPIVersion(); e != a { t.Errorf("expected %v, got %v", e, a) } if e, a := noxuDefinition.Spec.Names.ListKind, initialListTypeMeta.GetKind(); e != a { t.Errorf("expected %v, got %v", e, a) } createdNoxuInstance, err := instantiateCustomResource(t, testserver.NewNoxuInstance(ns, "foo"), noxuResourceClient, noxuDefinition) if err == nil { t.Fatalf("unexpected non-error: an empty namespace may not be set during creation while creating noxu instance: %v ", createdNoxuInstance) } } func TestSameNameDiffNamespace(t *testing.T) { stopCh, apiExtensionClient, clientPool, err := testserver.StartDefaultServer() if err != nil { t.Fatal(err) } defer close(stopCh) noxuDefinition := testserver.NewNoxuCustomResourceDefinition(apiextensionsv1beta1.NamespaceScoped) noxuVersionClient, err := testserver.CreateNewCustomResourceDefinition(noxuDefinition, apiExtensionClient, clientPool) if err != nil { t.Fatal(err) } ns1 := "namespace-1" testSimpleCRUD(t, ns1, noxuDefinition, noxuVersionClient) ns2 := "namespace-2" testSimpleCRUD(t, ns2, noxuDefinition, noxuVersionClient) } func TestSelfLink(t *testing.T) { stopCh, apiExtensionClient, clientPool, err := testserver.StartDefaultServer() if err != nil { t.Fatal(err) } defer close(stopCh) // namespace scoped noxuDefinition := testserver.NewNoxuCustomResourceDefinition(apiextensionsv1beta1.NamespaceScoped) noxuVersionClient, err := testserver.CreateNewCustomResourceDefinition(noxuDefinition, apiExtensionClient, clientPool) if err != nil { t.Fatal(err) } ns := "not-the-default" noxuNamespacedResourceClient := noxuVersionClient.Resource(&metav1.APIResource{ Name: noxuDefinition.Spec.Names.Plural, Namespaced: noxuDefinition.Spec.Scope == apiextensionsv1beta1.NamespaceScoped, }, ns) noxuInstanceToCreate := testserver.NewNoxuInstance(ns, "foo") createdNoxuInstance, err := noxuNamespacedResourceClient.Create(noxuInstanceToCreate) if err != nil { t.Fatal(err) } if e, a := "/apis/mygroup.example.com/v1beta1/namespaces/not-the-default/noxus/foo", createdNoxuInstance.GetSelfLink(); e != a { t.Errorf("expected %v, got %v", e, a) } // cluster scoped curletDefinition := testserver.NewCurletCustomResourceDefinition(apiextensionsv1beta1.ClusterScoped) curletVersionClient, err := testserver.CreateNewCustomResourceDefinition(curletDefinition, apiExtensionClient, clientPool) if err != nil { t.Fatal(err) } curletResourceClient := curletVersionClient.Resource(&metav1.APIResource{ Name: curletDefinition.Spec.Names.Plural, Namespaced: curletDefinition.Spec.Scope == apiextensionsv1beta1.NamespaceScoped, }, ns) curletInstanceToCreate := testserver.NewCurletInstance(ns, "foo") createdCurletInstance, err := curletResourceClient.Create(curletInstanceToCreate) if err != nil { t.Fatal(err) } if e, a := "/apis/mygroup.example.com/v1beta1/foo", createdCurletInstance.GetSelfLink(); e != a { t.Errorf("expected %v, got %v", e, a) } } func TestPreserveInt(t *testing.T) { stopCh, apiExtensionClient, clientPool, err := testserver.StartDefaultServer() if err != nil { t.Fatal(err) } defer close(stopCh) noxuDefinition := testserver.NewNoxuCustomResourceDefinition(apiextensionsv1beta1.ClusterScoped) noxuVersionClient, err := testserver.CreateNewCustomResourceDefinition(noxuDefinition, apiExtensionClient, clientPool) if err != nil { t.Fatal(err) } ns := "not-the-default" noxuNamespacedResourceClient := noxuVersionClient.Resource(&metav1.APIResource{ Name: noxuDefinition.Spec.Names.Plural, Namespaced: true, }, ns) noxuInstanceToCreate := testserver.NewNoxuInstance(ns, "foo") createdNoxuInstance, err := noxuNamespacedResourceClient.Create(noxuInstanceToCreate) if err != nil { t.Fatal(err) } originalJSON, err := runtime.Encode(unstructured.UnstructuredJSONScheme, createdNoxuInstance) if err != nil { t.Fatalf("unexpected error: %v", err) } gottenNoxuInstance, err := runtime.Decode(unstructured.UnstructuredJSONScheme, originalJSON) if err != nil { t.Fatalf("unexpected error: %v", err) } // Check if int is preserved. unstructuredObj := gottenNoxuInstance.(*unstructured.Unstructured).Object num := unstructuredObj["num"].(map[string]interface{}) num1 := num["num1"].(int64) num2 := num["num2"].(int64) if num1 != 9223372036854775807 || num2 != 1000000 { t.Errorf("Expected %v, got %v, %v", `9223372036854775807, 1000000`, num1, num2) } } func TestCrossNamespaceListWatch(t *testing.T) { stopCh, apiExtensionClient, clientPool, err := testserver.StartDefaultServer() if err != nil { t.Fatal(err) } defer close(stopCh) noxuDefinition := testserver.NewNoxuCustomResourceDefinition(apiextensionsv1beta1.NamespaceScoped) noxuVersionClient, err := testserver.CreateNewCustomResourceDefinition(noxuDefinition, apiExtensionClient, clientPool) if err != nil { t.Fatal(err) } ns := "" noxuResourceClient := NewNamespacedCustomResourceClient(ns, noxuVersionClient, noxuDefinition) initialList, err := noxuResourceClient.List(metav1.ListOptions{}) if err != nil { t.Fatal(err) } if e, a := 0, len(initialList.(*unstructured.UnstructuredList).Items); e != a { t.Errorf("expected %v, got %v", e, a) } initialListListMeta, err := meta.ListAccessor(initialList) if err != nil { t.Fatal(err) } noxuWatch, err := noxuResourceClient.Watch(metav1.ListOptions{ResourceVersion: initialListListMeta.GetResourceVersion()}) if err != nil { t.Fatal(err) } defer noxuWatch.Stop() instances := make(map[string]*unstructured.Unstructured) ns1 := "namespace-1" noxuNamespacedResourceClient1 := NewNamespacedCustomResourceClient(ns1, noxuVersionClient, noxuDefinition) instances[ns1] = createInstanceWithNamespaceHelper(t, ns1, "foo1", noxuNamespacedResourceClient1, noxuDefinition) noxuNamespacesWatch1, err := noxuNamespacedResourceClient1.Watch(metav1.ListOptions{ResourceVersion: initialListListMeta.GetResourceVersion()}) defer noxuNamespacesWatch1.Stop() ns2 := "namespace-2" noxuNamespacedResourceClient2 := NewNamespacedCustomResourceClient(ns2, noxuVersionClient, noxuDefinition) instances[ns2] = createInstanceWithNamespaceHelper(t, ns2, "foo2", noxuNamespacedResourceClient2, noxuDefinition) noxuNamespacesWatch2, err := noxuNamespacedResourceClient2.Watch(metav1.ListOptions{ResourceVersion: initialListListMeta.GetResourceVersion()}) defer noxuNamespacesWatch2.Stop() createdList, err := noxuResourceClient.List(metav1.ListOptions{}) if err != nil { t.Fatal(err) } if e, a := 2, len(createdList.(*unstructured.UnstructuredList).Items); e != a { t.Errorf("expected %v, got %v", e, a) } for _, a := range createdList.(*unstructured.UnstructuredList).Items { if e := instances[a.GetNamespace()]; !reflect.DeepEqual(e, &a) { t.Errorf("expected %v, got %v", e, a) } } addEvents := 0 for addEvents < 2 { select { case watchEvent := <-noxuWatch.ResultChan(): if e, a := watch.Added, watchEvent.Type; e != a { t.Fatalf("expected %v, got %v", e, a) } createdObjectMeta, err := meta.Accessor(watchEvent.Object) if err != nil { t.Fatal(err) } if len(createdObjectMeta.GetUID()) == 0 { t.Errorf("missing uuid: %#v", watchEvent.Object) } createdTypeMeta, err := meta.TypeAccessor(watchEvent.Object) if err != nil { t.Fatal(err) } if e, a := noxuDefinition.Spec.Group+"/"+noxuDefinition.Spec.Version, createdTypeMeta.GetAPIVersion(); e != a { t.Errorf("expected %v, got %v", e, a) } if e, a := noxuDefinition.Spec.Names.Kind, createdTypeMeta.GetKind(); e != a { t.Errorf("expected %v, got %v", e, a) } delete(instances, createdObjectMeta.GetNamespace()) addEvents++ case <-time.After(5 * time.Second): t.Fatalf("missing watch event") } } if e, a := 0, len(instances); e != a { t.Errorf("expected %v, got %v", e, a) } checkNamespacesWatchHelper(t, ns1, noxuNamespacesWatch1) checkNamespacesWatchHelper(t, ns2, noxuNamespacesWatch2) } func createInstanceWithNamespaceHelper(t *testing.T, ns string, name string, noxuNamespacedResourceClient dynamic.ResourceInterface, noxuDefinition *apiextensionsv1beta1.CustomResourceDefinition) *unstructured.Unstructured { createdInstance, err := instantiateCustomResource(t, testserver.NewNoxuInstance(ns, name), noxuNamespacedResourceClient, noxuDefinition) if err != nil { t.Fatalf("unable to create noxu Instance:%v", err) } return createdInstance } func checkNamespacesWatchHelper(t *testing.T, ns string, namespacedwatch watch.Interface) { namespacedAddEvent := 0 for namespacedAddEvent < 2 { select { case watchEvent := <-namespacedwatch.ResultChan(): // Check that the namespaced watch only has one result if namespacedAddEvent > 0 { t.Fatalf("extra watch event") } if e, a := watch.Added, watchEvent.Type; e != a { t.Fatalf("expected %v, got %v", e, a) } createdObjectMeta, err := meta.Accessor(watchEvent.Object) if err != nil { t.Fatal(err) } if e, a := ns, createdObjectMeta.GetNamespace(); e != a { t.Errorf("expected %v, got %v", e, a) } case <-time.After(5 * time.Second): if namespacedAddEvent != 1 { t.Fatalf("missing watch event") } } namespacedAddEvent++ } } func TestNameConflict(t *testing.T) { stopCh, apiExtensionClient, clientPool, err := testserver.StartDefaultServer() if err != nil { t.Fatal(err) } defer close(stopCh) noxuDefinition := testserver.NewNoxuCustomResourceDefinition(apiextensionsv1beta1.NamespaceScoped) _, err = testserver.CreateNewCustomResourceDefinition(noxuDefinition, apiExtensionClient, clientPool) if err != nil { t.Fatal(err) } noxu2Definition := testserver.NewNoxu2CustomResourceDefinition(apiextensionsv1beta1.NamespaceScoped) _, err = apiExtensionClient.Apiextensions().CustomResourceDefinitions().Create(noxu2Definition) if err != nil { t.Fatal(err) } // A NameConflict occurs err = wait.Poll(500*time.Millisecond, wait.ForeverTestTimeout, func() (bool, error) { crd, err := testserver.GetCustomResourceDefinition(noxu2Definition, apiExtensionClient) if err != nil { return false, err } for _, condition := range crd.Status.Conditions { if condition.Type == apiextensionsv1beta1.NamesAccepted && condition.Status == apiextensionsv1beta1.ConditionFalse { return true, nil } } return false, nil }) if err != nil { t.Fatal(err) } err = testserver.DeleteCustomResourceDefinition(noxuDefinition, apiExtensionClient) if err != nil { t.Fatal(err) } // Names are now accepted err = wait.Poll(500*time.Millisecond, wait.ForeverTestTimeout, func() (bool, error) { crd, err := testserver.GetCustomResourceDefinition(noxu2Definition, apiExtensionClient) if err != nil { return false, err } for _, condition := range crd.Status.Conditions { if condition.Type == apiextensionsv1beta1.NamesAccepted && condition.Status == apiextensionsv1beta1.ConditionTrue { return true, nil } } return false, nil }) if err != nil { t.Fatal(err) } }
zhangjm12/kubernetes
staging/src/k8s.io/apiextensions-apiserver/test/integration/basic_test.go
GO
apache-2.0
19,847
var prefix = require('../'); var should = require('should'); var fs = require('fs'); var autoprefixer = require("autoprefixer"); var gutil = require('gulp-util'); var Stream = require('stream'); var es = require('event-stream'); require('mocha'); var testBrowsers = ["last 1 version", "> 1%", "ie 8", "ie 7"]; var testOptions = { cascade: true }; var testfile = fs.readFileSync("./test/test.css","utf8"); describe('gulp-autoprefixer', function() { it('should prefix with defaults', function(done) { var stream = prefix(); var fakeFile = new gutil.File({ contents: new Buffer(testfile) }); stream.on('data', function(newFile){ String(newFile.contents).should.equal(autoprefixer.process(testfile).css); done(); }); stream.write(fakeFile); stream.end(); }); it('should prefix with browsers', function(done) { var stream = prefix("last 1 version", "> 1%", "ie 8", "ie 7"); var fakeFile = new gutil.File({ contents: new Buffer(testfile) }); stream.on('data', function(newFile){ String(newFile.contents).should.equal(autoprefixer("last 1 version", "> 1%", "ie 8", "ie 7").process(testfile).css); done(); }); stream.write(fakeFile); stream.end(); }); it('should prefix with browsers array', function(done) { var stream = prefix(testBrowsers); var fakeFile = new gutil.File({ contents: new Buffer(testfile) }); stream.on('data', function(newFile){ String(newFile.contents).should.equal(autoprefixer(testBrowsers).process(testfile).css); done(); }); stream.write(fakeFile); stream.end(); }); it('should prefix with options', function(done) { var stream = prefix(testOptions); var fakeFile = new gutil.File({ contents: new Buffer(testfile) }); stream.on('data', function(newFile){ String(newFile.contents).should.equal(autoprefixer().process(testfile, testOptions).css); done(); }); stream.write(fakeFile); stream.end(); }); it('should prefix with browsers and options', function(done) { var stream = prefix("last 1 version", "> 1%", "ie 8", "ie 7", testOptions); var fakeFile = new gutil.File({ contents: new Buffer(testfile) }); stream.on('data', function(newFile){ String(newFile.contents).should.equal(autoprefixer("last 1 version", "> 1%", "ie 8", "ie 7").process(testfile, testOptions).css); done(); }); stream.write(fakeFile); stream.end(); }); it('should prefix with browsers array and options', function(done) { var stream = prefix(testBrowsers, testOptions); var fakeFile = new gutil.File({ contents: new Buffer(testfile) }); stream.on('data', function(newFile){ String(newFile.contents).should.equal(autoprefixer(testBrowsers).process(testfile, testOptions).css); done(); }); stream.write(fakeFile); stream.end(); }); it('should work the same in stream mode', function(done) { var stream = prefix(); var fakeFile = new gutil.File({ contents: new Stream() }); stream.on('data', function(data) { data.contents.pipe(es.wait(function(err, data) { data.should.equal(autoprefixer.process(testfile).css); done(); })); }); stream.write(fakeFile); fakeFile.contents.write(testfile); fakeFile.contents.end(); }); it('should work the same in stream mode, with browsers', function(done) { var stream = prefix("last 1 version", "> 1%", "ie 8", "ie 7"); var fakeFile = new gutil.File({ contents: new Stream() }); stream.on('data', function(data) { data.contents.pipe(es.wait(function(err, data) { data.should.equal(autoprefixer("last 1 version", "> 1%", "ie 8", "ie 7").process(testfile).css); done(); })); }); stream.write(fakeFile); fakeFile.contents.write(testfile); fakeFile.contents.end(); }); it('should work the same in stream mode, with browsers array', function(done) { var stream = prefix(testBrowsers); var fakeFile = new gutil.File({ contents: new Stream() }); stream.on('data', function(data) { data.contents.pipe(es.wait(function(err, data) { data.should.equal(autoprefixer(testBrowsers).process(testfile).css); done(); })); }); stream.write(fakeFile); fakeFile.contents.write(testfile); fakeFile.contents.end(); }); it('should work the same in stream mode, with options', function(done) { var stream = prefix(testOptions); var fakeFile = new gutil.File({ contents: new Stream() }); stream.on('data', function(data) { data.contents.pipe(es.wait(function(err, data) { data.should.equal(autoprefixer().process(testfile, testOptions).css); done(); })); }); stream.write(fakeFile); fakeFile.contents.write(testfile); fakeFile.contents.end(); }); it('should work the same in stream mode, with browsers and options', function(done) { var stream = prefix("last 1 version", "> 1%", "ie 8", "ie 7", testOptions); var fakeFile = new gutil.File({ contents: new Stream() }); stream.on('data', function(data) { data.contents.pipe(es.wait(function(err, data) { data.should.equal(autoprefixer("last 1 version", "> 1%", "ie 8", "ie 7").process(testfile, testOptions).css); done(); })); }); stream.write(fakeFile); fakeFile.contents.write(testfile); fakeFile.contents.end(); }); it('should work the same in stream mode, with browsers array and options', function(done) { var stream = prefix(testBrowsers, testOptions); var fakeFile = new gutil.File({ contents: new Stream() }); stream.on('data', function(data) { data.contents.pipe(es.wait(function(err, data) { data.should.equal(autoprefixer(testBrowsers).process(testfile, testOptions).css); done(); })); }); stream.write(fakeFile); fakeFile.contents.write(testfile); fakeFile.contents.end(); }); it('should let null files pass through', function(done) { var stream = prefix(), n = 0; stream.pipe(es.through(function(file) { file.path.should.equal('null.md'); (file.contents === null).should.be.true; n++; }, function() { n.should.equal(1); done(); })); stream.write(new gutil.File({ path: 'null.md', contents: null })); stream.end(); }); });
hudsonwoods/hudson_woods
node_modules/gulp-autoprefixer/test/test.js
JavaScript
apache-2.0
6,684
/* Copyright 2014 The Kubernetes Authors All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package iptables import ( "fmt" "io/ioutil" "os" "regexp" "strings" "sync" "github.com/coreos/go-semver/semver" godbus "github.com/godbus/dbus" "github.com/golang/glog" utildbus "k8s.io/kubernetes/pkg/util/dbus" utilexec "k8s.io/kubernetes/pkg/util/exec" "k8s.io/kubernetes/pkg/util/sets" ) type RulePosition string const ( Prepend RulePosition = "-I" Append RulePosition = "-A" ) // An injectable interface for running iptables commands. Implementations must be goroutine-safe. type Interface interface { // EnsureChain checks if the specified chain exists and, if not, creates it. If the chain existed, return true. EnsureChain(table Table, chain Chain) (bool, error) // FlushChain clears the specified chain. If the chain did not exist, return error. FlushChain(table Table, chain Chain) error // DeleteChain deletes the specified chain. If the chain did not exist, return error. DeleteChain(table Table, chain Chain) error // EnsureRule checks if the specified rule is present and, if not, creates it. If the rule existed, return true. EnsureRule(position RulePosition, table Table, chain Chain, args ...string) (bool, error) // DeleteRule checks if the specified rule is present and, if so, deletes it. DeleteRule(table Table, chain Chain, args ...string) error // IsIpv6 returns true if this is managing ipv6 tables IsIpv6() bool // TODO: (BenTheElder) Unit-Test Save/SaveAll, Restore/RestoreAll // Save calls `iptables-save` for table. Save(table Table) ([]byte, error) // SaveAll calls `iptables-save`. SaveAll() ([]byte, error) // Restore runs `iptables-restore` passing data through a temporary file. // table is the Table to restore // data should be formatted like the output of Save() // flush sets the presence of the "--noflush" flag. see: FlushFlag // counters sets the "--counters" flag. see: RestoreCountersFlag Restore(table Table, data []byte, flush FlushFlag, counters RestoreCountersFlag) error // RestoreAll is the same as Restore except that no table is specified. RestoreAll(data []byte, flush FlushFlag, counters RestoreCountersFlag) error // AddReloadFunc adds a function to call on iptables reload AddReloadFunc(reloadFunc func()) // Destroy cleans up resources used by the Interface Destroy() } type Protocol byte const ( ProtocolIpv4 Protocol = iota + 1 ProtocolIpv6 ) type Table string const ( TableNAT Table = "nat" ) type Chain string const ( ChainPostrouting Chain = "POSTROUTING" ChainPrerouting Chain = "PREROUTING" ChainOutput Chain = "OUTPUT" ) const ( cmdIptablesSave string = "iptables-save" cmdIptablesRestore string = "iptables-restore" cmdIptables string = "iptables" cmdIp6tables string = "ip6tables" ) // Option flag for Restore type RestoreCountersFlag bool const RestoreCounters RestoreCountersFlag = true const NoRestoreCounters RestoreCountersFlag = false // Option flag for Restore type FlushFlag bool const FlushTables FlushFlag = true const NoFlushTables FlushFlag = false // Versions of iptables less than this do not support the -C / --check flag // (test whether a rule exists). const MinCheckVersion = "1.4.11" // Minimum iptables versions supporting the -w and -w2 flags const MinWaitVersion = "1.4.20" const MinWait2Version = "1.4.22" // runner implements Interface in terms of exec("iptables"). type runner struct { mu sync.Mutex exec utilexec.Interface dbus utildbus.Interface protocol Protocol hasCheck bool waitFlag []string reloadFuncs []func() signal chan *godbus.Signal } // New returns a new Interface which will exec iptables. func New(exec utilexec.Interface, dbus utildbus.Interface, protocol Protocol) Interface { vstring, err := GetIptablesVersionString(exec) if err != nil { glog.Warningf("Error checking iptables version, assuming version at least %s: %v", MinCheckVersion, err) vstring = MinCheckVersion } runner := &runner{ exec: exec, dbus: dbus, protocol: protocol, hasCheck: getIptablesHasCheckCommand(vstring), waitFlag: getIptablesWaitFlag(vstring), } runner.connectToFirewallD() return runner } // Destroy is part of Interface. func (runner *runner) Destroy() { if runner.signal != nil { runner.signal <- nil } } const ( firewalldName = "org.fedoraproject.FirewallD1" firewalldPath = "/org/fedoraproject/FirewallD1" firewalldInterface = "org.fedoraproject.FirewallD1" ) // Connects to D-Bus and listens for FirewallD start/restart. (On non-FirewallD-using // systems, this is effectively a no-op; we listen for the signals, but they will never be // emitted, so reload() will never be called.) func (runner *runner) connectToFirewallD() { bus, err := runner.dbus.SystemBus() if err != nil { glog.V(1).Infof("Could not connect to D-Bus system bus: %s", err) return } rule := fmt.Sprintf("type='signal',sender='%s',path='%s',interface='%s',member='Reloaded'", firewalldName, firewalldPath, firewalldInterface) bus.BusObject().Call("org.freedesktop.DBus.AddMatch", 0, rule) rule = fmt.Sprintf("type='signal',interface='org.freedesktop.DBus',member='NameOwnerChanged',path='/org/freedesktop/DBus',sender='org.freedesktop.DBus',arg0='%s'", firewalldName) bus.BusObject().Call("org.freedesktop.DBus.AddMatch", 0, rule) runner.signal = make(chan *godbus.Signal, 10) bus.Signal(runner.signal) go runner.dbusSignalHandler(bus) } // EnsureChain is part of Interface. func (runner *runner) EnsureChain(table Table, chain Chain) (bool, error) { fullArgs := makeFullArgs(table, chain) runner.mu.Lock() defer runner.mu.Unlock() out, err := runner.run(opCreateChain, fullArgs) if err != nil { if ee, ok := err.(utilexec.ExitError); ok { if ee.Exited() && ee.ExitStatus() == 1 { return true, nil } } return false, fmt.Errorf("error creating chain %q: %v: %s", chain, err, out) } return false, nil } // FlushChain is part of Interface. func (runner *runner) FlushChain(table Table, chain Chain) error { fullArgs := makeFullArgs(table, chain) runner.mu.Lock() defer runner.mu.Unlock() out, err := runner.run(opFlushChain, fullArgs) if err != nil { return fmt.Errorf("error flushing chain %q: %v: %s", chain, err, out) } return nil } // DeleteChain is part of Interface. func (runner *runner) DeleteChain(table Table, chain Chain) error { fullArgs := makeFullArgs(table, chain) runner.mu.Lock() defer runner.mu.Unlock() // TODO: we could call iptable -S first, ignore the output and check for non-zero return (more like DeleteRule) out, err := runner.run(opDeleteChain, fullArgs) if err != nil { return fmt.Errorf("error deleting chain %q: %v: %s", chain, err, out) } return nil } // EnsureRule is part of Interface. func (runner *runner) EnsureRule(position RulePosition, table Table, chain Chain, args ...string) (bool, error) { fullArgs := makeFullArgs(table, chain, args...) runner.mu.Lock() defer runner.mu.Unlock() exists, err := runner.checkRule(table, chain, args...) if err != nil { return false, err } if exists { return true, nil } out, err := runner.run(operation(position), fullArgs) if err != nil { return false, fmt.Errorf("error appending rule: %v: %s", err, out) } return false, nil } // DeleteRule is part of Interface. func (runner *runner) DeleteRule(table Table, chain Chain, args ...string) error { fullArgs := makeFullArgs(table, chain, args...) runner.mu.Lock() defer runner.mu.Unlock() exists, err := runner.checkRule(table, chain, args...) if err != nil { return err } if !exists { return nil } out, err := runner.run(opDeleteRule, fullArgs) if err != nil { return fmt.Errorf("error deleting rule: %v: %s", err, out) } return nil } func (runner *runner) IsIpv6() bool { return runner.protocol == ProtocolIpv6 } // Save is part of Interface. func (runner *runner) Save(table Table) ([]byte, error) { runner.mu.Lock() defer runner.mu.Unlock() // run and return args := []string{"-t", string(table)} return runner.exec.Command(cmdIptablesSave, args...).CombinedOutput() } // SaveAll is part of Interface. func (runner *runner) SaveAll() ([]byte, error) { runner.mu.Lock() defer runner.mu.Unlock() // run and return return runner.exec.Command(cmdIptablesSave, []string{}...).CombinedOutput() } // Restore is part of Interface. func (runner *runner) Restore(table Table, data []byte, flush FlushFlag, counters RestoreCountersFlag) error { // setup args args := []string{"-T", string(table)} return runner.restoreInternal(args, data, flush, counters) } // RestoreAll is part of Interface. func (runner *runner) RestoreAll(data []byte, flush FlushFlag, counters RestoreCountersFlag) error { // setup args args := make([]string, 0) return runner.restoreInternal(args, data, flush, counters) } // restoreInternal is the shared part of Restore/RestoreAll func (runner *runner) restoreInternal(args []string, data []byte, flush FlushFlag, counters RestoreCountersFlag) error { runner.mu.Lock() defer runner.mu.Unlock() if !flush { args = append(args, "--noflush") } if counters { args = append(args, "--counters") } // create temp file through which to pass data temp, err := ioutil.TempFile("", "kube-temp-iptables-restore-") if err != nil { return err } // make sure we delete the temp file defer os.Remove(temp.Name()) // Put the filename at the end of args. // NOTE: the filename must be at the end. // See: https://git.netfilter.org/iptables/commit/iptables-restore.c?id=e6869a8f59d779ff4d5a0984c86d80db70784962 args = append(args, temp.Name()) if err != nil { return err } // write data to the file _, err = temp.Write(data) temp.Close() if err != nil { return err } // run the command and return the output or an error including the output and error b, err := runner.exec.Command(cmdIptablesRestore, args...).CombinedOutput() if err != nil { return fmt.Errorf("%v (%s)", err, b) } return nil } func (runner *runner) iptablesCommand() string { if runner.IsIpv6() { return cmdIp6tables } else { return cmdIptables } } func (runner *runner) run(op operation, args []string) ([]byte, error) { iptablesCmd := runner.iptablesCommand() fullArgs := append(runner.waitFlag, string(op)) fullArgs = append(fullArgs, args...) glog.V(4).Infof("running iptables %s %v", string(op), args) return runner.exec.Command(iptablesCmd, fullArgs...).CombinedOutput() // Don't log err here - callers might not think it is an error. } // Returns (bool, nil) if it was able to check the existence of the rule, or // (<undefined>, error) if the process of checking failed. func (runner *runner) checkRule(table Table, chain Chain, args ...string) (bool, error) { if runner.hasCheck { return runner.checkRuleUsingCheck(makeFullArgs(table, chain, args...)) } else { return runner.checkRuleWithoutCheck(table, chain, args...) } } // Executes the rule check without using the "-C" flag, instead parsing iptables-save. // Present for compatibility with <1.4.11 versions of iptables. This is full // of hack and half-measures. We should nix this ASAP. func (runner *runner) checkRuleWithoutCheck(table Table, chain Chain, args ...string) (bool, error) { glog.V(1).Infof("running iptables-save -t %s", string(table)) out, err := runner.exec.Command(cmdIptablesSave, "-t", string(table)).CombinedOutput() if err != nil { return false, fmt.Errorf("error checking rule: %v", err) } // Sadly, iptables has inconsistent quoting rules for comments. Just remove all quotes. // Also, quoted multi-word comments (which are counted as a single arg) // will be unpacked into multiple args, // in order to compare against iptables-save output (which will be split at whitespace boundary) // e.g. a single arg('"this must be before the NodePort rules"') will be unquoted and unpacked into 7 args. var argsCopy []string for i := range args { tmpField := strings.Trim(args[i], "\"") argsCopy = append(argsCopy, strings.Fields(tmpField)...) } argset := sets.NewString(argsCopy...) for _, line := range strings.Split(string(out), "\n") { var fields = strings.Fields(line) // Check that this is a rule for the correct chain, and that it has // the correct number of argument (+2 for "-A <chain name>") if !strings.HasPrefix(line, fmt.Sprintf("-A %s", string(chain))) || len(fields) != len(argsCopy)+2 { continue } // Sadly, iptables has inconsistent quoting rules for comments. // Just remove all quotes. for i := range fields { fields[i] = strings.Trim(fields[i], "\"") } // TODO: This misses reorderings e.g. "-x foo ! -y bar" will match "! -x foo -y bar" if sets.NewString(fields...).IsSuperset(argset) { return true, nil } glog.V(5).Infof("DBG: fields is not a superset of args: fields=%v args=%v", fields, args) } return false, nil } // Executes the rule check using the "-C" flag func (runner *runner) checkRuleUsingCheck(args []string) (bool, error) { out, err := runner.run(opCheckRule, args) if err == nil { return true, nil } if ee, ok := err.(utilexec.ExitError); ok { // iptables uses exit(1) to indicate a failure of the operation, // as compared to a malformed commandline, for example. if ee.Exited() && ee.ExitStatus() == 1 { return false, nil } } return false, fmt.Errorf("error checking rule: %v: %s", err, out) } type operation string const ( opCreateChain operation = "-N" opFlushChain operation = "-F" opDeleteChain operation = "-X" opAppendRule operation = "-A" opCheckRule operation = "-C" opDeleteRule operation = "-D" ) func makeFullArgs(table Table, chain Chain, args ...string) []string { return append([]string{string(chain), "-t", string(table)}, args...) } // Checks if iptables has the "-C" flag func getIptablesHasCheckCommand(vstring string) bool { minVersion, err := semver.NewVersion(MinCheckVersion) if err != nil { glog.Errorf("MinCheckVersion (%s) is not a valid version string: %v", MinCheckVersion, err) return true } version, err := semver.NewVersion(vstring) if err != nil { glog.Errorf("vstring (%s) is not a valid version string: %v", vstring, err) return true } if version.LessThan(*minVersion) { return false } return true } // Checks if iptables version has a "wait" flag func getIptablesWaitFlag(vstring string) []string { version, err := semver.NewVersion(vstring) if err != nil { glog.Errorf("vstring (%s) is not a valid version string: %v", vstring, err) return nil } minVersion, err := semver.NewVersion(MinWaitVersion) if err != nil { glog.Errorf("MinWaitVersion (%s) is not a valid version string: %v", MinWaitVersion, err) return nil } if version.LessThan(*minVersion) { return nil } minVersion, err = semver.NewVersion(MinWait2Version) if err != nil { glog.Errorf("MinWait2Version (%s) is not a valid version string: %v", MinWait2Version, err) return nil } if version.LessThan(*minVersion) { return []string{"-w"} } else { return []string{"-w2"} } } // GetIptablesVersionString runs "iptables --version" to get the version string // in the form "X.X.X" func GetIptablesVersionString(exec utilexec.Interface) (string, error) { // this doesn't access mutable state so we don't need to use the interface / runner bytes, err := exec.Command(cmdIptables, "--version").CombinedOutput() if err != nil { return "", err } versionMatcher := regexp.MustCompile("v([0-9]+\\.[0-9]+\\.[0-9]+)") match := versionMatcher.FindStringSubmatch(string(bytes)) if match == nil { return "", fmt.Errorf("no iptables version found in string: %s", bytes) } return match[1], nil } // goroutine to listen for D-Bus signals func (runner *runner) dbusSignalHandler(bus utildbus.Connection) { firewalld := bus.Object(firewalldName, firewalldPath) for s := range runner.signal { if s == nil { // Unregister bus.Signal(runner.signal) return } switch s.Name { case "org.freedesktop.DBus.NameOwnerChanged": name := s.Body[0].(string) new_owner := s.Body[2].(string) if name != firewalldName || len(new_owner) == 0 { continue } // FirewallD startup (specifically the part where it deletes // all existing iptables rules) may not yet be complete when // we get this signal, so make a dummy request to it to // synchronize. firewalld.Call(firewalldInterface+".getDefaultZone", 0) runner.reload() case firewalldInterface + ".Reloaded": runner.reload() } } } // AddReloadFunc is part of Interface func (runner *runner) AddReloadFunc(reloadFunc func()) { runner.reloadFuncs = append(runner.reloadFuncs, reloadFunc) } // runs all reload funcs to re-sync iptables rules func (runner *runner) reload() { glog.V(1).Infof("reloading iptables rules") for _, f := range runner.reloadFuncs { f() } }
rajurs/kubernetes
pkg/util/iptables/iptables.go
GO
apache-2.0
17,355
/* * Licensed to Elasticsearch under one or more contributor * license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright * ownership. Elasticsearch licenses this file to you under * the Apache License, Version 2.0 (the "License"); you may * not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package org.elasticsearch.index.analysis; import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.miscellaneous.ScandinavianNormalizationFilter; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; import org.elasticsearch.index.IndexSettings; /** * Factory for {@link ScandinavianNormalizationFilter} */ public class ScandinavianNormalizationFilterFactory extends AbstractTokenFilterFactory implements MultiTermAwareComponent { public ScandinavianNormalizationFilterFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) { super(indexSettings, name, settings); } @Override public TokenStream create(TokenStream tokenStream) { return new ScandinavianNormalizationFilter(tokenStream); } @Override public Object getMultiTermComponent() { return this; } }
C-Bish/elasticsearch
core/src/main/java/org/elasticsearch/index/analysis/ScandinavianNormalizationFilterFactory.java
Java
apache-2.0
1,709
/* Copyright 2015 The Kubernetes Authors All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package operationmanager import ( "fmt" "sync" ) // Operation Manager is a thread-safe interface for keeping track of multiple pending async operations. type OperationManager interface { // Called when the operation with the given ID has started. // Creates a new channel with specified buffer size tracked with the specified ID. // Returns a read-only version of the newly created channel. // Returns an error if an entry with the specified ID already exists (previous entry must be removed by calling Close). Start(id string, bufferSize uint) (<-chan interface{}, error) // Called when the operation with the given ID has terminated. // Closes and removes the channel associated with ID. // Returns an error if no associated channel exists. Close(id string) error // Attempts to send msg to the channel associated with ID. // Returns an error if no associated channel exists. Send(id string, msg interface{}) error // Returns true if an entry with the specified ID already exists. Exists(id string) bool } // Returns a new instance of a channel manager. func NewOperationManager() OperationManager { return &operationManager{ chanMap: make(map[string]chan interface{}), } } type operationManager struct { sync.RWMutex chanMap map[string]chan interface{} } // Called when the operation with the given ID has started. // Creates a new channel with specified buffer size tracked with the specified ID. // Returns a read-only version of the newly created channel. // Returns an error if an entry with the specified ID already exists (previous entry must be removed by calling Close). func (cm *operationManager) Start(id string, bufferSize uint) (<-chan interface{}, error) { cm.Lock() defer cm.Unlock() if _, exists := cm.chanMap[id]; exists { return nil, fmt.Errorf("id %q already exists", id) } cm.chanMap[id] = make(chan interface{}, bufferSize) return cm.chanMap[id], nil } // Called when the operation with the given ID has terminated. // Closes and removes the channel associated with ID. // Returns an error if no associated channel exists. func (cm *operationManager) Close(id string) error { cm.Lock() defer cm.Unlock() if _, exists := cm.chanMap[id]; !exists { return fmt.Errorf("id %q not found", id) } close(cm.chanMap[id]) delete(cm.chanMap, id) return nil } // Attempts to send msg to the channel associated with ID. // Returns an error if no associated channel exists. func (cm *operationManager) Send(id string, msg interface{}) error { cm.RLock() defer cm.RUnlock() if _, exists := cm.chanMap[id]; !exists { return fmt.Errorf("id %q not found", id) } cm.chanMap[id] <- msg return nil } // Returns true if an entry with the specified ID already exists. func (cm *operationManager) Exists(id string) (exists bool) { cm.RLock() defer cm.RUnlock() _, exists = cm.chanMap[id] return }
thucatebay/heapster
Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/operationmanager/operationmanager.go
GO
apache-2.0
3,449
/* Excel specific API library */ /* Version: 15.0.4448.1000 */ /* Copyright (c) Microsoft Corporation. All rights reserved. */ /* Your use of this file is governed by the Microsoft Services Agreement http://go.microsoft.com/fwlink/?LinkId=266419. */ OSF.OUtil.augmentList(Microsoft.Office.WebExtension.FilterType, { OnlyVisible: "onlyVisible" }); OSF.ClientMode={ ReadWrite: 0, ReadOnly: 1 } OSF.DDA.RichInitializationReason={ 1: Microsoft.Office.WebExtension.InitializationReason.Inserted, 2: Microsoft.Office.WebExtension.InitializationReason.DocumentOpened }; Microsoft.Office.WebExtension.FileType={ Text: "text", Compressed: "compressed" }; OSF.DDA.RichClientSettingsManager={ read: function OSF_DDA_RichClientSettingsManager$Read(onCalling, onReceiving) { var keys=[]; var values=[]; if (onCalling) { onCalling(); } window.external.GetContext().GetSettings().Read(keys, values); if (onReceiving) { onReceiving(); } var serializedSettings={}; for (var index=0; index < keys.length; index++) { serializedSettings[keys[index]]=values[index]; } return serializedSettings; }, write: function OSF_DDA_RichClientSettingsManager$Write(serializedSettings, overwriteIfStale, onCalling, onReceiving) { var keys=[]; var values=[]; for (var key in serializedSettings) { keys.push(key); values.push(serializedSettings[key]); } if (onCalling) { onCalling(); } window.external.GetContext().GetSettings().Write(keys, values); if (onReceiving) { onReceiving(); } } }; OSF.DDA.DispIdHost.getRichClientDelegateMethods=function (actionId) { var delegateMethods={}; delegateMethods[OSF.DDA.DispIdHost.Delegates.ExecuteAsync]=OSF.DDA.SafeArray.Delegate.executeAsync; delegateMethods[OSF.DDA.DispIdHost.Delegates.RegisterEventAsync]=OSF.DDA.SafeArray.Delegate.registerEventAsync; delegateMethods[OSF.DDA.DispIdHost.Delegates.UnregisterEventAsync]=OSF.DDA.SafeArray.Delegate.unregisterEventAsync; function getSettingsExecuteMethod(hostDelegateMethod) { return function (args) { var status, response; try { response=hostDelegateMethod(args.hostCallArgs, args.onCalling, args.onReceiving); status=OSF.DDA.ErrorCodeManager.errorCodes.ooeSuccess; } catch (ex) { status=OSF.DDA.ErrorCodeManager.errorCodes.ooeInternalError; response={ name : Strings.OfficeOM.L_InternalError, message : ex }; } if (args.onComplete) { args.onComplete(status, response); } }; } function readSerializedSettings(hostCallArgs, onCalling, onReceiving) { return OSF.DDA.RichClientSettingsManager.read(onCalling, onReceiving); } function writeSerializedSettings(hostCallArgs, onCalling, onReceiving) { return OSF.DDA.RichClientSettingsManager.write( hostCallArgs[OSF.DDA.SettingsManager.SerializedSettings], hostCallArgs[Microsoft.Office.WebExtension.Parameters.OverwriteIfStale], onCalling, onReceiving ); } switch (actionId) { case OSF.DDA.AsyncMethodNames.RefreshAsync.id: delegateMethods[OSF.DDA.DispIdHost.Delegates.ExecuteAsync]=getSettingsExecuteMethod(readSerializedSettings); break; case OSF.DDA.AsyncMethodNames.SaveAsync.id: delegateMethods[OSF.DDA.DispIdHost.Delegates.ExecuteAsync]=getSettingsExecuteMethod(writeSerializedSettings); break; default: break; } return delegateMethods; } OSF.DDA.File=function OSF_DDA_File(handle, fileSize, sliceSize) { OSF.OUtil.defineEnumerableProperties(this, { "size": { value: fileSize }, "sliceCount": { value: Math.ceil(fileSize / sliceSize) } }); var privateState={}; privateState[OSF.DDA.FileProperties.Handle]=handle; privateState[OSF.DDA.FileProperties.SliceSize]=sliceSize; var am=OSF.DDA.AsyncMethodNames; OSF.DDA.DispIdHost.addAsyncMethods( this, [ am.GetDocumentCopyChunkAsync, am.ReleaseDocumentCopyAsync ], privateState ); } OSF.DDA.FileSliceOffset="fileSliceoffset"; OSF.DDA.CustomXmlParts=function OSF_DDA_CustomXmlParts() { this._eventDispatches=[]; var am=OSF.DDA.AsyncMethodNames; OSF.DDA.DispIdHost.addAsyncMethods(this, [ am.AddDataPartAsync, am.GetDataPartByIdAsync, am.GetDataPartsByNameSpaceAsync ]); }; OSF.DDA.CustomXmlPart=function OSF_DDA_CustomXmlPart(customXmlParts, id, builtIn) { OSF.OUtil.defineEnumerableProperties(this, { "builtIn": { value: builtIn }, "id": { value: id }, "namespaceManager": { value: new OSF.DDA.CustomXmlPrefixMappings(id) } }); var am=OSF.DDA.AsyncMethodNames; OSF.DDA.DispIdHost.addAsyncMethods(this, [ am.DeleteDataPartAsync, am.GetPartNodesAsync, am.GetPartXmlAsync ]); var customXmlPartEventDispatches=customXmlParts._eventDispatches; var dispatch=customXmlPartEventDispatches[id]; if (!dispatch) { var et=Microsoft.Office.WebExtension.EventType; dispatch=new OSF.EventDispatch([ et.DataNodeDeleted, et.DataNodeInserted, et.DataNodeReplaced ]); customXmlPartEventDispatches[id]=dispatch; } OSF.DDA.DispIdHost.addEventSupport(this, dispatch); }; OSF.DDA.CustomXmlPrefixMappings=function OSF_DDA_CustomXmlPrefixMappings(partId) { var am=OSF.DDA.AsyncMethodNames; OSF.DDA.DispIdHost.addAsyncMethods( this, [ am.AddDataPartNamespaceAsync, am.GetDataPartNamespaceAsync, am.GetDataPartPrefixAsync ], partId ); }; OSF.DDA.CustomXmlNode=function OSF_DDA_CustomXmlNode(handle, nodeType, ns, baseName) { OSF.OUtil.defineEnumerableProperties(this, { "baseName": { value: baseName }, "namespaceUri": { value: ns }, "nodeType": { value: nodeType } }); var am=OSF.DDA.AsyncMethodNames; OSF.DDA.DispIdHost.addAsyncMethods( this, [ am.GetRelativeNodesAsync, am.GetNodeValueAsync, am.GetNodeXmlAsync, am.SetNodeValueAsync, am.SetNodeXmlAsync ], handle ); }; OSF.DDA.NodeInsertedEventArgs=function OSF_DDA_NodeInsertedEventArgs(newNode, inUndoRedo) { OSF.OUtil.defineEnumerableProperties(this, { "type": { value: Microsoft.Office.WebExtension.EventType.DataNodeInserted }, "newNode": { value: newNode }, "inUndoRedo": { value: inUndoRedo } }); }; OSF.DDA.NodeReplacedEventArgs=function OSF_DDA_NodeReplacedEventArgs(oldNode, newNode, inUndoRedo) { OSF.OUtil.defineEnumerableProperties(this, { "type": { value: Microsoft.Office.WebExtension.EventType.DataNodeReplaced }, "oldNode": { value: oldNode }, "newNode": { value: newNode }, "inUndoRedo": { value: inUndoRedo } }); }; OSF.DDA.NodeDeletedEventArgs=function OSF_DDA_NodeDeletedEventArgs(oldNode, oldNextSibling, inUndoRedo) { OSF.OUtil.defineEnumerableProperties(this, { "type": { value: Microsoft.Office.WebExtension.EventType.DataNodeDeleted }, "oldNode": { value: oldNode }, "oldNextSibling": { value: oldNextSibling }, "inUndoRedo": { value: inUndoRedo } }); }; OSF.OUtil.setNamespace("SafeArray", OSF.DDA); OSF.DDA.SafeArray.Response={ Status: 0, Payload: 1 }; OSF.DDA.SafeArray.UniqueArguments={ Offset: "offset", Run: "run", BindingSpecificData: "bindingSpecificData", MergedCellGuid: "{66e7831f-81b2-42e2-823c-89e872d541b3}" }; OSF.OUtil.setNamespace("Delegate", OSF.DDA.SafeArray); OSF.DDA.SafeArray.Delegate.SpecialProcessor=function OSF_DDA_SafeArray_Delegate_SpecialProcessor() { function _2DVBArrayToJaggedArray(vbArr) { var ret; try { var rows=vbArr.ubound(1); var cols=vbArr.ubound(2); vbArr=vbArr.toArray(); if (rows==1 && cols==1) { ret=[vbArr]; } else { ret=[]; for (var row=0; row < rows; row++) { var rowArr=[]; for (var col=0; col < cols; col++) { var datum=vbArr[row * cols+col]; if (datum !=OSF.DDA.SafeArray.UniqueArguments.MergedCellGuid) { rowArr.push(datum); } } if (rowArr.length > 0) { ret.push(rowArr); } } } } catch (ex) { } return ret; } var complexTypes=[ OSF.DDA.PropertyDescriptors.FileProperties, OSF.DDA.PropertyDescriptors.FileSliceProperties, OSF.DDA.PropertyDescriptors.BindingProperties, OSF.DDA.SafeArray.UniqueArguments.BindingSpecificData, OSF.DDA.SafeArray.UniqueArguments.Offset, OSF.DDA.SafeArray.UniqueArguments.Run, OSF.DDA.PropertyDescriptors.Subset, OSF.DDA.PropertyDescriptors.DataPartProperties, OSF.DDA.PropertyDescriptors.DataNodeProperties, OSF.DDA.EventDescriptors.BindingSelectionChangedEvent, OSF.DDA.EventDescriptors.DataNodeInsertedEvent, OSF.DDA.EventDescriptors.DataNodeReplacedEvent, OSF.DDA.EventDescriptors.DataNodeDeletedEvent, OSF.DDA.DataNodeEventProperties.OldNode, OSF.DDA.DataNodeEventProperties.NewNode, OSF.DDA.DataNodeEventProperties.NextSiblingNode ]; var dynamicTypes={}; dynamicTypes[Microsoft.Office.WebExtension.Parameters.Data]=(function () { var tableRows=0; var tableHeaders=1; return { toHost: function OSF_DDA_SafeArray_Delegate_SpecialProcessor_Data$toHost(data) { if (typeof data !="string" && data[OSF.DDA.TableDataProperties.TableRows] !==undefined) { var tableData=[]; tableData[tableRows]=data[OSF.DDA.TableDataProperties.TableRows]; tableData[tableHeaders]=data[OSF.DDA.TableDataProperties.TableHeaders]; data=tableData; } return data; }, fromHost: function OSF_DDA_SafeArray_Delegate_SpecialProcessor_Data$fromHost(hostArgs) { var ret; if (hostArgs.toArray) { var dimensions=hostArgs.dimensions(); if(dimensions===2) { ret=_2DVBArrayToJaggedArray(hostArgs); } else { var array=hostArgs.toArray(); if(array.length===2 && ((array[0] !=null && array[0].toArray) || (array[1] !=null && array[1].toArray))) { ret={}; ret[OSF.DDA.TableDataProperties.TableRows]=_2DVBArrayToJaggedArray(array[tableRows]); ret[OSF.DDA.TableDataProperties.TableHeaders]=_2DVBArrayToJaggedArray(array[tableHeaders]); } else { ret=array; } } } else { ret=hostArgs; } return ret; } } })(); OSF.DDA.SafeArray.Delegate.SpecialProcessor.uber.constructor.call(this, complexTypes, dynamicTypes); this.pack=function OSF_DDA_SafeArray_Delegate_SpecialProcessor$pack(param, arg) { var value; if (this.isDynamicType(param)) { value=dynamicTypes[param].toHost(arg); } else { value=arg; } return value; }; this.unpack=function OSF_DDA_SafeArray_Delegate_SpecialProcessor$unpack(param, arg) { var value; if (this.isComplexType(param) || OSF.DDA.ListType.isListType(param)) { try { value=arg.toArray(); } catch (ex) { value=arg || {}; } } else if (this.isDynamicType(param)) { value=dynamicTypes[param].fromHost(arg); } else { value=arg; } return value; }; } OSF.OUtil.extend(OSF.DDA.SafeArray.Delegate.SpecialProcessor, OSF.DDA.SpecialProcessor); OSF.DDA.SafeArray.Delegate.ParameterMap=(function () { var parameterMap=new OSF.DDA.HostParameterMap(new OSF.DDA.SafeArray.Delegate.SpecialProcessor()); var ns; var self=parameterMap.self; function createObject(properties) { var obj=null; if (properties) { obj={}; var len=properties.length; for (var i=0; i < len; i++) { obj[properties[i].name]=properties[i].value; } } return obj; } function define(definition) { var args={}; var toHost=createObject(definition.toHost); if (definition.invertible) { args.map=toHost; } else if (definition.canonical) { args.toHost=args.fromHost=toHost; } else { args.toHost=toHost; args.fromHost=createObject(definition.fromHost); } parameterMap.setMapping(definition.type, args); } ns=OSF.DDA.FileProperties; define({ type: OSF.DDA.PropertyDescriptors.FileProperties, fromHost: [ { name: ns.Handle, value: 0 }, { name: ns.FileSize, value: 1 } ] }); define({ type: OSF.DDA.PropertyDescriptors.FileSliceProperties, fromHost: [ { name: Microsoft.Office.WebExtension.Parameters.Data, value: 0 }, { name: ns.SliceSize, value: 1} ] }); ns=OSF.DDA.BindingProperties; define({ type: OSF.DDA.PropertyDescriptors.BindingProperties, fromHost: [ { name: ns.Id, value: 0 }, { name: ns.Type, value: 1 }, { name: OSF.DDA.SafeArray.UniqueArguments.BindingSpecificData, value: 2 } ] }); define({ type: OSF.DDA.SafeArray.UniqueArguments.BindingSpecificData, fromHost: [ { name: ns.RowCount, value: 0 }, { name: ns.ColumnCount, value: 1 }, { name: ns.HasHeaders, value: 2 } ] }); ns=OSF.DDA.SafeArray.UniqueArguments; define({ type: OSF.DDA.PropertyDescriptors.Subset, toHost: [ { name: ns.Offset, value: 0 }, { name: ns.Run, value: 1 } ], canonical: true }); ns=Microsoft.Office.WebExtension.Parameters; define({ type: OSF.DDA.SafeArray.UniqueArguments.Offset, toHost: [ { name: ns.StartRow, value: 0 }, { name: ns.StartColumn, value: 1 } ], canonical: true }); define({ type: OSF.DDA.SafeArray.UniqueArguments.Run, toHost: [ { name: ns.RowCount, value: 0 }, { name: ns.ColumnCount, value: 1 } ], canonical: true }); ns=OSF.DDA.DataPartProperties; define({ type: OSF.DDA.PropertyDescriptors.DataPartProperties, fromHost: [ { name: ns.Id, value: 0 }, { name: ns.BuiltIn, value: 1 } ] }); ns=OSF.DDA.DataNodeProperties; define({ type: OSF.DDA.PropertyDescriptors.DataNodeProperties, fromHost: [ { name: ns.Handle, value: 0 }, { name: ns.BaseName, value: 1 }, { name: ns.NamespaceUri, value: 2 }, { name: ns.NodeType, value: 3 } ] }); define({ type: OSF.DDA.EventDescriptors.BindingSelectionChangedEvent, fromHost: [ { name: OSF.DDA.PropertyDescriptors.BindingProperties, value: 0 }, { name: OSF.DDA.PropertyDescriptors.Subset, value: 1 } ] }); ns=OSF.DDA.DataNodeEventProperties; define({ type: OSF.DDA.EventDescriptors.DataNodeInsertedEvent, fromHost: [ { name: ns.InUndoRedo, value: 0 }, { name: ns.NewNode, value: 1 } ] }); define({ type: OSF.DDA.EventDescriptors.DataNodeReplacedEvent, fromHost: [ { name: ns.InUndoRedo, value: 0 }, { name: ns.OldNode, value: 1 }, { name: ns.NewNode, value: 2 } ] }); define({ type: OSF.DDA.EventDescriptors.DataNodeDeletedEvent, fromHost: [ { name: ns.InUndoRedo, value: 0 }, { name: ns.OldNode, value: 1 }, { name: ns.NextSiblingNode, value: 2 } ] }); define({ type: ns.OldNode, fromHost: [ { name: OSF.DDA.PropertyDescriptors.DataNodeProperties, value: self } ] }); define({ type: ns.NewNode, fromHost: [ { name: OSF.DDA.PropertyDescriptors.DataNodeProperties, value: self } ] }); define({ type: ns.NextSiblingNode, fromHost: [ { name: OSF.DDA.PropertyDescriptors.DataNodeProperties, value: self } ] }); ns=Microsoft.Office.WebExtension.AsyncResultStatus; define({ type: OSF.DDA.PropertyDescriptors.AsyncResultStatus, fromHost: [ { name: ns.Succeeded, value: 0 }, { name: ns.Failed, value: 1 } ] }); ns=Microsoft.Office.WebExtension.CoercionType; define({ type: Microsoft.Office.WebExtension.Parameters.CoercionType, toHost: [ { name: ns.Text, value: 0 }, { name: ns.Matrix, value: 1 }, { name: ns.Table, value: 2 }, { name: ns.Html, value: 3 }, { name: ns.Ooxml, value: 4 } ] }); ns=Microsoft.Office.WebExtension.FileType; if (ns) { define({ type: Microsoft.Office.WebExtension.Parameters.FileType, toHost: [ { name: ns.Text, value: 0 }, { name: ns.Compressed, value: 5 } ] }); } ns=Microsoft.Office.WebExtension.BindingType; if (ns) { define({ type: Microsoft.Office.WebExtension.Parameters.BindingType, toHost: [ { name: ns.Text, value: 0 }, { name: ns.Matrix, value: 1 }, { name: ns.Table, value: 2 } ], invertible: true }); } ns=Microsoft.Office.WebExtension.ValueFormat; define({ type: Microsoft.Office.WebExtension.Parameters.ValueFormat, toHost: [ { name: ns.Unformatted, value: 0 }, { name: ns.Formatted, value: 1 } ] }); ns=Microsoft.Office.WebExtension.FilterType; define({ type: Microsoft.Office.WebExtension.Parameters.FilterType, toHost: [ { name: ns.All, value: 0 }, { name: ns.OnlyVisible, value: 1 } ] }); ns=Microsoft.Office.WebExtension.Parameters; var cns=OSF.DDA.MethodDispId; define({ type: cns.dispidGetSelectedDataMethod, fromHost: [ { name: ns.Data, value: self } ], toHost: [ { name: ns.CoercionType, value: 0 }, { name: ns.ValueFormat, value: 1 }, { name: ns.FilterType, value: 2 } ] }); define({ type: cns.dispidSetSelectedDataMethod, toHost: [ { name: ns.CoercionType, value: 0 }, { name: ns.Data, value: 1 } ] }); define({ type: cns.dispidGetDocumentCopyMethod, toHost: [{ name: ns.FileType, value: 0}], fromHost: [ { name: OSF.DDA.PropertyDescriptors.FileProperties, value: self } ] }); define({ type: cns.dispidGetDocumentCopyChunkMethod, toHost: [ { name: OSF.DDA.FileProperties.Handle, value: 0 }, { name: OSF.DDA.FileSliceOffset, value: 1 }, { name: OSF.DDA.FileProperties.SliceSize, value: 2 } ], fromHost: [ { name: OSF.DDA.PropertyDescriptors.FileSliceProperties, value: self } ] }); define({ type: cns.dispidReleaseDocumentCopyMethod, toHost: [{ name: OSF.DDA.FileProperties.Handle, value: 0}] }); define({ type: cns.dispidAddBindingFromSelectionMethod, fromHost: [ { name: OSF.DDA.PropertyDescriptors.BindingProperties, value: self } ], toHost: [ { name: ns.Id, value: 0 }, { name: ns.BindingType, value: 1 } ] }); define({ type: cns.dispidAddBindingFromPromptMethod, fromHost: [ { name: OSF.DDA.PropertyDescriptors.BindingProperties, value: self } ], toHost: [ { name: ns.Id, value: 0 }, { name: ns.BindingType, value: 1 }, { name: ns.PromptText, value: 2 } ] }); define({ type: cns.dispidAddBindingFromNamedItemMethod, fromHost: [ { name: OSF.DDA.PropertyDescriptors.BindingProperties, value: self } ], toHost: [ { name: ns.ItemName, value: 0 }, { name: ns.Id, value: 1 }, { name: ns.BindingType, value: 2 }, { name: ns.FailOnCollision, value: 3 } ] }); define({ type: cns.dispidReleaseBindingMethod, toHost: [ { name: ns.Id, value: 0 } ] }); define({ type: cns.dispidGetBindingMethod, fromHost: [ { name: OSF.DDA.PropertyDescriptors.BindingProperties, value: self } ], toHost: [ { name: ns.Id, value: 0 } ] }); define({ type: cns.dispidGetAllBindingsMethod, fromHost: [ { name: OSF.DDA.ListDescriptors.BindingList, value: self } ] }); define({ type: cns.dispidGetBindingDataMethod, fromHost: [ { name: ns.Data, value: self } ], toHost: [ { name: ns.Id, value: 0 }, { name: ns.CoercionType, value: 1 }, { name: ns.ValueFormat, value: 2 }, { name: ns.FilterType, value: 3 }, { name: OSF.DDA.PropertyDescriptors.Subset, value: 4 } ] }); define({ type: cns.dispidSetBindingDataMethod, toHost: [ { name: ns.Id, value: 0 }, { name: ns.CoercionType, value: 1 }, { name: ns.Data, value: 2 }, { name: OSF.DDA.SafeArray.UniqueArguments.Offset, value: 3 } ] }); define({ type: cns.dispidAddRowsMethod, toHost: [ { name: ns.Id, value: 0 }, { name: ns.Data, value: 1 } ] }); define({ type: cns.dispidAddColumnsMethod, toHost: [ { name: ns.Id, value: 0 }, { name: ns.Data, value: 1 } ] }); define({ type: cns.dispidClearAllRowsMethod, toHost: [ { name: ns.Id, value: 0 } ] }); define({ type: cns.dispidLoadSettingsMethod, fromHost: [ { name: OSF.DDA.SettingsManager.SerializedSettings, value: self } ] }); define({ type: cns.dispidSaveSettingsMethod, toHost: [ { name: OSF.DDA.SettingsManager.SerializedSettings, value: OSF.DDA.SettingsManager.SerializedSettings }, { name: Microsoft.Office.WebExtension.Parameters.OverwriteIfStale, value: Microsoft.Office.WebExtension.Parameters.OverwriteIfStale } ] }); define({ type: cns.dispidAddDataPartMethod, fromHost: [ { name: OSF.DDA.PropertyDescriptors.DataPartProperties, value: self } ], toHost: [ { name: ns.Xml, value: 0 } ] }); define({ type: cns.dispidGetDataPartByIdMethod, fromHost: [ { name: OSF.DDA.PropertyDescriptors.DataPartProperties, value: self } ], toHost: [ { name: ns.Id, value: 0 } ] }); define({ type: cns.dispidGetDataPartsByNamespaceMethod, fromHost: [ { name: OSF.DDA.ListDescriptors.DataPartList, value: self } ], toHost: [ { name: ns.Namespace, value: 0 } ] }); define({ type: cns.dispidGetDataPartXmlMethod, fromHost: [ { name: ns.Data, value: self} ], toHost: [ { name: ns.Id, value: 0 } ] }); define({ type: cns.dispidGetDataPartNodesMethod, fromHost: [ { name: OSF.DDA.ListDescriptors.DataNodeList, value: self } ], toHost: [ { name: ns.Id, value: 0 }, { name: ns.XPath, value: 1 } ] }); define({ type: cns.dispidDeleteDataPartMethod, toHost: [ { name: ns.Id, value: 0 } ] }); define({ type: cns.dispidGetDataNodeValueMethod, fromHost: [ { name: ns.Data, value: self} ], toHost: [ { name: OSF.DDA.DataNodeProperties.Handle, value: 0 } ] }); define({ type: cns.dispidGetDataNodeXmlMethod, fromHost: [ { name: ns.Data, value: self} ], toHost: [ { name: OSF.DDA.DataNodeProperties.Handle, value: 0 } ] }); define({ type: cns.dispidGetDataNodesMethod, fromHost: [ { name: OSF.DDA.ListDescriptors.DataNodeList, value: self } ], toHost: [ { name: OSF.DDA.DataNodeProperties.Handle, value: 0 }, { name: ns.XPath, value: 1 } ] }); define({ type: cns.dispidSetDataNodeValueMethod, toHost: [ { name: OSF.DDA.DataNodeProperties.Handle, value: 0 }, { name: ns.Data, value: 1 } ] }); define({ type: cns.dispidSetDataNodeXmlMethod, toHost: [ { name: OSF.DDA.DataNodeProperties.Handle, value: 0 }, { name: ns.Xml, value: 1 } ] }); define({ type: cns.dispidAddDataNamespaceMethod, toHost: [ { name: OSF.DDA.DataPartProperties.Id, value: 0 }, { name: ns.Prefix, value: 1 }, { name: ns.Namespace, value: 2 } ] }); define({ type: cns.dispidGetDataUriByPrefixMethod, fromHost: [ { name: ns.Data, value: self} ], toHost: [ { name: OSF.DDA.DataPartProperties.Id, value: 0 }, { name: ns.Prefix, value: 1 } ] }); define({ type: cns.dispidGetDataPrefixByUriMethod, fromHost: [ { name: ns.Data, value: self} ], toHost: [ { name: OSF.DDA.DataPartProperties.Id, value: 0 }, { name: ns.Namespace, value: 1 } ] }); define({ type: cns.dispidGetSelectedTaskMethod, fromHost: [ { name: ns.TaskId, value: self } ] }); define({ type: cns.dispidGetTaskMethod, fromHost: [ { name: "taskName", value: 0 }, { name: "wssTaskId", value: 1 }, { name: "resourceNames", value: 2 } ], toHost: [ { name: ns.TaskId, value: 0 } ] }); define({ type: cns.dispidGetTaskFieldMethod, fromHost: [ { name: ns.FieldValue, value: self } ], toHost: [ { name: ns.TaskId, value: 0 }, { name: ns.FieldId, value: 1 }, { name: ns.GetRawValue, value: 2 } ] }); define({ type: cns.dispidGetWSSUrlMethod, fromHost: [ { name: ns.ServerUrl, value: 0 }, { name: ns.ListName, value: 1 } ] }); define({ type: cns.dispidGetSelectedResourceMethod, fromHost: [ { name: ns.ResourceId, value: self } ] }); define({ type: cns.dispidGetResourceFieldMethod, fromHost: [ { name: ns.FieldValue, value: self } ], toHost: [ { name: ns.ResourceId, value: 0 }, { name: ns.FieldId, value: 1 }, { name: ns.GetRawValue, value: 2 } ] }); define({ type: cns.dispidGetProjectFieldMethod, fromHost: [ { name: ns.FieldValue, value: self } ], toHost: [ { name: ns.FieldId, value: 0 }, { name: ns.GetRawValue, value: 1 } ] }); define({ type: cns.dispidGetSelectedViewMethod, fromHost: [ { name: ns.ViewType, value: 0 }, { name: ns.ViewName, value: 1 } ] }); cns=OSF.DDA.EventDispId define({ type: cns.dispidDocumentSelectionChangedEvent }); define({ type: cns.dispidBindingSelectionChangedEvent, fromHost: [ {name: OSF.DDA.EventDescriptors.BindingSelectionChangedEvent, value: self} ] }); define({ type: cns.dispidBindingDataChangedEvent, fromHost: [{ name: OSF.DDA.PropertyDescriptors.BindingProperties, value: self}] }); define({ type: cns.dispidSettingsChangedEvent }); define({ type: cns.dispidDataNodeAddedEvent, fromHost: [{ name: OSF.DDA.EventDescriptors.DataNodeInsertedEvent, value: self}] }); define({ type: cns.dispidDataNodeReplacedEvent, fromHost: [{ name: OSF.DDA.EventDescriptors.DataNodeReplacedEvent, value: self}] }); define({ type: cns.dispidDataNodeDeletedEvent, fromHost: [{ name: OSF.DDA.EventDescriptors.DataNodeDeletedEvent, value: self}] }); define({ type: cns.dispidTaskSelectionChangedEvent }); define({ type: cns.dispidResourceSelectionChangedEvent }); define({ type: cns.dispidViewSelectionChangedEvent }); return parameterMap; })(); OSF.DDA.SafeArray.Delegate._onException=function OSF_DDA_SafeArray_Delegate$OnException(ex, args) { var status; var number=ex.number; if (number) { switch (number) { case -2146828218: status=OSF.DDA.ErrorCodeManager.errorCodes.ooeNoCapability; break; case -2146827850: default: status=OSF.DDA.ErrorCodeManager.errorCodes.ooeInternalError; break; } } if (args.onComplete) { args.onComplete(status || OSF.DDA.ErrorCodeManager.errorCodes.ooeInternalError); } } OSF.DDA.SafeArray.Delegate.executeAsync=function OSF_DDA_SafeArray_Delegate$ExecuteAsync(args) { try { if (args.onCalling) { args.onCalling(); } function toArray(args) { var arrArgs=args; if (OSF.OUtil.isArray(args)) { var len=arrArgs.length; for (var i=0; i < len; i++) { arrArgs[i]=toArray(arrArgs[i]); } } else if (OSF.OUtil.isDate(args)) { arrArgs=args.getVarDate(); } else if (typeof args==="object" && !OSF.OUtil.isArray(args)) { arrArgs=[]; for (var index in args) { if (!OSF.OUtil.isFunction(args[index])) { arrArgs[index]=toArray(args[index]); } } } return arrArgs; } window.external.Execute( args.dispId, toArray(args.hostCallArgs), function OSF_DDA_SafeArrayFacade$Execute_OnResponse(hostResponseArgs) { if (args.onReceiving) { args.onReceiving(); } if (args.onComplete) { var result=hostResponseArgs.toArray(); var status=result[OSF.DDA.SafeArray.Response.Status]; var payload; if (status==OSF.DDA.ErrorCodeManager.errorCodes.ooeSuccess) { if (result.length > 2) { payload=[]; for (var i=1; i < result.length; i++) payload[i - 1]=result[i]; } else { payload=result[OSF.DDA.SafeArray.Response.Payload]; } } else { payload=result[OSF.DDA.SafeArray.Response.Payload]; } args.onComplete(status, payload); } } ); } catch (ex) { OSF.DDA.SafeArray.Delegate._onException(ex, args); } }; OSF.DDA.SafeArray.Delegate._getOnAfterRegisterEvent=function OSF_DDA_SafeArrayDelegate$GetOnAfterRegisterEvent(args) { return function OSF_DDA_SafeArrayDelegate$OnAfterRegisterEvent(hostResponseArgs) { if (args.onReceiving) { args.onReceiving(); } if (args.onComplete) { var status=hostResponseArgs.toArray ? hostResponseArgs.toArray()[OSF.DDA.SafeArray.Response.Status] : hostResponseArgs; args.onComplete(status) } } } OSF.DDA.SafeArray.Delegate.registerEventAsync=function OSF_DDA_SafeArray_Delegate$RegisterEventAsync(args) { if (args.onCalling) { args.onCalling(); } var callback=OSF.DDA.SafeArray.Delegate._getOnAfterRegisterEvent(args); try { window.external.RegisterEvent( args.dispId, args.targetId, function OSF_DDA_SafeArrayDelegate$RegisterEventAsync_OnEvent(eventDispId, payload) { if (args.onEvent) { args.onEvent(payload); } }, callback ); } catch (ex) { OSF.DDA.SafeArray.Delegate._onException(ex, args); } }; OSF.DDA.SafeArray.Delegate.unregisterEventAsync=function OSF_DDA_SafeArray_Delegate$UnregisterEventAsync(args) { if (args.onCalling) { args.onCalling(); } var callback=OSF.DDA.SafeArray.Delegate._getOnAfterRegisterEvent(args); try { window.external.UnregisterEvent( args.dispId, args.targetId, callback ); } catch (ex) { OSF.DDA.SafeArray.Delegate._onException(ex, args); } }; delete Microsoft.Office.WebExtension.FileType; OSF.DDA.ExcelDocument=function OSF_DDA_ExcelDocument(officeAppContext, settings) { var bf=new OSF.DDA.BindingFacade(this); OSF.DDA.DispIdHost.addAsyncMethods(bf, [OSF.DDA.AsyncMethodNames.AddFromPromptAsync]); OSF.DDA.ExcelDocument.uber.constructor.call(this, officeAppContext, bf, settings ); OSF.OUtil.finalizeProperties(this); }; OSF.OUtil.extend(OSF.DDA.ExcelDocument, OSF.DDA.JsomDocument);
bayzid026/TrainingContent
O3652/O3652-2 Deep Dive in Office Word Add-ins/Demos/ContentWriter/ContentWriterWeb/Scripts/Office/1.1/excel-15.debug.js
JavaScript
apache-2.0
28,619
<?php /** * @link http://www.yiiframework.com/ * @copyright Copyright (c) 2008 Yii Software LLC * @license http://www.yiiframework.com/license/ */ namespace yii\i18n; use Yii; /** * GettextMessageSource represents a message source that is based on GNU Gettext. * * Each GettextMessageSource instance represents the message translations * for a single domain. And each message category represents a message context * in Gettext. Translated messages are stored as either a MO or PO file, * depending on the [[useMoFile]] property value. * * All translations are saved under the [[basePath]] directory. * * Translations in one language are kept as MO or PO files under an individual * subdirectory whose name is the language ID. The file name is specified via * [[catalog]] property, which defaults to 'messages'. * * @author Qiang Xue <qiang.xue@gmail.com> * @since 2.0 */ class GettextMessageSource extends MessageSource { const MO_FILE_EXT = '.mo'; const PO_FILE_EXT = '.po'; /** * @var string */ public $basePath = '@app/messages'; /** * @var string */ public $catalog = 'messages'; /** * @var boolean */ public $useMoFile = true; /** * @var boolean */ public $useBigEndian = false; /** * Loads the message translation for the specified $language and $category. * If translation for specific locale code such as `en-US` isn't found it * tries more generic `en`. When both are present, the `en-US` messages will be merged * over `en`. See [[loadFallbackMessages]] for details. * If the $language is less specific than [[sourceLanguage]], the method will try to * load the messages for [[sourceLanguage]]. For example: [[sourceLanguage]] is `en-GB`, * $language is `en`. The method will load the messages for `en` and merge them over `en-GB`. * * @param string $category the message category * @param string $language the target language * @return array the loaded messages. The keys are original messages, and the values are translated messages. * @see loadFallbackMessages * @see sourceLanguage */ protected function loadMessages($category, $language) { $messageFile = $this->getMessageFilePath($language); $messages = $this->loadMessagesFromFile($messageFile, $category); $fallbackLanguage = substr($language, 0, 2); $fallbackSourceLanguage = substr($this->sourceLanguage, 0, 2); if ($fallbackLanguage !== $language) { $messages = $this->loadFallbackMessages($category, $fallbackLanguage, $messages, $messageFile); } elseif ($language === $fallbackSourceLanguage) { $messages = $this->loadFallbackMessages($category, $this->sourceLanguage, $messages, $messageFile); } else { if ($messages === null) { Yii::error("The message file for category '$category' does not exist: $messageFile", __METHOD__); } } return (array) $messages; } /** * The method is normally called by [[loadMessages]] to load the fallback messages for the language. * Method tries to load the $category messages for the $fallbackLanguage and adds them to the $messages array. * * @param string $category the message category * @param string $fallbackLanguage the target fallback language * @param array $messages the array of previously loaded translation messages. * The keys are original messages, and the values are the translated messages. * @param string $originalMessageFile the path to the file with messages. Used to log an error message * in case when no translations were found. * @return array the loaded messages. The keys are original messages, and the values are the translated messages. * @since 2.0.7 */ protected function loadFallbackMessages($category, $fallbackLanguage, $messages, $originalMessageFile) { $fallbackMessageFile = $this->getMessageFilePath($fallbackLanguage); $fallbackMessages = $this->loadMessagesFromFile($fallbackMessageFile, $category); if ( $messages === null && $fallbackMessages === null && $fallbackLanguage !== $this->sourceLanguage && $fallbackLanguage !== substr($this->sourceLanguage, 0, 2) ) { Yii::error("The message file for category '$category' does not exist: $originalMessageFile " . "Fallback file does not exist as well: $fallbackMessageFile", __METHOD__); } elseif (empty($messages)) { return $fallbackMessages; } elseif (!empty($fallbackMessages)) { foreach ($fallbackMessages as $key => $value) { if (!empty($value) && empty($messages[$key])) { $messages[$key] = $fallbackMessages[$key]; } } } return (array) $messages; } /** * Returns message file path for the specified language and category. * * @param string $language the target language * @return string path to message file */ protected function getMessageFilePath($language) { $messageFile = Yii::getAlias($this->basePath) . '/' . $language . '/' . $this->catalog; if ($this->useMoFile) { $messageFile .= self::MO_FILE_EXT; } else { $messageFile .= self::PO_FILE_EXT; } return $messageFile; } /** * Loads the message translation for the specified language and category or returns null if file doesn't exist. * * @param string $messageFile path to message file * @param string $category the message category * @return array|null array of messages or null if file not found */ protected function loadMessagesFromFile($messageFile, $category) { if (is_file($messageFile)) { if ($this->useMoFile) { $gettextFile = new GettextMoFile(['useBigEndian' => $this->useBigEndian]); } else { $gettextFile = new GettextPoFile(); } $messages = $gettextFile->load($messageFile, $category); if (!is_array($messages)) { $messages = []; } return $messages; } else { return null; } } }
evoshop/evo_maa
vendor/yiisoft/yii2/i18n/GettextMessageSource.php
PHP
apache-2.0
6,402
// Copyright 2014 Google Inc. All rights reserved. // Use of this source code is governed by the Apache 2.0 // license that can be found in the LICENSE file. package internal // This file has code for accessing metadata. // // References: // https://cloud.google.com/compute/docs/metadata import ( "fmt" "io/ioutil" "net/http" "net/url" ) const ( metadataHost = "metadata" metadataPath = "/computeMetadata/v1/" ) var ( metadataRequestHeaders = http.Header{ "Metadata-Flavor": []string{"Google"}, } ) // TODO(dsymonds): Do we need to support default values, like Python? func mustGetMetadata(key string) []byte { b, err := getMetadata(key) if err != nil { panic(fmt.Sprintf("Metadata fetch failed for '%s': %v", key, err)) } return b } func getMetadata(key string) ([]byte, error) { // TODO(dsymonds): May need to use url.Parse to support keys with query args. req := &http.Request{ Method: "GET", URL: &url.URL{ Scheme: "http", Host: metadataHost, Path: metadataPath + key, }, Header: metadataRequestHeaders, Host: metadataHost, } resp, err := http.DefaultClient.Do(req) if err != nil { return nil, err } defer resp.Body.Close() if resp.StatusCode != 200 { return nil, fmt.Errorf("metadata server returned HTTP %d", resp.StatusCode) } return ioutil.ReadAll(resp.Body) }
GoogleCloudPlatform/k8s-stackdriver
prometheus-to-sd/vendor/google.golang.org/appengine/internal/metadata.go
GO
apache-2.0
1,332
// *** DO NOT MODIFY *** // AUTOGENERATED BY go generate from msg_generate.go package dns // pack*() functions func (rr *A) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { off, err := rr.Hdr.pack(msg, off, compression, compress) if err != nil { return off, err } headerEnd := off off, err = packDataA(rr.A, msg, off) if err != nil { return off, err } rr.Header().Rdlength = uint16(off - headerEnd) return off, nil } func (rr *AAAA) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { off, err := rr.Hdr.pack(msg, off, compression, compress) if err != nil { return off, err } headerEnd := off off, err = packDataAAAA(rr.AAAA, msg, off) if err != nil { return off, err } rr.Header().Rdlength = uint16(off - headerEnd) return off, nil } func (rr *AFSDB) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { off, err := rr.Hdr.pack(msg, off, compression, compress) if err != nil { return off, err } headerEnd := off off, err = packUint16(rr.Subtype, msg, off) if err != nil { return off, err } off, err = PackDomainName(rr.Hostname, msg, off, compression, compress) if err != nil { return off, err } rr.Header().Rdlength = uint16(off - headerEnd) return off, nil } func (rr *ANY) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { off, err := rr.Hdr.pack(msg, off, compression, compress) if err != nil { return off, err } headerEnd := off rr.Header().Rdlength = uint16(off - headerEnd) return off, nil } func (rr *CAA) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { off, err := rr.Hdr.pack(msg, off, compression, compress) if err != nil { return off, err } headerEnd := off off, err = packUint8(rr.Flag, msg, off) if err != nil { return off, err } off, err = packString(rr.Tag, msg, off) if err != nil { return off, err } off, err = packStringOctet(rr.Value, msg, off) if err != nil { return off, err } rr.Header().Rdlength = uint16(off - headerEnd) return off, nil } func (rr *CDNSKEY) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { off, err := rr.Hdr.pack(msg, off, compression, compress) if err != nil { return off, err } headerEnd := off off, err = packUint16(rr.Flags, msg, off) if err != nil { return off, err } off, err = packUint8(rr.Protocol, msg, off) if err != nil { return off, err } off, err = packUint8(rr.Algorithm, msg, off) if err != nil { return off, err } off, err = packStringBase64(rr.PublicKey, msg, off) if err != nil { return off, err } rr.Header().Rdlength = uint16(off - headerEnd) return off, nil } func (rr *CDS) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { off, err := rr.Hdr.pack(msg, off, compression, compress) if err != nil { return off, err } headerEnd := off off, err = packUint16(rr.KeyTag, msg, off) if err != nil { return off, err } off, err = packUint8(rr.Algorithm, msg, off) if err != nil { return off, err } off, err = packUint8(rr.DigestType, msg, off) if err != nil { return off, err } off, err = packStringHex(rr.Digest, msg, off) if err != nil { return off, err } rr.Header().Rdlength = uint16(off - headerEnd) return off, nil } func (rr *CERT) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { off, err := rr.Hdr.pack(msg, off, compression, compress) if err != nil { return off, err } headerEnd := off off, err = packUint16(rr.Type, msg, off) if err != nil { return off, err } off, err = packUint16(rr.KeyTag, msg, off) if err != nil { return off, err } off, err = packUint8(rr.Algorithm, msg, off) if err != nil { return off, err } off, err = packStringBase64(rr.Certificate, msg, off) if err != nil { return off, err } rr.Header().Rdlength = uint16(off - headerEnd) return off, nil } func (rr *CNAME) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { off, err := rr.Hdr.pack(msg, off, compression, compress) if err != nil { return off, err } headerEnd := off off, err = PackDomainName(rr.Target, msg, off, compression, compress) if err != nil { return off, err } rr.Header().Rdlength = uint16(off - headerEnd) return off, nil } func (rr *DHCID) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { off, err := rr.Hdr.pack(msg, off, compression, compress) if err != nil { return off, err } headerEnd := off off, err = packStringBase64(rr.Digest, msg, off) if err != nil { return off, err } rr.Header().Rdlength = uint16(off - headerEnd) return off, nil } func (rr *DLV) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { off, err := rr.Hdr.pack(msg, off, compression, compress) if err != nil { return off, err } headerEnd := off off, err = packUint16(rr.KeyTag, msg, off) if err != nil { return off, err } off, err = packUint8(rr.Algorithm, msg, off) if err != nil { return off, err } off, err = packUint8(rr.DigestType, msg, off) if err != nil { return off, err } off, err = packStringHex(rr.Digest, msg, off) if err != nil { return off, err } rr.Header().Rdlength = uint16(off - headerEnd) return off, nil } func (rr *DNAME) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { off, err := rr.Hdr.pack(msg, off, compression, compress) if err != nil { return off, err } headerEnd := off off, err = PackDomainName(rr.Target, msg, off, compression, compress) if err != nil { return off, err } rr.Header().Rdlength = uint16(off - headerEnd) return off, nil } func (rr *DNSKEY) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { off, err := rr.Hdr.pack(msg, off, compression, compress) if err != nil { return off, err } headerEnd := off off, err = packUint16(rr.Flags, msg, off) if err != nil { return off, err } off, err = packUint8(rr.Protocol, msg, off) if err != nil { return off, err } off, err = packUint8(rr.Algorithm, msg, off) if err != nil { return off, err } off, err = packStringBase64(rr.PublicKey, msg, off) if err != nil { return off, err } rr.Header().Rdlength = uint16(off - headerEnd) return off, nil } func (rr *DS) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { off, err := rr.Hdr.pack(msg, off, compression, compress) if err != nil { return off, err } headerEnd := off off, err = packUint16(rr.KeyTag, msg, off) if err != nil { return off, err } off, err = packUint8(rr.Algorithm, msg, off) if err != nil { return off, err } off, err = packUint8(rr.DigestType, msg, off) if err != nil { return off, err } off, err = packStringHex(rr.Digest, msg, off) if err != nil { return off, err } rr.Header().Rdlength = uint16(off - headerEnd) return off, nil } func (rr *EID) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { off, err := rr.Hdr.pack(msg, off, compression, compress) if err != nil { return off, err } headerEnd := off off, err = packStringHex(rr.Endpoint, msg, off) if err != nil { return off, err } rr.Header().Rdlength = uint16(off - headerEnd) return off, nil } func (rr *EUI48) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { off, err := rr.Hdr.pack(msg, off, compression, compress) if err != nil { return off, err } headerEnd := off off, err = packUint48(rr.Address, msg, off) if err != nil { return off, err } rr.Header().Rdlength = uint16(off - headerEnd) return off, nil } func (rr *EUI64) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { off, err := rr.Hdr.pack(msg, off, compression, compress) if err != nil { return off, err } headerEnd := off off, err = packUint64(rr.Address, msg, off) if err != nil { return off, err } rr.Header().Rdlength = uint16(off - headerEnd) return off, nil } func (rr *GID) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { off, err := rr.Hdr.pack(msg, off, compression, compress) if err != nil { return off, err } headerEnd := off off, err = packUint32(rr.Gid, msg, off) if err != nil { return off, err } rr.Header().Rdlength = uint16(off - headerEnd) return off, nil } func (rr *GPOS) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { off, err := rr.Hdr.pack(msg, off, compression, compress) if err != nil { return off, err } headerEnd := off off, err = packString(rr.Longitude, msg, off) if err != nil { return off, err } off, err = packString(rr.Latitude, msg, off) if err != nil { return off, err } off, err = packString(rr.Altitude, msg, off) if err != nil { return off, err } rr.Header().Rdlength = uint16(off - headerEnd) return off, nil } func (rr *HINFO) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { off, err := rr.Hdr.pack(msg, off, compression, compress) if err != nil { return off, err } headerEnd := off off, err = packString(rr.Cpu, msg, off) if err != nil { return off, err } off, err = packString(rr.Os, msg, off) if err != nil { return off, err } rr.Header().Rdlength = uint16(off - headerEnd) return off, nil } func (rr *HIP) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { off, err := rr.Hdr.pack(msg, off, compression, compress) if err != nil { return off, err } headerEnd := off off, err = packUint8(rr.HitLength, msg, off) if err != nil { return off, err } off, err = packUint8(rr.PublicKeyAlgorithm, msg, off) if err != nil { return off, err } off, err = packUint16(rr.PublicKeyLength, msg, off) if err != nil { return off, err } off, err = packStringHex(rr.Hit, msg, off) if err != nil { return off, err } off, err = packStringBase64(rr.PublicKey, msg, off) if err != nil { return off, err } off, err = packDataDomainNames(rr.RendezvousServers, msg, off, compression, compress) if err != nil { return off, err } rr.Header().Rdlength = uint16(off - headerEnd) return off, nil } func (rr *KEY) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { off, err := rr.Hdr.pack(msg, off, compression, compress) if err != nil { return off, err } headerEnd := off off, err = packUint16(rr.Flags, msg, off) if err != nil { return off, err } off, err = packUint8(rr.Protocol, msg, off) if err != nil { return off, err } off, err = packUint8(rr.Algorithm, msg, off) if err != nil { return off, err } off, err = packStringBase64(rr.PublicKey, msg, off) if err != nil { return off, err } rr.Header().Rdlength = uint16(off - headerEnd) return off, nil } func (rr *KX) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { off, err := rr.Hdr.pack(msg, off, compression, compress) if err != nil { return off, err } headerEnd := off off, err = packUint16(rr.Preference, msg, off) if err != nil { return off, err } off, err = PackDomainName(rr.Exchanger, msg, off, compression, compress) if err != nil { return off, err } rr.Header().Rdlength = uint16(off - headerEnd) return off, nil } func (rr *L32) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { off, err := rr.Hdr.pack(msg, off, compression, compress) if err != nil { return off, err } headerEnd := off off, err = packUint16(rr.Preference, msg, off) if err != nil { return off, err } off, err = packDataA(rr.Locator32, msg, off) if err != nil { return off, err } rr.Header().Rdlength = uint16(off - headerEnd) return off, nil } func (rr *L64) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { off, err := rr.Hdr.pack(msg, off, compression, compress) if err != nil { return off, err } headerEnd := off off, err = packUint16(rr.Preference, msg, off) if err != nil { return off, err } off, err = packUint64(rr.Locator64, msg, off) if err != nil { return off, err } rr.Header().Rdlength = uint16(off - headerEnd) return off, nil } func (rr *LOC) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { off, err := rr.Hdr.pack(msg, off, compression, compress) if err != nil { return off, err } headerEnd := off off, err = packUint8(rr.Version, msg, off) if err != nil { return off, err } off, err = packUint8(rr.Size, msg, off) if err != nil { return off, err } off, err = packUint8(rr.HorizPre, msg, off) if err != nil { return off, err } off, err = packUint8(rr.VertPre, msg, off) if err != nil { return off, err } off, err = packUint32(rr.Latitude, msg, off) if err != nil { return off, err } off, err = packUint32(rr.Longitude, msg, off) if err != nil { return off, err } off, err = packUint32(rr.Altitude, msg, off) if err != nil { return off, err } rr.Header().Rdlength = uint16(off - headerEnd) return off, nil } func (rr *LP) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { off, err := rr.Hdr.pack(msg, off, compression, compress) if err != nil { return off, err } headerEnd := off off, err = packUint16(rr.Preference, msg, off) if err != nil { return off, err } off, err = PackDomainName(rr.Fqdn, msg, off, compression, compress) if err != nil { return off, err } rr.Header().Rdlength = uint16(off - headerEnd) return off, nil } func (rr *MB) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { off, err := rr.Hdr.pack(msg, off, compression, compress) if err != nil { return off, err } headerEnd := off off, err = PackDomainName(rr.Mb, msg, off, compression, compress) if err != nil { return off, err } rr.Header().Rdlength = uint16(off - headerEnd) return off, nil } func (rr *MD) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { off, err := rr.Hdr.pack(msg, off, compression, compress) if err != nil { return off, err } headerEnd := off off, err = PackDomainName(rr.Md, msg, off, compression, compress) if err != nil { return off, err } rr.Header().Rdlength = uint16(off - headerEnd) return off, nil } func (rr *MF) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { off, err := rr.Hdr.pack(msg, off, compression, compress) if err != nil { return off, err } headerEnd := off off, err = PackDomainName(rr.Mf, msg, off, compression, compress) if err != nil { return off, err } rr.Header().Rdlength = uint16(off - headerEnd) return off, nil } func (rr *MG) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { off, err := rr.Hdr.pack(msg, off, compression, compress) if err != nil { return off, err } headerEnd := off off, err = PackDomainName(rr.Mg, msg, off, compression, compress) if err != nil { return off, err } rr.Header().Rdlength = uint16(off - headerEnd) return off, nil } func (rr *MINFO) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { off, err := rr.Hdr.pack(msg, off, compression, compress) if err != nil { return off, err } headerEnd := off off, err = PackDomainName(rr.Rmail, msg, off, compression, compress) if err != nil { return off, err } off, err = PackDomainName(rr.Email, msg, off, compression, compress) if err != nil { return off, err } rr.Header().Rdlength = uint16(off - headerEnd) return off, nil } func (rr *MR) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { off, err := rr.Hdr.pack(msg, off, compression, compress) if err != nil { return off, err } headerEnd := off off, err = PackDomainName(rr.Mr, msg, off, compression, compress) if err != nil { return off, err } rr.Header().Rdlength = uint16(off - headerEnd) return off, nil } func (rr *MX) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { off, err := rr.Hdr.pack(msg, off, compression, compress) if err != nil { return off, err } headerEnd := off off, err = packUint16(rr.Preference, msg, off) if err != nil { return off, err } off, err = PackDomainName(rr.Mx, msg, off, compression, compress) if err != nil { return off, err } rr.Header().Rdlength = uint16(off - headerEnd) return off, nil } func (rr *NAPTR) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { off, err := rr.Hdr.pack(msg, off, compression, compress) if err != nil { return off, err } headerEnd := off off, err = packUint16(rr.Order, msg, off) if err != nil { return off, err } off, err = packUint16(rr.Preference, msg, off) if err != nil { return off, err } off, err = packString(rr.Flags, msg, off) if err != nil { return off, err } off, err = packString(rr.Service, msg, off) if err != nil { return off, err } off, err = packString(rr.Regexp, msg, off) if err != nil { return off, err } off, err = PackDomainName(rr.Replacement, msg, off, compression, compress) if err != nil { return off, err } rr.Header().Rdlength = uint16(off - headerEnd) return off, nil } func (rr *NID) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { off, err := rr.Hdr.pack(msg, off, compression, compress) if err != nil { return off, err } headerEnd := off off, err = packUint16(rr.Preference, msg, off) if err != nil { return off, err } off, err = packUint64(rr.NodeID, msg, off) if err != nil { return off, err } rr.Header().Rdlength = uint16(off - headerEnd) return off, nil } func (rr *NIMLOC) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { off, err := rr.Hdr.pack(msg, off, compression, compress) if err != nil { return off, err } headerEnd := off off, err = packStringHex(rr.Locator, msg, off) if err != nil { return off, err } rr.Header().Rdlength = uint16(off - headerEnd) return off, nil } func (rr *NINFO) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { off, err := rr.Hdr.pack(msg, off, compression, compress) if err != nil { return off, err } headerEnd := off off, err = packStringTxt(rr.ZSData, msg, off) if err != nil { return off, err } rr.Header().Rdlength = uint16(off - headerEnd) return off, nil } func (rr *NS) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { off, err := rr.Hdr.pack(msg, off, compression, compress) if err != nil { return off, err } headerEnd := off off, err = PackDomainName(rr.Ns, msg, off, compression, compress) if err != nil { return off, err } rr.Header().Rdlength = uint16(off - headerEnd) return off, nil } func (rr *NSAPPTR) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { off, err := rr.Hdr.pack(msg, off, compression, compress) if err != nil { return off, err } headerEnd := off off, err = PackDomainName(rr.Ptr, msg, off, compression, compress) if err != nil { return off, err } rr.Header().Rdlength = uint16(off - headerEnd) return off, nil } func (rr *NSEC) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { off, err := rr.Hdr.pack(msg, off, compression, compress) if err != nil { return off, err } headerEnd := off off, err = PackDomainName(rr.NextDomain, msg, off, compression, compress) if err != nil { return off, err } off, err = packDataNsec(rr.TypeBitMap, msg, off) if err != nil { return off, err } rr.Header().Rdlength = uint16(off - headerEnd) return off, nil } func (rr *NSEC3) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { off, err := rr.Hdr.pack(msg, off, compression, compress) if err != nil { return off, err } headerEnd := off off, err = packUint8(rr.Hash, msg, off) if err != nil { return off, err } off, err = packUint8(rr.Flags, msg, off) if err != nil { return off, err } off, err = packUint16(rr.Iterations, msg, off) if err != nil { return off, err } off, err = packUint8(rr.SaltLength, msg, off) if err != nil { return off, err } off, err = packStringHex(rr.Salt, msg, off) if err != nil { return off, err } off, err = packUint8(rr.HashLength, msg, off) if err != nil { return off, err } off, err = packStringBase32(rr.NextDomain, msg, off) if err != nil { return off, err } off, err = packDataNsec(rr.TypeBitMap, msg, off) if err != nil { return off, err } rr.Header().Rdlength = uint16(off - headerEnd) return off, nil } func (rr *NSEC3PARAM) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { off, err := rr.Hdr.pack(msg, off, compression, compress) if err != nil { return off, err } headerEnd := off off, err = packUint8(rr.Hash, msg, off) if err != nil { return off, err } off, err = packUint8(rr.Flags, msg, off) if err != nil { return off, err } off, err = packUint16(rr.Iterations, msg, off) if err != nil { return off, err } off, err = packUint8(rr.SaltLength, msg, off) if err != nil { return off, err } off, err = packStringHex(rr.Salt, msg, off) if err != nil { return off, err } rr.Header().Rdlength = uint16(off - headerEnd) return off, nil } func (rr *OPENPGPKEY) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { off, err := rr.Hdr.pack(msg, off, compression, compress) if err != nil { return off, err } headerEnd := off off, err = packStringBase64(rr.PublicKey, msg, off) if err != nil { return off, err } rr.Header().Rdlength = uint16(off - headerEnd) return off, nil } func (rr *OPT) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { off, err := rr.Hdr.pack(msg, off, compression, compress) if err != nil { return off, err } headerEnd := off off, err = packDataOpt(rr.Option, msg, off) if err != nil { return off, err } rr.Header().Rdlength = uint16(off - headerEnd) return off, nil } func (rr *PTR) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { off, err := rr.Hdr.pack(msg, off, compression, compress) if err != nil { return off, err } headerEnd := off off, err = PackDomainName(rr.Ptr, msg, off, compression, compress) if err != nil { return off, err } rr.Header().Rdlength = uint16(off - headerEnd) return off, nil } func (rr *PX) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { off, err := rr.Hdr.pack(msg, off, compression, compress) if err != nil { return off, err } headerEnd := off off, err = packUint16(rr.Preference, msg, off) if err != nil { return off, err } off, err = PackDomainName(rr.Map822, msg, off, compression, compress) if err != nil { return off, err } off, err = PackDomainName(rr.Mapx400, msg, off, compression, compress) if err != nil { return off, err } rr.Header().Rdlength = uint16(off - headerEnd) return off, nil } func (rr *RFC3597) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { off, err := rr.Hdr.pack(msg, off, compression, compress) if err != nil { return off, err } headerEnd := off off, err = packStringHex(rr.Rdata, msg, off) if err != nil { return off, err } rr.Header().Rdlength = uint16(off - headerEnd) return off, nil } func (rr *RKEY) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { off, err := rr.Hdr.pack(msg, off, compression, compress) if err != nil { return off, err } headerEnd := off off, err = packUint16(rr.Flags, msg, off) if err != nil { return off, err } off, err = packUint8(rr.Protocol, msg, off) if err != nil { return off, err } off, err = packUint8(rr.Algorithm, msg, off) if err != nil { return off, err } off, err = packStringBase64(rr.PublicKey, msg, off) if err != nil { return off, err } rr.Header().Rdlength = uint16(off - headerEnd) return off, nil } func (rr *RP) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { off, err := rr.Hdr.pack(msg, off, compression, compress) if err != nil { return off, err } headerEnd := off off, err = PackDomainName(rr.Mbox, msg, off, compression, compress) if err != nil { return off, err } off, err = PackDomainName(rr.Txt, msg, off, compression, compress) if err != nil { return off, err } rr.Header().Rdlength = uint16(off - headerEnd) return off, nil } func (rr *RRSIG) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { off, err := rr.Hdr.pack(msg, off, compression, compress) if err != nil { return off, err } headerEnd := off off, err = packUint16(rr.TypeCovered, msg, off) if err != nil { return off, err } off, err = packUint8(rr.Algorithm, msg, off) if err != nil { return off, err } off, err = packUint8(rr.Labels, msg, off) if err != nil { return off, err } off, err = packUint32(rr.OrigTtl, msg, off) if err != nil { return off, err } off, err = packUint32(rr.Expiration, msg, off) if err != nil { return off, err } off, err = packUint32(rr.Inception, msg, off) if err != nil { return off, err } off, err = packUint16(rr.KeyTag, msg, off) if err != nil { return off, err } off, err = PackDomainName(rr.SignerName, msg, off, compression, compress) if err != nil { return off, err } off, err = packStringBase64(rr.Signature, msg, off) if err != nil { return off, err } rr.Header().Rdlength = uint16(off - headerEnd) return off, nil } func (rr *RT) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { off, err := rr.Hdr.pack(msg, off, compression, compress) if err != nil { return off, err } headerEnd := off off, err = packUint16(rr.Preference, msg, off) if err != nil { return off, err } off, err = PackDomainName(rr.Host, msg, off, compression, compress) if err != nil { return off, err } rr.Header().Rdlength = uint16(off - headerEnd) return off, nil } func (rr *SIG) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { off, err := rr.Hdr.pack(msg, off, compression, compress) if err != nil { return off, err } headerEnd := off off, err = packUint16(rr.TypeCovered, msg, off) if err != nil { return off, err } off, err = packUint8(rr.Algorithm, msg, off) if err != nil { return off, err } off, err = packUint8(rr.Labels, msg, off) if err != nil { return off, err } off, err = packUint32(rr.OrigTtl, msg, off) if err != nil { return off, err } off, err = packUint32(rr.Expiration, msg, off) if err != nil { return off, err } off, err = packUint32(rr.Inception, msg, off) if err != nil { return off, err } off, err = packUint16(rr.KeyTag, msg, off) if err != nil { return off, err } off, err = PackDomainName(rr.SignerName, msg, off, compression, compress) if err != nil { return off, err } off, err = packStringBase64(rr.Signature, msg, off) if err != nil { return off, err } rr.Header().Rdlength = uint16(off - headerEnd) return off, nil } func (rr *SOA) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { off, err := rr.Hdr.pack(msg, off, compression, compress) if err != nil { return off, err } headerEnd := off off, err = PackDomainName(rr.Ns, msg, off, compression, compress) if err != nil { return off, err } off, err = PackDomainName(rr.Mbox, msg, off, compression, compress) if err != nil { return off, err } off, err = packUint32(rr.Serial, msg, off) if err != nil { return off, err } off, err = packUint32(rr.Refresh, msg, off) if err != nil { return off, err } off, err = packUint32(rr.Retry, msg, off) if err != nil { return off, err } off, err = packUint32(rr.Expire, msg, off) if err != nil { return off, err } off, err = packUint32(rr.Minttl, msg, off) if err != nil { return off, err } rr.Header().Rdlength = uint16(off - headerEnd) return off, nil } func (rr *SPF) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { off, err := rr.Hdr.pack(msg, off, compression, compress) if err != nil { return off, err } headerEnd := off off, err = packStringTxt(rr.Txt, msg, off) if err != nil { return off, err } rr.Header().Rdlength = uint16(off - headerEnd) return off, nil } func (rr *SRV) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { off, err := rr.Hdr.pack(msg, off, compression, compress) if err != nil { return off, err } headerEnd := off off, err = packUint16(rr.Priority, msg, off) if err != nil { return off, err } off, err = packUint16(rr.Weight, msg, off) if err != nil { return off, err } off, err = packUint16(rr.Port, msg, off) if err != nil { return off, err } off, err = PackDomainName(rr.Target, msg, off, compression, compress) if err != nil { return off, err } rr.Header().Rdlength = uint16(off - headerEnd) return off, nil } func (rr *SSHFP) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { off, err := rr.Hdr.pack(msg, off, compression, compress) if err != nil { return off, err } headerEnd := off off, err = packUint8(rr.Algorithm, msg, off) if err != nil { return off, err } off, err = packUint8(rr.Type, msg, off) if err != nil { return off, err } off, err = packStringHex(rr.FingerPrint, msg, off) if err != nil { return off, err } rr.Header().Rdlength = uint16(off - headerEnd) return off, nil } func (rr *TA) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { off, err := rr.Hdr.pack(msg, off, compression, compress) if err != nil { return off, err } headerEnd := off off, err = packUint16(rr.KeyTag, msg, off) if err != nil { return off, err } off, err = packUint8(rr.Algorithm, msg, off) if err != nil { return off, err } off, err = packUint8(rr.DigestType, msg, off) if err != nil { return off, err } off, err = packStringHex(rr.Digest, msg, off) if err != nil { return off, err } rr.Header().Rdlength = uint16(off - headerEnd) return off, nil } func (rr *TALINK) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { off, err := rr.Hdr.pack(msg, off, compression, compress) if err != nil { return off, err } headerEnd := off off, err = PackDomainName(rr.PreviousName, msg, off, compression, compress) if err != nil { return off, err } off, err = PackDomainName(rr.NextName, msg, off, compression, compress) if err != nil { return off, err } rr.Header().Rdlength = uint16(off - headerEnd) return off, nil } func (rr *TKEY) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { off, err := rr.Hdr.pack(msg, off, compression, compress) if err != nil { return off, err } headerEnd := off off, err = PackDomainName(rr.Algorithm, msg, off, compression, compress) if err != nil { return off, err } off, err = packUint32(rr.Inception, msg, off) if err != nil { return off, err } off, err = packUint32(rr.Expiration, msg, off) if err != nil { return off, err } off, err = packUint16(rr.Mode, msg, off) if err != nil { return off, err } off, err = packUint16(rr.Error, msg, off) if err != nil { return off, err } off, err = packUint16(rr.KeySize, msg, off) if err != nil { return off, err } off, err = packString(rr.Key, msg, off) if err != nil { return off, err } off, err = packUint16(rr.OtherLen, msg, off) if err != nil { return off, err } off, err = packString(rr.OtherData, msg, off) if err != nil { return off, err } rr.Header().Rdlength = uint16(off - headerEnd) return off, nil } func (rr *TLSA) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { off, err := rr.Hdr.pack(msg, off, compression, compress) if err != nil { return off, err } headerEnd := off off, err = packUint8(rr.Usage, msg, off) if err != nil { return off, err } off, err = packUint8(rr.Selector, msg, off) if err != nil { return off, err } off, err = packUint8(rr.MatchingType, msg, off) if err != nil { return off, err } off, err = packStringHex(rr.Certificate, msg, off) if err != nil { return off, err } rr.Header().Rdlength = uint16(off - headerEnd) return off, nil } func (rr *TSIG) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { off, err := rr.Hdr.pack(msg, off, compression, compress) if err != nil { return off, err } headerEnd := off off, err = PackDomainName(rr.Algorithm, msg, off, compression, compress) if err != nil { return off, err } off, err = packUint48(rr.TimeSigned, msg, off) if err != nil { return off, err } off, err = packUint16(rr.Fudge, msg, off) if err != nil { return off, err } off, err = packUint16(rr.MACSize, msg, off) if err != nil { return off, err } off, err = packStringHex(rr.MAC, msg, off) if err != nil { return off, err } off, err = packUint16(rr.OrigId, msg, off) if err != nil { return off, err } off, err = packUint16(rr.Error, msg, off) if err != nil { return off, err } off, err = packUint16(rr.OtherLen, msg, off) if err != nil { return off, err } off, err = packStringHex(rr.OtherData, msg, off) if err != nil { return off, err } rr.Header().Rdlength = uint16(off - headerEnd) return off, nil } func (rr *TXT) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { off, err := rr.Hdr.pack(msg, off, compression, compress) if err != nil { return off, err } headerEnd := off off, err = packStringTxt(rr.Txt, msg, off) if err != nil { return off, err } rr.Header().Rdlength = uint16(off - headerEnd) return off, nil } func (rr *UID) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { off, err := rr.Hdr.pack(msg, off, compression, compress) if err != nil { return off, err } headerEnd := off off, err = packUint32(rr.Uid, msg, off) if err != nil { return off, err } rr.Header().Rdlength = uint16(off - headerEnd) return off, nil } func (rr *UINFO) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { off, err := rr.Hdr.pack(msg, off, compression, compress) if err != nil { return off, err } headerEnd := off off, err = packString(rr.Uinfo, msg, off) if err != nil { return off, err } rr.Header().Rdlength = uint16(off - headerEnd) return off, nil } func (rr *URI) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { off, err := rr.Hdr.pack(msg, off, compression, compress) if err != nil { return off, err } headerEnd := off off, err = packUint16(rr.Priority, msg, off) if err != nil { return off, err } off, err = packUint16(rr.Weight, msg, off) if err != nil { return off, err } off, err = packStringOctet(rr.Target, msg, off) if err != nil { return off, err } rr.Header().Rdlength = uint16(off - headerEnd) return off, nil } func (rr *X25) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { off, err := rr.Hdr.pack(msg, off, compression, compress) if err != nil { return off, err } headerEnd := off off, err = packString(rr.PSDNAddress, msg, off) if err != nil { return off, err } rr.Header().Rdlength = uint16(off - headerEnd) return off, nil } // unpack*() functions func unpackA(h RR_Header, msg []byte, off int) (RR, int, error) { rr := new(A) rr.Hdr = h if noRdata(h) { return rr, off, nil } var err error rdStart := off _ = rdStart rr.A, off, err = unpackDataA(msg, off) if err != nil { return rr, off, err } return rr, off, err } func unpackAAAA(h RR_Header, msg []byte, off int) (RR, int, error) { rr := new(AAAA) rr.Hdr = h if noRdata(h) { return rr, off, nil } var err error rdStart := off _ = rdStart rr.AAAA, off, err = unpackDataAAAA(msg, off) if err != nil { return rr, off, err } return rr, off, err } func unpackAFSDB(h RR_Header, msg []byte, off int) (RR, int, error) { rr := new(AFSDB) rr.Hdr = h if noRdata(h) { return rr, off, nil } var err error rdStart := off _ = rdStart rr.Subtype, off, err = unpackUint16(msg, off) if err != nil { return rr, off, err } if off == len(msg) { return rr, off, nil } rr.Hostname, off, err = UnpackDomainName(msg, off) if err != nil { return rr, off, err } return rr, off, err } func unpackANY(h RR_Header, msg []byte, off int) (RR, int, error) { rr := new(ANY) rr.Hdr = h if noRdata(h) { return rr, off, nil } var err error rdStart := off _ = rdStart return rr, off, err } func unpackCAA(h RR_Header, msg []byte, off int) (RR, int, error) { rr := new(CAA) rr.Hdr = h if noRdata(h) { return rr, off, nil } var err error rdStart := off _ = rdStart rr.Flag, off, err = unpackUint8(msg, off) if err != nil { return rr, off, err } if off == len(msg) { return rr, off, nil } rr.Tag, off, err = unpackString(msg, off) if err != nil { return rr, off, err } if off == len(msg) { return rr, off, nil } rr.Value, off, err = unpackStringOctet(msg, off) if err != nil { return rr, off, err } return rr, off, err } func unpackCDNSKEY(h RR_Header, msg []byte, off int) (RR, int, error) { rr := new(CDNSKEY) rr.Hdr = h if noRdata(h) { return rr, off, nil } var err error rdStart := off _ = rdStart rr.Flags, off, err = unpackUint16(msg, off) if err != nil { return rr, off, err } if off == len(msg) { return rr, off, nil } rr.Protocol, off, err = unpackUint8(msg, off) if err != nil { return rr, off, err } if off == len(msg) { return rr, off, nil } rr.Algorithm, off, err = unpackUint8(msg, off) if err != nil { return rr, off, err } if off == len(msg) { return rr, off, nil } rr.PublicKey, off, err = unpackStringBase64(msg, off, rdStart+int(rr.Hdr.Rdlength)) if err != nil { return rr, off, err } return rr, off, err } func unpackCDS(h RR_Header, msg []byte, off int) (RR, int, error) { rr := new(CDS) rr.Hdr = h if noRdata(h) { return rr, off, nil } var err error rdStart := off _ = rdStart rr.KeyTag, off, err = unpackUint16(msg, off) if err != nil { return rr, off, err } if off == len(msg) { return rr, off, nil } rr.Algorithm, off, err = unpackUint8(msg, off) if err != nil { return rr, off, err } if off == len(msg) { return rr, off, nil } rr.DigestType, off, err = unpackUint8(msg, off) if err != nil { return rr, off, err } if off == len(msg) { return rr, off, nil } rr.Digest, off, err = unpackStringHex(msg, off, rdStart+int(rr.Hdr.Rdlength)) if err != nil { return rr, off, err } return rr, off, err } func unpackCERT(h RR_Header, msg []byte, off int) (RR, int, error) { rr := new(CERT) rr.Hdr = h if noRdata(h) { return rr, off, nil } var err error rdStart := off _ = rdStart rr.Type, off, err = unpackUint16(msg, off) if err != nil { return rr, off, err } if off == len(msg) { return rr, off, nil } rr.KeyTag, off, err = unpackUint16(msg, off) if err != nil { return rr, off, err } if off == len(msg) { return rr, off, nil } rr.Algorithm, off, err = unpackUint8(msg, off) if err != nil { return rr, off, err } if off == len(msg) { return rr, off, nil } rr.Certificate, off, err = unpackStringBase64(msg, off, rdStart+int(rr.Hdr.Rdlength)) if err != nil { return rr, off, err } return rr, off, err } func unpackCNAME(h RR_Header, msg []byte, off int) (RR, int, error) { rr := new(CNAME) rr.Hdr = h if noRdata(h) { return rr, off, nil } var err error rdStart := off _ = rdStart rr.Target, off, err = UnpackDomainName(msg, off) if err != nil { return rr, off, err } return rr, off, err } func unpackDHCID(h RR_Header, msg []byte, off int) (RR, int, error) { rr := new(DHCID) rr.Hdr = h if noRdata(h) { return rr, off, nil } var err error rdStart := off _ = rdStart rr.Digest, off, err = unpackStringBase64(msg, off, rdStart+int(rr.Hdr.Rdlength)) if err != nil { return rr, off, err } return rr, off, err } func unpackDLV(h RR_Header, msg []byte, off int) (RR, int, error) { rr := new(DLV) rr.Hdr = h if noRdata(h) { return rr, off, nil } var err error rdStart := off _ = rdStart rr.KeyTag, off, err = unpackUint16(msg, off) if err != nil { return rr, off, err } if off == len(msg) { return rr, off, nil } rr.Algorithm, off, err = unpackUint8(msg, off) if err != nil { return rr, off, err } if off == len(msg) { return rr, off, nil } rr.DigestType, off, err = unpackUint8(msg, off) if err != nil { return rr, off, err } if off == len(msg) { return rr, off, nil } rr.Digest, off, err = unpackStringHex(msg, off, rdStart+int(rr.Hdr.Rdlength)) if err != nil { return rr, off, err } return rr, off, err } func unpackDNAME(h RR_Header, msg []byte, off int) (RR, int, error) { rr := new(DNAME) rr.Hdr = h if noRdata(h) { return rr, off, nil } var err error rdStart := off _ = rdStart rr.Target, off, err = UnpackDomainName(msg, off) if err != nil { return rr, off, err } return rr, off, err } func unpackDNSKEY(h RR_Header, msg []byte, off int) (RR, int, error) { rr := new(DNSKEY) rr.Hdr = h if noRdata(h) { return rr, off, nil } var err error rdStart := off _ = rdStart rr.Flags, off, err = unpackUint16(msg, off) if err != nil { return rr, off, err } if off == len(msg) { return rr, off, nil } rr.Protocol, off, err = unpackUint8(msg, off) if err != nil { return rr, off, err } if off == len(msg) { return rr, off, nil } rr.Algorithm, off, err = unpackUint8(msg, off) if err != nil { return rr, off, err } if off == len(msg) { return rr, off, nil } rr.PublicKey, off, err = unpackStringBase64(msg, off, rdStart+int(rr.Hdr.Rdlength)) if err != nil { return rr, off, err } return rr, off, err } func unpackDS(h RR_Header, msg []byte, off int) (RR, int, error) { rr := new(DS) rr.Hdr = h if noRdata(h) { return rr, off, nil } var err error rdStart := off _ = rdStart rr.KeyTag, off, err = unpackUint16(msg, off) if err != nil { return rr, off, err } if off == len(msg) { return rr, off, nil } rr.Algorithm, off, err = unpackUint8(msg, off) if err != nil { return rr, off, err } if off == len(msg) { return rr, off, nil } rr.DigestType, off, err = unpackUint8(msg, off) if err != nil { return rr, off, err } if off == len(msg) { return rr, off, nil } rr.Digest, off, err = unpackStringHex(msg, off, rdStart+int(rr.Hdr.Rdlength)) if err != nil { return rr, off, err } return rr, off, err } func unpackEID(h RR_Header, msg []byte, off int) (RR, int, error) { rr := new(EID) rr.Hdr = h if noRdata(h) { return rr, off, nil } var err error rdStart := off _ = rdStart rr.Endpoint, off, err = unpackStringHex(msg, off, rdStart+int(rr.Hdr.Rdlength)) if err != nil { return rr, off, err } return rr, off, err } func unpackEUI48(h RR_Header, msg []byte, off int) (RR, int, error) { rr := new(EUI48) rr.Hdr = h if noRdata(h) { return rr, off, nil } var err error rdStart := off _ = rdStart rr.Address, off, err = unpackUint48(msg, off) if err != nil { return rr, off, err } return rr, off, err } func unpackEUI64(h RR_Header, msg []byte, off int) (RR, int, error) { rr := new(EUI64) rr.Hdr = h if noRdata(h) { return rr, off, nil } var err error rdStart := off _ = rdStart rr.Address, off, err = unpackUint64(msg, off) if err != nil { return rr, off, err } return rr, off, err } func unpackGID(h RR_Header, msg []byte, off int) (RR, int, error) { rr := new(GID) rr.Hdr = h if noRdata(h) { return rr, off, nil } var err error rdStart := off _ = rdStart rr.Gid, off, err = unpackUint32(msg, off) if err != nil { return rr, off, err } return rr, off, err } func unpackGPOS(h RR_Header, msg []byte, off int) (RR, int, error) { rr := new(GPOS) rr.Hdr = h if noRdata(h) { return rr, off, nil } var err error rdStart := off _ = rdStart rr.Longitude, off, err = unpackString(msg, off) if err != nil { return rr, off, err } if off == len(msg) { return rr, off, nil } rr.Latitude, off, err = unpackString(msg, off) if err != nil { return rr, off, err } if off == len(msg) { return rr, off, nil } rr.Altitude, off, err = unpackString(msg, off) if err != nil { return rr, off, err } return rr, off, err } func unpackHINFO(h RR_Header, msg []byte, off int) (RR, int, error) { rr := new(HINFO) rr.Hdr = h if noRdata(h) { return rr, off, nil } var err error rdStart := off _ = rdStart rr.Cpu, off, err = unpackString(msg, off) if err != nil { return rr, off, err } if off == len(msg) { return rr, off, nil } rr.Os, off, err = unpackString(msg, off) if err != nil { return rr, off, err } return rr, off, err } func unpackHIP(h RR_Header, msg []byte, off int) (RR, int, error) { rr := new(HIP) rr.Hdr = h if noRdata(h) { return rr, off, nil } var err error rdStart := off _ = rdStart rr.HitLength, off, err = unpackUint8(msg, off) if err != nil { return rr, off, err } if off == len(msg) { return rr, off, nil } rr.PublicKeyAlgorithm, off, err = unpackUint8(msg, off) if err != nil { return rr, off, err } if off == len(msg) { return rr, off, nil } rr.PublicKeyLength, off, err = unpackUint16(msg, off) if err != nil { return rr, off, err } if off == len(msg) { return rr, off, nil } rr.Hit, off, err = unpackStringHex(msg, off, off+int(rr.HitLength)) if err != nil { return rr, off, err } rr.PublicKey, off, err = unpackStringBase64(msg, off, off+int(rr.PublicKeyLength)) if err != nil { return rr, off, err } rr.RendezvousServers, off, err = unpackDataDomainNames(msg, off, rdStart+int(rr.Hdr.Rdlength)) if err != nil { return rr, off, err } return rr, off, err } func unpackKEY(h RR_Header, msg []byte, off int) (RR, int, error) { rr := new(KEY) rr.Hdr = h if noRdata(h) { return rr, off, nil } var err error rdStart := off _ = rdStart rr.Flags, off, err = unpackUint16(msg, off) if err != nil { return rr, off, err } if off == len(msg) { return rr, off, nil } rr.Protocol, off, err = unpackUint8(msg, off) if err != nil { return rr, off, err } if off == len(msg) { return rr, off, nil } rr.Algorithm, off, err = unpackUint8(msg, off) if err != nil { return rr, off, err } if off == len(msg) { return rr, off, nil } rr.PublicKey, off, err = unpackStringBase64(msg, off, rdStart+int(rr.Hdr.Rdlength)) if err != nil { return rr, off, err } return rr, off, err } func unpackKX(h RR_Header, msg []byte, off int) (RR, int, error) { rr := new(KX) rr.Hdr = h if noRdata(h) { return rr, off, nil } var err error rdStart := off _ = rdStart rr.Preference, off, err = unpackUint16(msg, off) if err != nil { return rr, off, err } if off == len(msg) { return rr, off, nil } rr.Exchanger, off, err = UnpackDomainName(msg, off) if err != nil { return rr, off, err } return rr, off, err } func unpackL32(h RR_Header, msg []byte, off int) (RR, int, error) { rr := new(L32) rr.Hdr = h if noRdata(h) { return rr, off, nil } var err error rdStart := off _ = rdStart rr.Preference, off, err = unpackUint16(msg, off) if err != nil { return rr, off, err } if off == len(msg) { return rr, off, nil } rr.Locator32, off, err = unpackDataA(msg, off) if err != nil { return rr, off, err } return rr, off, err } func unpackL64(h RR_Header, msg []byte, off int) (RR, int, error) { rr := new(L64) rr.Hdr = h if noRdata(h) { return rr, off, nil } var err error rdStart := off _ = rdStart rr.Preference, off, err = unpackUint16(msg, off) if err != nil { return rr, off, err } if off == len(msg) { return rr, off, nil } rr.Locator64, off, err = unpackUint64(msg, off) if err != nil { return rr, off, err } return rr, off, err } func unpackLOC(h RR_Header, msg []byte, off int) (RR, int, error) { rr := new(LOC) rr.Hdr = h if noRdata(h) { return rr, off, nil } var err error rdStart := off _ = rdStart rr.Version, off, err = unpackUint8(msg, off) if err != nil { return rr, off, err } if off == len(msg) { return rr, off, nil } rr.Size, off, err = unpackUint8(msg, off) if err != nil { return rr, off, err } if off == len(msg) { return rr, off, nil } rr.HorizPre, off, err = unpackUint8(msg, off) if err != nil { return rr, off, err } if off == len(msg) { return rr, off, nil } rr.VertPre, off, err = unpackUint8(msg, off) if err != nil { return rr, off, err } if off == len(msg) { return rr, off, nil } rr.Latitude, off, err = unpackUint32(msg, off) if err != nil { return rr, off, err } if off == len(msg) { return rr, off, nil } rr.Longitude, off, err = unpackUint32(msg, off) if err != nil { return rr, off, err } if off == len(msg) { return rr, off, nil } rr.Altitude, off, err = unpackUint32(msg, off) if err != nil { return rr, off, err } return rr, off, err } func unpackLP(h RR_Header, msg []byte, off int) (RR, int, error) { rr := new(LP) rr.Hdr = h if noRdata(h) { return rr, off, nil } var err error rdStart := off _ = rdStart rr.Preference, off, err = unpackUint16(msg, off) if err != nil { return rr, off, err } if off == len(msg) { return rr, off, nil } rr.Fqdn, off, err = UnpackDomainName(msg, off) if err != nil { return rr, off, err } return rr, off, err } func unpackMB(h RR_Header, msg []byte, off int) (RR, int, error) { rr := new(MB) rr.Hdr = h if noRdata(h) { return rr, off, nil } var err error rdStart := off _ = rdStart rr.Mb, off, err = UnpackDomainName(msg, off) if err != nil { return rr, off, err } return rr, off, err } func unpackMD(h RR_Header, msg []byte, off int) (RR, int, error) { rr := new(MD) rr.Hdr = h if noRdata(h) { return rr, off, nil } var err error rdStart := off _ = rdStart rr.Md, off, err = UnpackDomainName(msg, off) if err != nil { return rr, off, err } return rr, off, err } func unpackMF(h RR_Header, msg []byte, off int) (RR, int, error) { rr := new(MF) rr.Hdr = h if noRdata(h) { return rr, off, nil } var err error rdStart := off _ = rdStart rr.Mf, off, err = UnpackDomainName(msg, off) if err != nil { return rr, off, err } return rr, off, err } func unpackMG(h RR_Header, msg []byte, off int) (RR, int, error) { rr := new(MG) rr.Hdr = h if noRdata(h) { return rr, off, nil } var err error rdStart := off _ = rdStart rr.Mg, off, err = UnpackDomainName(msg, off) if err != nil { return rr, off, err } return rr, off, err } func unpackMINFO(h RR_Header, msg []byte, off int) (RR, int, error) { rr := new(MINFO) rr.Hdr = h if noRdata(h) { return rr, off, nil } var err error rdStart := off _ = rdStart rr.Rmail, off, err = UnpackDomainName(msg, off) if err != nil { return rr, off, err } if off == len(msg) { return rr, off, nil } rr.Email, off, err = UnpackDomainName(msg, off) if err != nil { return rr, off, err } return rr, off, err } func unpackMR(h RR_Header, msg []byte, off int) (RR, int, error) { rr := new(MR) rr.Hdr = h if noRdata(h) { return rr, off, nil } var err error rdStart := off _ = rdStart rr.Mr, off, err = UnpackDomainName(msg, off) if err != nil { return rr, off, err } return rr, off, err } func unpackMX(h RR_Header, msg []byte, off int) (RR, int, error) { rr := new(MX) rr.Hdr = h if noRdata(h) { return rr, off, nil } var err error rdStart := off _ = rdStart rr.Preference, off, err = unpackUint16(msg, off) if err != nil { return rr, off, err } if off == len(msg) { return rr, off, nil } rr.Mx, off, err = UnpackDomainName(msg, off) if err != nil { return rr, off, err } return rr, off, err } func unpackNAPTR(h RR_Header, msg []byte, off int) (RR, int, error) { rr := new(NAPTR) rr.Hdr = h if noRdata(h) { return rr, off, nil } var err error rdStart := off _ = rdStart rr.Order, off, err = unpackUint16(msg, off) if err != nil { return rr, off, err } if off == len(msg) { return rr, off, nil } rr.Preference, off, err = unpackUint16(msg, off) if err != nil { return rr, off, err } if off == len(msg) { return rr, off, nil } rr.Flags, off, err = unpackString(msg, off) if err != nil { return rr, off, err } if off == len(msg) { return rr, off, nil } rr.Service, off, err = unpackString(msg, off) if err != nil { return rr, off, err } if off == len(msg) { return rr, off, nil } rr.Regexp, off, err = unpackString(msg, off) if err != nil { return rr, off, err } if off == len(msg) { return rr, off, nil } rr.Replacement, off, err = UnpackDomainName(msg, off) if err != nil { return rr, off, err } return rr, off, err } func unpackNID(h RR_Header, msg []byte, off int) (RR, int, error) { rr := new(NID) rr.Hdr = h if noRdata(h) { return rr, off, nil } var err error rdStart := off _ = rdStart rr.Preference, off, err = unpackUint16(msg, off) if err != nil { return rr, off, err } if off == len(msg) { return rr, off, nil } rr.NodeID, off, err = unpackUint64(msg, off) if err != nil { return rr, off, err } return rr, off, err } func unpackNIMLOC(h RR_Header, msg []byte, off int) (RR, int, error) { rr := new(NIMLOC) rr.Hdr = h if noRdata(h) { return rr, off, nil } var err error rdStart := off _ = rdStart rr.Locator, off, err = unpackStringHex(msg, off, rdStart+int(rr.Hdr.Rdlength)) if err != nil { return rr, off, err } return rr, off, err } func unpackNINFO(h RR_Header, msg []byte, off int) (RR, int, error) { rr := new(NINFO) rr.Hdr = h if noRdata(h) { return rr, off, nil } var err error rdStart := off _ = rdStart rr.ZSData, off, err = unpackStringTxt(msg, off) if err != nil { return rr, off, err } return rr, off, err } func unpackNS(h RR_Header, msg []byte, off int) (RR, int, error) { rr := new(NS) rr.Hdr = h if noRdata(h) { return rr, off, nil } var err error rdStart := off _ = rdStart rr.Ns, off, err = UnpackDomainName(msg, off) if err != nil { return rr, off, err } return rr, off, err } func unpackNSAPPTR(h RR_Header, msg []byte, off int) (RR, int, error) { rr := new(NSAPPTR) rr.Hdr = h if noRdata(h) { return rr, off, nil } var err error rdStart := off _ = rdStart rr.Ptr, off, err = UnpackDomainName(msg, off) if err != nil { return rr, off, err } return rr, off, err } func unpackNSEC(h RR_Header, msg []byte, off int) (RR, int, error) { rr := new(NSEC) rr.Hdr = h if noRdata(h) { return rr, off, nil } var err error rdStart := off _ = rdStart rr.NextDomain, off, err = UnpackDomainName(msg, off) if err != nil { return rr, off, err } if off == len(msg) { return rr, off, nil } rr.TypeBitMap, off, err = unpackDataNsec(msg, off) if err != nil { return rr, off, err } return rr, off, err } func unpackNSEC3(h RR_Header, msg []byte, off int) (RR, int, error) { rr := new(NSEC3) rr.Hdr = h if noRdata(h) { return rr, off, nil } var err error rdStart := off _ = rdStart rr.Hash, off, err = unpackUint8(msg, off) if err != nil { return rr, off, err } if off == len(msg) { return rr, off, nil } rr.Flags, off, err = unpackUint8(msg, off) if err != nil { return rr, off, err } if off == len(msg) { return rr, off, nil } rr.Iterations, off, err = unpackUint16(msg, off) if err != nil { return rr, off, err } if off == len(msg) { return rr, off, nil } rr.SaltLength, off, err = unpackUint8(msg, off) if err != nil { return rr, off, err } if off == len(msg) { return rr, off, nil } rr.Salt, off, err = unpackStringHex(msg, off, off+int(rr.SaltLength)) if err != nil { return rr, off, err } rr.HashLength, off, err = unpackUint8(msg, off) if err != nil { return rr, off, err } if off == len(msg) { return rr, off, nil } rr.NextDomain, off, err = unpackStringBase32(msg, off, off+int(rr.HashLength)) if err != nil { return rr, off, err } rr.TypeBitMap, off, err = unpackDataNsec(msg, off) if err != nil { return rr, off, err } return rr, off, err } func unpackNSEC3PARAM(h RR_Header, msg []byte, off int) (RR, int, error) { rr := new(NSEC3PARAM) rr.Hdr = h if noRdata(h) { return rr, off, nil } var err error rdStart := off _ = rdStart rr.Hash, off, err = unpackUint8(msg, off) if err != nil { return rr, off, err } if off == len(msg) { return rr, off, nil } rr.Flags, off, err = unpackUint8(msg, off) if err != nil { return rr, off, err } if off == len(msg) { return rr, off, nil } rr.Iterations, off, err = unpackUint16(msg, off) if err != nil { return rr, off, err } if off == len(msg) { return rr, off, nil } rr.SaltLength, off, err = unpackUint8(msg, off) if err != nil { return rr, off, err } if off == len(msg) { return rr, off, nil } rr.Salt, off, err = unpackStringHex(msg, off, rdStart+int(rr.Hdr.Rdlength)) if err != nil { return rr, off, err } return rr, off, err } func unpackOPENPGPKEY(h RR_Header, msg []byte, off int) (RR, int, error) { rr := new(OPENPGPKEY) rr.Hdr = h if noRdata(h) { return rr, off, nil } var err error rdStart := off _ = rdStart rr.PublicKey, off, err = unpackStringBase64(msg, off, rdStart+int(rr.Hdr.Rdlength)) if err != nil { return rr, off, err } return rr, off, err } func unpackOPT(h RR_Header, msg []byte, off int) (RR, int, error) { rr := new(OPT) rr.Hdr = h if noRdata(h) { return rr, off, nil } var err error rdStart := off _ = rdStart rr.Option, off, err = unpackDataOpt(msg, off) if err != nil { return rr, off, err } return rr, off, err } func unpackPTR(h RR_Header, msg []byte, off int) (RR, int, error) { rr := new(PTR) rr.Hdr = h if noRdata(h) { return rr, off, nil } var err error rdStart := off _ = rdStart rr.Ptr, off, err = UnpackDomainName(msg, off) if err != nil { return rr, off, err } return rr, off, err } func unpackPX(h RR_Header, msg []byte, off int) (RR, int, error) { rr := new(PX) rr.Hdr = h if noRdata(h) { return rr, off, nil } var err error rdStart := off _ = rdStart rr.Preference, off, err = unpackUint16(msg, off) if err != nil { return rr, off, err } if off == len(msg) { return rr, off, nil } rr.Map822, off, err = UnpackDomainName(msg, off) if err != nil { return rr, off, err } if off == len(msg) { return rr, off, nil } rr.Mapx400, off, err = UnpackDomainName(msg, off) if err != nil { return rr, off, err } return rr, off, err } func unpackRFC3597(h RR_Header, msg []byte, off int) (RR, int, error) { rr := new(RFC3597) rr.Hdr = h if noRdata(h) { return rr, off, nil } var err error rdStart := off _ = rdStart rr.Rdata, off, err = unpackStringHex(msg, off, rdStart+int(rr.Hdr.Rdlength)) if err != nil { return rr, off, err } return rr, off, err } func unpackRKEY(h RR_Header, msg []byte, off int) (RR, int, error) { rr := new(RKEY) rr.Hdr = h if noRdata(h) { return rr, off, nil } var err error rdStart := off _ = rdStart rr.Flags, off, err = unpackUint16(msg, off) if err != nil { return rr, off, err } if off == len(msg) { return rr, off, nil } rr.Protocol, off, err = unpackUint8(msg, off) if err != nil { return rr, off, err } if off == len(msg) { return rr, off, nil } rr.Algorithm, off, err = unpackUint8(msg, off) if err != nil { return rr, off, err } if off == len(msg) { return rr, off, nil } rr.PublicKey, off, err = unpackStringBase64(msg, off, rdStart+int(rr.Hdr.Rdlength)) if err != nil { return rr, off, err } return rr, off, err } func unpackRP(h RR_Header, msg []byte, off int) (RR, int, error) { rr := new(RP) rr.Hdr = h if noRdata(h) { return rr, off, nil } var err error rdStart := off _ = rdStart rr.Mbox, off, err = UnpackDomainName(msg, off) if err != nil { return rr, off, err } if off == len(msg) { return rr, off, nil } rr.Txt, off, err = UnpackDomainName(msg, off) if err != nil { return rr, off, err } return rr, off, err } func unpackRRSIG(h RR_Header, msg []byte, off int) (RR, int, error) { rr := new(RRSIG) rr.Hdr = h if noRdata(h) { return rr, off, nil } var err error rdStart := off _ = rdStart rr.TypeCovered, off, err = unpackUint16(msg, off) if err != nil { return rr, off, err } if off == len(msg) { return rr, off, nil } rr.Algorithm, off, err = unpackUint8(msg, off) if err != nil { return rr, off, err } if off == len(msg) { return rr, off, nil } rr.Labels, off, err = unpackUint8(msg, off) if err != nil { return rr, off, err } if off == len(msg) { return rr, off, nil } rr.OrigTtl, off, err = unpackUint32(msg, off) if err != nil { return rr, off, err } if off == len(msg) { return rr, off, nil } rr.Expiration, off, err = unpackUint32(msg, off) if err != nil { return rr, off, err } if off == len(msg) { return rr, off, nil } rr.Inception, off, err = unpackUint32(msg, off) if err != nil { return rr, off, err } if off == len(msg) { return rr, off, nil } rr.KeyTag, off, err = unpackUint16(msg, off) if err != nil { return rr, off, err } if off == len(msg) { return rr, off, nil } rr.SignerName, off, err = UnpackDomainName(msg, off) if err != nil { return rr, off, err } if off == len(msg) { return rr, off, nil } rr.Signature, off, err = unpackStringBase64(msg, off, rdStart+int(rr.Hdr.Rdlength)) if err != nil { return rr, off, err } return rr, off, err } func unpackRT(h RR_Header, msg []byte, off int) (RR, int, error) { rr := new(RT) rr.Hdr = h if noRdata(h) { return rr, off, nil } var err error rdStart := off _ = rdStart rr.Preference, off, err = unpackUint16(msg, off) if err != nil { return rr, off, err } if off == len(msg) { return rr, off, nil } rr.Host, off, err = UnpackDomainName(msg, off) if err != nil { return rr, off, err } return rr, off, err } func unpackSIG(h RR_Header, msg []byte, off int) (RR, int, error) { rr := new(SIG) rr.Hdr = h if noRdata(h) { return rr, off, nil } var err error rdStart := off _ = rdStart rr.TypeCovered, off, err = unpackUint16(msg, off) if err != nil { return rr, off, err } if off == len(msg) { return rr, off, nil } rr.Algorithm, off, err = unpackUint8(msg, off) if err != nil { return rr, off, err } if off == len(msg) { return rr, off, nil } rr.Labels, off, err = unpackUint8(msg, off) if err != nil { return rr, off, err } if off == len(msg) { return rr, off, nil } rr.OrigTtl, off, err = unpackUint32(msg, off) if err != nil { return rr, off, err } if off == len(msg) { return rr, off, nil } rr.Expiration, off, err = unpackUint32(msg, off) if err != nil { return rr, off, err } if off == len(msg) { return rr, off, nil } rr.Inception, off, err = unpackUint32(msg, off) if err != nil { return rr, off, err } if off == len(msg) { return rr, off, nil } rr.KeyTag, off, err = unpackUint16(msg, off) if err != nil { return rr, off, err } if off == len(msg) { return rr, off, nil } rr.SignerName, off, err = UnpackDomainName(msg, off) if err != nil { return rr, off, err } if off == len(msg) { return rr, off, nil } rr.Signature, off, err = unpackStringBase64(msg, off, rdStart+int(rr.Hdr.Rdlength)) if err != nil { return rr, off, err } return rr, off, err } func unpackSOA(h RR_Header, msg []byte, off int) (RR, int, error) { rr := new(SOA) rr.Hdr = h if noRdata(h) { return rr, off, nil } var err error rdStart := off _ = rdStart rr.Ns, off, err = UnpackDomainName(msg, off) if err != nil { return rr, off, err } if off == len(msg) { return rr, off, nil } rr.Mbox, off, err = UnpackDomainName(msg, off) if err != nil { return rr, off, err } if off == len(msg) { return rr, off, nil } rr.Serial, off, err = unpackUint32(msg, off) if err != nil { return rr, off, err } if off == len(msg) { return rr, off, nil } rr.Refresh, off, err = unpackUint32(msg, off) if err != nil { return rr, off, err } if off == len(msg) { return rr, off, nil } rr.Retry, off, err = unpackUint32(msg, off) if err != nil { return rr, off, err } if off == len(msg) { return rr, off, nil } rr.Expire, off, err = unpackUint32(msg, off) if err != nil { return rr, off, err } if off == len(msg) { return rr, off, nil } rr.Minttl, off, err = unpackUint32(msg, off) if err != nil { return rr, off, err } return rr, off, err } func unpackSPF(h RR_Header, msg []byte, off int) (RR, int, error) { rr := new(SPF) rr.Hdr = h if noRdata(h) { return rr, off, nil } var err error rdStart := off _ = rdStart rr.Txt, off, err = unpackStringTxt(msg, off) if err != nil { return rr, off, err } return rr, off, err } func unpackSRV(h RR_Header, msg []byte, off int) (RR, int, error) { rr := new(SRV) rr.Hdr = h if noRdata(h) { return rr, off, nil } var err error rdStart := off _ = rdStart rr.Priority, off, err = unpackUint16(msg, off) if err != nil { return rr, off, err } if off == len(msg) { return rr, off, nil } rr.Weight, off, err = unpackUint16(msg, off) if err != nil { return rr, off, err } if off == len(msg) { return rr, off, nil } rr.Port, off, err = unpackUint16(msg, off) if err != nil { return rr, off, err } if off == len(msg) { return rr, off, nil } rr.Target, off, err = UnpackDomainName(msg, off) if err != nil { return rr, off, err } return rr, off, err } func unpackSSHFP(h RR_Header, msg []byte, off int) (RR, int, error) { rr := new(SSHFP) rr.Hdr = h if noRdata(h) { return rr, off, nil } var err error rdStart := off _ = rdStart rr.Algorithm, off, err = unpackUint8(msg, off) if err != nil { return rr, off, err } if off == len(msg) { return rr, off, nil } rr.Type, off, err = unpackUint8(msg, off) if err != nil { return rr, off, err } if off == len(msg) { return rr, off, nil } rr.FingerPrint, off, err = unpackStringHex(msg, off, rdStart+int(rr.Hdr.Rdlength)) if err != nil { return rr, off, err } return rr, off, err } func unpackTA(h RR_Header, msg []byte, off int) (RR, int, error) { rr := new(TA) rr.Hdr = h if noRdata(h) { return rr, off, nil } var err error rdStart := off _ = rdStart rr.KeyTag, off, err = unpackUint16(msg, off) if err != nil { return rr, off, err } if off == len(msg) { return rr, off, nil } rr.Algorithm, off, err = unpackUint8(msg, off) if err != nil { return rr, off, err } if off == len(msg) { return rr, off, nil } rr.DigestType, off, err = unpackUint8(msg, off) if err != nil { return rr, off, err } if off == len(msg) { return rr, off, nil } rr.Digest, off, err = unpackStringHex(msg, off, rdStart+int(rr.Hdr.Rdlength)) if err != nil { return rr, off, err } return rr, off, err } func unpackTALINK(h RR_Header, msg []byte, off int) (RR, int, error) { rr := new(TALINK) rr.Hdr = h if noRdata(h) { return rr, off, nil } var err error rdStart := off _ = rdStart rr.PreviousName, off, err = UnpackDomainName(msg, off) if err != nil { return rr, off, err } if off == len(msg) { return rr, off, nil } rr.NextName, off, err = UnpackDomainName(msg, off) if err != nil { return rr, off, err } return rr, off, err } func unpackTKEY(h RR_Header, msg []byte, off int) (RR, int, error) { rr := new(TKEY) rr.Hdr = h if noRdata(h) { return rr, off, nil } var err error rdStart := off _ = rdStart rr.Algorithm, off, err = UnpackDomainName(msg, off) if err != nil { return rr, off, err } if off == len(msg) { return rr, off, nil } rr.Inception, off, err = unpackUint32(msg, off) if err != nil { return rr, off, err } if off == len(msg) { return rr, off, nil } rr.Expiration, off, err = unpackUint32(msg, off) if err != nil { return rr, off, err } if off == len(msg) { return rr, off, nil } rr.Mode, off, err = unpackUint16(msg, off) if err != nil { return rr, off, err } if off == len(msg) { return rr, off, nil } rr.Error, off, err = unpackUint16(msg, off) if err != nil { return rr, off, err } if off == len(msg) { return rr, off, nil } rr.KeySize, off, err = unpackUint16(msg, off) if err != nil { return rr, off, err } if off == len(msg) { return rr, off, nil } rr.Key, off, err = unpackString(msg, off) if err != nil { return rr, off, err } if off == len(msg) { return rr, off, nil } rr.OtherLen, off, err = unpackUint16(msg, off) if err != nil { return rr, off, err } if off == len(msg) { return rr, off, nil } rr.OtherData, off, err = unpackString(msg, off) if err != nil { return rr, off, err } return rr, off, err } func unpackTLSA(h RR_Header, msg []byte, off int) (RR, int, error) { rr := new(TLSA) rr.Hdr = h if noRdata(h) { return rr, off, nil } var err error rdStart := off _ = rdStart rr.Usage, off, err = unpackUint8(msg, off) if err != nil { return rr, off, err } if off == len(msg) { return rr, off, nil } rr.Selector, off, err = unpackUint8(msg, off) if err != nil { return rr, off, err } if off == len(msg) { return rr, off, nil } rr.MatchingType, off, err = unpackUint8(msg, off) if err != nil { return rr, off, err } if off == len(msg) { return rr, off, nil } rr.Certificate, off, err = unpackStringHex(msg, off, rdStart+int(rr.Hdr.Rdlength)) if err != nil { return rr, off, err } return rr, off, err } func unpackTSIG(h RR_Header, msg []byte, off int) (RR, int, error) { rr := new(TSIG) rr.Hdr = h if noRdata(h) { return rr, off, nil } var err error rdStart := off _ = rdStart rr.Algorithm, off, err = UnpackDomainName(msg, off) if err != nil { return rr, off, err } if off == len(msg) { return rr, off, nil } rr.TimeSigned, off, err = unpackUint48(msg, off) if err != nil { return rr, off, err } if off == len(msg) { return rr, off, nil } rr.Fudge, off, err = unpackUint16(msg, off) if err != nil { return rr, off, err } if off == len(msg) { return rr, off, nil } rr.MACSize, off, err = unpackUint16(msg, off) if err != nil { return rr, off, err } if off == len(msg) { return rr, off, nil } rr.MAC, off, err = unpackStringHex(msg, off, off+int(rr.MACSize)) if err != nil { return rr, off, err } rr.OrigId, off, err = unpackUint16(msg, off) if err != nil { return rr, off, err } if off == len(msg) { return rr, off, nil } rr.Error, off, err = unpackUint16(msg, off) if err != nil { return rr, off, err } if off == len(msg) { return rr, off, nil } rr.OtherLen, off, err = unpackUint16(msg, off) if err != nil { return rr, off, err } if off == len(msg) { return rr, off, nil } rr.OtherData, off, err = unpackStringHex(msg, off, off+int(rr.OtherLen)) if err != nil { return rr, off, err } return rr, off, err } func unpackTXT(h RR_Header, msg []byte, off int) (RR, int, error) { rr := new(TXT) rr.Hdr = h if noRdata(h) { return rr, off, nil } var err error rdStart := off _ = rdStart rr.Txt, off, err = unpackStringTxt(msg, off) if err != nil { return rr, off, err } return rr, off, err } func unpackUID(h RR_Header, msg []byte, off int) (RR, int, error) { rr := new(UID) rr.Hdr = h if noRdata(h) { return rr, off, nil } var err error rdStart := off _ = rdStart rr.Uid, off, err = unpackUint32(msg, off) if err != nil { return rr, off, err } return rr, off, err } func unpackUINFO(h RR_Header, msg []byte, off int) (RR, int, error) { rr := new(UINFO) rr.Hdr = h if noRdata(h) { return rr, off, nil } var err error rdStart := off _ = rdStart rr.Uinfo, off, err = unpackString(msg, off) if err != nil { return rr, off, err } return rr, off, err } func unpackURI(h RR_Header, msg []byte, off int) (RR, int, error) { rr := new(URI) rr.Hdr = h if noRdata(h) { return rr, off, nil } var err error rdStart := off _ = rdStart rr.Priority, off, err = unpackUint16(msg, off) if err != nil { return rr, off, err } if off == len(msg) { return rr, off, nil } rr.Weight, off, err = unpackUint16(msg, off) if err != nil { return rr, off, err } if off == len(msg) { return rr, off, nil } rr.Target, off, err = unpackStringOctet(msg, off) if err != nil { return rr, off, err } return rr, off, err } func unpackX25(h RR_Header, msg []byte, off int) (RR, int, error) { rr := new(X25) rr.Hdr = h if noRdata(h) { return rr, off, nil } var err error rdStart := off _ = rdStart rr.PSDNAddress, off, err = unpackString(msg, off) if err != nil { return rr, off, err } return rr, off, err } var typeToUnpack = map[uint16]func(RR_Header, []byte, int) (RR, int, error){ TypeA: unpackA, TypeAAAA: unpackAAAA, TypeAFSDB: unpackAFSDB, TypeANY: unpackANY, TypeCAA: unpackCAA, TypeCDNSKEY: unpackCDNSKEY, TypeCDS: unpackCDS, TypeCERT: unpackCERT, TypeCNAME: unpackCNAME, TypeDHCID: unpackDHCID, TypeDLV: unpackDLV, TypeDNAME: unpackDNAME, TypeDNSKEY: unpackDNSKEY, TypeDS: unpackDS, TypeEID: unpackEID, TypeEUI48: unpackEUI48, TypeEUI64: unpackEUI64, TypeGID: unpackGID, TypeGPOS: unpackGPOS, TypeHINFO: unpackHINFO, TypeHIP: unpackHIP, TypeKEY: unpackKEY, TypeKX: unpackKX, TypeL32: unpackL32, TypeL64: unpackL64, TypeLOC: unpackLOC, TypeLP: unpackLP, TypeMB: unpackMB, TypeMD: unpackMD, TypeMF: unpackMF, TypeMG: unpackMG, TypeMINFO: unpackMINFO, TypeMR: unpackMR, TypeMX: unpackMX, TypeNAPTR: unpackNAPTR, TypeNID: unpackNID, TypeNIMLOC: unpackNIMLOC, TypeNINFO: unpackNINFO, TypeNS: unpackNS, TypeNSAPPTR: unpackNSAPPTR, TypeNSEC: unpackNSEC, TypeNSEC3: unpackNSEC3, TypeNSEC3PARAM: unpackNSEC3PARAM, TypeOPENPGPKEY: unpackOPENPGPKEY, TypeOPT: unpackOPT, TypePTR: unpackPTR, TypePX: unpackPX, TypeRKEY: unpackRKEY, TypeRP: unpackRP, TypeRRSIG: unpackRRSIG, TypeRT: unpackRT, TypeSIG: unpackSIG, TypeSOA: unpackSOA, TypeSPF: unpackSPF, TypeSRV: unpackSRV, TypeSSHFP: unpackSSHFP, TypeTA: unpackTA, TypeTALINK: unpackTALINK, TypeTKEY: unpackTKEY, TypeTLSA: unpackTLSA, TypeTSIG: unpackTSIG, TypeTXT: unpackTXT, TypeUID: unpackUID, TypeUINFO: unpackUINFO, TypeURI: unpackURI, TypeX25: unpackX25, }
clairew/kubernetes
vendor/github.com/miekg/dns/zmsg.go
GO
apache-2.0
74,603
<?php /* * This file is part of the Symfony package. * * (c) Fabien Potencier <fabien@symfony.com> * * For the full copyright and license information, please view the LICENSE * file that was distributed with this source code. */ namespace Symfony\Component\Console\Logger; use Psr\Log\AbstractLogger; use Psr\Log\InvalidArgumentException; use Psr\Log\LogLevel; use Symfony\Component\Console\Output\OutputInterface; use Symfony\Component\Console\Output\ConsoleOutputInterface; /** * PSR-3 compliant console logger. * * @author Kรฉvin Dunglas <dunglas@gmail.com> * * @link http://www.php-fig.org/psr/psr-3/ */ class ConsoleLogger extends AbstractLogger { const INFO = 'info'; const ERROR = 'error'; /** * @var OutputInterface */ private $output; /** * @var array */ private $verbosityLevelMap = array( LogLevel::EMERGENCY => OutputInterface::VERBOSITY_NORMAL, LogLevel::ALERT => OutputInterface::VERBOSITY_NORMAL, LogLevel::CRITICAL => OutputInterface::VERBOSITY_NORMAL, LogLevel::ERROR => OutputInterface::VERBOSITY_NORMAL, LogLevel::WARNING => OutputInterface::VERBOSITY_NORMAL, LogLevel::NOTICE => OutputInterface::VERBOSITY_VERBOSE, LogLevel::INFO => OutputInterface::VERBOSITY_VERY_VERBOSE, LogLevel::DEBUG => OutputInterface::VERBOSITY_DEBUG, ); /** * @var array */ private $formatLevelMap = array( LogLevel::EMERGENCY => self::ERROR, LogLevel::ALERT => self::ERROR, LogLevel::CRITICAL => self::ERROR, LogLevel::ERROR => self::ERROR, LogLevel::WARNING => self::INFO, LogLevel::NOTICE => self::INFO, LogLevel::INFO => self::INFO, LogLevel::DEBUG => self::INFO, ); /** * @param OutputInterface $output * @param array $verbosityLevelMap * @param array $formatLevelMap */ public function __construct(OutputInterface $output, array $verbosityLevelMap = array(), array $formatLevelMap = array()) { $this->output = $output; $this->verbosityLevelMap = $verbosityLevelMap + $this->verbosityLevelMap; $this->formatLevelMap = $formatLevelMap + $this->formatLevelMap; } /** * {@inheritdoc} */ public function log($level, $message, array $context = array()) { if (!isset($this->verbosityLevelMap[$level])) { throw new InvalidArgumentException(sprintf('The log level "%s" does not exist.', $level)); } // Write to the error output if necessary and available if ($this->formatLevelMap[$level] === self::ERROR && $this->output instanceof ConsoleOutputInterface) { $output = $this->output->getErrorOutput(); } else { $output = $this->output; } if ($output->getVerbosity() >= $this->verbosityLevelMap[$level]) { $output->writeln(sprintf('<%1$s>[%2$s] %3$s</%1$s>', $this->formatLevelMap[$level], $level, $this->interpolate($message, $context))); } } /** * Interpolates context values into the message placeholders. * * @author PHP Framework Interoperability Group * * @param string $message * @param array $context * * @return string */ private function interpolate($message, array $context) { // build a replacement array with braces around the context keys $replace = array(); foreach ($context as $key => $val) { if (!is_array($val) && (!is_object($val) || method_exists($val, '__toString'))) { $replace[sprintf('{%s}', $key)] = $val; } } // interpolate replacement values into the message and return return strtr($message, $replace); } }
diandianxiyu/LaravelApi
vendor/symfony/console/Logger/ConsoleLogger.php
PHP
apache-2.0
3,818
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.ignite.tools.javadoc; import com.sun.javadoc.Tag; import com.sun.tools.doclets.Taglet; import java.io.File; import java.util.Map; /** * Represents {@ignitelink Class} tag. This tag can * be used as replacement of {@link Class} tag that references to the Ignite class that is not in classpath. * Class and its arguments should have fully qualified names. */ public class IgniteLinkTaglet implements Taglet { /** */ private static final String NAME = "ignitelink"; /** * Return the name of this custom tag. */ @Override public String getName() { return NAME; } /** * @return true since this tag can be used in a field doc comment. */ @Override public boolean inField() { return true; } /** * @return true since this tag can be used in a constructor doc comment. */ @Override public boolean inConstructor() { return true; } /** * @return true since this tag can be used in a method doc comment. */ @Override public boolean inMethod() { return true; } /** * @return true since this tag can be used in an overview doc comment. */ @Override public boolean inOverview() { return true; } /** * @return true since this tag can be used in a package doc comment. */ @Override public boolean inPackage() { return true; } /** * @return true since this. */ @Override public boolean inType() { return true; } /** * Will return true since this is an inline tag. * * @return true since this is an inline tag. */ @Override public boolean isInlineTag() { return true; } /** * Register this Taglet. * * @param tagletMap the map to register this tag to. */ public static void register(Map<String, IgniteLinkTaglet> tagletMap) { IgniteLinkTaglet tag = new IgniteLinkTaglet(); Taglet t = tagletMap.get(tag.getName()); if (t != null) tagletMap.remove(tag.getName()); tagletMap.put(tag.getName(), tag); } /** * Given the <code>Tag</code> representation of this custom tag, return its string representation. * <p> * Input: org.apache.ignite.grid.spi.indexing.h2.GridH2IndexingSpi#setIndexCustomFunctionClasses(Class[]) * <p> * Output: <a href="../../../../../org/apache/ignite/grid/spi/indexing/h2/GridH2IndexingSpi.html# * setIndexCustomFunctionClasses(java.lang.Class...)"> * <code>GridH2IndexingSpi.setIndexCustomFunctionClasses(java.lang.Class[])</code></a> * * @param tag <code>Tag</code> representation of this custom tag. */ @Override public String toString(Tag tag) { if (tag.text() == null || tag.text().isEmpty()) return ""; File f = tag.position().file(); String curClass = f == null ? "" : f.getAbsolutePath().replace(File.separator, "."); String packPref = "src.main.java."; int idx = curClass.indexOf(packPref); StringBuilder path = new StringBuilder(); if (idx != -1) { curClass = curClass.substring(idx + packPref.length()); for (int i = 0, n = curClass.split("\\.").length - 2; i < n; i++) path.append("../"); } String[] tokens = tag.text().split("#"); int lastIdx = tokens[0].lastIndexOf('.'); String simpleClsName = lastIdx != -1 && lastIdx + 1 < tokens[0].length() ? tokens[0].substring(lastIdx + 1) : tokens[0]; String fullyQClsName = tokens[0].replace(".", "/"); return "<a href=\"" + path.toString() + fullyQClsName + ".html" + (tokens.length > 1 ? ("#" + tokens[1].replace("[]", "...")) : "") + "\"><code>" + simpleClsName + (tokens.length > 1 ? ("." + tokens[1]) : "") + "</code></a>"; } /** * This method should not be called since arrays of inline tags do not * exist. Method {@link #toString(Tag)} should be used to convert this * inline tag to a string. * * @param tags the array of <code>Tag</code>s representing of this custom tag. */ @Override public String toString(Tag[] tags) { return null; } }
afinka77/ignite
modules/tools/src/main/java/org/apache/ignite/tools/javadoc/IgniteLinkTaglet.java
Java
apache-2.0
5,097
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.ignite.yarn; import org.apache.hadoop.yarn.api.records.ContainerId; import org.apache.hadoop.yarn.api.records.NodeId; /** * Information about launched task. */ public class IgniteContainer { /** */ public final ContainerId id; /** */ public final NodeId nodeId; /** */ public final double cpuCores; /** */ public final double mem; /** * Ignite launched task. * * @param nodeId Node id. * @param cpuCores Cpu cores count. * @param mem Memory */ public IgniteContainer(ContainerId id, NodeId nodeId, double cpuCores, double mem) { this.id = id; this.nodeId = nodeId; this.cpuCores = cpuCores; this.mem = mem; } /** * @return Id. */ public ContainerId id() { return id; } /** * @return Host. */ public NodeId nodeId() { return nodeId; } /** * @return Cores count. */ public double cpuCores() { return cpuCores; } /** * @return Memory. */ public double mem() { return mem; } /** {@inheritDoc} */ @Override public String toString() { return "IgniteTask [host=" + nodeId.getHost() + ", cpuCores=" + cpuCores + ", mem=" + mem + ']'; } }
pperalta/ignite
modules/yarn/src/main/java/org/apache/ignite/yarn/IgniteContainer.java
Java
apache-2.0
2,111
/** * Checks that the first two arguments are equal, or are numbers close enough to be considered equal * based on a specified maximum allowable difference. * * @example assert.close(3.141, Math.PI, 0.001); * * @param Number actual * @param Number expected * @param Number maxDifference (the maximum inclusive difference allowed between the actual and expected numbers) * @param String message (optional) */ function close(actual, expected, maxDifference, message) { var actualDiff = (actual === expected) ? 0 : Math.abs(actual - expected), result = actualDiff <= maxDifference; message = message || (actual + " should be within " + maxDifference + " (inclusive) of " + expected + (result ? "" : ". Actual: " + actualDiff)); QUnit.push(result, actual, expected, message); } /** * Checks that the first two arguments are equal, or are numbers close enough to be considered equal * based on a specified maximum allowable difference percentage. * * @example assert.close.percent(155, 150, 3.4); // Difference is ~3.33% * * @param Number actual * @param Number expected * @param Number maxPercentDifference (the maximum inclusive difference percentage allowed between the actual and expected numbers) * @param String message (optional) */ close.percent = function closePercent(actual, expected, maxPercentDifference, message) { var actualDiff, result; if (actual === expected) { actualDiff = 0; result = actualDiff <= maxPercentDifference; } else if (actual !== 0 && expected !== 0 && expected !== Infinity && expected !== -Infinity) { actualDiff = Math.abs(100 * (actual - expected) / expected); result = actualDiff <= maxPercentDifference; } else { // Dividing by zero (0)! Should return `false` unless the max percentage was `Infinity` actualDiff = Infinity; result = maxPercentDifference === Infinity; } message = message || (actual + " should be within " + maxPercentDifference + "% (inclusive) of " + expected + (result ? "" : ". Actual: " + actualDiff + "%")); QUnit.push(result, actual, expected, message); }; /** * Checks that the first two arguments are numbers with differences greater than the specified * minimum difference. * * @example assert.notClose(3.1, Math.PI, 0.001); * * @param Number actual * @param Number expected * @param Number minDifference (the minimum exclusive difference allowed between the actual and expected numbers) * @param String message (optional) */ function notClose(actual, expected, minDifference, message) { var actualDiff = Math.abs(actual - expected), result = actualDiff > minDifference; message = message || (actual + " should not be within " + minDifference + " (exclusive) of " + expected + (result ? "" : ". Actual: " + actualDiff)); QUnit.push(result, actual, expected, message); } /** * Checks that the first two arguments are numbers with differences greater than the specified * minimum difference percentage. * * @example assert.notClose.percent(156, 150, 3.5); // Difference is 4.0% * * @param Number actual * @param Number expected * @param Number minPercentDifference (the minimum exclusive difference percentage allowed between the actual and expected numbers) * @param String message (optional) */ notClose.percent = function notClosePercent(actual, expected, minPercentDifference, message) { var actualDiff, result; if (actual === expected) { actualDiff = 0; result = actualDiff > minPercentDifference; } else if (actual !== 0 && expected !== 0 && expected !== Infinity && expected !== -Infinity) { actualDiff = Math.abs(100 * (actual - expected) / expected); result = actualDiff > minPercentDifference; } else { // Dividing by zero (0)! Should only return `true` if the min percentage was `Infinity` actualDiff = Infinity; result = minPercentDifference !== Infinity; } message = message || (actual + " should not be within " + minPercentDifference + "% (exclusive) of " + expected + (result ? "" : ". Actual: " + actualDiff + "%")); QUnit.push(result, actual, expected, message); }; QUnit.extend(QUnit.assert, { close: close, notClose: notClose });
doroftec/EasyDayProject
EasyDayProject/src/main/webapp/node_modules/jquery-ui/external/qunit-assert-close/qunit-assert-close.js
JavaScript
apache-2.0
4,181
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.ignite.spi.checkpoint.jdbc; import org.apache.ignite.spi.checkpoint.GridCheckpointSpiAbstractTest; import org.apache.ignite.testframework.junits.spi.GridSpiTest; import org.hsqldb.jdbc.jdbcDataSource; /** * Grid jdbc checkpoint SPI default config self test. */ @GridSpiTest(spi = JdbcCheckpointSpi.class, group = "Checkpoint SPI") public class JdbcCheckpointSpiDefaultConfigSelfTest extends GridCheckpointSpiAbstractTest<JdbcCheckpointSpi> { /** {@inheritDoc} */ @Override protected void spiConfigure(JdbcCheckpointSpi spi) throws Exception { jdbcDataSource ds = new jdbcDataSource(); ds.setDatabase("jdbc:hsqldb:mem:gg_test_" + getClass().getSimpleName()); ds.setUser("sa"); ds.setPassword(""); spi.setDataSource(ds); // Default BLOB type is not valid for hsqldb. spi.setValueFieldType("longvarbinary"); super.spiConfigure(spi); } }
alexzaitzev/ignite
modules/core/src/test/java/org/apache/ignite/spi/checkpoint/jdbc/JdbcCheckpointSpiDefaultConfigSelfTest.java
Java
apache-2.0
1,744
package test func init() { testCases = append(testCases, (*bool)(nil), (*boolAlias)(nil), (*byte)(nil), (*byteAlias)(nil), (*float32)(nil), (*float32Alias)(nil), (*float64)(nil), (*float64Alias)(nil), (*int8)(nil), (*int8Alias)(nil), (*int16)(nil), (*int16Alias)(nil), (*int32)(nil), (*int32Alias)(nil), (*int64)(nil), (*int64Alias)(nil), (*string)(nil), (*stringAlias)(nil), (*uint8)(nil), (*uint8Alias)(nil), (*uint16)(nil), (*uint16Alias)(nil), (*uint32)(nil), (*uint32Alias)(nil), (*uintptr)(nil), (*uintptrAlias)(nil), (*struct { A int8Alias `json:"a"` B int16Alias `json:"stream"` C int32Alias `json:"c"` D int64Alias `json:"d"` E uintAlias `json:"e"` F uint16Alias `json:"f"` G uint32Alias `json:"g"` H uint64Alias `json:"h"` I float32Alias `json:"i"` J float64Alias `json:"j"` K stringAlias `json:"k"` L intAlias `json:"l"` M uintAlias `json:"m"` N boolAlias `json:"n"` O uintptrAlias `json:"o"` })(nil), ) } type boolAlias bool type byteAlias byte type float32Alias float32 type float64Alias float64 type ptrFloat64Alias *float64 type int8Alias int8 type int16Alias int16 type int32Alias int32 type ptrInt32Alias *int32 type int64Alias int64 type stringAlias string type ptrStringAlias *string type uint8Alias uint8 type uint16Alias uint16 type uint32Alias uint32 type uintptrAlias uintptr type uintAlias uint type uint64Alias uint64 type intAlias int
markllama/origin
vendor/github.com/json-iterator/go/type_tests/builtin_test.go
GO
apache-2.0
1,492
ydn.debug.log('ydn.db', 500); // // //asyncTest("Get index", function () { // expect(4); // // var db = new ydn.db.Storage(db_name_put, schema_index); // console.log(db.getSchema()); // var value_1 = 'test ' + Math.random(); // var value_2 = 'test ' + Math.random(); // db.put(store_inline, {id: 1, value: value_1, tag: 'a'}); // db.put(store_inline, {id: 2, value: value_2, tag: 'b'}); // db.put(store_inline, {id: 3, value: value_2, tag: 'c'}); // var keyRange = ydn.db.KeyRange.only('a'); // var dir = 'next'; // var q = db.query().from(store_inline, index_name, dir, keyRange); // db.fetch(q).then(function (x) { // console.log(db.getSchema()); // equal(1, x.length, 'result length'); // equal('a', x[0].id, 'a value'); // var keyRange = ydn.db.KeyRange.only('c'); // var q = db.query().from(store_inline, index_name, dir, keyRange); // db.fetch(q).then(function (x) { // equal(1, x.length, 'result length'); // equal('c', x[0].id, 'c value'); // start(); // }, function (e) { // ok(false, e.message); // start(); // }); // }, function (e) { // ok(false, e.message); // start(); // }); // //});
brendajimo/tallermoodle
lib/ydn.db/test/qunit/test_qunit.js
JavaScript
apache-2.0
1,170
var entityMap = require("../maps/entities.json"), legacyMap = require("../maps/legacy.json"), xmlMap = require("../maps/xml.json"), decodeCodePoint = require("./decode_codepoint.js"); var decodeXMLStrict = getStrictDecoder(xmlMap), decodeHTMLStrict = getStrictDecoder(entityMap); function getStrictDecoder(map) { var keys = Object.keys(map).join("|"), replace = getReplacer(map); keys += "|#[xX][\\da-fA-F]+|#\\d+"; var re = new RegExp("&(?:" + keys + ");", "g"); return function(str) { return String(str).replace(re, replace); }; } var decodeHTML = (function() { var legacy = Object.keys(legacyMap).sort(sorter); var keys = Object.keys(entityMap).sort(sorter); for (var i = 0, j = 0; i < keys.length; i++) { if (legacy[j] === keys[i]) { keys[i] += ";?"; j++; } else { keys[i] += ";"; } } var re = new RegExp("&(?:" + keys.join("|") + "|#[xX][\\da-fA-F]+;?|#\\d+;?)", "g"), replace = getReplacer(entityMap); function replacer(str) { if (str.substr(-1) !== ";") str += ";"; return replace(str); } //TODO consider creating a merged map return function(str) { return String(str).replace(re, replacer); }; })(); function sorter(a, b) { return a < b ? 1 : -1; } function getReplacer(map) { return function replace(str) { if (str.charAt(1) === "#") { if (str.charAt(2) === "X" || str.charAt(2) === "x") { return decodeCodePoint(parseInt(str.substr(3), 16)); } return decodeCodePoint(parseInt(str.substr(2), 10)); } return map[str.slice(1, -1)]; }; } module.exports = { XML: decodeXMLStrict, HTML: decodeHTML, HTMLStrict: decodeHTMLStrict };
henryhe1/henryhe1.github.io
node_modules/entities/lib/decode.js
JavaScript
apache-2.0
1,833
/*************************GO-LICENSE-START********************************* * Copyright 2014 ThoughtWorks, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. *************************GO-LICENSE-END***********************************/ package com.thoughtworks.go.server.service.dd.reporting; import java.io.PrintWriter; import java.io.StringWriter; import java.util.Map; import java.util.Set; import com.thoughtworks.go.config.materials.dependency.DependencyMaterialConfig; import com.thoughtworks.go.domain.materials.MaterialConfig; import com.thoughtworks.go.server.dao.PipelineDao; import com.thoughtworks.go.server.domain.PipelineTimeline; public class ReportingFanInGraphContext { Map<String, MaterialConfig> fingerprintScmMaterialMap; PipelineTimeline pipelineTimeline; Map<DependencyMaterialConfig, Set<MaterialConfig>> pipelineScmDepMap; Map<String, DependencyMaterialConfig> fingerprintDepMaterialMap; StringWriter sw; PrintWriter out; PipelineDao pipelineDao; }
justinholmes/gocd
server/src/com/thoughtworks/go/server/service/dd/reporting/ReportingFanInGraphContext.java
Java
apache-2.0
1,516
# # xmlrpc/base64.rb # Copyright (C) 2001, 2002, 2003 by Michael Neumann (mneumann@ntecs.de) # # Released under the same term of license as Ruby. module XMLRPC # :nodoc: # This class is necessary for 'xmlrpc4r' to determine that a string should # be transmitted base64-encoded and not as a raw-string. # # You can use XMLRPC::Base64 on the client and server-side as a # parameter and/or return-value. class Base64 # Creates a new XMLRPC::Base64 instance with string +str+ as the # internal string. When +state+ is +:dec+ it assumes that the # string +str+ is not in base64 format (perhaps already decoded), # otherwise if +state+ is +:enc+ it decodes +str+ # and stores it as the internal string. def initialize(str, state = :dec) case state when :enc @str = Base64.decode(str) when :dec @str = str else raise ArgumentError, "wrong argument; either :enc or :dec" end end # Returns the decoded internal string. def decoded @str end # Returns the base64 encoded internal string. def encoded Base64.encode(@str) end # Decodes string +str+ with base64 and returns that value. def Base64.decode(str) str.gsub(/\s+/, "").unpack("m")[0] end # Encodes string +str+ with base64 and returns that value. def Base64.encode(str) [str].pack("m") end end end # module XMLRPC =begin = History $Id$ =end
xli/gocd
tools/jruby-1.7.11/lib/ruby/2.0/xmlrpc/base64.rb
Ruby
apache-2.0
1,396
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from django.test.utils import override_settings from openstack_dashboard import policy from openstack_dashboard import policy_backend from openstack_dashboard.test import helpers as test class PolicyTestCase(test.TestCase): @override_settings(POLICY_CHECK_FUNCTION=policy_backend.check) def test_policy_check_set(self): value = policy.check((("identity", "admin_required"),), request=self.request) self.assertFalse(value) @override_settings(POLICY_CHECK_FUNCTION=None) def test_policy_check_not_set(self): value = policy.check((("identity", "admin_required"),), request=self.request) self.assertTrue(value) class PolicyBackendTestCaseAdmin(test.BaseAdminViewTests): @override_settings(POLICY_CHECK_FUNCTION=policy_backend.check) def test_policy_check_set_admin(self): value = policy.check((("identity", "admin_required"),), request=self.request) self.assertTrue(value)
pranavtendolkr/horizon
openstack_dashboard/test/tests/policy.py
Python
apache-2.0
1,609
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.ignite.binary; /** * Binary object field. Can be used to speed object field lookup. */ public interface BinaryField { /** * Get field's name. * * @return Name. */ public String name(); /** * Check whether field exists in the object. * * @param obj Object. * @return {@code True} if exists. */ public boolean exists(BinaryObject obj); /** * Get field's value from the given object. * * @param obj Object. * @return Value. */ public <T> T value(BinaryObject obj); }
vldpyatkov/ignite
modules/core/src/main/java/org/apache/ignite/binary/BinaryField.java
Java
apache-2.0
1,386
/* Copyright 2014 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package field import ( "encoding/json" "fmt" "strings" utilerrors "k8s.io/kubernetes/pkg/util/errors" "k8s.io/kubernetes/pkg/util/sets" ) // Error is an implementation of the 'error' interface, which represents a // field-level validation error. type Error struct { Type ErrorType Field string BadValue interface{} Detail string } var _ error = &Error{} // Error implements the error interface. func (v *Error) Error() string { return fmt.Sprintf("%s: %s", v.Field, v.ErrorBody()) } // ErrorBody returns the error message without the field name. This is useful // for building nice-looking higher-level error reporting. func (v *Error) ErrorBody() string { var s string switch v.Type { case ErrorTypeRequired, ErrorTypeForbidden, ErrorTypeTooLong, ErrorTypeInternal: s = fmt.Sprintf("%s", v.Type) default: var bad string badBytes, err := json.Marshal(v.BadValue) if err != nil { bad = err.Error() } else { bad = string(badBytes) } s = fmt.Sprintf("%s: %s", v.Type, bad) } if len(v.Detail) != 0 { s += fmt.Sprintf(": %s", v.Detail) } return s } // ErrorType is a machine readable value providing more detail about why // a field is invalid. These values are expected to match 1-1 with // CauseType in api/types.go. type ErrorType string // TODO: These values are duplicated in api/types.go, but there's a circular dep. Fix it. const ( // ErrorTypeNotFound is used to report failure to find a requested value // (e.g. looking up an ID). See NotFound(). ErrorTypeNotFound ErrorType = "FieldValueNotFound" // ErrorTypeRequired is used to report required values that are not // provided (e.g. empty strings, null values, or empty arrays). See // Required(). ErrorTypeRequired ErrorType = "FieldValueRequired" // ErrorTypeDuplicate is used to report collisions of values that must be // unique (e.g. unique IDs). See Duplicate(). ErrorTypeDuplicate ErrorType = "FieldValueDuplicate" // ErrorTypeInvalid is used to report malformed values (e.g. failed regex // match, too long, out of bounds). See Invalid(). ErrorTypeInvalid ErrorType = "FieldValueInvalid" // ErrorTypeNotSupported is used to report unknown values for enumerated // fields (e.g. a list of valid values). See NotSupported(). ErrorTypeNotSupported ErrorType = "FieldValueNotSupported" // ErrorTypeForbidden is used to report valid (as per formatting rules) // values which would be accepted under some conditions, but which are not // permitted by the current conditions (such as security policy). See // Forbidden(). ErrorTypeForbidden ErrorType = "FieldValueForbidden" // ErrorTypeTooLong is used to report that the given value is too long. // This is similar to ErrorTypeInvalid, but the error will not include the // too-long value. See TooLong(). ErrorTypeTooLong ErrorType = "FieldValueTooLong" // ErrorTypeInternal is used to report other errors that are not related // to user input. See InternalError(). ErrorTypeInternal ErrorType = "InternalError" ) // String converts a ErrorType into its corresponding canonical error message. func (t ErrorType) String() string { switch t { case ErrorTypeNotFound: return "Not found" case ErrorTypeRequired: return "Required value" case ErrorTypeDuplicate: return "Duplicate value" case ErrorTypeInvalid: return "Invalid value" case ErrorTypeNotSupported: return "Unsupported value" case ErrorTypeForbidden: return "Forbidden" case ErrorTypeTooLong: return "Too long" case ErrorTypeInternal: return "Internal error" default: panic(fmt.Sprintf("unrecognized validation error: %q", string(t))) } } // NotFound returns a *Error indicating "value not found". This is // used to report failure to find a requested value (e.g. looking up an ID). func NotFound(field *Path, value interface{}) *Error { return &Error{ErrorTypeNotFound, field.String(), value, ""} } // Required returns a *Error indicating "value required". This is used // to report required values that are not provided (e.g. empty strings, null // values, or empty arrays). func Required(field *Path, detail string) *Error { return &Error{ErrorTypeRequired, field.String(), "", detail} } // Duplicate returns a *Error indicating "duplicate value". This is // used to report collisions of values that must be unique (e.g. names or IDs). func Duplicate(field *Path, value interface{}) *Error { return &Error{ErrorTypeDuplicate, field.String(), value, ""} } // Invalid returns a *Error indicating "invalid value". This is used // to report malformed values (e.g. failed regex match, too long, out of bounds). func Invalid(field *Path, value interface{}, detail string) *Error { return &Error{ErrorTypeInvalid, field.String(), value, detail} } // NotSupported returns a *Error indicating "unsupported value". // This is used to report unknown values for enumerated fields (e.g. a list of // valid values). func NotSupported(field *Path, value interface{}, validValues []string) *Error { detail := "" if validValues != nil && len(validValues) > 0 { detail = "supported values: " + strings.Join(validValues, ", ") } return &Error{ErrorTypeNotSupported, field.String(), value, detail} } // Forbidden returns a *Error indicating "forbidden". This is used to // report valid (as per formatting rules) values which would be accepted under // some conditions, but which are not permitted by current conditions (e.g. // security policy). func Forbidden(field *Path, detail string) *Error { return &Error{ErrorTypeForbidden, field.String(), "", detail} } // TooLong returns a *Error indicating "too long". This is used to // report that the given value is too long. This is similar to // Invalid, but the returned error will not include the too-long // value. func TooLong(field *Path, value interface{}, maxLength int) *Error { return &Error{ErrorTypeTooLong, field.String(), value, fmt.Sprintf("must have at most %d characters", maxLength)} } // InternalError returns a *Error indicating "internal error". This is used // to signal that an error was found that was not directly related to user // input. The err argument must be non-nil. func InternalError(field *Path, err error) *Error { return &Error{ErrorTypeInternal, field.String(), nil, err.Error()} } // ErrorList holds a set of Errors. It is plausible that we might one day have // non-field errors in this same umbrella package, but for now we don't, so // we can keep it simple and leave ErrorList here. type ErrorList []*Error // NewErrorTypeMatcher returns an errors.Matcher that returns true // if the provided error is a Error and has the provided ErrorType. func NewErrorTypeMatcher(t ErrorType) utilerrors.Matcher { return func(err error) bool { if e, ok := err.(*Error); ok { return e.Type == t } return false } } // ToAggregate converts the ErrorList into an errors.Aggregate. func (list ErrorList) ToAggregate() utilerrors.Aggregate { errs := make([]error, 0, len(list)) errorMsgs := sets.NewString() for _, err := range list { msg := fmt.Sprintf("%v", err) if errorMsgs.Has(msg) { continue } errorMsgs.Insert(msg) errs = append(errs, err) } return utilerrors.NewAggregate(errs) } func fromAggregate(agg utilerrors.Aggregate) ErrorList { errs := agg.Errors() list := make(ErrorList, len(errs)) for i := range errs { list[i] = errs[i].(*Error) } return list } // Filter removes items from the ErrorList that match the provided fns. func (list ErrorList) Filter(fns ...utilerrors.Matcher) ErrorList { err := utilerrors.FilterOut(list.ToAggregate(), fns...) if err == nil { return nil } // FilterOut takes an Aggregate and returns an Aggregate return fromAggregate(err.(utilerrors.Aggregate)) }
hodovska/kubernetes
pkg/util/validation/field/errors.go
GO
apache-2.0
8,293
// Load modules var Util = require('util'); var Stream = require('stream'); var Fs = require('fs'); var Zlib = require('zlib'); var Lab = require('lab'); var Shot = require('../lib'); // Declare internals var internals = {}; // Test shortcuts var lab = exports.lab = Lab.script(); var describe = lab.describe; var it = lab.it; var expect = Lab.expect; describe('Shot', function () { describe('#inject', function () { it('returns non-chunked payload', function (done) { var output = 'example.com:8080|/hello'; var dispatch = function (req, res) { res.writeHead(200, { 'Content-Type': 'text/plain', 'Content-Length': output.length }); res.end(req.headers.host + '|' + req.url); }; Shot.inject(dispatch, 'http://example.com:8080/hello', function (res) { expect(res.headers.date).to.exist; expect(res.headers.connection).to.exist; expect(res.headers['transfer-encoding']).to.not.exist; expect(res.payload).to.equal(output); done(); }); }); it('returns single buffer payload', function (done) { var dispatch = function (req, res) { res.writeHead(200, { 'Content-Type': 'text/plain' }); res.end(req.headers.host + '|' + req.url); }; Shot.inject(dispatch, { url: 'http://example.com:8080/hello' }, function (res) { expect(res.headers.date).to.exist; expect(res.headers.connection).to.exist; expect(res.headers['transfer-encoding']).to.equal('chunked'); expect(res.payload).to.equal('example.com:8080|/hello'); done(); }); }); it('passes headers', function (done) { var dispatch = function (req, res) { res.writeHead(200, { 'Content-Type': 'text/plain' }); res.end(req.headers.super); }; Shot.inject(dispatch, { method: 'get', url: 'http://example.com:8080/hello', headers: { Super: 'duper' } }, function (res) { expect(res.payload).to.equal('duper'); done(); }); }); it('leaves user-agent unmodified', function (done) { var dispatch = function (req, res) { res.writeHead(200, { 'Content-Type': 'text/plain' }); res.end(req.headers['user-agent']); }; Shot.inject(dispatch, { method: 'get', url: 'http://example.com:8080/hello', headers: { 'user-agent': 'duper' } }, function (res) { expect(res.payload).to.equal('duper'); done(); }); }); it('returns chunked payload', function (done) { var dispatch = function (req, res) { res.writeHead(200, 'OK'); res.write('a'); res.write('b'); res.end(); }; Shot.inject(dispatch, { method: 'get', url: '/' }, function (res) { expect(res.headers.date).to.exist; expect(res.headers.connection).to.exist; expect(res.headers['transfer-encoding']).to.equal('chunked'); expect(res.payload).to.equal('ab'); done(); }); }); it('returns chunked payload with trailer', function (done) { var dispatch = function (req, res) { res.setHeader('Trailer', 'Server-Authorization'); res.setHeader('Transfer-Encoding', 'chunked'); res.writeHead(200, 'OK'); res.write('a'); res.write('b'); res.addTrailers({ 'Test': 123 }); res.end(); }; Shot.inject(dispatch, { method: 'get', url: '/' }, function (res) { expect(res.payload).to.equal('ab'); expect(res.headers.test).to.equal('123'); done(); }); }); it('parses zipped payload', function (done) { var dispatch = function (req, res) { res.writeHead(200, 'OK'); var stream = Fs.createReadStream('./package.json'); stream.pipe(Zlib.createGzip()).pipe(res); }; Shot.inject(dispatch, { method: 'get', url: '/' }, function (res) { Fs.readFile('./package.json', { encoding: 'utf-8' }, function (err, file) { Zlib.unzip(new Buffer(res.payload, 'binary'), function (err, unzipped) { expect(err).to.not.exist; expect(unzipped.toString('utf-8')).to.deep.equal(file); done(); }); }); }); }); it('returns multi buffer payload', function (done) { var dispatch = function (req, res) { res.writeHead(200); res.write('a'); res.write(new Buffer('b')); res.end(); }; Shot.inject(dispatch, { method: 'get', url: '/' }, function (res) { expect(res.payload).to.equal('ab'); done(); }); }); it('returns null payload', function (done) { var dispatch = function (req, res) { res.writeHead(200, { 'Content-Length': 0 }); res.end(); }; Shot.inject(dispatch, { method: 'get', url: '/' }, function (res) { expect(res.payload).to.equal(''); done(); }); }); it('allows ending twice', function (done) { var dispatch = function (req, res) { res.writeHead(200, { 'Content-Length': 0 }); res.end(); res.end(); }; Shot.inject(dispatch, { method: 'get', url: '/' }, function (res) { expect(res.payload).to.equal(''); done(); }); }); it('identifies injection object', function (done) { var dispatch = function (req, res) { expect(Shot.isInjection(req)).to.equal(true); expect(Shot.isInjection(res)).to.equal(true); res.writeHead(200, { 'Content-Length': 0 }); res.end(); }; Shot.inject(dispatch, { method: 'get', url: '/' }, function (res) { done(); }); }); it('pipes response', function (done) { var Read = function () { Stream.Readable.call(this); }; Util.inherits(Read, Stream.Readable); Read.prototype._read = function (size) { this.push('hi'); this.push(null); }; var finished = false; var dispatch = function (req, res) { res.writeHead(200); var stream = new Read(); res.on('finish', function () { finished = true; }); stream.pipe(res); }; Shot.inject(dispatch, { method: 'get', url: '/' }, function (res) { expect(finished).to.equal(true); expect(res.payload).to.equal('hi'); done(); }); }); it('pipes response with old stream', function (done) { var Read = function () { Stream.Readable.call(this); }; Util.inherits(Read, Stream.Readable); Read.prototype._read = function (size) { this.push('hi'); this.push(null); }; var finished = false; var dispatch = function (req, res) { res.writeHead(200); var stream = new Read(); stream.pause(); var stream2 = new Stream.Readable().wrap(stream); stream.resume(); res.on('finish', function () { finished = true; }); stream2.pipe(res); }; Shot.inject(dispatch, { method: 'get', url: '/' }, function (res) { expect(finished).to.equal(true); expect(res.payload).to.equal('hi'); done(); }); }); it('echos object payload', function (done) { var dispatch = function (req, res) { res.writeHead(200, { 'content-type': req.headers['content-type'] }); req.pipe(res); }; Shot.inject(dispatch, { method: 'post', url: '/test', payload: { a: 1 } }, function (res) { expect(res.headers['content-type']).to.equal('application/json'); expect(res.payload).to.equal('{"a":1}'); done(); }); }); it('echos object payload without payload', function (done) { var dispatch = function (req, res) { res.writeHead(200); req.pipe(res); }; Shot.inject(dispatch, { method: 'post', url: '/test' }, function (res) { expect(res.payload).to.equal(''); done(); }); }); it('retains content-type header', function (done) { var dispatch = function (req, res) { res.writeHead(200, { 'content-type': req.headers['content-type'] }); req.pipe(res); }; Shot.inject(dispatch, { method: 'post', url: '/test', payload: { a: 1 }, headers: { 'content-type': 'something' } }, function (res) { expect(res.headers['content-type']).to.equal('something'); expect(res.payload).to.equal('{"a":1}'); done(); }); }); }); describe('#writeHead', function () { it('returns single buffer payload', function (done) { var reply = 'Hello World'; var dispatch = function (req, res) { res.writeHead(200, 'OK', { 'Content-Type': 'text/plain', 'Content-Length': reply.length }); res.end(reply); }; Shot.inject(dispatch, { method: 'get', url: '/' }, function (res) { expect(res.payload).to.equal(reply); done(); }); }); }); describe('#_read', function () { it('plays payload', function (done) { var dispatch = function (req, res) { var buffer = ''; req.on('readable', function () { buffer += req.read() || ''; }); req.on('error', function (err) { }); req.on('close', function () { }); req.on('end', function () { res.writeHead(200, { 'Content-Length': 0 }); res.end(buffer); req.destroy(); }); }; var body = 'something special just for you'; Shot.inject(dispatch, { method: 'get', url: '/', payload: body }, function (res) { expect(res.payload).to.equal(body); done(); }); }); it('simulates split', function (done) { var dispatch = function (req, res) { var buffer = ''; req.on('readable', function () { buffer += req.read() || ''; }); req.on('error', function (err) { }); req.on('close', function () { }); req.on('end', function () { res.writeHead(200, { 'Content-Length': 0 }); res.end(buffer); req.destroy(); }); }; var body = 'something special just for you'; Shot.inject(dispatch, { method: 'get', url: '/', payload: body, simulate: { split: true } }, function (res) { expect(res.payload).to.equal(body); done(); }); }); it('simulates error', function (done) { var dispatch = function (req, res) { req.on('readable', function () { }); req.on('error', function (err) { res.writeHead(200, { 'Content-Length': 0 }); res.end('error'); }); }; var body = 'something special just for you'; Shot.inject(dispatch, { method: 'get', url: '/', payload: body, simulate: { error: true } }, function (res) { expect(res.payload).to.equal('error'); done(); }); }); it('simulates no end without payload', function (done) { var end = false; var dispatch = function (req, res) { req.resume(); req.on('end', function () { end = true; }); }; var replied = false; Shot.inject(dispatch, { method: 'get', url: '/', simulate: { end: false } }, function (res) { replied = true; }); setTimeout(function () { expect(end).to.equal(false); expect(replied).to.equal(false); done(); }, 10); }); it('simulates no end with payload', function (done) { var end = false; var dispatch = function (req, res) { req.resume(); req.on('end', function () { end = true; }); }; var replied = false; Shot.inject(dispatch, { method: 'get', url: '/', payload: '1234567', simulate: { end: false } }, function (res) { replied = true; }); setTimeout(function () { expect(end).to.equal(false); expect(replied).to.equal(false); done(); }, 10); }); it('simulates close', function (done) { var dispatch = function (req, res) { var buffer = ''; req.on('readable', function () { buffer += req.read() || ''; }); req.on('error', function (err) { }); req.on('close', function () { res.writeHead(200, { 'Content-Length': 0 }); res.end('close'); }); req.on('end', function () { }); }; var body = 'something special just for you'; Shot.inject(dispatch, { method: 'get', url: '/', payload: body, simulate: { close: true } }, function (res) { expect(res.payload).to.equal('close'); done(); }); }); }); });
voiceofrae/bloc-chat
node_modules/hapi/node_modules/shot/test/index.js
JavaScript
apache-2.0
15,290
// The Computer Language Benchmarks Game // http://benchmarksgame.alioth.debian.org/ // // contributed by the Rust Project Developers // Copyright (c) 2012-2014 The Rust Project Developers // // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // // - Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // // - Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in // the documentation and/or other materials provided with the // distribution. // // - Neither the name of "The Computer Language Benchmarks Game" nor // the name of "The Computer Language Shootout Benchmarks" nor the // names of its contributors may be used to endorse or promote // products derived from this software without specific prior // written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS // FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE // COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, // INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES // (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) // HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, // STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) // ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED // OF THE POSSIBILITY OF SUCH DAMAGE. use std::sync::mpsc::{channel, Sender, Receiver}; use std::thread; fn start(n_tasks: i32, token: i32) { let (tx, mut rx) = channel(); tx.send(token).unwrap(); let mut guards = Vec::with_capacity(n_tasks as usize); for i in 2 .. n_tasks + 1 { let (tx, next_rx) = channel(); let cur_rx = std::mem::replace(&mut rx, next_rx); guards.push(thread::spawn(move|| roundtrip(i, tx, cur_rx))); } let guard = thread::spawn(move|| roundtrip(1, tx, rx)); } fn roundtrip(id: i32, tx: Sender<i32>, rx: Receiver<i32>) { for token in rx.iter() { if token == 1 { println!("{}", id); break; } tx.send(token - 1).unwrap(); } } fn main() { let mut args = std::env::args(); let token = if std::env::var_os("RUST_BENCH").is_some() { 2000000 } else { args.nth(1).and_then(|arg| arg.parse().ok()).unwrap_or(1000) }; let n_tasks = args.next() .and_then(|arg| arg.parse().ok()) .unwrap_or(503); start(n_tasks, token); }
zaeleus/rust
src/test/bench/shootout-threadring.rs
Rust
apache-2.0
2,911
<!DOCTYPE html> <html> <head> <meta charset="utf-8"> <link rel="shortcut icon" type="image/ico" href="http://www.datatables.net/favicon.ico"> <meta name="viewport" content="initial-scale=1.0, maximum-scale=2.0"> <title>AutoFill example - Fill plug-ins</title> <link rel="stylesheet" type="text/css" href="../../../../media/css/jquery.dataTables.css"> <link rel="stylesheet" type="text/css" href="../../css/autoFill.datatables.css"> <link rel="stylesheet" type="text/css" href="../../../../examples/resources/syntax/shCore.css"> <link rel="stylesheet" type="text/css" href="../../../../examples/resources/demo.css"> <style type="text/css" class="init"> </style> <script type="text/javascript" language="javascript" src="//code.jquery.com/jquery-1.11.3.min.js"></script> <script type="text/javascript" language="javascript" src="../../../../media/js/jquery.dataTables.js"></script> <script type="text/javascript" language="javascript" src="../../js/dataTables.autoFill.js"></script> <script type="text/javascript" language="javascript" src="../../../../examples/resources/syntax/shCore.js"></script> <script type="text/javascript" language="javascript" src="../../../../examples/resources/demo.js"></script> <script type="text/javascript" language="javascript" class="init"> $.fn.dataTable.AutoFill.actions.names = { available: function ( dt, cells ) { // Only available if a single column is being // filled and it is the first column return cells[0].length === 1 && cells[0][0].index.column === 0; }, option: function ( dt, cells ) { // Ask the user if they want to change the surname only return 'Fill only surname - retain first name'; }, execute: function ( dt, cells, node ) { // Modify the name and set the new values var surname = cells[0][0].data.split(' ')[1]; for ( var i=0, ien=cells.length ; i<ien ; i++ ) { var name = cells[i][0].data.split(' '); cells[i][0].set = name[0]+' '+surname; } } } $(document).ready(function() { $('#example').DataTable( { autoFill: true } ); } ); </script> </head> <body class="dt-example"> <div class="container"> <section> <h1>AutoFill example <span>Fill plug-ins</span></h1> <div class="info"> <p>AutoFill provides a <a href="fills.html">number of built in fill types</a>, but these built in options can be augmented with additional options using plug-ins.</p> <p>The fill options are provided by plug-ins which are attached to the <code>$.fn.dataTable.AutoFill.actions</code> object. Each property in this object must be an object that provides three functions:</p> <ul class="markdown"> <li><code>available</code> - Determine if the data that the user dragged the fill over is suitable for this fill type</li> <li><code>option</code> - Returns a question the user will be asked for if they want to use this fill type</li> <li><code>execute</code> - Modifies the data if this fill type is selected</li> </ul> <p>The example shows a plug-in that will operate only on the first column in the table and will change only the surname - retaining the forename from the original cell. While slightly contrived as an example, it demonstrates how plug-ins can perform potentially complex operations.</p> <p>For full details about creating fill plug-ins for AutoFill, please refer to the <a href="https://datatables.net/extensions/autofill">online documentation</a>.</p> </div> <table id="example" class="display nowrap" cellspacing="0" width="100%"> <thead> <tr> <th>Name</th> <th>Position</th> <th>Office</th> <th>Age</th> <th>Start date</th> <th>Salary</th> </tr> </thead> <tfoot> <tr> <th>Name</th> <th>Position</th> <th>Office</th> <th>Age</th> <th>Start date</th> <th>Salary</th> </tr> </tfoot> <tbody> <tr> <td>Tiger Nixon</td> <td>System Architect</td> <td>Edinburgh</td> <td>61</td> <td>2011/04/25</td> <td>$320,800</td> </tr> <tr> <td>Garrett Winters</td> <td>Accountant</td> <td>Tokyo</td> <td>63</td> <td>2011/07/25</td> <td>$170,750</td> </tr> <tr> <td>Ashton Cox</td> <td>Junior Technical Author</td> <td>San Francisco</td> <td>66</td> <td>2009/01/12</td> <td>$86,000</td> </tr> <tr> <td>Cedric Kelly</td> <td>Senior Javascript Developer</td> <td>Edinburgh</td> <td>22</td> <td>2012/03/29</td> <td>$433,060</td> </tr> <tr> <td>Airi Satou</td> <td>Accountant</td> <td>Tokyo</td> <td>33</td> <td>2008/11/28</td> <td>$162,700</td> </tr> <tr> <td>Brielle Williamson</td> <td>Integration Specialist</td> <td>New York</td> <td>61</td> <td>2012/12/02</td> <td>$372,000</td> </tr> <tr> <td>Herrod Chandler</td> <td>Sales Assistant</td> <td>San Francisco</td> <td>59</td> <td>2012/08/06</td> <td>$137,500</td> </tr> <tr> <td>Rhona Davidson</td> <td>Integration Specialist</td> <td>Tokyo</td> <td>55</td> <td>2010/10/14</td> <td>$327,900</td> </tr> <tr> <td>Colleen Hurst</td> <td>Javascript Developer</td> <td>San Francisco</td> <td>39</td> <td>2009/09/15</td> <td>$205,500</td> </tr> <tr> <td>Sonya Frost</td> <td>Software Engineer</td> <td>Edinburgh</td> <td>23</td> <td>2008/12/13</td> <td>$103,600</td> </tr> <tr> <td>Jena Gaines</td> <td>Office Manager</td> <td>London</td> <td>30</td> <td>2008/12/19</td> <td>$90,560</td> </tr> <tr> <td>Quinn Flynn</td> <td>Support Lead</td> <td>Edinburgh</td> <td>22</td> <td>2013/03/03</td> <td>$342,000</td> </tr> <tr> <td>Charde Marshall</td> <td>Regional Director</td> <td>San Francisco</td> <td>36</td> <td>2008/10/16</td> <td>$470,600</td> </tr> <tr> <td>Haley Kennedy</td> <td>Senior Marketing Designer</td> <td>London</td> <td>43</td> <td>2012/12/18</td> <td>$313,500</td> </tr> <tr> <td>Tatyana Fitzpatrick</td> <td>Regional Director</td> <td>London</td> <td>19</td> <td>2010/03/17</td> <td>$385,750</td> </tr> <tr> <td>Michael Silva</td> <td>Marketing Designer</td> <td>London</td> <td>66</td> <td>2012/11/27</td> <td>$198,500</td> </tr> <tr> <td>Paul Byrd</td> <td>Chief Financial Officer (CFO)</td> <td>New York</td> <td>64</td> <td>2010/06/09</td> <td>$725,000</td> </tr> <tr> <td>Gloria Little</td> <td>Systems Administrator</td> <td>New York</td> <td>59</td> <td>2009/04/10</td> <td>$237,500</td> </tr> <tr> <td>Bradley Greer</td> <td>Software Engineer</td> <td>London</td> <td>41</td> <td>2012/10/13</td> <td>$132,000</td> </tr> <tr> <td>Dai Rios</td> <td>Personnel Lead</td> <td>Edinburgh</td> <td>35</td> <td>2012/09/26</td> <td>$217,500</td> </tr> <tr> <td>Jenette Caldwell</td> <td>Development Lead</td> <td>New York</td> <td>30</td> <td>2011/09/03</td> <td>$345,000</td> </tr> <tr> <td>Yuri Berry</td> <td>Chief Marketing Officer (CMO)</td> <td>New York</td> <td>40</td> <td>2009/06/25</td> <td>$675,000</td> </tr> <tr> <td>Caesar Vance</td> <td>Pre-Sales Support</td> <td>New York</td> <td>21</td> <td>2011/12/12</td> <td>$106,450</td> </tr> <tr> <td>Doris Wilder</td> <td>Sales Assistant</td> <td>Sidney</td> <td>23</td> <td>2010/09/20</td> <td>$85,600</td> </tr> <tr> <td>Angelica Ramos</td> <td>Chief Executive Officer (CEO)</td> <td>London</td> <td>47</td> <td>2009/10/09</td> <td>$1,200,000</td> </tr> <tr> <td>Gavin Joyce</td> <td>Developer</td> <td>Edinburgh</td> <td>42</td> <td>2010/12/22</td> <td>$92,575</td> </tr> <tr> <td>Jennifer Chang</td> <td>Regional Director</td> <td>Singapore</td> <td>28</td> <td>2010/11/14</td> <td>$357,650</td> </tr> <tr> <td>Brenden Wagner</td> <td>Software Engineer</td> <td>San Francisco</td> <td>28</td> <td>2011/06/07</td> <td>$206,850</td> </tr> <tr> <td>Fiona Green</td> <td>Chief Operating Officer (COO)</td> <td>San Francisco</td> <td>48</td> <td>2010/03/11</td> <td>$850,000</td> </tr> <tr> <td>Shou Itou</td> <td>Regional Marketing</td> <td>Tokyo</td> <td>20</td> <td>2011/08/14</td> <td>$163,000</td> </tr> <tr> <td>Michelle House</td> <td>Integration Specialist</td> <td>Sidney</td> <td>37</td> <td>2011/06/02</td> <td>$95,400</td> </tr> <tr> <td>Suki Burks</td> <td>Developer</td> <td>London</td> <td>53</td> <td>2009/10/22</td> <td>$114,500</td> </tr> <tr> <td>Prescott Bartlett</td> <td>Technical Author</td> <td>London</td> <td>27</td> <td>2011/05/07</td> <td>$145,000</td> </tr> <tr> <td>Gavin Cortez</td> <td>Team Leader</td> <td>San Francisco</td> <td>22</td> <td>2008/10/26</td> <td>$235,500</td> </tr> <tr> <td>Martena Mccray</td> <td>Post-Sales support</td> <td>Edinburgh</td> <td>46</td> <td>2011/03/09</td> <td>$324,050</td> </tr> <tr> <td>Unity Butler</td> <td>Marketing Designer</td> <td>San Francisco</td> <td>47</td> <td>2009/12/09</td> <td>$85,675</td> </tr> <tr> <td>Howard Hatfield</td> <td>Office Manager</td> <td>San Francisco</td> <td>51</td> <td>2008/12/16</td> <td>$164,500</td> </tr> <tr> <td>Hope Fuentes</td> <td>Secretary</td> <td>San Francisco</td> <td>41</td> <td>2010/02/12</td> <td>$109,850</td> </tr> <tr> <td>Vivian Harrell</td> <td>Financial Controller</td> <td>San Francisco</td> <td>62</td> <td>2009/02/14</td> <td>$452,500</td> </tr> <tr> <td>Timothy Mooney</td> <td>Office Manager</td> <td>London</td> <td>37</td> <td>2008/12/11</td> <td>$136,200</td> </tr> <tr> <td>Jackson Bradshaw</td> <td>Director</td> <td>New York</td> <td>65</td> <td>2008/09/26</td> <td>$645,750</td> </tr> <tr> <td>Olivia Liang</td> <td>Support Engineer</td> <td>Singapore</td> <td>64</td> <td>2011/02/03</td> <td>$234,500</td> </tr> <tr> <td>Bruno Nash</td> <td>Software Engineer</td> <td>London</td> <td>38</td> <td>2011/05/03</td> <td>$163,500</td> </tr> <tr> <td>Sakura Yamamoto</td> <td>Support Engineer</td> <td>Tokyo</td> <td>37</td> <td>2009/08/19</td> <td>$139,575</td> </tr> <tr> <td>Thor Walton</td> <td>Developer</td> <td>New York</td> <td>61</td> <td>2013/08/11</td> <td>$98,540</td> </tr> <tr> <td>Finn Camacho</td> <td>Support Engineer</td> <td>San Francisco</td> <td>47</td> <td>2009/07/07</td> <td>$87,500</td> </tr> <tr> <td>Serge Baldwin</td> <td>Data Coordinator</td> <td>Singapore</td> <td>64</td> <td>2012/04/09</td> <td>$138,575</td> </tr> <tr> <td>Zenaida Frank</td> <td>Software Engineer</td> <td>New York</td> <td>63</td> <td>2010/01/04</td> <td>$125,250</td> </tr> <tr> <td>Zorita Serrano</td> <td>Software Engineer</td> <td>San Francisco</td> <td>56</td> <td>2012/06/01</td> <td>$115,000</td> </tr> <tr> <td>Jennifer Acosta</td> <td>Junior Javascript Developer</td> <td>Edinburgh</td> <td>43</td> <td>2013/02/01</td> <td>$75,650</td> </tr> <tr> <td>Cara Stevens</td> <td>Sales Assistant</td> <td>New York</td> <td>46</td> <td>2011/12/06</td> <td>$145,600</td> </tr> <tr> <td>Hermione Butler</td> <td>Regional Director</td> <td>London</td> <td>47</td> <td>2011/03/21</td> <td>$356,250</td> </tr> <tr> <td>Lael Greer</td> <td>Systems Administrator</td> <td>London</td> <td>21</td> <td>2009/02/27</td> <td>$103,500</td> </tr> <tr> <td>Jonas Alexander</td> <td>Developer</td> <td>San Francisco</td> <td>30</td> <td>2010/07/14</td> <td>$86,500</td> </tr> <tr> <td>Shad Decker</td> <td>Regional Director</td> <td>Edinburgh</td> <td>51</td> <td>2008/11/13</td> <td>$183,000</td> </tr> <tr> <td>Michael Bruce</td> <td>Javascript Developer</td> <td>Singapore</td> <td>29</td> <td>2011/06/27</td> <td>$183,000</td> </tr> <tr> <td>Donna Snider</td> <td>Customer Support</td> <td>New York</td> <td>27</td> <td>2011/01/25</td> <td>$112,000</td> </tr> </tbody> </table> <ul class="tabs"> <li class="active">Javascript</li> <li>HTML</li> <li>CSS</li> <li>Ajax</li> <li>Server-side script</li> </ul> <div class="tabs"> <div class="js"> <p>The Javascript shown below is used to initialise the table shown in this example:</p><code class="multiline language-js">$.fn.dataTable.AutoFill.actions.names = { available: function ( dt, cells ) { // Only available if a single column is being // filled and it is the first column return cells[0].length === 1 &amp;&amp; cells[0][0].index.column === 0; }, option: function ( dt, cells ) { // Ask the user if they want to change the surname only return 'Fill only surname - retain first name'; }, execute: function ( dt, cells, node ) { // Modify the name and set the new values var surname = cells[0][0].data.split(' ')[1]; for ( var i=0, ien=cells.length ; i&lt;ien ; i++ ) { var name = cells[i][0].data.split(' '); cells[i][0].set = name[0]+' '+surname; } } } $(document).ready(function() { $('#example').DataTable( { autoFill: true } ); } );</code> <p>In addition to the above code, the following Javascript library files are loaded for use in this example:</p> <ul> <li><a href="//code.jquery.com/jquery-1.11.3.min.js">//code.jquery.com/jquery-1.11.3.min.js</a></li> <li><a href="../../../../media/js/jquery.dataTables.js">../../../../media/js/jquery.dataTables.js</a></li> <li><a href="../../js/dataTables.autoFill.js">../../js/dataTables.autoFill.js</a></li> </ul> </div> <div class="table"> <p>The HTML shown below is the raw HTML table element, before it has been enhanced by DataTables:</p> </div> <div class="css"> <div> <p>This example uses a little bit of additional CSS beyond what is loaded from the library files (below), in order to correctly display the table. The additional CSS used is shown below:</p><code class="multiline language-css"></code> </div> <p>The following CSS library files are loaded for use in this example to provide the styling of the table:</p> <ul> <li><a href="../../../../media/css/jquery.dataTables.css">../../../../media/css/jquery.dataTables.css</a></li> <li><a href="../../css/autoFill.datatables.css">../../css/autoFill.datatables.css</a></li> </ul> </div> <div class="ajax"> <p>This table loads data by Ajax. The latest data that has been loaded is shown below. This data will update automatically as any additional data is loaded.</p> </div> <div class="php"> <p>The script used to perform the server-side processing for this table is shown below. Please note that this is just an example script using PHP. Server-side processing scripts can be written in any language, using <a href="//datatables.net/manual/server-side">the protocol described in the DataTables documentation</a>.</p> </div> </div> </section> </div> <section> <div class="footer"> <div class="gradient"></div> <div class="liner"> <h2>Other examples</h2> <div class="toc"> <div class="toc-group"> <h3><a href="./index.html">Initialisation</a></h3> <ul class="toc active"> <li><a href="./simple.html">Basic initialisation</a></li> <li><a href="./fills.html">Fill types</a></li> <li><a href="./keyTable.html">KeyTable integration</a></li> <li><a href="./events.html">Events</a></li> <li><a href="./alwaysAsk.html">Always confirm action</a></li> <li><a href="./columns.html">Column selector</a></li> <li><a href="./focus.html">Click focus</a></li> <li><a href="./scrolling.html">Scrolling DataTable</a></li> <li class="active"><a href="./plugins.html">Fill plug-ins</a></li> </ul> </div> <div class="toc-group"> <h3><a href="../styling/index.html">Styling</a></h3> <ul class="toc"> <li><a href="../styling/bootstrap.html">Bootstrap styling</a></li> <li><a href="../styling/foundation.html">Foundation styling</a></li> <li><a href="../styling/jqueryui.html">jQuery UI styling</a></li> </ul> </div> </div> <div class="epilogue"> <p>Please refer to the <a href="http://www.datatables.net">DataTables documentation</a> for full information about its API properties and methods.<br> Additionally, there are a wide range of <a href="http://www.datatables.net/extensions">extensions</a> and <a href= "http://www.datatables.net/plug-ins">plug-ins</a> which extend the capabilities of DataTables.</p> <p class="copyright">DataTables designed and created by <a href="http://www.sprymedia.co.uk">SpryMedia Ltd</a> &#169; 2007-2015<br> DataTables is licensed under the <a href="http://www.datatables.net/mit">MIT license</a>.</p> </div> </div> </div> </section> </body> </html>
wanghao524151/scrapy_joy
static/DataTables-1.10.9/extensions/AutoFill/examples/initialisation/plugins.html
HTML
apache-2.0
18,829
/* * Copyright (c) 2014, WSO2 Inc. (http://www.wso2.org) All Rights Reserved. * * WSO2 Inc. licenses this file to you under the Apache License, * Version 2.0 (the "License"); you may not use this file except * in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package org.wso2.carbon.identity.application.common.model; public class SPMLProvisioningConnectorConfig extends ProvisioningConnectorConfig { /** * */ private static final long serialVersionUID = -8336211694205008436L; @Override public String getName() { return "spml"; } @Override public boolean isValid() { return false; } }
IndunilRathnayake/carbon-identity
components/application-mgt/org.wso2.carbon.identity.application.common/src/main/java/org/wso2/carbon/identity/application/common/model/SPMLProvisioningConnectorConfig.java
Java
apache-2.0
1,062
/* * Copyright 2000-2014 JetBrains s.r.o. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.jetbrains.python.inspections.quickfix; import com.intellij.codeInspection.LocalQuickFix; import com.intellij.codeInspection.ProblemDescriptor; import com.intellij.openapi.project.Project; import com.intellij.psi.PsiElement; import com.jetbrains.python.PyBundle; import com.jetbrains.python.psi.*; import org.jetbrains.annotations.NotNull; /** * Created by IntelliJ IDEA. * User: Alexey.Ivanov * Date: 19.02.2010 * Time: 18:50:24 */ public class ReplaceBuiltinsQuickFix implements LocalQuickFix { @NotNull @Override public String getName() { return PyBundle.message("INTN.convert.builtin.import"); } @NotNull public String getFamilyName() { return PyBundle.message("INTN.Family.convert.builtin"); } @Override public void applyFix(@NotNull Project project, @NotNull ProblemDescriptor descriptor) { PyElementGenerator elementGenerator = PyElementGenerator.getInstance(project); PsiElement importStatement = descriptor.getPsiElement(); if (importStatement instanceof PyImportStatement) { for (PyImportElement importElement : ((PyImportStatement)importStatement).getImportElements()) { PyReferenceExpression importReference = importElement.getImportReferenceExpression(); if (importReference != null) { if ("__builtin__".equals(importReference.getName())) { importReference.replace(elementGenerator.createFromText(LanguageLevel.getDefault(), PyReferenceExpression.class, "builtins")); } if ("builtins".equals(importReference.getName())) { importReference.replace(elementGenerator.createFromText(LanguageLevel.getDefault(), PyReferenceExpression.class, "__builtin__")); } } } } } }
ivan-fedorov/intellij-community
python/src/com/jetbrains/python/inspections/quickfix/ReplaceBuiltinsQuickFix.java
Java
apache-2.0
2,342
SELECT "c_last_name" , "c_first_name" , "substr"("s_city", 1, 30) , "ss_ticket_number" , "amt" , "profit" FROM ( SELECT "ss_ticket_number" , "ss_customer_sk" , "store"."s_city" , "sum"("ss_coupon_amt") "amt" , "sum"("ss_net_profit") "profit" FROM ${database}.${schema}.store_sales , ${database}.${schema}.date_dim , ${database}.${schema}.store , ${database}.${schema}.household_demographics WHERE ("store_sales"."ss_sold_date_sk" = "date_dim"."d_date_sk") AND ("store_sales"."ss_store_sk" = "store"."s_store_sk") AND ("store_sales"."ss_hdemo_sk" = "household_demographics"."hd_demo_sk") AND (("household_demographics"."hd_dep_count" = 6) OR ("household_demographics"."hd_vehicle_count" > 2)) AND ("date_dim"."d_dow" = 1) AND ("date_dim"."d_year" IN (1999 , (1999 + 1) , (1999 + 2))) AND ("store"."s_number_employees" BETWEEN 200 AND 295) GROUP BY "ss_ticket_number", "ss_customer_sk", "ss_addr_sk", "store"."s_city" ) ms , ${database}.${schema}.customer WHERE ("ss_customer_sk" = "c_customer_sk") ORDER BY "c_last_name" ASC, "c_first_name" ASC, "substr"("s_city", 1, 30) ASC, "profit" ASC LIMIT 100
ocono-tech/presto
presto-benchto-benchmarks/src/main/resources/sql/presto/tpcds/q79.sql
SQL
apache-2.0
1,196
/* Copyright 2016 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package v2alpha1 import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/kubernetes/pkg/api/v1" ) // +genclient=true // Job represents the configuration of a single job. type Job struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Spec is a structure defining the expected behavior of a job. // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status // +optional Spec JobSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` // Status is a structure describing current status of a job. // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status // +optional Status JobStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` } // JobList is a collection of jobs. type JobList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Items is the list of Job. Items []Job `json:"items" protobuf:"bytes,2,rep,name=items"` } // JobTemplate describes a template for creating copies of a predefined pod. type JobTemplate struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Template defines jobs that will be created from this template // http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status // +optional Template JobTemplateSpec `json:"template,omitempty" protobuf:"bytes,2,opt,name=template"` } // JobTemplateSpec describes the data a Job should have when created from a template type JobTemplateSpec struct { // Standard object's metadata of the jobs created from this template. // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Specification of the desired behavior of the job. // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status // +optional Spec JobSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` } // JobSpec describes how the job execution will look like. type JobSpec struct { // Parallelism specifies the maximum desired number of pods the job should // run at any given time. The actual number of pods running in steady state will // be less than this number when ((.spec.completions - .status.successful) < .spec.parallelism), // i.e. when the work left to do is less than max parallelism. // More info: http://kubernetes.io/docs/user-guide/jobs // +optional Parallelism *int32 `json:"parallelism,omitempty" protobuf:"varint,1,opt,name=parallelism"` // Completions specifies the desired number of successfully finished pods the // job should be run with. Setting to nil means that the success of any // pod signals the success of all pods, and allows parallelism to have any positive // value. Setting to 1 means that parallelism is limited to 1 and the success of that // pod signals the success of the job. // More info: http://kubernetes.io/docs/user-guide/jobs // +optional Completions *int32 `json:"completions,omitempty" protobuf:"varint,2,opt,name=completions"` // Optional duration in seconds relative to the startTime that the job may be active // before the system tries to terminate it; value must be positive integer // +optional ActiveDeadlineSeconds *int64 `json:"activeDeadlineSeconds,omitempty" protobuf:"varint,3,opt,name=activeDeadlineSeconds"` // Selector is a label query over pods that should match the pod count. // Normally, the system sets this field for you. // More info: http://kubernetes.io/docs/user-guide/labels#label-selectors // +optional Selector *metav1.LabelSelector `json:"selector,omitempty" protobuf:"bytes,4,opt,name=selector"` // ManualSelector controls generation of pod labels and pod selectors. // Leave `manualSelector` unset unless you are certain what you are doing. // When false or unset, the system pick labels unique to this job // and appends those labels to the pod template. When true, // the user is responsible for picking unique labels and specifying // the selector. Failure to pick a unique label may cause this // and other jobs to not function correctly. However, You may see // `manualSelector=true` in jobs that were created with the old `extensions/v1beta1` // API. // More info: http://releases.k8s.io/HEAD/docs/design/selector-generation.md // +optional ManualSelector *bool `json:"manualSelector,omitempty" protobuf:"varint,5,opt,name=manualSelector"` // Template is the object that describes the pod that will be created when // executing a job. // More info: http://kubernetes.io/docs/user-guide/jobs Template v1.PodTemplateSpec `json:"template" protobuf:"bytes,6,opt,name=template"` } // JobStatus represents the current state of a Job. type JobStatus struct { // Conditions represent the latest available observations of an object's current state. // More info: http://kubernetes.io/docs/user-guide/jobs // +optional Conditions []JobCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,1,rep,name=conditions"` // StartTime represents time when the job was acknowledged by the Job Manager. // It is not guaranteed to be set in happens-before order across separate operations. // It is represented in RFC3339 form and is in UTC. // +optional StartTime *metav1.Time `json:"startTime,omitempty" protobuf:"bytes,2,opt,name=startTime"` // CompletionTime represents time when the job was completed. It is not guaranteed to // be set in happens-before order across separate operations. // It is represented in RFC3339 form and is in UTC. // +optional CompletionTime *metav1.Time `json:"completionTime,omitempty" protobuf:"bytes,3,opt,name=completionTime"` // Active is the number of actively running pods. // +optional Active int32 `json:"active,omitempty" protobuf:"varint,4,opt,name=active"` // Succeeded is the number of pods which reached Phase Succeeded. // +optional Succeeded int32 `json:"succeeded,omitempty" protobuf:"varint,5,opt,name=succeeded"` // Failed is the number of pods which reached Phase Failed. // +optional Failed int32 `json:"failed,omitempty" protobuf:"varint,6,opt,name=failed"` } type JobConditionType string // These are valid conditions of a job. const ( // JobComplete means the job has completed its execution. JobComplete JobConditionType = "Complete" // JobFailed means the job has failed its execution. JobFailed JobConditionType = "Failed" ) // JobCondition describes current state of a job. type JobCondition struct { // Type of job condition, Complete or Failed. Type JobConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=JobConditionType"` // Status of the condition, one of True, False, Unknown. Status v1.ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=k8s.io/kubernetes/pkg/api/v1.ConditionStatus"` // Last time the condition was checked. // +optional LastProbeTime metav1.Time `json:"lastProbeTime,omitempty" protobuf:"bytes,3,opt,name=lastProbeTime"` // Last time the condition transit from one status to another. // +optional LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,4,opt,name=lastTransitionTime"` // (brief) reason for the condition's last transition. // +optional Reason string `json:"reason,omitempty" protobuf:"bytes,5,opt,name=reason"` // Human readable message indicating details about last transition. // +optional Message string `json:"message,omitempty" protobuf:"bytes,6,opt,name=message"` } // +genclient=true // CronJob represents the configuration of a single cron job. type CronJob struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Spec is a structure defining the expected behavior of a job, including the schedule. // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status // +optional Spec CronJobSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` // Status is a structure describing current status of a job. // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status // +optional Status CronJobStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` } // CronJobList is a collection of cron jobs. type CronJobList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Items is the list of CronJob. Items []CronJob `json:"items" protobuf:"bytes,2,rep,name=items"` } // CronJobSpec describes how the job execution will look like and when it will actually run. type CronJobSpec struct { // Schedule contains the schedule in Cron format, see https://en.wikipedia.org/wiki/Cron. Schedule string `json:"schedule" protobuf:"bytes,1,opt,name=schedule"` // Optional deadline in seconds for starting the job if it misses scheduled // time for any reason. Missed jobs executions will be counted as failed ones. // +optional StartingDeadlineSeconds *int64 `json:"startingDeadlineSeconds,omitempty" protobuf:"varint,2,opt,name=startingDeadlineSeconds"` // ConcurrencyPolicy specifies how to treat concurrent executions of a Job. // +optional ConcurrencyPolicy ConcurrencyPolicy `json:"concurrencyPolicy,omitempty" protobuf:"bytes,3,opt,name=concurrencyPolicy,casttype=ConcurrencyPolicy"` // Suspend flag tells the controller to suspend subsequent executions, it does // not apply to already started executions. Defaults to false. // +optional Suspend *bool `json:"suspend,omitempty" protobuf:"varint,4,opt,name=suspend"` // JobTemplate is the object that describes the job that will be created when // executing a CronJob. JobTemplate JobTemplateSpec `json:"jobTemplate" protobuf:"bytes,5,opt,name=jobTemplate"` } // ConcurrencyPolicy describes how the job will be handled. // Only one of the following concurrent policies may be specified. // If none of the following policies is specified, the default one // is AllowConcurrent. type ConcurrencyPolicy string const ( // AllowConcurrent allows CronJobs to run concurrently. AllowConcurrent ConcurrencyPolicy = "Allow" // ForbidConcurrent forbids concurrent runs, skipping next run if previous // hasn't finished yet. ForbidConcurrent ConcurrencyPolicy = "Forbid" // ReplaceConcurrent cancels currently running job and replaces it with a new one. ReplaceConcurrent ConcurrencyPolicy = "Replace" ) // CronJobStatus represents the current state of a cron job. type CronJobStatus struct { // Active holds pointers to currently running jobs. // +optional Active []v1.ObjectReference `json:"active,omitempty" protobuf:"bytes,1,rep,name=active"` // LastScheduleTime keeps information of when was the last time the job was successfully scheduled. // +optional LastScheduleTime *metav1.Time `json:"lastScheduleTime,omitempty" protobuf:"bytes,4,opt,name=lastScheduleTime"` }
elipapa/kubernetes
pkg/apis/batch/v2alpha1/types.go
GO
apache-2.0
12,314
"""A ScrolledText widget feels like a text widget but also has a vertical scroll bar on its right. (Later, options may be added to add a horizontal bar as well, to make the bars disappear automatically when not needed, to move them to the other side of the window, etc.) Configuration options are passed to the Text widget. A Frame widget is inserted between the master and the text, to hold the Scrollbar widget. Most methods calls are inherited from the Text widget; Pack, Grid and Place methods are redirected to the Frame widget however. """ __all__ = ['ScrolledText'] from Tkinter import Frame, Text, Scrollbar, Pack, Grid, Place from Tkconstants import RIGHT, LEFT, Y, BOTH class ScrolledText(Text): def __init__(self, master=None, **kw): self.frame = Frame(master) self.vbar = Scrollbar(self.frame) self.vbar.pack(side=RIGHT, fill=Y) kw.update({'yscrollcommand': self.vbar.set}) Text.__init__(self, self.frame, **kw) self.pack(side=LEFT, fill=BOTH, expand=True) self.vbar['command'] = self.yview # Copy geometry methods of self.frame -- hack! methods = vars(Pack).keys() + vars(Grid).keys() + vars(Place).keys() for m in methods: if m[0] != '_' and m != 'config' and m != 'configure': setattr(self, m, getattr(self.frame, m)) def __str__(self): return str(self.frame) def example(): import __main__ from Tkconstants import END stext = ScrolledText(bg='white', height=10) stext.insert(END, __main__.__doc__) stext.pack(fill=BOTH, side=LEFT, expand=True) stext.focus_set() stext.mainloop() if __name__ == "__main__": example()
Lh4cKg/sl4a
python/src/Lib/lib-tk/ScrolledText.py
Python
apache-2.0
1,701
/* * Translated default messages for the jQuery validation plugin. * Locale: KA (Georgian; แƒฅแƒแƒ แƒ—แƒฃแƒšแƒ˜) */ $.extend($.validator.messages, { required: "แƒแƒ› แƒ•แƒ”แƒšแƒ˜แƒก แƒจแƒ”แƒ•แƒกแƒ”แƒ‘แƒ แƒแƒฃแƒชแƒ˜แƒšแƒ”แƒ‘แƒ”แƒšแƒ˜แƒ.", remote: "แƒ’แƒ—แƒฎแƒแƒ•แƒ— แƒ›แƒ˜แƒฃแƒ—แƒ˜แƒ—แƒแƒ— แƒกแƒฌแƒแƒ แƒ˜ แƒ›แƒœแƒ˜แƒจแƒ•แƒœแƒ”แƒšแƒแƒ‘แƒ.", email: "แƒ’แƒ—แƒฎแƒแƒ•แƒ— แƒ›แƒ˜แƒฃแƒ—แƒ˜แƒ—แƒแƒ— แƒ”แƒš-แƒคแƒแƒกแƒขแƒ˜แƒก แƒ™แƒแƒ แƒ”แƒฅแƒขแƒฃแƒšแƒ˜ แƒ›แƒ˜แƒกแƒแƒ›แƒแƒ แƒ—แƒ˜.", url: "แƒ’แƒ—แƒฎแƒแƒ•แƒ— แƒ›แƒ˜แƒฃแƒ—แƒ˜แƒ—แƒแƒ— แƒ™แƒแƒ แƒ”แƒฅแƒขแƒฃแƒšแƒ˜ URL.", date: "แƒ’แƒ—แƒฎแƒแƒ•แƒ— แƒ›แƒ˜แƒฃแƒ—แƒ˜แƒ—แƒแƒ— แƒ™แƒแƒ แƒ”แƒฅแƒขแƒฃแƒšแƒ˜ แƒ—แƒแƒ แƒ˜แƒฆแƒ˜.", dateISO: "แƒ’แƒ—แƒฎแƒแƒ•แƒ— แƒ›แƒ˜แƒฃแƒ—แƒ˜แƒ—แƒแƒ— แƒ™แƒแƒ แƒ”แƒฅแƒขแƒฃแƒšแƒ˜ แƒ—แƒแƒ แƒ˜แƒฆแƒ˜ ISO แƒคแƒแƒ แƒ›แƒแƒขแƒจแƒ˜.", number: "แƒ’แƒ—แƒฎแƒแƒ•แƒ— แƒ›แƒ˜แƒฃแƒ—แƒ˜แƒ—แƒแƒ— แƒชแƒ˜แƒคแƒ แƒ˜.", digits: "แƒ’แƒ—แƒฎแƒแƒ•แƒ— แƒ›แƒ˜แƒฃแƒ—แƒ˜แƒ—แƒแƒ— แƒ›แƒฎแƒแƒšแƒแƒ“ แƒชแƒ˜แƒคแƒ แƒ”แƒ‘แƒ˜.", creditcard: "แƒ’แƒ—แƒฎแƒแƒ•แƒ— แƒ›แƒ˜แƒฃแƒ—แƒ˜แƒ—แƒแƒ— แƒกแƒแƒ™แƒ แƒ”แƒ“แƒ˜แƒขแƒ แƒ‘แƒแƒ แƒแƒ—แƒ˜แƒก แƒ™แƒแƒ แƒ”แƒฅแƒขแƒฃแƒšแƒ˜ แƒœแƒแƒ›แƒ”แƒ แƒ˜.", equalTo: "แƒ’แƒ—แƒฎแƒแƒ•แƒ— แƒ›แƒ˜แƒฃแƒ—แƒ˜แƒ—แƒแƒ— แƒแƒกแƒ”แƒ—แƒ˜แƒ•แƒ” แƒ›แƒœแƒ˜แƒจแƒ•แƒœแƒ”แƒšแƒแƒ‘แƒ แƒ™แƒ˜แƒ“แƒ”แƒ• แƒ”แƒ แƒ—แƒฎแƒ”แƒš.", extension: "แƒ’แƒ—แƒฎแƒแƒ•แƒ— แƒแƒ˜แƒ แƒฉแƒ˜แƒแƒ— แƒคแƒแƒ˜แƒšแƒ˜ แƒ™แƒแƒ แƒ”แƒฅแƒขแƒฃแƒšแƒ˜ แƒ’แƒแƒคแƒแƒ แƒ—แƒแƒ”แƒ‘แƒ˜แƒ—.", maxlength: $.validator.format("แƒ“แƒแƒกแƒแƒจแƒ•แƒ”แƒ‘แƒ˜แƒ แƒแƒ แƒแƒฃแƒ›แƒ”แƒขแƒ”แƒก {0} แƒกแƒ˜แƒ›แƒ‘แƒแƒšแƒ."), minlength: $.validator.format("แƒแƒฃแƒชแƒ˜แƒšแƒ”แƒ‘แƒ”แƒšแƒ˜แƒ แƒจแƒ”แƒ˜แƒงแƒ•แƒแƒœแƒแƒ— แƒ›แƒ˜แƒœแƒ˜แƒ›แƒฃแƒ› {0} แƒกแƒ˜แƒ›แƒ‘แƒแƒšแƒ."), rangelength: $.validator.format("แƒขแƒ”แƒฅแƒกแƒขแƒจแƒ˜ แƒกแƒ˜แƒ›แƒ‘แƒแƒšแƒแƒ”แƒ‘แƒ˜แƒก แƒ แƒแƒแƒ“แƒ”แƒœแƒแƒ‘แƒ แƒฃแƒœแƒ“แƒ แƒ˜แƒงแƒแƒก {0}-แƒ“แƒแƒœ {1}-แƒ›แƒ“แƒ”."), range: $.validator.format("แƒ’แƒ—แƒฎแƒแƒ•แƒ— แƒจแƒ”แƒ˜แƒงแƒ•แƒแƒœแƒแƒ— แƒชแƒ˜แƒคแƒ แƒ˜ {0}-แƒ“แƒแƒœ {1}-แƒ›แƒ“แƒ”."), max: $.validator.format("แƒ’แƒ—แƒฎแƒแƒ•แƒ— แƒจแƒ”แƒ˜แƒงแƒ•แƒแƒœแƒแƒ— แƒชแƒ˜แƒคแƒ แƒ˜ แƒ แƒแƒ›แƒ”แƒšแƒ˜แƒช แƒœแƒแƒ™แƒšแƒ”แƒ‘แƒ˜แƒ แƒแƒœ แƒฃแƒ“แƒ แƒ˜แƒกย {0}-แƒก."), min: $.validator.format("แƒ’แƒ—แƒฎแƒแƒ•แƒ— แƒจแƒ”แƒ˜แƒงแƒ•แƒแƒœแƒแƒ— แƒชแƒ˜แƒคแƒ แƒ˜ แƒ แƒแƒ›แƒ”แƒšแƒ˜แƒช แƒ›แƒ”แƒขแƒ˜แƒ แƒแƒœ แƒฃแƒ“แƒ แƒ˜แƒกย {0}-แƒก.") });
brianrandell/rgroup
src/Before/Rg.Web/lib/jquery-validation/src/localization/messages_ka.js
JavaScript
apache-2.0
2,331
/* * Copyright 2013 The Netty Project * * The Netty Project licenses this file to you under the Apache License, * version 2.0 (the "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at: * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. */ package io.netty.channel.udt; import com.barchart.udt.TypeUDT; import com.barchart.udt.nio.KindUDT; import io.netty.buffer.ByteBufAllocator; import io.netty.channel.ChannelConfig; import io.netty.channel.ChannelException; import io.netty.channel.ChannelOption; import io.netty.channel.MessageSizeEstimator; import io.netty.channel.RecvByteBufAllocator; /** * A {@link ChannelConfig} for a {@link UdtServerChannel}. * <p> * Note that {@link TypeUDT#DATAGRAM} message oriented channels treat * {@code "receiveBufferSize"} and {@code "sendBufferSize"} as maximum message * size. If received or sent message does not fit specified sizes, * {@link ChannelException} will be thrown. */ public interface UdtServerChannelConfig extends UdtChannelConfig { /** * Gets {@link KindUDT#ACCEPTOR} channel backlog via * {@link ChannelOption#SO_BACKLOG}. */ int getBacklog(); /** * Sets {@link KindUDT#ACCEPTOR} channel backlog via * {@link ChannelOption#SO_BACKLOG}. */ UdtServerChannelConfig setBacklog(int backlog); @Override UdtServerChannelConfig setConnectTimeoutMillis(int connectTimeoutMillis); @Override UdtServerChannelConfig setMaxMessagesPerRead(int maxMessagesPerRead); @Override UdtServerChannelConfig setWriteSpinCount(int writeSpinCount); @Override UdtServerChannelConfig setAllocator(ByteBufAllocator allocator); @Override UdtServerChannelConfig setRecvByteBufAllocator(RecvByteBufAllocator allocator); @Override UdtServerChannelConfig setAutoRead(boolean autoRead); @Override UdtServerChannelConfig setProtocolReceiveBufferSize(int size); @Override UdtServerChannelConfig setProtocolSendBufferSize(int size); @Override UdtServerChannelConfig setReceiveBufferSize(int receiveBufferSize); @Override UdtServerChannelConfig setReuseAddress(boolean reuseAddress); @Override UdtServerChannelConfig setSendBufferSize(int sendBufferSize); @Override UdtServerChannelConfig setSoLinger(int soLinger); @Override UdtServerChannelConfig setSystemReceiveBufferSize(int size); @Override UdtServerChannelConfig setSystemSendBufferSize(int size); @Override UdtServerChannelConfig setWriteBufferHighWaterMark(int writeBufferHighWaterMark); @Override UdtServerChannelConfig setWriteBufferLowWaterMark(int writeBufferLowWaterMark); @Override UdtServerChannelConfig setMessageSizeEstimator(MessageSizeEstimator estimator); }
phlizik/netty
transport-udt/src/main/java/io/netty/channel/udt/UdtServerChannelConfig.java
Java
apache-2.0
3,151
// This file was automatically generated from template.soy. // Please don't edit this file by hand. if (typeof planepage == 'undefined') { var planepage = {}; } planepage.messages = function(opt_data, opt_ignored, opt_ijData) { return '<div style="display: none"><span id="Plane_rows">ํ–‰ ์ˆ˜: %1</span><span id="Plane_getRows">ํ–‰ ์ˆ˜ (%1)</span><span id="Plane_rows1">1๋“ฑ์„ ํ–‰ ์ˆ˜: %1</span><span id="Plane_getRows1">1๋“ฑ์„ ํ–‰ ์ˆ˜ (%1)</span><span id="Plane_rows2">2๋“ฑ์„ ํ–‰ ์ˆ˜: %1</span><span id="Plane_getRows2">2๋“ฑ์„ ํ–‰ ์ˆ˜ (%1)</span><span id="Plane_seats">์ขŒ์„ ์ˆ˜: %1</span><span id="Plane_placeholder">?</span><span id="Plane_setSeats">์ขŒ์„์ˆ˜ =</span></div>'; }; planepage.start = function(opt_data, opt_ignored, opt_ijData) { var output = planepage.messages(null, null, opt_ijData) + '<table width="100%"><tr><td><h1><a href="https://developers.google.com/blockly/">Blockly</a>&rlm; &gt; <a href="../index.html">Demos</a>&rlm; &gt; <span id="title">๋น„ํ–‰๊ธฐ ์ขŒ์„ ๊ณ„์‚ฐ๊ธฐ</span> &nbsp; '; var iLimit37 = opt_ijData.maxLevel + 1; for (var i37 = 1; i37 < iLimit37; i37++) { output += ' ' + ((i37 == opt_ijData.level) ? '<span class="tab" id="selected">' + soy.$$escapeHtml(i37) + '</span>' : (i37 < opt_ijData.level) ? '<a class="tab previous" href="?lang=' + soy.$$escapeHtml(opt_ijData.lang) + '&level=' + soy.$$escapeHtml(i37) + '">' + soy.$$escapeHtml(i37) + '</a>' : '<a class="tab" href="?lang=' + soy.$$escapeHtml(opt_ijData.lang) + '&level=' + soy.$$escapeHtml(i37) + '">' + soy.$$escapeHtml(i37) + '</a>'); } output += '</h1></td><td class="farSide"><span ' + ((opt_ijData.lang == 'en') ? 'id="languageBorder"' : '') + ' style="padding: 10px"><select id="languageMenu"></select></span></td></tr></table><script src="slider.js"><\/script><svg id="plane" xmlns="http://www.w3.org/2000/svg" xmlns:svg="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" version="1.1" width="600" height="320" viewBox="0 110 600 320"><defs><g id="row1st"><rect class="seat1st" width="10" height="10" x="75" y="243" /><rect class="seat1st" width="10" height="10" x="75" y="254" /><rect class="seat1st" width="10" height="10" x="75" y="272" /><rect class="seat1st" width="10" height="10" x="75" y="283" /></g><g id="row2nd"><rect class="seat2nd" width="10" height="8" x="75" y="243" /><rect class="seat2nd" width="10" height="8" x="75" y="251" /><rect class="seat2nd" width="10" height="8" x="75" y="269" /><rect class="seat2nd" width="10" height="8" x="75" y="277" /><rect class="seat2nd" width="10" height="8" x="75" y="285" /></g><linearGradient id="grad1" x1="0%" y1="100%" x2="0%" y2="0%"><stop offset="0%" style="stop-color:#fff;stop-opacity:0" /><stop offset="100%" style="stop-color:#fff;stop-opacity:1" /></linearGradient><linearGradient id="grad2" x1="0%" y1="0%" x2="0%" y2="100%"><stop offset="0%" style="stop-color:#fff;stop-opacity:0" /><stop offset="100%" style="stop-color:#fff;stop-opacity:1" /></linearGradient></defs><path d="m 214,270 l 159,-254 31,-16 -74,189 0,162 74,189 -31,16 z" id="wing" /><path d="m 577,270 22,-93 -27,6 -44,88 44,88 27,6 z" id="tail" /><path d="m 577,270 l -94,24 h -407 c -38,0 -75,-13 -75,-26 c 0,-13 38,-26 75,-26 h 407 z" id="fuselage" /><rect width="610" height="100" x="-5" y="110" fill="url(#grad1)" /><rect width="610" height="100" x="-5" y="330" fill="url(#grad2)" /><text id="row1stText" x="55" y="380"></text><text id="row2ndText" x="55" y="420"></text><text x="55" y="210"><tspan id="seatText"></tspan><tspan id="seatYes" style="fill: #0c0;" dy="10">&#x2713;</tspan><tspan id="seatNo" style="fill: #f00;" dy="10">&#x2717;</tspan></text>' + ((opt_ijData.level > 1) ? '<rect id="crew_right" class="crew" width="10" height="10" x="35" y="254" /><rect id="crew_left" class="crew" width="10" height="10" x="35" y="272" />' : '') + '</svg><p>'; switch (opt_ijData.level) { case 1: output += '๋น„ํ–‰๊ธฐ๋Š” ์Šน๊ฐ ์ขŒ์„์˜ ํ–‰ ์ˆ˜๊ฐ€ ์žˆ์Šต๋‹ˆ๋‹ค. ๊ฐ ํ–‰์—๋Š” ์‹œํŠธ ๋„ค ๊ฐœ๊ฐ€ ํฌํ•จ๋˜์–ด ์žˆ์Šต๋‹ˆ๋‹ค.'; break; case 2: output += '๋น„ํ–‰๊ธฐ๋Š” ๋น„ํ–‰ ๊ฐ‘ํŒ(์กฐ์ข…์‚ฌ์™€ ๋ถ€์กฐ์ข…์‚ฌ์šฉ)์—์„œ ์ขŒ์„ ๋‘ ๊ฐœ๊ฐ€ ์žˆ๊ณ , ์Šน๊ฐ ์ขŒ์„์˜ ํ–‰ ์ˆ˜๊ฐ€ ์žˆ์Šต๋‹ˆ๋‹ค. ๊ฐ ํ–‰์—๋Š” ์‹œํŠธ ๋„ค ๊ฐœ๊ฐ€ ํฌํ•จ๋˜์–ด ์žˆ์Šต๋‹ˆ๋‹ค.'; break; case 3: output += '๋น„ํ–‰๊ธฐ๋Š” ๋น„ํ–‰ ๊ฐ‘ํŒ(์กฐ์ข…์‚ฌ์™€ ๋ถ€์กฐ์ข…์‚ฌ์šฉ)์—์„œ ์ขŒ์„ ๋‘ ๊ฐœ๊ฐ€ ์žˆ๊ณ , 1๋“ฑ์„๊ณผ 2๋“ฑ์„ ์Šน๊ฐ ์ขŒ์„์˜ ํ–‰ ์ˆ˜๊ฐ€ ์žˆ์Šต๋‹ˆ๋‹ค. ๊ฐ 1๋“ฑ์„ ํ–‰์—๋Š” ์‹œํŠธ ๋„ค ๊ฐœ๊ฐ€ ํฌํ•จ๋˜์–ด ์žˆ์Šต๋‹ˆ๋‹ค. ๊ฐ 2๋“ฑ์„ ํ–‰์—๋Š” ์‹œํŠธ ๋‹ค์„ฏ ๊ฐœ๊ฐ€ ํฌํ•จ๋˜์–ด ์žˆ์Šต๋‹ˆ๋‹ค.'; break; } output += '</p><p>ํ–‰์ด ๋ฐ”๋€(์œ„) ๋น„ํ–‰๊ธฐ์— ์ขŒ์„์˜ ์ด ์ˆ˜๋ฅผ ๊ณ„์‚ฐํ•˜๋Š” ๊ณต์‹(์•„๋ž˜)์„ ๊ตฌ์ถ•ํ•˜์„ธ์š”.</p><script src="../../blockly_compressed.js"><\/script><script src="../../blocks_compressed.js"><\/script><script src="../../javascript_compressed.js"><\/script><script src="../../msg/js/' + soy.$$escapeHtml(opt_ijData.lang) + '.js"><\/script><script src="blocks.js"><\/script>' + planepage.toolbox(null, null, opt_ijData) + '<div id="blockly"></div>'; return output; }; planepage.toolbox = function(opt_data, opt_ignored, opt_ijData) { return '<xml id="toolbox" style="display: none"><block type="math_number"></block><block type="math_arithmetic"></block><block type="math_arithmetic"><field name="OP">MULTIPLY</field></block>' + ((opt_ijData.level <= 2) ? '<block type="plane_get_rows"></block>' : '<block type="plane_get_rows1st"></block><block type="plane_get_rows2nd"></block>') + '</xml>'; };
faizshukri/blockly
demos/plane/generated/ko.js
JavaScript
apache-2.0
5,535
/* * Copyright 2000-2009 JetBrains s.r.o. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.intellij.uiDesigner.actions; import com.intellij.openapi.actionSystem.AnActionEvent; import com.intellij.uiDesigner.CaptionSelection; import com.intellij.uiDesigner.UIDesignerBundle; import com.jgoodies.forms.layout.FormLayout; import java.util.ArrayList; /** * @author yole */ public class GroupRowsColumnsAction extends RowColumnAction { public GroupRowsColumnsAction() { super(UIDesignerBundle.message("action.group.columns"), null, UIDesignerBundle.message("action.group.rows"), null); } @Override public void update(final AnActionEvent e) { super.update(e); CaptionSelection selection = CaptionSelection.DATA_KEY.getData(e.getDataContext()); if (selection != null) { e.getPresentation().setEnabled(selection.getContainer() != null && selection.getContainer().getLayout() instanceof FormLayout && getCellsToGroup(selection).length > 1 && !isGrouped(selection)); } } public static boolean isGrouped(final CaptionSelection selection) { FormLayout layout = (FormLayout) selection.getContainer().getLayout(); int[][] groups = selection.isRow() ? layout.getRowGroups() : layout.getColumnGroups(); final int[] indices = selection.getSelection(); for (int[] group : groups) { if (intersect(group, indices)) return true; } return false; } public static boolean intersect(final int[] group, final int[] indices) { for (int groupMember : group) { for (int index : indices) { if (groupMember == index+1) return true; } } return false; } protected void actionPerformed(CaptionSelection selection) { FormLayout layout = (FormLayout) selection.getContainer().getLayout(); int[][] oldGroups = selection.isRow() ? layout.getRowGroups() : layout.getColumnGroups(); int[][] newGroups = new int[oldGroups.length + 1][]; System.arraycopy(oldGroups, 0, newGroups, 0, oldGroups.length); int[] cellsToGroup = getCellsToGroup(selection); newGroups [oldGroups.length] = new int [cellsToGroup.length]; for(int i=0; i<cellsToGroup.length; i++) { newGroups [oldGroups.length] [i] = cellsToGroup [i]+1; } if (selection.isRow()) { layout.setRowGroups(newGroups); } else { layout.setColumnGroups(newGroups); } } private static int[] getCellsToGroup(CaptionSelection selection) { ArrayList<Integer> cells = new ArrayList<Integer>(); int[] selectedIndices = selection.getSelection(); for(int i: selectedIndices) { if (!selection.getContainer().getGridLayoutManager().isGapCell(selection.getContainer(), selection.isRow(), i)) { cells.add(i); } } int[] result = new int[cells.size()]; for(int i=0; i<cells.size(); i++) { result [i] = cells.get(i).intValue(); } return result; } }
SerCeMan/intellij-community
plugins/ui-designer/src/com/intellij/uiDesigner/actions/GroupRowsColumnsAction.java
Java
apache-2.0
3,436
package com.alibaba.common.lang; import com.alibaba.common.lang.exception.ChainedRuntimeException; /** * ๅฝ“<code>ObjectUtil.clone</code>ๆ–นๆณ•่ขซ่ฐƒ็”จๆ—ถ๏ผŒๅฆ‚ๆžœ่ขซๅคๅˆถ็š„ๅฏน่ฑกไธๆ”ฏๆŒ่ฏฅๆ“ไฝœ๏ผŒๅˆ™ๆŠ›ๅ‡บ่ฏฅๅผ‚ๅธธใ€‚ * * <p> * ๆณจๆ„๏ผŒๅ’Œ<code>java.lang.CloneNotSupportedException</code>ไธๅŒ๏ผŒ่ฏฅๅผ‚ๅธธไปŽ * <code>RuntimeException</code>ๆดพ็”Ÿใ€‚ * </p> * * @author Michael Zhou * @version $Id: CloneNotSupportedException.java 1291 2005-03-04 03:23:30Z * baobao $ */ public class CloneNotSupportedException extends ChainedRuntimeException { private static final long serialVersionUID = 3257281439807584562L; /** * ๆž„้€ ไธ€ไธช็ฉบ็š„ๅผ‚ๅธธ. */ public CloneNotSupportedException() { super(); } /** * ๆž„้€ ไธ€ไธชๅผ‚ๅธธ, ๆŒ‡ๆ˜Žๅผ‚ๅธธ็š„่ฏฆ็ป†ไฟกๆฏ. * * @param message * ่ฏฆ็ป†ไฟกๆฏ */ public CloneNotSupportedException(String message) { super(message); } /** * ๆž„้€ ไธ€ไธชๅผ‚ๅธธ, ๆŒ‡ๆ˜Žๅผ•่ตท่ฟ™ไธชๅผ‚ๅธธ็š„่ตทๅ› . * * @param cause * ๅผ‚ๅธธ็š„่ตทๅ›  */ public CloneNotSupportedException(Throwable cause) { super(cause); } /** * ๆž„้€ ไธ€ไธชๅผ‚ๅธธ, ๆŒ‡ๆ˜Žๅผ•่ตท่ฟ™ไธชๅผ‚ๅธธ็š„่ตทๅ› . * * @param message * ่ฏฆ็ป†ไฟกๆฏ * @param cause * ๅผ‚ๅธธ็š„่ตทๅ›  */ public CloneNotSupportedException(String message, Throwable cause) { super(message, cause); } }
jeary/RocketMQ
rocketmq-filtersrv/src/main/java/com/alibaba/common/lang/CloneNotSupportedException.java
Java
apache-2.0
1,520
package units import ( "fmt" "strconv" "strings" ) // Ulimit is a human friendly version of Rlimit. type Ulimit struct { Name string Hard int64 Soft int64 } // Rlimit specifies the resource limits, such as max open files. type Rlimit struct { Type int `json:"type,omitempty"` Hard uint64 `json:"hard,omitempty"` Soft uint64 `json:"soft,omitempty"` } const ( // magic numbers for making the syscall // some of these are defined in the syscall package, but not all. // Also since Windows client doesn't get access to the syscall package, need to // define these here rlimitAs = 9 rlimitCore = 4 rlimitCPU = 0 rlimitData = 2 rlimitFsize = 1 rlimitLocks = 10 rlimitMemlock = 8 rlimitMsgqueue = 12 rlimitNice = 13 rlimitNofile = 7 rlimitNproc = 6 rlimitRss = 5 rlimitRtprio = 14 rlimitRttime = 15 rlimitSigpending = 11 rlimitStack = 3 ) var ulimitNameMapping = map[string]int{ //"as": rlimitAs, // Disabled since this doesn't seem usable with the way Docker inits a container. "core": rlimitCore, "cpu": rlimitCPU, "data": rlimitData, "fsize": rlimitFsize, "locks": rlimitLocks, "memlock": rlimitMemlock, "msgqueue": rlimitMsgqueue, "nice": rlimitNice, "nofile": rlimitNofile, "nproc": rlimitNproc, "rss": rlimitRss, "rtprio": rlimitRtprio, "rttime": rlimitRttime, "sigpending": rlimitSigpending, "stack": rlimitStack, } // ParseUlimit parses and returns a Ulimit from the specified string. func ParseUlimit(val string) (*Ulimit, error) { parts := strings.SplitN(val, "=", 2) if len(parts) != 2 { return nil, fmt.Errorf("invalid ulimit argument: %s", val) } if _, exists := ulimitNameMapping[parts[0]]; !exists { return nil, fmt.Errorf("invalid ulimit type: %s", parts[0]) } var ( soft int64 hard = &soft // default to soft in case no hard was set temp int64 err error ) switch limitVals := strings.Split(parts[1], ":"); len(limitVals) { case 2: temp, err = strconv.ParseInt(limitVals[1], 10, 64) if err != nil { return nil, err } hard = &temp fallthrough case 1: soft, err = strconv.ParseInt(limitVals[0], 10, 64) if err != nil { return nil, err } default: return nil, fmt.Errorf("too many limit value arguments - %s, can only have up to two, `soft[:hard]`", parts[1]) } if *hard != -1 { if soft == -1 { return nil, fmt.Errorf("ulimit soft limit must be less than or equal to hard limit: soft: -1 (unlimited), hard: %d", *hard) } if soft > *hard { return nil, fmt.Errorf("ulimit soft limit must be less than or equal to hard limit: %d > %d", soft, *hard) } } return &Ulimit{Name: parts[0], Soft: soft, Hard: *hard}, nil } // GetRlimit returns the RLimit corresponding to Ulimit. func (u *Ulimit) GetRlimit() (*Rlimit, error) { t, exists := ulimitNameMapping[u.Name] if !exists { return nil, fmt.Errorf("invalid ulimit name %s", u.Name) } return &Rlimit{Type: t, Soft: uint64(u.Soft), Hard: uint64(u.Hard)}, nil } func (u *Ulimit) String() string { return fmt.Sprintf("%s=%d:%d", u.Name, u.Soft, u.Hard) }
zhangmingld/kubernetes
vendor/github.com/docker/go-units/ulimit.go
GO
apache-2.0
3,189
package archive import ( "archive/tar" "fmt" "io" "io/ioutil" "os" "path" "sort" "testing" ) func TestHardLinkOrder(t *testing.T) { names := []string{"file1.txt", "file2.txt", "file3.txt"} msg := []byte("Hey y'all") // Create dir src, err := ioutil.TempDir("", "docker-hardlink-test-src-") if err != nil { t.Fatal(err) } //defer os.RemoveAll(src) for _, name := range names { func() { fh, err := os.Create(path.Join(src, name)) if err != nil { t.Fatal(err) } defer fh.Close() if _, err = fh.Write(msg); err != nil { t.Fatal(err) } }() } // Create dest, with changes that includes hardlinks dest, err := ioutil.TempDir("", "docker-hardlink-test-dest-") if err != nil { t.Fatal(err) } os.RemoveAll(dest) // we just want the name, at first if err := copyDir(src, dest); err != nil { t.Fatal(err) } defer os.RemoveAll(dest) for _, name := range names { for i := 0; i < 5; i++ { if err := os.Link(path.Join(dest, name), path.Join(dest, fmt.Sprintf("%s.link%d", name, i))); err != nil { t.Fatal(err) } } } // get changes changes, err := ChangesDirs(dest, src) if err != nil { t.Fatal(err) } // sort sort.Sort(changesByPath(changes)) // ExportChanges ar, err := ExportChanges(dest, changes) if err != nil { t.Fatal(err) } hdrs, err := walkHeaders(ar) if err != nil { t.Fatal(err) } // reverse sort sort.Sort(sort.Reverse(changesByPath(changes))) // ExportChanges arRev, err := ExportChanges(dest, changes) if err != nil { t.Fatal(err) } hdrsRev, err := walkHeaders(arRev) if err != nil { t.Fatal(err) } // line up the two sets sort.Sort(tarHeaders(hdrs)) sort.Sort(tarHeaders(hdrsRev)) // compare Size and LinkName for i := range hdrs { if hdrs[i].Name != hdrsRev[i].Name { t.Errorf("headers - expected name %q; but got %q", hdrs[i].Name, hdrsRev[i].Name) } if hdrs[i].Size != hdrsRev[i].Size { t.Errorf("headers - %q expected size %d; but got %d", hdrs[i].Name, hdrs[i].Size, hdrsRev[i].Size) } if hdrs[i].Typeflag != hdrsRev[i].Typeflag { t.Errorf("headers - %q expected type %d; but got %d", hdrs[i].Name, hdrs[i].Typeflag, hdrsRev[i].Typeflag) } if hdrs[i].Linkname != hdrsRev[i].Linkname { t.Errorf("headers - %q expected linkname %q; but got %q", hdrs[i].Name, hdrs[i].Linkname, hdrsRev[i].Linkname) } } } type tarHeaders []tar.Header func (th tarHeaders) Len() int { return len(th) } func (th tarHeaders) Swap(i, j int) { th[j], th[i] = th[i], th[j] } func (th tarHeaders) Less(i, j int) bool { return th[i].Name < th[j].Name } func walkHeaders(r io.Reader) ([]tar.Header, error) { t := tar.NewReader(r) headers := []tar.Header{} for { hdr, err := t.Next() if err != nil { if err == io.EOF { break } return headers, err } headers = append(headers, *hdr) } return headers, nil }
LoHChina/docker
pkg/archive/changes_posix_test.go
GO
apache-2.0
2,875
/*** Copyright (c) 2012 CommonsWare, LLC Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0. Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. From _The Busy Coder's Guide to Android Development_ https://commonsware.com/Android */ package com.commonsware.watchauth; import android.content.BroadcastReceiver; import android.content.Context; import android.content.Intent; public class UnlockReceiver extends BroadcastReceiver { @Override public void onReceive(Context ctxt, Intent intent) { Intent i=new Intent(ctxt, AuthDetectionService.class); i.setAction(AuthDetectionService.CMD_UNLOCK); ctxt.startService(i); } }
VinayakDeshpande11/cw-omnibus
SmartWatch/WatchAuth/src/com/commonsware/watchauth/UnlockReceiver.java
Java
apache-2.0
1,091
/* * Copyright 2000-2012 JetBrains s.r.o. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.intellij.xdebugger.impl.ui; import com.intellij.ide.ui.customization.CustomizableActionGroupProvider; import com.intellij.xdebugger.impl.actions.XDebuggerActions; /** * @author nik */ public class XDebugTabCustomizableActionGroupProvider extends CustomizableActionGroupProvider { @Override public void registerGroups(CustomizableActionGroupRegistrar registrar) { registrar.addCustomizableActionGroup(XDebuggerActions.TOOL_WINDOW_TOP_TOOLBAR_GROUP, "Debug Tool Window Top Toolbar"); registrar.addCustomizableActionGroup(XDebuggerActions.TOOL_WINDOW_LEFT_TOOLBAR_GROUP, "Debug Tool Window Left Toolbar"); } }
Lekanich/intellij-community
platform/xdebugger-impl/src/com/intellij/xdebugger/impl/ui/XDebugTabCustomizableActionGroupProvider.java
Java
apache-2.0
1,238
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.mapreduce.lib.jobcontrol; import java.io.IOException; import java.util.ArrayList; import java.util.Collection; import java.util.Iterator; import java.util.LinkedList; import java.util.List; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.mapred.jobcontrol.Job; import org.apache.hadoop.mapreduce.lib.jobcontrol.ControlledJob.State; import org.apache.hadoop.util.StringUtils; /** * This class encapsulates a set of MapReduce jobs and its dependency. * * It tracks the states of the jobs by placing them into different tables * according to their states. * * This class provides APIs for the client app to add a job to the group * and to get the jobs in the group in different states. When a job is * added, an ID unique to the group is assigned to the job. * * This class has a thread that submits jobs when they become ready, * monitors the states of the running jobs, and updates the states of jobs * based on the state changes of their depending jobs states. The class * provides APIs for suspending/resuming the thread, and * for stopping the thread. * */ @InterfaceAudience.Public @InterfaceStability.Evolving public class JobControl implements Runnable { private static final Log LOG = LogFactory.getLog(JobControl.class); // The thread can be in one of the following state public static enum ThreadState {RUNNING, SUSPENDED,STOPPED, STOPPING, READY}; private ThreadState runnerState; // the thread state private LinkedList<ControlledJob> jobsInProgress = new LinkedList<ControlledJob>(); private LinkedList<ControlledJob> successfulJobs = new LinkedList<ControlledJob>(); private LinkedList<ControlledJob> failedJobs = new LinkedList<ControlledJob>(); private long nextJobID; private String groupName; /** * Construct a job control for a group of jobs. * @param groupName a name identifying this group */ public JobControl(String groupName) { this.nextJobID = -1; this.groupName = groupName; this.runnerState = ThreadState.READY; } private static List<ControlledJob> toList( LinkedList<ControlledJob> jobs) { ArrayList<ControlledJob> retv = new ArrayList<ControlledJob>(); for (ControlledJob job : jobs) { retv.add(job); } return retv; } synchronized private List<ControlledJob> getJobsIn(State state) { LinkedList<ControlledJob> l = new LinkedList<ControlledJob>(); for(ControlledJob j: jobsInProgress) { if(j.getJobState() == state) { l.add(j); } } return l; } /** * @return the jobs in the waiting state */ public List<ControlledJob> getWaitingJobList() { return getJobsIn(State.WAITING); } /** * @return the jobs in the running state */ public List<ControlledJob> getRunningJobList() { return getJobsIn(State.RUNNING); } /** * @return the jobs in the ready state */ public List<ControlledJob> getReadyJobsList() { return getJobsIn(State.READY); } /** * @return the jobs in the success state */ synchronized public List<ControlledJob> getSuccessfulJobList() { return toList(this.successfulJobs); } synchronized public List<ControlledJob> getFailedJobList() { return toList(this.failedJobs); } private String getNextJobID() { nextJobID += 1; return this.groupName + this.nextJobID; } /** * Add a new controlled job. * @param aJob the new controlled job */ synchronized public String addJob(ControlledJob aJob) { String id = this.getNextJobID(); aJob.setJobID(id); aJob.setJobState(State.WAITING); jobsInProgress.add(aJob); return id; } /** * Add a new job. * @param aJob the new job */ synchronized public String addJob(Job aJob) { return addJob((ControlledJob) aJob); } /** * Add a collection of jobs * * @param jobs */ public void addJobCollection(Collection<ControlledJob> jobs) { for (ControlledJob job : jobs) { addJob(job); } } /** * @return the thread state */ public ThreadState getThreadState() { return this.runnerState; } /** * set the thread state to STOPPING so that the * thread will stop when it wakes up. */ public void stop() { this.runnerState = ThreadState.STOPPING; } /** * suspend the running thread */ public void suspend () { if (this.runnerState == ThreadState.RUNNING) { this.runnerState = ThreadState.SUSPENDED; } } /** * resume the suspended thread */ public void resume () { if (this.runnerState == ThreadState.SUSPENDED) { this.runnerState = ThreadState.RUNNING; } } synchronized public boolean allFinished() { return jobsInProgress.isEmpty(); } /** * The main loop for the thread. * The loop does the following: * Check the states of the running jobs * Update the states of waiting jobs * Submit the jobs in ready state */ public void run() { try { this.runnerState = ThreadState.RUNNING; while (true) { while (this.runnerState == ThreadState.SUSPENDED) { try { Thread.sleep(5000); } catch (Exception e) { //TODO the thread was interrupted, do something!!! } } synchronized(this) { Iterator<ControlledJob> it = jobsInProgress.iterator(); while(it.hasNext()) { ControlledJob j = it.next(); LOG.debug("Checking state of job "+j); switch(j.checkState()) { case SUCCESS: successfulJobs.add(j); it.remove(); break; case FAILED: case DEPENDENT_FAILED: failedJobs.add(j); it.remove(); break; case READY: j.submit(); break; case RUNNING: case WAITING: //Do Nothing break; } } } if (this.runnerState != ThreadState.RUNNING && this.runnerState != ThreadState.SUSPENDED) { break; } try { Thread.sleep(5000); } catch (Exception e) { //TODO the thread was interrupted, do something!!! } if (this.runnerState != ThreadState.RUNNING && this.runnerState != ThreadState.SUSPENDED) { break; } } }catch(Throwable t) { LOG.error("Error while trying to run jobs.",t); //Mark all jobs as failed because we got something bad. failAllJobs(t); } this.runnerState = ThreadState.STOPPED; } synchronized private void failAllJobs(Throwable t) { String message = "Unexpected System Error Occured: "+ StringUtils.stringifyException(t); Iterator<ControlledJob> it = jobsInProgress.iterator(); while(it.hasNext()) { ControlledJob j = it.next(); try { j.failJob(message); } catch (IOException e) { LOG.error("Error while tyring to clean up "+j.getJobName(), e); } catch (InterruptedException e) { LOG.error("Error while tyring to clean up "+j.getJobName(), e); } finally { failedJobs.add(j); it.remove(); } } } }
Wajihulhassan/Hadoop-2.7.0
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/jobcontrol/JobControl.java
Java
apache-2.0
8,340
#region License, Terms and Author(s) // // ELMAH - Error Logging Modules and Handlers for ASP.NET // Copyright (c) 2004-9 Atif Aziz. All rights reserved. // // Author(s): // // Atif Aziz, http://www.raboof.com // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // #endregion [assembly: Elmah.Scc("$Id: DelegatedContextExpression.cs 623 2009-05-30 00:46:46Z azizatif $")] namespace Elmah.Assertions { using System; internal sealed class DelegatedContextExpression : IContextExpression { private readonly Func<object, object> _handler; public DelegatedContextExpression(Func<object, object> handler) { if (handler == null) throw new ArgumentNullException("handler"); _handler = handler; } public Func<object, object> Handler { get { return _handler; } } public object Evaluate(object context) { return _handler(context); } public override string ToString() { return Handler.ToString(); } } }
clearwavebuild/elmah
src/Elmah/Assertions/DelegatedContextExpression.cs
C#
apache-2.0
1,674
# # Author:: Adam Jacob (<adam@opscode.com>) # Copyright:: Copyright (c) 2008 Opscode, Inc. # License:: Apache License, Version 2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # require 'chef/resource' class Chef class Resource class User < Chef::Resource identity_attr :username state_attrs :uid, :gid, :home default_action :create allowed_actions :create, :remove, :modify, :manage, :lock, :unlock def initialize(name, run_context=nil) super @username = name @comment = nil @uid = nil @gid = nil @home = nil @shell = nil @password = nil @system = false @manage_home = false @force = false @non_unique = false @supports = { :manage_home => false, :non_unique => false } @iterations = 27855 @salt = nil end def username(arg=nil) set_or_return( :username, arg, :kind_of => [ String ] ) end def comment(arg=nil) set_or_return( :comment, arg, :kind_of => [ String ] ) end def uid(arg=nil) set_or_return( :uid, arg, :kind_of => [ String, Integer ] ) end def gid(arg=nil) set_or_return( :gid, arg, :kind_of => [ String, Integer ] ) end alias_method :group, :gid def home(arg=nil) set_or_return( :home, arg, :kind_of => [ String ] ) end def shell(arg=nil) set_or_return( :shell, arg, :kind_of => [ String ] ) end def password(arg=nil) set_or_return( :password, arg, :kind_of => [ String ] ) end def salt(arg=nil) set_or_return( :salt, arg, :kind_of => [ String ] ) end def iterations(arg=nil) set_or_return( :iterations, arg, :kind_of => [ Integer ] ) end def system(arg=nil) set_or_return( :system, arg, :kind_of => [ TrueClass, FalseClass ] ) end def manage_home(arg=nil) set_or_return( :manage_home, arg, :kind_of => [ TrueClass, FalseClass ] ) end def force(arg=nil) set_or_return( :force, arg, :kind_of => [ TrueClass, FalseClass ] ) end def non_unique(arg=nil) set_or_return( :non_unique, arg, :kind_of => [ TrueClass, FalseClass ] ) end end end end
zshuo/chef
lib/chef/resource/user.rb
Ruby
apache-2.0
3,328
using System; using System.Collections.Generic; using System.Linq; using NUnit.Framework; namespace MoreLinq.Test { /// <summary> /// Verify the behavior of the RunLengthEncode() operator /// </summary> [TestFixture] public class RunLengthEncodeTests { /// <summary> /// Verify that the RunLengthEncode() methods behave in a lazy manner. /// </summary> [Test] public void TestRunLengthEncodeIsLazy() { new BreakingSequence<int>().RunLengthEncode(); new BreakingSequence<int>().RunLengthEncode(EqualityComparer<int>.Default); } /// <summary> /// Verify that invoking RunLengthEncode on an empty sequence results in an exception /// </summary> [Test] [ExpectedException(typeof(ArgumentNullException))] public void TestRunLengthEncodeNullSequence() { const IEnumerable<int> sequence = null; sequence.RunLengthEncode(); } /// <summary> /// Verify that invoking RunLengthEncode on an empty sequence results in an exception /// </summary> [Test] [ExpectedException(typeof(ArgumentNullException))] public void TestRunLengthEncodeNullSequence2() { const IEnumerable<int> sequence = null; sequence.RunLengthEncode(EqualityComparer<int>.Default); } /// <summary> /// Verify that run-length encoding an empty sequence results in an empty sequence. /// </summary> [Test] public void TestRunLengthEncodeEmptySequence() { var sequence = Enumerable.Empty<int>(); var result = sequence.RunLengthEncode(); Assert.IsTrue(result.SequenceEqual(sequence.Select(x => new KeyValuePair<int, int>(x, x)))); } /// <summary> /// Verify that run-length encoding correctly accepts and uses custom equality comparers. /// </summary> [Test] public void TestRunLengthEncodeCustomComparer() { var sequence = new[] { "a", "A", "a", "b", "b", "B", "B" }; var result = sequence.RunLengthEncode(StringComparer.CurrentCultureIgnoreCase) .Select(kvp => new KeyValuePair<string, int>(kvp.Key.ToLower(), kvp.Value)); var expectedResult = new[] {new KeyValuePair<string, int>("a", 3), new KeyValuePair<string, int>("b", 4)}; Assert.IsTrue(result.SequenceEqual(expectedResult)); } /// <summary> /// Verify that run-length encoding a known sequence produced a correct result. /// </summary> [Test] public void TestRunLengthEncodeResults() { var sequence = new[] { 1, 2, 2, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 5, 6, 6, 6, 6, 6, 6 }; var expectedResult = Enumerable.Range(1, 6).Select(x => new KeyValuePair<int, int>(x, x)); var result = sequence.RunLengthEncode(); Assert.IsTrue(result.SequenceEqual(expectedResult)); } /// <summary> /// Verify that run-length encoding a sequence with no runs produces a correct result. /// </summary> [Test] public void TestRunLengthEncodeNoRuns() { var sequence = Enumerable.Range(1, 10); var result = sequence.RunLengthEncode(); var expectedResult = sequence.Select(x => new KeyValuePair<int, int>(x, 1)); Assert.IsTrue(result.SequenceEqual(expectedResult)); } /// <summary> /// Verify that run-length encoding a sequence consisting of a single repeated value /// produces a correct result. /// </summary> [Test] public void TestRunLengthEncodeOneRun() { const char value = 'q'; const int repeatCount = 10; var sequence = Enumerable.Repeat(value, repeatCount); var result = sequence.RunLengthEncode(); var expectedResult = new[] { new KeyValuePair<char, int>(value, repeatCount) }; Assert.IsTrue(result.SequenceEqual(expectedResult)); } } }
KalebDark/morelinq
MoreLinq.Test/RunLengthEncodeTest.cs
C#
apache-2.0
4,355