text
stringlengths 2
1.04M
| meta
dict |
---|---|
using Newtonsoft.Json;
using System.Linq;
namespace SensorThings.Core
{
    // Represents the SensorThings service root ("home") document, which lists
    // the entity collections the service exposes.
    public class HomeDocument
    {
        // Entity collections advertised by the service root; deserialized from
        // the JSON "value" array.
        [JsonProperty("value")]
        public Entity[] Entities { get; set; }
        // Returns the URL of the entity collection with the given name, or
        // null when no collection matches.
        public string GetUrlByEntityName(string name)
        {
            var url = (from i in Entities where i.Name == name select i.Url).FirstOrDefault();
            return url;
        }
    }
    // A single entry in the home document: a collection name and its URL.
    public class Entity
    {
        public string Name { get; set; }
        public string Url { get; set; }
    }
}
| {
"content_hash": "412a677089887a53e59cad07b7db0d28",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 94,
"avg_line_length": 22.130434782608695,
"alnum_prop": 0.5756385068762279,
"repo_name": "Geodan/sensorthings-net-sdk",
"id": "d206dc2b0dae75afa74520c055d6b246aeec4f82",
"size": "511",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/sensorthings-net-sdk/Core/HomeDocument.cs",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C#",
"bytes": "55539"
}
],
"symlink_target": ""
} |
package platform
import (
"bytes"
"fmt"
"os"
"path"
"path/filepath"
"regexp"
"strconv"
"strings"
"text/template"
boshdpresolv "github.com/cloudfoundry/bosh-agent/infrastructure/devicepathresolver"
"github.com/cloudfoundry/bosh-agent/platform/cdrom"
boshcert "github.com/cloudfoundry/bosh-agent/platform/cert"
boshdisk "github.com/cloudfoundry/bosh-agent/platform/disk"
boshnet "github.com/cloudfoundry/bosh-agent/platform/net"
boshstats "github.com/cloudfoundry/bosh-agent/platform/stats"
boshvitals "github.com/cloudfoundry/bosh-agent/platform/vitals"
boshsettings "github.com/cloudfoundry/bosh-agent/settings"
boshdir "github.com/cloudfoundry/bosh-agent/settings/directories"
boshdirs "github.com/cloudfoundry/bosh-agent/settings/directories"
bosherr "github.com/cloudfoundry/bosh-utils/errors"
boshcmd "github.com/cloudfoundry/bosh-utils/fileutil"
boshlog "github.com/cloudfoundry/bosh-utils/logger"
boshretry "github.com/cloudfoundry/bosh-utils/retrystrategy"
boshsys "github.com/cloudfoundry/bosh-utils/system"
boshuuid "github.com/cloudfoundry/bosh-utils/uuid"
)
// File and directory permission modes (and size thresholds) the agent uses
// when provisioning disks, directories and files on the stemcell.
const (
	ephemeralDiskPermissions  = os.FileMode(0750)
	persistentDiskPermissions = os.FileMode(0700)

	logDirPermissions      = os.FileMode(0750)
	runDirPermissions      = os.FileMode(0750)
	jobsDirPermissions     = os.FileMode(0750)
	packagesDirPermissions = os.FileMode(0755)
	userBaseDirPermissions = os.FileMode(0755)
	disksDirPermissions    = os.FileMode(0755)

	userRootLogDirPermissions = os.FileMode(0775)
	tmpDirPermissions         = os.FileMode(0755) // 0755 to make sure that vcap user can use new temp dir

	blobsDirPermissions = os.FileMode(0700)

	sshDirPermissions          = os.FileMode(0700)
	sshAuthKeysFilePermissions = os.FileMode(0600)

	// Minimum free space required on the root device before the agent will
	// carve an ephemeral partition out of it.
	minRootEphemeralSpaceInBytes = uint64(1024 * 1024 * 1024)
)
// LinuxOptions holds feature flags and strategy selectors that tune how the
// linux platform provisions disks, temp dirs and device paths.
type LinuxOptions struct {
	// When set to true loop back device
	// is not going to be overlayed over /tmp to limit /tmp dir size
	UseDefaultTmpDir bool

	// When set to true persistent disk will be assumed to be pre-formatted;
	// otherwise agent will partition and format it right before mounting
	UsePreformattedPersistentDisk bool

	// When set to true persistent disk will be mounted as a bind-mount
	BindMountPersistentDisk bool

	// When set to true and no ephemeral disk is mounted, the agent will create
	// a partition on the same device as the root partition to use as the
	// ephemeral disk
	CreatePartitionIfNoEphemeralDisk bool

	// When set to true the agent will skip both root and ephemeral disk partitioning
	SkipDiskSetup bool

	// Strategy for resolving device paths;
	// possible values: virtio, scsi, iscsi, ""
	DevicePathResolutionType string

	// Strategy for resolving ephemeral & persistent disk partitioners;
	// possible values: parted, "" (default is sfdisk if disk < 2TB, parted otherwise)
	PartitionerType string
}
// linux is the Linux implementation of Platform. All OS interaction is
// delegated to the injected collaborators, which keeps the type testable.
type linux struct {
	fs                     boshsys.FileSystem
	cmdRunner              boshsys.CmdRunner
	collector              boshstats.Collector
	compressor             boshcmd.Compressor
	copier                 boshcmd.Copier
	dirProvider            boshdirs.Provider
	vitalsService          boshvitals.Service
	cdutil                 cdrom.CDUtil
	diskManager            boshdisk.Manager
	netManager             boshnet.Manager
	certManager            boshcert.Manager
	monitRetryStrategy     boshretry.RetryStrategy
	devicePathResolver     boshdpresolv.DevicePathResolver
	options                LinuxOptions
	state                  *BootstrapState
	logger                 boshlog.Logger
	defaultNetworkResolver boshsettings.DefaultNetworkResolver
	uuidGenerator          boshuuid.Generator
	auditLogger            AuditLogger
}
// NewLinuxPlatform wires all collaborators into a linux Platform instance.
// It performs no I/O; it is a pure constructor.
func NewLinuxPlatform(
	fs boshsys.FileSystem,
	cmdRunner boshsys.CmdRunner,
	collector boshstats.Collector,
	compressor boshcmd.Compressor,
	copier boshcmd.Copier,
	dirProvider boshdirs.Provider,
	vitalsService boshvitals.Service,
	cdutil cdrom.CDUtil,
	diskManager boshdisk.Manager,
	netManager boshnet.Manager,
	certManager boshcert.Manager,
	monitRetryStrategy boshretry.RetryStrategy,
	devicePathResolver boshdpresolv.DevicePathResolver,
	state *BootstrapState,
	options LinuxOptions,
	logger boshlog.Logger,
	defaultNetworkResolver boshsettings.DefaultNetworkResolver,
	uuidGenerator boshuuid.Generator,
	auditLogger AuditLogger,
) Platform {
	return &linux{
		fs:                     fs,
		cmdRunner:              cmdRunner,
		collector:              collector,
		compressor:             compressor,
		copier:                 copier,
		dirProvider:            dirProvider,
		vitalsService:          vitalsService,
		cdutil:                 cdutil,
		diskManager:            diskManager,
		netManager:             netManager,
		certManager:            certManager,
		monitRetryStrategy:     monitRetryStrategy,
		devicePathResolver:     devicePathResolver,
		state:                  state,
		options:                options,
		logger:                 logger,
		defaultNetworkResolver: defaultNetworkResolver,
		uuidGenerator:          uuidGenerator,
		auditLogger:            auditLogger,
	}
}
// logTag prefixes every log line emitted by this platform implementation.
const logTag = "linuxPlatform"
// AssociateDisk creates a symlink under the disks dir pointing the given
// name at the disk's resolved real device path, so later lookups can find
// the device by its logical name.
func (p linux) AssociateDisk(name string, settings boshsettings.DiskSettings) error {
	disksDir := p.dirProvider.DisksDir()
	err := p.fs.MkdirAll(disksDir, disksDirPermissions)
	if err != nil {
		// Bug fix: the wrapped error used to be discarded (no return),
		// silently continuing with a missing disks dir.
		return bosherr.WrapError(err, "Associating disk: ")
	}

	linkPath := path.Join(disksDir, name)

	devicePath, _, err := p.devicePathResolver.GetRealDevicePath(settings)
	if err != nil {
		return bosherr.WrapErrorf(err, "Associating disk with name %s", name)
	}

	return p.fs.Symlink(devicePath, linkPath)
}
// GetFs returns the filesystem abstraction used by the platform.
func (p linux) GetFs() (fs boshsys.FileSystem) {
	return p.fs
}

// GetRunner returns the command runner used to shell out to OS tools.
func (p linux) GetRunner() (runner boshsys.CmdRunner) {
	return p.cmdRunner
}

// GetCompressor returns the archive compressor.
func (p linux) GetCompressor() (runner boshcmd.Compressor) {
	return p.compressor
}

// GetCopier returns the file copier.
func (p linux) GetCopier() (runner boshcmd.Copier) {
	return p.copier
}

// GetDirProvider returns the directory layout provider.
func (p linux) GetDirProvider() (dirProvider boshdir.Provider) {
	return p.dirProvider
}

// GetVitalsService returns the vitals (system metrics) service.
func (p linux) GetVitalsService() (service boshvitals.Service) {
	return p.vitalsService
}

// GetFileContentsFromCDROM reads a single file from the settings CDROM.
// It returns the contents of the first (and only) requested file.
func (p linux) GetFileContentsFromCDROM(fileName string) (content []byte, err error) {
	contents, err := p.cdutil.GetFilesContents([]string{fileName})
	if err != nil {
		return []byte{}, err
	}

	return contents[0], nil
}

// GetFilesContentsFromDisk reads the named files from the given disk path,
// delegating to the disk util.
func (p linux) GetFilesContentsFromDisk(diskPath string, fileNames []string) ([][]byte, error) {
	return p.diskManager.GetUtil().GetFilesContents(diskPath, fileNames)
}

// GetDevicePathResolver returns the configured device path resolver.
func (p linux) GetDevicePathResolver() (devicePathResolver boshdpresolv.DevicePathResolver) {
	return p.devicePathResolver
}

// GetAuditLogger returns the audit logger.
func (p linux) GetAuditLogger() AuditLogger {
	return p.auditLogger
}

// SetupNetworking delegates network configuration to the network manager.
func (p linux) SetupNetworking(networks boshsettings.Networks) (err error) {
	return p.netManager.SetupNetworking(networks, nil)
}

// GetConfiguredNetworkInterfaces lists interfaces configured by the agent.
func (p linux) GetConfiguredNetworkInterfaces() ([]string, error) {
	return p.netManager.GetConfiguredNetworkInterfaces()
}

// GetCertManager returns the certificate manager.
func (p linux) GetCertManager() boshcert.Manager {
	return p.certManager
}
// GetHostPublicKey returns the host's RSA SSH public key read from the
// standard sshd location.
func (p linux) GetHostPublicKey() (string, error) {
	const keyPath = "/etc/ssh/ssh_host_rsa_key.pub"

	contents, err := p.fs.ReadFileString(keyPath)
	if err != nil {
		return "", bosherr.WrapErrorf(err, "Unable to read host public key file: %s", keyPath)
	}

	return contents, nil
}
// SetupRuntimeConfiguration runs the stemcell's bosh-agent-rc script to apply
// runtime OS configuration.
func (p linux) SetupRuntimeConfiguration() error {
	if _, _, _, err := p.cmdRunner.RunCommand("bosh-agent-rc"); err != nil {
		return bosherr.WrapError(err, "Shelling out to bosh-agent-rc")
	}
	return nil
}
// CreateUser provisions a login account under basePath with a bash shell and
// locks its home directory down to mode 0700.
func (p linux) CreateUser(username, basePath string) error {
	if err := p.fs.MkdirAll(basePath, userBaseDirPermissions); err != nil {
		return bosherr.WrapError(err, "Making user base path")
	}

	useraddArgs := []string{"-m", "-b", basePath, "-s", "/bin/bash", username}
	if _, _, _, err := p.cmdRunner.RunCommand("useradd", useraddArgs...); err != nil {
		return bosherr.WrapError(err, "Shelling out to useradd")
	}

	homeDir, err := p.fs.HomeDir(username)
	if err != nil {
		return bosherr.WrapErrorf(err, "Unable to retrieve home directory for user %s", username)
	}

	if _, _, _, err := p.cmdRunner.RunCommand("chmod", "700", homeDir); err != nil {
		return bosherr.WrapError(err, "Shelling out to chmod")
	}

	return nil
}
// AddUserToGroups replaces the user's supplementary group list with the
// given groups (usermod -G).
func (p linux) AddUserToGroups(username string, groups []string) error {
	groupList := strings.Join(groups, ",")
	if _, _, _, err := p.cmdRunner.RunCommand("usermod", "-G", groupList, username); err != nil {
		return bosherr.WrapError(err, "Shelling out to usermod")
	}
	return nil
}
// DeleteEphemeralUsersMatching removes every ephemeral user whose name
// matches the given regular expression.
func (p linux) DeleteEphemeralUsersMatching(reg string) error {
	compiled, err := regexp.Compile(reg)
	if err != nil {
		return bosherr.WrapError(err, "Compiling regexp")
	}

	users, err := p.findEphemeralUsersMatching(compiled)
	if err != nil {
		return bosherr.WrapError(err, "Finding ephemeral users")
	}

	for _, u := range users {
		if err := p.deleteUser(u); err != nil {
			return bosherr.WrapError(err, "Deleting user")
		}
	}
	return nil
}
// deleteUser removes the account together with its home directory (userdel -r).
func (p linux) deleteUser(user string) error {
	_, _, _, err := p.cmdRunner.RunCommand("userdel", "-r", user)
	return err
}
// findEphemeralUsersMatching scans /etc/passwd and returns usernames that
// both carry the ephemeral-user prefix and match reg.
func (p linux) findEphemeralUsersMatching(reg *regexp.Regexp) ([]string, error) {
	passwd, err := p.fs.ReadFileString("/etc/passwd")
	if err != nil {
		return nil, bosherr.WrapError(err, "Reading /etc/passwd")
	}

	var matching []string
	for _, entry := range strings.Split(passwd, "\n") {
		// The username is the first colon-separated field of a passwd entry.
		name := strings.Split(entry, ":")[0]
		if strings.HasPrefix(name, boshsettings.EphemeralUserPrefix) && reg.MatchString(name) {
			matching = append(matching, name)
		}
	}
	return matching, nil
}
// SetupBoshSettingsDisk creates the tmpfs-backed settings directory and
// mounts a 16m tmpfs over it so agent settings never touch persistent media.
func (p linux) SetupBoshSettingsDisk() error {
	// Renamed from "path": that local shadowed the imported path package.
	settingsDir := filepath.Dir(p.GetAgentSettingsPath(true))

	if err := p.fs.MkdirAll(settingsDir, 0700); err != nil {
		return bosherr.WrapError(err, "Setting up Bosh Settings Disk")
	}

	return p.diskManager.GetMounter().MountTmpfs(settingsDir, "16m")
}
// settingsFilePath returns the path for name under the tmpfs-backed settings
// dir when tmpfs is true, otherwise under the regular bosh dir.
func (p linux) settingsFilePath(name string, tmpfs bool) string {
	dir := p.dirProvider.BoshDir()
	if tmpfs {
		dir = p.dirProvider.BoshSettingsDir()
	}
	return filepath.Join(dir, name)
}

// GetAgentSettingsPath returns where the agent settings JSON lives.
func (p linux) GetAgentSettingsPath(tmpfs bool) string {
	return p.settingsFilePath("settings.json", tmpfs)
}

// GetPersistentDiskSettingsPath returns where persistent disk hints live.
func (p linux) GetPersistentDiskSettingsPath(tmpfs bool) string {
	return p.settingsFilePath("persistent_disk_hints.json", tmpfs)
}
// SetupRootDisk grows the root partition and filesystem to fill the root
// device. It is a no-op when disk setup is skipped, when an ephemeral
// partition would be carved out of the root device, or when growpart is
// not installed.
func (p linux) SetupRootDisk(ephemeralDiskPath string) error {
	if p.options.SkipDiskSetup {
		return nil
	}

	// If there is an ephemeral disk we can safely autogrow; if not we should not.
	if ephemeralDiskPath == "" && p.options.CreatePartitionIfNoEphemeralDisk {
		p.logger.Info(logTag, "No Ephemeral Disk provided, Skipping growing of the Root Filesystem")
		return nil
	}

	// In case growpart is not available for another flavour of linux, don't stop
	// the agent from running; without this, integration tests would not run since
	// the bosh-lite vm doesn't have it.
	if !p.cmdRunner.CommandExists("growpart") {
		p.logger.Info(logTag, "The program 'growpart' is not installed, Root Filesystem cannot be grown")
		return nil
	}

	rootDevicePath, rootDeviceNumber, err := p.findRootDevicePathAndNumber()
	if err != nil {
		return bosherr.WrapError(err, "findRootDevicePath")
	}

	stdout, _, _, err := p.cmdRunner.RunCommand(
		"growpart",
		rootDevicePath,
		strconv.Itoa(rootDeviceNumber),
	)
	if err != nil {
		// growpart exits non-zero with "NOCHANGE" in its output when the
		// partition already fills the device; that is not a failure.
		if !strings.Contains(stdout, "NOCHANGE") {
			return bosherr.WrapError(err, "growpart")
		}
	}

	_, _, _, err = p.cmdRunner.RunCommand("resize2fs", "-f", p.partitionPath(rootDevicePath, rootDeviceNumber))
	if err != nil {
		return bosherr.WrapError(err, "resize2fs")
	}

	return nil
}
// SetupSSH writes the given public keys into the user's authorized_keys
// file, creating ~/.ssh with the expected ownership and permissions first.
func (p linux) SetupSSH(publicKeys []string, username string) error {
	homeDir, err := p.fs.HomeDir(username)
	if err != nil {
		return bosherr.WrapError(err, "Finding home dir for user")
	}

	sshDir := path.Join(homeDir, ".ssh")
	if err := p.fs.MkdirAll(sshDir, sshDirPermissions); err != nil {
		return bosherr.WrapError(err, "Making ssh directory")
	}
	if err := p.fs.Chown(sshDir, username); err != nil {
		return bosherr.WrapError(err, "Chowning ssh directory")
	}

	authKeysFile := path.Join(sshDir, "authorized_keys")
	if err := p.fs.WriteFileString(authKeysFile, strings.Join(publicKeys, "\n")); err != nil {
		return bosherr.WrapError(err, "Creating authorized_keys file")
	}
	if err := p.fs.Chown(authKeysFile, username); err != nil {
		return bosherr.WrapError(err, "Chowning key path")
	}
	if err := p.fs.Chmod(authKeysFile, sshAuthKeysFilePermissions); err != nil {
		return bosherr.WrapError(err, "Chmoding key path")
	}

	return nil
}
// SetUserPassword stores the user's encrypted password via usermod; an empty
// password locks the account by storing "*".
func (p linux) SetUserPassword(user, encryptedPwd string) error {
	pwd := encryptedPwd
	if pwd == "" {
		pwd = "*"
	}
	if _, _, _, err := p.cmdRunner.RunCommand("usermod", "-p", pwd, user); err != nil {
		return bosherr.WrapError(err, "Shelling out to usermod")
	}
	return nil
}
// SetupRecordsJSONPermission restricts the DNS records JSON file to mode
// 0640 owned by root:vcap.
func (p linux) SetupRecordsJSONPermission(path string) error {
	err := p.fs.Chmod(path, 0640)
	if err != nil {
		return bosherr.WrapError(err, "Chmoding records JSON file")
	}

	err = p.fs.Chown(path, "root:vcap")
	if err != nil {
		return bosherr.WrapError(err, "Chowning records JSON file")
	}

	return nil
}
// EtcHostsTemplate is the text/template used to render the default
// /etc/hosts; the single template argument ({{ . }}) is the hostname.
const EtcHostsTemplate = `127.0.0.1 localhost {{ . }}

# The following lines are desirable for IPv6 capable hosts
::1 localhost ip6-localhost ip6-loopback {{ . }}
fe00::0 ip6-localnet
ff00::0 ip6-mcastprefix
ff02::1 ip6-allnodes
ff02::2 ip6-allrouters
ff02::3 ip6-allhosts
`
// SaveDNSRecords atomically replaces /etc/hosts with the default entries for
// hostname followed by the provided DNS records, writing to a uniquely-named
// temp file and renaming it into place.
func (p linux) SaveDNSRecords(dnsRecords boshsettings.DNSRecords, hostname string) error {
	dnsRecordsContents, err := p.generateDefaultEtcHosts(hostname)
	if err != nil {
		return bosherr.WrapError(err, "Generating default /etc/hosts")
	}

	for _, dnsRecord := range dnsRecords.Records {
		dnsRecordsContents.WriteString(fmt.Sprintf("%s %s\n", dnsRecord[0], dnsRecord[1]))
	}

	uuid, err := p.uuidGenerator.Generate()
	if err != nil {
		return bosherr.WrapError(err, "Generating UUID")
	}

	etcHostsUUIDFileName := fmt.Sprintf("/etc/hosts-%s", uuid)
	err = p.fs.WriteFileQuietly(etcHostsUUIDFileName, dnsRecordsContents.Bytes())
	if err != nil {
		// Consistency: use WrapErrorf instead of WrapError+Sprintf.
		return bosherr.WrapErrorf(err, "Writing to %s", etcHostsUUIDFileName)
	}

	// Rename is atomic on the same filesystem, so readers never observe a
	// partially-written /etc/hosts.
	err = p.fs.Rename(etcHostsUUIDFileName, "/etc/hosts")
	if err != nil {
		return bosherr.WrapErrorf(err, "Renaming %s to /etc/hosts", etcHostsUUIDFileName)
	}

	return nil
}
// SetupIPv6 delegates IPv6 configuration to the network manager.
func (p linux) SetupIPv6(config boshsettings.IPv6) error {
	return p.netManager.SetupIPv6(config, nil)
}
// SetupHostname sets the machine hostname and writes /etc/hostname and
// /etc/hosts, recording completion in bootstrap state so it runs only once.
func (p linux) SetupHostname(hostname string) error {
	// Already configured on a previous boot; nothing to do.
	if p.state.Linux.HostsConfigured {
		return nil
	}

	if _, _, _, err := p.cmdRunner.RunCommand("hostname", hostname); err != nil {
		return bosherr.WrapError(err, "Setting hostname")
	}

	if err := p.fs.WriteFileString("/etc/hostname", hostname); err != nil {
		return bosherr.WrapError(err, "Writing to /etc/hostname")
	}

	buffer, err := p.generateDefaultEtcHosts(hostname)
	if err != nil {
		return err
	}
	if err := p.fs.WriteFile("/etc/hosts", buffer.Bytes()); err != nil {
		return bosherr.WrapError(err, "Writing to /etc/hosts")
	}

	p.state.Linux.HostsConfigured = true
	if err := p.state.SaveState(); err != nil {
		return bosherr.WrapError(err, "Setting up hostname")
	}

	return nil
}
// SetupLogrotate renders the logrotate config for a job group into
// /etc/logrotate.d/<groupName> and re-runs the stemcell logrotate setup.
func (p linux) SetupLogrotate(groupName, basePath, size string) error {
	type logrotateArgs struct {
		BasePath string
		Size     string
	}

	tmpl := template.Must(template.New("logrotate-d-config").Parse(etcLogrotateDTemplate))

	var rendered bytes.Buffer
	if err := tmpl.Execute(&rendered, logrotateArgs{BasePath: basePath, Size: size}); err != nil {
		return bosherr.WrapError(err, "Generating logrotate config")
	}

	if err := p.fs.WriteFile(path.Join("/etc/logrotate.d", groupName), rendered.Bytes()); err != nil {
		return bosherr.WrapError(err, "Writing to /etc/logrotate.d")
	}

	// Best effort: failures from the setup script are deliberately ignored.
	_, _, _, _ = p.cmdRunner.RunCommand("/var/vcap/bosh/bin/setup-logrotate.sh")
	return nil
}
// Logrotate config file - /etc/logrotate.d/<group-name>
// Stemcell stage logrotate_config configures logrotate to run every hour.
// Rendered with two template fields: .BasePath (job root) and .Size
// (rotation threshold).
const etcLogrotateDTemplate = `# Generated by bosh-agent

{{ .BasePath }}/data/sys/log/*.log {{ .BasePath }}/data/sys/log/.*.log {{ .BasePath }}/data/sys/log/*/*.log {{ .BasePath }}/data/sys/log/*/.*.log {{ .BasePath }}/data/sys/log/*/*/*.log {{ .BasePath }}/data/sys/log/*/*/.*.log {
  missingok
  rotate 7
  compress
  copytruncate
  size={{ .Size }}
}
`
// SetTimeWithNtpServers records the NTP servers for the agent's time setup
// and makes a best-effort attempt to sync the clock immediately.
func (p linux) SetTimeWithNtpServers(servers []string) error {
	if len(servers) == 0 {
		return nil
	}

	serversFilePath := path.Join(p.dirProvider.BaseDir(), "/bosh/etc/ntpserver")
	if err := p.fs.WriteFileString(serversFilePath, strings.Join(servers, " ")); err != nil {
		return bosherr.WrapErrorf(err, "Writing to %s", serversFilePath)
	}

	// Make a best effort to sync time now but don't error
	_, _, _, _ = p.cmdRunner.RunCommand("sync-time")
	return nil
}
// SetupEphemeralDiskWithPath partitions (if needed), formats and mounts the
// ephemeral disk at the data dir. When realPath is empty and
// CreatePartitionIfNoEphemeralDisk is enabled, the ephemeral (and optional
// swap) partitions are carved out of the root device instead.
func (p linux) SetupEphemeralDiskWithPath(realPath string, desiredSwapSizeInBytes *uint64, labelPrefix string) error {
	p.logger.Info(logTag, "Setting up ephemeral disk...")
	mountPoint := p.dirProvider.DataDir()

	mountPointGlob := path.Join(mountPoint, "*")
	contents, err := p.fs.Glob(mountPointGlob)
	if err != nil {
		return bosherr.WrapErrorf(err, "Globbing ephemeral disk mount point `%s'", mountPointGlob)
	}

	// len(nil slice) == 0, so the previous explicit nil check was redundant.
	if len(contents) > 0 {
		// When agent bootstraps for the first time data directory should be empty.
		// It might be non-empty on subsequent agent restarts. The ephemeral disk setup
		// should be idempotent and partitioning will be skipped if disk is already
		// partitioned as needed. If disk is not partitioned as needed we still want to
		// partition it even if data directory is not empty.
		p.logger.Debug(logTag, "Existing ephemeral mount `%s' is not empty. Contents: %s", mountPoint, contents)
	}

	err = p.fs.MkdirAll(mountPoint, ephemeralDiskPermissions)
	if err != nil {
		return bosherr.WrapError(err, "Creating data dir")
	}

	if p.options.SkipDiskSetup {
		return nil
	}

	var swapPartitionPath, dataPartitionPath string

	// Agent can only setup ephemeral data directory either on ephemeral device
	// or on separate root partition.
	// The real path can be empty if CPI did not provide ephemeral disk
	// or if the provided disk was not found.
	if realPath == "" {
		if !p.options.CreatePartitionIfNoEphemeralDisk {
			// Agent can not use root partition for ephemeral data directory.
			return bosherr.Error("No ephemeral disk found, cannot use root partition as ephemeral disk")
		}

		swapPartitionPath, dataPartitionPath, err = p.createEphemeralPartitionsOnRootDevice(desiredSwapSizeInBytes, labelPrefix)
		if err != nil {
			return bosherr.WrapError(err, "Creating ephemeral partitions on root device")
		}
	} else {
		swapPartitionPath, dataPartitionPath, err = p.partitionEphemeralDisk(realPath, desiredSwapSizeInBytes, labelPrefix)
		if err != nil {
			return bosherr.WrapError(err, "Partitioning ephemeral disk")
		}
	}

	// Swap partition is optional; createEphemeralPartitionsOnRootDevice /
	// partitionEphemeralDisk return an empty path when none was made.
	if len(swapPartitionPath) > 0 {
		canonicalSwapPartitionPath, err := resolveCanonicalLink(p.cmdRunner, swapPartitionPath)
		if err != nil {
			return err
		}

		p.logger.Info(logTag, "Formatting `%s' (canonical path: %s) as swap", swapPartitionPath, canonicalSwapPartitionPath)
		err = p.diskManager.GetFormatter().Format(canonicalSwapPartitionPath, boshdisk.FileSystemSwap)
		if err != nil {
			return bosherr.WrapError(err, "Formatting swap")
		}

		p.logger.Info(logTag, "Mounting `%s' (canonical path: %s) as swap", swapPartitionPath, canonicalSwapPartitionPath)
		err = p.diskManager.GetMounter().SwapOn(canonicalSwapPartitionPath)
		if err != nil {
			return bosherr.WrapError(err, "Mounting swap")
		}
	}

	canonicalDataPartitionPath, err := resolveCanonicalLink(p.cmdRunner, dataPartitionPath)
	if err != nil {
		return err
	}

	p.logger.Info(logTag, "Formatting `%s' (canonical path: %s) as ext4", dataPartitionPath, canonicalDataPartitionPath)
	err = p.diskManager.GetFormatter().Format(canonicalDataPartitionPath, boshdisk.FileSystemExt4)
	if err != nil {
		return bosherr.WrapError(err, "Formatting data partition with ext4")
	}

	p.logger.Info(logTag, "Mounting `%s' (canonical path: %s) at `%s'", dataPartitionPath, canonicalDataPartitionPath, mountPoint)
	err = p.diskManager.GetMounter().Mount(canonicalDataPartitionPath, mountPoint)
	if err != nil {
		return bosherr.WrapError(err, "Mounting data partition")
	}

	return nil
}
// SetupRawEphemeralDisks labels each raw ephemeral device with a GPT
// partition table and a single raw-ephemeral-<i> partition spanning the
// whole disk. Devices already carrying the expected layout are skipped.
func (p linux) SetupRawEphemeralDisks(devices []boshsettings.DiskSettings) error {
	if p.options.SkipDiskSetup {
		return nil
	}

	p.logger.Info(logTag, "Setting up raw ephemeral disks")

	for i, device := range devices {
		realPath, _, err := p.devicePathResolver.GetRealDevicePath(device)
		if err != nil {
			return bosherr.WrapError(err, "Getting real device path")
		}

		// Check if the device is already partitioned correctly.
		stdout, stderr, _, err := p.cmdRunner.RunCommand(
			"parted",
			"-s",
			realPath,
			"p",
		)
		if err != nil {
			// "unrecognised disk label" is acceptable, since the disk may not have been partitioned
			if !strings.Contains(stdout, "unrecognised disk label") &&
				!strings.Contains(stderr, "unrecognised disk label") {
				return bosherr.WrapError(err, "Setting up raw ephemeral disks")
			}
		}

		if strings.Contains(stdout, "Partition Table: gpt") && strings.Contains(stdout, "raw-ephemeral-") {
			continue
		}

		// change to gpt partition type, change units to percentage, make partition with name and span from 0-100%
		p.logger.Info(logTag, "Creating partition on `%s'", realPath)
		_, _, _, err = p.cmdRunner.RunCommand(
			"parted",
			"-s",
			realPath,
			"mklabel",
			"gpt",
			"unit",
			"%",
			"mkpart",
			fmt.Sprintf("raw-ephemeral-%d", i),
			"0",
			"100",
		)
		if err != nil {
			return bosherr.WrapError(err, "Setting up raw ephemeral disks")
		}
	}

	return nil
}
// chownRootVcap hands ownership of dir to root:vcap via the chown binary.
func (p linux) chownRootVcap(dir string) error {
	_, _, _, err := p.cmdRunner.RunCommand("chown", "root:vcap", dir)
	if err != nil {
		return bosherr.WrapErrorf(err, "chown %s", dir)
	}
	return nil
}

// SetupDataDir builds the data-dir layout (sys/log, jobs, sensitive blobs,
// packages, run) with root:vcap ownership, optionally tmpfs-mounting the
// jobs and sensitive-blobs dirs, and symlinks <parent>/sys to data/sys.
func (p linux) SetupDataDir(jobConfig boshsettings.JobDir, runConfig boshsettings.RunDir) error {
	dataDir := p.dirProvider.DataDir()

	sysDataDir := path.Join(dataDir, "sys")
	logDir := path.Join(sysDataDir, "log")

	err := p.fs.MkdirAll(logDir, logDirPermissions)
	if err != nil {
		return bosherr.WrapErrorf(err, "Making %s dir", logDir)
	}
	if err = p.chownRootVcap(sysDataDir); err != nil {
		return err
	}
	if err = p.chownRootVcap(logDir); err != nil {
		return err
	}

	jobsDir := p.dirProvider.DataJobsDir()
	err = p.fs.MkdirAll(jobsDir, jobsDirPermissions)
	if err != nil {
		return bosherr.WrapErrorf(err, "Making %s dir", jobsDir)
	}

	sensitiveDir := p.dirProvider.SensitiveBlobsDir()
	err = p.fs.MkdirAll(sensitiveDir, blobsDirPermissions)
	if err != nil {
		return bosherr.WrapErrorf(err, "Making %s dir", sensitiveDir)
	}

	// tmpfs mounts must happen before the chowns below so ownership applies
	// to the mounted filesystem, not the hidden underlying directory.
	if jobConfig.TmpFS {
		size := jobConfig.TmpFSSize
		if size == "" {
			size = "100m"
		}
		if err = p.diskManager.GetMounter().MountTmpfs(jobsDir, size); err != nil {
			return err
		}
		if err = p.diskManager.GetMounter().MountTmpfs(sensitiveDir, size); err != nil {
			return err
		}
	}

	if err = p.chownRootVcap(jobsDir); err != nil {
		return err
	}
	if err = p.chownRootVcap(sensitiveDir); err != nil {
		return err
	}

	packagesDir := p.dirProvider.PkgDir()
	err = p.fs.MkdirAll(packagesDir, packagesDirPermissions)
	if err != nil {
		return bosherr.WrapErrorf(err, "Making %s dir", packagesDir)
	}
	if err = p.chownRootVcap(packagesDir); err != nil {
		return err
	}

	err = p.setupRunDir(sysDataDir, runConfig.TmpFSSize)
	if err != nil {
		return err
	}

	sysDir := path.Join(path.Dir(dataDir), "sys")
	err = p.fs.Symlink(sysDataDir, sysDir)
	if err != nil {
		return bosherr.WrapErrorf(err, "Symlinking '%s' to '%s'", sysDir, sysDataDir)
	}

	return nil
}
// setupRunDir ensures <sysDir>/run exists, is tmpfs-mounted (default size
// 16m) and is owned by root:vcap. It is a no-op when run is already a
// mount point.
func (p linux) setupRunDir(sysDir, tmpFSSize string) error { // param renamed: was typo "tmppFSSize"
	runDir := path.Join(sysDir, "run")

	_, runDirIsMounted, err := p.IsMountPoint(runDir)
	if err != nil {
		return bosherr.WrapErrorf(err, "Checking for mount point %s", runDir)
	}
	if runDirIsMounted {
		return nil
	}

	if err := p.fs.MkdirAll(runDir, runDirPermissions); err != nil {
		return bosherr.WrapErrorf(err, "Making %s dir", runDir)
	}

	if tmpFSSize == "" {
		tmpFSSize = "16m"
	}
	if err := p.diskManager.GetMounter().MountTmpfs(runDir, tmpFSSize); err != nil {
		return bosherr.WrapErrorf(err, "Mounting tmpfs to %s", runDir)
	}

	if _, _, _, err := p.cmdRunner.RunCommand("chown", "root:vcap", runDir); err != nil {
		return bosherr.WrapErrorf(err, "chown %s", runDir)
	}
	return nil
}
// SetupHomeDir bind-mounts /home onto itself (when not already mounted) so
// that it can be remounted with the nodev option.
func (p linux) SetupHomeDir() error {
	bindMounter := boshdisk.NewLinuxBindMounter(p.diskManager.GetMounter())

	mounted, err := bindMounter.IsMounted("/home")
	if err != nil {
		return bosherr.WrapError(err, "Setup home dir, checking if mounted")
	}
	if mounted {
		return nil
	}

	if err := bindMounter.Mount("/home", "/home"); err != nil {
		return bosherr.WrapError(err, "Setup home dir, mounting home")
	}
	if err := bindMounter.RemountInPlace("/home", "nodev"); err != nil {
		return bosherr.WrapError(err, "Setup home dir, remount in place")
	}
	return nil
}
// SetupBlobsDir creates the blobs directory with restrictive permissions and
// hands ownership to root:vcap.
func (p linux) SetupBlobsDir() error {
	blobsDir := p.dirProvider.BlobsDir()

	if err := p.fs.MkdirAll(blobsDir, blobsDirPermissions); err != nil {
		return bosherr.WrapError(err, "Creating blobs dir")
	}

	if _, _, _, err := p.cmdRunner.RunCommand("chown", "root:vcap", blobsDir); err != nil {
		return bosherr.WrapErrorf(err, "chown %s", blobsDir)
	}

	return nil
}
// SetupCanRestartDir creates the tmpfs-backed "can restart" flag directory
// owned by root:vcap.
func (p linux) SetupCanRestartDir() error {
	dir := p.dirProvider.CanRestartDir()

	if err := p.fs.MkdirAll(dir, 0740); err != nil {
		return bosherr.WrapError(err, "Creating canReboot dir")
	}

	if err := p.diskManager.GetMounter().MountTmpfs(dir, "16m"); err != nil {
		return err
	}

	if _, _, _, err := p.cmdRunner.RunCommand("chown", "root:vcap", dir); err != nil {
		return bosherr.WrapError(err, "Chowning canrestart dir")
	}

	return nil
}
// SetupTmpDir prepares the agent temp dir plus the system temp dirs (/tmp
// and /var/tmp), points TMPDIR at the agent dir and, unless disabled via
// UseDefaultTmpDir, bind-mounts both system dirs onto a root_tmp dir that
// lives on the data disk.
func (p linux) SetupTmpDir() error {
	systemTmpDir := "/tmp"
	varTmpDir := "/var/tmp" // preserved across reboots, unlike /tmp
	boshTmpDir := p.dirProvider.TmpDir()
	boshRootTmpPath := path.Join(p.dirProvider.DataDir(), "root_tmp")

	if err := p.fs.MkdirAll(boshTmpDir, tmpDirPermissions); err != nil {
		return bosherr.WrapError(err, "Creating temp dir")
	}

	if err := os.Setenv("TMPDIR", boshTmpDir); err != nil {
		return bosherr.WrapError(err, "Setting TMPDIR")
	}

	if err := p.changeTmpDirPermissions(systemTmpDir); err != nil {
		return err
	}
	if err := p.changeTmpDirPermissions(varTmpDir); err != nil {
		return err
	}

	if p.options.UseDefaultTmpDir {
		return nil
	}

	if _, _, _, err := p.cmdRunner.RunCommand("mkdir", "-p", boshRootTmpPath); err != nil {
		return bosherr.WrapError(err, "Creating root tmp dir")
	}
	if err := p.changeTmpDirPermissions(boshRootTmpPath); err != nil {
		return bosherr.WrapError(err, "Chmoding root tmp dir")
	}

	if err := p.bindMountDir(boshRootTmpPath, systemTmpDir); err != nil {
		return err
	}
	if err := p.bindMountDir(boshRootTmpPath, varTmpDir); err != nil {
		return err
	}

	return nil
}
// SetupSharedMemory remounts the shared-memory mount points with
// restrictive security flags.
func (p linux) SetupSharedMemory() error {
	for _, mountPt := range []string{"/dev/shm", "/run/shm"} {
		if err := p.remountWithSecurityFlags(mountPt); err != nil {
			return err
		}
	}
	return nil
}
// remountWithSecurityFlags remounts mountPt with noexec, nodev and nosuid.
// Paths that are not currently mount points are left untouched.
func (p linux) remountWithSecurityFlags(mountPt string) error {
	mounter := p.diskManager.GetMounter()

	_, mounted, err := mounter.IsMountPoint(mountPt)
	if err != nil {
		return err
	}
	if !mounted {
		return nil
	}

	return mounter.RemountInPlace(mountPt, "noexec", "nodev", "nosuid")
}
// SetupLogDir builds a root_log directory on the data disk (with audit,
// sysstat and — when a _chrony user exists — chrony subdirectories plus the
// btmp/wtmp files), then bind-mounts it over /var/log.
func (p linux) SetupLogDir() error {
	logDir := "/var/log"

	boshRootLogPath := path.Join(p.dirProvider.DataDir(), "root_log")

	err := p.fs.MkdirAll(boshRootLogPath, userRootLogDirPermissions)
	if err != nil {
		return bosherr.WrapError(err, "Creating root log dir")
	}

	// MkdirAll does not change the mode of a pre-existing dir; enforce it.
	_, _, _, err = p.cmdRunner.RunCommand("chmod", "0771", boshRootLogPath)
	if err != nil {
		return bosherr.WrapError(err, "Chmoding /var/log dir")
	}

	auditDirPath := path.Join(boshRootLogPath, "audit")
	_, _, _, err = p.cmdRunner.RunCommand("mkdir", "-p", auditDirPath)
	if err != nil {
		return bosherr.WrapError(err, "Creating audit log dir")
	}

	_, _, _, err = p.cmdRunner.RunCommand("chmod", "0750", auditDirPath)
	if err != nil {
		return bosherr.WrapError(err, "Chmoding audit log dir")
	}

	sysstatDirPath := path.Join(boshRootLogPath, "sysstat")
	_, _, _, err = p.cmdRunner.RunCommand("mkdir", "-p", sysstatDirPath)
	if err != nil {
		return bosherr.WrapError(err, "Creating sysstat log dir")
	}

	_, _, _, err = p.cmdRunner.RunCommand("chmod", "0755", sysstatDirPath)
	if err != nil {
		return bosherr.WrapError(err, "Chmoding sysstat log dir")
	}

	// change ownership
	_, _, _, err = p.cmdRunner.RunCommand("chown", "root:syslog", boshRootLogPath)
	if err != nil {
		return bosherr.WrapError(err, "Chowning root log dir")
	}

	// Login accounting files expected by the system under /var/log.
	err = p.ensureFile(fmt.Sprintf("%s/btmp", boshRootLogPath), "root:utmp", "0600")
	if err != nil {
		return err
	}

	err = p.ensureFile(fmt.Sprintf("%s/wtmp", boshRootLogPath), "root:utmp", "0664")
	if err != nil {
		return err
	}

	err = p.bindMountDir(boshRootLogPath, logDir)
	if err != nil {
		return err
	}

	result, err := p.fs.ReadFileString("/etc/passwd")
	if err != nil {
		// NOTE(review): a failure to read /etc/passwd is swallowed here and
		// the function reports success, skipping chrony dir setup. This looks
		// like deliberate best-effort behavior — confirm it is intended.
		return nil
	}

	// Only create the chrony log dir when the _chrony account exists.
	rx := regexp.MustCompile("(?m)^_chrony:")
	if rx.MatchString(result) {
		chronyDirPath := path.Join(boshRootLogPath, "chrony")

		_, _, _, err = p.cmdRunner.RunCommand("mkdir", "-p", chronyDirPath)
		if err != nil {
			return bosherr.WrapError(err, "Creating chrony log dir")
		}

		_, _, _, err = p.cmdRunner.RunCommand("chmod", "0700", chronyDirPath)
		if err != nil {
			return bosherr.WrapError(err, "Chmoding chrony log dir")
		}

		_, _, _, err = p.cmdRunner.RunCommand("chown", "_chrony:_chrony", chronyDirPath)
		if err != nil {
			return bosherr.WrapError(err, "Chowning chrony log dir")
		}
	}

	return nil
}
// ensureFile creates path if it is missing (touch) and then forces the
// requested owner and mode onto it.
func (p linux) ensureFile(path, owner, mode string) error {
	// Consistency: WrapErrorf replaces WrapError+fmt.Sprintf used previously.
	if _, _, _, err := p.cmdRunner.RunCommand("touch", path); err != nil {
		return bosherr.WrapErrorf(err, "Touching '%s' file", path)
	}

	if _, _, _, err := p.cmdRunner.RunCommand("chown", owner, path); err != nil {
		return bosherr.WrapErrorf(err, "Chowning '%s' file", path)
	}

	if _, _, _, err := p.cmdRunner.RunCommand("chmod", mode, path); err != nil {
		return bosherr.WrapErrorf(err, "Chmoding '%s' file", path)
	}

	return nil
}
// SetupLoggingAndAuditing invokes the stemcell script that starts the
// logging and auditing services.
func (p linux) SetupLoggingAndAuditing() error {
	if _, _, _, err := p.cmdRunner.RunCommand("/var/vcap/bosh/bin/bosh-start-logging-and-auditing"); err != nil {
		return bosherr.WrapError(err, "Running start logging and audit script")
	}
	return nil
}
// bindMountDir bind-mounts mountSource over mountPoint (if not already
// mounted) and remounts it with nodev, noexec and nosuid.
func (p linux) bindMountDir(mountSource, mountPoint string) error {
	bindMounter := boshdisk.NewLinuxBindMounter(p.diskManager.GetMounter())

	mounted, err := bindMounter.IsMounted(mountPoint)
	if err != nil {
		return err
	}

	if !mounted {
		if err := bindMounter.Mount(mountSource, mountPoint); err != nil {
			return bosherr.WrapErrorf(err, "Bind mounting %s dir over %s", mountSource, mountPoint)
		}
	}

	return bindMounter.RemountInPlace(mountPoint, "nodev", "noexec", "nosuid")
}
// changeTmpDirPermissions makes path a root:vcap, sticky, world-writable
// directory (mode 1777) matching standard /tmp semantics.
func (p linux) changeTmpDirPermissions(path string) error {
	if _, _, _, err := p.cmdRunner.RunCommand("chown", "root:vcap", path); err != nil {
		return bosherr.WrapErrorf(err, "chown %s", path)
	}
	if _, _, _, err := p.cmdRunner.RunCommand("chmod", "1777", path); err != nil {
		return bosherr.WrapErrorf(err, "chmod %s", path)
	}
	return nil
}
// MountPersistentDisk resolves the device for diskSetting, partitions and
// formats it when it is not preformatted, and mounts it at mountPoint.
// If another persistent disk already occupies mountPoint, the new disk is
// mounted at the store migration directory instead so data can be migrated.
// On success the managed disk ID is persisted to managed_disk_settings.json.
func (p linux) MountPersistentDisk(diskSetting boshsettings.DiskSettings, mountPoint string) error {
	p.logger.Debug(logTag, "Mounting persistent disk %+v at %s", diskSetting, mountPoint)

	realPath, _, err := p.devicePathResolver.GetRealDevicePath(diskSetting)
	if err != nil {
		return bosherr.WrapError(err, "Getting real device path")
	}

	devicePath, isMountPoint, err := p.IsMountPoint(mountPoint)
	if err != nil {
		return bosherr.WrapError(err, "Checking mount point")
	}
	p.logger.Info(logTag, "realPath = %s, devicePath = %s, isMountPoint = %t", realPath, devicePath, isMountPoint)

	partitionPath := p.partitionPath(realPath, 1)
	if isMountPoint {
		if partitionPath == devicePath {
			// The requested disk is already mounted there; nothing to do.
			p.logger.Info(logTag, "device: %s is already mounted on %s, skipping mounting", devicePath, mountPoint)
			return nil
		}
		// A different disk is mounted there: mount the new one at the
		// migration directory so its contents can be copied over later.
		mountPoint = p.dirProvider.StoreMigrationDir()
	}

	err = p.fs.MkdirAll(mountPoint, persistentDiskPermissions)
	if err != nil {
		return bosherr.WrapErrorf(err, "Creating directory %s", mountPoint)
	}

	if !p.options.UsePreformattedPersistentDisk {
		partitions := []boshdisk.Partition{
			{Type: boshdisk.PartitionTypeLinux},
		}

		partitioner, err := p.diskManager.GetPersistentDevicePartitioner(diskSetting.Partitioner)
		if err != nil {
			return bosherr.WrapError(err, "Selecting partitioner")
		}

		err = partitioner.Partition(realPath, partitions)
		if err != nil {
			return bosherr.WrapError(err, "Partitioning disk")
		}

		// Resolve the filesystem type; an unset ("default") value means ext4.
		persistentDiskFS := diskSetting.FileSystemType
		switch persistentDiskFS {
		case boshdisk.FileSystemExt4, boshdisk.FileSystemXFS:
		case boshdisk.FileSystemDefault:
			persistentDiskFS = boshdisk.FileSystemExt4
		default:
			return bosherr.Error(fmt.Sprintf(`The filesystem type "%s" is not supported`, diskSetting.FileSystemType))
		}

		err = p.diskManager.GetFormatter().Format(partitionPath, persistentDiskFS)
		if err != nil {
			// Report the resolved filesystem (persistentDiskFS) rather than the
			// raw setting, which is the empty string when the default was used.
			return bosherr.WrapError(err, fmt.Sprintf("Formatting partition with %s", persistentDiskFS))
		}

		realPath = partitionPath
	}

	err = p.diskManager.GetMounter().Mount(realPath, mountPoint, diskSetting.MountOptions...)
	if err != nil {
		return bosherr.WrapError(err, "Mounting partition")
	}

	managedSettingsPath := filepath.Join(p.dirProvider.BoshDir(), "managed_disk_settings.json")
	err = p.fs.WriteFileString(managedSettingsPath, diskSetting.ID)
	if err != nil {
		return bosherr.WrapError(err, "Writing managed_disk_settings.json")
	}

	return nil
}
// UnmountPersistentDisk unmounts the device described by diskSettings and
// reports whether an unmount actually happened. A timeout while resolving
// the device path is treated as "nothing to unmount" rather than an error.
func (p linux) UnmountPersistentDisk(diskSettings boshsettings.DiskSettings) (bool, error) {
	p.logger.Debug(logTag, "Unmounting persistent disk %+v", diskSettings)

	realPath, timedOut, err := p.devicePathResolver.GetRealDevicePath(diskSettings)
	switch {
	case timedOut:
		return false, nil
	case err != nil:
		return false, bosherr.WrapError(err, "Getting real device path")
	}

	// Non-preformatted disks are mounted via their first partition.
	if !p.options.UsePreformattedPersistentDisk {
		realPath = p.partitionPath(realPath, 1)
	}
	return p.diskManager.GetMounter().Unmount(realPath)
}
// GetEphemeralDiskPath returns the resolved device path for the ephemeral
// disk, or the empty string if the path cannot be resolved.
func (p linux) GetEphemeralDiskPath(diskSettings boshsettings.DiskSettings) string {
	if realPath, _, err := p.devicePathResolver.GetRealDevicePath(diskSettings); err == nil {
		return realPath
	}
	return ""
}
// IsPersistentDiskMountable reports whether the disk already carries a
// partition table, by inspecting `sfdisk -d` output. An unrecognized
// partition table means the disk is not yet mountable.
func (p linux) IsPersistentDiskMountable(diskSettings boshsettings.DiskSettings) (bool, error) {
	realPath, _, err := p.devicePathResolver.GetRealDevicePath(diskSettings)
	if err != nil {
		return false, bosherr.WrapErrorf(err, "Validating path: %s", diskSettings.Path)
	}

	stdout, stderr, _, _ := p.cmdRunner.RunCommand("sfdisk", "-d", realPath)
	if strings.Contains(stderr, "unrecognized partition table type") {
		return false, nil
	}

	// A dump describing at least one partition spans more than four lines
	// (i.e. contains at least four newlines).
	return strings.Count(stdout, "\n") >= 4, nil
}
// IsMountPoint reports whether path is an active mount point, returning the
// partition/device path mounted there (if any) alongside the boolean result.
func (p linux) IsMountPoint(path string) (string, bool, error) {
	return p.diskManager.GetMounter().IsMountPoint(path)
}
// MigratePersistentDisk copies all data from fromMountPoint to toMountPoint
// (preserving ownership and timestamps via tar), unmounts the old disk, and
// remounts the new disk at the original mount point. For iSCSI-backed disks
// the old multipath device map is flushed afterwards.
func (p linux) MigratePersistentDisk(fromMountPoint, toMountPoint string) (err error) {
	p.logger.Debug(logTag, "Migrating persistent disk %v to %v", fromMountPoint, toMountPoint)

	err = p.diskManager.GetMounter().RemountAsReadonly(fromMountPoint)
	if err != nil {
		return bosherr.WrapError(err, "Remounting persistent disk as readonly")
	}

	// Golang does not implement a file copy that would allow us to preserve dates...
	// So we have to shell out to tar to perform the copy instead of delegating to the FileSystem
	tarCopy := fmt.Sprintf("(tar -C %s -cf - .) | (tar -C %s -xpf -)", fromMountPoint, toMountPoint)
	_, _, _, err = p.cmdRunner.RunCommand("sh", "-c", tarCopy)
	if err != nil {
		return bosherr.WrapError(err, "Copying files from old disk to new disk")
	}

	// Find the iSCSI multipath device id backing fromMountPoint so it can be
	// flushed once the disk is unmounted.
	var iscsiID string
	if p.options.DevicePathResolutionType == "iscsi" {
		mounts, searchErr := p.diskManager.GetMountsSearcher().SearchMounts()
		if searchErr != nil {
			// Use a distinct variable to avoid shadowing the named return.
			return bosherr.WrapError(searchErr, "Search persistent disk as readonly")
		}

		// Compile the pattern once, outside the mount loop.
		partRegexp := regexp.MustCompile(`\/dev\/mapper\/(.*?)-part1`)
		for _, mount := range mounts {
			if mount.MountPoint == fromMountPoint {
				matches := partRegexp.FindStringSubmatch(mount.PartitionPath)
				if len(matches) > 1 {
					iscsiID = matches[1]
				}
			}
		}
	}

	_, err = p.diskManager.GetMounter().Unmount(fromMountPoint)
	if err != nil {
		return bosherr.WrapError(err, "Unmounting old persistent disk")
	}

	err = p.diskManager.GetMounter().Remount(toMountPoint, fromMountPoint)
	if err != nil {
		err = bosherr.WrapError(err, "Remounting new disk on original mountpoint")
	}

	// Flush even if the remount failed; the old device is already unmounted.
	if p.options.DevicePathResolutionType == "iscsi" && iscsiID != "" {
		// Previously this error was silently discarded; log it so operators can
		// see a failed flush without failing the (already completed) migration.
		if flushErr := p.flushMultipathDevice(iscsiID); flushErr != nil {
			p.logger.Error(logTag, "Failed to flush multipath device %s: %s", iscsiID, flushErr.Error())
		}
	}
	return err
}
// IsPersistentDiskMounted reports whether the device described by
// diskSettings is currently mounted. A timeout while resolving the device
// path is treated as "not mounted" rather than an error.
func (p linux) IsPersistentDiskMounted(diskSettings boshsettings.DiskSettings) (bool, error) {
	p.logger.Debug(logTag, "Checking whether persistent disk %+v is mounted", diskSettings)

	realPath, timedOut, err := p.devicePathResolver.GetRealDevicePath(diskSettings)
	switch {
	case timedOut:
		p.logger.Debug(logTag, "Timed out resolving device path for %+v, ignoring", diskSettings)
		return false, nil
	case err != nil:
		return false, bosherr.WrapError(err, "Getting real device path")
	}

	// Non-preformatted disks are mounted via their first partition.
	checkPath := realPath
	if !p.options.UsePreformattedPersistentDisk {
		checkPath = p.partitionPath(realPath, 1)
	}
	return p.diskManager.GetMounter().IsMounted(checkPath)
}
// StartMonit registers monit with runit by symlinking its service directory
// into /etc/service, then retries starting it per the configured strategy.
func (p linux) StartMonit() error {
	serviceTarget := path.Join("/etc", "sv", "monit")
	serviceLink := path.Join("/etc", "service", "monit")
	if err := p.fs.Symlink(serviceTarget, serviceLink); err != nil {
		return bosherr.WrapError(err, "Symlinking /etc/service/monit to /etc/sv/monit")
	}

	if err := p.monitRetryStrategy.Try(); err != nil {
		return bosherr.WrapError(err, "Retrying to start monit")
	}
	return nil
}
// SetupMonitUser writes the monit credentials file used by the agent to talk
// to the local monit daemon.
func (p linux) SetupMonitUser() error {
	userFilePath := path.Join(p.dirProvider.BaseDir(), "monit", "monit.user")
	if err := p.fs.WriteFileString(userFilePath, "vcap:random-password"); err != nil {
		return bosherr.WrapError(err, "Writing monit user file")
	}
	return nil
}
// GetMonitCredentials reads the monit user file and returns the username and
// password stored there as "username:password".
func (p linux) GetMonitCredentials() (username, password string, err error) {
	monitUserFilePath := path.Join(p.dirProvider.BaseDir(), "monit", "monit.user")

	credContent, readErr := p.fs.ReadFileString(monitUserFilePath)
	if readErr != nil {
		return "", "", bosherr.WrapError(readErr, "Reading monit user file")
	}

	// Split only on the first ':' so passwords may themselves contain colons.
	credParts := strings.SplitN(credContent, ":", 2)
	if len(credParts) != 2 {
		return "", "", bosherr.Error("Malformated monit user file, expecting username and password separated by ':'")
	}

	return credParts[0], credParts[1], nil
}
// PrepareForNetworkingChange removes the persistent udev network naming
// rules so interfaces are re-enumerated after the networking change.
func (p linux) PrepareForNetworkingChange() error {
	const rulesFile = "/etc/udev/rules.d/70-persistent-net.rules"
	if err := p.fs.RemoveAll(rulesFile); err != nil {
		return bosherr.WrapError(err, "Removing network rules file")
	}
	return nil
}
// DeleteARPEntryWithIP flushes the neighbour (ARP) cache entry for ip.
func (p linux) DeleteARPEntryWithIP(ip string) error {
	if _, _, _, err := p.cmdRunner.RunCommand("ip", "neigh", "flush", "to", ip); err != nil {
		return bosherr.WrapError(err, "Deleting arp entry")
	}
	return nil
}
// GetDefaultNetwork returns the settings of the default (primary) network,
// as determined by the configured default network resolver.
func (p linux) GetDefaultNetwork() (boshsettings.Network, error) {
	return p.defaultNetworkResolver.GetDefaultNetwork()
}
// calculateEphemeralDiskPartitionSizes splits diskSizeInBytes into a swap
// partition and a Linux data partition. When no explicit swap size is
// requested, swap defaults to the amount of RAM, capped at half the disk.
// Returns (swapSize, linuxSize, error).
func (p linux) calculateEphemeralDiskPartitionSizes(diskSizeInBytes uint64, desiredSwapSizeInBytes *uint64) (uint64, uint64, error) {
	memStats, err := p.collector.GetMemStats()
	if err != nil {
		return 0, 0, bosherr.WrapError(err, "Getting mem stats")
	}
	totalMemInBytes := memStats.Total

	var swapSizeInBytes uint64
	if desiredSwapSizeInBytes == nil {
		// Default heuristic: swap matches RAM, capped at half the disk.
		if totalMemInBytes > diskSizeInBytes/2 {
			swapSizeInBytes = diskSizeInBytes / 2
		} else {
			swapSizeInBytes = totalMemInBytes
		}
	} else {
		swapSizeInBytes = *desiredSwapSizeInBytes
	}

	// Guard against unsigned underflow: a requested swap larger than the disk
	// would previously wrap around to a huge "linux" partition size.
	if swapSizeInBytes > diskSizeInBytes {
		return 0, 0, bosherr.Errorf("Requested swap size (%dB) is larger than the disk size (%dB)", swapSizeInBytes, diskSizeInBytes)
	}

	linuxSizeInBytes := diskSizeInBytes - swapSizeInBytes
	return swapSizeInBytes, linuxSizeInBytes, nil
}
// findRootDevicePathAndNumber locates the partition mounted at "/" and
// returns the parent device path plus the partition number, e.g.
// ("/dev/sda", 1) for /dev/sda1 or ("/dev/nvme0n1", 2) for /dev/nvme0n1p2.
func (p linux) findRootDevicePathAndNumber() (string, int, error) {
	mounts, err := p.diskManager.GetMountsSearcher().SearchMounts()
	if err != nil {
		return "", 0, bosherr.WrapError(err, "Searching mounts")
	}

	for _, mount := range mounts {
		if !mount.IsRoot() {
			continue
		}

		p.logger.Debug(logTag, "Found root partition: `%s'", mount.PartitionPath)

		// Resolve symlinks (e.g. /dev/disk/by-uuid/...) to the real device node.
		stdout, _, _, err := p.cmdRunner.RunCommand("readlink", "-f", mount.PartitionPath)
		if err != nil {
			return "", 0, bosherr.WrapError(err, "Shelling out to readlink")
		}

		rootPartition := strings.Trim(stdout, "\n")
		p.logger.Debug(logTag, "Symlink is: `%s'", rootPartition)

		validNVMeRootPartition := regexp.MustCompile(`^/dev/[a-z]+\dn\dp\d$`)
		validSCSIRootPartition := regexp.MustCompile(`^/dev/[a-z]+\d$`)

		isValidNVMePath := validNVMeRootPartition.MatchString(rootPartition)
		isValidSCSIPath := validSCSIRootPartition.MatchString(rootPartition)
		if !isValidNVMePath && !isValidSCSIPath {
			// Fixed format string: the original ran "name" and the path together
			// ("invalid name/dev/...").
			return "", 0, bosherr.Errorf("Root partition has an invalid name: %s", rootPartition)
		}

		// Strip the trailing partition digit — and the "p" separator for NVMe
		// devices — to recover the parent device path.
		devPath := rootPartition[:len(rootPartition)-1]
		if isValidNVMePath {
			devPath = rootPartition[:len(rootPartition)-2]
		}

		devNum, err := strconv.Atoi(rootPartition[len(rootPartition)-1:])
		if err != nil {
			return "", 0, bosherr.WrapError(err, "Parsing device number failed")
		}

		return devPath, devNum, nil
	}

	return "", 0, bosherr.Error("Getting root partition device")
}
// createEphemeralPartitionsOnRootDevice carves swap and ephemeral data
// partitions out of the unused space on the root disk, returning the device
// paths of the swap and data partitions.
func (p linux) createEphemeralPartitionsOnRootDevice(desiredSwapSizeInBytes *uint64, labelPrefix string) (string, string, error) {
	p.logger.Info(logTag, "Creating swap & ephemeral partitions on root disk...")
	p.logger.Debug(logTag, "Determining root device")

	rootDevicePath, rootDeviceNumber, err := p.findRootDevicePathAndNumber()
	if err != nil {
		return "", "", bosherr.WrapError(err, "Finding root partition device")
	}
	p.logger.Debug(logTag, "Found root device `%s'", rootDevicePath)

	p.logger.Debug(logTag, "Getting remaining size of `%s'", rootDevicePath)
	partitioner := p.diskManager.GetRootDevicePartitioner()
	remainingSizeInBytes, err := partitioner.GetDeviceSizeInBytes(rootDevicePath)
	if err != nil {
		return "", "", bosherr.WrapError(err, "Getting root device remaining size")
	}

	if remainingSizeInBytes < minRootEphemeralSpaceInBytes {
		return "", "", newInsufficientSpaceError(remainingSizeInBytes, minRootEphemeralSpaceInBytes)
	}

	// New partitions start right after the existing root partition.
	swapPartitionPath, dataPartitionPath, err := p.partitionDisk(remainingSizeInBytes, desiredSwapSizeInBytes, rootDevicePath, rootDeviceNumber+1, partitioner, labelPrefix)
	if err != nil {
		return "", "", bosherr.WrapErrorf(err, "Partitioning root device `%s'", rootDevicePath)
	}
	return swapPartitionPath, dataPartitionPath, nil
}
// partitionEphemeralDisk splits a dedicated ephemeral disk into swap and
// data partitions, returning their device paths.
func (p linux) partitionEphemeralDisk(realPath string, desiredSwapSizeInBytes *uint64, labelPrefix string) (string, string, error) {
	p.logger.Info(logTag, "Creating swap & ephemeral partitions on ephemeral disk...")
	p.logger.Debug(logTag, "Getting device size of `%s'", realPath)

	partitioner := p.diskManager.GetEphemeralDevicePartitioner()
	diskSizeInBytes, err := partitioner.GetDeviceSizeInBytes(realPath)
	if err != nil {
		return "", "", bosherr.WrapError(err, "Getting device size")
	}

	swapPartitionPath, dataPartitionPath, err := p.partitionDisk(diskSizeInBytes, desiredSwapSizeInBytes, realPath, 1, partitioner, labelPrefix)
	if err != nil {
		return "", "", bosherr.WrapErrorf(err, "Partitioning ephemeral disk '%s'", realPath)
	}
	return swapPartitionPath, dataPartitionPath, nil
}
// partitionDisk lays out swap and data partitions on partitionPath using the
// supplied partitioner. Partition numbering starts at partitionStartCount.
// When the computed swap size is zero, only a data partition is created and
// the returned swap path is empty.
func (p linux) partitionDisk(availableSize uint64, desiredSwapSizeInBytes *uint64, partitionPath string, partitionStartCount int, partitioner boshdisk.Partitioner, labelPrefix string) (string, string, error) {
	p.logger.Debug(logTag, "Calculating partition sizes of `%s', with available size %dB", partitionPath, availableSize)

	swapSizeInBytes, linuxSizeInBytes, err := p.calculateEphemeralDiskPartitionSizes(availableSize, desiredSwapSizeInBytes)
	if err != nil {
		return "", "", bosherr.WrapError(err, "Calculating partition sizes")
	}

	labelPrefix = prepareDiskLabelPrefix(labelPrefix)
	dataPartition := boshdisk.Partition{NamePrefix: labelPrefix, SizeInBytes: linuxSizeInBytes, Type: boshdisk.PartitionTypeLinux}

	var partitions []boshdisk.Partition
	var swapPartitionPath, dataPartitionPath string
	if swapSizeInBytes == 0 {
		partitions = []boshdisk.Partition{dataPartition}
		dataPartitionPath = p.partitionPath(partitionPath, partitionStartCount)
	} else {
		swapPartition := boshdisk.Partition{NamePrefix: labelPrefix, SizeInBytes: swapSizeInBytes, Type: boshdisk.PartitionTypeSwap}
		partitions = []boshdisk.Partition{swapPartition, dataPartition}
		swapPartitionPath = p.partitionPath(partitionPath, partitionStartCount)
		dataPartitionPath = p.partitionPath(partitionPath, partitionStartCount+1)
	}

	p.logger.Info(logTag, "Partitioning `%s' with %s", partitionPath, partitions)
	err = partitioner.Partition(partitionPath, partitions)
	return swapPartitionPath, dataPartitionPath, err
}
// RemoveDevTools deletes every path listed (one per line) in the file at
// packageFileListPath. Blank lines are skipped so that an empty or padded
// list file never results in running `rm -rf ""`.
func (p linux) RemoveDevTools(packageFileListPath string) error {
	content, err := p.fs.ReadFileString(packageFileListPath)
	if err != nil {
		return bosherr.WrapErrorf(err, "Unable to read Development Tools list file: %s", packageFileListPath)
	}

	for _, pkgFile := range strings.Split(strings.TrimSpace(content), "\n") {
		pkgFile = strings.TrimSpace(pkgFile)
		if pkgFile == "" {
			continue // skip blank lines
		}
		if _, _, _, err := p.cmdRunner.RunCommand("rm", "-rf", pkgFile); err != nil {
			return bosherr.WrapErrorf(err, "Unable to remove package file: %s", pkgFile)
		}
	}
	return nil
}
// RemoveStaticLibraries deletes every path listed (one per line) in the file
// at staticLibrariesListFilePath. Blank lines are skipped so that an empty
// or padded list file never results in running `rm -rf ""`.
func (p linux) RemoveStaticLibraries(staticLibrariesListFilePath string) error {
	content, err := p.fs.ReadFileString(staticLibrariesListFilePath)
	if err != nil {
		return bosherr.WrapErrorf(err, "Unable to read static libraries list file: %s", staticLibrariesListFilePath)
	}

	for _, library := range strings.Split(strings.TrimSpace(content), "\n") {
		library = strings.TrimSpace(library)
		if library == "" {
			continue // skip blank lines
		}
		if _, _, _, err := p.cmdRunner.RunCommand("rm", "-rf", library); err != nil {
			return bosherr.WrapErrorf(err, "Unable to remove static library: %s", library)
		}
	}
	return nil
}
// partitionPath returns the device node name for the Nth partition of
// devicePath, accounting for the naming scheme of the device type:
// NVMe ("p1" suffix), device-mapper ("-part1" suffix), or plain ("1").
func (p linux) partitionPath(devicePath string, partitionNumber int) string {
	num := strconv.Itoa(partitionNumber)
	if strings.HasPrefix(devicePath, "/dev/nvme") {
		return devicePath + "p" + num
	}
	if strings.HasPrefix(devicePath, "/dev/mapper/") {
		return devicePath + "-part" + num
	}
	return devicePath + num
}
// generateDefaultEtcHosts renders the default /etc/hosts contents for the
// given hostname from the EtcHostsTemplate.
func (p linux) generateDefaultEtcHosts(hostname string) (*bytes.Buffer, error) {
	tmpl := template.Must(template.New("etc-hosts").Parse(EtcHostsTemplate))

	var buffer bytes.Buffer
	if err := tmpl.Execute(&buffer, hostname); err != nil {
		return nil, err
	}
	return &buffer, nil
}
// flushMultipathDevice removes the multipath device map with the given id,
// but only if `multipath -ll` currently reports it.
func (p linux) flushMultipathDevice(id string) error {
	p.logger.Debug(logTag, "Flush multipath device: %s", id)

	listing, _, _, err := p.cmdRunner.RunCommand("multipath", "-ll")
	if err != nil {
		return bosherr.WrapErrorf(err, "Get multipath information")
	}

	if !strings.Contains(listing, id) {
		return nil // device not present; nothing to flush
	}

	if _, _, _, err := p.cmdRunner.RunCommand("multipath", "-f", id); err != nil {
		return bosherr.WrapErrorf(err, "Flush multipath device")
	}
	return nil
}
// insufficientSpaceError signals that a device does not have enough free
// space to create the ephemeral partition.
type insufficientSpaceError struct {
	spaceFound    uint64
	spaceRequired uint64
}

// newInsufficientSpaceError builds an insufficientSpaceError describing how
// much space was found versus how much was required.
func newInsufficientSpaceError(spaceFound, spaceRequired uint64) insufficientSpaceError {
	return insufficientSpaceError{spaceFound: spaceFound, spaceRequired: spaceRequired}
}

// Error implements the error interface.
func (e insufficientSpaceError) Error() string {
	return fmt.Sprintf("Insufficient remaining disk space (%dB) for ephemeral partition (min: %dB)", e.spaceFound, e.spaceRequired)
}
// Shutdown powers off the machine immediately.
func (p linux) Shutdown() error {
	if _, _, _, err := p.cmdRunner.RunCommand("shutdown", "-P", "0"); err != nil {
		return bosherr.WrapErrorf(err, "Failed to shutdown")
	}
	return nil
}
// resolveCanonicalLink follows symlinks with `readlink -f` and returns the
// canonical path for path, with surrounding newlines stripped.
func resolveCanonicalLink(cmdRunner boshsys.CmdRunner, path string) (string, error) {
	canonicalPath, _, _, err := cmdRunner.RunCommand("readlink", "-f", path)
	if err != nil {
		return "", bosherr.WrapError(err, "Shelling out to readlink")
	}
	return strings.Trim(canonicalPath, "\n"), nil
}
// prepareDiskLabelPrefix namespaces labelPrefix under "bosh-partition-" and
// truncates the result so GPT partition names stay short once a dash and a
// partition number are appended.
func prepareDiskLabelPrefix(labelPrefix string) string {
	const namespace = "bosh-partition-"
	prefixed := namespace + labelPrefix
	if len(prefixed) > 33 {
		// Leave room for a dash and two digits in the final partition name.
		prefixed = prefixed[:32]
	}
	return prefixed
}
| {
"content_hash": "78831f74a6aebeaf4552cde4f168e52e",
"timestamp": "",
"source": "github",
"line_count": 1642,
"max_line_length": 226,
"avg_line_length": 30.102923264311816,
"alnum_prop": 0.709785753302717,
"repo_name": "gu-bin/bosh-agent",
"id": "bb85ac33893cfbe2785a8b59f0f7abd4fa093d44",
"size": "49429",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "platform/linux_platform.go",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "732"
},
{
"name": "Go",
"bytes": "2438325"
},
{
"name": "PowerShell",
"bytes": "5953"
},
{
"name": "Ruby",
"bytes": "8494"
},
{
"name": "Shell",
"bytes": "25409"
}
],
"symlink_target": ""
} |
{% load i18n %}
{% extends "html/documents/page.html" %}
{# Page for creating a new document inside an existing folder. Shown with the folder's add/view/edit action menu; edit and delete links require write permission on the folder. #}
{% block title %}{% trans %}New Document{% endtrans %} | {% trans %}Documents{% endtrans %}{% endblock %}
{% block module_title %}{{ folder.name }}{% endblock %}
{% block module_subtitle %}{% trans %}Folder{% endtrans %}{% endblock %}
{% block module_topmenu %}
<a class="top-menu add-link" href="{% url documents_folder_add_typed folder.id %}">{% trans %}New Folder{% endtrans %}</a>
<a class="top-menu add-link-active" href="{% url documents_document_add_typed folder.id %}">{% trans %}New Document{% endtrans %}</a>
<a class="top-menu add-link" href="{% url documents_weblink_add_typed folder.id %}">{% trans %}New Web Link{% endtrans %}</a>
<a class="top-menu add-link" href="{% url documents_file_upload_typed folder.id %}">{% trans %}Upload File{% endtrans %}</a>
<a class="top-menu view-link" href="{% url documents_folder_view folder.id %}">{% trans %}View{% endtrans %}</a>
{% if user.profile.has_permission(folder, mode='w') %}
<a class="top-menu edit-link" href="{% url documents_folder_edit folder.id %}">{% trans %}Edit{% endtrans %}</a>
<a class="top-menu delete-link" href="{% url documents_folder_delete folder.id %}">{% trans %}Delete{% endtrans %}</a>
{% endif %}
{% endblock %}
{% block module_content %}
{# The form's fields are rendered by the server-side form object; save posts back to this URL. #}
<form action="" method="post" class="content-form">
{% csrf_token %}
    <ul class="content-form-fields">
        {{ form.as_ul()|safe }}
    </ul>
    <div class="content-form-submit">
        <input type="submit" name="save" value="{% trans %}Create Document{% endtrans %}">
        <input type="submit" name="cancel" value="{% trans %}Cancel{% endtrans %}" class="cancel" />
    </div>
</form>
{% endblock %}
| {
"content_hash": "664b97d33bc9fa30660773d97c6e545a",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 133,
"avg_line_length": 49.794117647058826,
"alnum_prop": 0.6302421736562316,
"repo_name": "Sofcom/treeio",
"id": "c3997b83d54a070f9fa281fe4b98d4068ec32de0",
"size": "1693",
"binary": false,
"copies": "4",
"ref": "refs/heads/2.0",
"path": "templates/html/documents/document_add_typed.html",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "400811"
},
{
"name": "HTML",
"bytes": "1508469"
},
{
"name": "JavaScript",
"bytes": "2137383"
},
{
"name": "Nginx",
"bytes": "2335"
},
{
"name": "PHP",
"bytes": "25856"
},
{
"name": "Python",
"bytes": "2931287"
},
{
"name": "Shell",
"bytes": "17020"
}
],
"symlink_target": ""
} |
package org.python.util.install.driver;
import java.io.File;
import java.io.IOException;
import junit.framework.TestCase;
import org.python.util.install.FileHelper;
import org.python.util.install.Installation;
import org.python.util.install.JavaVersionTester;
/**
 * Unit tests for {@code NormalVerifier}, exercising command construction and
 * shell-script test generation. The target directory deliberately contains
 * spaces to verify that paths are handled (and quoted) correctly.
 */
public class NormalVerifierTest extends TestCase {

    // Double quote used to wrap paths when asserting quoted shell-script values.
    private static final String DQ = "\"";

    private NormalVerifier _verifier;

    // Points a fresh verifier at a newly created temp directory whose name contains spaces.
    protected void setUp() throws Exception {
        super.setUp();
        _verifier = new NormalVerifier();
        // use a directory containing spaces as target directory
        File targetDir = createTargetDirectory();
        assertTrue(targetDir.exists());
        assertTrue(targetDir.isDirectory());
        _verifier.setTargetDir(targetDir);
    }

    // Removes the generated autotest.py (if any) from the target directory.
    protected void tearDown() throws Exception {
        super.tearDown();
        if (_verifier.getTargetDir() != null) {
            File autotestFile = new File(_verifier.getTargetDir().getCanonicalPath(),
                    NormalVerifier.AUTOTEST_PY);
            if (autotestFile.exists()) {
                assertTrue(autotestFile.delete());
            }
        }
    }

    // have to install jython first in order to activate this test
    public void testVerify() throws Exception {}

    // Verifies the jython launcher path (with .bat suffix on Windows) and the autotest.py argument.
    public void testGetSimpleCommand() throws Exception {
        String prefix = _verifier.getTargetDir().getCanonicalPath().concat(File.separator);
        String expectedCommand = prefix.concat("jython");
        if (Installation.isWindows()) {
            expectedCommand = expectedCommand.concat(".bat");
        }
        String expectedArgument = prefix.concat("autotest.py");
        String[] command = _verifier.getSimpleCommand();
        assertNotNull(command);
        assertEquals(2, command.length);
        assertEquals(expectedCommand, command[0]);
        assertEquals(expectedArgument, command[1]);
    }

    public void testDoShellScriptTests() {
        assertTrue(_verifier.doShellScriptTests());
    }

    // The shell-script test command directory is expected to be <targetDir>/bin.
    public void testGetShellScriptTestCommandDir() throws DriverException, IOException {
        String expectedDir = _verifier.getTargetDir()
                .getCanonicalPath()
                .concat(File.separator)
                .concat("bin");
        assertEquals(expectedDir, _verifier.getShellScriptTestCommandDir().getCanonicalPath());
    }

    // Checks that all template placeholders were substituted and that the
    // generated script carries the expected platform-specific variable assignments.
    public void testGetShellScriptTestContents() throws Exception {
        String contents = _verifier.getShellScriptTestContents();
        // common asserts
        assertNotNull(contents);
        assertFalse(contents.length() == 0);
        assertFalse(contents.indexOf("{0}") > 0);
        assertFalse(contents.indexOf("{1}") > 0);
        assertFalse(contents.indexOf("{2}") > 0);
        assertFalse(contents.indexOf("{3}") > 0);
        assertTrue(contents.indexOf("autotest.py") > 0);
        String targetDirPath = _verifier.getTargetDir().getCanonicalPath();
        String upScriptPath = _verifier.getSimpleCommand()[1];
        String javaHome = System.getProperty(JavaVersionTester.JAVA_HOME, ""); // change this ++++++
        assertTrue(javaHome.length() > 0);
        // platform specific asserts
        if (Installation.isWindows()) {
            assertTrue(contents.indexOf("set _INSTALL_DIR=") > 0);
            assertTrue(contents.indexOf("set _INSTALL_DIR=".concat(targetDirPath)) > 0);
            assertTrue(contents.indexOf("set _SCRIPT=") > 0);
            assertTrue(contents.indexOf("set _SCRIPT=".concat(upScriptPath)) > 0);
            assertTrue(contents.indexOf("set _JAVA_HOME=") > 0);
            assertTrue(contents.indexOf("set _JAVA_HOME=".concat(javaHome)) > 0);
        } else {
            System.out.println(contents);
            assertTrue(contents.indexOf("_INSTALL_DIR=") > 0);
            assertTrue(contents.indexOf("_INSTALL_DIR=".concat(quote(targetDirPath))) > 0);
            assertTrue(contents.indexOf("_SCRIPT=") > 0);
            assertTrue(contents.indexOf("_SCRIPT=".concat(quote(upScriptPath))) > 0);
            assertTrue(contents.indexOf("_JAVA_HOME=") > 0);
            assertTrue(contents.indexOf("_JAVA_HOME=".concat(quote(javaHome))) > 0);
        }
    }

    // The generated jython_test script must exist on disk and match the expected contents.
    public void testGetShellScriptTestCommand() throws Exception {
        String prefix = _verifier.getShellScriptTestCommandDir()
                .getCanonicalPath()
                .concat(File.separator);
        String expectedCommand = prefix.concat("jython_test");
        if (Installation.isWindows()) {
            expectedCommand = expectedCommand.concat(".bat");
        }
        String[] command = _verifier.getShellScriptTestCommand();
        assertNotNull(command);
        assertEquals(1, command.length);
        String commandFileName = command[0];
        assertEquals(expectedCommand, commandFileName);
        File commandFile = new File(commandFileName);
        assertTrue(commandFile.exists());
        String contents = FileHelper.readAll(commandFile);
        assertNotNull(contents);
        assertFalse(contents.length() == 0);
        assertEquals(_verifier.getShellScriptTestContents(), contents);
    }

    // Creates a temp directory whose name contains spaces (see class comment).
    private File createTargetDirectory() throws IOException {
        File tmpFile = File.createTempFile("NormalVerifierTest_", "with spaces");
        FileHelper.createTempDirectory(tmpFile);
        return tmpFile;
    }

    // Wraps value in double quotes, as the generated shell scripts do.
    private String quote(String value) {
        return DQ.concat(value).concat(DQ);
    }
}
| {
"content_hash": "0fc87acbb03a2f718049ab0a9664b387",
"timestamp": "",
"source": "github",
"line_count": 131,
"max_line_length": 100,
"avg_line_length": 41.49618320610687,
"alnum_prop": 0.6414643119941134,
"repo_name": "nelmiux/CarnotKE",
"id": "a8d29980bdc7903b0537c21bb66f5ad37490c2c3",
"size": "5436",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "jyhton/installer/test/java/org/python/util/install/driver/NormalVerifierTest.java",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ANTLR",
"bytes": "1605"
},
{
"name": "Batchfile",
"bytes": "23996"
},
{
"name": "C",
"bytes": "2514"
},
{
"name": "CSS",
"bytes": "83366"
},
{
"name": "GAP",
"bytes": "129850"
},
{
"name": "Groff",
"bytes": "42"
},
{
"name": "HTML",
"bytes": "12867403"
},
{
"name": "Java",
"bytes": "16007057"
},
{
"name": "JavaScript",
"bytes": "11934"
},
{
"name": "Makefile",
"bytes": "2261"
},
{
"name": "PLSQL",
"bytes": "45772"
},
{
"name": "Perl",
"bytes": "9821"
},
{
"name": "Python",
"bytes": "41375827"
},
{
"name": "R",
"bytes": "2740"
},
{
"name": "Shell",
"bytes": "70220"
},
{
"name": "Visual Basic",
"bytes": "962"
},
{
"name": "XSLT",
"bytes": "218435"
}
],
"symlink_target": ""
} |
<?php
namespace dnocode\awsddb\ar;
interface ActiveRecordInterface
{
/**
hash primarykey of principal index
*/
public static function primaryKey();
/**
rangekey of principal index
*/
public static function rangeKey();
public static function globalIndexes();
/**
* Returns the list of all attribute names of the record.
* @return array list of attribute names.
*/
public function attributes();
/**
* Returns the named attribute value.
* If this record is the result of a query and the attribute is not loaded,
* null will be returned.
* @param string $name the attribute name
* @return mixed the attribute value. Null if the attribute is not set or does not exist.
* @see hasAttribute()
*/
public function getAttribute($name);
/**
* Sets the named attribute value.
* @param string $name the attribute name.
* @param mixed $value the attribute value.
* @see hasAttribute()
*/
public function setAttribute($name, $value);
/**
* Returns a value indicating whether the record has an attribute with the specified name.
* @param string $name the name of the attribute
* @return boolean whether the record has an attribute with the specified name.
*/
public function hasAttribute($name);
/**
* Returns the primary key value(s).
* @param boolean $asArray whether to return the primary key value as an array. If true,
* the return value will be an array with attribute names as keys and attribute values as values.
* Note that for composite primary keys, an array will always be returned regardless of this parameter value.
* @return mixed the primary key value. An array (attribute name => attribute value) is returned if the primary key
* is composite or `$asArray` is true. A string is returned otherwise (null will be returned if
* the key value is null).
*/
public function getPrimaryKey($asArray = false);
/**
* Returns the old primary key value(s).
* This refers to the primary key value that is populated into the record
* after executing a find method (e.g. find(), findOne()).
* The value remains unchanged even if the primary key attribute is manually assigned with a different value.
* @param boolean $asArray whether to return the primary key value as an array. If true,
* the return value will be an array with column name as key and column value as value.
* If this is false (default), a scalar value will be returned for non-composite primary key.
* @property mixed The old primary key value. An array (column name => column value) is
* returned if the primary key is composite. A string is returned otherwise (null will be
* returned if the key value is null).
* @return mixed the old primary key value. An array (column name => column value) is returned if the primary key
* is composite or `$asArray` is true. A string is returned otherwise (null will be returned if
* the key value is null).
*/
public function getOldPrimaryKey($asArray = false);
/**
* Returns a value indicating whether the given set of attributes represents the primary key for this model
* @param array $keys the set of attributes to check
* @return boolean whether the given set of attributes represents the primary key for this model
*/
public static function isPrimaryKey($keys);
/**
* Creates an [[ActiveQueryInterface|ActiveQuery]] instance for query purpose.
*
* The returned [[ActiveQueryInterface|ActiveQuery]] instance can be further customized by calling
* methods defined in [[ActiveQueryInterface]] before `one()` or `all()` is called to return
* populated ActiveRecord instances. For example,
*
* ```php
* // find the customer whose ID is 1
* $customer = Customer::find()->where(['id' => 1])->one();
*
* // find all active customers and order them by their age:
* $customers = Customer::find()
* ->where(['status' => 1])
* ->orderBy('age')
* ->all();
* ```
*
* This method is also called by [[BaseActiveRecord::hasOne()]] and [[BaseActiveRecord::hasMany()]] to
* create a relational query.
*
* You may override this method to return a customized query. For example,
*
* ```php
* class Customer extends ActiveRecord
* {
* public static function find()
* {
* // use CustomerQuery instead of the default ActiveQuery
* return new CustomerQuery(get_called_class());
* }
* }
* ```
*
* The following code shows how to apply a default condition for all queries:
*
* ```php
* class Customer extends ActiveRecord
* {
* public static function find()
* {
* return parent::find()->where(['deleted' => false]);
* }
* }
*
* // Use andWhere()/orWhere() to apply the default condition
* // SELECT FROM customer WHERE `deleted`=:deleted AND age>30
* $customers = Customer::find()->andWhere('age>30')->all();
*
* // Use where() to ignore the default condition
* // SELECT FROM customer WHERE age>30
* $customers = Customer::find()->where('age>30')->all();
*
* @return static|ActiveQueryInterface the newly created [[ActiveQueryInterface|ActiveQuery]] instance.
*/
public static function find();
/**
* Returns a single active record model instance by a primary key or an array of column values.
*
* The method accepts:
*
* - a scalar value (integer or string): query by a single primary key value and return the
* corresponding record (or null if not found).
* - an array of name-value pairs: query by a set of attribute values and return a single record
* matching all of them (or null if not found).
*
* Note that this method will automatically call the `one()` method and return an
* [[ActiveRecordInterface|ActiveRecord]] instance. For example,
*
* ```php
* // find a single customer whose primary key value is 10
* $customer = Customer::findOne(10);
*
* // the above code is equivalent to:
* $customer = Customer::find()->where(['id' => 10])->one();
*
* // find the first customer whose age is 30 and whose status is 1
* $customer = Customer::findOne(['age' => 30, 'status' => 1]);
*
* // the above code is equivalent to:
* $customer = Customer::find()->where(['age' => 30, 'status' => 1])->one();
* ```
*
* @param mixed $condition primary key value or a set of column values
* @return static ActiveRecord instance matching the condition, or null if nothing matches.
*/
public static function findOne($condition);
/**
* Returns a list of active record models that match the specified primary key value(s) or a set of column values.
*
* The method accepts:
*
* - a scalar value (integer or string): query by a single primary key value and return an array containing the
* corresponding record (or an empty array if not found).
* - an array of scalar values (integer or string): query by a list of primary key values and return the
* corresponding records (or an empty array if none was found).
* Note that an empty condition will result in an empty result as it will be interpreted as a search for
* primary keys and not an empty `WHERE` condition.
* - an array of name-value pairs: query by a set of attribute values and return an array of records
* matching all of them (or an empty array if none was found).
*
* Note that this method will automatically call the `all()` method and return an array of
* [[ActiveRecordInterface|ActiveRecord]] instances. For example,
*
* ```php
* // find the customers whose primary key value is 10
* $customers = Customer::findAll(10);
*
* // the above code is equivalent to:
* $customers = Customer::find()->where(['id' => 10])->all();
*
* // find the customers whose primary key value is 10, 11 or 12.
* $customers = Customer::findAll([10, 11, 12]);
*
* // the above code is equivalent to:
* $customers = Customer::find()->where(['id' => [10, 11, 12]])->all();
*
* // find customers whose age is 30 and whose status is 1
* $customers = Customer::findAll(['age' => 30, 'status' => 1]);
*
* // the above code is equivalent to:
* $customers = Customer::find()->where(['age' => 30, 'status' => 1])->all();
* ```
*
* @param mixed $condition primary key value or a set of column values
* @return array an array of ActiveRecord instance, or an empty array if nothing matches.
*/
// public static function findAll($condition);
    /**
     * Updates records using the provided attribute values and conditions.
     * For example, to change the status to be 1 for all customers whose status is 2:
     *
     * ```php
     * Customer::updateAll(['status' => 1], ['status' => '2']);
     * ```
     *
     * @param array $attributes attribute values (name-value pairs) to be saved for the record.
     * Unlike [[update()]] these are not going to be validated.
     * @param array $condition the condition that matches the records that should get updated.
     * Please refer to [[QueryInterface::where()]] on how to specify this parameter.
     * An empty condition will match all records.
     * @return integer the number of rows updated
     */
    public static function updateAll($attributes, $condition = null);
    /**
     * Deletes records using the provided conditions.
     * WARNING: If you do not specify any condition, this method will delete ALL rows in the table.
     *
     * For example, to delete all customers whose status is 3:
     *
     * ```php
     * Customer::deleteAll(['status' => 3]);
     * ```
     *
     * @param array $condition the condition that matches the records that should get deleted.
     * Please refer to [[QueryInterface::where()]] on how to specify this parameter.
     * An empty condition will match all records.
     * @return integer the number of rows deleted
     */
    public static function deleteAll($condition = null);
    /**
     * Saves the current record.
     *
     * This method will call [[insert()]] when [[getIsNewRecord()|isNewRecord]] is true, or [[update()]]
     * when [[getIsNewRecord()|isNewRecord]] is false.
     *
     * For example, to save a customer record:
     *
     * ```php
     * $customer = new Customer; // or $customer = Customer::findOne($id);
     * $customer->name = $name;
     * $customer->email = $email;
     * $customer->save();
     * ```
     *
     * @param boolean $runValidation whether to perform validation before saving the record.
     * If the validation fails, the record will not be saved to database. `false` will be returned
     * in this case.
     * @param array $attributeNames list of attributes that need to be saved. Defaults to null,
     * meaning all attributes that are loaded from DB will be saved.
     * @return boolean whether the saving succeeds
     */
    public function save($runValidation = true, $attributeNames = null);
/**
* Inserts the record into the database using the attribute values of this record.
*
* Usage example:
*
* ```php
* $customer = new Customer;
* $customer->name = $name;
* $customer->email = $email;
* $customer->insert();
* ```
*
* @param boolean $runValidation whether to perform validation before saving the record.
* If the validation fails, the record will not be inserted into the database.
* @param array $attributes list of attributes that need to be saved. Defaults to null,
* meaning all attributes that are loaded from DB will be saved.
* @return boolean whether the attributes are valid and the record is inserted successfully.
*/
public function insert($runValidation = true, $attributes = null);
/**
* Saves the changes to this active record into the database.
*
* Usage example:
*
* ```php
* $customer = Customer::findOne($id);
* $customer->name = $name;
* $customer->email = $email;
* $customer->update();
* ```
*
* @param boolean $runValidation whether to perform validation before saving the record.
* If the validation fails, the record will not be inserted into the database.
* @param array $attributeNames list of attributes that need to be saved. Defaults to null,
* meaning all attributes that are loaded from DB will be saved.
* @return integer|boolean the number of rows affected, or false if validation fails
* or updating process is stopped for other reasons.
* Note that it is possible that the number of rows affected is 0, even though the
* update execution is successful.
*/
public function update($runValidation = true, $attributeNames = null);
/**
* Deletes the record from the database.
*
* @return integer|boolean the number of rows deleted, or false if the deletion is unsuccessful for some reason.
* Note that it is possible that the number of rows deleted is 0, even though the deletion execution is successful.
*/
public function delete();
/**
* Returns a value indicating whether the current record is new (not saved in the database).
* @return boolean whether the record is new and should be inserted when calling [[save()]].
*/
public function getIsNewRecord();
/**
* Returns a value indicating whether the given active record is the same as the current one.
* Two [[getIsNewRecord()|new]] records are considered to be not equal.
* @param static $record record to compare to
* @return boolean whether the two active records refer to the same row in the same database table.
*/
public function equals($record);
    /**
     * Returns the relation map between classes and properties.
     *
     * NOTE(review): the exact shape of the returned map is not defined by this
     * interface — implementations appear to map relation property names to the
     * related record classes; confirm against a concrete implementation.
     *
     * @return array the relation map (property name => related class), presumably.
     */
    public function relationsMap();
/**
* Establishes the relationship between two records.
*
* The relationship is established by setting the foreign key value(s) in one record
* to be the corresponding primary key value(s) in the other record.
* The record with the foreign key will be saved into database without performing validation.
*
* If the relationship involves a junction table, a new row will be inserted into the
* junction table which contains the primary key values from both records.
*
* This method requires that the primary key value is not null.
*
* @param string $name the case sensitive name of the relationship.
* @param static $model the record to be linked with the current one.
* @param array $extraColumns additional column values to be saved into the junction table.
* This parameter is only meaningful for a relationship involving a junction table
* (i.e., a relation set with `[[ActiveQueryInterface::via()]]`.)
*/
// public function link($name, $model, $extraColumns = []);
/**
* Destroys the relationship between two records.
*
* The record with the foreign key of the relationship will be deleted if `$delete` is true.
* Otherwise, the foreign key will be set null and the record will be saved without validation.
*
* @param string $name the case sensitive name of the relationship.
* @param static $model the model to be unlinked from the current one.
* @param boolean $delete whether to delete the model that contains the foreign key.
* If false, the model's foreign key will be set null and saved.
* If true, the model containing the foreign key will be deleted.
*/
// public function unlink($name, $model, $delete = false);
/**
* Returns the connection used by this AR class.
* @return mixed the database connection used by this AR class.
*/
public static function getDb();
}
| {
"content_hash": "0b433b54dca58e73244e30645d6581e5",
"timestamp": "",
"source": "github",
"line_count": 387,
"max_line_length": 119,
"avg_line_length": 41.96640826873385,
"alnum_prop": 0.6479896558093713,
"repo_name": "dnocode/dnocode-yii2-awsddb",
"id": "188a6f0b4d65fea0d8251ecaac342b42c0f2b50c",
"size": "16385",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ar/ActiveRecordInterface.php",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "PHP",
"bytes": "130882"
}
],
"symlink_target": ""
} |
<!-- Radio-button list property editor: renders one radio input per configured
     prevalue item. The selected item's id is bound to model.value while the
     item's value is what gets displayed as the label text. The input name is
     scoped by the property alias so multiple editors on one page don't share
     a radio group. -->
<div class="umb-editor umb-radiobuttons" ng-controller="Umbraco.PropertyEditors.RadioButtonsController">
    <ul class="unstyled">
        <li ng-repeat="item in model.config.items">
            <label class="radio umb-radiobuttons__label">
                <input type="radio" name="radiobuttons-{{model.alias}}"
                       value="{{item.id}}"
                       ng-model="model.value"
                       class="umb-radiobuttons__input" />
                <div class="umb-radiobuttons__state">
                    <i class="umb-radiobuttons__icon icon-check" aria-hidden="true"></i>
                    <span class="umb-radiobuttons__label-text">{{item.value}}</span>
                </div>
            </label>
        </li>
    </ul>
</div>
| {
"content_hash": "81164a21cd5ac83143d17a8d9e9fead1",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 105,
"avg_line_length": 46,
"alnum_prop": 0.5127877237851662,
"repo_name": "aaronpowell/Umbraco-CMS",
"id": "7af6491641edc2365bb058d6f82051c7362dfd3f",
"size": "784",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev-v7",
"path": "src/Umbraco.Web.UI.Client/src/views/propertyeditors/radiobuttons/radiobuttons.html",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "ASP",
"bytes": "232039"
},
{
"name": "Batchfile",
"bytes": "711"
},
{
"name": "C#",
"bytes": "18610658"
},
{
"name": "CSS",
"bytes": "598135"
},
{
"name": "HTML",
"bytes": "828235"
},
{
"name": "JavaScript",
"bytes": "3755324"
},
{
"name": "PLpgSQL",
"bytes": "80494"
},
{
"name": "PowerShell",
"bytes": "69263"
},
{
"name": "XSLT",
"bytes": "50045"
}
],
"symlink_target": ""
} |
/**
 * These are all the transformations that occur *within* block-level
 * tags like paragraphs, headers, and list items.
 *
 * NOTE: this is a sequential text-rewriting pipeline — each sub-parser
 * consumes the output of the previous one, so the order of the calls
 * below is significant and must not be changed casually.
 */
showdown.subParser('spanGamut', function (text, options, globals) {
  'use strict';

  // Code spans first, so their contents are protected from later passes.
  text = showdown.subParser('codeSpans')(text, options, globals);
  text = showdown.subParser('escapeSpecialCharsWithinTagAttributes')(text, options, globals);
  text = showdown.subParser('encodeBackslashEscapes')(text, options, globals);

  // Process anchor and image tags. Images must come first,
  // because ![foo][f] looks like an anchor.
  text = showdown.subParser('images')(text, options, globals);
  text = showdown.subParser('anchors')(text, options, globals);

  // Make links out of things like `<http://example.com/>`
  // Must come after _DoAnchors(), because you can use < and >
  // delimiters in inline links like [this](<url>).
  text = showdown.subParser('autoLinks')(text, options, globals);
  text = showdown.subParser('encodeAmpsAndAngles')(text, options, globals);
  text = showdown.subParser('italicsAndBold')(text, options, globals);

  // Do hard breaks: two or more trailing spaces before a newline become <br />.
  text = text.replace(/ +\n/g, ' <br />\n');

  return text;
});
| {
"content_hash": "17be4bab9a6fd2f96ee6137ab55e618d",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 93,
"avg_line_length": 40.03448275862069,
"alnum_prop": 0.7011197243755384,
"repo_name": "bsansouci/showdown",
"id": "0193c3d54906f91df764d829832e025d44f97dc4",
"size": "1161",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/subParsers/spanGamut.js",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "17845"
},
{
"name": "JavaScript",
"bytes": "123021"
}
],
"symlink_target": ""
} |
import sys
from mauto import gui
from mauto.api import library
def show():
    """Show the mauto GUI (delegates to ``gui.show``)."""
    gui.show()
def select_repo():
    """Open the repository selection dialog (delegates to ``gui.select_repo``)."""
    gui.select_repo()
def list_macros():
    """Return the names of all macros known to the library.

    NOTE(review): this returns ``dict.keys()`` directly — a list on Python 2
    but a view object on Python 3; confirm callers do not index the result.
    """
    return library.macros.keys()
def new_macro(*arg, **kwds):
    """Create a new macro, forwarding all arguments to ``library.new_macro``."""
    return library.new_macro(*arg, **kwds)
def get_macro(name):
    """Return the macro called ``name``, or whatever ``library.get`` returns
    for unknown names (presumably ``None`` — see ``remove_macro``)."""
    return library.get(name)
def remove_macro(name):
    """Remove the macro called ``name``; unknown names are a silent no-op."""
    # library.get() doubles as an existence check before removal.
    if library.get(name):
        library.remove_macro(name)
def save_macro(name):
    """Persist the macro called ``name`` (delegates to ``library.save_macro``)."""
    return library.save_macro(name)
def get_filepath(name):
    """Return the file path backing the macro called ``name``."""
    return library.get_filepath(name)
def __main__():
    """Run the mauto layout as a standalone Qt application."""
    app = gui.QtGui.QApplication(sys.argv)
    w = gui.Layout()
    w.show()
    # exec_() blocks until the Qt event loop exits; propagate its exit code.
    sys.exit(app.exec_())
# Allow running this module directly as a standalone application.
if __name__ == "__main__":
    __main__()
| {
"content_hash": "f416cb853539322e5df49dace06bf2fd",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 42,
"avg_line_length": 14.416666666666666,
"alnum_prop": 0.6257225433526011,
"repo_name": "csaez/mauto",
"id": "9233f7c2437fb7455f008dd6631a612f6d896ac5",
"size": "692",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mauto/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "52303"
}
],
"symlink_target": ""
} |
<?php
/**
* VAT validation controller
*
* @category Mage
* @package Mage_Adminhtml
* @author Magento Core Team <core@magentocommerce.com>
*/
class Mage_Adminhtml_Customer_System_Config_ValidatevatController extends Mage_Adminhtml_Controller_Action
{
    /**
     * Run the VAT number check for the country/vat request parameters.
     *
     * @return Varien_Object validation result produced by the customer helper
     */
    protected function _validate()
    {
        $request = $this->getRequest();
        return Mage::helper('customer')->checkVatNumber(
            $request->getParam('country'),
            $request->getParam('vat')
        );
    }

    /**
     * Respond with "1" when the submitted VAT number is valid, "0" otherwise.
     *
     * @return void
     */
    public function validateAction()
    {
        $isValid = (int)$this->_validate()->getIsValid();
        $this->getResponse()->setBody($isValid);
    }

    /**
     * Respond with a JSON document describing the validation outcome:
     * the validity flag, the customer group id derived from the VAT number
     * and whether the validation request itself succeeded.
     *
     * @return void
     */
    public function validateAdvancedAction()
    {
        $result = $this->_validate();

        // ID of the store where the order is placed; cast only when supplied.
        $storeId = $this->getRequest()->getParam('store_id');
        if ($storeId !== null) {
            $storeId = (int)$storeId;
        }

        $groupId = Mage::helper('customer')->getCustomerGroupIdBasedOnVatNumber(
            $this->getRequest()->getParam('country'), $result, $storeId
        );

        /** @var $coreHelper Mage_Core_Helper_Data */
        $coreHelper = Mage::helper('core');
        $this->getResponse()->setBody($coreHelper->jsonEncode(array(
            'valid'   => $result->getIsValid(),
            'group'   => $groupId,
            'success' => $result->getRequestSuccess(),
        )));
    }
}
| {
"content_hash": "dd0a190e23e5af234f02294ab0248622",
"timestamp": "",
"source": "github",
"line_count": 68,
"max_line_length": 106,
"avg_line_length": 26.176470588235293,
"alnum_prop": 0.5567415730337079,
"repo_name": "andou/magento-bare",
"id": "b16cf880892122148dceeab461756d10319f9a39",
"size": "2738",
"binary": false,
"copies": "15",
"ref": "refs/heads/master",
"path": "project/app/app/code/core/Mage/Adminhtml/controllers/Customer/System/Config/ValidatevatController.php",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "ActionScript",
"bytes": "19946"
},
{
"name": "ApacheConf",
"bytes": "6736"
},
{
"name": "Batchfile",
"bytes": "1036"
},
{
"name": "CSS",
"bytes": "1655606"
},
{
"name": "HTML",
"bytes": "5682039"
},
{
"name": "JavaScript",
"bytes": "1036420"
},
{
"name": "PHP",
"bytes": "44372363"
},
{
"name": "PowerShell",
"bytes": "1028"
},
{
"name": "Puppet",
"bytes": "965"
},
{
"name": "Ruby",
"bytes": "393"
},
{
"name": "Shell",
"bytes": "2036"
},
{
"name": "XSLT",
"bytes": "2135"
}
],
"symlink_target": ""
} |
<?php
use Illuminate\Database\Migrations\Migration;
use Illuminate\Database\Schema\Blueprint;
class CreateUsersTable extends Migration {

    /**
     * Run the migrations.
     *
     * Creates the `users` table holding login credentials and the
     * "remember me" token used by Laravel's authentication driver.
     *
     * @return void
     */
    public function up()
    {
        Schema::create('users', function(Blueprint $table)
        {
            $table->increments('id');
            $table->string('username');
            $table->string('email');
            $table->string('password');
            // rememberToken() creates a nullable VARCHAR(100) `remember_token`
            // column. The previous plain string() column was NOT NULL, which
            // breaks Auth::logout() when the framework nulls out the token.
            $table->rememberToken();
            $table->timestamps();
        });
    }

    /**
     * Reverse the migrations.
     *
     * @return void
     */
    public function down()
    {
        Schema::drop('users');
    }

}
| {
"content_hash": "c14174c4c94ef1d73544402de837a991",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 52,
"avg_line_length": 15.837837837837839,
"alnum_prop": 0.6313993174061433,
"repo_name": "Daniel-HM/Portfolio",
"id": "fe4bd6d153997b4a4b4b1f8fcb0f9fedacd4ad46",
"size": "586",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "database/migrations/2014_07_03_153421_create_users_table.php",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "ApacheConf",
"bytes": "356"
},
{
"name": "CSS",
"bytes": "6084"
},
{
"name": "JavaScript",
"bytes": "17431"
},
{
"name": "PHP",
"bytes": "112752"
}
],
"symlink_target": ""
} |
class SNMPwalk
  attr_accessor :switchport

  # Builds a { switch_name => { if_index => port_name } } lookup table by
  # walking IF-MIB::ifDescr (1.3.6.1.2.1.2.2.1.2) on every switch.
  #
  # switchips - array of switch IP address strings; each must be resolvable
  #             via reverse DNS (the short hostname becomes the hash key).
  #
  # NOTE(review): the IP is interpolated straight into a shell command, so
  # +switchips+ must only ever come from trusted configuration; consider an
  # SNMP library instead of shelling out to avoid injection entirely.
  def initialize(switchips)
    print "Getting switch interface names "
    @switchport = {}
    switchips.each do |switchip|
      # Short hostname (domain stripped) keys the outer hash.
      switch = Resolv.new.getname(switchip).split(/\./)[0]
      ports = {}
      snmpoutput = `snmpwalk -v2c -c public #{switchip} 1.3.6.1.2.1.2.2.1.2`
      snmpoutput.each_line do |line|
        split = line.split(/\s/)
        # The last dotted component of the OID is the interface index.
        port = split[0].split(/\./).last
        portname =
          if split[3] =~ /GigabitEthernet/
            # e.g. "GigabitEthernet1/0/24" -> "1/0/24"
            split[3].scan(/\d+/).join('/')
          else
            split[3]
          end
        ports[port] = portname
      end
      @switchport[switch] = ports
      print "."
    end
    puts " done."
    @switchport
  end

  # Maps an agent address (switch name) and interface index to the cached
  # interface name. Falls back to the agent address when either the switch
  # or the interface is unknown — the previous version raised NoMethodError
  # (nil[]) for an unknown switch.
  #
  # Keys are coerced with to_s to preserve the old "#{...}" lookup behaviour.
  def self.mapswitchportname(agent_address, iface)
    ports = $switchportnames.switchport[agent_address.to_s]
    (ports && ports[iface.to_s]) || agent_address
  end
end
| {
"content_hash": "b7e325506ea07bc1ce27234ba8be28b7",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 78,
"avg_line_length": 26.725,
"alnum_prop": 0.5481758652946679,
"repo_name": "lazy404/sflow",
"id": "9aa869da4894d6f7065ecbb8b131c9236868c8d2",
"size": "1069",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "lib/sflow/snmp/iface_names.rb",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Ruby",
"bytes": "21098"
}
],
"symlink_target": ""
} |
// Delegate callbacks for XipSlideDownVC. Per the Will/Did naming convention,
// these presumably bracket the open/close transition of the slide-down top
// view — confirm against the XipSlideDownVC implementation. All methods are
// optional.
@protocol XipSlideDownVCDelegate <NSObject>
@optional
- (void) topWillOpen;   // before the top view starts opening
- (void) topDidOpen;    // after the top view has opened
- (void) topWillClose;  // before the top view starts closing
- (void) topDidClose;   // after the top view has closed
@end
| {
"content_hash": "572b555d577917f3596a602fbaec93bf",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 43,
"avg_line_length": 15,
"alnum_prop": 0.7266666666666667,
"repo_name": "xiplias/XipSlideDown",
"id": "33682a9215c08f3e328ad0b027b671013181c596",
"size": "150",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Classes/XipSlideDownVCDelegate.h",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Objective-C",
"bytes": "4400"
},
{
"name": "Ruby",
"bytes": "5308"
}
],
"symlink_target": ""
} |
package org.apache.jackrabbit.oak.spi.security.authentication.external;
import java.util.HashMap;
import java.util.Map;
import javax.jcr.Credentials;
import javax.jcr.GuestCredentials;
import javax.jcr.SimpleCredentials;
import javax.security.auth.login.AppConfigurationEntry;
import javax.security.auth.login.Configuration;
import javax.security.auth.login.LoginException;
import org.apache.jackrabbit.api.security.user.User;
import org.apache.jackrabbit.api.security.user.UserManager;
import org.apache.jackrabbit.oak.api.ContentSession;
import org.apache.jackrabbit.oak.security.authentication.user.LoginModuleImpl;
import org.apache.jackrabbit.oak.spi.security.authentication.external.basic.DefaultSyncContext;
import org.apache.jackrabbit.oak.spi.security.authentication.external.impl.ExternalLoginModule;
import org.apache.jackrabbit.oak.spi.security.user.UserConstants;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import static junit.framework.TestCase.fail;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertNull;
/**
 * Testing improvements made for <a href="https://issues.apache.org/jira/browse/OAK-3508">OAK-3508</a>
 *
 * The JAAS chain under test combines an optional pre-authentication module
 * with the (sufficient) ExternalLoginModule and the (sufficient) default
 * LoginModuleImpl; see {@link #getConfiguration()}. The tests verify login,
 * sync and re-sync behaviour for local, external and foreign identities when
 * pre-authentication is enabled.
 */
public class PreAuthDefaultExternalLoginModuleTest extends ExternalLoginModuleTestBase {
    // Options passed to the pre-auth module entry; none are needed for these tests.
    private Map<String, Object> preAuthOptions = new HashMap<>();
    @Before
    public void before() throws Exception {
        super.before();
    }
    @After
    public void after() throws Exception {
        super.after();
    }
    /**
     * Example {
     *    your.org.PreAuthenticationLoginModule optional;
     *    org.apache.jackrabbit.oak.security.authentication.user.LoginModuleImpl optional;
     *    org.apache.jackrabbit.oak.spi.security.authentication.external.impl.ExternalLoginModule sufficient
     *              sync.handlerName="your-synchandler_name"
     *              idp.name="your_idp_name";
     *    };
     */
    @Override
    protected Configuration getConfiguration() {
        return new Configuration() {
            @Override
            public AppConfigurationEntry[] getAppConfigurationEntry(String s) {
                // 1) pre-auth module (optional): marks the shared state as pre-authenticated
                AppConfigurationEntry entry1 = new AppConfigurationEntry(
                        PreAuthLoginModule.class.getName(),
                        AppConfigurationEntry.LoginModuleControlFlag.OPTIONAL,
                        preAuthOptions);
                // 2) external login module (sufficient): authenticates/syncs external identities
                AppConfigurationEntry entry2 = new AppConfigurationEntry(
                        ExternalLoginModule.class.getName(),
                        AppConfigurationEntry.LoginModuleControlFlag.SUFFICIENT,
                        options);
                // 3) default login module (sufficient): handles local repository users
                AppConfigurationEntry entry3 = new AppConfigurationEntry(
                        LoginModuleImpl.class.getName(),
                        AppConfigurationEntry.LoginModuleControlFlag.SUFFICIENT,
                        new HashMap<String, Object>());
                return new AppConfigurationEntry[]{entry1, entry2, entry3};
            }
        };
    }
    // Pre-auth with an unknown id must fail and must not create/sync any user.
    @Test
    public void testNonExistingUser() throws Exception {
        PreAuthCredentials creds = new PreAuthCredentials("nonExisting");
        ContentSession cs = null;
        try {
            cs = login(creds);
            fail();
        } catch (LoginException e) {
            // success
        } finally {
            if (cs != null) {
                cs.close();
            }
            assertEquals(PreAuthCredentials.PRE_AUTH_DONE, creds.getMessage());
            root.refresh();
            assertNull(getUserManager(root).getAuthorizable(TestIdentityProvider.ID_TEST_USER));
        }
    }
    // Pre-auth succeeds for an existing local (repository) user.
    @Test
    public void testLocalUser() throws Exception {
        User testUser = getTestUser();
        PreAuthCredentials creds = new PreAuthCredentials(testUser.getID());
        ContentSession cs = null;
        try {
            cs = login(creds);
            assertEquals(PreAuthCredentials.PRE_AUTH_DONE, creds.getMessage());
            assertEquals(testUser.getID(), cs.getAuthInfo().getUserID());
        } finally {
            if (cs != null) {
                cs.close();
            }
        }
    }
    // An external user is synced into the repository upon first pre-auth login.
    @Test
    public void testExternalUser() throws Exception {
        PreAuthCredentials creds = new PreAuthCredentials(TestIdentityProvider.ID_TEST_USER);
        ContentSession cs = null;
        try {
            cs = login(creds);
            assertEquals(PreAuthCredentials.PRE_AUTH_DONE, creds.getMessage());
            assertEquals(TestIdentityProvider.ID_TEST_USER, cs.getAuthInfo().getUserID());
            // user needs to be synchronized upon login
            root.refresh();
            assertNotNull(getUserManager(root).getAuthorizable(TestIdentityProvider.ID_TEST_USER));
        } finally {
            if (cs != null) {
                cs.close();
            }
        }
    }
    // A previously synced user whose sync has expired must be re-synced on login.
    @Test
    public void testExistingExternalReSync() throws Exception {
        // sync user upfront
        UserManager uMgr = getUserManager(root);
        SyncContext syncContext = new DefaultSyncContext(syncConfig, idp, uMgr, getValueFactory(root));
        SyncResult result = syncContext.sync(idp.getUser(TestIdentityProvider.ID_TEST_USER));
        long lastSynced = result.getIdentity().lastSynced();
        root.commit();
        PreAuthCredentials creds = new PreAuthCredentials(TestIdentityProvider.ID_TEST_USER);
        ContentSession cs = null;
        try {
            // wait until the synced user is expired
            waitUntilExpired(uMgr.getAuthorizable(TestIdentityProvider.ID_TEST_USER, User.class), root, syncConfig.user().getExpirationTime());
            cs = login(creds);
            assertEquals(PreAuthCredentials.PRE_AUTH_DONE, creds.getMessage());
            assertEquals(TestIdentityProvider.ID_TEST_USER, cs.getAuthInfo().getUserID());
            root.refresh();
            User u = getUserManager(root).getAuthorizable(TestIdentityProvider.ID_TEST_USER, User.class);
            assertNotNull(u);
            // user _should_ be re-synced
            assertFalse(lastSynced == DefaultSyncContext.createSyncedIdentity(u).lastSynced());
        } finally {
            if (cs != null) {
                cs.close();
            }
        }
    }
    // A synced user that has not expired must NOT be re-synced on login.
    @Test
    public void testExistingExternalNoSync() throws Exception {
        // prevent expiration of the user
        syncConfig.user().setExpirationTime(Long.MAX_VALUE);
        // sync user upfront
        SyncContext syncContext = new DefaultSyncContext(syncConfig, idp, getUserManager(root), getValueFactory(root));
        SyncResult result = syncContext.sync(idp.getUser(TestIdentityProvider.ID_TEST_USER));
        long lastSynced = result.getIdentity().lastSynced();
        root.commit();
        PreAuthCredentials creds = new PreAuthCredentials(TestIdentityProvider.ID_TEST_USER);
        ContentSession cs = null;
        try {
            cs = login(creds);
            assertEquals(PreAuthCredentials.PRE_AUTH_DONE, creds.getMessage());
            assertEquals(TestIdentityProvider.ID_TEST_USER, cs.getAuthInfo().getUserID());
            root.refresh();
            User u = getUserManager(root).getAuthorizable(TestIdentityProvider.ID_TEST_USER, User.class);
            assertNotNull(u);
            // user _should_ not have been re-synced
            assertEquals(lastSynced, DefaultSyncContext.createSyncedIdentity(u).lastSynced());
        } finally {
            if (cs != null) {
                cs.close();
            }
        }
    }
    // A user synced from a *foreign* IDP (not in the login chain) must not be
    // touched by the ExternalLoginModule, yet login still succeeds via pre-auth.
    @Test
    public void testForeign() throws Exception {
        // sync foreign user into the repository
        // NOTE: that should be considered a bug by the tool that does the sync
        // as it uses an IDP that is not configured with the login-chain!
        ExternalIdentityProvider foreign = new TestIdentityProvider("foreign");
        SyncContext syncContext = new DefaultSyncContext(syncConfig, foreign, getUserManager(root), getValueFactory(root));
        SyncResult result = syncContext.sync(foreign.getUser(TestIdentityProvider.ID_TEST_USER));
        long lastSynced = result.getIdentity().lastSynced();
        root.commit();
        PreAuthCredentials creds = new PreAuthCredentials(TestIdentityProvider.ID_TEST_USER);
        ContentSession cs = null;
        try {
            // login should succeed due the fact that the _LoginModuleImpl_ succeeds for
            // an existing authorizable if _pre_auth_ is enabled.
            cs = login(creds);
            assertEquals(PreAuthCredentials.PRE_AUTH_DONE, creds.getMessage());
            // foreign user _must_ not have been touched by the _ExternalLoginModule_
            root.refresh();
            User u = getUserManager(root).getAuthorizable(TestIdentityProvider.ID_TEST_USER, User.class);
            assertNotNull(u);
            assertEquals(lastSynced, DefaultSyncContext.createSyncedIdentity(u).lastSynced());
        } finally {
            if (cs != null) {
                cs.close();
            }
        }
    }
    // Pre-auth credentials carrying a null user id must be rejected.
    @Test
    public void testInvalidPreAuthCreds() throws Exception {
        PreAuthCredentials creds = new PreAuthCredentials(null);
        ContentSession cs = null;
        try {
            cs = login(creds);
            fail();
        } catch (LoginException e) {
            // success
        } finally {
            if (cs != null) {
                cs.close();
            }
            assertEquals(PreAuthCredentials.PRE_AUTH_FAIL, creds.getMessage());
            root.refresh();
            assertNull(getUserManager(root).getAuthorizable(TestIdentityProvider.ID_TEST_USER));
        }
    }
    // GuestCredentials still resolve to the anonymous user with the chain in place.
    @Test
    public void testGuest() throws Exception {
        ContentSession cs = null;
        try {
            cs = login(new GuestCredentials());
            assertEquals(UserConstants.DEFAULT_ANONYMOUS_ID, cs.getAuthInfo().getUserID());
        } finally {
            if (cs != null) {
                cs.close();
            }
        }
    }
    // Regular (non-pre-auth) SimpleCredentials login for a local user still works.
    @Test
    public void testSimpleLocal() throws Exception {
        User testUser = getTestUser();
        ContentSession cs = null;
        try {
            cs = login(new SimpleCredentials(testUser.getID(), testUser.getID().toCharArray()));
            assertEquals(testUser.getID(), cs.getAuthInfo().getUserID());
        } finally {
            if (cs != null) {
                cs.close();
            }
        }
    }
    // A disabled local user must be rejected even with valid credentials.
    @Test
    public void testSimpleLocalDisabled() throws Exception {
        User testUser = getTestUser();
        testUser.disable("disable");
        root.commit();
        ContentSession cs = null;
        try {
            cs = login(new SimpleCredentials(testUser.getID(), testUser.getID().toCharArray()));
            fail();
        } catch (LoginException e) {
            // success
        } finally {
            if (cs != null) {
                cs.close();
            }
        }
    }
    // SimpleCredentials for an unknown user must fail.
    @Test
    public void testSimpleNonExisting() throws Exception {
        ContentSession cs = null;
        try {
            cs = login(new SimpleCredentials("nonExisting", new char[0]));
            fail();
        } catch (LoginException e) {
            // success
        } finally {
            if (cs != null) {
                cs.close();
            }
        }
    }
    // SimpleCredentials accepted by the IDP must log in and sync the external user.
    @Test
    public void testSimpleExternal() throws Exception {
        // verify that authentication against the IDP succeeds with the given creds.
        Credentials creds = new SimpleCredentials(TestIdentityProvider.ID_TEST_USER, new char[0]);
        ExternalUser externalUser = idp.authenticate(creds);
        assertNotNull(externalUser);
        assertEquals(TestIdentityProvider.ID_TEST_USER, externalUser.getId());
        // => repo login must also succeed and the user must be synced.
        ContentSession cs = null;
        try {
            cs = login(creds);
            assertEquals(TestIdentityProvider.ID_TEST_USER, cs.getAuthInfo().getUserID());
            root.refresh();
            User u = getUserManager(root).getAuthorizable(TestIdentityProvider.ID_TEST_USER, User.class);
            assertNotNull(u);
        } finally {
            if (cs != null) {
                cs.close();
            }
        }
    }
}
| {
"content_hash": "ca7c197b94bce524a4dfac7ac1711ebd",
"timestamp": "",
"source": "github",
"line_count": 345,
"max_line_length": 143,
"avg_line_length": 35.82608695652174,
"alnum_prop": 0.6150485436893204,
"repo_name": "francescomari/jackrabbit-oak",
"id": "74a84889a3307dc2661dd221b28aef49bba69884",
"size": "13162",
"binary": false,
"copies": "1",
"ref": "refs/heads/trunk",
"path": "oak-auth-external/src/test/java/org/apache/jackrabbit/oak/spi/security/authentication/external/PreAuthDefaultExternalLoginModuleTest.java",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "24043"
},
{
"name": "Groovy",
"bytes": "145590"
},
{
"name": "HTML",
"bytes": "1406"
},
{
"name": "Java",
"bytes": "26649321"
},
{
"name": "JavaScript",
"bytes": "42991"
},
{
"name": "Perl",
"bytes": "7585"
},
{
"name": "Shell",
"bytes": "17516"
}
],
"symlink_target": ""
} |
using Microsoft.VisualStudio.TestTools.UnitTesting;
using System;
using System.Collections.Generic;
using System.Linq;
using System.Net.Http;
using System.Text;
using System.Threading.Tasks;
namespace Moksy.IntegrationTest
{
[TestClass]
public class ProxyTests : TestBase
{
        /// <summary>
        /// Default constructor; the proxy connection is set up per-test in Initialize().
        /// </summary>
        public ProxyTests()
        {
        }
        /// <summary>
        /// Runs before each test: connects to the Moksy proxy on port 10011 and
        /// removes any simulations left over from previous tests.
        /// </summary>
        [TestInitialize]
        public void Initialize()
        {
            Proxy = new Proxy(10011);
            Proxy.DeleteAll();
        }
Proxy Proxy;
        /// <summary>
        /// A freshly reset proxy reports zero simulations.
        /// </summary>
        [TestMethod]
        public void SanityNoSimulations()
        {
            var all = Proxy.GetAll();
            Assert.AreEqual(0, all.Count());
        }
        /// <summary>
        /// Adding one simulation makes GetAll() return it with its condition
        /// (pattern, method, request header) and response (status, body) intact.
        /// </summary>
        [TestMethod]
        public void AddOneSimulationAndGetAll()
        {
            var simulation = Moksy.Common.SimulationFactory.When.Get().From("/Product").With.Header("MyHeader", "HeaderValue").Return.Body("This content").With.StatusCode(System.Net.HttpStatusCode.PreconditionFailed);
            Proxy.Add(simulation);
            var all = Proxy.GetAll();
            Assert.AreEqual(1, all.Count());
            var first = all.First();
            Assert.AreEqual("/Product", first.Condition.SimulationConditionContent.Pattern);
            Assert.AreEqual(HttpMethod.Get, first.Condition.SimulationConditionContent.HttpMethod);
            Assert.AreEqual(1, first.Condition.RequestHeaders.Count);
            Assert.AreEqual("MyHeader", first.Condition.RequestHeaders[0].Name);
            Assert.AreEqual("HeaderValue", first.Condition.RequestHeaders[0].Value);
            Assert.AreEqual(System.Net.HttpStatusCode.PreconditionFailed, first.Response.SimulationResponseContent.HttpStatusCode);
            Assert.AreEqual("This content", first.Response.SimulationResponseContent.Content);
        }
        /// <summary>
        /// A named simulation can be retrieved by its exact name.
        /// </summary>
        [TestMethod]
        public void AddOneSimulationAndGetByName()
        {
            var simulation = Moksy.Common.SimulationFactory.New("FirstOne").Get().From("/Product").With.Header("MyHeader", "HeaderValue").Return.Body("This content").With.StatusCode(System.Net.HttpStatusCode.PreconditionFailed);
            Proxy.Add(simulation);
            var existing = Proxy.GetByName("FirstOne");
            Assert.IsNotNull(existing);
        }
        /// <summary>
        /// GetByName() is case sensitive: a differently-cased name finds nothing.
        /// </summary>
        [TestMethod]
        public void AddOneSimulationAndGetByNameFailsBecauseCaseSensitive()
        {
            var simulation = Moksy.Common.SimulationFactory.New("FirstOne").Get().From("/Product").With.Header("MyHeader", "HeaderValue").Return.Body("This content").With.StatusCode(System.Net.HttpStatusCode.PreconditionFailed);
            Proxy.Add(simulation);
            var existing = Proxy.GetByName("FirstONE");
            Assert.IsNull(existing);
        }
        /// <summary>
        /// GetByName() returns null for a name that was never added.
        /// </summary>
        [TestMethod]
        public void AddOneSimulationAndGetByNameNotExist()
        {
            var simulation = Moksy.Common.SimulationFactory.New("FirstOne").Get().From("/Product").With.Header("MyHeader", "HeaderValue").Return.Body("This content").With.StatusCode(System.Net.HttpStatusCode.PreconditionFailed);
            Proxy.Add(simulation);
            var existing = Proxy.GetByName("FirstOneNotExist");
            Assert.IsNull(existing);
        }
[TestMethod]
public void AddOnSimulationAndDeleteIt()
{
var simulation1 = Moksy.Common.SimulationFactory.New("First").Get().From("/Product").With.Header("MyHeader", "HeaderValue").Return.Body("This content").With.StatusCode(System.Net.HttpStatusCode.PreconditionFailed);
Proxy.Add(simulation1);
var simulation2 = Moksy.Common.SimulationFactory.New("Second").Get().From("/Product").With.Header("MyHeader", "HeaderValue").Return.Body("This content").With.StatusCode(System.Net.HttpStatusCode.PreconditionFailed);
Proxy.Add(simulation2);
var all = Proxy.GetAll();
Assert.AreEqual(2, all.Count());
var response = Proxy.DeleteByName("Second");
Assert.AreEqual(System.Net.HttpStatusCode.NoContent, response);
all = Proxy.GetAll();
Assert.AreEqual(1, all.Count());
var match = all.FirstOrDefault(f => f.Name == "First");
Assert.IsNotNull(match);
match = all.FirstOrDefault(f => f.Name == "Second");
Assert.IsNull(match);
}
[TestMethod]
public void AddOnSimulationAndDeleteItButLeaveData()
{
var simulation1 = Moksy.Common.SimulationFactory.New("First").Get().FromImdb("/Pet").AsJson().Return.StatusCode(System.Net.HttpStatusCode.OK);
Proxy.Add(simulation1);
var simulation2 = Moksy.Common.SimulationFactory.New("Second").Post().ToImdb("/Pet").Then.Return.StatusCode(System.Net.HttpStatusCode.Created).And.AddToImdb("{Kind}");
Proxy.Add(simulation2);
var response = Post("/Pet", new Pet() { Kind = "Dog" });
var code = Proxy.DeleteByName("Second");
Assert.AreEqual(System.Net.HttpStatusCode.NoContent, code);
response = Get("/Pet");
Assert.AreEqual(System.Net.HttpStatusCode.OK, response.StatusCode);
Assert.AreEqual(@"{""Kind"":""Dog""}", response.Content);
}
[TestMethod]
public void AddOnSimulationAndDeleteItButRemoveData()
{
var simulation1 = Moksy.Common.SimulationFactory.New("First").Get().FromImdb("/Pet").Return.StatusCode(System.Net.HttpStatusCode.OK);
Proxy.Add(simulation1);
var simulation2 = Moksy.Common.SimulationFactory.New("Second").Post().ToImdb("/Pet").Then.Return.StatusCode(System.Net.HttpStatusCode.Created).And.AddToImdb("{Kind}");
Proxy.Add(simulation2);
var response = Post("/Pet", new Pet() { Kind = "Dog" });
var code = Proxy.DeleteByName("Second", true);
Assert.AreEqual(System.Net.HttpStatusCode.NoContent, code);
response = Get("/Pet");
Assert.AreEqual(System.Net.HttpStatusCode.OK, response.StatusCode);
Assert.AreEqual(@"", response.Content);
}
[TestMethod]
public void DeleteSimulationDataRetainRule()
{
var simulation1 = Moksy.Common.SimulationFactory.New("First").Get().FromImdb("/Pet").Return.StatusCode(System.Net.HttpStatusCode.OK).Return.Body("StillExists");
Proxy.Add(simulation1);
var simulation2 = Moksy.Common.SimulationFactory.New("Second").Post().ToImdb("/Pet").Then.Return.StatusCode(System.Net.HttpStatusCode.Created).And.AddToImdb("{Kind}");
Proxy.Add(simulation2);
var response = Post("/Pet", new Pet() { Kind = "Dog" });
var code = Proxy.DeleteByName("First", true, true);
Assert.AreEqual(System.Net.HttpStatusCode.NoContent, code);
response = Get("/Pet");
Assert.AreEqual(System.Net.HttpStatusCode.OK, response.StatusCode);
Assert.AreEqual(@"StillExists", response.Content);
}
[TestMethod]
public void ConfigureRemoteMachine()
{
Proxy proxy = new Proxy(10011);
proxy.Start();
}
/*
[TestMethod]
public void StartWithLog()
{
Proxy.Exit();
Proxy.Start("/Log");
}
/*
[TestMethod]
public void ProxyExit()
{
var all = Proxy.GetAll();
Assert.IsNotNull(all);
Proxy.Exit();
try
{
all = Proxy.GetAll();
Assert.Fail("It was expected the service would be finished by now. ");
}
catch (System.Net.WebException wex)
{
}
}
[TestMethod]
public void StartRemoteMachine()
{
// Start it on the local host - we will treat this instance as a remote connection.
Proxy localProxy = new Moksy.Proxy(30011);
Assert.IsTrue(localProxy.Start());
Assert.IsTrue(localProxy.IsLocalhost);
// This simulates a remove machine by passing in the local machine name as the Proxy Host.
var hostname = System.Environment.ExpandEnvironmentVariables("%COMPUTERNAME%");
var proxy = new Proxy(hostname, 30011);
Assert.IsTrue(proxy.Start());
Assert.IsFalse(proxy.IsLocalhost);
var s = Moksy.Common.SimulationFactory.New("First").Get().From("/Pet").Return.StatusCode(System.Net.HttpStatusCode.OK).Body("Dog");
proxy.Add(s);
RestSharp.IRestClient client = new RestSharp.RestClient(proxy.Root);
RestSharp.IRestRequest request = new RestSharp.RestRequest("/Pet", RestSharp.Method.GET);
request.RequestFormat = RestSharp.DataFormat.Json;
var response = client.Execute(request);
Assert.AreEqual("Dog", response.Content);
}
*/
}
}
| {
"content_hash": "9538ab4d7c8b8adb309e88a91b7b90fc",
"timestamp": "",
"source": "github",
"line_count": 244,
"max_line_length": 228,
"avg_line_length": 36.75409836065574,
"alnum_prop": 0.613514719000892,
"repo_name": "aidancasey/Moksy",
"id": "3581da7cb9512ec47547848d559350f6b1305a41",
"size": "8970",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Moksy.IntegrationTest/ProxyTests.cs",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1055"
},
{
"name": "C#",
"bytes": "874600"
},
{
"name": "PowerShell",
"bytes": "255"
}
],
"symlink_target": ""
} |
<!-- Podstanica potrošnja (substation consumption) view:
     shows either a smart-table list or an edit form, toggled by `izbor`. -->
<div class="container">
  <!-- List mode: rendered only while no record is selected for editing -->
  <div *ngIf="!izbor" class="row" >
    <ba-card title="" baCardClass="with-scroll">
      <ng2-smart-table [settings]="settings" [source]="source" (delete)="onDelete($event)" (edit)="onEdit($event)" (create)="onCreate()"></ng2-smart-table>
    </ba-card>
  </div>
</div>
<!-- Edit mode: form bound to `podstanicaPotrosnja` via the reactive group `myForm` -->
<div *ngIf="izbor" class="container">
  <form [formGroup]="myForm" #f="ngForm" >
    <!-- Hidden identity/concurrency fields carried through the form -->
    <div class="form-group">
      <input hidden type="number" class="form-control" id="id" [(ngModel)]="podstanicaPotrosnja.id" formControlName="id">
    </div>
    <div class="form-group">
      <input hidden type="number" class="form-control" id="version" [(ngModel)]="podstanicaPotrosnja.version" formControlName="version">
    </div>
    <div>
      <!-- Year selector; change handler revalidates the year/month combination -->
      <div *ngIf="godina" class="form-group">
        <label>Godina</label>
        <select class="form-control" id="godina" formControlName="godina" [(ngModel)]="godina" (ngModelChange)="onGodinaSelected($event)">
          <option *ngFor="let item of godine" [ngValue]="item">{{item}}</option>
        </select>
      </div>
      <!-- Month selector; bound by id, displays the month name -->
      <div *ngIf="mesec" class="form-group">
        <label>Mesec</label>
        <select class="form-control" id="mesec" formControlName="mesec" [(ngModel)]="mesec.id" (ngModelChange)="onMesecSelected($event)">
          <option *ngFor="let item of meseci; let i = index" [ngValue]="item.id">{{item.naz}}</option>
        </select>
      </div>
      <!-- Duplicate-entry warning, shown when proveraRn reports an existing record -->
      <div [hidden]="proveraRn==0" class="alert alert-danger">Već ste uneli račun za izabrani mesec i godinu!</div>
    </div>
    <!-- Consumption value in kWh -->
    <div class="form-group">
      <label for="potrosnjaKwh">Potrošnja (kWh)</label>
      <input type="text" class="form-control" id="potrosnjaKwh" required [(ngModel)]="podstanicaPotrosnja.potrosnjaKwh" formControlName="potrosnjaKwh">
    </div>
    <!-- Save / cancel actions -->
    <div class="container-fluid">
      <button *ngIf="izbor" type="button" class="btn btn-primary" (click)="onSubmit()">Sačuvaj</button>
      <button *ngIf="izbor" type="button" class="btn btn-primary" (click)="onCancel();">Odustani</button>
    </div>
  </form>
</div>
<!-- Delete-confirmation modal (ngx-bootstrap), opened from the table's delete action -->
<div bsModal #childModal="bs-modal" class="modal fade" tabindex="-1" role="dialog" aria-labelledby="mySmallModalLabel" aria-hidden="true">
  <div class="modal-dialog modal-sm">
    <div class="modal-content">
      <div class="modal-header">
        <button class="close" aria-label="Close" (click)="hideChildModal()">
          <span aria-hidden="true">×</span>
        </button>
        <h4 class="modal-title">Brisanje potrošnje podstanice</h4>
      </div>
      <div class="modal-body">
        Da li ste sigurni da želite da obrišete potrošnju?
      </div>
      <div class="modal-footer">
        <button class="btn btn-primary confirm-btn" (click)="onDeleteConfirm()">Da</button>
        <button class="btn btn-primary confirm-btn" (click)="hideChildModal()">Ne</button>
      </div>
    </div>
  </div>
</div>
| {
"content_hash": "fe13c98fa4427708bd9545ec5bb32a7c",
"timestamp": "",
"source": "github",
"line_count": 64,
"max_line_length": 155,
"avg_line_length": 45,
"alnum_prop": 0.6295138888888889,
"repo_name": "radni1234/ng2-admin",
"id": "ba53da0b9f53af719d74eab84c99934d14284cf2",
"size": "2888",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/app/pages/daljinskogrejanje/components/podstanica_potrosnja/podstanica_potrosnja.component.html",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "123345"
},
{
"name": "Dockerfile",
"bytes": "380"
},
{
"name": "HTML",
"bytes": "410843"
},
{
"name": "JavaScript",
"bytes": "35084"
},
{
"name": "Shell",
"bytes": "153"
},
{
"name": "TypeScript",
"bytes": "902603"
}
],
"symlink_target": ""
} |
<?xml version="1.0" encoding="latin1" ?>
<!DOCTYPE cref SYSTEM "cref.dtd">
<cref>
<header>
<copyright>
<year>1996</year><year>2009</year>
<holder>Ericsson AB. All Rights Reserved.</holder>
</copyright>
<legalnotice>
The contents of this file are subject to the Erlang Public License,
Version 1.1, (the "License"); you may not use this file except in
compliance with the License. You should have received a copy of the
Erlang Public License along with this software. If not, it can be
retrieved online at http://www.erlang.org/.
Software distributed under the License is distributed on an "AS IS"
basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
the License for the specific language governing rights and limitations
under the License.
</legalnotice>
<title>erl_format</title>
<prepared>Torbjörn Törnkvist</prepared>
<responsible>Torbjörn Törnkvist</responsible>
<docno></docno>
<approved>Bjarne Däcker</approved>
<checked>Torbjörn Törnkvist</checked>
<date>961016</date>
<rev>A</rev>
<file>erl_format.sgml</file>
</header>
<lib>erl_format</lib>
<libsummary>Create and Match Erlang Terms</libsummary>
<description>
<p>This module contains two routines - one general function for
creating Erlang terms and one for pattern matching Erlang terms.</p>
</description>
<funcs>
<func>
<name><ret>ETERM *</ret><nametext>erl_format(FormatStr, ... )</nametext></name>
<fsummary>Creates an Erlang term</fsummary>
<type>
<v>char *FormatStr;</v>
</type>
<desc>
<p>This is a general function for creating Erlang terms using
a format specifier and a corresponding set of arguments, much
in the way <c><![CDATA[printf()]]></c> works.</p>
<p><c><![CDATA[FormatStr]]></c> is a format specification string. The set
of valid format specifiers is as follows:</p>
<list type="bulleted">
<item>
<p>~i - Integer</p>
</item>
<item>
<p>~f - Floating point</p>
</item>
<item>
<p>~a - Atom</p>
</item>
<item>
<p>~s - String</p>
</item>
<item>
<p>~w - Arbitrary Erlang term</p>
</item>
</list>
<p>For each format specifier that appears in <c><![CDATA[FormatStr]]></c>,
there must be a corresponding argument following
<c><![CDATA[FormatStr]]></c>. An Erlang term is built according to the
<c><![CDATA[FormatStr]]></c> with values and Erlang terms substituted from
the corresponding arguments and according to the individual
format specifiers. For example:</p>
<code type="none"><![CDATA[
erl_format("[{name,~a},{age,~i},{data,~w}]",
"madonna",
21,
erl_format("[{adr,~s,~i}]","E-street",42));
]]></code>
<p>This will create an <c><![CDATA[(ETERM *)]]></c> structure corresponding
to the Erlang term:
<c><![CDATA[[{name,madonna},{age,21},{data,[{adr,"E-street",42}]}]]]></c></p>
<p>The function returns an Erlang term, or NULL if
<c><![CDATA[FormatStr]]></c> does not describe a valid Erlang term.</p>
</desc>
</func>
<func>
<name><ret>int</ret><nametext>erl_match(Pattern, Term)</nametext></name>
<fsummary>Performs pattern matching</fsummary>
<type>
<v>ETERM *Pattern,*Term;</v>
</type>
<desc>
<p>This function is used to perform pattern matching similar
to that done in Erlang. Refer to an Erlang manual for matching
rules and more examples.</p>
<p><c><![CDATA[Pattern]]></c> is an Erlang term, possibly containing unbound
variables. </p>
<p><c><![CDATA[Term]]></c> is an Erlang term that we wish to match against
<c><![CDATA[Pattern]]></c>.</p>
<p><c><![CDATA[Term]]></c> and <c><![CDATA[Pattern]]></c> are compared, and any
unbound variables in <c><![CDATA[Pattern]]></c> are bound to corresponding
values in <c><![CDATA[Term]]></c>. </p>
<p>If <c><![CDATA[Term]]></c> and <c><![CDATA[Pattern]]></c> can be matched, the
function returns a non-zero value and binds any unbound
          variables in <c><![CDATA[Pattern]]></c>. If <c><![CDATA[Term]]></c> and <c><![CDATA[Pattern]]></c> do
not match, the function returns 0. For example:</p>
<code type="none"><![CDATA[
ETERM *term, *pattern, *pattern2;
term1 = erl_format("{14,21}");
term2 = erl_format("{19,19}");
pattern1 = erl_format("{A,B}");
pattern2 = erl_format("{F,F}");
if (erl_match(pattern1, term1)) {
/* match succeeds:
* A gets bound to 14,
* B gets bound to 21
*/
...
}
if (erl_match(pattern2, term1)) {
/* match fails because F cannot be
* bound to two separate values, 14 and 21
*/
...
}
if (erl_match(pattern2, term2)) {
/* match succeeds and F gets bound to 19 */
...
}
]]></code>
<p><c><![CDATA[erl_var_content()]]></c> can be used to retrieve the
content of any variables bound as a result of a call to
<c><![CDATA[erl_match()]]></c>.</p>
</desc>
</func>
</funcs>
</cref>
| {
"content_hash": "f70860f691d0b1a96676c52080467b65",
"timestamp": "",
"source": "github",
"line_count": 141,
"max_line_length": 106,
"avg_line_length": 38.312056737588655,
"alnum_prop": 0.581081081081081,
"repo_name": "racker/omnibus",
"id": "5699485845593bf6b533a5cd621775fc31db752a",
"size": "5402",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "source/otp_src_R14B02/lib/erl_interface/doc/src/erl_format.xml",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ASP",
"bytes": "21896"
},
{
"name": "ActionScript",
"bytes": "7811"
},
{
"name": "Ada",
"bytes": "913692"
},
{
"name": "Assembly",
"bytes": "546596"
},
{
"name": "Awk",
"bytes": "147229"
},
{
"name": "C",
"bytes": "118056858"
},
{
"name": "C#",
"bytes": "1871806"
},
{
"name": "C++",
"bytes": "28581121"
},
{
"name": "CLIPS",
"bytes": "6933"
},
{
"name": "CSS",
"bytes": "162089"
},
{
"name": "Clojure",
"bytes": "79070"
},
{
"name": "D",
"bytes": "4925"
},
{
"name": "DOT",
"bytes": "1898"
},
{
"name": "Emacs Lisp",
"bytes": "625560"
},
{
"name": "Erlang",
"bytes": "79712366"
},
{
"name": "FORTRAN",
"bytes": "3755"
},
{
"name": "Java",
"bytes": "5632652"
},
{
"name": "JavaScript",
"bytes": "1240931"
},
{
"name": "Logos",
"bytes": "119270"
},
{
"name": "Objective-C",
"bytes": "1088478"
},
{
"name": "PHP",
"bytes": "39064"
},
{
"name": "Pascal",
"bytes": "66389"
},
{
"name": "Perl",
"bytes": "4971637"
},
{
"name": "PowerShell",
"bytes": "1885"
},
{
"name": "Prolog",
"bytes": "5214"
},
{
"name": "Python",
"bytes": "912999"
},
{
"name": "R",
"bytes": "4009"
},
{
"name": "Racket",
"bytes": "2713"
},
{
"name": "Ragel in Ruby Host",
"bytes": "24585"
},
{
"name": "Rebol",
"bytes": "106436"
},
{
"name": "Ruby",
"bytes": "27360215"
},
{
"name": "Scala",
"bytes": "5487"
},
{
"name": "Scheme",
"bytes": "5036"
},
{
"name": "Scilab",
"bytes": "771"
},
{
"name": "Shell",
"bytes": "8793006"
},
{
"name": "Tcl",
"bytes": "3330919"
},
{
"name": "Visual Basic",
"bytes": "10926"
},
{
"name": "XQuery",
"bytes": "4276"
},
{
"name": "XSLT",
"bytes": "2003063"
},
{
"name": "eC",
"bytes": "4568"
}
],
"symlink_target": ""
} |
<?php
namespace Surume\Loop;
use Surume\Loop\Flow\FlowController;
interface LoopExtendedInterface extends LoopInterface
{
    /**
     * Return the model object this extended loop operates on.
     *
     * @return LoopModelInterface
     */
    public function model();
    /**
     * Flush the loop's queued work.
     *
     * @param bool $all Presumably flushes everything rather than only the
     *                  default queue when true — confirm against implementation.
     * @return LoopExtendedInterface
     */
    public function flush($all = false);
    /**
     * Export from this loop into the given loop (direction inferred from the
     * name — confirm against implementation).
     *
     * @param LoopExtendedInterface $loop Target loop.
     * @param bool $all See flush() for the meaning of this flag.
     * @return LoopExtendedInterface
     */
    public function export(LoopExtendedInterface $loop, $all = false);
    /**
     * Import from the given loop into this loop (direction inferred from the
     * name — confirm against implementation).
     *
     * @param LoopExtendedInterface $loop Source loop.
     * @param bool $all See flush() for the meaning of this flag.
     * @return LoopExtendedInterface
     */
    public function import(LoopExtendedInterface $loop, $all = false);
    /**
     * Exchange contents between this loop and the given loop.
     *
     * @param LoopExtendedInterface $loop Loop to swap with.
     * @param bool $all See flush() for the meaning of this flag.
     * @return LoopExtendedInterface
     */
    public function swap(LoopExtendedInterface $loop, $all = false);
    /**
     * Run a single iteration of the loop.
     */
    public function tick();
    /**
     * Start the loop.
     */
    public function start();
    /**
     * Stop the loop.
     */
    public function stop();
    /**
     * Set the controller governing this loop's flow.
     *
     * @param mixed $flowController Declared mixed; presumably a FlowController
     *                              instance, since getFlowController() returns one.
     */
    public function setFlowController($flowController);
    /**
     * Return the controller governing this loop's flow.
     *
     * @return FlowController
     */
    public function getFlowController();
}
| {
"content_hash": "dc6a191ad0e9a0589bcee279222d42b6",
"timestamp": "",
"source": "github",
"line_count": 66,
"max_line_length": 70,
"avg_line_length": 18.954545454545453,
"alnum_prop": 0.6003197442046363,
"repo_name": "khelle/surume",
"id": "b2c4ab4b913ab4fcc92b9c78db3c54e7f41f4261",
"size": "1251",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/Loop/LoopExtendedInterface.php",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "PHP",
"bytes": "941937"
}
],
"symlink_target": ""
} |
{% extends "helpdesk/help_base.html" %}
{% block title %}django-helpdesk API Documentation{% endblock %}
{% block heading %}django-helpdesk API Documentation{% endblock %}
{% block content %}
<h2>Contents</h2>
<ul>
<li><a href='#introduction'>Introduction</a></li>
<li><a href='#request'>Request Basics & Authentication</a></li>
<li><a href='#response'>Responses</a></li>
<li><a href='#methods'>Method Documentation</a>
<ul>
<li><a href='#method_create_ticket'>create_ticket</a></li>
<li><a href='#method_delete_ticket'>delete_ticket</a></li>
<li><a href='#method_hold_ticket'>hold_ticket</a></li>
<li><a href='#method_unhold_ticket'>unhold_ticket</a></li>
<li><a href='#method_add_followup'>add_followup</a></li>
<li><a href='#method_resolve'>resolve</a></li>
<li><a href='#method_list_queues'>list_queues</a></li>
<li><a href='#method_find_user'>find_user</a></li>
</ul>
</li>
</ul>
<h2 id='introduction'>Introduction</h2>
<p>django-helpdesk provides a powerful <acronym title='Application Programming Interface'>API</acronym> to allow you to interact with your helpdesk tickets by a means not otherwise provided by the helpdesk.</p>
<p>For example, you may use this API to implement a system to automatically open a ticket when an invoice is raised in your invoicing system, or to automatically close a ticket from an instant messenger application.</p>
<p>Your use of this system is open-ended: most business cases should be addressable with a little bit of coding to allow you to interact nicely with your helpdesk.</p>
<h2 id='request'>Request Basics & Authentication</h2>
<p>All requests to the API must be made using <acronym title='HyperText Transfer Protocol'>HTTP</acronym> POST requests. Any request that is not made using POST will raise an error.</p>
<p>Your requests must be made up of the following elements:</p>
<ol>
<li>A <em>method</em>, or action. This tells the API what core functionality to execute.</li>
<li>A <em>username</em> and <em>password</em> which are valid and active within your helpdesk system. You may wish to create a specific API user just for API usage.</li>
<li>A set of <em>data</em> to be saved into the database. This data will vary from request to request, and is outlined in <a href='#methods'>Methods</a> below.</li>
</ol>
<p>To build your request, send a HTTP POST request to <em>{% url 'helpdesk_api' "method" %}</em>, where <em>method</em> is the name of a <a href='#methods'>valid method</a> from the list below.</p>
<p>Your POST must include both <em>user</em> and <em>password</em> parameters.</p>
<p>A sample request for the method <em>hold_ticket</em> may look like this:</p>
<ul>
<li>A HTTP POST to <em>{% url 'helpdesk_api' "hold_ticket" %}</em></li>
<li>A set of POST data containing:<ul>
<li>username=susan</li>
<li>password=fido</li>
<li>ticket=31794</li>
</ul></li>
</ul>
<p>To complete this from a command-line using the <a href='http://curl.haxx.se/'>cURL</a> application, you may use a command such as this:</p>
<pre>/usr/bin/curl {% url 'helpdesk_api' "hold_ticket" %} --data "user=susan&password=fido&ticket=31794"</pre>
<p>In <a href='http://www.php.net/'>PHP</a>, providing you have access to the <a href='http://www.php.net/curl'>cURL libraries</a>, you may use code such as this:</p>
<pre><?php
$api = curl_init();
curl_setopt($api, CURLOPT_URL, "{% url 'helpdesk_api' "hold_ticket" %}");
curl_setopt($api, CURLOPT_POST, 1);
curl_setopt($api, CURLOPT_POSTFIELDS, "user=susan&password=fido&ticket=31794");
$result = curl_exec($api);
curl_close($api);
echo $result;
?></pre>
<p>Note that cURL expects all data to be urlencoded, this is left as an exercise for the reader.</p>
<h2 id='response'>Responses</h2>
<p>The API system makes proper use of the following HTTP response codes:</p>
<dl>
<dt>200</dt>
<dd>OK - Data updated successfully</dd>
<dt>400</dt>
<dd>ERROR - Generic error. See returned text for details</dd>
<dt>404</dt>
<dd>ERROR - Data not found (eg, incorrect ticket). See returned text for details</dd>
<dt>403</dt>
<dd>ERROR - Invalid permissions (eg, incorrect username and/or password)</dd>
<dt>405</dt>
<dd>ERROR - Invalid method. You probably tried using GET, PUT or DELETE however we require POST.</dd>
</dl>
<p>Responses will have one of two content-types:</p>
<dl>
<dt>text/plain</dt>
<dd>Any error messages, or simple responses (eg a ticket ID)</dd>
<dt>text/json</dt>
<dd>Any complex responses, such as a list of data.</dd>
</dl>
<h2 id='methods'>Method Documentation</h2>
<p>The following public methods are available for use via the API. Each of them requires <a href='#request'>a valid request and authentication</a>, and each has its own parameters as described below.</p>
<ul>
<li><a href='#method_create_ticket'>create_ticket</a></li>
<li><a href='#method_delete_ticket'>delete_ticket</a></li>
<li><a href='#method_hold_ticket'>hold_ticket</a></li>
<li><a href='#method_unhold_ticket'>unhold_ticket</a></li>
<li><a href='#method_add_followup'>add_followup</a></li>
<li><a href='#method_resolve'>resolve</a></li>
<li><a href='#method_list_queues'>list_queues</a></li>
<li><a href='#method_find_user'>find_user</a></li>
</ul>
<h3 id='method_create_ticket'>create_ticket</h3>
<p>This method creates a new helpdesk ticket.</p>
<h4>Parameters</h4>
<dl>
<dt>queue</dt>
<dd>Queue ID (use <a href='#method_list_queues'>list_queues</a> to get queue ID's) - this is an integer field.</dd>
<dt>title</dt>
<dd>Title or header of this ticket. Character field, maximum 100 characters.</dd>
<dt>submitter_email</dt>
<dd>(Optional) e-mail address of the person submitting this ticket. This e-mail address will receive copies of all public updates to this ticket, and will receive a notification when the ticket is created.</dd>
<dt>assigned_to</dt>
<dd>(Optional) Integer ID of the user to which this ticket should be assigned. Use <a href='#method_find_user'>find_user</a> to find a user ID from a username.</dd>
<dt>priority</dt>
<dd>(Optional) Priority as an integer from 1 (high) to 5 (low). Defaults to 3 if no priority given.</dd>
</dl>
<h4>Response</h4>
<p>This method responds with <strong>plain-text</strong>.</p>
<p>If you receive a 200 OK <a href='#response'>response</a>, then the content of the response will be the ticket ID.</p>
<h3 id='method_delete_ticket'>delete_ticket</h3>
<p>When given a ticket ID and confirmation, this method will delete a ticket entirely. This also deletes any followups, attachments, and other details.</p>
<h4>Parameters</h4>
<dl>
<dt>ticket</dt>
<dd>The numeric ticket ID to be deleted</dd>
<dt>confirm</dt>
<dd>You must provide this field, with any value, to enable deletion to continue</dd>
</dl>
<h4>Response</h4>
<p>A standard <a href='#response'>200 OK response</a> is given on success, or an error message on failure.</p>
<h3 id='method_hold_ticket'>hold_ticket</h3>
<p>If a ticket needs to be placed on hold, preventing it from being escalated, use this method.</p>
<h4>Parameters</h4>
<dl>
<dt>ticket</dt>
<dd>The numeric ticket ID to be placed on hold</dd>
</dl>
<h4>Response</h4>
<p>A standard <a href='#response'>200 OK response</a> is given on success, or an error message on failure.</p>
<h3 id='method_unhold_ticket'>unhold_ticket</h3>
<p>If a ticket is currently on hold and you wish to remove that hold, use this method.</p>
<h4>Parameters</h4>
<dl>
<dt>ticket</dt>
<dd>The numeric ticket ID to be taken off hold</dd>
</dl>
<h4>Response</h4>
<p>A standard <a href='#response'>200 OK response</a> is given on success, or an error message on failure.</p>
<h3 id='method_add_followup'>add_followup</h3>
<p>This method adds a comment / followup to a ticket. The followup can be public, in which case it is e-mailed to the submitter, or private. The followup will also be sent to others involved in the ticket: The owner and the queue notification / CC address.</p>
<h4>Parameters</h4>
<dl>
<dt>ticket</dt>
<dd>The numeric ticket ID to which this followup should be added</dd>
<dt>message</dt>
<dd>Text of 'unlimited' length - optionally formatted with HTML - to add to the message.</dd>
<dt>public</dt>
<dd>Either 'y' for public, or 'n' for private. This is optional, and it is assumed that followups are private if it is not provided. Private tickets are <strong>not</strong> e-mailed to the ticket submitter.</dd>
</dl>
<h4>Response</h4>
<p>A standard <a href='#response'>200 OK response</a> is given on success, or an error message on failure.</p>
<h3 id='method_resolve'>resolve</h3>
<p>This method adds a resolution to a ticket and marks it as resolved. The resolution will be e-mailed to everybody involved with the ticket, including the submitter.</p>
<h4>Parameters</h4>
<dl>
<dt>ticket</dt>
<dd>The numeric ticket ID to which this followup should be added</dd>
<dt>resolution</dt>
<dd>Text of 'unlimited' length - optionally formatted with HTML. This is the resolution for this ticket.</dd>
</dl>
<h4>Response</h4>
<p>A standard <a href='#response'>200 OK response</a> is given on success, or an error message on failure.</p>
<h3 id='method_list_queues'>list_queues</h3>
<p>This method provides a JSON-parsable list of queues, letting you access the individual queue ID in order to create tickets.</p>
<h4>Response</h4>
<p>This method responds with <strong>json</strong>.</p>
<p>It provides a list of queues in JSON format. The fields provided are ID and Title.</p>
<h3 id='method_find_user'>find_user</h3>
<p>When given a username, this method provides the related numeric user ID - commonly used when creating or reassigning tickets.</p>
<h4>Parameters</h4>
<dl>
<dt>username</dt>
<dd>The case-sensitive username of the user for which you require the user ID</dd>
</dl>
<h4>Response</h4>
<p>This method responds with <strong>plain-text</strong>.</p>
<p>If you receive a 200 OK <a href='#response'>response</a>, then the content of the response will be the user's ID.</p>
{% endblock %}
| {
"content_hash": "d6071a1c6a0dacbcfc409aff08393486",
"timestamp": "",
"source": "github",
"line_count": 271,
"max_line_length": 268,
"avg_line_length": 43.51660516605166,
"alnum_prop": 0.5994233867548546,
"repo_name": "temnoregg/django-helpdesk",
"id": "bcb7cb92d7657e78173a676c5de3b7b1c565942e",
"size": "11793",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "helpdesk/templates/helpdesk/help_api.html",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "5926"
},
{
"name": "HTML",
"bytes": "108212"
},
{
"name": "JavaScript",
"bytes": "42249"
},
{
"name": "Python",
"bytes": "457805"
},
{
"name": "Shell",
"bytes": "708"
}
],
"symlink_target": ""
} |
using System;
using System.Collections.Generic;
using System.Linq;
using System.Web.Http;
namespace Yuvia.Web
{
    /// <summary>
    /// Registers Web API routing for the application. Register() is invoked once
    /// at startup with the global <see cref="HttpConfiguration"/>.
    /// </summary>
    public static class WebApiConfig
    {
        public static void Register(HttpConfiguration config)
        {
            // Web API configuration and services

            // Web API routes: attribute-based routes are mapped first, before the
            // convention-based "DefaultApi" route below.
            config.MapHttpAttributeRoutes();

            // Conventional route: api/{controller}/{id}, where the id segment
            // may be omitted (RouteParameter.Optional).
            config.Routes.MapHttpRoute(
                name: "DefaultApi",
                routeTemplate: "api/{controller}/{id}",
                defaults: new { id = RouteParameter.Optional }
            );
        }
    }
}
| {
"content_hash": "e818a6c1d782ac9104e13678bb28c1b1",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 62,
"avg_line_length": 24.333333333333332,
"alnum_prop": 0.577054794520548,
"repo_name": "codehuntsman/yuvia",
"id": "be0bbd98b21f1594576018061216bb81df3a395b",
"size": "586",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "Source/Yuvia/Presentation/Yuvia.Web/Configuration/WebApiConfig.cs",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "ASP",
"bytes": "92"
},
{
"name": "C#",
"bytes": "25563"
},
{
"name": "CSS",
"bytes": "125"
}
],
"symlink_target": ""
} |
ejb-multi-server: EJB Communication Across Servers
======================================================
Author: Wolf-Dieter Fink
Level: Advanced
Technologies: EJB, EAR
Summary: EJB applications deployed to different servers that communicate via EJB remote calls
Target Platform: EAP
Source: <https://github.com/jboss-jdf/jboss-as-quickstart/>
What is it?
-----------
This quickstart demonstrates communication between applications deployed to different servers. Each application is deployed as an EAR and contains a simple EJB3.1 bean. The only function of each bean is to log the invocation.
This example consists of the following Maven projects, each with a shared parent:
| **Sub-project** | **Description** |
|:-----------|:-----------|
| `app-main` | An application that can be called by the `client`. It can also call the different sub-applications. |
| `app-one` and `app-two` | These are simple applications that contain an EJB sub-project to build the `ejb.jar` file and an EAR sub-project to build the `app.ear` file. Each application contains only one EJB that logs a statement on a method call and returns the `jboss.node.name` and credentials. |
| `app-web` | A simple WAR application. It consists of one Servlet that demonstrates how to invoke EJBs on a different server. |
| `client` | This project builds the standalone client and executes it.|
The root `pom.xml` builds each of the subprojects in an appropriate order.
The server configuration is done using CLI batch scripts located in the root of the quickstart folder.
System requirements
-------------------
All you need to build this project is Java 6.0 (Java SDK 1.6) or better, Maven 3.0 or better.
The application this project produces is designed to be run on any of the following:
JBoss Enterprise Application Platform 6.0
JBoss Enterprise Application Platform 6.1
JBoss AS 7.1
Configure Maven
---------------
If you have not yet done so, you must [Configure Maven](../README.md#mavenconfiguration) before testing the quickstarts.
Modify the CLI Scripts (if you are running JBoss Enterprise Application Platform 6.0 or JBoss AS 7.1 servers)
---------------------------
The CLI scripts provided with this quickstart target JBoss Enterprise Application Platform 6.1. If you are running older versions of the server, JBoss Enterprise Application Platform 6.0 or JBoss AS 7.1, you must modify the scripts to work against these servers.
1. Open the `install-domain.cli` file located in the root of this quickstart folder for editing.
2. Find the lines that contain the following text:
*** NOTE: If you are running JBoss
3. Follow the _Note_ instructions to comment or uncomment the lines in the file.
4. Save the file.
Start JBoss Enterprise Application Platform 6 or JBoss AS 7 Server
---------------------------
1. Unzip or install a fresh JBoss Enterprise Application Platform 6 or JBoss AS 7 instance.
2. Open a command line and navigate to the root of the server directory. Start the server using the following command:
bin/domain.sh
3. Open a new command line, navigate to the root directory of this quickstart, and run the following command:
JBOSS_HOME/bin/jboss-cli.sh --connect --file=install-domain.cli
This script configures and starts multiple servers needed to run this quickstart. You should see "outcome" => "success" for all of the commands.
Add the Application Users
---------------
The following users must be added to the `ApplicationRealm` to run this quickstart. Be sure to use the names and passwords specified in the table as they are required to run this example.
| **UserName** | **Realm** | **Password** | **Roles** |
|:-----------|:-----------|:-----------|:-----------|
| quickuser| ApplicationRealm | quick-123 | _leave blank for none_ |
| quickuser1 | ApplicationRealm | quick123+ | _leave blank for none_ |
| quickuser2 | ApplicationRealm | quick+123 | _leave blank for none_ |
If you are running JBoss Enterprise Application Platform 6.1, you can add the users using the following commands:
bin/add-user.sh -a -u quickuser -p quick-123 --silent
bin/add-user.sh -a -u quickuser1 -p quick123+ --silent
bin/add-user.sh -a -u quickuser2 -p quick+123 --silent
If you are running JBoss Enterprise Application Platform 6.0 or JBoss AS 7.1, you must use the add-user utility. For an example of how to use the add-user utility, see instructions in the root README file located here: [Add User](../README.md#addapplicationuser).
Build and Deploy the Quickstart
-------------------------
_NOTE: The following build command assumes you have configured your Maven user settings. If you have not, you must include Maven setting arguments on the command line. See [Build and Deploy the Quickstarts](../README.md#buildanddeploy) for complete instructions and additional options._
1. Make sure you have started and configured the JBoss Server successful as described above.
2. Open a command line and navigate to the root directory of this quickstart.
3. Type this command to build the artifacts:
mvn clean install
4. Open a new command line and navigate to the root directory of this quickstart. Deploy the applications using the provided CLI batch script by typing the following command:
JBOSS_HOME/bin/jboss-cli.sh --connect --file=deploy-domain.cli
This will deploy the app-*.ear files to different server-groups of the running domain.
_NOTE: If ERRORs appear in the server.log when the installing or deploying the quickstart, please stop the domain and restart it. This should ensure further steps run correctly._
Access the Remote Client Application
---------------------
1. Make sure that the deployments are successful as described above.
2. Navigate to the quickstart `client/` subdirectory.
3. Type this command to run the application:
mvn exec:java
The client will output the following information provided by the applications:
InvokeAll succeed: MainApp[anonymous]@master:app-main > [ app1[anonymous]@master:app-oneA > app2[quickuser2]@master:app-two ; app2[quickuser2]@master:app-two ]
This output shows that the `MainApp` is called with the user `anonymous` at node `master:app-main` and the sub-call is proceeded by the `master:app-oneA` node and `master:app-two` node as `quickuser2`.
Review the server log files to see the bean invocations on the servers.
4. To invoke the bean that uses the `scoped-client-context`, you must pass a property. Type the following command
mvn exec:java -DUseEjbClient34=true
The invocation of `appTwo` will not work since the secured method will be called and there is no Role for the user defined. Try to update the user `quickuser1` and `quickuser2` and give them one of the Roles `AppTwo` or `Intern`. After that the invocation will be successful. The log output of the `appTwo` servers shows which Role is applied to the user. The output of the client will show you a simple line with the information provided by the different applications:
InvokeAll succeed: MainEjbClient34App[anonymous]@master:app-main > [ {app1[quickuser1]@master:app-oneA, app1[quickuser2]@master:app-oneB, app1[quickuser2]@master:app-oneB, app1[quickuser1]@master:app-oneA, app1[quickuser1]@master:app-oneA, app1[quickuser1]@master:app-oneA, app1[quickuser2]@master:app-oneB, app1[quickuser1]@master:app-oneA} > appTwo loop(7 time A-B expected){app2[quickuser1]@master:app-twoA, app2[quickuser2]@master:app-twoB, app2[quickuser1]@master:app-twoA, app2[quickuser2]@master:app-twoB, app2[quickuser1]@master:app-twoA, app2[quickuser2]@master:app-twoB, app2[quickuser1]@master:app-twoA, app2[quickuser2]@master:app-twoB, app2[quickuser1]@master:app-twoA, app2[quickuser2]@master:app-twoB, app2[quickuser1]@master:app-twoA, app2[quickuser2]@master:app-twoB, app2[quickuser1]@master:app-twoA, app2[quickuser2]@master:app-twoB, app2[quickuser1]@master:app-twoA, app2[quickuser2]@master:app-twoB} ]
The line shows that the bean `MainEjbClient34App` is not secured and called at `app-main` server. The sub-calls to `app-one#` are using the scoped-context and the cluster view needs a time to be established. This is shown as the cluster-view call the `appOne` with the user `quickuser2`. `AppTwo` is called with two different scoped-context settings. Both are used alternately 7 times.
_NOTE:_
* _If exec is called multiple times, the invocation for `app1` might use `app-oneA` and `app-oneB` node due to cluster loadbalancing._
* _If you use a version from JBoss Enterprise Platform 6.1, a new feature will deny the invocation of unsecured methods of `appOne`/`appTwo` since security is enabled but the method does not include @Roles. You need to set 'default-missing-method-permissions-deny-access = false' for the `ejb3` subsystem within the domain profile "ha" and "default" to allow the method invocation. See the install-domain.cli script._
* _For JBoss Enterprise Application Platform 6.0 and AS 7.1.x, the scoped-client-context is not implemented. Therefore you will not see a difference between step 3 and step 4, the properties of the InitialContext will be ignored._
* _For JBoss Enterprise Application Platform 6.0 and AS 7.1.x, the client library must not be changed for this test. But if additional tests are added or a newer server version is used, you might update the property `<jboss.client.bom.version>7.1.1.Final</jboss.client.bom.version>` in the root `pom.xml` to an appropriate version._
Access the JSF application inside the main-application
---------------------
1. Make sure that the deployments are successful as described above.
2. Use a browser to access the JSF application at the following URL: <http://localhost:8080/multi-server-MainApp/>
3. Insert a message in the Text input and invoke the different methods. The result is shown in the browser.
4. See server logfiles and find your given message logged as INFO.
_NOTE :_
* _If you try to invoke `MainEjbClient34App` you need to update the user `quickuser1` and `quickuser2` and give them one of the Roles `AppTwo` or `Intern`._
* _Remember that the scoped-client will be implemented at first with EAP6.1 and will not work before._
Access the Servlet application deployed as a WAR inside a minimal server
---------------------
1. Make sure that the deployments are successful as described above.
2. Use a browser to access the Servlet at the following URL: <http://localhost:8380/appweb/>
3. The Servlet will invoke the remote EJBs directly and show the results, compare that the invocation is successful
_NOTE : If a version from JBoss EAP6.1 is used, a new feature will deny the invocation of unsecured methods of `appOne`/`appTwo` since security is enabled but the method does not include @Roles. You need to set 'default-missing-method-permissions-deny-access = false' for the `ejb3` subsystem within the domain profile "ha" and "default" to allow the method invocation._
Undeploy the Archive
--------------------
1. Make sure you have started the JBoss Server as described above.
2. Open a command line and navigate to the root directory of this quickstart.
3. When you are finished testing, type this command to undeploy the archive:
JBOSS_HOME/bin/jboss-cli.sh --connect --file=undeploy-domain.cli
Run the Quickstart in JBoss Developer Studio or Eclipse
-------------------------------------
You can also start the server and deploy the quickstarts from Eclipse using JBoss tools. For more information, see [Use JBoss Developer Studio or Eclipse to Run the Quickstarts](../README.md#useeclipse)
Debug the Application
------------------------------------
If you want to debug the source code or look at the Javadocs of any library in the project, run either of the following commands to pull them into your local repository. The IDE should then detect them.
mvn dependency:sources
mvn dependency:resolve -Dclassifier=javadoc
| {
"content_hash": "ed588efe898a9e08be6024f4da8ae1c2",
"timestamp": "",
"source": "github",
"line_count": 200,
"max_line_length": 935,
"avg_line_length": 60.18,
"alnum_prop": 0.7362080425390495,
"repo_name": "lindstae/MyTestRepo",
"id": "ffa10b6d49103dc9b7cf736d29df0d922e76c4a9",
"size": "12036",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ejb-multi-server/README.md",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "7428"
},
{
"name": "Java",
"bytes": "1807026"
},
{
"name": "JavaScript",
"bytes": "1624924"
},
{
"name": "Objective-C",
"bytes": "700304"
},
{
"name": "Perl",
"bytes": "890"
},
{
"name": "Ruby",
"bytes": "5266"
},
{
"name": "Shell",
"bytes": "20431"
}
],
"symlink_target": ""
} |
./recompile.sh

# Check number of command line arguments
if [ $# -lt 2 ]; then
  echo "Illegal number of command line arguments"
  echo "Usage: MODE PATH"
  # Exit non-zero on a usage error (a bare `exit` would return the status
  # of the preceding echo, i.e. success).
  exit 1
fi

MODE=$1
CONFDIR=$2
OUTPUTDIR=../test/m_bucket/optimality_logs/

# Clear out logs from any previous run; -f keeps this quiet when none exist.
rm -f "$OUTPUTDIR"/log*

# Every entry in the config directory is a test configuration.
TESTCONFS=( $(ls "${CONFDIR}/") )
COUNT=${#TESTCONFS[@]}

declare -i i
i=1
ALL_OK=true

echo ""
# Run all tests given, one by one
for TEST in "${TESTCONFS[@]}"
do
  echo "Running test $i ($TEST) out of ${COUNT}..."
  ./squall_local.sh "$MODE" "$CONFDIR/$TEST" > "$OUTPUTDIR/log_$TEST"
  # A successful run ends with a final log line starting with "OK:".
  if [ "$(tail -n 1 "$OUTPUTDIR/log_$TEST" | cut -d' ' -f1)" != "OK:" ]; then
    echo "Error: Test $TEST failed."
    ALL_OK=false
  else
    echo "Test $TEST completed successfully..."
  fi
  i+=1
done

echo ""
if $ALL_OK ; then
  echo "ALL TESTS OK!"
else
  echo "Some tests failed. Check log files"
fi
| {
"content_hash": "a0380eb2770864af84524d3beead7292",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 79,
"avg_line_length": 20.13953488372093,
"alnum_prop": 0.6443418013856813,
"repo_name": "khuevu/squall",
"id": "9c15a66e416d7b1303166c6627d22c936573b326",
"size": "878",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "bin/Solaris/loop_squall_local.sh",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "161759"
},
{
"name": "Java",
"bytes": "2597298"
},
{
"name": "Makefile",
"bytes": "569"
},
{
"name": "Perl",
"bytes": "75098"
},
{
"name": "Prolog",
"bytes": "4799"
},
{
"name": "Python",
"bytes": "15207"
},
{
"name": "Ruby",
"bytes": "31566"
},
{
"name": "Scala",
"bytes": "50718"
},
{
"name": "Shell",
"bytes": "104242"
}
],
"symlink_target": ""
} |
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
<groupId>org.crazycake</groupId>
<artifactId>shiro-redis</artifactId>
<version>3.3.2</version>
<packaging>jar</packaging>
<name>shiro-redis</name>
    <description>Shiro only provides cache support for Ehcache and ConcurrentHashMap out of the box. This project is an implementation of a Redis-based cache that can be used with Shiro. Hope it helps!</description>
<url>https://github.com/alexxiyang/shiro-redis</url>
<properties>
<project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
</properties>
<licenses>
<license>
<name>The Apache Software License, Version 2.0</name>
<url>http://www.apache.org/licenses/LICENSE-2.0.txt</url>
<distribution>repo</distribution>
</license>
</licenses>
<dependencies>
<dependency>
<groupId>redis.clients</groupId>
<artifactId>jedis</artifactId>
<version>3.3.0</version>
</dependency>
<dependency>
<groupId>org.slf4j</groupId>
<artifactId>slf4j-api</artifactId>
<version>1.7.30</version>
</dependency>
<dependency>
<groupId>org.apache.shiro</groupId>
<artifactId>shiro-core</artifactId>
<version>1.6.0</version>
</dependency>
<!-- Test -->
<dependency>
<groupId>org.junit.jupiter</groupId>
<artifactId>junit-jupiter-api</artifactId>
<version>5.6.2</version>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.slf4j</groupId>
<artifactId>slf4j-simple</artifactId>
<version>1.7.30</version>
<scope>test</scope>
</dependency>
<dependency>
<groupId>commons-logging</groupId>
<artifactId>commons-logging</artifactId>
<version>1.2</version>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.mockito</groupId>
<artifactId>mockito-core</artifactId>
<version>3.5.7</version>
<scope>test</scope>
</dependency>
<dependency>
<groupId>com.github.javafaker</groupId>
<artifactId>javafaker</artifactId>
<version>1.0.2</version>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.hamcrest</groupId>
<artifactId>hamcrest</artifactId>
<version>2.2</version>
<scope>test</scope>
</dependency>
</dependencies>
<developers>
<developer>
<id>alexxiyang</id>
<name>Alex Yang</name>
<email>alexxiyang@gmail.com</email>
<timezone>GMT-7</timezone>
<organizationUrl>https://github.com/alexxiyang</organizationUrl>
<roles>
</roles>
</developer>
</developers>
<scm>
<connection>scm:git:https://github.com/alexxiyang/shiro-redis.git</connection>
<developerConnection>scm:git:https://github.com/alexxiyang/shiro-redis.git</developerConnection>
<url>https://github.com/alexxiyang/shiro-redis.git</url>
</scm>
<distributionManagement>
<snapshotRepository>
<id>ossrh</id>
<url>https://oss.sonatype.org/content/repositories/snapshots</url>
</snapshotRepository>
<repository>
<id>ossrh</id>
<url>https://oss.sonatype.org/service/local/staging/deploy/maven2/</url>
</repository>
</distributionManagement>
<build>
<finalName>shiro-redis</finalName>
<plugins>
<plugin>
<artifactId>maven-compiler-plugin</artifactId>
<version>3.8.0</version>
<configuration>
<source>1.8</source>
<target>1.8</target>
<encoding>UTF-8</encoding>
<compilerArgument>-nowarn</compilerArgument>
</configuration>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-checkstyle-plugin</artifactId>
<version>3.1.0</version>
<executions>
<execution>
<id>checkstyle</id>
<phase>validate</phase>
<goals>
<goal>check</goal>
</goals>
<configuration>
<failOnViolation>true</failOnViolation>
<consoleOutput>true</consoleOutput>
<configLocation>checkstyle.xml</configLocation>
</configuration>
</execution>
</executions>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-surefire-plugin</artifactId>
<version>2.22.0</version>
<!-- Make JUnit 5 works with Maven 3.6.3 -->
<dependencies>
<dependency>
<groupId>org.junit.platform</groupId>
<artifactId>junit-platform-surefire-provider</artifactId>
<version>1.3.2</version>
</dependency>
<dependency>
<groupId>org.junit.jupiter</groupId>
<artifactId>junit-jupiter-engine</artifactId>
<version>5.6.2</version>
</dependency>
</dependencies>
</plugin>
</plugins>
</build>
<profiles>
<profile>
<id>release-sign-artifacts</id>
<activation>
<property>
<name>release</name>
<value>true</value>
</property>
</activation>
<properties>
<gpg.keyname>D688E942</gpg.keyname> <!-- GPG Key ID to use for signing -->
<release.username>alexxiyang</release.username>
</properties>
<build>
<plugins>
<plugin>
<groupId>org.sonatype.plugins</groupId>
<artifactId>nexus-staging-maven-plugin</artifactId>
<version>1.6.8</version>
<extensions>true</extensions>
<configuration>
<serverId>ossrh</serverId>
<nexusUrl>https://oss.sonatype.org/</nexusUrl>
<autoReleaseAfterClose>true</autoReleaseAfterClose>
</configuration>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-source-plugin</artifactId>
<version>3.2.1</version>
<executions>
<execution>
<id>attach-sources</id>
<goals>
<goal>jar-no-fork</goal>
</goals>
</execution>
</executions>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-javadoc-plugin</artifactId>
<version>3.2.0</version>
<executions>
<execution>
<id>attach-javadocs</id>
<goals>
<goal>jar</goal>
</goals>
</execution>
</executions>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-gpg-plugin</artifactId>
<version>1.6</version>
<executions>
<execution>
<id>sign-artifacts</id>
<phase>verify</phase>
<goals>
<goal>sign</goal>
</goals>
</execution>
</executions>
</plugin>
</plugins>
</build>
</profile>
</profiles>
</project>
| {
"content_hash": "90f9c8845e83efb4e5867d013d1285ee",
"timestamp": "",
"source": "github",
"line_count": 234,
"max_line_length": 173,
"avg_line_length": 27.72222222222222,
"alnum_prop": 0.653769076614768,
"repo_name": "alexxiyang/shiro-redis",
"id": "00d3bfb586f3e7e9bbf62377c387b7d16902f6aa",
"size": "6487",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pom.xml",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Java",
"bytes": "87185"
}
],
"symlink_target": ""
} |
import abc
import collections
import os
import re
import shutil
import time
import netaddr
from neutron_lib import constants
from neutron_lib import exceptions
from oslo_config import cfg
from oslo_log import log as logging
import oslo_messaging
from oslo_utils import excutils
from oslo_utils import uuidutils
import six
from neutron._i18n import _, _LI, _LW, _LE
from neutron.agent.common import utils as agent_common_utils
from neutron.agent.linux import external_process
from neutron.agent.linux import ip_lib
from neutron.agent.linux import iptables_manager
from neutron.common import constants as n_const
from neutron.common import exceptions as n_exc
from neutron.common import ipv6_utils
from neutron.common import utils as common_utils
from neutron.extensions import extra_dhcp_opt as edo_ext
from neutron.ipam import utils as ipam_utils
# Module-level logger for this DHCP implementation.
LOG = logging.getLogger(__name__)
# Transport protocol names used when describing ports DHCP needs open.
UDP = 'udp'
TCP = 'tcp'
# Well-known server ports: DNS, DHCP over IPv4 (BOOTP server) and DHCPv6.
DNS_PORT = 53
DHCPV4_PORT = 67
DHCPV6_PORT = 547
# Link-local address/prefix on which the metadata service is exposed.
METADATA_DEFAULT_PREFIX = 16
METADATA_DEFAULT_IP = '169.254.169.254'
METADATA_DEFAULT_CIDR = '%s/%d' % (METADATA_DEFAULT_IP,
                                   METADATA_DEFAULT_PREFIX)
METADATA_PORT = 80
# DHCP option code 249: Microsoft's pre-standard classless static route option.
WIN2k3_STATIC_DNS = 249
# Per-network DHCP namespaces are named NS_PREFIX + network id (see NetModel).
NS_PREFIX = 'qdhcp-'
# Service name under which dnsmasq is registered with the process monitor.
DNSMASQ_SERVICE_NAME = 'dnsmasq'
class DictModel(dict):
    """Dict subclass exposing its keys as attributes.

    Nested plain dicts (including those held inside lists and tuples) are
    recursively converted to DictModel so that chained attribute access
    works on the whole structure.
    """

    def __init__(self, *args, **kwargs):
        """Build the dict, then upgrade any nested plain dicts in place."""
        super(DictModel, self).__init__(*args, **kwargs)

        def upgrade(item):
            """Return a DictModel for plain dicts; other values unchanged."""
            if isinstance(item, dict) and not isinstance(item, DictModel):
                return DictModel(item)
            return item

        # Use dict.items() directly rather than six.iteritems(); it behaves
        # identically here and removes the needless six dependency.
        for key, value in self.items():
            if isinstance(value, (list, tuple)):
                # Keep the same container type but upgrade each element.
                self[key] = type(value)(upgrade(item) for item in value)
            else:
                self[key] = upgrade(value)

    def __getattr__(self, name):
        # Missing keys must surface as AttributeError so that hasattr()
        # and getattr() with a default behave as expected.
        try:
            return self[name]
        except KeyError as e:
            raise AttributeError(e)

    def __setattr__(self, name, value):
        self[name] = value

    def __delattr__(self, name):
        del self[name]

    def __str__(self):
        pairs = ['%s=%s' % (k, v) for k, v in self.items()]
        return ', '.join(sorted(pairs))
class NetModel(DictModel):
    """DictModel for a network payload, plus its DHCP namespace name."""

    def __init__(self, d):
        super(NetModel, self).__init__(d)
        # Stored through DictModel.__setattr__, i.e. under the '_ns_name' key.
        self._ns_name = '{}{}'.format(NS_PREFIX, self.id)

    @property
    def namespace(self):
        """Name of the per-network (qdhcp-) namespace for this network."""
        return self._ns_name
@six.add_metaclass(abc.ABCMeta)
class DhcpBase(object):
    """Contract for drivers that run the DHCP service of a single network."""

    def __init__(self, conf, network, process_monitor,
                 version=None, plugin=None):
        self.conf = conf
        self.network = network
        self.process_monitor = process_monitor
        self.version = version
        self.device_manager = DeviceManager(self.conf, plugin)

    @abc.abstractmethod
    def enable(self):
        """Turn on DHCP for this network."""

    @abc.abstractmethod
    def disable(self, retain_port=False):
        """Turn off DHCP for this network, optionally keeping its port."""

    def restart(self):
        """Restart the dhcp service for the network."""
        # Keep the port across the bounce so the interface is reused.
        self.disable(retain_port=True)
        self.enable()

    @abc.abstractproperty
    def active(self):
        """Whether the DHCP server is currently running."""

    @abc.abstractmethod
    def reload_allocations(self):
        """Make the DHCP server re-read its address assignment database."""

    @classmethod
    def existing_dhcp_networks(cls, conf):
        """Return the ids of networks for which configuration exists."""
        raise NotImplementedError()

    @classmethod
    def check_version(cls):
        """Run any version checks required by the DHCP server."""
        raise NotImplementedError()

    @classmethod
    def get_isolated_subnets(cls, network):
        """Return a dict indicating, per subnet, whether it is isolated."""
        raise NotImplementedError()

    @classmethod
    def should_enable_metadata(cls, conf, network):
        """Return True if the metadata-proxy should be enabled."""
        raise NotImplementedError()
@six.add_metaclass(abc.ABCMeta)
class DhcpLocalProcess(DhcpBase):
    """DhcpBase implementation backed by a local, per-network server process.

    Owns the per-network configuration directory and the process lifecycle
    (spawn, monitor, disable); subclasses supply the actual spawn logic.
    """
    PORTS = []

    def __init__(self, conf, network, process_monitor, version=None,
                 plugin=None):
        super(DhcpLocalProcess, self).__init__(conf, network, process_monitor,
                                               version, plugin)
        # Each network gets its own config dir: <dhcp_confs>/<network id>.
        self.confs_dir = self.get_confs_dir(conf)
        self.network_conf_dir = os.path.join(self.confs_dir, network.id)
        common_utils.ensure_dir(self.network_conf_dir)

    @staticmethod
    def get_confs_dir(conf):
        # Normalized absolute root under which per-network config dirs live.
        return os.path.abspath(os.path.normpath(conf.dhcp_confs))

    def get_conf_file_name(self, kind):
        """Returns the file name for a given kind of config file."""
        return os.path.join(self.network_conf_dir, kind)

    def _remove_config_files(self):
        # Remove the whole per-network config dir; a missing dir is fine
        # (ignore_errors).
        shutil.rmtree(self.network_conf_dir, ignore_errors=True)

    def _enable_dhcp(self):
        """check if there is a subnet within the network with dhcp enabled."""
        for subnet in self.network.subnets:
            if subnet.enable_dhcp:
                return True
        return False

    def enable(self):
        """Enables DHCP for this network by spawning a local process."""
        # A running instance is bounced; otherwise start fresh, but only
        # if at least one subnet actually wants DHCP.
        if self.active:
            self.restart()
        elif self._enable_dhcp():
            common_utils.ensure_dir(self.network_conf_dir)
            interface_name = self.device_manager.setup(self.network)
            self.interface_name = interface_name
            self.spawn_process()

    def _get_process_manager(self, cmd_callback=None):
        # ProcessManager keyed by network id; the pid file lives in the
        # per-network config dir.
        return external_process.ProcessManager(
            conf=self.conf,
            uuid=self.network.id,
            namespace=self.network.namespace,
            default_cmd_callback=cmd_callback,
            pid_file=self.get_conf_file_name('pid'),
            run_as_root=True)

    def disable(self, retain_port=False):
        """Disable DHCP for this network by killing the local process."""
        # Unregister from monitoring first so the monitor does not respawn
        # the process we are about to stop.
        self.process_monitor.unregister(self.network.id, DNSMASQ_SERVICE_NAME)
        self._get_process_manager().disable()
        if not retain_port:
            self._destroy_namespace_and_port()
        self._remove_config_files()

    def _destroy_namespace_and_port(self):
        # Best-effort teardown: failures to delete the interface or the
        # namespace are logged as warnings rather than raised.
        try:
            self.device_manager.destroy(self.network, self.interface_name)
        except RuntimeError:
            LOG.warning(_LW('Failed trying to delete interface: %s'),
                        self.interface_name)
        ns_ip = ip_lib.IPWrapper(namespace=self.network.namespace)
        try:
            ns_ip.netns.delete(self.network.namespace)
        except RuntimeError:
            LOG.warning(_LW('Failed trying to delete namespace: %s'),
                        self.network.namespace)

    def _get_value_from_conf_file(self, kind, converter=None):
        """A helper function to read a value from one of the state files."""
        file_name = self.get_conf_file_name(kind)
        msg = _('Error while reading %s')
        try:
            with open(file_name, 'r') as f:
                try:
                    return converter(f.read()) if converter else f.read()
                except ValueError:
                    msg = _('Unable to convert value in %s')
        except IOError:
            msg = _('Unable to access %s')
        # Any failure falls through here: log at debug and return None.
        LOG.debug(msg, file_name)
        return None

    @property
    def interface_name(self):
        # Persisted in the 'interface' state file so it survives restarts.
        return self._get_value_from_conf_file('interface')

    @interface_name.setter
    def interface_name(self, value):
        interface_file_path = self.get_conf_file_name('interface')
        common_utils.replace_file(interface_file_path, value)

    @property
    def active(self):
        return self._get_process_manager().active

    @abc.abstractmethod
    def spawn_process(self):
        # Subclasses start their concrete server process here.
        pass
class Dnsmasq(DhcpLocalProcess):
    # The ports that need to be opened when security policies are active
    # on the Neutron port used for DHCP. These are provided as a convenience
    # for users of this class.
    PORTS = {constants.IP_VERSION_4:
             [(UDP, DNS_PORT), (TCP, DNS_PORT), (UDP, DHCPV4_PORT)],
             constants.IP_VERSION_6:
             [(UDP, DNS_PORT), (TCP, DNS_PORT), (UDP, DHCPV6_PORT)],
             }

    # printf-style template for the per-subnet dnsmasq tag names
    # (tag0, tag1, ... - see _build_cmdline_callback).
    _TAG_PREFIX = 'tag%d'

    # Prefix marker string; presumably used for client-id entries in the
    # generated dnsmasq files - confirm against usages later in this file.
    _ID = 'id:'
    @classmethod
    def check_version(cls):
        # No version probing is performed for dnsmasq; this no-op exists to
        # satisfy the DhcpBase.check_version() interface.
        pass
@classmethod
def existing_dhcp_networks(cls, conf):
"""Return a list of existing networks ids that we have configs for."""
confs_dir = cls.get_confs_dir(conf)
try:
return [
c for c in os.listdir(confs_dir)
if uuidutils.is_uuid_like(c)
]
except OSError:
return []
    def _build_cmdline_callback(self, pid_file):
        """Assemble the dnsmasq command line for this network.

        Used as the ProcessManager command callback (see
        _spawn_or_reload_process); ``pid_file`` is the path dnsmasq should
        write its pid to.
        """
        # We ignore local resolv.conf if dns servers are specified
        # or if local resolution is explicitly disabled.
        _no_resolv = (
            '--no-resolv' if self.conf.dnsmasq_dns_servers or
            not self.conf.dnsmasq_local_resolv else '')
        cmd = [
            'dnsmasq',
            '--no-hosts',
            _no_resolv,
            '--strict-order',
            '--except-interface=lo',
            '--pid-file=%s' % pid_file,
            '--dhcp-hostsfile=%s' % self.get_conf_file_name('host'),
            '--addn-hosts=%s' % self.get_conf_file_name('addn_hosts'),
            '--dhcp-optsfile=%s' % self.get_conf_file_name('opts'),
            '--dhcp-leasefile=%s' % self.get_conf_file_name('leases'),
            '--dhcp-match=set:ipxe,175',
        ]
        # Bridged drivers bind the DHCP interface directly; otherwise bind
        # dynamically and also serve the tap devices.
        if self.device_manager.driver.bridged:
            cmd += [
                '--bind-interfaces',
                '--interface=%s' % self.interface_name,
            ]
        else:
            cmd += [
                '--bind-dynamic',
                '--interface=%s' % self.interface_name,
                '--interface=tap*',
                '--bridge-interface=%s,tap*' % self.interface_name,
            ]

        # Emit one --dhcp-range per DHCP-enabled subnet (tagged via
        # _TAG_PREFIX) and track how many leases could be needed.
        possible_leases = 0
        for i, subnet in enumerate(self.network.subnets):
            mode = None
            # if a subnet is specified to have dhcp disabled
            if not subnet.enable_dhcp:
                continue
            if subnet.ip_version == 4:
                mode = 'static'
            else:
                # Note(scollins) If the IPv6 attributes are not set, set it as
                # static to preserve previous behavior
                addr_mode = getattr(subnet, 'ipv6_address_mode', None)
                ra_mode = getattr(subnet, 'ipv6_ra_mode', None)
                if (addr_mode in [n_const.DHCPV6_STATEFUL,
                                  n_const.DHCPV6_STATELESS] or
                        not addr_mode and not ra_mode):
                    mode = 'static'

            cidr = netaddr.IPNetwork(subnet.cidr)

            # -1 is the configured sentinel for an infinite lease time.
            if self.conf.dhcp_lease_duration == -1:
                lease = 'infinite'
            else:
                lease = '%ss' % self.conf.dhcp_lease_duration

            # mode is optional and is not set - skip it
            if mode:
                if subnet.ip_version == 4:
                    cmd.append('--dhcp-range=%s%s,%s,%s,%s' %
                               ('set:', self._TAG_PREFIX % i,
                                cidr.network, mode, lease))
                else:
                    cmd.append('--dhcp-range=%s%s,%s,%s,%d,%s' %
                               ('set:', self._TAG_PREFIX % i,
                                cidr.network, mode,
                                cidr.prefixlen, lease))
                possible_leases += cidr.size

        if cfg.CONF.advertise_mtu:
            mtu = getattr(self.network, 'mtu', 0)
            # Do not advertise unknown mtu
            if mtu > 0:
                cmd.append('--dhcp-option-force=option:mtu,%d' % mtu)

        # Cap the limit because creating lots of subnets can inflate
        # this possible lease cap.
        cmd.append('--dhcp-lease-max=%d' %
                   min(possible_leases, self.conf.dnsmasq_lease_max))

        cmd.append('--conf-file=%s' % self.conf.dnsmasq_config_file)
        if self.conf.dnsmasq_dns_servers:
            cmd.extend(
                '--server=%s' % server
                for server in self.conf.dnsmasq_dns_servers)

        if self.conf.dhcp_domain:
            cmd.append('--domain=%s' % self.conf.dhcp_domain)

        if self.conf.dhcp_broadcast_reply:
            cmd.append('--dhcp-broadcast')

        # Optional per-network dnsmasq query/DHCP logging.
        if self.conf.dnsmasq_base_log_dir:
            log_dir = os.path.join(
                self.conf.dnsmasq_base_log_dir,
                self.network.id)
            try:
                if not os.path.exists(log_dir):
                    os.makedirs(log_dir)
            except OSError:
                LOG.error(_LE('Error while create dnsmasq log dir: %s'),
                          log_dir)
            else:
                log_filename = os.path.join(log_dir, 'dhcp_dns_log')
                cmd.append('--log-queries')
                cmd.append('--log-dhcp')
                cmd.append('--log-facility=%s' % log_filename)

        return cmd
    def spawn_process(self):
        """Spawn the process, if it's not spawned already."""
        # we only need to generate the lease file the first time dnsmasq starts
        # rather than on every reload since dnsmasq will keep the file current
        self._output_init_lease_file()
        # reload_with_HUP=False means a full (re)start rather than a HUP.
        self._spawn_or_reload_process(reload_with_HUP=False)
    def _spawn_or_reload_process(self, reload_with_HUP):
        """Spawns or reloads a Dnsmasq process for the network.
        When reload_with_HUP is True, dnsmasq receives a HUP signal,
        or it's reloaded if the process is not running.
        """
        # Regenerate the hosts/addn_hosts/opts files before (re)starting
        # dnsmasq so it picks up the current allocations.
        self._output_config_files()
        pm = self._get_process_manager(
            cmd_callback=self._build_cmdline_callback)
        pm.enable(reload_cfg=reload_with_HUP)
        # Register with the external process monitor so dnsmasq is
        # respawned if it dies unexpectedly.
        self.process_monitor.register(uuid=self.network.id,
                                      service_name=DNSMASQ_SERVICE_NAME,
                                      monitored_process=pm)
def _release_lease(self, mac_address, ip, client_id):
"""Release a DHCP lease."""
if netaddr.IPAddress(ip).version == constants.IP_VERSION_6:
# Note(SridharG) dhcp_release is only supported for IPv4
# addresses. For more details, please refer to man page.
return
cmd = ['dhcp_release', self.interface_name, ip, mac_address]
if client_id:
cmd.append(client_id)
ip_wrapper = ip_lib.IPWrapper(namespace=self.network.namespace)
ip_wrapper.netns.execute(cmd, run_as_root=True)
    def _output_config_files(self):
        # Write the three dnsmasq input files: the dhcp-hostsfile, the
        # addn-hosts resolution file and the per-tag options file.
        self._output_hosts_file()
        self._output_addn_hosts_file()
        self._output_opts_file()
    def reload_allocations(self):
        """Rebuild the dnsmasq config and signal the dnsmasq to reload."""
        # If all subnets turn off dhcp, kill the process.
        if not self._enable_dhcp():
            self.disable()
            LOG.debug('Killing dnsmasq for network since all subnets have '
                      'turned off DHCP: %s', self.network.id)
            return
        # Release leases for ports that no longer exist, then HUP dnsmasq
        # so it re-reads the regenerated config files.
        self._release_unused_leases()
        self._spawn_or_reload_process(reload_with_HUP=True)
        LOG.debug('Reloading allocations for network: %s', self.network.id)
        self.device_manager.update(self.network, self.interface_name)
def _sort_fixed_ips_for_dnsmasq(self, fixed_ips, v6_nets):
"""Sort fixed_ips so that stateless IPv6 subnets appear first.
For example, If a port with v6 extra_dhcp_opts is on a network with
IPv4 and IPv6 stateless subnets. Then dhcp host file will have
below 2 entries for same MAC,
fa:16:3e:8f:9d:65,30.0.0.5,set:aabc7d33-4874-429e-9637-436e4232d2cd
(entry for IPv4 dhcp)
fa:16:3e:8f:9d:65,set:aabc7d33-4874-429e-9637-436e4232d2cd
(entry for stateless IPv6 for v6 options)
dnsmasq internal details for processing host file entries
1) dnsmasq reads the host file from EOF.
2) So it first picks up stateless IPv6 entry,
fa:16:3e:8f:9d:65,set:aabc7d33-4874-429e-9637-436e4232d2cd
3) But dnsmasq doesn't have sufficient checks to skip this entry and
pick next entry, to process dhcp IPv4 request.
4) So dnsmasq uses this entry to process dhcp IPv4 request.
5) As there is no ip in this entry, dnsmasq logs "no address available"
and fails to send DHCPOFFER message.
As we rely on internal details of dnsmasq to understand and fix the
issue, Ihar sent a mail to dnsmasq-discuss mailing list
http://lists.thekelleys.org.uk/pipermail/dnsmasq-discuss/2015q2/
009650.html
So if we reverse the order of writing entries in host file,
so that entry for stateless IPv6 comes first,
then dnsmasq can correctly fetch the IPv4 address.
"""
return sorted(
fixed_ips,
key=lambda fip: ((fip.subnet_id in v6_nets) and (
v6_nets[fip.subnet_id].ipv6_address_mode == (
n_const.DHCPV6_STATELESS))),
reverse=True)
    def _iter_hosts(self):
        """Iterate over hosts on the network.

        For each fixed IP of each port we yield a tuple:
        (
            port,      # the port owning the address.
            alloc,     # the allocated fixed ip (ip_address + subnet_id).
            hostname,  # short host name.
            fqdn,      # 'hostname[.dhcp_domain]' (or server-supplied fqdn).
            no_dhcp,   # True when the address must not get a DHCP entry
                       # (SLAAC or DHCPv6-stateless subnet).
            no_opts,   # True when no options should be written (SLAAC).
        )
        """
        v6_nets = dict((subnet.id, subnet) for subnet in
                       self.network.subnets if subnet.ip_version == 6)
        for port in self.network.ports:
            fixed_ips = self._sort_fixed_ips_for_dnsmasq(port.fixed_ips,
                                                         v6_nets)
            # Confirm whether Neutron server supports dns_name attribute in the
            # ports API
            dns_assignment = getattr(port, 'dns_assignment', None)
            if dns_assignment:
                dns_ip_map = {d.ip_address: d for d in dns_assignment}
            for alloc in fixed_ips:
                no_dhcp = False
                no_opts = False
                if alloc.subnet_id in v6_nets:
                    addr_mode = v6_nets[alloc.subnet_id].ipv6_address_mode
                    no_dhcp = addr_mode in (n_const.IPV6_SLAAC,
                                            n_const.DHCPV6_STATELESS)
                    # we don't setup anything for SLAAC. It doesn't make sense
                    # to provide options for a client that won't use DHCP
                    no_opts = addr_mode == n_const.IPV6_SLAAC
                # If dns_name attribute is supported by ports API, return the
                # dns_assignment generated by the Neutron server. Otherwise,
                # generate hostname and fqdn locally (previous behaviour)
                if dns_assignment:
                    hostname = dns_ip_map[alloc.ip_address].hostname
                    fqdn = dns_ip_map[alloc.ip_address].fqdn
                else:
                    hostname = 'host-%s' % alloc.ip_address.replace(
                        '.', '-').replace(':', '-')
                    fqdn = hostname
                    if self.conf.dhcp_domain:
                        fqdn = '%s.%s' % (fqdn, self.conf.dhcp_domain)
                yield (port, alloc, hostname, fqdn, no_dhcp, no_opts)
    def _get_port_extra_dhcp_opts(self, port):
        # Returns the port's extra_dhcp_opts list, or False when the
        # extension attribute is not present on the port.
        return getattr(port, edo_ext.EXTRADHCPOPTS, False)
    def _output_init_lease_file(self):
        """Write a fake lease file to bootstrap dnsmasq.

        The generated file is passed to the --dhcp-leasefile option of dnsmasq.
        This is used as a bootstrapping mechanism to avoid NAKing active leases
        when a dhcp server is scheduled to another agent. Using a leasefile
        will also prevent dnsmasq from NAKing or ignoring renewals after a
        restart.

        Format is as follows:
        epoch-timestamp mac_addr ip_addr hostname client-ID

        Returns the path of the generated file.
        """
        filename = self.get_conf_file_name('leases')
        buf = six.StringIO()
        LOG.debug('Building initial lease file: %s', filename)
        # we make up a lease time for the database entry
        if self.conf.dhcp_lease_duration == -1:
            # Even with an infinite lease, a client may choose to renew a
            # previous lease on reboot or interface bounce so we should have
            # an entry for it.
            # Dnsmasq timestamp format for an infinite lease is 0.
            timestamp = 0
        else:
            timestamp = int(time.time()) + self.conf.dhcp_lease_duration
        dhcp_enabled_subnet_ids = [s.id for s in self.network.subnets
                                   if s.enable_dhcp]
        for host_tuple in self._iter_hosts():
            port, alloc, hostname, name, no_dhcp, no_opts = host_tuple
            # don't write ip address which belongs to a dhcp disabled subnet
            # or an IPv6 SLAAC/stateless subnet
            if no_dhcp or alloc.subnet_id not in dhcp_enabled_subnet_ids:
                continue
            ip_address = self._format_address_for_dnsmasq(alloc.ip_address)
            # all that matters is the mac address and IP. the hostname and
            # client ID will be overwritten on the next renewal.
            buf.write('%s %s %s * *\n' %
                      (timestamp, port.mac_address, ip_address))
        contents = buf.getvalue()
        common_utils.replace_file(filename, contents)
        LOG.debug('Done building initial lease file %s with contents:\n%s',
                  filename, contents)
        return filename
@staticmethod
def _format_address_for_dnsmasq(address):
# (dzyu) Check if it is legal ipv6 address, if so, need wrap
# it with '[]' to let dnsmasq to distinguish MAC address from
# IPv6 address.
if netaddr.valid_ipv6(address):
return '[%s]' % address
return address
    def _output_hosts_file(self):
        """Writes a dnsmasq compatible dhcp hosts file.

        The generated file is sent to the --dhcp-hostsfile option of dnsmasq,
        and lists the hosts on the network which should receive a dhcp lease.
        Each line in this file is in the form::

            'mac_address,FQDN,ip_address'

        IMPORTANT NOTE: a dnsmasq instance does not resolve hosts defined in
        this file if it did not give a lease to a host listed in it (e.g.:
        multiple dnsmasq instances on the same network if this network is on
        multiple network nodes). This file is only defining hosts which
        should receive a dhcp lease, the hosts resolution in itself is
        defined by the `_output_addn_hosts_file` method.

        Returns the path of the generated file.
        """
        buf = six.StringIO()
        filename = self.get_conf_file_name('host')
        LOG.debug('Building host file: %s', filename)
        dhcp_enabled_subnet_ids = [s.id for s in self.network.subnets
                                   if s.enable_dhcp]
        # NOTE(ihrachyshka): the loop should not log anything inside it, to
        # avoid potential performance drop when lots of hosts are dumped
        for host_tuple in self._iter_hosts():
            port, alloc, hostname, name, no_dhcp, no_opts = host_tuple
            if no_dhcp:
                # Address-less entry: only a tag so per-port options can
                # still be applied (e.g. DHCPv6 stateless).
                if not no_opts and self._get_port_extra_dhcp_opts(port):
                    buf.write('%s,%s%s\n' %
                              (port.mac_address, 'set:', port.id))
                continue
            # don't write ip address which belongs to a dhcp disabled subnet.
            if alloc.subnet_id not in dhcp_enabled_subnet_ids:
                continue
            ip_address = self._format_address_for_dnsmasq(alloc.ip_address)
            if self._get_port_extra_dhcp_opts(port):
                client_id = self._get_client_id(port)
                if client_id and len(port.extra_dhcp_opts) > 1:
                    buf.write('%s,%s%s,%s,%s,%s%s\n' %
                              (port.mac_address, self._ID, client_id, name,
                               ip_address, 'set:', port.id))
                elif client_id and len(port.extra_dhcp_opts) == 1:
                    buf.write('%s,%s%s,%s,%s\n' %
                              (port.mac_address, self._ID, client_id, name,
                               ip_address))
                else:
                    buf.write('%s,%s,%s,%s%s\n' %
                              (port.mac_address, name, ip_address,
                               'set:', port.id))
            else:
                buf.write('%s,%s,%s\n' %
                          (port.mac_address, name, ip_address))
        common_utils.replace_file(filename, buf.getvalue())
        LOG.debug('Done building host file %s', filename)
        return filename
def _get_client_id(self, port):
if self._get_port_extra_dhcp_opts(port):
for opt in port.extra_dhcp_opts:
if opt.opt_name == edo_ext.CLIENT_ID:
return opt.opt_value
def _read_hosts_file_leases(self, filename):
leases = set()
try:
with open(filename) as f:
for l in f.readlines():
host = l.strip().split(',')
mac = host[0]
client_id = None
if host[1].startswith('set:'):
continue
if host[1].startswith(self._ID):
ip = host[3].strip('[]')
client_id = host[1][len(self._ID):]
else:
ip = host[2].strip('[]')
leases.add((ip, mac, client_id))
except (OSError, IOError):
LOG.debug('Error while reading hosts file %s', filename)
return leases
    def _release_unused_leases(self):
        """Release leases present in the hosts file but no longer allocated.

        Also unplugs this host's DHCP interface when no DHCP port for this
        host remains on the network.
        """
        filename = self.get_conf_file_name('host')
        old_leases = self._read_hosts_file_leases(filename)
        new_leases = set()
        dhcp_port_exists = False
        dhcp_port_on_this_host = self.device_manager.get_device_id(
            self.network)
        for port in self.network.ports:
            client_id = self._get_client_id(port)
            for alloc in port.fixed_ips:
                new_leases.add((alloc.ip_address, port.mac_address, client_id))
            if port.device_id == dhcp_port_on_this_host:
                dhcp_port_exists = True
        # Anything present before but absent now gets released.
        for ip, mac, client_id in old_leases - new_leases:
            self._release_lease(mac, ip, client_id)
        if not dhcp_port_exists:
            self.device_manager.driver.unplug(
                self.interface_name, namespace=self.network.namespace)
def _output_addn_hosts_file(self):
"""Writes a dnsmasq compatible additional hosts file.
The generated file is sent to the --addn-hosts option of dnsmasq,
and lists the hosts on the network which should be resolved even if
the dnsmasq instance did not give a lease to the host (see the
`_output_hosts_file` method).
Each line in this file is in the same form as a standard /etc/hosts
file.
"""
buf = six.StringIO()
for host_tuple in self._iter_hosts():
port, alloc, hostname, fqdn, no_dhcp, no_opts = host_tuple
# It is compulsory to write the `fqdn` before the `hostname` in
# order to obtain it in PTR responses.
if alloc:
buf.write('%s\t%s %s\n' % (alloc.ip_address, fqdn, hostname))
addn_hosts = self.get_conf_file_name('addn_hosts')
common_utils.replace_file(addn_hosts, buf.getvalue())
return addn_hosts
    def _output_opts_file(self):
        """Write a dnsmasq compatible options file."""
        # Per-subnet options first, then per-port options that reference
        # the subnet tag indexes computed in the first pass.
        options, subnet_index_map = self._generate_opts_per_subnet()
        options += self._generate_opts_per_port(subnet_index_map)
        name = self.get_conf_file_name('opts')
        common_utils.replace_file(name, '\n'.join(options))
        return name
    def _generate_opts_per_subnet(self):
        """Build per-subnet dnsmasq option lines.

        Returns (options, subnet_index_map) where subnet_index_map maps
        subnet id -> tag index for subnets that did not supply their own
        dns-server, so `_generate_opts_per_port` can add the dnsmasq IPs
        as nameservers for them.
        """
        options = []
        subnet_index_map = {}
        if self.conf.enable_isolated_metadata or self.conf.force_metadata:
            subnet_to_interface_ip = self._make_subnet_interface_ip_map()
        isolated_subnets = self.get_isolated_subnets(self.network)
        for i, subnet in enumerate(self.network.subnets):
            addr_mode = getattr(subnet, 'ipv6_address_mode', None)
            # No options for DHCP-disabled or IPv6 SLAAC subnets.
            if (not subnet.enable_dhcp or
                (subnet.ip_version == 6 and
                 addr_mode == n_const.IPV6_SLAAC)):
                continue
            if subnet.dns_nameservers:
                options.append(
                    self._format_option(
                        subnet.ip_version, i, 'dns-server',
                        ','.join(
                            Dnsmasq._convert_to_literal_addrs(
                                subnet.ip_version, subnet.dns_nameservers))))
            else:
                # use the dnsmasq ip as nameservers only if there is no
                # dns-server submitted by the server
                subnet_index_map[subnet.id] = i
            if self.conf.dhcp_domain and subnet.ip_version == 6:
                options.append('tag:tag%s,option6:domain-search,%s' %
                               (i, ''.join(self.conf.dhcp_domain)))
            gateway = subnet.gateway_ip
            host_routes = []
            for hr in subnet.host_routes:
                if hr.destination == constants.IPv4_ANY:
                    # A 0.0.0.0/0 host route acts as the default gateway
                    # when the subnet itself has none.
                    if not gateway:
                        gateway = hr.nexthop
                else:
                    host_routes.append("%s,%s" % (hr.destination, hr.nexthop))
            # Add host routes for isolated network segments
            if (self.conf.force_metadata or
                (isolated_subnets[subnet.id] and
                    self.conf.enable_isolated_metadata and
                    subnet.ip_version == 4)):
                subnet_dhcp_ip = subnet_to_interface_ip[subnet.id]
                host_routes.append(
                    '%s/32,%s' % (METADATA_DEFAULT_IP, subnet_dhcp_ip)
                )
            elif not isolated_subnets[subnet.id] and gateway:
                host_routes.append(
                    '%s/32,%s' % (METADATA_DEFAULT_IP, gateway)
                )
            if subnet.ip_version == 4:
                # Advertise on-link routes for the network's other IPv4
                # subnets so clients can reach them without the router.
                host_routes.extend(["%s,0.0.0.0" % (s.cidr) for s in
                                    self.network.subnets
                                    if (s.ip_version == 4 and
                                        s.cidr != subnet.cidr)])
                if host_routes:
                    if gateway:
                        host_routes.append("%s,%s" % (constants.IPv4_ANY,
                                                      gateway))
                    options.append(
                        self._format_option(subnet.ip_version, i,
                                            'classless-static-route',
                                            ','.join(host_routes)))
                    options.append(
                        self._format_option(subnet.ip_version, i,
                                            WIN2k3_STATIC_DNS,
                                            ','.join(host_routes)))
                if gateway:
                    options.append(self._format_option(subnet.ip_version,
                                                       i, 'router',
                                                       gateway))
                else:
                    options.append(self._format_option(subnet.ip_version,
                                                       i, 'router'))
        return options, subnet_index_map
    def _generate_opts_per_port(self, subnet_index_map):
        """Build per-port dnsmasq option lines (extra_dhcp_opts).

        Also adds the dnsmasq interface IPs as dns-server for subnets in
        subnet_index_map when a subnet is served by several dnsmasq
        instances.
        """
        options = []
        dhcp_ips = collections.defaultdict(list)
        for port in self.network.ports:
            if self._get_port_extra_dhcp_opts(port):
                port_ip_versions = set(
                    [netaddr.IPAddress(ip.ip_address).version
                     for ip in port.fixed_ips])
                for opt in port.extra_dhcp_opts:
                    # client-id is written in the hosts file, not here.
                    if opt.opt_name == edo_ext.CLIENT_ID:
                        continue
                    opt_ip_version = opt.ip_version
                    if opt_ip_version in port_ip_versions:
                        options.append(
                            self._format_option(opt_ip_version, port.id,
                                                opt.opt_name, opt.opt_value))
                    else:
                        LOG.info(_LI("Cannot apply dhcp option %(opt)s "
                                     "because it's ip_version %(version)d "
                                     "is not in port's address IP versions"),
                                 {'opt': opt.opt_name,
                                  'version': opt_ip_version})
            # provides all dnsmasq ip as dns-server if there is more than
            # one dnsmasq for a subnet and there is no dns-server submitted
            # by the server
            if port.device_owner == constants.DEVICE_OWNER_DHCP:
                for ip in port.fixed_ips:
                    i = subnet_index_map.get(ip.subnet_id)
                    if i is None:
                        continue
                    dhcp_ips[i].append(ip.ip_address)
        for i, ips in dhcp_ips.items():
            for ip_version in (4, 6):
                vx_ips = [ip for ip in ips
                          if netaddr.IPAddress(ip).version == ip_version]
                if len(vx_ips) > 1:
                    options.append(
                        self._format_option(
                            ip_version, i, 'dns-server',
                            ','.join(
                                Dnsmasq._convert_to_literal_addrs(ip_version,
                                                                  vx_ips))))
        return options
def _make_subnet_interface_ip_map(self):
ip_dev = ip_lib.IPDevice(self.interface_name,
namespace=self.network.namespace)
subnet_lookup = dict(
(netaddr.IPNetwork(subnet.cidr), subnet.id)
for subnet in self.network.subnets
)
retval = {}
for addr in ip_dev.addr.list():
ip_net = netaddr.IPNetwork(addr['cidr'])
if ip_net in subnet_lookup:
retval[subnet_lookup[ip_net]] = addr['cidr'].split('/')[0]
return retval
def _format_option(self, ip_version, tag, option, *args):
"""Format DHCP option by option name or code."""
option = str(option)
pattern = "(tag:(.*),)?(.*)$"
matches = re.match(pattern, option)
extra_tag = matches.groups()[0]
option = matches.groups()[2]
if isinstance(tag, int):
tag = self._TAG_PREFIX % tag
if not option.isdigit():
if ip_version == 4:
option = 'option:%s' % option
else:
option = 'option6:%s' % option
if extra_tag:
tags = ('tag:' + tag, extra_tag[:-1], '%s' % option)
else:
tags = ('tag:' + tag, '%s' % option)
return ','.join(tags + args)
@staticmethod
def _convert_to_literal_addrs(ip_version, ips):
if ip_version == 4:
return ips
return ['[' + ip + ']' for ip in ips]
    @classmethod
    def get_isolated_subnets(cls, network):
        """Returns a dict indicating whether or not a subnet is isolated

        A subnet is considered non-isolated if there is a port connected to
        the subnet, and the port's ip address matches that of the subnet's
        gateway. The port must be owned by a neutron router.
        """
        # Default every subnet to isolated until a router port proves
        # otherwise.
        isolated_subnets = collections.defaultdict(lambda: True)
        subnets = dict((subnet.id, subnet) for subnet in network.subnets)
        for port in network.ports:
            if port.device_owner not in constants.ROUTER_INTERFACE_OWNERS:
                continue
            for alloc in port.fixed_ips:
                if subnets[alloc.subnet_id].gateway_ip == alloc.ip_address:
                    isolated_subnets[alloc.subnet_id] = False
        return isolated_subnets
    @classmethod
    def should_enable_metadata(cls, conf, network):
        """Determine whether the metadata proxy is needed for a network

        This method returns True for truly isolated networks (ie: not attached
        to a router) when enable_isolated_metadata is True, or for all the
        networks when the force_metadata flags is True.

        This method also returns True when enable_metadata_network is True,
        and the network passed as a parameter has a subnet in the link-local
        CIDR, thus characterizing it as a "metadata" network. The metadata
        network is used by solutions which do not leverage the l3 agent for
        providing access to the metadata service via logical routers built
        with 3rd party backends.
        """
        if conf.force_metadata:
            return True
        if conf.enable_metadata_network and conf.enable_isolated_metadata:
            # check if the network has a metadata subnet
            meta_cidr = netaddr.IPNetwork(METADATA_DEFAULT_CIDR)
            if any(netaddr.IPNetwork(s.cidr) in meta_cidr
                   for s in network.subnets):
                return True
        if not conf.enable_isolated_metadata:
            return False
        # Only serve metadata on subnets with no router port acting as
        # gateway.
        isolated_subnets = cls.get_isolated_subnets(network)
        return any(isolated_subnets[subnet.id] for subnet in network.subnets)
class DeviceManager(object):
    """Manages the DHCP port and its Linux interface for a network."""
    def __init__(self, conf, plugin):
        # conf: agent configuration; plugin: RPC proxy to the Neutron
        # server used for DHCP port CRUD.
        self.conf = conf
        self.plugin = plugin
        self.driver = agent_common_utils.load_interface_driver(conf)
    def get_interface_name(self, network, port):
        """Return interface(device) name for use by the DHCP process."""
        return self.driver.get_device_name(port)
    def get_device_id(self, network):
        """Return a unique DHCP device ID for this host on the network."""
        # There could be more than one dhcp server per network, so create
        # a device id that combines host and network ids
        return common_utils.get_dhcp_agent_device_id(network.id,
                                                     self.conf.host)
    def _set_default_route(self, network, device_name):
        """Sets the default gateway for this dhcp namespace.

        This method is idempotent and will only adjust the route if adjusting
        it would change it from what it already is. This makes it safe to call
        and avoids unnecessary perturbation of the system.
        """
        device = ip_lib.IPDevice(device_name, namespace=network.namespace)
        gateway = device.route.get_gateway()
        if gateway:
            gateway = gateway.get('gateway')
        for subnet in network.subnets:
            # Only DHCP-enabled IPv4 subnets with a gateway qualify.
            skip_subnet = (
                subnet.ip_version != 4
                or not subnet.enable_dhcp
                or subnet.gateway_ip is None)
            if skip_subnet:
                continue
            if gateway != subnet.gateway_ip:
                LOG.debug('Setting gateway for dhcp netns on net %(n)s to '
                          '%(ip)s',
                          {'n': network.id, 'ip': subnet.gateway_ip})
                # Check for and remove the on-link route for the old
                # gateway being replaced, if it is outside the subnet
                is_old_gateway_not_in_subnet = (gateway and
                                                not ipam_utils.check_subnet_ip(
                                                        subnet.cidr, gateway))
                if is_old_gateway_not_in_subnet:
                    v4_onlink = device.route.list_onlink_routes(
                        constants.IP_VERSION_4)
                    v6_onlink = device.route.list_onlink_routes(
                        constants.IP_VERSION_6)
                    existing_onlink_routes = set(
                        r['cidr'] for r in v4_onlink + v6_onlink)
                    if gateway in existing_onlink_routes:
                        device.route.delete_route(gateway, scope='link')
                # An off-subnet gateway needs an explicit on-link route
                # before it can be used as the default gateway.
                is_new_gateway_not_in_subnet = (subnet.gateway_ip and
                                                not ipam_utils.check_subnet_ip(
                                                    subnet.cidr,
                                                    subnet.gateway_ip))
                if is_new_gateway_not_in_subnet:
                    device.route.add_route(subnet.gateway_ip, scope='link')
                device.route.add_gateway(subnet.gateway_ip)
            # Only the first eligible subnet sets the namespace gateway.
            return
        # No subnets on the network have a valid gateway. Clean it up to avoid
        # confusion from seeing an invalid gateway here.
        if gateway is not None:
            LOG.debug('Removing gateway for dhcp netns on net %s', network.id)
            device.route.delete_gateway(gateway)
    def _setup_existing_dhcp_port(self, network, device_id, dhcp_subnets):
        """Set up the existing DHCP port, if there is one."""
        # To avoid pylint thinking that port might be undefined after
        # the following loop...
        port = None
        # Look for an existing DHCP port for this network.
        for port in network.ports:
            port_device_id = getattr(port, 'device_id', None)
            if port_device_id == device_id:
                # If using gateway IPs on this port, we can skip the
                # following code, whose purpose is just to review and
                # update the Neutron-allocated IP addresses for the
                # port.
                if self.driver.use_gateway_ips:
                    return port
                # Otherwise break out, as we now have the DHCP port
                # whose subnets and addresses we need to review.
                break
        else:
            return None
        # Compare what the subnets should be against what is already
        # on the port.
        dhcp_enabled_subnet_ids = set(dhcp_subnets)
        port_subnet_ids = set(ip.subnet_id for ip in port.fixed_ips)
        # If those differ, we need to call update.
        if dhcp_enabled_subnet_ids != port_subnet_ids:
            # Collect the subnets and fixed IPs that the port already
            # has, for subnets that are still in the DHCP-enabled set.
            wanted_fixed_ips = []
            for fixed_ip in port.fixed_ips:
                if fixed_ip.subnet_id in dhcp_enabled_subnet_ids:
                    wanted_fixed_ips.append(
                        {'subnet_id': fixed_ip.subnet_id,
                         'ip_address': fixed_ip.ip_address})
            # Add subnet IDs for new DHCP-enabled subnets.
            wanted_fixed_ips.extend(
                dict(subnet_id=s)
                for s in dhcp_enabled_subnet_ids - port_subnet_ids)
            # Update the port to have the calculated subnets and fixed
            # IPs. The Neutron server will allocate a fresh IP for
            # each subnet that doesn't already have one.
            port = self.plugin.update_dhcp_port(
                port.id,
                {'port': {'network_id': network.id,
                          'fixed_ips': wanted_fixed_ips}})
            if not port:
                raise exceptions.Conflict()
        return port
    def _setup_reserved_dhcp_port(self, network, device_id, dhcp_subnets):
        """Setup the reserved DHCP port, if there is one."""
        LOG.debug('DHCP port %(device_id)s on network %(network_id)s'
                  ' does not yet exist. Checking for a reserved port.',
                  {'device_id': device_id, 'network_id': network.id})
        for port in network.ports:
            port_device_id = getattr(port, 'device_id', None)
            if port_device_id == n_const.DEVICE_ID_RESERVED_DHCP_PORT:
                try:
                    # Claim the reserved port by stamping our device_id
                    # on it.
                    port = self.plugin.update_dhcp_port(
                        port.id, {'port': {'network_id': network.id,
                                           'device_id': device_id}})
                except oslo_messaging.RemoteError as e:
                    if e.exc_type == n_exc.DhcpPortInUse:
                        # Another agent claimed it first; try the next
                        # reserved port.
                        LOG.info(_LI("Skipping DHCP port %s as it is "
                                     "already in use"), port.id)
                        continue
                    raise
                if port:
                    return port
    def _setup_new_dhcp_port(self, network, device_id, dhcp_subnets):
        """Create and set up new DHCP port for the specified network."""
        LOG.debug('DHCP port %(device_id)s on network %(network_id)s'
                  ' does not yet exist. Creating new one.',
                  {'device_id': device_id, 'network_id': network.id})
        # Make a list of the subnets that need a unique IP address for
        # this DHCP port.
        if self.driver.use_gateway_ips:
            unique_ip_subnets = []
        else:
            unique_ip_subnets = [dict(subnet_id=s) for s in dhcp_subnets]
        port_dict = dict(
            name='',
            admin_state_up=True,
            device_id=device_id,
            network_id=network.id,
            tenant_id=network.tenant_id,
            fixed_ips=unique_ip_subnets)
        return self.plugin.create_dhcp_port({'port': port_dict})
    def setup_dhcp_port(self, network):
        """Create/update DHCP port for the host if needed and return port."""
        # The ID that the DHCP port will have (or already has).
        device_id = self.get_device_id(network)
        # Get the set of DHCP-enabled subnets on this network.
        dhcp_subnets = {subnet.id: subnet for subnet in network.subnets
                        if subnet.enable_dhcp}
        # There are 3 cases: either the DHCP port already exists (but
        # might need to be updated for a changed set of subnets); or
        # some other code has already prepared a 'reserved' DHCP port,
        # and we just need to adopt that; or we need to create a new
        # DHCP port. Try each of those in turn until we have a DHCP
        # port.
        for setup_method in (self._setup_existing_dhcp_port,
                             self._setup_reserved_dhcp_port,
                             self._setup_new_dhcp_port):
            dhcp_port = setup_method(network, device_id, dhcp_subnets)
            if dhcp_port:
                break
        else:
            raise exceptions.Conflict()
        # Convert subnet_id to subnet dict
        fixed_ips = [dict(subnet_id=fixed_ip.subnet_id,
                          ip_address=fixed_ip.ip_address,
                          subnet=dhcp_subnets[fixed_ip.subnet_id])
                     for fixed_ip in dhcp_port.fixed_ips]
        ips = [DictModel(item) if isinstance(item, dict) else item
               for item in fixed_ips]
        dhcp_port.fixed_ips = ips
        return dhcp_port
    def _update_dhcp_port(self, network, port):
        # Replace the cached copy of this port in the network model, or
        # append it if it was not known yet.
        for index in range(len(network.ports)):
            if network.ports[index].id == port.id:
                network.ports[index] = port
                break
        else:
            network.ports.append(port)
    def _cleanup_stale_devices(self, network, dhcp_port):
        """Unplug every device in the namespace except the DHCP port's."""
        LOG.debug("Cleaning stale devices for network %s", network.id)
        dev_name = self.driver.get_device_name(dhcp_port)
        ns_ip = ip_lib.IPWrapper(namespace=network.namespace)
        for d in ns_ip.get_devices(exclude_loopback=True):
            # delete all devices except current active DHCP port device
            if d.name != dev_name:
                LOG.debug("Found stale device %s, deleting", d.name)
                self.driver.unplug(d.name, namespace=network.namespace)
    def setup(self, network):
        """Create and initialize a device for network's DHCP on this host."""
        port = self.setup_dhcp_port(network)
        self._update_dhcp_port(network, port)
        interface_name = self.get_interface_name(network, port)
        if ip_lib.ensure_device_is_ready(interface_name,
                                         namespace=network.namespace):
            LOG.debug('Reusing existing device: %s.', interface_name)
        else:
            try:
                self.driver.plug(network.id,
                                 port.id,
                                 interface_name,
                                 port.mac_address,
                                 namespace=network.namespace,
                                 mtu=network.get('mtu'))
            except Exception:
                with excutils.save_and_reraise_exception():
                    LOG.exception(_LE('Unable to plug DHCP port for '
                                      'network %s. Releasing port.'),
                                  network.id)
                    self.plugin.release_dhcp_port(network.id, port.device_id)
            self.fill_dhcp_udp_checksums(namespace=network.namespace)
        ip_cidrs = []
        for fixed_ip in port.fixed_ips:
            subnet = fixed_ip.subnet
            # Skip auto-address (SLAAC) subnets; the kernel handles those.
            if not ipv6_utils.is_auto_address_subnet(subnet):
                net = netaddr.IPNetwork(subnet.cidr)
                ip_cidr = '%s/%s' % (fixed_ip.ip_address, net.prefixlen)
                ip_cidrs.append(ip_cidr)
        if self.driver.use_gateway_ips:
            # For each DHCP-enabled subnet, add that subnet's gateway
            # IP address to the Linux device for the DHCP port.
            for subnet in network.subnets:
                if not subnet.enable_dhcp:
                    continue
                gateway = subnet.gateway_ip
                if gateway:
                    net = netaddr.IPNetwork(subnet.cidr)
                    ip_cidrs.append('%s/%s' % (gateway, net.prefixlen))
        if self.conf.enable_isolated_metadata:
            ip_cidrs.append(METADATA_DEFAULT_CIDR)
        self.driver.init_l3(interface_name, ip_cidrs,
                            namespace=network.namespace)
        self._set_default_route(network, interface_name)
        try:
            self._cleanup_stale_devices(network, port)
        except Exception:
            # catch everything as we don't want to fail because of
            # cleanup step
            LOG.error(_LE("Exception during stale dhcp device cleanup"))
        return interface_name
    def update(self, network, device_name):
        """Update device settings for the network's DHCP on this host."""
        self._set_default_route(network, device_name)
    def destroy(self, network, device_name):
        """Destroy the device used for the network's DHCP on this host."""
        if device_name:
            self.driver.unplug(device_name, namespace=network.namespace)
        else:
            LOG.debug('No interface exists for network %s', network.id)
        self.plugin.release_dhcp_port(network.id,
                                      self.get_device_id(network))
    def fill_dhcp_udp_checksums(self, namespace):
        """Ensure DHCP reply packets always have correct UDP checksums."""
        iptables_mgr = iptables_manager.IptablesManager(use_ipv6=False,
                                                        namespace=namespace)
        ipv4_rule = ('-p udp -m udp --dport %d -j CHECKSUM --checksum-fill'
                     % constants.DHCP_RESPONSE_PORT)
        iptables_mgr.ipv4['mangle'].add_rule('POSTROUTING', ipv4_rule)
        iptables_mgr.apply()
| {
"content_hash": "48cffafd2d1ef9374e0c7443bbb8d5ba",
"timestamp": "",
"source": "github",
"line_count": 1286,
"max_line_length": 79,
"avg_line_length": 41.052099533437016,
"alnum_prop": 0.5495614948951566,
"repo_name": "bigswitch/neutron",
"id": "ee855dc9acca2ce3d560d74e8550cd4278ff212b",
"size": "53429",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "neutron/agent/linux/dhcp.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "1047"
},
{
"name": "Python",
"bytes": "8468247"
},
{
"name": "Shell",
"bytes": "14648"
}
],
"symlink_target": ""
} |
package com.tazine.thread.create;
/**
 * Thread state demo: a thread that performs a timed wait followed by an
 * indefinite wait, releasing its own monitor each time. Another thread can
 * end the indefinite wait by calling {@link #notifyIt()}.
 *
 * @author frank
 * @date 2018/07/26
 */
public class ThreadStateDemo extends Thread {
    // Timed wait on this object's monitor.
    // NOTE(review): the name says "a second" but the timeout is 500 ms —
    // confirm which is intended.
    private synchronized void waitForASecond() throws InterruptedException {
        wait(500);
    }
    // Wait on this object's monitor until notified (or interrupted).
    private synchronized void waitForever() throws InterruptedException {
        wait();
    }
    // Wake one thread waiting on this object's monitor.
    public synchronized void notifyIt() {
        notify();
    }
    @Override
    public void run() {
        try {
            waitForASecond();
            waitForever();
        } catch (InterruptedException e) {
            // Demo code: just report the interruption.
            e.printStackTrace();
        }
    }
}
| {
"content_hash": "3243dd90c8a322da46f070a76b6ae47d",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 76,
"avg_line_length": 19,
"alnum_prop": 0.5838815789473685,
"repo_name": "BookFrank/CodePlay",
"id": "2e5b63209d6226e3ae13d704aa524550dbcc65d8",
"size": "616",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "codeplay-thread/src/main/java/com/tazine/thread/create/ThreadStateDemo.java",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "178172"
},
{
"name": "Java",
"bytes": "595245"
},
{
"name": "JavaScript",
"bytes": "2771"
}
],
"symlink_target": ""
} |
require 'pathname'
# Puppet type wrapping the MSFT_xSPWebApplicationAppDomain DSC resource.
# Auto-generated from the xSharePoint module's MOF schema; edit the
# generator, not this file, for structural changes.
Puppet::Type.newtype(:dsc_xspwebapplicationappdomain) do
  require Pathname.new(__FILE__).dirname + '../../' + 'puppet/type/base_dsc'
  require Pathname.new(__FILE__).dirname + '../../puppet_x/puppetlabs/dsc_type_helpers'

  @doc = %q{
    The DSC xSPWebApplicationAppDomain resource type.
    Automatically generated from
    'xSharePoint/Modules/xSharePoint/DSCResources/MSFT_xSPWebApplicationAppDomain/MSFT_xSPWebApplicationAppDomain.schema.mof'

    To learn more about PowerShell Desired State Configuration, please
    visit https://technet.microsoft.com/en-us/library/dn249912.aspx.

    For more information about built-in DSC Resources, please visit
    https://technet.microsoft.com/en-us/library/dn249921.aspx.

    For more information about xDsc Resources, please visit
    https://github.com/PowerShell/DscResources.
  }

  # Both key properties of the underlying MOF schema must be supplied.
  validate do
    fail('dsc_webapplication is a required attribute') if self[:dsc_webapplication].nil?
    fail('dsc_zone is a required attribute') if self[:dsc_zone].nil?
  end

  # Metadata consumed by base_dsc to build the PowerShell invocation.
  def dscmeta_resource_friendly_name; 'xSPWebApplicationAppDomain' end
  def dscmeta_resource_name; 'MSFT_xSPWebApplicationAppDomain' end
  def dscmeta_module_name; 'xSharePoint' end
  def dscmeta_module_version; '0.12.0.0' end

  # Puppet resource title; not passed to DSC.
  newparam(:name, :namevar => true ) do
  end

  ensurable do
    newvalue(:exists?) { provider.exists? }
    newvalue(:present) { provider.create }
    defaultto { :present }
  end

  # Name:         WebApplication
  # Type:         string
  # IsMandatory:  True
  # Values:       None
  newparam(:dsc_webapplication) do
    def mof_type; 'string' end
    def mof_is_embedded?; false end
    desc "WebApplication - The URL of the web application to set the app domain for"
    isrequired
    validate do |value|
      unless value.kind_of?(String)
        fail("Invalid value '#{value}'. Should be a string")
      end
    end
  end

  # Name:         Zone
  # Type:         string
  # IsMandatory:  True
  # Values:       ["Default", "Internet", "Intranet", "Extranet", "Custom"]
  newparam(:dsc_zone) do
    def mof_type; 'string' end
    def mof_is_embedded?; false end
    desc "Zone - The zone that this app domain applies to Valid values are Default, Internet, Intranet, Extranet, Custom."
    isrequired
    validate do |value|
      unless value.kind_of?(String)
        fail("Invalid value '#{value}'. Should be a string")
      end
      # Case-insensitive membership check against the MOF ValueMap.
      unless ['Default', 'default', 'Internet', 'internet', 'Intranet', 'intranet', 'Extranet', 'extranet', 'Custom', 'custom'].include?(value)
        fail("Invalid value '#{value}'. Valid values are Default, Internet, Intranet, Extranet, Custom")
      end
    end
  end

  # Name:         AppDomain
  # Type:         string
  # IsMandatory:  False
  # Values:       None
  newparam(:dsc_appdomain) do
    def mof_type; 'string' end
    def mof_is_embedded?; false end
    desc "AppDomain - The domain for apps in this web app zone"
    validate do |value|
      unless value.kind_of?(String)
        fail("Invalid value '#{value}'. Should be a string")
      end
    end
  end

  # Name:         Port
  # Type:         string
  # IsMandatory:  False
  # Values:       None
  newparam(:dsc_port) do
    def mof_type; 'string' end
    def mof_is_embedded?; false end
    desc "Port - The port to run apps on"
    validate do |value|
      unless value.kind_of?(String)
        fail("Invalid value '#{value}'. Should be a string")
      end
    end
  end

  # Name:         SSL
  # Type:         boolean
  # IsMandatory:  False
  # Values:       None
  newparam(:dsc_ssl) do
    def mof_type; 'boolean' end
    def mof_is_embedded?; false end
    desc "SSL - Should apps run under SSL"
    validate do |value|
    end
    newvalues(true, false)
    # Normalize Puppet's true/false/'true'/'false' forms to a real boolean.
    munge do |value|
      PuppetX::Dsc::TypeHelpers.munge_boolean(value.to_s)
    end
  end

  # Name:         InstallAccount
  # Type:         MSFT_Credential
  # IsMandatory:  False
  # Values:       None
  newparam(:dsc_installaccount) do
    def mof_type; 'MSFT_Credential' end
    def mof_is_embedded?; true end
    desc "InstallAccount - POWERSHELL 4 ONLY: The account to run this resource as, use PsDscRunAsAccount if using PowerShell 5"
    validate do |value|
      unless value.kind_of?(Hash)
        fail("Invalid value '#{value}'. Should be a hash")
      end
      PuppetX::Dsc::TypeHelpers.validate_MSFT_Credential("InstallAccount", value)
    end
  end

  # Hook the DSC reboot-coordination logic into Puppet's dependency graph.
  def builddepends
    pending_relations = super()
    PuppetX::Dsc::TypeHelpers.ensure_reboot_relationship(self, pending_relations)
  end
end
# PowerShell provider for the type above; requires WMF 5 (first RTM build)
# or newer, and is only applicable on Windows.
Puppet::Type.type(:dsc_xspwebapplicationappdomain).provide :powershell, :parent => Puppet::Type.type(:base_dsc).provider(:powershell) do
  confine :true => (Gem::Version.new(Facter.value(:powershell_version)) >= Gem::Version.new('5.0.10240.16384'))
  defaultfor :operatingsystem => :windows

  mk_resource_methods
end
| {
"content_hash": "e28650d232d9185a63bb50374a1215b9",
"timestamp": "",
"source": "github",
"line_count": 151,
"max_line_length": 143,
"avg_line_length": 32.397350993377486,
"alnum_prop": 0.6618969746524939,
"repo_name": "cowofevil/puppetlabs-dsc",
"id": "d156a0fa49ea46bc48097b5bf7c4e1bf01795b24",
"size": "4892",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/puppet/type/dsc_xspwebapplicationappdomain.rb",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "17769"
},
{
"name": "NSIS",
"bytes": "1454"
},
{
"name": "PowerShell",
"bytes": "2548458"
},
{
"name": "Puppet",
"bytes": "431"
},
{
"name": "Ruby",
"bytes": "4408846"
},
{
"name": "Shell",
"bytes": "3568"
}
],
"symlink_target": ""
} |
from PythonQt import QtCore, QtGui
from director import lcmUtils
from director.simpletimer import SimpleTimer
from director.timercallback import TimerCallback
import subprocess
import os
import sys
class LCMLoggerWidget(object):
    """Button widget for controlling the LCM logger process.

    Left-click toggles logging on/off; right-click opens a context menu
    with stop/delete/tag/copy/review actions.  A background timer polls
    the logger manager so the button label and tooltip track the number
    of running logger processes.

    NOTE: attribute-style Qt property access (``self.button.text = ...``,
    ``action.enabled = ...``) is PythonQt syntax, not PyQt/PySide.
    """

    def __init__(self, statusBar=None):
        # statusBar: optional QStatusBar used for transient feedback messages.
        self.manager = lcmUtils.LCMLoggerManager()
        self.statusBar = statusBar
        self.lastActiveLogFile = None   # most recently observed log file path
        self.numProcesses = 0           # running logger process count
        self.numLogFiles = 0            # active log file count from last poll
        self.userTag = ''               # optional tag appended to new log names

        self.button = QtGui.QPushButton('')
        self.button.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)
        self.button.connect('customContextMenuRequested(const QPoint&)', self.showContextMenu)
        self.button.connect('clicked()', self.onClick)

        # Poll every 4 seconds (0.25 Hz) to refresh button state.
        self.timer = TimerCallback(targetFps=0.25)
        self.timer.callback = self.updateState
        self.timer.start()

    def updateState(self):
        """Poll the logger manager and refresh button text and tooltip."""
        t = SimpleTimer()  # NOTE(review): unused; presumably leftover profiling aid
        self.manager.updateExistingLoggerProcesses()
        activeLogFiles = self.manager.getActiveLogFilenames()
        self.numProcesses = len(self.manager.getActiveLoggerPids())
        self.numLogFiles = len(activeLogFiles)
        # Only remember the file when it is unambiguous (exactly one logger).
        if self.numLogFiles == 1:
            self.lastActiveLogFile = activeLogFiles[0]

        if self.numProcesses == 0:
            self.button.text = 'start logger'
        elif self.numProcesses == 1:
            self.button.text = 'stop logger'
        elif self.numProcesses > 1:
            self.button.text = 'stop all loggers'

        statusDescription = 'active' if self.numProcesses else 'last'
        logFileDescription = self.lastActiveLogFile or '<unknown>'
        self.button.setToolTip('%s log file: %s' % (statusDescription, logFileDescription))

    def onClick(self):
        """Toggle logging: start a new logger if none runs, else stop all."""
        if self.numProcesses == 0:
            self.manager.startNewLogger(tag=self.userTag)
            self.updateState()
            # NOTE(review): assumes updateState() observed the new log file;
            # if not, lastActiveLogFile may still be None here.
            self.showStatusMessage('start logging: ' + self.lastActiveLogFile)
        else:
            self.manager.killAllLoggingProcesses()
            self.showStatusMessage('stopped logging')
            self.updateState()

    def showStatusMessage(self, msg, timeout=2000):
        """Show msg on the status bar (if any) for timeout milliseconds."""
        if self.statusBar:
            self.statusBar.showMessage(msg, timeout)

    def showContextMenu(self, clickPosition):
        """Build and execute the right-click menu; dispatch on action text."""
        globalPos = self.button.mapToGlobal(clickPosition)

        menu = QtGui.QMenu()

        # Actions are enabled/disabled according to current logger state.
        action = menu.addAction('Stop logger')
        action.enabled = (self.numProcesses > 0)

        action = menu.addAction('Stop and delete log file')
        action.enabled = (self.numProcesses > 0 and self.lastActiveLogFile)

        action = menu.addAction('Set logger tag')
        action.enabled = (self.numProcesses == 0)

        action = menu.addAction('Copy log filename')
        action.enabled = (self.lastActiveLogFile is not None)

        action = menu.addAction('Review log')
        action.enabled = (self.lastActiveLogFile is not None)

        selectedAction = menu.exec_(globalPos)
        if selectedAction is None:
            return

        # Dispatch by the action's display text (must match addAction above).
        if selectedAction.text == 'Copy log filename':
            clipboard = QtGui.QApplication.instance().clipboard()
            clipboard.setText(self.lastActiveLogFile)
            self.showStatusMessage('copy to clipboard: ' + self.lastActiveLogFile)

        elif selectedAction.text == 'Stop logger':
            self.manager.killAllLoggingProcesses()
            self.showStatusMessage('stopped logger')
            self.updateState()

        elif selectedAction.text == 'Stop and delete log file':
            # Capture the path before updateState() can change it.
            logFileToRemove = self.lastActiveLogFile
            self.manager.killAllLoggingProcesses()
            self.updateState()
            os.remove(logFileToRemove)
            self.showStatusMessage('deleted: ' + logFileToRemove)

        elif selectedAction.text == 'Set logger tag':
            inputDialog = QtGui.QInputDialog()
            inputDialog.setInputMode(inputDialog.TextInput)
            inputDialog.setLabelText('Log file tag:')
            inputDialog.setWindowTitle('Enter tag')
            inputDialog.setTextValue(self.userTag)
            result = inputDialog.exec_()
            if result:
                tag = inputDialog.textValue()
                self.userTag = tag
                self.showStatusMessage('Set lcm logger tag: ' + self.userTag)

        elif selectedAction.text == 'Review log':
            # Launch a second director instance plus log-review tools against
            # the review LCM network (assumes LCM_REVIEW_DEFAULT_URL is set in
            # the environment -- KeyError otherwise; TODO confirm).
            newEnv = dict(os.environ)
            newEnv['LCM_DEFAULT_URL'] = newEnv['LCM_REVIEW_DEFAULT_URL']
            devnull = open(os.devnull, 'w')
            # Pass entire command line invocation of director to subprocess including cfg and json paths
            subprocess.Popen(sys.argv, stdout=devnull, stderr=devnull, env=newEnv)
            subprocess.Popen(['lcm-logplayer-gui', self.lastActiveLogFile], stdout=devnull, stderr=devnull, env=newEnv)
            subprocess.Popen(['bot-procman-sheriff', '-o'], stdout=devnull, stderr=devnull, env=newEnv)
| {
"content_hash": "408f95b4b06ac6a10c445888df1a57d4",
"timestamp": "",
"source": "github",
"line_count": 131,
"max_line_length": 119,
"avg_line_length": 37.80152671755725,
"alnum_prop": 0.6453957996768982,
"repo_name": "patmarion/director",
"id": "838101ffdf62d920116635dde6730232dcdc090e",
"size": "4952",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "src/python/director/lcmloggerwidget.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "121912"
},
{
"name": "C++",
"bytes": "565385"
},
{
"name": "CMake",
"bytes": "82478"
},
{
"name": "Dockerfile",
"bytes": "2510"
},
{
"name": "GLSL",
"bytes": "15443"
},
{
"name": "MATLAB",
"bytes": "161948"
},
{
"name": "Makefile",
"bytes": "5014"
},
{
"name": "Python",
"bytes": "2282093"
},
{
"name": "Shell",
"bytes": "14291"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
import logging
from typing import Any, Dict, List, Set, Tuple, Optional, Text
from django.contrib.auth.backends import RemoteUserBackend
from django.conf import settings
from django.http import HttpResponse
import django.contrib.auth
from django_auth_ldap.backend import LDAPBackend, _LDAPUser
from zerver.lib.actions import do_create_user
from zerver.models import UserProfile, Realm, get_user_profile_by_id, \
get_user_profile_by_email, remote_user_to_email, email_to_username, \
get_realm, get_realm_by_email_domain
from apiclient.sample_tools import client as googleapiclient
from oauth2client.crypt import AppIdentityError
from social_core.backends.github import GithubOAuth2, GithubOrganizationOAuth2, \
GithubTeamOAuth2
from social_core.exceptions import AuthFailed, SocialAuthBaseException
from django.contrib.auth import authenticate
from zerver.lib.users import check_full_name
from zerver.lib.request import JsonableError
from zerver.lib.utils import check_subdomain, get_subdomain
from social_django.models import DjangoStorage
from social_django.strategy import DjangoStrategy
def pad_method_dict(method_dict):
    # type: (Dict[Text, bool]) -> Dict[Text, bool]
    """Fill in an entry for every supported auth backend.

    Backends known to the software but missing from ``method_dict`` are
    recorded as disabled (False), regardless of whether they are
    configured on this server.  The dict is mutated in place and also
    returned for call-chaining convenience.
    """
    for backend_name in AUTH_BACKEND_NAME_MAP:
        method_dict.setdefault(backend_name, False)
    return method_dict
def auth_enabled_helper(backends_to_check, realm):
    # type: (List[Text], Optional[Realm]) -> bool
    """Report whether any of ``backends_to_check`` is usable.

    A backend counts as usable when it is both enabled for the given
    realm (every method is treated as enabled when ``realm`` is None)
    and actually installed in Django's AUTHENTICATION_BACKENDS.
    """
    if realm is None:
        enabled_method_dict = {method: True for method in Realm.AUTHENTICATION_FLAGS}
    else:
        enabled_method_dict = realm.authentication_methods_dict()
    pad_method_dict(enabled_method_dict)

    installed_backends = django.contrib.auth.get_backends()
    return any(
        enabled_method_dict[backend_name] and
        isinstance(installed_backend, AUTH_BACKEND_NAME_MAP[backend_name])
        for installed_backend in installed_backends
        for backend_name in backends_to_check
    )
def ldap_auth_enabled(realm=None):
    # type: (Optional[Realm]) -> bool
    # True when the LDAP backend is enabled and installed (for this realm).
    return auth_enabled_helper([u'LDAP'], realm)

def email_auth_enabled(realm=None):
    # type: (Optional[Realm]) -> bool
    # True when the email/password backend is enabled and installed.
    return auth_enabled_helper([u'Email'], realm)

def password_auth_enabled(realm=None):
    # type: (Optional[Realm]) -> bool
    # Any password-based login (LDAP or email) counts.
    return ldap_auth_enabled(realm) or email_auth_enabled(realm)

def dev_auth_enabled(realm=None):
    # type: (Optional[Realm]) -> bool
    # Development-only passwordless login backend.
    return auth_enabled_helper([u'Dev'], realm)

def google_auth_enabled(realm=None):
    # type: (Optional[Realm]) -> bool
    return auth_enabled_helper([u'Google'], realm)

def github_auth_enabled(realm=None):
    # type: (Optional[Realm]) -> bool
    return auth_enabled_helper([u'GitHub'], realm)

def any_oauth_backend_enabled(realm=None):
    # type: (Optional[Realm]) -> bool
    """Used by the login page process to determine whether to show the
    'OR' for login with Google"""
    return auth_enabled_helper([u'GitHub', u'Google'], realm)
def common_get_active_user_by_email(email, return_data=None):
    # type: (Text, Optional[Dict[str, Any]]) -> Optional[UserProfile]
    """Look up an active user in an active realm by email address.

    Returns None when the user is unknown, deactivated, or belongs to a
    deactivated realm; in the latter two cases the failure reason is
    recorded in ``return_data`` when the caller supplied a dict.
    """
    try:
        user_profile = get_user_profile_by_email(email)
    except UserProfile.DoesNotExist:
        return None

    failure_key = None
    if not user_profile.is_active:
        failure_key = 'inactive_user'
    elif user_profile.realm.deactivated:
        failure_key = 'inactive_realm'

    if failure_key is None:
        return user_profile
    if return_data is not None:
        return_data[failure_key] = True
    return None
class ZulipAuthMixin(object):
    # Shared base for Zulip auth backends: provides the get_user() hook
    # Django uses to rehydrate the session's user id into a UserProfile.
    def get_user(self, user_profile_id):
        # type: (int) -> Optional[UserProfile]
        """ Get a UserProfile object from the user_profile_id. """
        try:
            return get_user_profile_by_id(user_profile_id)
        except UserProfile.DoesNotExist:
            # Django's contract expects None for an unknown id, not an exception.
            return None
class SocialAuthMixin(ZulipAuthMixin):
    """Common glue between Python Social Auth and Zulip's account model.

    Concrete subclasses (e.g. GitHubAuthBackend) must set
    ``auth_backend_name`` and implement get_email_address()/get_full_name()
    to extract identity data from the provider's OAuth response.
    """

    auth_backend_name = None  # type: Text

    def get_email_address(self, *args, **kwargs):
        # type: (*Any, **Any) -> Text
        # Subclass hook: extract the user's email from the OAuth response.
        raise NotImplementedError

    def get_full_name(self, *args, **kwargs):
        # type: (*Any, **Any) -> Text
        # Subclass hook: extract the user's full name from the OAuth response.
        raise NotImplementedError

    def authenticate(self,
                     realm_subdomain='',  # type: Optional[Text]
                     storage=None,  # type: Optional[DjangoStorage]
                     strategy=None,  # type: Optional[DjangoStrategy]
                     user=None,  # type: Optional[Dict[str, Any]]
                     return_data=None,  # type: Optional[Dict[str, Any]]
                     response=None,  # type: Optional[Dict[str, Any]]
                     backend=None  # type: Optional[GithubOAuth2]
                     ):
        # type: (...) -> Optional[UserProfile]
        """
        Django decides which `authenticate` to call by inspecting the
        arguments. So it's better to create `authenticate` function
        with well defined arguments.

        Keeping this function separate so that it can easily be
        overridden.
        """
        # Normalize the mutable kwargs so downstream code can subscript them.
        if user is None:
            user = {}

        if return_data is None:
            return_data = {}

        if response is None:
            response = {}

        # NOTE: self is also passed positionally so the subclass hooks
        # invoked via *args in _common_authenticate receive it -- TODO confirm
        # this is intentional rather than a historical accident.
        return self._common_authenticate(self,
                                         realm_subdomain=realm_subdomain,
                                         storage=storage,
                                         strategy=strategy,
                                         user=user,
                                         return_data=return_data,
                                         response=response,
                                         backend=backend)

    def _common_authenticate(self, *args, **kwargs):
        # type: (*Any, **Any) -> Optional[UserProfile]
        # Core decision: map the provider-supplied email to an active Zulip
        # account, recording the failure reason in return_data otherwise.
        return_data = kwargs.get('return_data', {})

        email_address = self.get_email_address(*args, **kwargs)
        if not email_address:
            return_data['invalid_email'] = True
            return None

        try:
            user_profile = get_user_profile_by_email(email_address)
        except UserProfile.DoesNotExist:
            # Identity is valid but no account exists yet; the signup flow
            # uses this flag downstream.
            return_data["valid_attestation"] = True
            return None

        if not user_profile.is_active:
            return_data["inactive_user"] = True
            return None

        if user_profile.realm.deactivated:
            return_data["inactive_realm"] = True
            return None

        if not check_subdomain(kwargs.get("realm_subdomain"),
                               user_profile.realm.subdomain):
            return_data["invalid_subdomain"] = True
            return None

        if not auth_enabled_helper([self.auth_backend_name], user_profile.realm):
            return_data["auth_backend_disabled"] = True
            return None

        return user_profile

    def process_do_auth(self, user_profile, *args, **kwargs):
        # type: (UserProfile, *Any, **Any) -> Optional[HttpResponse]
        # Turn the authentication result (possibly None plus return_data
        # flags) into an HTTP response: login, registration, or redirect.
        # These functions need to be imported here to avoid cyclic
        # dependency.
        from zerver.views.auth import (login_or_register_remote_user,
                                       redirect_to_subdomain_login_url)
        from zerver.views.registration import redirect_and_log_into_subdomain

        return_data = kwargs.get('return_data', {})

        inactive_user = return_data.get('inactive_user')
        inactive_realm = return_data.get('inactive_realm')
        invalid_subdomain = return_data.get('invalid_subdomain')
        invalid_email = return_data.get('invalid_email')

        if inactive_user or inactive_realm:
            # Redirect to login page. We can't send to registration
            # workflow with these errors. We will redirect to login page.
            return None

        if invalid_email:
            # In case of invalid email, we will end up on registration page.
            # This seems better than redirecting to login page.
            logging.warning(
                "{} got invalid email argument.".format(self.auth_backend_name)
            )

        strategy = self.strategy  # type: ignore # This comes from Python Social Auth.
        request = strategy.request
        email_address = self.get_email_address(*args, **kwargs)
        full_name = self.get_full_name(*args, **kwargs)
        is_signup = strategy.session_get('is_signup') == '1'

        subdomain = strategy.session_get('subdomain')
        if not subdomain:
            # Non-subdomain deployment: finish login/registration directly.
            return login_or_register_remote_user(request, email_address,
                                                 user_profile, full_name,
                                                 invalid_subdomain=bool(invalid_subdomain),
                                                 is_signup=is_signup)
        try:
            realm = Realm.objects.get(string_id=subdomain)
        except Realm.DoesNotExist:
            return redirect_to_subdomain_login_url()

        return redirect_and_log_into_subdomain(realm, full_name, email_address,
                                               is_signup=is_signup)

    def auth_complete(self, *args, **kwargs):
        # type: (*Any, **Any) -> Optional[HttpResponse]
        """
        Returning `None` from this function will redirect the browser
        to the login page.
        """
        try:
            # Call the auth_complete method of BaseOAuth2 is Python Social Auth
            return super(SocialAuthMixin, self).auth_complete(*args, **kwargs)  # type: ignore
        except AuthFailed:
            # Provider rejected the attempt; fall back to the login page.
            return None
        except SocialAuthBaseException as e:
            # Log unexpected social-auth errors but never 500 the login flow.
            logging.exception(e)
            return None
class ZulipDummyBackend(ZulipAuthMixin):
    """
    Used when we want to log you in but we don't know which backend to use.

    Callers must explicitly pass use_dummy_backend=True; otherwise this
    backend always declines, so Django keeps trying other backends.
    """

    def authenticate(self, username=None, realm_subdomain=None, use_dummy_backend=False,
                     return_data=None):
        # type: (Optional[Text], Optional[Text], bool, Optional[Dict[str, Any]]) -> Optional[UserProfile]
        assert username is not None
        if use_dummy_backend:
            user_profile = common_get_active_user_by_email(username)
            if user_profile is None:
                return None
            if not check_subdomain(realm_subdomain, user_profile.realm.subdomain):
                # Bug fix: return_data defaults to None, so subscripting it
                # unconditionally raised TypeError; only record the failure
                # reason when the caller supplied a dict (matching the other
                # backends in this file).
                if return_data is not None:
                    return_data["invalid_subdomain"] = True
                return None
            return user_profile
        return None
class EmailAuthBackend(ZulipAuthMixin):
    """
    Email Authentication Backend

    Allows a user to sign in using an email/password pair rather than
    a username/password pair.
    """

    def authenticate(self, username=None, password=None, realm_subdomain=None, return_data=None):
        # type: (Optional[Text], Optional[str], Optional[Text], Optional[Dict[str, Any]]) -> Optional[UserProfile]
        """ Authenticate a user based on email address as the user name. """
        if username is None or password is None:
            # Return immediately.  Otherwise we will look for a SQL row with
            # NULL username.  While that's probably harmless, it's needless
            # exposure.
            return None

        user_profile = common_get_active_user_by_email(username, return_data=return_data)
        if user_profile is None:
            return None
        if not password_auth_enabled(user_profile.realm):
            if return_data is not None:
                return_data['password_auth_disabled'] = True
            return None
        if not email_auth_enabled(user_profile.realm):
            if return_data is not None:
                return_data['email_auth_disabled'] = True
            return None
        if user_profile.check_password(password):
            if not check_subdomain(realm_subdomain, user_profile.realm.subdomain):
                # Bug fix: every other return_data write in this method is
                # guarded against the None default; this one previously
                # raised TypeError when no return_data dict was supplied.
                if return_data is not None:
                    return_data["invalid_subdomain"] = True
                return None
            return user_profile
        # Wrong password.
        return None
class GoogleMobileOauth2Backend(ZulipAuthMixin):
    """
    Google Apps authentication for mobile devices

    Allows a user to sign in using a Google-issued OAuth2 token.

    Ref:
        https://developers.google.com/+/mobile/android/sign-in#server-side_access_for_your_app
        https://developers.google.com/accounts/docs/CrossClientAuth#offlineAccess
    """

    def authenticate(self, google_oauth2_token=None, realm_subdomain=None, return_data=None):
        # type: (Optional[str], Optional[Text], Optional[Dict[str, Any]]) -> Optional[UserProfile]
        if return_data is None:
            return_data = {}

        try:
            # Cryptographically verify the ID token against our client ID.
            token_payload = googleapiclient.verify_id_token(google_oauth2_token, settings.GOOGLE_CLIENT_ID)
        except AppIdentityError:
            # Token is invalid/expired/forged: decline without details.
            return None
        # Google may report email_verified as a bool or the string "true".
        if token_payload["email_verified"] in (True, "true"):
            try:
                user_profile = get_user_profile_by_email(token_payload["email"])
            except UserProfile.DoesNotExist:
                # Verified identity but no Zulip account yet.
                return_data["valid_attestation"] = True
                return None
            if not user_profile.is_active:
                return_data["inactive_user"] = True
                return None
            if user_profile.realm.deactivated:
                return_data["inactive_realm"] = True
                return None
            if not check_subdomain(realm_subdomain, user_profile.realm.subdomain):
                return_data["invalid_subdomain"] = True
                return None
            if not google_auth_enabled(realm=user_profile.realm):
                return_data["google_auth_disabled"] = True
                return None
            return user_profile
        else:
            return_data["valid_attestation"] = False
            return None
class ZulipRemoteUserBackend(RemoteUserBackend):
    """SSO via the REMOTE_USER variable set by an authenticating proxy."""

    # Never auto-create Django users for unknown REMOTE_USER values.
    create_unknown_user = False

    def authenticate(self, remote_user, realm_subdomain=None):
        # type: (str, Optional[Text]) -> Optional[UserProfile]
        if not remote_user:
            return None

        email = remote_user_to_email(remote_user)
        user_profile = common_get_active_user_by_email(email)
        if user_profile is None:
            return None
        if not check_subdomain(realm_subdomain, user_profile.realm.subdomain):
            return None
        if not auth_enabled_helper([u"RemoteUser"], user_profile.realm):
            return None
        return user_profile
class ZulipLDAPException(Exception):
    """Raised by the LDAP backends for login failures specific to Zulip
    (domain mismatch, deactivated realm, disabled backend, bad name)."""
    pass
class ZulipLDAPAuthBackendBase(ZulipAuthMixin, LDAPBackend):
    """Shared plumbing for the LDAP backends: disables django-auth-ldap's
    permission system and maps between Zulip emails and LDAP usernames."""

    # Don't use Django LDAP's permissions functions
    def has_perm(self, user, perm, obj=None):
        # type: (UserProfile, Any, Any) -> bool
        # Using Any type is safe because we are not doing anything with
        # the arguments.
        return False

    def has_module_perms(self, user, app_label):
        # type: (UserProfile, str) -> bool
        return False

    def get_all_permissions(self, user, obj=None):
        # type: (UserProfile, Any) -> Set
        # Using Any type is safe because we are not doing anything with
        # the arguments.
        return set()

    def get_group_permissions(self, user, obj=None):
        # type: (UserProfile, Any) -> Set
        # Using Any type is safe because we are not doing anything with
        # the arguments.
        return set()

    def django_to_ldap_username(self, username):
        # type: (Text) -> Text
        # With LDAP_APPEND_DOMAIN, Zulip emails must end in that domain;
        # the LDAP username is the local part of the email.
        if settings.LDAP_APPEND_DOMAIN:
            if not username.endswith("@" + settings.LDAP_APPEND_DOMAIN):
                raise ZulipLDAPException("Username does not match LDAP domain.")
            return email_to_username(username)
        return username

    def ldap_to_django_username(self, username):
        # type: (str) -> str
        # Inverse mapping: rebuild the email by appending the domain.
        if settings.LDAP_APPEND_DOMAIN:
            return "@".join((username, settings.LDAP_APPEND_DOMAIN))
        return username
class ZulipLDAPAuthBackend(ZulipLDAPAuthBackendBase):
    """LDAP login backend; can auto-create a Zulip account on first login."""

    def authenticate(self, username, password, realm_subdomain=None, return_data=None):
        # type: (Text, str, Optional[Text], Optional[Dict[str, Any]]) -> Optional[UserProfile]
        try:
            # Stash the target realm on self for get_or_create_user(),
            # which django-auth-ldap calls during the bind below.
            if settings.REALMS_HAVE_SUBDOMAINS:
                self._realm = get_realm(realm_subdomain)
            else:
                self._realm = get_realm_by_email_domain(username)
            username = self.django_to_ldap_username(username)
            user_profile = ZulipLDAPAuthBackendBase.authenticate(self, username, password)
            if user_profile is None:
                return None
            if not check_subdomain(realm_subdomain, user_profile.realm.subdomain):
                return None
            return user_profile
        except Realm.DoesNotExist:
            return None
        except ZulipLDAPException:
            # Domain mismatch, deactivated realm, etc.: decline login.
            return None

    def get_or_create_user(self, username, ldap_user):
        # type: (str, _LDAPUser) -> Tuple[UserProfile, bool]
        """django-auth-ldap hook: map the bound LDAP user to a UserProfile,
        creating one (from the configured LDAP attributes) if needed.
        Returns (user_profile, created)."""
        try:
            user_profile = get_user_profile_by_email(username)
            if not user_profile.is_active or user_profile.realm.deactivated:
                raise ZulipLDAPException("Realm has been deactivated")
            if not ldap_auth_enabled(user_profile.realm):
                raise ZulipLDAPException("LDAP Authentication is not enabled")
            return user_profile, False
        except UserProfile.DoesNotExist:
            # No need to check for an inactive user since they don't exist yet
            if self._realm.deactivated:
                raise ZulipLDAPException("Realm has been deactivated")

            # full_name attribute is mandatory; short_name falls back to it.
            full_name_attr = settings.AUTH_LDAP_USER_ATTR_MAP["full_name"]
            short_name = full_name = ldap_user.attrs[full_name_attr][0]
            try:
                full_name = check_full_name(full_name)
            except JsonableError as e:
                raise ZulipLDAPException(e.error)
            if "short_name" in settings.AUTH_LDAP_USER_ATTR_MAP:
                short_name_attr = settings.AUTH_LDAP_USER_ATTR_MAP["short_name"]
                short_name = ldap_user.attrs[short_name_attr][0]

            # Password is None: LDAP remains the source of truth for auth.
            user_profile = do_create_user(username, None, self._realm, full_name, short_name)
            return user_profile, True
# Just like ZulipLDAPAuthBackend, but doesn't let you log in.
class ZulipLDAPUserPopulator(ZulipLDAPAuthBackendBase):
    """Sync-only variant: inherits the user-mapping machinery but always
    declines authentication."""
    def authenticate(self, username, password, realm_subdomain=None):
        # type: (Text, str, Optional[Text]) -> None
        return None
class DevAuthBackend(ZulipAuthMixin):
    # Allow logging in as any user without a password.
    # This is used for convenience when developing Zulip.
    def authenticate(self, username, realm_subdomain=None, return_data=None):
        # type: (Text, Optional[Text], Optional[Dict[str, Any]]) -> Optional[UserProfile]
        user_profile = common_get_active_user_by_email(username, return_data=return_data)
        if user_profile is None:
            return None
        # Only usable when the Dev backend is explicitly enabled.
        if not dev_auth_enabled(user_profile.realm):
            return None
        return user_profile
class GitHubAuthBackend(SocialAuthMixin, GithubOAuth2):
    """GitHub OAuth2 login, optionally restricted to a team or organization
    via SOCIAL_AUTH_GITHUB_TEAM_ID / SOCIAL_AUTH_GITHUB_ORG_NAME."""

    auth_backend_name = u"GitHub"

    def get_email_address(self, *args, **kwargs):
        # type: (*Any, **Any) -> Optional[Text]
        # Email as reported in GitHub's OAuth response; None if absent.
        try:
            return kwargs['response']['email']
        except KeyError:
            return None

    def get_full_name(self, *args, **kwargs):
        # type: (*Any, **Any) -> Text
        # In case of any error return an empty string. Name is used by
        # the registration page to pre-populate the name field. However,
        # if it is not supplied, our registration process will make sure
        # that the user enters a valid name.
        try:
            name = kwargs['response']['name']
        except KeyError:
            name = ''
        if name is None:
            return ''
        return name

    def do_auth(self, *args, **kwargs):
        # type: (*Any, **Any) -> Optional[HttpResponse]
        """
        This function is called once the OAuth2 workflow is complete. We
        override this function to:
            1. Inject `return_data` and `realm_admin` kwargs. These will
               be used by `authenticate()` function to make the decision.
            2. Call the proper `do_auth` function depending on whether
               we are doing individual, team or organization based GitHub
               authentication.
        The actual decision on authentication is done in
        SocialAuthMixin._common_authenticate().
        """
        kwargs['return_data'] = {}

        request = self.strategy.request
        kwargs['realm_subdomain'] = get_subdomain(request)

        user_profile = None

        team_id = settings.SOCIAL_AUTH_GITHUB_TEAM_ID
        org_name = settings.SOCIAL_AUTH_GITHUB_ORG_NAME

        if (team_id is None and org_name is None):
            # Unrestricted: any GitHub account may attempt to authenticate.
            try:
                user_profile = GithubOAuth2.do_auth(self, *args, **kwargs)
            except AuthFailed:
                logging.info("User authentication failed.")
                user_profile = None

        elif (team_id):
            # Team-restricted: delegate to the team-membership backend.
            backend = GithubTeamOAuth2(self.strategy, self.redirect_uri)
            try:
                user_profile = backend.do_auth(*args, **kwargs)
            except AuthFailed:
                logging.info("User is not member of GitHub team.")
                user_profile = None

        elif (org_name):
            # Organization-restricted: delegate to the org-membership backend.
            backend = GithubOrganizationOAuth2(self.strategy, self.redirect_uri)
            try:
                user_profile = backend.do_auth(*args, **kwargs)
            except AuthFailed:
                logging.info("User is not member of GitHub organization.")
                user_profile = None

        return self.process_do_auth(user_profile, *args, **kwargs)
# Maps the user-facing authentication method names (as stored in realm
# settings and checked by auth_enabled_helper) to their backend classes.
AUTH_BACKEND_NAME_MAP = {
    u'Dev': DevAuthBackend,
    u'Email': EmailAuthBackend,
    u'GitHub': GitHubAuthBackend,
    u'Google': GoogleMobileOauth2Backend,
    u'LDAP': ZulipLDAPAuthBackend,
    u'RemoteUser': ZulipRemoteUserBackend,
}  # type: Dict[Text, Any]
| {
"content_hash": "d21354776353532ca60d82ede648cad6",
"timestamp": "",
"source": "github",
"line_count": 557,
"max_line_length": 114,
"avg_line_length": 39.99102333931777,
"alnum_prop": 0.6127946127946128,
"repo_name": "ryanbackman/zulip",
"id": "ac52a7f2ee4df7b6b7e9b3a3b84e79f465b6fdae",
"size": "22275",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "zproject/backends.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "392722"
},
{
"name": "Emacs Lisp",
"bytes": "158"
},
{
"name": "Groovy",
"bytes": "5509"
},
{
"name": "HTML",
"bytes": "590505"
},
{
"name": "JavaScript",
"bytes": "1783783"
},
{
"name": "Nginx",
"bytes": "1280"
},
{
"name": "Pascal",
"bytes": "1113"
},
{
"name": "Perl",
"bytes": "401825"
},
{
"name": "Puppet",
"bytes": "87372"
},
{
"name": "Python",
"bytes": "3908421"
},
{
"name": "Ruby",
"bytes": "249744"
},
{
"name": "Shell",
"bytes": "38065"
}
],
"symlink_target": ""
} |
<?php
/**
* XmlConnect index controller
*
* @category Mage
* @package Mage_Xmlconnect
* @author Magento Core Team <core@magentocommerce.com>
*/
class Mage_XmlConnect_IndexController extends Mage_XmlConnect_Controller_Action
{
    /**
     * Default action: render the XmlConnect home layout.
     *
     * Known application errors are relayed to the client verbatim;
     * unexpected exceptions produce a generic message and are logged.
     *
     * @return null
     */
    public function indexAction()
    {
        try {
            $this->loadLayout(false);
            $this->renderLayout();
        } catch (Mage_Core_Exception $e) {
            // Application-level error: its message is safe to show.
            $this->_message($e->getMessage(), self::MESSAGE_STATUS_ERROR);
        } catch (Exception $e) {
            // Unexpected failure: generic message for the client, details to the log.
            $this->_message($this->__('Unable to load categories.'), self::MESSAGE_STATUS_ERROR);
            Mage::logException($e);
        }
    }
}
| {
"content_hash": "4e6ee5ed52c1804024fe6d3dcb423860",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 97,
"avg_line_length": 24.7,
"alnum_prop": 0.5654520917678812,
"repo_name": "dangquochoi2007/shop2015",
"id": "c3f2a0bfaa84e1a24bce2b1a0c2311d4cf859db8",
"size": "1683",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "shop/app/code/core/Mage/XmlConnect/controllers/IndexController.php",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "ActionScript",
"bytes": "19946"
},
{
"name": "CSS",
"bytes": "1754765"
},
{
"name": "JavaScript",
"bytes": "1103712"
},
{
"name": "PHP",
"bytes": "44374450"
},
{
"name": "PowerShell",
"bytes": "1028"
},
{
"name": "Ruby",
"bytes": "288"
},
{
"name": "Shell",
"bytes": "3072"
},
{
"name": "XSLT",
"bytes": "2135"
}
],
"symlink_target": ""
} |
<?php
namespace smile\ldapBundle\Tests\Controller;
use Symfony\Bundle\FrameworkBundle\Test\WebTestCase;
class DefaultControllerTest extends WebTestCase
{
    /**
     * Functional test: GET /hello/Fabien should render a greeting
     * containing "Hello Fabien".
     */
    public function testIndex()
    {
        // Boot the kernel and create a test HTTP client.
        $client = static::createClient();

        $crawler = $client->request('GET', '/hello/Fabien');

        // At least one matching node means the greeting was rendered.
        $this->assertTrue($crawler->filter('html:contains("Hello Fabien")')->count() > 0);
    }
}
| {
"content_hash": "ae48e1b6f9da304b0c83a92c17c7aa21",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 90,
"avg_line_length": 23.470588235294116,
"alnum_prop": 0.6741854636591479,
"repo_name": "azyzromanov/symfony_test",
"id": "1e7388e9078226bb1d561ef3276ccd16f1de682c",
"size": "399",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/smile/ldapBundle/Tests/Controller/DefaultControllerTest.php",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "295858"
},
{
"name": "JavaScript",
"bytes": "376081"
},
{
"name": "PHP",
"bytes": "163370"
},
{
"name": "XML",
"bytes": "8739"
}
],
"symlink_target": ""
} |
const protractorUtils = require('@angular/bazel/protractor-utils');
const protractor = require('protractor');
// Protractor onPrepare hook: start the benchmark's dev server (on an
// auto-assigned port) and point the browser at it before tests run.
module.exports = function(config) {
  return protractorUtils.runServer(config.workspace, config.server, '-port', [])
      .then(serverSpec => {
        const serverUrl = `http://localhost:${serverSpec.port}`;
        // Since the browser restarts in this benchmark we need to set both the browser.baseUrl
        // for the first test and the protractor config.baseUrl for the subsequent tests
        protractor.browser.baseUrl = serverUrl;
        return protractor.browser.getProcessedConfig().then((config) => config.baseUrl = serverUrl);
      });
};
| {
"content_hash": "5e76523c2c5288829fc43c705ac75949",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 100,
"avg_line_length": 44.666666666666664,
"alnum_prop": 0.7014925373134329,
"repo_name": "jonrimmer/angular",
"id": "6671d2d2339857d31a044bc9bc0eee4f7a1fe978",
"size": "872",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "modules/benchmarks/src/largetable/render3/protractor.on-prepare.js",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "313776"
},
{
"name": "Dockerfile",
"bytes": "10943"
},
{
"name": "HTML",
"bytes": "289836"
},
{
"name": "JavaScript",
"bytes": "746563"
},
{
"name": "PHP",
"bytes": "7222"
},
{
"name": "Python",
"bytes": "193555"
},
{
"name": "Shell",
"bytes": "106462"
},
{
"name": "TypeScript",
"bytes": "14320342"
}
],
"symlink_target": ""
} |
package io.quarkus.hibernate.orm.multiplepersistenceunits.model.annotation.user;
import javax.persistence.Entity;
import javax.persistence.GeneratedValue;
import javax.persistence.GenerationType;
import javax.persistence.Id;
import javax.persistence.Table;
/**
 * JPA entity for a user, mapped to the {@code User_} table (trailing
 * underscore avoids clashing with reserved words in some databases).
 * Ids are assigned from the {@code userSeq} database sequence.
 */
@Entity
@Table(name = "User_")
public class User {

    private long id;

    private String name;

    /** No-arg constructor required by JPA. */
    public User() {
    }

    public User(String name) {
        this.name = name;
    }

    @Id
    @GeneratedValue(strategy = GenerationType.SEQUENCE, generator = "userSeq")
    public long getId() {
        return id;
    }

    public void setId(long id) {
        this.id = id;
    }

    public String getName() {
        return name;
    }

    public void setName(String name) {
        this.name = name;
    }

    @Override
    public String toString() {
        return "User:" + name;
    }
}
| {
"content_hash": "0e29e04b3f62e3783ef3ba5da4de0560",
"timestamp": "",
"source": "github",
"line_count": 46,
"max_line_length": 80,
"avg_line_length": 18.804347826086957,
"alnum_prop": 0.638150289017341,
"repo_name": "quarkusio/quarkus",
"id": "5071e3a692150918a397d3d5390a895859f99b7d",
"size": "865",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "extensions/hibernate-orm/deployment/src/test/java/io/quarkus/hibernate/orm/multiplepersistenceunits/model/annotation/user/User.java",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ANTLR",
"bytes": "23342"
},
{
"name": "Batchfile",
"bytes": "13096"
},
{
"name": "CSS",
"bytes": "6685"
},
{
"name": "Dockerfile",
"bytes": "459"
},
{
"name": "FreeMarker",
"bytes": "8106"
},
{
"name": "Groovy",
"bytes": "16133"
},
{
"name": "HTML",
"bytes": "1418749"
},
{
"name": "Java",
"bytes": "38584810"
},
{
"name": "JavaScript",
"bytes": "90960"
},
{
"name": "Kotlin",
"bytes": "704351"
},
{
"name": "Mustache",
"bytes": "13191"
},
{
"name": "Scala",
"bytes": "9756"
},
{
"name": "Shell",
"bytes": "71729"
}
],
"symlink_target": ""
} |
<html>
<body>
<font face="verdana" size="-1">
This intention converts properties of closure type to methods.
</font>
</body>
</html>
| {
"content_hash": "2f6e85a3c6f29218d9913662d6345dc1",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 62,
"avg_line_length": 19.142857142857142,
"alnum_prop": 0.6940298507462687,
"repo_name": "joewalnes/idea-community",
"id": "507337e17c805393f6a9d2f524894b5d89ac8bca",
"size": "134",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "plugins/groovy/resources/intentionDescriptions/ConvertClosureToMethodIntention/description.html",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ActionScript",
"bytes": "387"
},
{
"name": "C",
"bytes": "136045"
},
{
"name": "C#",
"bytes": "103"
},
{
"name": "C++",
"bytes": "40449"
},
{
"name": "Emacs Lisp",
"bytes": "2507"
},
{
"name": "Erlang",
"bytes": "10"
},
{
"name": "Groovy",
"bytes": "361320"
},
{
"name": "Java",
"bytes": "89694599"
},
{
"name": "JavaScript",
"bytes": "978"
},
{
"name": "Objective-C",
"bytes": "1877"
},
{
"name": "PHP",
"bytes": "145"
},
{
"name": "Perl",
"bytes": "6523"
},
{
"name": "Python",
"bytes": "1699274"
},
{
"name": "Shell",
"bytes": "6965"
},
{
"name": "VimL",
"bytes": "5950"
}
],
"symlink_target": ""
} |
'use strict'
exports.seed = function(knex, Promise) {
// Deletes ALL existing entries
return Promise.all([
// Inserts seed entries
knex('patient')
.insert({
last_name: 'White',
first_name: 'Barry',
middle_initial: 'O',
birth_date: '1972-7-7',
street_address: '2600 Anywhere Street',
city: 'Knoxville',
state: 'TN',
zip: '37901'
}),
]);
};
| {
"content_hash": "df0d4428c5a5e89edae570ac6e601ca4",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 47,
"avg_line_length": 22.63157894736842,
"alnum_prop": 0.5348837209302325,
"repo_name": "daveharmswebdev/bvc-ehr",
"id": "74fc50544ab81dcd5c5bc344b629cf6714343643",
"size": "430",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "seeds/b2_patient.js",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1382"
},
{
"name": "HTML",
"bytes": "26322"
},
{
"name": "JavaScript",
"bytes": "117816"
}
],
"symlink_target": ""
} |
import { Component, ViewChild } from '@angular/core';
import { Events, MenuController, Nav, Platform } from 'ionic-angular';
import { SplashScreen } from '@ionic-native/splash-screen';
import { Storage } from '@ionic/storage';
import { AboutPage } from '../pages/about/about';
import { AccountPage } from '../pages/account/account';
import { LoginPage } from '../pages/login/login';
import { MapPage } from '../pages/map/map';
import { SignupPage } from '../pages/signup/signup';
import { TabsPage } from '../pages/tabs-page/tabs-page';
import { TutorialPage } from '../pages/tutorial/tutorial';
import { SchedulePage } from '../pages/schedule/schedule';
import { SpeakerListPage } from '../pages/speaker-list/speaker-list';
import { SupportPage } from '../pages/support/support';
import { ConferenceData } from '../providers/conference-data';
import { UserData } from '../providers/user-data';
import { NewsPage } from '../pages/news/news';
// Describes a single entry in the app's side menu.
export interface PageInterface {
  // Label displayed in the menu.
  title: string;
  // String identifier of the page, passed to nav.setRoot().
  name: string;
  // Page class to load.
  component: any;
  // Ionicon name shown next to the title.
  icon: string;
  // When true, selecting this entry also logs the user out (see openPage).
  logsOut?: boolean;
  // Tab index when the entry points at a tab inside TabsPage.
  index?: number;
  // Optional tab name — not referenced by the code visible in this file.
  tabName?: string;
  // Component displayed by the tab; used by isActive() to highlight the entry.
  tabComponent?: any;
}
// Root component of the conference app: wires up the side menus, the root
// navigation, initial conference-data loading and login-state handling.
@Component({
  templateUrl: 'app.template.html'
})
export class ConferenceApp {
  // the root nav is a child of the root app component
  // @ViewChild(Nav) gets a reference to the app's root nav
  @ViewChild(Nav) nav: Nav;
  // List of pages that can be navigated to from the left menu
  // the left menu only works after login
  // the login page disables the left menu
  appPages: PageInterface[] = [
    { title: 'Schedule', name: 'TabsPage', component: TabsPage, tabComponent: SchedulePage, index: 0, icon: 'calendar' },
    { title: 'Speakers', name: 'TabsPage', component: TabsPage, tabComponent: SpeakerListPage, index: 1, icon: 'contacts' },
    { title: 'Map', name: 'TabsPage', component: TabsPage, tabComponent: MapPage, index: 2, icon: 'map' },
    { title: 'About', name: 'TabsPage', component: TabsPage, tabComponent: AboutPage, index: 3, icon: 'information-circle' },
    { title: 'News', name: 'TabsPage', component: TabsPage, tabComponent: NewsPage, index: 4, icon: 'paper' }
  ];
  // Menu entries shown only while a user is logged in.
  loggedInPages: PageInterface[] = [
    { title: 'Account', name: 'AccountPage', component: AccountPage, icon: 'person' },
    { title: 'Support', name: 'SupportPage', component: SupportPage, icon: 'help' },
    { title: 'Logout', name: 'TabsPage', component: TabsPage, icon: 'log-out', logsOut: true }
  ];
  // Menu entries shown only while logged out.
  loggedOutPages: PageInterface[] = [
    { title: 'Login', name: 'LoginPage', component: LoginPage, icon: 'log-in' },
    { title: 'Support', name: 'SupportPage', component: SupportPage, icon: 'help' },
    { title: 'Signup', name: 'SignupPage', component: SignupPage, icon: 'person-add' }
  ];
  // Initial page: TutorialPage on first run, TabsPage afterwards.
  rootPage: any;
  constructor(
    public events: Events,
    public userData: UserData,
    public menu: MenuController,
    public platform: Platform,
    public confData: ConferenceData,
    public storage: Storage,
    public splashScreen: SplashScreen
  ) {
    // Check if the user has already seen the tutorial
    this.storage.get('hasSeenTutorial')
      .then((hasSeenTutorial) => {
        if (hasSeenTutorial) {
          this.rootPage = TabsPage;
        } else {
          this.rootPage = TutorialPage;
        }
        this.platformReady()
      });
    // load the conference data
    confData.load();
    // decide which menu items should be hidden by current login status stored in local storage
    this.userData.hasLoggedIn().then((hasLoggedIn) => {
      this.enableMenu(hasLoggedIn === true);
    });
    // NOTE(review): the logged-in menu is enabled unconditionally here, then
    // corrected asynchronously when the hasLoggedIn() promise above resolves.
    this.enableMenu(true);
    this.listenToLoginEvents();
  }
  // Navigates to the given menu entry; also logs the user out when the entry
  // is flagged with logsOut.
  openPage(page: PageInterface) {
    let params = {};
    // the nav component was found using @ViewChild(Nav)
    // setRoot on the nav to remove previous pages and only have this page
    // we wouldn't want the back button to show in this scenario
    if (page.index) {
      params = { tabIndex: page.index };
    }
    // If we are already on tabs just change the selected tab
    // don't setRoot again, this maintains the history stack of the
    // tabs even if changing them from the menu
    if (this.nav.getActiveChildNavs().length && page.index != undefined) {
      this.nav.getActiveChildNavs()[0].select(page.index);
      // Set the root of the nav with params if it's a tab index
    } else {
      this.nav.setRoot(page.name, params).catch((err: any) => {
        console.log(`Didn't set nav root: ${err}`);
      });
    }
    if (page.logsOut === true) {
      // Give the menu time to close before changing to logged out
      this.userData.logout();
    }
  }
  // Re-opens the tutorial as the root page.
  openTutorial() {
    this.nav.setRoot(TutorialPage);
  }
  // Toggles the side menus when login/signup/logout events fire.
  listenToLoginEvents() {
    this.events.subscribe('user:login', () => {
      this.enableMenu(true);
    });
    this.events.subscribe('user:signup', () => {
      this.enableMenu(true);
    });
    this.events.subscribe('user:logout', () => {
      this.enableMenu(false);
    });
  }
  // Shows exactly one of the two side menus depending on login state.
  enableMenu(loggedIn: boolean) {
    this.menu.enable(loggedIn, 'loggedInMenu');
    this.menu.enable(!loggedIn, 'loggedOutMenu');
  }
  // Hides the splash screen once the native platform reports ready.
  platformReady() {
    // Call any initial plugins when ready
    this.platform.ready().then(() => {
      this.splashScreen.hide();
    });
  }
  // Returns 'primary' when the given menu entry corresponds to the currently
  // active page/tab (used to highlight it in the menu), undefined otherwise.
  isActive(page: PageInterface) {
    let childNav = this.nav.getActiveChildNavs()[0];
    // Tabs are a special case because they have their own navigation
    if (childNav) {
      if (childNav.getSelected() && childNav.getSelected().root === page.tabComponent) {
        return 'primary';
      }
      return;
    }
    if (this.nav.getActive() && this.nav.getActive().name === page.name) {
      return 'primary';
    }
    return;
  }
}
| {
"content_hash": "280114bf6746a0ef90a92fc4286e4c5f",
"timestamp": "",
"source": "github",
"line_count": 172,
"max_line_length": 125,
"avg_line_length": 33.098837209302324,
"alnum_prop": 0.650447918496399,
"repo_name": "netive/melppang",
"id": "fb5a069d147c851a049205cf9e9537286f29ec2f",
"size": "5693",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/app/app.component.ts",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "18947"
},
{
"name": "C",
"bytes": "1025"
},
{
"name": "CSS",
"bytes": "8004"
},
{
"name": "HTML",
"bytes": "20566"
},
{
"name": "Java",
"bytes": "397732"
},
{
"name": "JavaScript",
"bytes": "37105"
},
{
"name": "Objective-C",
"bytes": "140057"
},
{
"name": "Shell",
"bytes": "1927"
},
{
"name": "TypeScript",
"bytes": "40415"
}
],
"symlink_target": ""
} |
from __future__ import division, print_function
import matplotlib
matplotlib.use('Agg')
from matplotlib import rc
import matplotlib.pyplot as plt
import pandas as pd
def initialize_matplotlib():
    """Configure matplotlib rc parameters for a small publication figure."""
    inches_per_pt = 1.0 / 72.27
    fig_width = 240 * inches_per_pt   # 240pt column width, in inches
    fig_height = 160 * inches_per_pt  # fixed height, in inches

    # Same rc settings as before, applied in the same order.
    rc_settings = [
        ('axes', {'labelsize': 6}),
        ('axes', {'titlesize': 6}),
        ('axes', {'unicode_minus': False}),
        ('axes', {'grid': False}),
        ('figure', {'figsize': (fig_width, fig_height)}),
        ('grid', {'linestyle': ':'}),
        ('font', {'family': 'serif'}),
        ('legend', {'fontsize': 5}),
        ('lines', {'linewidth': .7}),
        ('ps', {'usedistiller': 'xpdf'}),
        ('text', {'usetex': True}),
        ('xtick', {'labelsize': 6}),
        ('ytick', {'labelsize': 6}),
    ]
    for group, params in rc_settings:
        rc(group, **params)
# Apply the figure rc settings before any plotting occurs.
initialize_matplotlib()
# Load the per-method / per-dataset results table.
# NOTE(review): `sheetname` is the pre-0.21 pandas keyword (later renamed to
# `sheet_name`); presumably the pandas pinned for this repo accepts it — verify.
df = pd.read_excel('results_for_figure1.xlsx', sheetname='Figure3')
# Marker shape per method; commented-out methods are not plotted.
styles = {
        'TribeFlow-Dyn':'D',
        'TribeFlow':'o',
        #'FPMC':
        #'PRLME':
        }
# Plot color per dataset.
colors = {
        'LFM-1k':'g',
        'LFM-G':'m',
        'Bkite':'y',
        'FourSQ':'b',
        'Yoo':'r'
        }
# One labelled point (training time vs MRR) per method/dataset pair.
for method in styles:
    for dset in colors:
        idx = (df['Name'] == method) & (df['Dataset'] == dset)
        x_ax = df[idx]['Runtime_s']
        y_ax = df[idx]['MRR']
        horizontalalignment = 'left'
        verticalalignment = 'bottom'
        # The green dataset (LFM-1k) anchors its label text at the top instead.
        if colors[dset] == 'g':
            verticalalignment = 'top'
        for x, y in zip(x_ax, y_ax):
            plt.text(x, y, \
                    method + '\n' + \
                    dset, fontsize=7, \
                    verticalalignment=verticalalignment, \
                    horizontalalignment=horizontalalignment)
        # Matplotlib format string: dataset color + method marker, e.g. 'gD'.
        ps = colors[dset] + styles[method]
        plt.semilogx(x_ax, y_ax, ps, alpha=.5, markersize=5)
# De-clutter the axes: outward ticks, bottom/left ticks only, no top/right spines.
ax = plt.gca()
ax.tick_params(direction='out', pad=0.3)
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.xaxis.set_ticks_position('bottom')
ax.yaxis.set_ticks_position('left')
plt.ylim((0, 0.16))
plt.xlim((1e2, 1e6))
plt.minorticks_off()
plt.ylabel('MRR', labelpad=0)
plt.xlabel('Training Time (s)', labelpad=0)
plt.tight_layout(pad=0.2)
plt.savefig('figure3.pdf')
| {
"content_hash": "96edde13485a09be9d943cf5fd6cc6fe",
"timestamp": "",
"source": "github",
"line_count": 83,
"max_line_length": 67,
"avg_line_length": 26.457831325301203,
"alnum_prop": 0.5655737704918032,
"repo_name": "flaviovdf/tribeflow",
"id": "f86056c51beecacdac10dd2ecb37a3c7a2ee74f7",
"size": "2214",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "scripts/paper-data/plot_figure3.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "16016"
},
{
"name": "Jupyter Notebook",
"bytes": "58814"
},
{
"name": "Makefile",
"bytes": "337"
},
{
"name": "Python",
"bytes": "158324"
},
{
"name": "Shell",
"bytes": "3233"
}
],
"symlink_target": ""
} |
<?php
namespace Bolt\Extension\IComeFromTheNet\BookMe\Bundle\Order\Model\Builder;
/**
 * Computes cost components for a booking order (surcharges, coupon discounts,
 * final customer cost).
 *
 * NOTE(review): the class and several method names misspell
 * "Calculator"/"calculate"; they are part of the public interface, so they
 * are left unchanged here. Every method body is an unimplemented stub.
 */
class OrderCalcualtor
{
    /** Calculates the surcharge portion of the order cost. (Stub.) */
    public function calculateSurchargeCost()
    {
    }
    /** Calculates the discount from an applied coupon. (Stub.) */
    public function calcualteCouponDiscount()
    {
    }
    /** Calculates the total cost charged to the customer. (Stub.) */
    public function calcualteCustomerCost()
    {
    }
    /**
     * Runs the full cost calculation for the given order summary.
     *
     * @param OrderSummaryTrait $oOrder order summary to price (unused stub)
     */
    public function runCalcualtor(OrderSummaryTrait $oOrder)
    {
    }
}
/* End of Class */ | {
"content_hash": "a1bd3b760f340d587471dcd70dd83f85",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 75,
"avg_line_length": 12.567567567567568,
"alnum_prop": 0.5526881720430108,
"repo_name": "icomefromthenet/BoltBookeMe",
"id": "729fa4c9b3357d374cb1e75c6a2d59f3626dad0c",
"size": "465",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/Bundle/Order/Model/Builder/OrderCalcualtor.php",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2388"
},
{
"name": "HTML",
"bytes": "61895"
},
{
"name": "JavaScript",
"bytes": "12143"
},
{
"name": "PHP",
"bytes": "1346314"
}
],
"symlink_target": ""
} |
// Builds a camera window with a custom overlay (scanner frame, take-picture
// and close buttons), opens the device camera behind it, and returns the
// Titanium window that will receive the captured image.
function cam_overlay() {
    // Bug fix: the original assigned to an undeclared `container`, leaking it
    // into the global scope; `var` scopes it to this function.
    var container = {};
    container.win = Titanium.UI.createWindow();

    // Red framing rectangle drawn over the camera preview.
    container.scanner = Titanium.UI.createView({
        width:260,
        height:200,
        borderColor:'red',
        borderWidth:5,
        borderRadius:15
    });

    // "Take Picture" button anchored at the bottom of the overlay.
    container.button = Titanium.UI.createButton({
        color:'#fff',
        backgroundImage:'/images/BUTT_grn_on.png',
        backgroundSelectedImage:'/images/BUTT_grn_off.png',
        backgroundDisabledImage: '/images/BUTT_gry_on.png',
        bottom:10,
        width:301,
        height:57,
        font:{fontSize:20,fontWeight:'bold',fontFamily:'Helvetica Neue'},
        title:'Take Picture'
    });

    // "Close Camera" button anchored at the top of the overlay.
    container.closebutton = Titanium.UI.createButton({
        color:'#fff',
        backgroundImage:'/images/BUTT_red_on.png',
        backgroundSelectedImage:'/images/BUTT_red_off.png',
        backgroundDisabledImage: '/images/BUTT_gry_on.png',
        top:10,
        width:301,
        height:57,
        font:{fontSize:20,fontWeight:'bold',fontFamily:'Helvetica Neue'},
        title:'Close Camera'
    });

    // Transient "Picture Taken" toast, hidden until a picture is taken.
    container.messageView = Titanium.UI.createView({
        height:30,
        width:250,
        visible:false
    });

    // Semi-transparent rounded backdrop for the toast.
    container.indView = Titanium.UI.createView({
        height:30,
        width:250,
        backgroundColor:'#000',
        borderRadius:10,
        opacity:0.7
    });
    container.messageView.add(container.indView);

    // message
    container.message = Titanium.UI.createLabel({
        text:'Picture Taken',
        color:'#fff',
        font:{fontSize:20,fontWeight:'bold',fontFamily:'Helvetica Neue'},
        width:'auto',
        height:'auto'
    });
    container.messageView.add(container.message);

    // Assemble the overlay that sits on top of the camera preview.
    container.overlay = Titanium.UI.createView();
    container.overlay.add(container.scanner);
    container.overlay.add(container.button);
    container.overlay.add(container.messageView);
    container.overlay.add(container.closebutton);

    // Take a picture, flash the frame blue and show the toast for 500ms.
    container.button.addEventListener('click',function()
    {
        container.scanner.borderColor = 'blue';
        Ti.Media.takePicture();
        container.messageView.animate({visible:true});
        setTimeout(function()
        {
            container.scanner.borderColor = 'red';
            container.messageView.animate({visible:false});
        },500);
    });

    // Dismiss the camera and close the window.
    container.closebutton.addEventListener('click',function()
    {
        alert("Camera closed");
        Ti.Media.hideCamera();
        container.win.close();
    });

    Titanium.Media.showCamera({
        success:function(event)
        {
            Ti.API.debug("picture was taken");
            // place our picture into our window
            var imageView = Ti.UI.createImageView({
                image:event.media,
                width:container.win.width,
                height:container.win.height
            });
            container.win.add(imageView);
            // programatically hide the camera
            Ti.Media.hideCamera();
        },
        cancel:function()
        {
        },
        error:function(error)
        {
            var a = Titanium.UI.createAlertDialog({title:'Camera'});
            if (error.code == Titanium.Media.NO_CAMERA)
            {
                a.setMessage('Please run this test on device');
            }
            else
            {
                a.setMessage('Unexpected error: ' + error.code);
            }
            a.show();
        },
        overlay:container.overlay,
        showControls:false, // don't show system controls
        mediaTypes:Ti.Media.MEDIA_TYPE_PHOTO,
        autohide:false // tell the system not to auto-hide and we'll do it ourself
    });

    // NOTE(review): `open` is attached to the local container, but the window
    // itself is returned below, so callers never see this helper.
    container.open = function(){
        container.win.open();
    };

    return container.win;
};

module.exports = cam_overlay;
"content_hash": "55ce93c53fed545ffbd9dd1c4b830347",
"timestamp": "",
"source": "github",
"line_count": 132,
"max_line_length": 76,
"avg_line_length": 24.022727272727273,
"alnum_prop": 0.6975717439293598,
"repo_name": "fziegler/plates",
"id": "7030a63d7f94bee139d446c7b4407069aabff956",
"size": "3171",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "Resources/ui/handheld/ios/phone/camera_overlay.js",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "533303"
}
],
"symlink_target": ""
} |
<!DOCTYPE html>
<html>
<head>
<!-- [[! Document Settings ]] -->
<meta charset="utf-8" />
<meta http-equiv="X-UA-Compatible" content="IE=edge" />
<!-- [[! Page Meta ]] -->
<title>raycoarana</title>
<meta name="description" content="raycoarana - My coding adventures and other random stuff" />
<meta name="HandheldFriendly" content="True" />
<meta name="viewport" content="width=device-width, initial-scale=1.0" />
<link rel="shortcut icon" href="/assets/images/favicon.ico" >
<!-- [[! Styles'n'Scripts ]] -->
<link rel="stylesheet" type="text/css" href="/assets/css/screen.css" />
<link rel="stylesheet" type="text/css"
href="//fonts.googleapis.com/css?family=Merriweather:300,700,700italic,300italic|Open+Sans:700,400" />
<link rel="stylesheet" type="text/css" href="/assets/css/syntax.css" />
<link rel="stylesheet" type="text/css" href="/assets/css/contact.css" />
<script type="text/javascript" src="https://code.jquery.com/jquery-1.11.3.min.js"></script>
<link rel="canonical" href="/" />
<meta name="referrer" content="origin" />
<meta property="og:site_name" content="raycoarana" />
<meta property="og:type" content="website" />
<meta property="og:title" content="raycoarana" />
<meta property="og:description" content="My coding adventures and other random stuff" />
<meta property="og:url" content="/" />
<meta property="og:image" content="/assets/images/cover1.jpg" />
<meta name="twitter:card" content="summary_large_image" />
<meta name="twitter:title" content="raycoarana" />
<meta name="twitter:description" content="My coding adventures and other random stuff" />
<meta name="twitter:url" content="/" />
<meta name="twitter:image:src" content="/assets/images/cover1.jpg" />
<link rel="stylesheet" type="text/css" href="//cdnjs.cloudflare.com/ajax/libs/cookieconsent2/3.0.3/cookieconsent.min.css" />
<script src="//cdnjs.cloudflare.com/ajax/libs/cookieconsent2/3.0.3/cookieconsent.min.js"></script>
<script>
window.addEventListener("load", function(){
window.cookieconsent.initialise({
"palette": {
"popup": {
"background": "#edeff5",
"text": "#838391"
},
"button": {
"background": "#4b81e8"
}
},
"theme": "classic",
"content": {
"message": "Este sitio web usa cookies para proporcionar la mejor experiencia.",
"dismiss": "Aceptar",
"link": "Política de privacidad",
"href": "/privacy"
}
})});
</script>
<script type="application/ld+json">
{
"@context": "http://schema.org",
"@type": "Website",
"publisher": "raycoarana.com - My coding adventures",
"url": "/",
"image": "/assets/images/cover1.jpg",
"description": "My coding adventures and other random stuff"
}
</script>
<!-- Start GitHub Plug-in requirements -->
<link rel="stylesheet" href="/assets/css/github-styles.css">
<script src="/assets/js/jquery.github.widget.js"></script>
<!-- End GitHub Plug-in requirements -->
<meta name="generator" content="Jekyll 3.0.0" />
<link rel="alternate" type="application/rss+xml" title="raycoarana" href="/feed.xml" />
</head>
<body class="tag-template nav-closed">
<div class="nav">
<h3 class="nav-title">Menu</h3>
<a href class="nav-close">
<span class="hidden">Close</span>
</a>
<ul>
<li class="nav-home " role="presentation"><a href="/">Inicio</a></li>
<li class="nav-development " role="presentation"><a href="/tag/development">Desarrollo</a></li>
<li class="nav-author " role="presentation"><a href="/author/raycoarana">Autor</a></li>
<li class="nav-about " role="presentation"><a href="/about">Sobre mí</a></li>
<li class="nav-contact " role="presentation"><a href="/contact">Contacto</a></li>
</ul>
<a class="subscribe-button icon-feed" href="/feed.xml">RSS</a>
</div>
<span class="nav-cover"></span>
<div class="site-wrapper">
<!-- < dynamically overriding backgrounds for tags as well as descriptions -->
<!-- < default}} -->
<!-- The tag above means - insert everything in this file into the [body] of the default.hbs template -->
<!-- If we have a tag cover, display that - else blog cover - else nothing -->
<header class="main-header tag-head " style="background-image: url(/assets/images/cover2.jpg) ">
<nav class="main-nav overlay clearfix">
<a class="blog-logo" href="/"><img src="/assets/images/main-logo.png" alt="Blog Logo" /></a>
<a class="menu-button icon-menu" href><span class="word">Menu</span></a>
</nav>
<div class="vertical">
<div class="main-header-content inner">
<h1 class="page-title">Ux</h1>
<h2 class="page-description">
A 1-post collection
</h2>
</div>
</div>
</header>
<!-- The main content area on the homepage -->
<main id="content" class="content" role="main">
<!-- The tag below includes the post loop - partials/loop.hbs -->
<!-- Previous/next page links - only displayed on page 2+ -->
<div class="extra-pagination inner">
<nav class="pagination" role="pagination">
<span class="page-number"> Page 1 of 1 </span>
</nav>
</div>
<!-- This is the post loop - each post will be output using this markup -->
<article class="post">
<header class="post-header">
<h2 class="post-title"><a href="/blog/2014/03/antipatrones-de-navegacion-en-android/">10 antipatrones de navegación en Android</a></h2>
</header>
<section class="post-excerpt">
<p>Los chicos de Android Design in Action han publicado un video en YouTube donde nos describen algunos antipatrones de navegación que se han encontrado en algunas apps de las que analizan regularmente. Es un video muy interesante y casi obligatorio ver para no caer en malos patrones a la hora de crear aplicaciones. Si bien ver el video está bien, os lo resumo de forma rápida por si no tenéis 26 min libres para verlo.</p>
<a class="read-more" href="/blog/2014/03/antipatrones-de-navegacion-en-android/">»</a>
</section>
<footer class="post-meta">
<img class="author-thumb" src="/assets/images/avatar.jpg" alt="Author image" nopin="nopin" />
<!-- author -->
<a href='/author/raycoarana'>Rayco Araña</a>
<!-- [[tags prefix=" on "]] -->
on
<a href='/tag/android'>Android</a>,
<a href='/tag/diseño'>Diseño</a>,
<a href='/tag/patrones'>Patrones</a>,
<a href='/tag/smartphone'>Smartphone</a>,
<a href='/tag/UX'>Ux</a>
<time class="post-date" datetime="2014-03-02">02 Mar 2014</time>
</footer>
</article>
<!-- Previous/next page links - displayed on every page -->
<nav class="pagination" role="pagination">
<span class="page-number"> Page 1 of 1 </span>
</nav>
</main>
<footer class="site-footer clearfix">
<section class="copyright"><a href="/">raycoarana</a> © 2018</section>
<section class="poweredby">Proudly published with <a href="https://jekyllrb.com/">Jekyll</a> using <a href="https://github.com/biomadeira/jasper">Jasper</a></section>
</footer>
</div>
<script type="text/javascript" src="/assets/js/jquery.fitvids.js"></script>
<script type="text/javascript" src="/assets/js/index.js"></script>
<!-- Add Google Analytics -->
<!-- Google Analytics Tracking code -->
<script>
(function(i,s,o,g,r,a,m){i['GoogleAnalyticsObject']=r;i[r]=i[r]||function(){
(i[r].q=i[r].q||[]).push(arguments)},i[r].l=1*new Date();a=s.createElement(o),
m=s.getElementsByTagName(o)[0];a.async=1;a.src=g;m.parentNode.insertBefore(a,m)
})(window,document,'script','//www.google-analytics.com/analytics.js','ga');
ga('create', 'UA-48361320-1', 'auto');
ga('send', 'pageview');
</script>
</body>
</html>
| {
"content_hash": "eb2aa7e3d0e0bc83b1043dacda277b7e",
"timestamp": "",
"source": "github",
"line_count": 225,
"max_line_length": 437,
"avg_line_length": 38.30222222222222,
"alnum_prop": 0.5778602924112323,
"repo_name": "raycoarana/raycoarana.github.io",
"id": "db488076d42d3600de573195fa68b64a9831be46",
"size": "8628",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tag/UX/index.html",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "50080"
},
{
"name": "HTML",
"bytes": "1071917"
},
{
"name": "JavaScript",
"bytes": "5351"
}
],
"symlink_target": ""
} |
// Initializes a compact-unwind context from a mapped __unwind_info section.
// Validates the pointers and the section header version, then records the
// unwindInfo/ehFrame pointers and the image load address in `context`.
// Returns false (after logging) on any validation or read failure.
// NOTE(review): `ehFrame` is stored without a validity check; presumably NULL
// is acceptable until a DWARF fallback is attempted — confirm.
bool FIRCLSCompactUnwindInit(FIRCLSCompactUnwindContext* context,
                             const void* unwindInfo,
                             const void* ehFrame,
                             uintptr_t loadAddress) {
  if (!FIRCLSIsValidPointer(context)) {
    FIRCLSSDKLog("Error: invalid context passed to compact unwind init");
    return false;
  }
  if (!FIRCLSIsValidPointer(unwindInfo)) {
    FIRCLSSDKLog("Error: invalid unwind info passed to compact unwind init");
    return false;
  }
  if (!FIRCLSIsValidPointer(loadAddress)) {
    FIRCLSSDKLog("Error: invalid load address passed to compact unwind init");
    return false;
  }
  // Start from a clean slate so stale lookup state cannot leak between runs.
  memset(context, 0, sizeof(FIRCLSCompactUnwindContext));
  // Copy the section header out of the (possibly crashed) process image.
  if (!FIRCLSReadMemory((vm_address_t)unwindInfo, &context->unwindHeader,
                        sizeof(struct unwind_info_section_header))) {
    FIRCLSSDKLog("Error: could not read memory contents of unwindInfo\n");
    return false;
  }
  if (context->unwindHeader.version != UNWIND_SECTION_VERSION) {
    FIRCLSSDKLog("Error: bad unwind_info structure version (%d != %d)\n",
                 context->unwindHeader.version, UNWIND_SECTION_VERSION);
    return false;
  }
  // copy in the values
  context->unwindInfo = unwindInfo;
  context->ehFrame = ehFrame;
  context->loadAddress = loadAddress;
  return true;
}
void* FIRCLSCompactUnwindGetIndexData(FIRCLSCompactUnwindContext* context) {
  // First-level index entries start indexSectionOffset bytes into the
  // __unwind_info section.
  const uintptr_t sectionBase = (uintptr_t)context->unwindInfo;
  const uintptr_t offset = (uintptr_t)context->unwindHeader.indexSectionOffset;
  return (void*)(sectionBase + offset);
}
compact_unwind_encoding_t* FIRCLSCompactUnwindGetCommonEncodings(
    FIRCLSCompactUnwindContext* context) {
  // The shared encodings array lives commonEncodingsArraySectionOffset bytes
  // into the __unwind_info section.
  const uintptr_t sectionBase = (uintptr_t)context->unwindInfo;
  const uintptr_t offset =
      (uintptr_t)context->unwindHeader.commonEncodingsArraySectionOffset;
  return (compact_unwind_encoding_t*)(sectionBase + offset);
}
void* FIRCLSCompactUnwindGetSecondLevelData(FIRCLSCompactUnwindContext* context) {
  // The second-level page selected by the cached first-level index entry.
  const uintptr_t sectionBase = (uintptr_t)context->unwindInfo;
  return (void*)(sectionBase + context->indexHeader.secondLevelPagesSectionOffset);
}
uintptr_t FIRCLSCompactUnwindGetIndexFunctionOffset(FIRCLSCompactUnwindContext* context) {
  // Function offsets inside the selected second-level page are relative to
  // loadAddress plus the first-level entry's functionOffset.
  const uintptr_t base = context->loadAddress;
  return base + context->indexHeader.functionOffset;
}
uintptr_t FIRCLSCompactUnwindGetTargetAddress(FIRCLSCompactUnwindContext* context, uintptr_t pc) {
  // Convert an absolute PC into an offset relative to the index function
  // offset; a PC at or below that base is rejected with 0.
  const uintptr_t offset = FIRCLSCompactUnwindGetIndexFunctionOffset(context);
  if (pc > offset) {
    return pc - offset;
  }
  FIRCLSSDKLog("Error: PC is invalid\n");
  return 0;
}
#pragma mark - Parsing and Lookup
// Locates `address` in the first-level index and caches the matching index
// entry (plus the following entry's functionOffset) into `context` for the
// second-level lookup. Returns false when the address is out of range or no
// entry pair brackets it.
bool FIRCLSCompactUnwindLookupFirstLevel(FIRCLSCompactUnwindContext* context, uintptr_t address) {
  if (!context) {
    return false;
  }
  // In practice, it appears that there is always one more first level entry
  // than required. This actually makes sense, since we have to use this
  // info to check if we are in range. This implies there must be
  // at least 2 indices at a minimum.
  uint32_t indexCount = context->unwindHeader.indexCount;
  if (indexCount < 2) {
    return false;
  }
  // make sure our address is valid
  if (address < context->loadAddress) {
    return false;
  }
  struct unwind_info_section_header_index_entry* indexEntries =
      FIRCLSCompactUnwindGetIndexData(context);
  if (!indexEntries) {
    return false;
  }
  address -= context->loadAddress;  // search relative to zero
  // Linear scan over [entry, nextEntry) ranges; minus one because of the
  // extra sentinel entry - see comment above.
  for (uint32_t index = 0; index < indexCount - 1; ++index) {
    uint32_t value = indexEntries[index].functionOffset;
    uint32_t nextValue = indexEntries[index + 1].functionOffset;
    if (address >= value && address < nextValue) {
      context->firstLevelNextFunctionOffset = nextValue;
      context->indexHeader = indexEntries[index];
      return true;
    }
  }
  return false;
}
uint32_t FIRCLSCompactUnwindGetSecondLevelPageKind(FIRCLSCompactUnwindContext* context) {
  // The page-kind tag is the first 32-bit word of the second-level page;
  // 0 is returned for a missing context.
  if (context == NULL) {
    return 0;
  }
  const uint32_t* kindPointer =
      (const uint32_t*)FIRCLSCompactUnwindGetSecondLevelData(context);
  return *kindPointer;
}
// Regular (non-compressed) second-level pages are not currently handled;
// this always logs and reports failure so the caller can fall back to
// another unwinding strategy.
bool FIRCLSCompactUnwindLookupSecondLevelRegular(FIRCLSCompactUnwindContext* context,
                                                 uintptr_t pc,
                                                 FIRCLSCompactUnwindResult* result) {
  FIRCLSSDKLog("Encountered a regular second-level page\n");
  return false;
}
// Binary-searches a compressed second-level entry array for the entry whose
// function-offset range contains `address` (which must already be relative to
// the index function offset). On success writes the entry position to *index.
// NOTE(review): an address matching the very last entry returns false here;
// the caller handles the last-entry case via firstLevelNextFunctionOffset —
// confirm this rejection is intentional.
// this only works for compressed entries right now
bool FIRCLSCompactUnwindBinarySearchSecondLevel(uintptr_t address,
                                                uint32_t* index,
                                                uint16_t entryCount,
                                                uint32_t* entryArray) {
  if (!index || !entryArray) {
    return false;
  }
  if (entryCount == 0) {
    return false;
  }
  if (address == 0) {
    return false;
  }
  uint32_t highIndex = entryCount;
  *index = 0;
  // Invariant: the answer, if any, lies in [*index, highIndex).
  while (*index < highIndex) {
    uint32_t midIndex = (*index + highIndex) / 2;
    // FIRCLSSDKLog("%u %u %u\n", *index, midIndex, highIndex);
    uintptr_t value = UNWIND_INFO_COMPRESSED_ENTRY_FUNC_OFFSET(entryArray[midIndex]);
    if (value > address) {
      // Mid entry starts past the address: search the lower half.
      if (highIndex == midIndex) {
        return false;
      }
      highIndex = midIndex;
      continue;
    }
    *index = midIndex;
    // are we at the end of the array?
    if (midIndex == entryCount - 1) {
      return false;
    }
    uintptr_t nextValue = UNWIND_INFO_COMPRESSED_ENTRY_FUNC_OFFSET(entryArray[midIndex + 1]);
    if (nextValue > address) {
      // we've found it
      break;
    }
    *index += 1;
  }
  // check to make sure we're still within bounds
  return *index < entryCount;
}
// Resolves `pc` inside a compressed second-level page: fills `result` with
// the function's start/end addresses and its compact unwind encoding (taken
// from either the common-encodings array or the page-local array).
// Returns false when the PC cannot be found or the entry has no encoding.
bool FIRCLSCompactUnwindLookupSecondLevelCompressed(FIRCLSCompactUnwindContext* context,
                                                    uintptr_t pc,
                                                    FIRCLSCompactUnwindResult* result) {
  if (!context || !result) {
    return false;
  }
  void* ptr = FIRCLSCompactUnwindGetSecondLevelData(context);
  if (!ptr) {
    return false;
  }
  memset(result, 0, sizeof(FIRCLSCompactUnwindResult));
  struct unwind_info_compressed_second_level_page_header* header =
      (struct unwind_info_compressed_second_level_page_header*)ptr;
  // adjust address to be relative to the index function offset
  uintptr_t targetAddress = FIRCLSCompactUnwindGetTargetAddress(context, pc);
  uint32_t* entryArray = ptr + header->entryPageOffset;
  uint32_t index = 0;
  if (!FIRCLSCompactUnwindBinarySearchSecondLevel(targetAddress, &index, header->entryCount,
                                                  entryArray)) {
    FIRCLSSDKLogInfo("Unable to find PC in second level\n");
    return false;
  }
  uint32_t entry = entryArray[index];
  // Computing the function start address is easy
  result->functionStart = UNWIND_INFO_COMPRESSED_ENTRY_FUNC_OFFSET(entry) +
                          FIRCLSCompactUnwindGetIndexFunctionOffset(context);
  // Computing the end is more complex, because we could be on the last entry. In that case, we
  // cannot use the next value as the end.
  result->functionEnd = context->loadAddress;
  if (index < header->entryCount - 1) {
    result->functionEnd += UNWIND_INFO_COMPRESSED_ENTRY_FUNC_OFFSET(entryArray[index + 1]) +
                           context->indexHeader.functionOffset;
  } else {
    result->functionEnd += context->firstLevelNextFunctionOffset;
  }
  // FIRCLSSDKLog("Located %lx => %lx %lx\n", pc, result->functionStart, result->functionEnd);
  if ((pc < result->functionStart) || (pc >= result->functionEnd)) {
    FIRCLSSDKLog("PC does not match computed function range\n");
    return false;
  }
  uint32_t encodingIndex = UNWIND_INFO_COMPRESSED_ENTRY_ENCODING_INDEX(entry);
  // encoding could be in the common array
  if (encodingIndex < context->unwindHeader.commonEncodingsArrayCount) {
    result->encoding = FIRCLSCompactUnwindGetCommonEncodings(context)[encodingIndex];
    // FIRCLSSDKLog("Entry has common encoding: 0x%x\n", result->encoding);
  } else {
    // Page-local encodings are indexed past the common-encodings count.
    encodingIndex = encodingIndex - context->unwindHeader.commonEncodingsArrayCount;
    compact_unwind_encoding_t* encodings = ptr + header->encodingsPageOffset;
    result->encoding = encodings[encodingIndex];
    // FIRCLSSDKLog("Entry has compressed encoding: 0x%x\n", result->encoding);
  }
  if (result->encoding == 0) {
    FIRCLSSDKLogInfo("Entry has has no unwind info\n");
    return false;
  }
  return true;
}
bool FIRCLSCompactUnwindLookupSecondLevel(FIRCLSCompactUnwindContext* context,
                                          uintptr_t pc,
                                          FIRCLSCompactUnwindResult* result) {
  // Dispatch on the page-kind tag stored at the head of the second-level page.
  const uint32_t pageKind = FIRCLSCompactUnwindGetSecondLevelPageKind(context);
  if (pageKind == UNWIND_SECOND_LEVEL_REGULAR) {
    FIRCLSSDKLogInfo("Found a second level regular header\n");
    return FIRCLSCompactUnwindLookupSecondLevelRegular(context, pc, result);
  }
  if (pageKind == UNWIND_SECOND_LEVEL_COMPRESSED) {
    FIRCLSSDKLogInfo("Found a second level compressed header\n");
    return FIRCLSCompactUnwindLookupSecondLevelCompressed(context, pc, result);
  }
  FIRCLSSDKLogError("Unrecognized header kind - unable to continue\n");
  return false;
}
// Finds the compact-unwind entry covering |pc|. The lookup is two-tiered:
// the first-level index selects a second-level page, and that page carries
// the actual encoding, which is written into |result|.
bool FIRCLSCompactUnwindLookup(FIRCLSCompactUnwindContext* context,
                               uintptr_t pc,
                               FIRCLSCompactUnwindResult* result) {
  if (context == NULL || result == NULL) {
    return false;
  }

  // Tier one: narrow the search down to a single second-level page.
  if (!FIRCLSCompactUnwindLookupFirstLevel(context, pc)) {
    FIRCLSSDKLogWarn("Unable to find pc in first level\n");
    return false;
  }

  FIRCLSSDKLogDebug("Found first level (second => %u)\n",
                    context->indexHeader.secondLevelPagesSectionOffset);

  // Tier two: pull the encoding out of that page.
  if (FIRCLSCompactUnwindLookupSecondLevel(context, pc, result)) {
    return true;
  }

  FIRCLSSDKLogInfo("Second-level PC lookup failed\n");
  return false;
}
#pragma mark - Unwinding
// Looks up compact unwind info for the PC currently in |registers| and, on
// success, rewrites |registers| with the caller's computed register state.
bool FIRCLSCompactUnwindLookupAndCompute(FIRCLSCompactUnwindContext* context,
                                         FIRCLSThreadContext* registers) {
  if (context == NULL || registers == NULL) {
    return false;
  }

  const uintptr_t pc = FIRCLSThreadContextGetPC(registers);

  // Sanity check: a pc below the load address cannot belong to this image.
  if (pc < context->loadAddress) {
    return false;
  }

  FIRCLSCompactUnwindResult result;
  memset(&result, 0, sizeof(result));

  if (!FIRCLSCompactUnwindLookup(context, pc, &result)) {
    FIRCLSSDKLogInfo("Unable to lookup compact unwind for pc %p\n", (void*)pc);
    return false;
  }

  // The encoding is architecture-specific, so the register computation is
  // implemented per-arch.
  if (FIRCLSCompactUnwindComputeRegisters(context, &result, registers)) {
    return true;
  }

  FIRCLSSDKLogError("Failed to compute registers\n");
  return false;
}
#if CLS_DWARF_UNWINDING_SUPPORTED
// Performs a DWARF-based unwind step using the FDE found at |dwarfOffset|
// inside the image's eh_frame data, updating |registers| on success.
bool FIRCLSCompactUnwindDwarfFrame(FIRCLSCompactUnwindContext* context,
                                   uintptr_t dwarfOffset,
                                   FIRCLSThreadContext* registers) {
  if (context == NULL || registers == NULL) {
    return false;
  }

  // Everyone's favorite! Dwarf unwinding!
  FIRCLSSDKLogInfo("Trying to read dwarf data with offset %lx\n", dwarfOffset);

  FIRCLSDwarfCFIRecord record;
  if (!FIRCLSDwarfParseCFIFromFDERecordOffset(&record, context->ehFrame, dwarfOffset)) {
    FIRCLSSDKLogError("Unable to init FDE\n");
    return false;
  }

  if (FIRCLSDwarfUnwindComputeRegisters(&record, registers)) {
    return true;
  }

  FIRCLSSDKLogError("Failed to compute DWARF registers\n");
  return false;
}
#endif
#else
INJECT_STRIP_SYMBOL(compact_unwind)
#endif
| {
"content_hash": "53accae85b42bd14369fdad1f58a3e84",
"timestamp": "",
"source": "github",
"line_count": 377,
"max_line_length": 99,
"avg_line_length": 31.618037135278513,
"alnum_prop": 0.6621644295302014,
"repo_name": "firebase/firebase-ios-sdk",
"id": "efd2542604bfb49013f5c975bd1892fdec1b3f23",
"size": "13036",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "Crashlytics/Crashlytics/Unwind/Compact/FIRCLSCompactUnwind.c",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "365959"
},
{
"name": "C++",
"bytes": "8345652"
},
{
"name": "CMake",
"bytes": "91856"
},
{
"name": "JavaScript",
"bytes": "3675"
},
{
"name": "Objective-C",
"bytes": "10276029"
},
{
"name": "Objective-C++",
"bytes": "837306"
},
{
"name": "Python",
"bytes": "117723"
},
{
"name": "Ruby",
"bytes": "179250"
},
{
"name": "Shell",
"bytes": "127192"
},
{
"name": "Swift",
"bytes": "2052268"
},
{
"name": "sed",
"bytes": "2015"
}
],
"symlink_target": ""
} |
using System;
using System.Text;
using Diadoc.Api.Proto;
using Diadoc.Api.Proto.Recognition;
namespace Diadoc.Api
{
	public partial class DiadocHttpApi
	{
		/// <summary>
		/// Submits file content for server-side recognition and returns the
		/// raw UTF-8 response (the recognition ticket).
		/// </summary>
		/// <param name="fileName">Name of the file being recognized.</param>
		/// <param name="content">Raw file bytes to upload.</param>
		public string Recognize(string fileName, byte[] content)
		{
			// Escape the value so spaces/special characters cannot corrupt the query string.
			var queryString = string.Format("/Recognize?filename={0}", Uri.EscapeDataString(fileName));
			var responseBytes = PerformHttpRequest(null, "POST", queryString, content);
			return Encoding.UTF8.GetString(responseBytes);
		}

		/// <summary>
		/// Fetches the recognition result for a previously submitted document.
		/// </summary>
		/// <param name="recognitionId">Identifier returned by <see cref="Recognize"/>.</param>
		public Recognized GetRecognized(string recognitionId)
		{
			var queryString = string.Format("/GetRecognized?recognitionId={0}",
				Uri.EscapeDataString(recognitionId));
			return PerformHttpRequest<Recognized>(null, "GET", queryString);
		}

		/// <summary>
		/// Parses a free-form Russian postal address into structured parts.
		/// </summary>
		/// <param name="address">Free-form address text; may contain spaces,
		/// which previously produced an invalid request URL.</param>
		public RussianAddress ParseRussianAddress(string address)
		{
			var queryString = string.Format("/ParseRussianAddress?address={0}",
				Uri.EscapeDataString(address));
			return PerformHttpRequest<RussianAddress>(null, "GET", queryString);
		}
	}
}
| {
"content_hash": "00a281dc2dc2c062b5b9af604e09bc98",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 86,
"avg_line_length": 30.678571428571427,
"alnum_prop": 0.7508731082654249,
"repo_name": "s-rogonov/diadocsdk-csharp",
"id": "d9697d17d6022fa8d373ee3a784d6da2967348fa",
"size": "861",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "src/DiadocHttpApi.Recognize.cs",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "790"
},
{
"name": "C#",
"bytes": "1238487"
},
{
"name": "PowerShell",
"bytes": "6089"
},
{
"name": "Protocol Buffer",
"bytes": "138050"
},
{
"name": "Shell",
"bytes": "2935"
}
],
"symlink_target": ""
} |
# Resolve the script's own name and physical directory, regardless of the
# directory it was invoked from (pushd/popd keep the caller's cwd intact).
script_name=$(basename ${0}); pushd $(dirname ${0}) > /dev/null
script_path=$(pwd -P); popd > /dev/null
# Bashstrap root is assumed to be one level above this script's directory.
bashstrap_path=($(cd ${script_path}/..; pwd -P))

# Shared helpers and variables (install_file, backup_path, restore_file,
# restore_file_list, original_path -- presumably defined here; confirm in common.sh).
source "${script_path}/common.sh"
# Abort the installation if Bashstrap's install marker already exists.
# ${install_file} is expected to be set by common.sh.
# Quoting fix: with an unquoted empty ${install_file}, `[ -r ]` evaluates
# to true (it tests the non-empty string "-r"), wrongly aborting installs.
function preinstall_check() {
    if [ -r "${install_file}" ]; then
        echo "Bashstrap already installed. Try ${script_path}/update.sh"
        exit 1
    fi
}
# Back up each pre-existing dotfile named in ${restore_file_list[@]} into
# ${backup_path}, and record its name in ${restore_file} so it can be put
# back on uninstall. All paths come from common.sh.
# Quoting fix: every expansion is now double-quoted so paths containing
# spaces or glob characters are not split or expanded; the array expansion
# "${restore_file_list[@]}" preserves one word per element.
function preinstall_backup() {
    rm -rf "${backup_path}"
    rm -f "${restore_file}"
    mkdir -p "${backup_path}"
    touch "${restore_file}"

    for file in "${restore_file_list[@]}"
    do
        if [[ -r "${original_path}/${file}" ]]; then
            echo " - Backing up ${original_path}/${file} to ${backup_path}/${file}"
            cp "${original_path}/${file}" "${backup_path}/${file}"
            echo "${file}" >> "${restore_file}"
        fi
    done
}
# Main flow: refuse to install twice, back up the user's originals, then
# install Bashstrap's files and apply git configuration.
preinstall_check
echo "Initialising Bashstrap"
preinstall_backup
# install_files and set_git_config are presumably provided by common.sh -- confirm.
install_files
set_git_config
| {
"content_hash": "309e1d6fb08b2b6124ae7757deea5812",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 77,
"avg_line_length": 23.52777777777778,
"alnum_prop": 0.6257378984651711,
"repo_name": "andystanton/bashstrap",
"id": "1e89749de6742a37a321d606c02cd536272ce26d",
"size": "860",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "script/init.sh",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Shell",
"bytes": "24323"
},
{
"name": "VimL",
"bytes": "459"
}
],
"symlink_target": ""
} |
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<meta name="description" content="Ionic makes it incredibly easy to build beautiful and interactive mobile apps using HTML5 and AngularJS.">
<meta name="keywords" content="html5,javascript,mobile,drifty,ionic,hybrid,phonegap,cordova,native,ios,android,angularjs">
<meta name="author" content="Drifty">
<meta property="og:image" content="http://ionicframework.com/img/ionic-logo-blog.png"/>
<link href="/css/v2.css?1" rel="stylesheet">
<script src="//ajax.googleapis.com/ajax/libs/jquery/1.11.3/jquery.min.js"></script>
<script src="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.5/js/bootstrap.min.js"></script>
<script src="https://ajax.googleapis.com/ajax/libs/angularjs/1.4.6/angular.min.js"></script>
<script src="https://ajax.googleapis.com/ajax/libs/angularjs/1.4.6/angular-animate.min.js"></script>
<script src="/js/ionic-docs.min.js"></script>
<link rel="stylesheet" href="/css/ionic-hljs.css">
<script src="/js/ionic-highlight.js"></script>
<script>hljs.initHighlightingOnLoad();</script>
<title>SlideLazy - Class in module - Ionic Framework</title>
</head>
<body id="docs-page-{{SlideLazy | slugify}}"
class="v2 docs page-{{SlideLazy | slugify}}"
data-spy="scroll"
data-target="#components-index">
<nav class="navbar navbar-default">
<div class="container-fluid">
<!-- Brand and toggle get grouped for better mobile display -->
<div class="navbar-header">
<button type="button" class="navbar-toggle collapsed" data-toggle="collapse" data-target="#bs-example-navbar-collapse-1" aria-expanded="false">
<span class="sr-only">Toggle navigation</span>
<span class="icon-bar"></span>
<span class="icon-bar"></span>
<span class="icon-bar"></span>
</button>
<a class="navbar-brand" href="#">
<img src="/img/ionic-docs-logo.png" id="ionic-docs-logo" />
</a>
</div>
<!-- Collect the nav links, forms, and other content for toggling -->
<div class="collapse navbar-collapse" id="bs-example-navbar-collapse-1">
<form class="navbar-form navbar-left" role="search">
<div class="form-group">
<input type="text" class="form-control" placeholder="Search">
</div>
</form>
<ul class="nav navbar-nav navbar-right">
<li><a href="getting-started/">Getting Started</a></li>
<li class="active"><a href="/docs/v2/">Docs</a></li>
<li><a href="http://blog.ionic.io/">Blog</a></li>
<li><a href="http://forum.ionicframework.com/">Forum</a></li>
<li class="dropdown">
<a href="#"
class="dropdown-toggle"
data-toggle="dropdown"
role="button"
aria-haspopup="true"
aria-expanded="false">
More <span class="caret"></span>
</a>
<ul class="dropdown-menu">
<li><a target="_blank" href="http://ionic.io/">
Ionic Platform</a></li>
<li><a target="_blank" href="http://showcase.ionicframework.com/">
Showcase</a></li>
<li><a target="_blank" href="http://jobs.ionic.io/">
Job Board</a></li>
<li><a target="_blank" href="http://market.ionic.io/">
Market</a></li>
<li><a target="_blank" href="http://ionicworldwide.herokuapp.com/">
Ionic Worldwide</a></li>
<li><a target="_blank" href="http://play.ionic.io/">
Playground</a></li>
<li><a target="_blank" href="http://creator.ionic.io/">
Creator</a></li>
<li><a target="_blank" href="http://shop.ionic.io/">Shop</a></li>
</ul>
</li>
</ul>
</div><!-- /.navbar-collapse -->
</div><!-- /.container-fluid -->
</nav>
<div class="docs-container"
ng-app="IonicDocs"
>
<nav class="side-nav">
<form class="form-group search" role="search">
<input type="text" class="form-control" placeholder="Search">
</form>
<form class="form-group api-select">
<select name="version"
id="version-toggle"
onchange="window.location.href=this.options[this.selectedIndex].value">
<option
value="/docs/v2/nightly/api/"
selected>
nightly
</option>
<option
value="/docs/v2/api/"
>
2.0.0 (latest)
</option>
</select>
</form>
<ul ng-controller="DocsNavCtrl">
<li class="back-to-main">
<a href="/docs/v2">Back to Main Docs</a>
</li>
<li class="active">
<a ng-click="showAPI = !showAPI">API</a>
<ul ng-show="showAPI" ng-init="showAPI = true">
<li class="">
<a href="/docs/v2/nightly/api"></a>
</li>
<li class="">
<a href="/docs/v2/nightly/api/components/action-sheet/ActionSheet">ActionSheet</a>
</li>
<li class="">
<a href="/docs/v2/nightly/api/animations/Animation">Animation</a>
</li>
<li class="">
<a href="/docs/v2/nightly/api/config/App">App</a>
</li>
<li class="">
<a href="/docs/v2/nightly/api/platform/applinks/AppLinks">AppLinks</a>
</li>
<li class="">
<a href="/docs/v2/nightly/api/components/app/Attr">Attr</a>
</li>
<li class="">
<a href="/docs/v2/nightly/api/platform/barcode/Barcode">Barcode</a>
</li>
<li class="">
<a href="/docs/v2/nightly/api/platform/battery/Battery">Battery</a>
</li>
<li class="">
<a href="/docs/v2/nightly/api/components/blur/Blur">Blur</a>
</li>
<li class="">
<a href="/docs/v2/nightly/api/components/button/Button">Button</a>
</li>
<li class="">
<a href="/docs/v2/nightly/api/platform/camera/Camera">Camera</a>
</li>
<li class="">
<a href="/docs/v2/nightly/api/components/checkbox/Checkbox">Checkbox</a>
</li>
<li class="">
<a href="/docs/v2/nightly/api/util/ClickBlock">ClickBlock</a>
</li>
<li class="">
<a href="/docs/v2/nightly/api/config/Config">Config</a>
</li>
<li class="">
<a href="/docs/v2/nightly/api/config/ConfigComponent">ConfigComponent</a>
</li>
<li class="">
<a href="/docs/v2/nightly/api/platform/contacts/Contacts">Contacts</a>
</li>
<li class="">
<a href="/docs/v2/nightly/api/components/content/Content">Content</a>
</li>
<li class="">
<a href="/docs/v2/nightly/api/platform/device/Device">Device</a>
</li>
<li class="">
<a href="/docs/v2/nightly/api/platform/device-motion/DeviceMotion">DeviceMotion</a>
</li>
<li class="">
<a href="/docs/v2/nightly/api/platform/device-orientation/DeviceOrientation">DeviceOrientation</a>
</li>
<li class="">
<a href="/docs/v2/nightly/api/platform/dialogs/Dialogs">Dialogs</a>
</li>
<li class="">
<a href="/docs/v2/nightly/api/util/Events">Events</a>
</li>
<li class="">
<a href="/docs/v2/nightly/api/platform/geolocation/Geolocation">Geolocation</a>
</li>
<li class="">
<a href="/docs/v2/nightly/api/components/show-hide-when/HideWhen">HideWhen</a>
</li>
<li class="">
<a href="/docs/v2/nightly/api/components/icon/Icon">Icon</a>
</li>
<li class="">
<a href="/docs/v2/nightly/api/components/app/IdRef">IdRef</a>
</li>
<li class="">
<a href="/docs/v2/nightly/api/components/tap-click/initTapClick">initTapClick</a>
</li>
<li class="">
<a href="/docs/v2/nightly/api/components/app/IonicApp">IonicApp</a>
</li>
<li class="">
<a href="/docs/v2/nightly/api/config/ionicProviders">ionicProviders</a>
</li>
<li class="">
<a href="/docs/v2/nightly/api/components/item/ionItem">ionItem</a>
</li>
<li class="">
<a href="/docs/v2/nightly/api/components/radio/ionRadio">ionRadio</a>
</li>
<li class="">
<a href="/docs/v2/nightly/api/components/tap-click/isActivatable">isActivatable</a>
</li>
<li class="">
<a href="/docs/v2/nightly/api/components/item/Item">Item</a>
</li>
<li class="">
<a href="/docs/v2/nightly/api/components/item/ItemGroup">ItemGroup</a>
</li>
<li class="">
<a href="/docs/v2/nightly/api/components/item/ItemGroupTitle">ItemGroupTitle</a>
</li>
<li class="">
<a href="/docs/v2/nightly/api/components/item/ItemSlidingOptionButton">ItemSlidingOptionButton</a>
</li>
<li class="">
<a href="/docs/v2/nightly/api/platform/keyboard/Keyboard">Keyboard</a>
</li>
<li class="">
<a href="/docs/v2/nightly/api/components/text-input/Label">Label</a>
</li>
<li class="">
<a href="/docs/v2/nightly/api/components/list/List">List</a>
</li>
<li class="">
<a href="/docs/v2/nightly/api/components/list/ListHeader">ListHeader</a>
</li>
<li class="">
<a href="/docs/v2/nightly/api/platform/storage/LocalStorage">LocalStorage</a>
</li>
<li class="">
<a href="/docs/v2/nightly/api/components/menu/Menu">Menu</a>
</li>
<li class="">
<a href="/docs/v2/nightly/api/components/menu/MenuClose">MenuClose</a>
</li>
<li class="">
<a href="/docs/v2/nightly/api/components/menu/MenuToggle">MenuToggle</a>
</li>
<li class="">
<a href="/docs/v2/nightly/api/components/menu/MenuType">MenuType</a>
</li>
<li class="">
<a href="/docs/v2/nightly/api/components/modal/Modal">Modal</a>
</li>
<li class="">
<a href="/docs/v2/nightly/api/platform/NativePlugin">NativePlugin</a>
</li>
<li class="">
<a href="/docs/v2/nightly/api/platform/NativePluginDecorator">NativePluginDecorator</a>
</li>
<li class="">
<a href="/docs/v2/nightly/api/components/nav/Nav">Nav</a>
</li>
<li class="">
<a href="/docs/v2/nightly/api/components/navbar/Navbar">Navbar</a>
</li>
<li class="">
<a href="/docs/v2/nightly/api/components/navbar/NavbarTemplate">NavbarTemplate</a>
</li>
<li class="">
<a href="/docs/v2/nightly/api/components/nav/NavController">NavController</a>
</li>
<li class="">
<a href="/docs/v2/nightly/api/components/nav/NavParams">NavParams</a>
</li>
<li class="">
<a href="/docs/v2/nightly/api/components/nav/NavPop">NavPop</a>
</li>
<li class="">
<a href="/docs/v2/nightly/api/components/nav/NavPush">NavPush</a>
</li>
<li class="">
<a href="/docs/v2/nightly/api/components/nav/NavRouter">NavRouter</a>
</li>
<li class="">
<a href="/docs/v2/nightly/api/components/overlay/OverlayAnchor">OverlayAnchor</a>
</li>
<li class="">
<a href="/docs/v2/nightly/api/config/Page">Page</a>
</li>
<li class="">
<a href="/docs/v2/nightly/api/platform/Platform">Platform</a>
</li>
<li class="">
<a href="/docs/v2/nightly/api/components/popup/Popup">Popup</a>
</li>
<li class="">
<a href="/docs/v2/nightly/api/components/radio/RadioGroup">RadioGroup</a>
</li>
<li class="">
<a href="/docs/v2/nightly/api/components/scroll/Refresher">Refresher</a>
</li>
<li class="">
<a href="/docs/v2/nightly/api/components/scroll/Scroll">Scroll</a>
</li>
          <li class="">
            <a href="/docs/v2/nightly/api/components/searchbar/Search%20Bar">Search Bar</a>
          </li>
<li class="">
<a href="/docs/v2/nightly/api/components/segment/Segment">Segment</a>
</li>
<li class="">
<a href="/docs/v2/nightly/api/components/segment/SegmentButton">SegmentButton</a>
</li>
<li class="">
<a href="/docs/v2/nightly/api/components/show-hide-when/ShowWhen">ShowWhen</a>
</li>
<li class="">
<a href="/docs/v2/nightly/api/components/slides/Slide">Slide</a>
</li>
<li class="active">
<a href="/docs/v2/nightly/api/components/slides/SlideLazy">SlideLazy</a>
</li>
<li class="">
<a href="/docs/v2/nightly/api/components/slides/Slides">Slides</a>
</li>
<li class="">
<a href="/docs/v2/nightly/api/platform/storage/SqlStorage">SqlStorage</a>
</li>
<li class="">
<a href="/docs/v2/nightly/api/platform/statusbar/StatusBar">StatusBar</a>
</li>
<li class="">
<a href="/docs/v2/nightly/api/platform/storage/Storage">Storage</a>
</li>
<li class="">
<a href="/docs/v2/nightly/api/platform/storage/StorageEngine">StorageEngine</a>
</li>
<li class="">
<a href="/docs/v2/nightly/api/components/switch/Switch">Switch</a>
</li>
<li class="">
<a href="/docs/v2/nightly/api/components/tabs/Tab">Tab</a>
</li>
<li class="">
<a href="/docs/v2/nightly/api/components/tabs/Tabs">Tabs</a>
</li>
<li class="">
<a href="/docs/v2/nightly/api/components/text-input/TextInput">TextInput</a>
</li>
<li class="">
<a href="/docs/v2/nightly/api/components/text-input/TextInputElement">TextInputElement</a>
</li>
<li class="">
<a href="/docs/v2/nightly/api/components/toolbar/Toolbar">Toolbar</a>
</li>
<li class="">
<a href="/docs/v2/nightly/api/components/toolbar/ToolbarBase">ToolbarBase</a>
</li>
<li class="">
<a href="/docs/v2/nightly/api/components/toolbar/ToolbarItem">ToolbarItem</a>
</li>
<li class="">
<a href="/docs/v2/nightly/api/components/toolbar/ToolbarTitle">ToolbarTitle</a>
</li>
<li class="">
<a href="/docs/v2/nightly/api/transitions/Transition">Transition</a>
</li>
<li class="">
<a href="/docs/v2/nightly/api/translation/Translate">Translate</a>
</li>
<li class="">
<a href="/docs/v2/nightly/api/translation/TranslatePipe">TranslatePipe</a>
</li>
<li class="">
<a href="/docs/v2/nightly/api/platform/vibration/Vibration">Vibration</a>
</li>
<li class="">
<a href="/docs/v2/nightly/api/components/nav/ViewController">ViewController</a>
</li>
</ul>
</li>
</ul>
</nav>
<main>
<div class="improve-docs">
<a href="http://github.com/driftyco/ionic2/tree/master/ionic/components/slides/slides.ts#L467">
View Source
</a>
<a href="http://github.com/driftyco/ionic2/edit/master/ionic/components/slides/slides.ts#L467">
Improve this doc
</a>
</div>
<h1 class="api-title">
SlideLazy
</h1>
<h1 class="class export">SlideLazy <span class="type">class</span></h1>
<p class="module">exported from <a href="undefined">ionic/ionic</a><br />
defined in <a href="https://github.com/driftyco/ionic2/tree/master/ionic/components/slides/slides.ts#L468-L477">ionic/components/slides/slides.ts (line 468)</a>
</p>
<h2>Directive</h2>
<p><span>selector: slide-lazy</span></p>
</main>
</div>
</body>
</html>
| {
"content_hash": "54f75bac73469b27f3c82ccaa9d8feed",
"timestamp": "",
"source": "github",
"line_count": 444,
"max_line_length": 160,
"avg_line_length": 31.045045045045047,
"alnum_prop": 0.6441526407428904,
"repo_name": "philmerrell/ionic-site",
"id": "62c19d1b0c189dbb7e8d345fa412410462564315",
"size": "13784",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "_site/docs/v2/nightly/api/components/slides/SlideLazy/index.html",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "3547082"
},
{
"name": "HTML",
"bytes": "49816208"
},
{
"name": "JavaScript",
"bytes": "98020489"
},
{
"name": "Shell",
"bytes": "268"
}
],
"symlink_target": ""
} |
namespace crashpad {
namespace internal {
// Constructs an empty, uninitialized snapshot. Initialize() must be called
// successfully before any accessor is used (enforced by the
// INITIALIZATION_STATE_* checks in each method).
ModuleSnapshotWin::ModuleSnapshotWin()
    : ModuleSnapshot(),
      name_(),
      pdb_name_(),
      uuid_(),
      pe_image_reader_(),
      process_reader_(nullptr),
      timestamp_(0),
      age_(0),
      initialized_(),
      vs_fixed_file_info_(),
      initialized_vs_fixed_file_info_() {
}
// Defined out of line; no explicit cleanup is required.
ModuleSnapshotWin::~ModuleSnapshotWin() {
}
// Initializes the snapshot from a module enumerated by |process_reader|.
// Reads the module's PE image to locate debug (PDB) information. Returns
// false if the PE image itself cannot be read; a missing debug directory
// is tolerated and a fallback pdb name is synthesized instead.
bool ModuleSnapshotWin::Initialize(
    ProcessReaderWin* process_reader,
    const ProcessInfo::Module& process_reader_module) {
  INITIALIZATION_STATE_SET_INITIALIZING(initialized_);

  process_reader_ = process_reader;
  name_ = process_reader_module.name;
  timestamp_ = process_reader_module.timestamp;
  pe_image_reader_.reset(new PEImageReader());
  if (!pe_image_reader_->Initialize(process_reader_,
                                    process_reader_module.dll_base,
                                    process_reader_module.size,
                                    base::UTF16ToUTF8(name_))) {
    return false;
  }

  DWORD age_dword;
  if (pe_image_reader_->DebugDirectoryInformation(
          &uuid_, &age_dword, &pdb_name_)) {
    // age_ is declared as uint32_t; guard against a platform where DWORD
    // has a different width.
    static_assert(sizeof(DWORD) == sizeof(uint32_t), "unexpected age size");
    age_ = age_dword;
  } else {
    // If we fully supported all old debugging formats, we would want to extract
    // and emit a different type of CodeView record here (as old Microsoft tools
    // would do). As we don't expect to ever encounter a module that wouldn't be
    // be using .PDB that we actually have symbols for, we simply set a
    // plausible name here, but this will never correspond to symbols that we
    // have.
    pdb_name_ = base::UTF16ToUTF8(name_);
  }

  INITIALIZATION_STATE_SET_VALID(initialized_);
  return true;
}
// Reads the module's CrashpadInfo options, dispatching on the bitness of
// the target process so the matching in-memory structure layout is used.
void ModuleSnapshotWin::GetCrashpadOptions(CrashpadInfoClientOptions* options) {
  INITIALIZATION_STATE_DCHECK_VALID(initialized_);

  if (process_reader_->Is64Bit()) {
    GetCrashpadOptionsInternal<process_types::internal::Traits64>(options);
  } else {
    GetCrashpadOptionsInternal<process_types::internal::Traits32>(options);
  }
}
// Returns the module's name (as captured at Initialize()) converted to UTF-8.
std::string ModuleSnapshotWin::Name() const {
  INITIALIZATION_STATE_DCHECK_VALID(initialized_);
  return base::UTF16ToUTF8(name_);
}
// Returns the module's load address, as reported by the PE image reader.
uint64_t ModuleSnapshotWin::Address() const {
  INITIALIZATION_STATE_DCHECK_VALID(initialized_);
  return pe_image_reader_->Address();
}
// Returns the module's in-memory size, as reported by the PE image reader.
uint64_t ModuleSnapshotWin::Size() const {
  INITIALIZATION_STATE_DCHECK_VALID(initialized_);
  return pe_image_reader_->Size();
}
// Returns the module's timestamp captured from the process reader at
// Initialize() time.
time_t ModuleSnapshotWin::Timestamp() const {
  INITIALIZATION_STATE_DCHECK_VALID(initialized_);
  return timestamp_;
}
// Splits the 64-bit file version from VS_FIXEDFILEINFO into its four
// 16-bit components. Reports 0.0.0.0 when the module carries no version
// resource.
void ModuleSnapshotWin::FileVersion(uint16_t* version_0,
                                    uint16_t* version_1,
                                    uint16_t* version_2,
                                    uint16_t* version_3) const {
  INITIALIZATION_STATE_DCHECK_VALID(initialized_);

  // Default everything to zero, then overwrite if version data exists.
  *version_0 = 0;
  *version_1 = 0;
  *version_2 = 0;
  *version_3 = 0;

  const VS_FIXEDFILEINFO* ffi = VSFixedFileInfo();
  if (!ffi) {
    return;
  }

  *version_0 = static_cast<uint16_t>(ffi->dwFileVersionMS >> 16);
  *version_1 = static_cast<uint16_t>(ffi->dwFileVersionMS & 0xffff);
  *version_2 = static_cast<uint16_t>(ffi->dwFileVersionLS >> 16);
  *version_3 = static_cast<uint16_t>(ffi->dwFileVersionLS & 0xffff);
}
// Splits the 64-bit product version from VS_FIXEDFILEINFO into its four
// 16-bit components. Reports 0.0.0.0 when the module carries no version
// resource.
void ModuleSnapshotWin::SourceVersion(uint16_t* version_0,
                                      uint16_t* version_1,
                                      uint16_t* version_2,
                                      uint16_t* version_3) const {
  INITIALIZATION_STATE_DCHECK_VALID(initialized_);

  // Default everything to zero, then overwrite if version data exists.
  *version_0 = 0;
  *version_1 = 0;
  *version_2 = 0;
  *version_3 = 0;

  const VS_FIXEDFILEINFO* ffi = VSFixedFileInfo();
  if (!ffi) {
    return;
  }

  *version_0 = static_cast<uint16_t>(ffi->dwProductVersionMS >> 16);
  *version_1 = static_cast<uint16_t>(ffi->dwProductVersionMS & 0xffff);
  *version_2 = static_cast<uint16_t>(ffi->dwProductVersionLS >> 16);
  *version_3 = static_cast<uint16_t>(ffi->dwProductVersionLS & 0xffff);
}
// Maps the VS_FIXEDFILEINFO file type onto the platform-neutral
// ModuleSnapshot classification. Anything without version info, or with an
// unrecognized dwFileType, is reported as unknown.
ModuleSnapshot::ModuleType ModuleSnapshotWin::GetModuleType() const {
  INITIALIZATION_STATE_DCHECK_VALID(initialized_);

  const VS_FIXEDFILEINFO* ffi = VSFixedFileInfo();
  if (!ffi) {
    return ModuleSnapshot::kModuleTypeUnknown;
  }

  const DWORD file_type = ffi->dwFileType;
  if (file_type == VFT_APP) {
    return ModuleSnapshot::kModuleTypeExecutable;
  }
  if (file_type == VFT_DLL) {
    return ModuleSnapshot::kModuleTypeSharedLibrary;
  }
  if (file_type == VFT_DRV || file_type == VFT_VXD) {
    return ModuleSnapshot::kModuleTypeLoadableModule;
  }
  return ModuleSnapshot::kModuleTypeUnknown;
}
// Returns the debug (PDB) UUID and age captured during Initialize(). When
// no debug directory was found, these hold their zero-initialized values.
void ModuleSnapshotWin::UUIDAndAge(crashpad::UUID* uuid, uint32_t* age) const {
  INITIALIZATION_STATE_DCHECK_VALID(initialized_);
  *uuid = uuid_;
  *age = age_;
}
// Returns the .pdb file name found in the debug directory, or the
// synthesized fallback name set by Initialize() when none was present.
std::string ModuleSnapshotWin::DebugFileName() const {
  INITIALIZATION_STATE_DCHECK_VALID(initialized_);
  return pdb_name_;
}
// Always empty on Windows; list-style annotations are not collected here.
std::vector<std::string> ModuleSnapshotWin::AnnotationsVector() const {
  INITIALIZATION_STATE_DCHECK_VALID(initialized_);
  // These correspond to system-logged things on Mac. We don't currently track
  // any of these on Windows, but could in the future.
  // See https://crashpad.chromium.org/bug/38.
  return std::vector<std::string>();
}
std::map<std::string, std::string> ModuleSnapshotWin::AnnotationsSimpleMap()
const {
INITIALIZATION_STATE_DCHECK_VALID(initialized_);
PEImageAnnotationsReader annotations_reader(
process_reader_, pe_image_reader_.get(), name_);
return annotations_reader.SimpleMap();
}
// Bitness-specific worker for GetCrashpadOptions(): reads the module's
// CrashpadInfo structure (laid out per |Traits|) and converts its tri-state
// fields. All options are reported unset when the structure is absent.
template <class Traits>
void ModuleSnapshotWin::GetCrashpadOptionsInternal(
    CrashpadInfoClientOptions* options) {
  process_types::CrashpadInfo<Traits> info;
  if (!pe_image_reader_->GetCrashpadInfo(&info)) {
    // No CrashpadInfo structure in this module: nothing was configured.
    options->crashpad_handler_behavior = TriState::kUnset;
    options->system_crash_reporter_forwarding = TriState::kUnset;
    return;
  }

  options->crashpad_handler_behavior =
      CrashpadInfoClientOptions::TriStateFromCrashpadInfo(
          info.crashpad_handler_behavior);
  options->system_crash_reporter_forwarding =
      CrashpadInfoClientOptions::TriStateFromCrashpadInfo(
          info.system_crash_reporter_forwarding);
}
// Lazily reads and caches the module's VS_FIXEDFILEINFO. The tri-state
// guard ensures the (possibly failing) read is attempted at most once:
// it is marked invalid before the read, then upgraded to valid only on
// success. Returns nullptr when the module has no version resource.
const VS_FIXEDFILEINFO* ModuleSnapshotWin::VSFixedFileInfo() const {
  INITIALIZATION_STATE_DCHECK_VALID(initialized_);

  if (initialized_vs_fixed_file_info_.is_uninitialized()) {
    initialized_vs_fixed_file_info_.set_invalid();
    if (pe_image_reader_->VSFixedFileInfo(&vs_fixed_file_info_)) {
      initialized_vs_fixed_file_info_.set_valid();
    }
  }

  return initialized_vs_fixed_file_info_.is_valid() ? &vs_fixed_file_info_
                                                    : nullptr;
}
} // namespace internal
} // namespace crashpad
| {
"content_hash": "6df013ef37fdeb256733ca1297c7b191",
"timestamp": "",
"source": "github",
"line_count": 200,
"max_line_length": 80,
"avg_line_length": 32.985,
"alnum_prop": 0.6656055782931636,
"repo_name": "XiaosongWei/chromium-crosswalk",
"id": "0553f35b1affb55be510ecfed54f785c20158aa8",
"size": "7471",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "third_party/crashpad/crashpad/snapshot/win/module_snapshot_win.cc",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
import time
import fixtures
import mock
import nova
from nova import test
from nova.tests import fixtures as nova_fixtures
from nova.tests.unit import cast_as_call
from nova.tests.unit import policy_fixture
from nova.tests.unit.virt.libvirt import fake_libvirt_utils
from nova.tests.unit.virt.libvirt import fakelibvirt
from nova.virt.libvirt import guest as libvirt_guest
class TestSerialConsoleLiveMigrate(test.TestCase):
    """Regression test for bug #1595962.

    Live migration must succeed when only the serial console is enabled
    and the graphical consoles (VNC and SPICE) are disabled, as is common
    on IBM z systems platforms.
    """

    REQUIRES_LOCKING = True

    def setUp(self):
        """Build a single-compute deployment backed by fakelibvirt with
        only the serial console enabled.
        """
        super(TestSerialConsoleLiveMigrate, self).setUp()
        self.useFixture(policy_fixture.RealPolicyFixture())
        self.useFixture(nova_fixtures.NeutronFixture(self))
        self.useFixture(nova_fixtures.PlacementFixture())
        api_fixture = self.useFixture(nova_fixtures.OSAPIFixture(
            api_version='v2.1'))
        # Replace libvirt with fakelibvirt
        self.useFixture(fixtures.MonkeyPatch(
            'nova.virt.libvirt.driver.libvirt_utils',
            fake_libvirt_utils))
        self.useFixture(fixtures.MonkeyPatch(
            'nova.virt.libvirt.driver.libvirt',
            fakelibvirt))
        self.useFixture(fixtures.MonkeyPatch(
            'nova.virt.libvirt.host.libvirt',
            fakelibvirt))
        self.useFixture(fixtures.MonkeyPatch(
            'nova.virt.libvirt.guest.libvirt',
            fakelibvirt))
        self.useFixture(fakelibvirt.FakeLibvirtFixture())

        self.admin_api = api_fixture.admin_api
        self.api = api_fixture.api

        # the image fake backend needed for image discovery
        nova.tests.unit.image.fake.stub_out_image_service(self)
        nova.tests.unit.fake_network.set_stub_network_methods(self)

        # Serial console on; graphical consoles (VNC/SPICE) off — the
        # configuration this regression test is about.
        self.flags(compute_driver='libvirt.LibvirtDriver')
        self.flags(enabled=True, group="serial_console")
        self.flags(enabled=False, group="vnc")
        self.flags(enabled=False, group="spice")
        self.flags(use_usb_tablet=False, group="libvirt")
        self.flags(host="test_compute1")

        self.start_service('conductor')
        self.flags(driver='chance_scheduler', group='scheduler')
        self.start_service('scheduler')
        self.compute = self.start_service('compute', host='test_compute1')
        self.consoleauth = self.start_service('consoleauth')

        self.useFixture(cast_as_call.CastAsCall(self))
        self.addCleanup(nova.tests.unit.image.fake.FakeImageService_reset)

        self.image_id = self.api.get_images()[0]['id']
        self.flavor_id = self.api.get_flavors()[0]['id']

    @mock.patch('nova.virt.libvirt.LibvirtDriver.get_volume_connector')
    @mock.patch('nova.virt.libvirt.guest.Guest.get_job_info')
    @mock.patch.object(fakelibvirt.Domain, 'migrateToURI2')
    @mock.patch('nova.virt.libvirt.host.Host.get_connection')
    @mock.patch('nova.virt.disk.api.get_disk_size', return_value=1024)
    @mock.patch('os.path.getsize', return_value=1024)
    @mock.patch('nova.conductor.tasks.live_migrate.LiveMigrationTask.'
                '_check_destination_is_not_source', return_value=False)
    @mock.patch('nova.virt.libvirt.LibvirtDriver._create_image')
    def test_serial_console_live_migrate(self, mock_create_image,
                                         mock_conductor_source_check,
                                         mock_path_get_size,
                                         mock_get_disk_size,
                                         mock_host_get_connection,
                                         mock_migrate_to_uri,
                                         mock_get_job_info,
                                         mock_get_volume_connector):
        """Regression test for bug #1595962.

        If the graphical consoles VNC and SPICE are disabled, the
        live-migration of an instance will result in an ERROR state.
        VNC and SPICE are usually disabled on IBM z systems platforms
        where graphical consoles are not available. The serial console
        is then enabled and VNC + SPICE are disabled.

        The error will be raised at
        https://github.com/openstack/nova/blob/
        4f33047d07f5a11b208c344fe206aba01cd8e6fe/
        nova/virt/libvirt/driver.py#L5842-L5852
        """
        mock_get_job_info.return_value = libvirt_guest.JobInfo(
            type=fakelibvirt.VIR_DOMAIN_JOB_COMPLETED)
        fake_connection = fakelibvirt.Connection('qemu:///system',
                                                 version=1002007,
                                                 hv_version=2001000)
        mock_host_get_connection.return_value = fake_connection

        server_attr = dict(name='server1',
                           imageRef=self.image_id,
                           flavorRef=self.flavor_id)
        server = self.api.post_server({'server': server_attr})
        server_id = server['id']
        self.wait_till_active_or_timeout(server_id)

        post = {"os-migrateLive": {
            "block_migration": False,
            "disk_over_commit": False,
            "host": "test_compute1"
        }}
        try:
            # This should succeed
            self.admin_api.post_server_action(server_id, post)
            self.wait_till_active_or_timeout(server_id)
        except Exception as ex:
            self.fail(ex.response.content)

    def wait_till_active_or_timeout(self, server_id):
        """Poll the server until it reaches ACTIVE, failing after ~10s.

        Uses a wall-clock deadline instead of accumulating fixed 0.1
        increments, so the timeout stays accurate even when time.sleep()
        overruns and float rounding no longer affects the loop bound.
        """
        deadline = time.time() + 10.0
        server = self.api.get_server(server_id)
        while server['status'] != "ACTIVE" and time.time() < deadline:
            time.sleep(.1)
            server = self.api.get_server(server_id)
        if server['status'] != "ACTIVE":
            self.fail("The server is not active after the timeout.")
| {
"content_hash": "09e3b430e052cd881faa98c34c80c139",
"timestamp": "",
"source": "github",
"line_count": 132,
"max_line_length": 74,
"avg_line_length": 43.378787878787875,
"alnum_prop": 0.6105483758295495,
"repo_name": "jianghuaw/nova",
"id": "df0fb6af7a3e81a3a5165a6df9ec3f92450808e6",
"size": "6301",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nova/tests/functional/regressions/test_bug_1595962.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "1435"
},
{
"name": "PHP",
"bytes": "32515"
},
{
"name": "Python",
"bytes": "19932348"
},
{
"name": "Shell",
"bytes": "28290"
},
{
"name": "Smarty",
"bytes": "339635"
}
],
"symlink_target": ""
} |
// This is an open source non-commercial project. Dear PVS-Studio, please check it.
// PVS-Studio Static Code Analyzer for C, C++ and C#: http://www.viva64.com
/* SearchCommand.cs --
* Ars Magna project, http://arsmagna.ru
* -------------------------------------------------------
* Status: poor
*/
#region Using directives
using System;
using System.Linq;
using Telegram.Bot;
using Telegram.Bot.Types;
using AM.Configuration;
using ManagedIrbis;
#endregion
namespace IrbisBot.Commands
{
    /// <summary>
    /// Search command that queries the IRBIS library server for the
    /// user's text and replies with brief record descriptions.
    /// </summary>
    class SearchCommand
        : BotCommand
    {
        // Command name used by the bot's dispatch logic.
        public override string Name => "search";

        /// <summary>
        /// Treats the whole message text as a keyword query, searches the
        /// IRBIS catalog and sends up to five brief-formatted records back
        /// to the chat. Connection problems are logged to the console and
        /// otherwise swallowed. User-facing messages are in Russian by design.
        /// </summary>
        public override void Execute(Message message, TelegramBotClient client)
        {
            var chatId = message.Chat.Id;
            string query = message.Text.Trim();
            string answer = $"Ищу книги и статьи по теме '{query}'";
            // .Wait() blocks on the async send; presumably acceptable for this
            // bot's single-threaded command handling -- confirm.
            client.SendTextMessageAsync(chatId, answer).Wait();

            try
            {
                // Connection string comes from application configuration.
                string connectionString = ConfigurationUtility.GetString("irbis");
                using (IrbisConnection connection = new IrbisConnection())
                {
                    connection.ParseConnectionString(connectionString);
                    connection.Connect();

                    // Keyword search; "K=" is the IRBIS keyword prefix.
                    int[] found = connection.Search("\"K={0}\"", query);
                    client.SendTextMessageAsync(chatId, $"Найдено: {found.Length}").Wait();
                    if (found.Length > 5)
                    {
                        client.SendTextMessageAsync(chatId, "Покажу только первые пять").Wait();
                    }

                    // Take the five most recent hits (highest MFNs come last).
                    found = found.Reverse().Take(5).ToArray();
                    foreach (int mfn in found)
                    {
                        // "@brief" formats a record as a short description.
                        string description = connection.FormatRecord("@brief", mfn);
                        client.SendTextMessageAsync(chatId, description).Wait();
                    }
                }
            }
            catch (Exception exception)
            {
                // Best-effort: failures are logged, the user gets no error reply.
                Console.WriteLine(exception);
            }
        }

        // NOTE(review): always reports that it can handle the input --
        // presumably this bot routes any unmatched text to search; confirm.
        public override bool Contains(string command)
        {
            return true;
        }
    }
}
| {
"content_hash": "02f1a85b8ac5936262873a110f193309",
"timestamp": "",
"source": "github",
"line_count": 73,
"max_line_length": 96,
"avg_line_length": 30.80821917808219,
"alnum_prop": 0.5264562027567808,
"repo_name": "amironov73/ManagedIrbis",
"id": "3dd9d34286cc3a5da199e96b20408733fec7a893",
"size": "2330",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "Source/Classic/Apps/IrbisBot/Source/Commands/SearchCommand.cs",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "ANTLR",
"bytes": "92910"
},
{
"name": "ASP.NET",
"bytes": "413"
},
{
"name": "Batchfile",
"bytes": "33021"
},
{
"name": "C",
"bytes": "24669"
},
{
"name": "C#",
"bytes": "19567730"
},
{
"name": "CSS",
"bytes": "170"
},
{
"name": "F*",
"bytes": "362819"
},
{
"name": "HTML",
"bytes": "5592"
},
{
"name": "JavaScript",
"bytes": "5342"
},
{
"name": "Pascal",
"bytes": "152697"
},
{
"name": "Shell",
"bytes": "524"
},
{
"name": "Smalltalk",
"bytes": "29356"
},
{
"name": "TeX",
"bytes": "44337"
},
{
"name": "VBA",
"bytes": "46543"
},
{
"name": "Witcher Script",
"bytes": "40165"
}
],
"symlink_target": ""
} |
package storage
// Group is a named collection of access rights. Rights is a two-level map
// of string permissions — presumably resource -> action -> value; confirm
// against the code that evaluates it.
type Group struct {
	Name   string                       `json:"name"`
	Rights map[string]map[string]string `json:"rights"`
}
// User is an account record. PasswordHash carries omitempty, so an empty
// hash is dropped from the JSON encoding. Groups lists the names of the
// groups the user belongs to.
type User struct {
	Name         string   `json:"name"`
	PasswordHash string   `json:"passwordhash,omitempty"`
	Groups       []string `json:"groups"`
}
// ProjectConfig is the complete persisted configuration: all users and all
// groups. NOTE(review): unlike Group and User, these fields carry no json
// tags, so encoding/json would emit the Go field names — confirm intended.
type ProjectConfig struct {
	Users  []*User
	Groups []*Group
}
// StorageBackend abstracts the persistence layer for ProjectConfig.
type StorageBackend interface {
	// Load reads the current configuration from the backing store.
	Load() (*ProjectConfig, error)
	// Save persists cfg to the backing store.
	Save(cfg *ProjectConfig) error
}
| {
"content_hash": "37a7c39d8d1eb174a7e8d6dc96e13931",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 54,
"avg_line_length": 20.863636363636363,
"alnum_prop": 0.6557734204793029,
"repo_name": "trusch/jwtd",
"id": "13b5d98e0743d36c3926a82088da0a89ebe995dc",
"size": "459",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "storage/StorageBackend.go",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Go",
"bytes": "65186"
},
{
"name": "Shell",
"bytes": "5020"
}
],
"symlink_target": ""
} |
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8">
<meta name="viewport" content="width=device-width, initial-scale=1">
<title>presburger: Not compatible 👼</title>
<link rel="shortcut icon" type="image/png" href="../../../../../favicon.png" />
<link href="../../../../../bootstrap.min.css" rel="stylesheet">
<link href="../../../../../bootstrap-custom.css" rel="stylesheet">
<link href="//maxcdn.bootstrapcdn.com/font-awesome/4.2.0/css/font-awesome.min.css" rel="stylesheet">
<script src="../../../../../moment.min.js"></script>
<!-- HTML5 Shim and Respond.js IE8 support of HTML5 elements and media queries -->
<!-- WARNING: Respond.js doesn't work if you view the page via file:// -->
<!--[if lt IE 9]>
<script src="https://oss.maxcdn.com/html5shiv/3.7.2/html5shiv.min.js"></script>
<script src="https://oss.maxcdn.com/respond/1.4.2/respond.min.js"></script>
<![endif]-->
</head>
<body>
<div class="container">
<div class="navbar navbar-default" role="navigation">
<div class="container-fluid">
<div class="navbar-header">
<a class="navbar-brand" href="../../../../.."><i class="fa fa-lg fa-flag-checkered"></i> Coq bench</a>
</div>
<div id="navbar" class="collapse navbar-collapse">
<ul class="nav navbar-nav">
<li><a href="../..">clean / released</a></li>
<li class="active"><a href="">8.4.6~camlp4 / presburger - 8.6.0</a></li>
</ul>
</div>
</div>
</div>
<div class="article">
<div class="row">
<div class="col-md-12">
<a href="../..">« Up</a>
<h1>
presburger
<small>
8.6.0
<span class="label label-info">Not compatible 👼</span>
</small>
</h1>
<p>📅 <em><script>document.write(moment("2022-11-06 19:57:14 +0000", "YYYY-MM-DD HH:mm:ss Z").fromNow());</script> (2022-11-06 19:57:14 UTC)</em><p>
<h2>Context</h2>
<pre># Packages matching: installed
# Name # Installed # Synopsis
base-bigarray base
base-num base Num library distributed with the OCaml compiler
base-ocamlbuild base OCamlbuild binary and libraries distributed with the OCaml compiler
base-threads base
base-unix base
camlp4 4.02+7 Camlp4 is a system for writing extensible parsers for programming languages
conf-findutils 1 Virtual package relying on findutils
conf-which 1 Virtual package relying on which
coq 8.4.6~camlp4 Formal proof management system.
num 0 The Num library for arbitrary-precision integer and rational arithmetic
ocaml 4.02.3 The OCaml compiler (virtual package)
ocaml-base-compiler 4.02.3 Official 4.02.3 release
ocaml-config 1 OCaml Switch Configuration
ocamlbuild 0 Build system distributed with the OCaml compiler since OCaml 3.10.0
# opam file:
opam-version: "2.0"
maintainer: "Hugo.Herbelin@inria.fr"
homepage: "https://github.com/coq-contribs/presburger"
license: "LGPL 2.1"
build: [make "-j%{jobs}%"]
install: [make "install"]
remove: ["rm" "-R" "%{lib}%/coq/user-contrib/Presburger"]
depends: [
"ocaml"
"coq" {>= "8.6" & < "8.7~"}
]
tags: [
"keyword: integers"
"keyword: arithmetic"
"keyword: decision procedure"
"keyword: Presburger"
"category: Mathematics/Logic/Foundations"
"category: Mathematics/Arithmetic and Number Theory/Miscellaneous"
"category: Computer Science/Decision Procedures and Certified Algorithms/Decision procedures"
"date: March 2002"
]
authors: [ "Laurent Théry" ]
bug-reports: "https://github.com/coq-contribs/presburger/issues"
dev-repo: "git+https://github.com/coq-contribs/presburger.git"
synopsis: "Presburger's algorithm"
description: """
A formalization of Presburger's algorithm as stated in
the initial paper by Presburger."""
flags: light-uninstall
url {
src: "https://github.com/coq-contribs/presburger/archive/v8.6.0.tar.gz"
checksum: "md5=116492346c9c2eaff0d9871d7248a09e"
}
</pre>
<h2>Lint</h2>
<dl class="dl-horizontal">
<dt>Command</dt>
<dd><code>true</code></dd>
<dt>Return code</dt>
<dd>0</dd>
</dl>
<h2>Dry install 🏜️</h2>
<p>Dry install with the current Coq version:</p>
<dl class="dl-horizontal">
<dt>Command</dt>
<dd><code>opam install -y --show-action coq-presburger.8.6.0 coq.8.4.6~camlp4</code></dd>
<dt>Return code</dt>
<dd>5120</dd>
<dt>Output</dt>
<dd><pre>[NOTE] Package coq is already installed (current version is 8.4.6~camlp4).
The following dependencies couldn't be met:
- coq-presburger -> coq >= 8.6 -> ocaml >= 4.05.0
base of this switch (use `--unlock-base' to force)
Your request can't be satisfied:
- No available version of coq satisfies the constraints
No solution found, exiting
</pre></dd>
</dl>
<p>Dry install without Coq/switch base, to test if the problem was incompatibility with the current Coq/OCaml version:</p>
<dl class="dl-horizontal">
<dt>Command</dt>
<dd><code>opam remove -y coq; opam install -y --show-action --unlock-base coq-presburger.8.6.0</code></dd>
<dt>Return code</dt>
<dd>0</dd>
</dl>
<h2>Install dependencies</h2>
<dl class="dl-horizontal">
<dt>Command</dt>
<dd><code>true</code></dd>
<dt>Return code</dt>
<dd>0</dd>
<dt>Duration</dt>
<dd>0 s</dd>
</dl>
<h2>Install 🚀</h2>
<dl class="dl-horizontal">
<dt>Command</dt>
<dd><code>true</code></dd>
<dt>Return code</dt>
<dd>0</dd>
<dt>Duration</dt>
<dd>0 s</dd>
</dl>
<h2>Installation size</h2>
<p>No files were installed.</p>
<h2>Uninstall 🧹</h2>
<dl class="dl-horizontal">
<dt>Command</dt>
<dd><code>true</code></dd>
<dt>Return code</dt>
<dd>0</dd>
<dt>Missing removes</dt>
<dd>
none
</dd>
<dt>Wrong removes</dt>
<dd>
none
</dd>
</dl>
</div>
</div>
</div>
<hr/>
<div class="footer">
<p class="text-center">
Sources are on <a href="https://github.com/coq-bench">GitHub</a> © Guillaume Claret 🐣
</p>
</div>
</div>
<script src="https://ajax.googleapis.com/ajax/libs/jquery/1.11.1/jquery.min.js"></script>
<script src="../../../../../bootstrap.min.js"></script>
</body>
</html>
| {
"content_hash": "a678370cdf63e1805a20c3d9e04a0116",
"timestamp": "",
"source": "github",
"line_count": 177,
"max_line_length": 159,
"avg_line_length": 42.186440677966104,
"alnum_prop": 0.5579215213606535,
"repo_name": "coq-bench/coq-bench.github.io",
"id": "f3f9b63fb5d881289c9f2d57aae0edc348d95ec3",
"size": "7493",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "clean/Linux-x86_64-4.02.3-2.0.6/released/8.4.6~camlp4/presburger/8.6.0.html",
"mode": "33188",
"license": "mit",
"language": [],
"symlink_target": ""
} |
package org.mybatis.spring;
import static org.assertj.core.api.Assertions.*;
import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.junit.jupiter.api.Assertions.fail;
import java.sql.SQLException;
import org.apache.ibatis.session.ExecutorType;
import org.apache.ibatis.session.SqlSession;
import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.Test;
import org.springframework.dao.DataAccessException;
import org.springframework.jdbc.datasource.DataSourceTransactionManager;
import org.springframework.transaction.TransactionStatus;
import org.springframework.transaction.support.DefaultTransactionDefinition;
import org.springframework.transaction.support.TransactionSynchronizationManager;
// tests basic usage and implementation only
// MapperFactoryBeanTest handles testing the transactional functions in SqlSessionTemplate
public class SqlSessionTemplateTest extends AbstractMyBatisSpringTest {

  // Template under test; one static instance is shared by all tests.
  private static SqlSession sqlSessionTemplate;

  @BeforeAll
  static void setupSqlTemplate() {
    sqlSessionTemplate = new SqlSessionTemplate(sqlSessionFactory);
  }

  @AfterEach
  void tearDown() {
    try {
      connection.close();
    } catch (SQLException ignored) {
      // the connection may already have been closed by the test body
    }
  }

  @Test
  void testGetConnection() throws java.sql.SQLException {
    java.sql.Connection conn = sqlSessionTemplate.getConnection();

    // outside of an explicit tx, getConnection() will start a tx, get an open connection then
    // end the tx, which closes the connection
    assertThat(conn.isClosed()).isTrue();
  }

  @Test
  void testGetConnectionInTx() throws java.sql.SQLException {
    TransactionStatus status = null;

    try {
      status = txManager.getTransaction(new DefaultTransactionDefinition());

      // inside a Spring-managed tx the connection must stay open
      java.sql.Connection conn = sqlSessionTemplate.getConnection();

      assertThat(conn.isClosed()).isFalse();

    } finally {
      // rollback required to close connection
      txManager.rollback(status);
    }
  }

  // commit/close/rollback must be driven by Spring's transaction management,
  // so calling them directly on the template is an unsupported operation.
  @Test
  void testCommit() {
    assertThrows(UnsupportedOperationException.class, sqlSessionTemplate::commit);
  }

  @Test
  void testClose() {
    assertThrows(UnsupportedOperationException.class, sqlSessionTemplate::close);
  }

  @Test
  void testRollback() {
    assertThrows(UnsupportedOperationException.class, sqlSessionTemplate::rollback);
  }

  @Test
  void testExecutorType() {
    SqlSessionTemplate template = new SqlSessionTemplate(sqlSessionFactory, ExecutorType.BATCH);
    assertThat(template.getExecutorType()).isEqualTo(ExecutorType.BATCH);

    DataSourceTransactionManager manager = new DataSourceTransactionManager(dataSource);

    TransactionStatus status = null;

    try {
      status = manager.getTransaction(new DefaultTransactionDefinition());

      // will synchronize the template with the current tx
      template.getConnection();

      // the executor type chosen at construction must be recorded on the holder
      SqlSessionHolder holder = (SqlSessionHolder) TransactionSynchronizationManager.getResource(sqlSessionFactory);

      assertThat(holder.getExecutorType()).isEqualTo(ExecutorType.BATCH);
    } finally {
      // rollback required to close connection
      // NOTE(review): the tx was started on 'manager' but is rolled back via
      // 'txManager'; both wrap the same dataSource — confirm this is intended.
      txManager.rollback(status);
    }
  }

  @Test
  void testExceptionTranslationShouldThrowMyBatisSystemException() throws SQLException {
    try {
      // statement id that does not exist -> MyBatis PersistenceException
      sqlSessionTemplate.selectOne("undefined");
      fail("exception not thrown when expected");
    } catch (MyBatisSystemException mbse) {
      // success
    } catch (Throwable t) {
      fail("SqlSessionTemplate should translate MyBatis PersistenceExceptions");
    } finally {
      connection.close(); // the template does not open the connection so it does not close it
    }
  }

  @Test
  void testExceptionTranslationShouldThrowDataAccessException() {

    // this query must be the same as the query in TestMapper.xml
    connection.getPreparedStatementResultSetHandler().prepareThrowsSQLException("SELECT 'fail'");

    try {
      sqlSessionTemplate.selectOne("org.mybatis.spring.TestMapper.findFail");
      fail("exception not thrown when expected");
    } catch (MyBatisSystemException mbse) {
      fail("SqlSessionTemplate should translate SQLExceptions into DataAccessExceptions");
    } catch (DataAccessException dae) {
      // success
    } catch (Throwable t) {
      fail("SqlSessionTemplate should translate MyBatis PersistenceExceptions");
    }
  }

  @Test
  void testTemplateWithNoTxInsert() {
    sqlSessionTemplate.getMapper(TestMapper.class).insertTest("test1");
    assertCommitJdbc();
    assertCommitSession();
  }

  @Test
  void testTemplateWithNoTxSelect() {
    sqlSessionTemplate.getMapper(TestMapper.class).findTest();
    assertCommit();
  }

  @Test
  void testWithTxRequired() {
    DefaultTransactionDefinition txDef = new DefaultTransactionDefinition();
    txDef.setPropagationBehaviorName("PROPAGATION_REQUIRED");

    TransactionStatus status = txManager.getTransaction(txDef);

    sqlSessionTemplate.getMapper(TestMapper.class).findTest();

    txManager.commit(status);

    assertCommit();
    assertSingleConnection();
  }
}
| {
"content_hash": "4a638fa85491b55c658375e2403f3797",
"timestamp": "",
"source": "github",
"line_count": 169,
"max_line_length": 116,
"avg_line_length": 30.02958579881657,
"alnum_prop": 0.7497536945812808,
"repo_name": "mybatis/spring",
"id": "1fbe09d1802ed4d317c26c306e4dd4bb38198545",
"size": "5694",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "src/test/java/org/mybatis/spring/SqlSessionTemplateTest.java",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "6360"
},
{
"name": "Java",
"bytes": "405031"
},
{
"name": "Shell",
"bytes": "1196"
}
],
"symlink_target": ""
} |
// Copyright © 2012 onwards, Andrew Whewell
// All rights reserved.
//
// Redistribution and use of this software in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
// * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
// * Neither the name of the author nor the names of the program's contributors may be used to endorse or promote products derived from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OF THE SOFTWARE BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
using System;
using System.Text;
using System.Collections.Generic;
using System.Linq;
using Microsoft.VisualStudio.TestTools.UnitTesting;
using Test.Framework;
using VirtualRadar.Interface.Listener;
using InterfaceFactory;
using VirtualRadar.Interface;
namespace Test.VirtualRadar.Library.Listener
{
[TestClass]
public class Sbs3MessageBytesExtractorTests
{
    public TestContext TestContext { get; set; }

    // Extractor under test, recreated for every test.
    private ISbs3MessageBytesExtractor _Extractor;
    // Shared data-driven helper configured for Mode-S output.
    private CommonMessageBytesExtractorTests _CommonTests;

    [TestInitialize]
    public void TestInitialise()
    {
        _Extractor = Factory.Singleton.Resolve<ISbs3MessageBytesExtractor>();
        _CommonTests = new CommonMessageBytesExtractorTests(TestContext, _Extractor, ExtractedBytesFormat.ModeS);
    }

    [TestCleanup]
    public void TestCleanup()
    {
    }

    /// <summary>
    /// Adds DCE stuffing to the list of bytes passed across.
    /// </summary>
    /// <param name="bytes">Byte list modified in place: every 0x10 (DLE) byte is doubled.</param>
    private List<byte> AddDCEStuffing(List<byte> bytes)
    {
        for(var i = 0;i < bytes.Count;++i) {
            if(bytes[i] == 0x010) {
                bytes.Insert(i, 0x10);
                ++i; // skip past the inserted byte so it isn't stuffed again
            }
        }

        return bytes;
    }

    /// <summary>
    /// Builds up a message packet from the payload passed across. The payload is modified by this method.
    /// </summary>
    /// <param name="payload">Packet payload; DCE-stuffed in place.</param>
    /// <param name="isFirstMessage">True to prepend a throwaway leading byte so the listener will accept the packet.</param>
    /// <returns>The framed packet: DLE STX, stuffed payload, DLE ETX, stuffed CRC16-CCITT checksum.</returns>
    private List<byte> BuildValidMessagePacket(List<byte> payload, bool isFirstMessage = true)
    {
        var checksumCalculator = new Crc16Ccitt(Crc16Ccitt.InitialiseToZero);
        var checksum = AddDCEStuffing(new List<byte>(checksumCalculator.ComputeChecksumBytes(payload.ToArray(), false)));

        var result = new List<byte>();
        if(isFirstMessage) result.Add(0x00); // prefix with a leading byte so that the listener will not reject it
        result.Add(0x10);   // DLE
        result.Add(0x02);   // STX - start of frame
        result.AddRange(AddDCEStuffing(payload));
        result.Add(0x10);   // DLE
        result.Add(0x03);   // ETX - end of frame
        result.AddRange(checksum);

        return result;
    }

    /// <summary>
    /// Builds up a message packet from a payload described as a string of bytes in hex separated by spaces.
    /// </summary>
    /// <param name="bytes">Hex byte values separated by spaces, e.g. "01 FF 10".</param>
    /// <param name="isFirstMessage">Passed through to the byte-list overload.</param>
    private List<byte> BuildValidMessagePacket(string bytes, bool isFirstMessage = true)
    {
        var payload = new List<byte>();
        foreach(var byteText in bytes.Split(new char[] { ' ' }, StringSplitOptions.RemoveEmptyEntries)) {
            payload.Add(Convert.ToByte(byteText, 16));
        }

        return BuildValidMessagePacket(payload, isFirstMessage);
    }

    /// <summary>
    /// Data-driven test; the cases come from the Sbs3RadarListener worksheet of RawDecodingTests.xls.
    /// </summary>
    [TestMethod]
    [DataSource("Data Source='RawDecodingTests.xls';Provider=Microsoft.Jet.OLEDB.4.0;Persist Security Info=False;Extended Properties='Excel 8.0'",
                "Sbs3RadarListener$")]
    public void Sbs3MessageBytesExtractor_ExtractModeSMessageBytes_Extracts_Mode_S_Messages_From_Bytes()
    {
        _CommonTests.Do_ExtractMessageBytes_Extracts_Messages_From_Bytes(false, 2, 2);
    }

    /// <summary>
    /// Only SBS3 packet types 0x01, 0x05 and 0x07 carry messages that the
    /// extractor should translate; every other packet type must be ignored.
    /// </summary>
    [TestMethod]
    public void Sbs3MessageBytesExtractor_Connect_Only_Translates_SBS3_Packet_Types_1_5_And_7()
    {
        for(int i = 0;i < 256;++i) {
            // fresh extractor per packet type so state cannot leak between iterations
            TestCleanup();
            TestInitialise();

            // choose a payload length appropriate to the packet type being simulated
            int payloadLength;
            switch(i) {
                case 0x01:
                case 0x05: payloadLength = 18; break;
                case 0x07: payloadLength = 11; break;
                case 0x09: payloadLength = 6; break;
                case 0x20: payloadLength = 2; break;
                case 0x21: payloadLength = 10; break;
                case 0x26: payloadLength = 35; break;
                case 0x2a:
                case 0x2b: payloadLength = 18; break; // no fixed length
                case 0x2c: payloadLength = 2; break;
                case 0x38: payloadLength = 26; break;
                case 0x3b: payloadLength = 129; break;
                case 0x45: payloadLength = 18; break; // no fixed length
                case 0x57: payloadLength = 18; break; // no fixed length
                case 0x58: payloadLength = 18; break; // no fixed length
                default: payloadLength = 18; break; // 18 or over probably has the best chance of confusing the code
            }

            // first payload byte is the packet type, followed by a zeroed body
            var payload = new List<byte>();
            payload.Add((byte)i);
            payload.AddRange(new byte[payloadLength]);

            var message = BuildValidMessagePacket(payload).ToArray();
            var extracted = _Extractor.ExtractMessageBytes(message, 0, message.Length).SingleOrDefault();

            bool expectedTranslate = i == 1 || i == 5 || i == 7;
            if(expectedTranslate) Assert.IsNotNull(extracted, "Did not extract SBS3 packet type {0}", i);
            else Assert.IsNull(extracted, "Extracted SBS3 packet type {0}", i);
        }
    }
}
}
| {
"content_hash": "09f571bfa8f821bd331aa60df0d93870",
"timestamp": "",
"source": "github",
"line_count": 145,
"max_line_length": 749,
"avg_line_length": 49.358620689655176,
"alnum_prop": 0.6086349028922733,
"repo_name": "wiseman/virtual-radar-server",
"id": "e7610442666252bdd3a71614328b3b2b59f3f809",
"size": "7160",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Test/Test.VirtualRadar.Library/Listener/Sbs3MessageBytesExtractorTests.cs",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "C#",
"bytes": "6080863"
},
{
"name": "JavaScript",
"bytes": "641019"
},
{
"name": "Shell",
"bytes": "5140"
}
],
"symlink_target": ""
} |
#ifndef __AFXPLEX_H__
#define __AFXPLEX_H__
#ifndef __AFX_H__
#include <afx.h>
#endif
#ifdef _AFX_PACKING
#pragma pack(push, _AFX_PACKING)
#endif
#ifdef AFX_COLL_SEG
#pragma code_seg(AFX_COLL_SEG)
#endif
// CPlex: one block in a singly-linked chain of raw element storage. The
// element bytes are allocated immediately after the CPlex header itself,
// so a single allocation holds both the link and the data.
struct CPlex    // warning variable length structure
{
    CPlex* pNext;           // next block in the chain
#if (_AFX_PACKING >= 8)
    DWORD dwReserved[1];    // align on 8 byte boundary
#endif
    // BYTE data[maxNum*elementSize];

    void* data() { return this+1; }     // element storage starts right after the header

    // Allocates a block sized for nMax elements of cbElement bytes each and
    // links it onto 'head'.
    static CPlex* PASCAL Create(CPlex*& head, UINT nMax, UINT cbElement);
            // like 'calloc' but no zero fill
            // may throw memory exceptions

    void FreeDataChain();       // free this one and links
};
#ifdef AFX_COLL_SEG
#pragma code_seg()
#endif
#ifdef _AFX_PACKING
#pragma pack(pop)
#endif
#endif //__AFXPLEX_H__
/////////////////////////////////////////////////////////////////////////////
| {
"content_hash": "d88abc8c91283a9198178d0510c68598",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 77,
"avg_line_length": 19.931818181818183,
"alnum_prop": 0.5758266818700114,
"repo_name": "johanlantz/headsetpresenter",
"id": "9e6f446256605ed8ac5334a2eacae8b5c8f785bb",
"size": "1299",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "HeadsetPresenter_Bluetools/Microsoft SDK/include/Win64/mfc/AFXPLEX_.H",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "7004"
},
{
"name": "Batchfile",
"bytes": "2489"
},
{
"name": "C",
"bytes": "16452971"
},
{
"name": "C#",
"bytes": "1393901"
},
{
"name": "C++",
"bytes": "19851472"
},
{
"name": "Clarion",
"bytes": "4450"
},
{
"name": "HTML",
"bytes": "50191"
},
{
"name": "Makefile",
"bytes": "51937"
},
{
"name": "NSIS",
"bytes": "68365"
},
{
"name": "Objective-C",
"bytes": "1800430"
},
{
"name": "PHP",
"bytes": "10905"
},
{
"name": "Visual Basic",
"bytes": "823951"
}
],
"symlink_target": ""
} |
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8">
<meta name="viewport" content="width=device-width, initial-scale=1">
<title>hierarchy-builder-shim: 17 s 🏆</title>
<link rel="shortcut icon" type="image/png" href="../../../../../favicon.png" />
<link href="../../../../../bootstrap.min.css" rel="stylesheet">
<link href="../../../../../bootstrap-custom.css" rel="stylesheet">
<link href="//maxcdn.bootstrapcdn.com/font-awesome/4.2.0/css/font-awesome.min.css" rel="stylesheet">
<script src="../../../../../moment.min.js"></script>
<!-- HTML5 Shim and Respond.js IE8 support of HTML5 elements and media queries -->
<!-- WARNING: Respond.js doesn't work if you view the page via file:// -->
<!--[if lt IE 9]>
<script src="https://oss.maxcdn.com/html5shiv/3.7.2/html5shiv.min.js"></script>
<script src="https://oss.maxcdn.com/respond/1.4.2/respond.min.js"></script>
<![endif]-->
</head>
<body>
<div class="container">
<div class="navbar navbar-default" role="navigation">
<div class="container-fluid">
<div class="navbar-header">
<a class="navbar-brand" href="../../../../.."><i class="fa fa-lg fa-flag-checkered"></i> Coq bench</a>
</div>
<div id="navbar" class="collapse navbar-collapse">
<ul class="nav navbar-nav">
<li><a href="../..">clean / released</a></li>
<li class="active"><a href="">8.11.2 / hierarchy-builder-shim - 1.1.0</a></li>
</ul>
</div>
</div>
</div>
<div class="article">
<div class="row">
<div class="col-md-12">
<a href="../..">« Up</a>
<h1>
hierarchy-builder-shim
<small>
1.1.0
<span class="label label-success">17 s 🏆</span>
</small>
</h1>
<p>📅 <em><script>document.write(moment("2022-04-27 11:02:28 +0000", "YYYY-MM-DD HH:mm:ss Z").fromNow());</script> (2022-04-27 11:02:28 UTC)</em><p>
<h2>Context</h2>
<pre># Packages matching: installed
# Name # Installed # Synopsis
base-bigarray base
base-threads base
base-unix base
conf-findutils 1 Virtual package relying on findutils
coq 8.11.2 Formal proof management system
num 1.4 The legacy Num library for arbitrary-precision integer and rational arithmetic
ocaml 4.11.2 The OCaml compiler (virtual package)
ocaml-base-compiler 4.11.2 Official release 4.11.2
ocaml-config 1 OCaml Switch Configuration
ocamlfind 1.9.3 A library manager for OCaml
# opam file:
opam-version: "2.0"
name: "coq-hierarchy-builder"
version: "dev"
maintainer: "Enrico Tassi <enrico.tassi@inria.fr>"
authors: [ "Cyril Cohen" "Kazuhiko Sakaguchi" "Enrico Tassi" ]
license: "MIT"
homepage: "https://github.com/math-comp/hierarchy-builder"
bug-reports: "https://github.com/math-comp/hierarchy-builder/issues"
dev-repo: "git+https://github.com/math-comp/hierarchy-builder"
build: [ make "-C" "shim" "build" ]
install: [ make "-C" "shim" "install" ]
conflicts: [ "coq-hierarchy-builder" ]
depends: [ "coq" {>= "8.10"} ]
synopsis: "Shim package for HB"
description: """
This package provide the support constants one can use to compile files
generated by HB.
"""
tags: [ "logpath:HB" ]
url {
src: "https://github.com/math-comp/hierarchy-builder/archive/v1.1.0.tar.gz"
checksum: "sha256=ac5ebf16afdc4ed14018b823d19838c6e8423319c858c1f6e65f21f6b18d96b1"
}
</pre>
<h2>Lint</h2>
<dl class="dl-horizontal">
<dt>Command</dt>
<dd><code>true</code></dd>
<dt>Return code</dt>
<dd>0</dd>
</dl>
<h2>Dry install 🏜️</h2>
<p>Dry install with the current Coq version:</p>
<dl class="dl-horizontal">
<dt>Command</dt>
<dd><code>opam install -y --show-action coq-hierarchy-builder-shim.1.1.0 coq.8.11.2</code></dd>
<dt>Return code</dt>
<dd>0</dd>
</dl>
<p>Dry install without Coq/switch base, to test if the problem was incompatibility with the current Coq/OCaml version:</p>
<dl class="dl-horizontal">
<dt>Command</dt>
<dd><code>true</code></dd>
<dt>Return code</dt>
<dd>0</dd>
</dl>
<h2>Install dependencies</h2>
<dl class="dl-horizontal">
<dt>Command</dt>
<dd><code>opam list; echo; ulimit -Sv 4000000; timeout 4h opam install -y --deps-only coq-hierarchy-builder-shim.1.1.0 coq.8.11.2</code></dd>
<dt>Return code</dt>
<dd>0</dd>
<dt>Duration</dt>
<dd>12 s</dd>
</dl>
<h2>Install 🚀</h2>
<dl class="dl-horizontal">
<dt>Command</dt>
<dd><code>opam list; echo; ulimit -Sv 16000000; timeout 4h opam install -y -v coq-hierarchy-builder-shim.1.1.0 coq.8.11.2</code></dd>
<dt>Return code</dt>
<dd>0</dd>
<dt>Duration</dt>
<dd>17 s</dd>
</dl>
<h2>Installation size</h2>
<p>Total: 26 K</p>
<ul>
<li>24 K <code>../ocaml-base-compiler.4.11.2/lib/coq/user-contrib/HB/structures.vo</code></li>
<li>1 K <code>../ocaml-base-compiler.4.11.2/lib/coq/user-contrib/HB/structures.glob</code></li>
<li>1 K <code>../ocaml-base-compiler.4.11.2/lib/coq/user-contrib/HB/structures.v</code></li>
</ul>
<h2>Uninstall 🧹</h2>
<dl class="dl-horizontal">
<dt>Command</dt>
<dd><code>opam remove -y coq-hierarchy-builder-shim.1.1.0</code></dd>
<dt>Return code</dt>
<dd>0</dd>
<dt>Missing removes</dt>
<dd>
none
</dd>
<dt>Wrong removes</dt>
<dd>
none
</dd>
</dl>
</div>
</div>
</div>
<hr/>
<div class="footer">
<p class="text-center">
Sources are on <a href="https://github.com/coq-bench">GitHub</a> © Guillaume Claret 🐣
</p>
</div>
</div>
<script src="https://ajax.googleapis.com/ajax/libs/jquery/1.11.1/jquery.min.js"></script>
<script src="../../../../../bootstrap.min.js"></script>
</body>
</html>
| {
"content_hash": "0b71d1ecb6dccbae305eafdaa2e3dd7b",
"timestamp": "",
"source": "github",
"line_count": 159,
"max_line_length": 159,
"avg_line_length": 43.515723270440255,
"alnum_prop": 0.542997542997543,
"repo_name": "coq-bench/coq-bench.github.io",
"id": "5ca70cd8b3b0617c85b26d7133dda185033a5c1a",
"size": "6944",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "clean/Linux-x86_64-4.11.2-2.0.7/released/8.11.2/hierarchy-builder-shim/1.1.0.html",
"mode": "33188",
"license": "mit",
"language": [],
"symlink_target": ""
} |
package com.android.dx;
import com.android.dx.rop.code.Rop;
import com.android.dx.rop.code.Rops;
import com.android.dx.rop.type.TypeList;
/**
 * An operation on two values of the same type.
 *
 * <p>Math operations ({@link #ADD}, {@link #SUBTRACT}, {@link #MULTIPLY},
 * {@link #DIVIDE}, and {@link #REMAINDER}) support ints, longs, floats and
 * doubles.
 *
 * <p>Bit operations ({@link #AND}, {@link #OR}, {@link #XOR}, {@link
 * #SHIFT_LEFT}, {@link #SHIFT_RIGHT}, {@link #UNSIGNED_SHIFT_RIGHT}) support
 * ints and longs.
 *
 * <p>Division by zero behaves differently depending on the operand type.
 * For int and long operands, {@link #DIVIDE} and {@link #REMAINDER} throw
 * {@link ArithmeticException} if {@code b == 0}. For float and double operands,
 * the operations return {@code NaN}.
 */
public enum BinaryOp {
    /** {@code a + b} */
    ADD() {
        @Override Rop rop(TypeList types) {
            return Rops.opAdd(types);
        }
    },

    /** {@code a - b} */
    SUBTRACT() {
        @Override Rop rop(TypeList types) {
            return Rops.opSub(types);
        }
    },

    /** {@code a * b} */
    MULTIPLY() {
        @Override Rop rop(TypeList types) {
            return Rops.opMul(types);
        }
    },

    /** {@code a / b} */
    DIVIDE() {
        @Override Rop rop(TypeList types) {
            return Rops.opDiv(types);
        }
    },

    /** {@code a % b} */
    REMAINDER() {
        @Override Rop rop(TypeList types) {
            return Rops.opRem(types);
        }
    },

    /** {@code a & b} */
    AND() {
        @Override Rop rop(TypeList types) {
            return Rops.opAnd(types);
        }
    },

    /** {@code a | b} */
    OR() {
        @Override Rop rop(TypeList types) {
            return Rops.opOr(types);
        }
    },

    /** {@code a ^ b} */
    XOR() {
        @Override Rop rop(TypeList types) {
            return Rops.opXor(types);
        }
    },

    /** {@code a << b} */
    SHIFT_LEFT() {
        @Override Rop rop(TypeList types) {
            return Rops.opShl(types);
        }
    },

    /** {@code a >> b} */
    SHIFT_RIGHT() {
        @Override Rop rop(TypeList types) {
            return Rops.opShr(types);
        }
    },

    /** {@code a >>> b} */
    UNSIGNED_SHIFT_RIGHT() {
        @Override Rop rop(TypeList types) {
            return Rops.opUshr(types);
        }
    };

    /**
     * Returns the rop that implements this operation for operands of the
     * given types. (Uses the imported {@code TypeList}, consistent with the
     * constant-specific overrides above, instead of the fully-qualified name.)
     */
    abstract Rop rop(TypeList types);
}
| {
"content_hash": "346875a88c091c5ee12ae8bacd141611",
"timestamp": "",
"source": "github",
"line_count": 104,
"max_line_length": 80,
"avg_line_length": 23.58653846153846,
"alnum_prop": 0.5169180595189564,
"repo_name": "sawrus/dexmaker",
"id": "20c6501f832e23a0319de1511d5e54991c424d62",
"size": "3072",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "dexmaker/src/main/java/com/android/dx/BinaryOp.java",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Java",
"bytes": "2130179"
}
],
"symlink_target": ""
} |
#include "src/core/lib/iomgr/port.h"
/* This polling engine is only relevant on linux kernels supporting epoll() */
#ifdef GRPC_LINUX_EPOLL
#include "src/core/lib/iomgr/ev_epoll_linux.h"
#include <assert.h>
#include <errno.h>
#include <poll.h>
#include <pthread.h>
#include <signal.h>
#include <string.h>
#include <sys/epoll.h>
#include <sys/socket.h>
#include <unistd.h>
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
#include <grpc/support/string_util.h>
#include <grpc/support/tls.h>
#include <grpc/support/useful.h>
#include "src/core/lib/iomgr/ev_posix.h"
#include "src/core/lib/iomgr/iomgr_internal.h"
#include "src/core/lib/iomgr/wakeup_fd_posix.h"
#include "src/core/lib/iomgr/workqueue.h"
#include "src/core/lib/profiling/timers.h"
#include "src/core/lib/support/block_annotate.h"
/* TODO: sreek - Move this to init.c and initialize this like other tracers. */
static int grpc_polling_trace = 0; /* Disabled by default */
#define GRPC_POLLING_TRACE(fmt, ...) \
if (grpc_polling_trace) { \
gpr_log(GPR_INFO, (fmt), __VA_ARGS__); \
}
/* Uncomment the following to enable extra checks on poll_object operations */
/* #define PO_DEBUG */
static int grpc_wakeup_signal = -1;
static bool is_grpc_wakeup_signal_initialized = false;
/* TODO: sreek: Right now, this wakes up all pollers. In future we should make
* sure to wake up one polling thread (which can wake up other threads if
* needed) */
static grpc_wakeup_fd global_wakeup_fd;
/* Implements the function defined in grpc_posix.h. This function might be
* called before even calling grpc_init() to set either a different signal to
* use. If signum == -1, then the use of signals is disabled */
/* Records the signal number the epoll engine should use for wakeups.
   Passing a negative signum disables signal use (and hence this engine).
   May be called before grpc_init(). */
void grpc_use_signal(int signum) {
  is_grpc_wakeup_signal_initialized = true;
  grpc_wakeup_signal = signum;
  if (signum < 0) {
    gpr_log(GPR_INFO,
            "Use of signals is disabled. Epoll engine will not be used");
    return;
  }
  gpr_log(GPR_INFO, "epoll engine will be using signal: %d",
          grpc_wakeup_signal);
}
/* Forward declaration: defined in the "Polling island" section below. */
struct polling_island;
/* The three kinds of objects that can be bound to a polling island. */
typedef enum {
  POLL_OBJ_FD,
  POLL_OBJ_POLLSET,
  POLL_OBJ_POLLSET_SET
} poll_obj_type;
/* State shared by fds, pollsets and pollset_sets: a lock and the polling
   island (if any) the object is currently bound to. */
typedef struct poll_obj {
#ifdef PO_DEBUG
  poll_obj_type obj_type; /* Only present for sanity checks (see PO_DEBUG) */
#endif
  gpr_mu mu;
  struct polling_island *pi; /* Guarded by mu */
} poll_obj;
/* Human-readable name of a poll_obj_type (used in log/error messages). */
const char *poll_obj_string(poll_obj_type po_type) {
  if (po_type == POLL_OBJ_FD) {
    return "fd";
  }
  if (po_type == POLL_OBJ_POLLSET) {
    return "pollset";
  }
  if (po_type == POLL_OBJ_POLLSET_SET) {
    return "pollset_set";
  }
  GPR_UNREACHABLE_CODE(return "UNKNOWN");
}
/*******************************************************************************
* Fd Declarations
*/
/* Recover the grpc_fd that embeds 'po'. Valid only because 'po' is the first
   field of struct grpc_fd. */
#define FD_FROM_PO(po) ((grpc_fd *)(po))
struct grpc_fd {
  poll_obj po;
  /* The underlying OS file descriptor */
  int fd;
  /* refst format:
     bit 0 : 1=Active / 0=Orphaned
     bits 1-n : refcount
     Ref/Unref by two to avoid altering the orphaned bit */
  gpr_atm refst;
  /* Internally stores data of type (grpc_error *). If the FD is shutdown, this
     contains reason for shutdown (i.e a pointer to grpc_error) ORed with
     FD_SHUTDOWN_BIT. Since address allocations are word-aligned, the lower bit
     of (grpc_error *) addresses is guaranteed to be zero. Even if the
     (grpc_error *), is of special types like GRPC_ERROR_NONE, GRPC_ERROR_OOM
     etc, the lower bit is guaranteed to be zero.
     Once an fd is shutdown, any pending or future read/write closures on the
     fd should fail */
  gpr_atm shutdown_error;
  /* The fd is either closed or we relinquished control of it. In either
     cases, this indicates that the 'fd' on this structure is no longer
     valid */
  bool orphaned;
  /* Closures to call when the fd is readable or writable respectively. These
     fields contain one of the following values:
       CLOSURE_READY     : The fd has an I/O event of interest but there is no
                           closure yet to execute
       CLOSURE_NOT_READY : The fd has no I/O event of interest
       closure ptr       : The closure to be executed when the fd has an I/O
                           event of interest
       shutdown_error | FD_SHUTDOWN_BIT :
                          'shutdown_error' field ORed with FD_SHUTDOWN_BIT.
                           This indicates that the fd is shutdown. Since all
                           memory allocations are word-aligned, the lower two
                           bits of the shutdown_error pointer are always 0. So
                           it is safe to OR these with FD_SHUTDOWN_BIT
     Valid state transitions:
       <closure ptr> <-----3------ CLOSURE_NOT_READY ----1----> CLOSURE_READY
         |  |                         ^   |    ^                         |  |
         |  |                         |   |    |                         |  |
         |  +--------------4----------+   6    +---------2---------------+  |
         |                                |                                 |
         |                                v                                 |
         +-----5-------> [shutdown_error | FD_SHUTDOWN_BIT] <----7---------+
     For 1, 4 : See set_ready() function
     For 2, 3 : See notify_on() function
     For 5,6,7: See set_shutdown() function */
  gpr_atm read_closure;
  gpr_atm write_closure;
  /* Intrusive link used while the struct sits on the fd freelist */
  struct grpc_fd *freelist_next;
  /* Scheduled (with the orphan's error status) when fd_orphan completes */
  grpc_closure *on_done_closure;
  /* The pollset that last noticed that the fd is readable. The actual type
   * stored in this is (grpc_pollset *) */
  gpr_atm read_notifier_pollset;
  /* Registration handle for iomgr object tracking */
  grpc_iomgr_object iomgr_object;
};
/* Reference counting for fds */
// #define GRPC_FD_REF_COUNT_DEBUG
#ifdef GRPC_FD_REF_COUNT_DEBUG
static void fd_ref(grpc_fd *fd, const char *reason, const char *file, int line);
static void fd_unref(grpc_fd *fd, const char *reason, const char *file,
int line);
#define GRPC_FD_REF(fd, reason) fd_ref(fd, reason, __FILE__, __LINE__)
#define GRPC_FD_UNREF(fd, reason) fd_unref(fd, reason, __FILE__, __LINE__)
#else
static void fd_ref(grpc_fd *fd);
static void fd_unref(grpc_fd *fd);
#define GRPC_FD_REF(fd, reason) fd_ref(fd)
#define GRPC_FD_UNREF(fd, reason) fd_unref(fd)
#endif
static void fd_global_init(void);
static void fd_global_shutdown(void);
#define CLOSURE_NOT_READY ((gpr_atm)0)
#define CLOSURE_READY ((gpr_atm)2)
#define FD_SHUTDOWN_BIT 1
/*******************************************************************************
* Polling island Declarations
*/
#ifdef GRPC_WORKQUEUE_REFCOUNT_DEBUG
#define PI_ADD_REF(p, r) pi_add_ref_dbg((p), (r), __FILE__, __LINE__)
#define PI_UNREF(exec_ctx, p, r) \
pi_unref_dbg((exec_ctx), (p), (r), __FILE__, __LINE__)
#else /* defined(GRPC_WORKQUEUE_REFCOUNT_DEBUG) */
#define PI_ADD_REF(p, r) pi_add_ref((p))
#define PI_UNREF(exec_ctx, p, r) pi_unref((exec_ctx), (p))
#endif /* !defined(GRPC_PI_REF_COUNT_DEBUG) */
/* This is also used as grpc_workqueue (by directly casting it): the
   workqueue_scheduler member must therefore stay the first field. */
typedef struct polling_island {
  grpc_closure_scheduler workqueue_scheduler;
  gpr_mu mu;
  /* Ref count. Use PI_ADD_REF() and PI_UNREF() macros to increment/decrement
     the refcount.
     Once the ref count becomes zero, this structure is destroyed which means
     we should ensure that there is never a scenario where a PI_ADD_REF() is
     racing with a PI_UNREF() that just made the ref_count zero. */
  gpr_atm ref_count;
  /* Pointer to the polling_island this merged into.
   * merged_to value is only set once in polling_island's lifetime (and that too
   * only if the island is merged with another island). Because of this, we can
   * use gpr_atm type here so that we can do atomic access on this and reduce
   * lock contention on 'mu' mutex.
   *
   * Note that if this field is not NULL (i.e not 0), all the remaining fields
   * (except mu and ref_count) are invalid and must be ignored. */
  gpr_atm merged_to;
  /* Number of threads currently polling on this island */
  gpr_atm poller_count;
  /* Mutex guarding the read end of the workqueue (must be held to pop from
   * workqueue_items) */
  gpr_mu workqueue_read_mu;
  /* Queue of closures to be executed (multi-producer, single-consumer) */
  gpr_mpscq workqueue_items;
  /* Count of items in workqueue_items */
  gpr_atm workqueue_item_count;
  /* Wakeup fd used to wake pollers to check the contents of workqueue_items */
  grpc_wakeup_fd workqueue_wakeup_fd;
  /* The fd of the underlying epoll set */
  int epoll_fd;
  /* The file descriptors in the epoll set (count, capacity, dynamic array) */
  size_t fd_cnt;
  size_t fd_capacity;
  grpc_fd **fds;
} polling_island;
/*******************************************************************************
* Pollset Declarations
*/
/* One thread currently blocked (or about to block) in pollset_work. Workers
   form a doubly-linked list rooted at grpc_pollset.root_worker. */
struct grpc_pollset_worker {
  /* Thread id of this worker (used to deliver the wakeup signal) */
  pthread_t pt_id;
  /* Used to prevent a worker from getting kicked multiple times */
  gpr_atm is_kicked;
  struct grpc_pollset_worker *next;
  struct grpc_pollset_worker *prev;
};
struct grpc_pollset {
  poll_obj po;
  grpc_pollset_worker root_worker;
  bool kicked_without_pollers;
  bool shutting_down;          /* Is the pollset shutting down ? */
  bool finish_shutdown_called; /* Is the 'finish_shutdown_locked()' called ? */
  grpc_closure *shutdown_done; /* Called after shutdown is complete */
};
/*******************************************************************************
 * Pollset-set Declarations
 */
/* A pollset_set carries no state of its own beyond its poll_obj binding. */
struct grpc_pollset_set {
  poll_obj po;
};
/*******************************************************************************
* Common helpers
*/
/* Folds 'error' into '*composite' (creating the composite from 'desc' on the
   first failure). Returns true iff 'error' was GRPC_ERROR_NONE. */
static bool append_error(grpc_error **composite, grpc_error *error,
                         const char *desc) {
  bool ok = (error == GRPC_ERROR_NONE);
  if (!ok) {
    if (*composite == GRPC_ERROR_NONE) {
      *composite = GRPC_ERROR_CREATE_FROM_COPIED_STRING(desc);
    }
    *composite = grpc_error_add_child(*composite, error);
  }
  return ok;
}
/*******************************************************************************
* Polling island Definitions
*/
/* The wakeup fd that is used to wake up all threads in a Polling island. This
is useful in the polling island merge operation where we need to wakeup all
the threads currently polling the smaller polling island (so that they can
start polling the new/merged polling island)
NOTE: This fd is initialized to be readable and MUST NOT be consumed i.e the
threads that woke up MUST NOT call grpc_wakeup_fd_consume_wakeup() */
static grpc_wakeup_fd polling_island_wakeup_fd;
/* The polling island being polled right now.
See comments in workqueue_maybe_wakeup for why this is tracked. */
static __thread polling_island *g_current_thread_polling_island;
/* Forward declaration */
static void polling_island_delete(grpc_exec_ctx *exec_ctx, polling_island *pi);
static void workqueue_enqueue(grpc_exec_ctx *exec_ctx, grpc_closure *closure,
grpc_error *error);
#ifdef GRPC_TSAN
/* Currently TSAN may incorrectly flag data races between epoll_ctl and
epoll_wait for any grpc_fd structs that are added to the epoll set via
epoll_ctl and are returned (within a very short window) via epoll_wait().
To work-around this race, we establish a happens-before relation between
the code just-before epoll_ctl() and the code after epoll_wait() by using
this atomic */
gpr_atm g_epoll_sync;
#endif /* defined(GRPC_TSAN) */
/* Scheduler vtable for closures bound to a polling island's workqueue.
   NOTE(review): both function slots point at workqueue_enqueue — presumably
   the run and run-during-shutdown hooks; confirm against the declaration of
   grpc_closure_scheduler_vtable. */
static const grpc_closure_scheduler_vtable workqueue_scheduler_vtable = {
    workqueue_enqueue, workqueue_enqueue, "workqueue"};
static void pi_add_ref(polling_island *pi);
static void pi_unref(grpc_exec_ctx *exec_ctx, polling_island *pi);
#ifdef GRPC_WORKQUEUE_REFCOUNT_DEBUG
/* Debug wrappers: identical to pi_add_ref/pi_unref but log every ref-count
   change together with the call site. */
static void pi_add_ref_dbg(polling_island *pi, const char *reason,
                           const char *file, int line) {
  long old_cnt = gpr_atm_acq_load(&pi->ref_count);
  pi_add_ref(pi);
  gpr_log(GPR_DEBUG, "Add ref pi: %p, old: %ld -> new:%ld (%s) - (%s, %d)",
          (void *)pi, old_cnt, old_cnt + 1, reason, file, line);
}
static void pi_unref_dbg(grpc_exec_ctx *exec_ctx, polling_island *pi,
                         const char *reason, const char *file, int line) {
  long old_cnt = gpr_atm_acq_load(&pi->ref_count);
  pi_unref(exec_ctx, pi);
  gpr_log(GPR_DEBUG, "Unref pi: %p, old:%ld -> new:%ld (%s) - (%s, %d)",
          (void *)pi, old_cnt, (old_cnt - 1), reason, file, line);
}
/* A grpc_workqueue is a polling_island viewed through a different type (see
   the comment on struct polling_island); ref/unref forward to the island's
   ref count. A NULL workqueue is accepted and ignored. */
static grpc_workqueue *workqueue_ref(grpc_workqueue *workqueue,
                                     const char *file, int line,
                                     const char *reason) {
  if (workqueue != NULL) {
    pi_add_ref_dbg((polling_island *)workqueue, reason, file, line);
  }
  return workqueue;
}
static void workqueue_unref(grpc_exec_ctx *exec_ctx, grpc_workqueue *workqueue,
                            const char *file, int line, const char *reason) {
  if (workqueue != NULL) {
    pi_unref_dbg(exec_ctx, (polling_island *)workqueue, reason, file, line);
  }
}
#else
/* Non-debug variants: forward to the island's ref count; NULL is a no-op. */
static grpc_workqueue *workqueue_ref(grpc_workqueue *workqueue) {
  if (workqueue != NULL) {
    pi_add_ref((polling_island *)workqueue);
  }
  return workqueue;
}
static void workqueue_unref(grpc_exec_ctx *exec_ctx,
                            grpc_workqueue *workqueue) {
  if (workqueue != NULL) {
    pi_unref(exec_ctx, (polling_island *)workqueue);
  }
}
#endif
/* Takes one ref on 'pi'. The caller must already hold a ref (see the comment
   on polling_island.ref_count for why a 0 -> 1 race must never happen). */
static void pi_add_ref(polling_island *pi) {
  gpr_atm_no_barrier_fetch_add(&pi->ref_count, 1);
}
/* Drops one ref on 'pi'; deletes the island when the count reaches zero. */
static void pi_unref(grpc_exec_ctx *exec_ctx, polling_island *pi) {
  /* If ref count went to zero, delete the polling island.
     Note that this deletion need not be done under a lock. Once the ref count
     goes to zero, we are guaranteed that no one else holds a reference to the
     polling island (and that there is no racing pi_add_ref() call either).
     Also, if we are deleting the polling island and the merged_to field is
     non-empty, we should remove a ref to the merged_to polling island
   */
  if (1 == gpr_atm_full_fetch_add(&pi->ref_count, -1)) {
    polling_island *next = (polling_island *)gpr_atm_acq_load(&pi->merged_to);
    polling_island_delete(exec_ctx, pi);
    if (next != NULL) {
      PI_UNREF(exec_ctx, next, "pi_delete"); /* Recursive call */
    }
  }
}
/* Adds the 'fd_count' descriptors in 'fds' to pi's epoll set and to pi->fds
   (growing the array as needed). EEXIST from epoll_ctl is treated as benign
   (the fd is already in the set); any other failure is appended to '*error'.
   If 'add_fd_refs' is true, a ref is taken on each fd that gets recorded.
   The caller is expected to hold pi->mu lock before calling this function */
static void polling_island_add_fds_locked(polling_island *pi, grpc_fd **fds,
                                          size_t fd_count, bool add_fd_refs,
                                          grpc_error **error) {
  int err;
  size_t i;
  struct epoll_event ev;
  char *err_msg;
  const char *err_desc = "polling_island_add_fds";
#ifdef GRPC_TSAN
  /* See the definition of g_epoll_sync for more context */
  gpr_atm_rel_store(&g_epoll_sync, (gpr_atm)0);
#endif /* defined(GRPC_TSAN) */
  for (i = 0; i < fd_count; i++) {
    /* Edge-triggered, for both readability and writability */
    ev.events = (uint32_t)(EPOLLIN | EPOLLOUT | EPOLLET);
    ev.data.ptr = fds[i];
    err = epoll_ctl(pi->epoll_fd, EPOLL_CTL_ADD, fds[i]->fd, &ev);
    if (err < 0) {
      if (errno != EEXIST) {
        gpr_asprintf(
            &err_msg,
            "epoll_ctl (epoll_fd: %d) add fd: %d failed with error: %d (%s)",
            pi->epoll_fd, fds[i]->fd, errno, strerror(errno));
        append_error(error, GRPC_OS_ERROR(errno, err_msg), err_desc);
        gpr_free(err_msg);
      }
      /* Do not record an fd that was not actually added */
      continue;
    }
    if (pi->fd_cnt == pi->fd_capacity) {
      /* Grow by at least 8 slots and at least 1.5x */
      pi->fd_capacity = GPR_MAX(pi->fd_capacity + 8, pi->fd_cnt * 3 / 2);
      pi->fds = gpr_realloc(pi->fds, sizeof(grpc_fd *) * pi->fd_capacity);
    }
    pi->fds[pi->fd_cnt++] = fds[i];
    if (add_fd_refs) {
      GRPC_FD_REF(fds[i], "polling_island");
    }
  }
}
/* The caller is expected to hold pi->mu before calling this */
/* Adds the read side of 'wakeup_fd' to pi's epoll set (edge-triggered).
   EEXIST from epoll_ctl is treated as benign (the same wakeup fd may already
   be in the set); any other failure is appended to '*error'. */
static void polling_island_add_wakeup_fd_locked(polling_island *pi,
                                                grpc_wakeup_fd *wakeup_fd,
                                                grpc_error **error) {
  struct epoll_event ev;
  int err;
  char *err_msg;
  const char *err_desc = "polling_island_add_wakeup_fd";
  ev.events = (uint32_t)(EPOLLIN | EPOLLET);
  ev.data.ptr = wakeup_fd;
  err = epoll_ctl(pi->epoll_fd, EPOLL_CTL_ADD,
                  GRPC_WAKEUP_FD_GET_READ_FD(wakeup_fd), &ev);
  if (err < 0 && errno != EEXIST) {
    /* Report the fd we actually tried to add. (The previous code mistakenly
       logged global_wakeup_fd's read fd here, even when a different wakeup fd
       was being added.) */
    gpr_asprintf(&err_msg,
                 "epoll_ctl (epoll_fd: %d) add wakeup fd: %d failed with "
                 "error: %d (%s)",
                 pi->epoll_fd, GRPC_WAKEUP_FD_GET_READ_FD(wakeup_fd), errno,
                 strerror(errno));
    append_error(error, GRPC_OS_ERROR(errno, err_msg), err_desc);
    gpr_free(err_msg);
  }
}
/* Removes every fd in pi->fds from pi's epoll set and empties pi->fds.
   ENOENT from epoll_ctl is ignored (the fd was already gone from the set);
   other failures are appended to '*error'. If 'remove_fd_refs' is true, the
   ref taken when each fd was added is dropped.
   The caller is expected to hold pi->mu lock before calling this function */
static void polling_island_remove_all_fds_locked(polling_island *pi,
                                                 bool remove_fd_refs,
                                                 grpc_error **error) {
  int err;
  size_t i;
  char *err_msg;
  const char *err_desc = "polling_island_remove_fds";
  for (i = 0; i < pi->fd_cnt; i++) {
    err = epoll_ctl(pi->epoll_fd, EPOLL_CTL_DEL, pi->fds[i]->fd, NULL);
    if (err < 0 && errno != ENOENT) {
      gpr_asprintf(&err_msg,
                   "epoll_ctl (epoll_fd: %d) delete fds[%zu]: %d failed with "
                   "error: %d (%s)",
                   pi->epoll_fd, i, pi->fds[i]->fd, errno, strerror(errno));
      append_error(error, GRPC_OS_ERROR(errno, err_msg), err_desc);
      gpr_free(err_msg);
    }
    if (remove_fd_refs) {
      GRPC_FD_UNREF(pi->fds[i], "polling_island");
    }
  }
  pi->fd_cnt = 0;
}
/* Removes a single 'fd' from pi's epoll set and from pi->fds, dropping the
   polling-island ref taken when it was added. When 'is_fd_closed' is true the
   kernel has already removed the fd from the epoll set, so only the
   book-keeping is updated. ENOENT from epoll_ctl is ignored.
   The caller is expected to hold pi->mu lock before calling this function */
static void polling_island_remove_fd_locked(polling_island *pi, grpc_fd *fd,
                                            bool is_fd_closed,
                                            grpc_error **error) {
  int err;
  size_t i;
  char *err_msg;
  const char *err_desc = "polling_island_remove_fd";
  /* If fd is already closed, then it would have been automatically been removed
     from the epoll set */
  if (!is_fd_closed) {
    err = epoll_ctl(pi->epoll_fd, EPOLL_CTL_DEL, fd->fd, NULL);
    if (err < 0 && errno != ENOENT) {
      gpr_asprintf(
          &err_msg,
          "epoll_ctl (epoll_fd: %d) del fd: %d failed with error: %d (%s)",
          pi->epoll_fd, fd->fd, errno, strerror(errno));
      append_error(error, GRPC_OS_ERROR(errno, err_msg), err_desc);
      gpr_free(err_msg);
    }
  }
  /* Swap-remove the fd from the (unordered) pi->fds array */
  for (i = 0; i < pi->fd_cnt; i++) {
    if (pi->fds[i] == fd) {
      pi->fds[i] = pi->fds[--pi->fd_cnt];
      GRPC_FD_UNREF(fd, "polling_island");
      break;
    }
  }
}
/* Creates a new polling island whose epoll set contains the global and
   workqueue wakeup fds plus 'initial_fd' (if non-NULL). Might return NULL in
   case of an error, with '*error' describing what failed.
   NOTE(review): ref_count starts at 0 — callers appear to be responsible for
   taking the first ref via PI_ADD_REF; confirm at call sites. */
static polling_island *polling_island_create(grpc_exec_ctx *exec_ctx,
                                             grpc_fd *initial_fd,
                                             grpc_error **error) {
  polling_island *pi = NULL;
  const char *err_desc = "polling_island_create";
  *error = GRPC_ERROR_NONE;
  pi = gpr_malloc(sizeof(*pi));
  pi->workqueue_scheduler.vtable = &workqueue_scheduler_vtable;
  gpr_mu_init(&pi->mu);
  pi->fd_cnt = 0;
  pi->fd_capacity = 0;
  pi->fds = NULL;
  pi->epoll_fd = -1;
  gpr_mu_init(&pi->workqueue_read_mu);
  gpr_mpscq_init(&pi->workqueue_items);
  gpr_atm_rel_store(&pi->workqueue_item_count, 0);
  gpr_atm_rel_store(&pi->ref_count, 0);
  gpr_atm_rel_store(&pi->poller_count, 0);
  gpr_atm_rel_store(&pi->merged_to, (gpr_atm)NULL);
  if (!append_error(error, grpc_wakeup_fd_init(&pi->workqueue_wakeup_fd),
                    err_desc)) {
    goto done;
  }
  pi->epoll_fd = epoll_create1(EPOLL_CLOEXEC);
  if (pi->epoll_fd < 0) {
    append_error(error, GRPC_OS_ERROR(errno, "epoll_create1"), err_desc);
    goto done;
  }
  polling_island_add_wakeup_fd_locked(pi, &global_wakeup_fd, error);
  polling_island_add_wakeup_fd_locked(pi, &pi->workqueue_wakeup_fd, error);
  if (initial_fd != NULL) {
    polling_island_add_fds_locked(pi, &initial_fd, 1, true, error);
  }
done:
  /* Any accumulated error aborts the whole creation */
  if (*error != GRPC_ERROR_NONE) {
    polling_island_delete(exec_ctx, pi);
    pi = NULL;
  }
  return pi;
}
/* Frees 'pi'. Preconditions (asserted): all fds have been removed
   (fd_cnt == 0) and the workqueue is empty (workqueue_item_count == 0). */
static void polling_island_delete(grpc_exec_ctx *exec_ctx, polling_island *pi) {
  GPR_ASSERT(pi->fd_cnt == 0);
  if (pi->epoll_fd >= 0) {
    close(pi->epoll_fd);
  }
  GPR_ASSERT(gpr_atm_no_barrier_load(&pi->workqueue_item_count) == 0);
  gpr_mu_destroy(&pi->workqueue_read_mu);
  gpr_mpscq_destroy(&pi->workqueue_items);
  gpr_mu_destroy(&pi->mu);
  grpc_wakeup_fd_destroy(&pi->workqueue_wakeup_fd);
  gpr_free(pi->fds);
  gpr_free(pi);
}
/* Attempts to gets the last polling island in the linked list (liked by the
* 'merged_to' field). Since this does not lock the polling island, there are no
* guarantees that the island returned is the last island */
/* Follows the 'merged_to' chain from 'pi' and returns the last island found.
   No locks are taken, so the result may already be stale by the time it is
   returned (hence "maybe"). */
static polling_island *polling_island_maybe_get_latest(polling_island *pi) {
  for (;;) {
    polling_island *successor =
        (polling_island *)gpr_atm_acq_load(&pi->merged_to);
    if (successor == NULL) {
      return pi;
    }
    pi = successor;
  }
}
/* Gets the lock on the *latest* polling island i.e the last polling island in
the linked list (linked by the 'merged_to' field). Call gpr_mu_unlock on the
returned polling island's mu.
Usage: To lock/unlock polling island "pi", do the following:
polling_island *pi_latest = polling_island_lock(pi);
...
... critical section ..
...
gpr_mu_unlock(&pi_latest->mu); // NOTE: use pi_latest->mu. NOT pi->mu */
/* See the usage comment above: locks and returns the tail of pi's merge
   chain. */
static polling_island *polling_island_lock(polling_island *pi) {
  polling_island *next = NULL;
  while (true) {
    next = (polling_island *)gpr_atm_acq_load(&pi->merged_to);
    if (next == NULL) {
      /* Looks like 'pi' is the last node in the linked list but unless we check
         this by holding the pi->mu lock, we cannot be sure (i.e without the
         pi->mu lock, we don't prevent island merges).
         To be absolutely sure, check once more by holding the pi->mu lock */
      gpr_mu_lock(&pi->mu);
      next = (polling_island *)gpr_atm_acq_load(&pi->merged_to);
      if (next == NULL) {
        /* pi is infact the last node and we have the pi->mu lock. we're done */
        break;
      }
      /* pi->merged_to is not NULL i.e pi isn't the last node anymore. pi->mu
       * isn't the lock we are interested in. Continue traversing the list */
      gpr_mu_unlock(&pi->mu);
    }
    /* Chase the (possibly newly appeared) tail of the merge chain */
    pi = next;
  }
  return pi;
}
/* Gets the lock on the *latest* polling islands in the linked lists pointed by
*p and *q (and also updates *p and *q to point to the latest polling islands)
This function is needed because calling the following block of code to obtain
locks on polling islands (*p and *q) is prone to deadlocks.
{
polling_island_lock(*p, true);
polling_island_lock(*q, true);
}
Usage/example:
polling_island *p1;
polling_island *p2;
..
polling_island_lock_pair(&p1, &p2);
..
.. Critical section with both p1 and p2 locked
..
// Release locks: Always call polling_island_unlock_pair() to release locks
polling_island_unlock_pair(p1, p2);
*/
/* See the usage comment above: locks the tails of both merge chains without
   deadlocking, updating *p and *q to the locked islands. */
static void polling_island_lock_pair(polling_island **p, polling_island **q) {
  polling_island *pi_1 = *p;
  polling_island *pi_2 = *q;
  polling_island *next_1 = NULL;
  polling_island *next_2 = NULL;
  /* The algorithm is simple:
      - Go to the last polling islands in the linked lists *pi_1 and *pi_2 (and
        keep updating pi_1 and pi_2)
      - Then obtain locks on the islands by following a lock order rule of
        locking polling_island with lower address first
           Special case: Before obtaining the locks, check if pi_1 and pi_2 are
           pointing to the same island. If that is the case, we can just call
           polling_island_lock()
      - After obtaining both the locks, double check that the polling islands
        are still the last polling islands in their respective linked lists
        (this is because there might have been polling island merges before
        we got the lock)
      - If the polling islands are the last islands, we are done. If not,
        release the locks and continue the process from the first step */
  while (true) {
    next_1 = (polling_island *)gpr_atm_acq_load(&pi_1->merged_to);
    while (next_1 != NULL) {
      pi_1 = next_1;
      next_1 = (polling_island *)gpr_atm_acq_load(&pi_1->merged_to);
    }
    next_2 = (polling_island *)gpr_atm_acq_load(&pi_2->merged_to);
    while (next_2 != NULL) {
      pi_2 = next_2;
      next_2 = (polling_island *)gpr_atm_acq_load(&pi_2->merged_to);
    }
    if (pi_1 == pi_2) {
      pi_1 = pi_2 = polling_island_lock(pi_1);
      break;
    }
    /* Lock-order by address to avoid deadlock against a concurrent caller
       that sees the two islands in the opposite order */
    if (pi_1 < pi_2) {
      gpr_mu_lock(&pi_1->mu);
      gpr_mu_lock(&pi_2->mu);
    } else {
      gpr_mu_lock(&pi_2->mu);
      gpr_mu_lock(&pi_1->mu);
    }
    /* Re-validate that both islands are still chain tails now that we hold
       their locks; otherwise drop the locks and retry */
    next_1 = (polling_island *)gpr_atm_acq_load(&pi_1->merged_to);
    next_2 = (polling_island *)gpr_atm_acq_load(&pi_2->merged_to);
    if (next_1 == NULL && next_2 == NULL) {
      break;
    }
    gpr_mu_unlock(&pi_1->mu);
    gpr_mu_unlock(&pi_2->mu);
  }
  *p = pi_1;
  *q = pi_2;
}
/* Releases the locks taken by polling_island_lock_pair(); handles the case
   where both pointers refer to the same island (single unlock). */
static void polling_island_unlock_pair(polling_island *p, polling_island *q) {
  gpr_mu_unlock(&p->mu);
  if (q != p) {
    gpr_mu_unlock(&q->mu);
  }
}
/* Wakes a poller on 'pi' (via the workqueue wakeup fd) if some poller is
   likely around to service newly-queued work. */
static void workqueue_maybe_wakeup(polling_island *pi) {
  /* If this thread is the current poller, then it may be that it's about to
     decrement the current poller count, so we need to look past this thread */
  bool is_current_poller = (g_current_thread_polling_island == pi);
  gpr_atm min_current_pollers_for_wakeup = is_current_poller ? 1 : 0;
  gpr_atm current_pollers = gpr_atm_no_barrier_load(&pi->poller_count);
  /* Only issue a wakeup if it's likely that some poller could come in and take
     it right now. Note that since we do an anticipatory mpscq_pop every poll
     loop, it's ok if we miss the wakeup here, as we'll get the work item when
     the next poller enters anyway. */
  if (current_pollers > min_current_pollers_for_wakeup) {
    GRPC_LOG_IF_ERROR("workqueue_wakeup_fd",
                      grpc_wakeup_fd_wakeup(&pi->workqueue_wakeup_fd));
  }
}
/* If 'q' has been merged into another island, drains q's workqueue into its
   parent (waking a poller on the parent when anything was moved) and then
   recurses up the merge chain. No-op when q is not merged. */
static void workqueue_move_items_to_parent(polling_island *q) {
  polling_island *p = (polling_island *)gpr_atm_no_barrier_load(&q->merged_to);
  if (p == NULL) {
    return;
  }
  /* workqueue_read_mu guards the consumer side of q's MPSC queue */
  gpr_mu_lock(&q->workqueue_read_mu);
  int num_added = 0;
  while (gpr_atm_no_barrier_load(&q->workqueue_item_count) > 0) {
    gpr_mpscq_node *n = gpr_mpscq_pop(&q->workqueue_items);
    if (n != NULL) {
      gpr_atm_no_barrier_fetch_add(&q->workqueue_item_count, -1);
      gpr_atm_no_barrier_fetch_add(&p->workqueue_item_count, 1);
      gpr_mpscq_push(&p->workqueue_items, n);
      num_added++;
    }
  }
  gpr_mu_unlock(&q->workqueue_read_mu);
  if (num_added > 0) {
    workqueue_maybe_wakeup(p);
  }
  workqueue_move_items_to_parent(p);
}
/* Merges islands p and q into one: the island with fewer fds has its fds
   moved into the other, its pollers are woken, its 'merged_to' is pointed at
   the survivor and its queued work is forwarded. Returns the surviving
   island. */
static polling_island *polling_island_merge(polling_island *p,
                                            polling_island *q,
                                            grpc_error **error) {
  /* Get locks on both the polling islands */
  polling_island_lock_pair(&p, &q);
  if (p != q) {
    /* Make sure that p points to the polling island with fewer fds than q */
    if (p->fd_cnt > q->fd_cnt) {
      GPR_SWAP(polling_island *, p, q);
    }
    /* Merge p with q i.e move all the fds from p (The one with fewer fds) to q
       Note that the refcounts on the fds being moved will not change here.
       This is why the last param in the following two functions is 'false') */
    polling_island_add_fds_locked(q, p->fds, p->fd_cnt, false, error);
    polling_island_remove_all_fds_locked(p, false, error);
    /* Wakeup all the pollers (if any) on p so that they pickup this change */
    polling_island_add_wakeup_fd_locked(p, &polling_island_wakeup_fd, error);
    /* Add the 'merged_to' link from p --> q */
    gpr_atm_rel_store(&p->merged_to, (gpr_atm)q);
    PI_ADD_REF(q, "pi_merge"); /* To account for the new incoming ref from p */
    workqueue_move_items_to_parent(p);
  }
  /* else if p == q, nothing needs to be done */
  polling_island_unlock_pair(p, q);
  /* Return the merged polling island (Note that no merge would have happened
     if p == q which is ok) */
  return q;
}
/* Scheduler callback for closures bound to a polling island's workqueue:
   pushes 'closure' (carrying 'error') onto the island's MPSC queue and wakes
   a poller when the queue goes from empty to non-empty. */
static void workqueue_enqueue(grpc_exec_ctx *exec_ctx, grpc_closure *closure,
                              grpc_error *error) {
  GPR_TIMER_BEGIN("workqueue.enqueue", 0);
  grpc_workqueue *workqueue = (grpc_workqueue *)closure->scheduler;
  /* take a ref to the workqueue: otherwise it can happen that whatever events
   * this kicks off ends up destroying the workqueue before this function
   * completes */
  GRPC_WORKQUEUE_REF(workqueue, "enqueue");
  polling_island *pi = (polling_island *)workqueue;
  gpr_atm last = gpr_atm_no_barrier_fetch_add(&pi->workqueue_item_count, 1);
  closure->error_data.error = error;
  gpr_mpscq_push(&pi->workqueue_items, &closure->next_data.atm_next);
  /* 'last' is the pre-increment count: 0 means the queue was empty */
  if (last == 0) {
    workqueue_maybe_wakeup(pi);
  }
  workqueue_move_items_to_parent(pi);
  GRPC_WORKQUEUE_UNREF(exec_ctx, workqueue, "enqueue");
  GPR_TIMER_END("workqueue.enqueue", 0);
}
/* Returns the closure scheduler for 'workqueue'; a NULL workqueue maps to
   the default exec_ctx scheduler. */
static grpc_closure_scheduler *workqueue_scheduler(grpc_workqueue *workqueue) {
  if (workqueue == NULL) {
    return grpc_schedule_on_exec_ctx;
  }
  return &((polling_island *)workqueue)->workqueue_scheduler;
}
/* Initializes the island-merge wakeup fd and leaves it in the signalled
   state (it must never be consumed; see polling_island_wakeup_fd's comment).
   Returns the first error encountered, if any. */
static grpc_error *polling_island_global_init() {
  grpc_error *err = grpc_wakeup_fd_init(&polling_island_wakeup_fd);
  if (err != GRPC_ERROR_NONE) {
    return err;
  }
  return grpc_wakeup_fd_wakeup(&polling_island_wakeup_fd);
}
/* Tears down the island-merge wakeup fd created in
   polling_island_global_init(). */
static void polling_island_global_shutdown() {
  grpc_wakeup_fd_destroy(&polling_island_wakeup_fd);
}
/*******************************************************************************
* Fd Definitions
*/
/* We need to keep a freelist not because of any concerns of malloc performance
* but instead so that implementations with multiple threads in (for example)
* epoll_wait deal with the race between pollset removal and incoming poll
* notifications.
*
* The problem is that the poller ultimately holds a reference to this
* object, so it is very difficult to know when is safe to free it, at least
* without some expensive synchronization.
*
* If we keep the object freelisted, in the worst case losing this race just
* becomes a spurious read notification on a reused fd.
*/
/* The alarm system needs to be able to wakeup 'some poller' sometimes
* (specifically when a new alarm needs to be triggered earlier than the next
* alarm 'epoch'). This wakeup_fd gives us something to alert on when such a
* case occurs. */
static grpc_fd *fd_freelist = NULL;
static gpr_mu fd_freelist_mu;
/* Low-level refcount helpers for grpc_fd: 'n' is added/subtracted directly
   from fd->refst. Callers pass even 'n' (REF_BY/UNREF_BY use 2) so that
   bit 0 — the active/orphaned bit — is never disturbed (see the refst format
   comment on struct grpc_fd). */
#ifdef GRPC_FD_REF_COUNT_DEBUG
#define REF_BY(fd, n, reason) ref_by(fd, n, reason, __FILE__, __LINE__)
#define UNREF_BY(fd, n, reason) unref_by(fd, n, reason, __FILE__, __LINE__)
static void ref_by(grpc_fd *fd, int n, const char *reason, const char *file,
                   int line) {
  gpr_log(GPR_DEBUG, "FD %d %p   ref %d %ld -> %ld [%s; %s:%d]", fd->fd,
          (void *)fd, n, gpr_atm_no_barrier_load(&fd->refst),
          gpr_atm_no_barrier_load(&fd->refst) + n, reason, file, line);
#else
#define REF_BY(fd, n, reason) ref_by(fd, n)
#define UNREF_BY(fd, n, reason) unref_by(fd, n)
static void ref_by(grpc_fd *fd, int n) {
#endif
  /* The fd must still be referenced (refst > 0) when taking another ref */
  GPR_ASSERT(gpr_atm_no_barrier_fetch_add(&fd->refst, n) > 0);
}
#ifdef GRPC_FD_REF_COUNT_DEBUG
static void unref_by(grpc_fd *fd, int n, const char *reason, const char *file,
                     int line) {
  gpr_atm old;
  gpr_log(GPR_DEBUG, "FD %d %p unref %d %ld -> %ld [%s; %s:%d]", fd->fd,
          (void *)fd, n, gpr_atm_no_barrier_load(&fd->refst),
          gpr_atm_no_barrier_load(&fd->refst) - n, reason, file, line);
#else
static void unref_by(grpc_fd *fd, int n) {
  gpr_atm old;
#endif
  old = gpr_atm_full_fetch_add(&fd->refst, -n);
  if (old == n) {
    /* Last ref dropped: return the struct to the freelist (it is recycled,
       not freed — see the freelist rationale comment above) */
    gpr_mu_lock(&fd_freelist_mu);
    fd->freelist_next = fd_freelist;
    fd_freelist = fd;
    grpc_iomgr_unregister_object(&fd->iomgr_object);
    grpc_error *err = (grpc_error *)gpr_atm_acq_load(&fd->shutdown_error);
    /* Clear the least significant bit if it is set (in case fd was shutdown) */
    err = (grpc_error *)((intptr_t)err & ~FD_SHUTDOWN_BIT);
    GRPC_ERROR_UNREF(err);
    gpr_mu_unlock(&fd_freelist_mu);
  } else {
    GPR_ASSERT(old > n);
  }
}
/* Public fd ref/unref: increment/decrement the refcount by two so that the
   orphan bit (bit 0 of refst) is left untouched. */
#ifdef GRPC_FD_REF_COUNT_DEBUG
static void fd_ref(grpc_fd *fd, const char *reason, const char *file,
                   int line) {
  ref_by(fd, 2, reason, file, line);
}
static void fd_unref(grpc_fd *fd, const char *reason, const char *file,
                     int line) {
  unref_by(fd, 2, reason, file, line);
}
#else
static void fd_ref(grpc_fd *fd) { ref_by(fd, 2); }
static void fd_unref(grpc_fd *fd) { unref_by(fd, 2); }
#endif
/* Initializes the mutex protecting the grpc_fd freelist. */
static void fd_global_init(void) { gpr_mu_init(&fd_freelist_mu); }
/* Frees every grpc_fd struct on the freelist and destroys the freelist
   mutex. */
static void fd_global_shutdown(void) {
  /* NOTE(review): this empty lock/unlock pair looks like a barrier ensuring
     any in-flight freelist operation has finished before teardown — confirm
     before changing. */
  gpr_mu_lock(&fd_freelist_mu);
  gpr_mu_unlock(&fd_freelist_mu);
  while (fd_freelist != NULL) {
    grpc_fd *fd = fd_freelist;
    fd_freelist = fd_freelist->freelist_next;
    gpr_mu_destroy(&fd->po.mu);
    gpr_free(fd);
  }
  gpr_mu_destroy(&fd_freelist_mu);
}
/* Creates a grpc_fd wrapping the OS descriptor 'fd'. Reuses a struct from
   the freelist when one is available, otherwise allocates a fresh one.
   'name' (with the fd number appended) is used to register the fd with
   iomgr. The new fd starts with no polling island, no pending closures and
   one active ref. */
static grpc_fd *fd_create(int fd, const char *name) {
  grpc_fd *new_fd = NULL;
  gpr_mu_lock(&fd_freelist_mu);
  if (fd_freelist != NULL) {
    new_fd = fd_freelist;
    fd_freelist = fd_freelist->freelist_next;
  }
  gpr_mu_unlock(&fd_freelist_mu);
  if (new_fd == NULL) {
    new_fd = gpr_malloc(sizeof(grpc_fd));
    gpr_mu_init(&new_fd->po.mu);
  }
  /* Note: It is not really needed to get the new_fd->po.mu lock here. If this
   * is a newly created fd (or an fd we got from the freelist), no one else
   * would be holding a lock to it anyway. */
  gpr_mu_lock(&new_fd->po.mu);
  new_fd->po.pi = NULL;
#ifdef PO_DEBUG
  new_fd->po.obj_type = POLL_OBJ_FD;
#endif
  /* refst == 1: mark active (bit 0); see the refst format on struct grpc_fd */
  gpr_atm_rel_store(&new_fd->refst, (gpr_atm)1);
  new_fd->fd = fd;
  gpr_atm_no_barrier_store(&new_fd->shutdown_error, (gpr_atm)GRPC_ERROR_NONE);
  new_fd->orphaned = false;
  gpr_atm_no_barrier_store(&new_fd->read_closure, CLOSURE_NOT_READY);
  gpr_atm_no_barrier_store(&new_fd->write_closure, CLOSURE_NOT_READY);
  gpr_atm_no_barrier_store(&new_fd->read_notifier_pollset, (gpr_atm)NULL);
  new_fd->freelist_next = NULL;
  new_fd->on_done_closure = NULL;
  gpr_mu_unlock(&new_fd->po.mu);
  char *fd_name;
  gpr_asprintf(&fd_name, "%s fd=%d", name, fd);
  grpc_iomgr_register_object(&new_fd->iomgr_object, fd_name);
#ifdef GRPC_FD_REF_COUNT_DEBUG
  gpr_log(GPR_DEBUG, "FD %d %p create %s", fd, (void *)new_fd, fd_name);
#endif
  gpr_free(fd_name);
  return new_fd;
}
/* Returns the underlying OS descriptor, or -1 once the fd has been
   orphaned. */
static int fd_wrapped_fd(grpc_fd *fd) {
  gpr_mu_lock(&fd->po.mu);
  int result = fd->orphaned ? -1 : fd->fd;
  gpr_mu_unlock(&fd->po.mu);
  return result;
}
/* Releases fd's resources: closes the underlying descriptor (or, when
   'release_fd' is non-NULL, hands it back to the caller instead), removes the
   fd from its polling island, and schedules 'on_done' with the resulting
   error status. The grpc_fd struct itself is recycled via its refcount. */
static void fd_orphan(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
                      grpc_closure *on_done, int *release_fd,
                      const char *reason) {
  bool is_fd_closed = false;
  grpc_error *error = GRPC_ERROR_NONE;
  polling_island *unref_pi = NULL;
  gpr_mu_lock(&fd->po.mu);
  fd->on_done_closure = on_done;
  /* If release_fd is not NULL, we should be relinquishing control of the file
     descriptor fd->fd (but we still own the grpc_fd structure). */
  if (release_fd != NULL) {
    *release_fd = fd->fd;
  } else {
    close(fd->fd);
    is_fd_closed = true;
  }
  fd->orphaned = true;
  /* Remove the active status but keep referenced. We want this grpc_fd struct
     to be alive (and not added to freelist) until the end of this function */
  REF_BY(fd, 1, reason);
  /* Remove the fd from the polling island:
     - Get a lock on the latest polling island (i.e the last island in the
       linked list pointed by fd->po.pi). This is the island that
       would actually contain the fd
     - Remove the fd from the latest polling island
     - Unlock the latest polling island
     - Set fd->po.pi to NULL (but remove the ref on the polling island
       before doing this.) */
  if (fd->po.pi != NULL) {
    polling_island *pi_latest = polling_island_lock(fd->po.pi);
    polling_island_remove_fd_locked(pi_latest, fd, is_fd_closed, &error);
    gpr_mu_unlock(&pi_latest->mu);
    unref_pi = fd->po.pi;
    fd->po.pi = NULL;
  }
  grpc_closure_sched(exec_ctx, fd->on_done_closure, GRPC_ERROR_REF(error));
  gpr_mu_unlock(&fd->po.mu);
  /* Drops the active bit (1) plus the temporary ref taken above (net -2) */
  UNREF_BY(fd, 2, reason); /* Drop the reference */
  if (unref_pi != NULL) {
    /* Unref stale polling island here, outside the fd lock above.
       The polling island owns a workqueue which owns an fd, and unreffing
       inside the lock can cause an eventual lock loop that makes TSAN very
       unhappy. */
    PI_UNREF(exec_ctx, unref_pi, "fd_orphan");
  }
  GRPC_LOG_IF_ERROR("fd_orphan", GRPC_ERROR_REF(error));
  GRPC_ERROR_UNREF(error);
}
/* Lock-free registration of 'closure' on the readiness state machine 'state'
   (either fd->read_closure or fd->write_closure). 'state' holds one of:
   CLOSURE_NOT_READY, CLOSURE_READY, a registered closure pointer, or a
   shutdown-error pointer tagged with FD_SHUTDOWN_BIT. The closure is either
   parked in 'state' (to be scheduled later by set_ready/set_shutdown) or
   scheduled immediately if the state is already ready or shutdown. */
static void notify_on(grpc_exec_ctx *exec_ctx, grpc_fd *fd, gpr_atm *state,
                      grpc_closure *closure) {
  while (true) {
    gpr_atm curr = gpr_atm_no_barrier_load(state);
    switch (curr) {
      case CLOSURE_NOT_READY: {
        /* CLOSURE_NOT_READY -> <closure>.
           We're guaranteed by API that there's an acquire barrier before here,
           so there's no need to double-dip and this can be a release-only.
           The release itself pairs with the acquire half of a set_ready full
           barrier. */
        if (gpr_atm_rel_cas(state, CLOSURE_NOT_READY, (gpr_atm)closure)) {
          return; /* Successful. Return */
        }
        break; /* retry */
      }
      case CLOSURE_READY: {
        /* Change the state to CLOSURE_NOT_READY. Schedule the closure if
           successful. If not, the state most likely transitioned to shutdown.
           We should retry.
           This can be a no-barrier cas since the state is being transitioned to
           CLOSURE_NOT_READY; set_ready and set_shutdown do not schedule any
           closure when transitioning out of CLOSURE_NO_READY state (i.e there
           is no other code that needs to 'happen-after' this) */
        if (gpr_atm_no_barrier_cas(state, CLOSURE_READY, CLOSURE_NOT_READY)) {
          grpc_closure_sched(exec_ctx, closure, GRPC_ERROR_NONE);
          return; /* Successful. Return */
        }
        break; /* retry */
      }
      default: {
        /* 'curr' is either a closure or the fd is shutdown(in which case 'curr'
           contains a pointer to the shutdown-error). If the fd is shutdown,
           schedule the closure with the shutdown error */
        if ((curr & FD_SHUTDOWN_BIT) > 0) {
          grpc_error *shutdown_err = (grpc_error *)(curr & ~FD_SHUTDOWN_BIT);
          grpc_closure_sched(exec_ctx, closure,
                             GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
                                 "FD Shutdown", &shutdown_err, 1));
          return;
        }
        /* There is already a closure!. This indicates a bug in the code */
        gpr_log(GPR_ERROR,
                "notify_on called with a previous callback still pending");
        abort();
      }
    }
  }
  GPR_UNREACHABLE_CODE(return );
}
/* Transition 'state' to the shutdown state (shutdown_err | FD_SHUTDOWN_BIT).
   If a closure was registered via notify_on, it is scheduled with an error
   wrapping 'shutdown_err'. Idempotent: a no-op if 'state' is already in the
   shutdown state. */
static void set_shutdown(grpc_exec_ctx *exec_ctx, grpc_fd *fd, gpr_atm *state,
                         grpc_error *shutdown_err) {
  gpr_atm new_state = (gpr_atm)shutdown_err | FD_SHUTDOWN_BIT;
  while (true) {
    gpr_atm curr = gpr_atm_no_barrier_load(state);
    switch (curr) {
      case CLOSURE_READY:
      case CLOSURE_NOT_READY:
        /* Need a full barrier here so that the initial load in notify_on
           doesn't need a barrier */
        if (gpr_atm_full_cas(state, curr, new_state)) {
          return; /* early out */
        }
        break; /* retry */
      default: {
        /* 'curr' is either a closure or the fd is already shutdown */
        /* If fd is already shutdown, we are done */
        if ((curr & FD_SHUTDOWN_BIT) > 0) {
          return;
        }
        /* Fd is not shutdown. Schedule the closure and move the state to
           shutdown state.
           Needs an acquire to pair with setting the closure (and get a
           happens-after on that edge), and a release to pair with anything
           loading the shutdown state. */
        if (gpr_atm_full_cas(state, curr, new_state)) {
          grpc_closure_sched(exec_ctx, (grpc_closure *)curr,
                             GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
                                 "FD Shutdown", &shutdown_err, 1));
          return;
        }
        /* 'curr' was a closure but now changed to a different state. We will
          have to retry */
        break;
      }
    }
  }
  GPR_UNREACHABLE_CODE(return );
}
/* Transition 'state' to CLOSURE_READY, or - if a closure is already
   registered - schedule that closure and reset the state to
   CLOSURE_NOT_READY. A no-op if the state is already ready or shutdown. */
static void set_ready(grpc_exec_ctx *exec_ctx, grpc_fd *fd, gpr_atm *state) {
  while (true) {
    gpr_atm curr = gpr_atm_no_barrier_load(state);
    switch (curr) {
      case CLOSURE_READY: {
        /* Already ready. We are done here */
        return;
      }
      case CLOSURE_NOT_READY: {
        /* No barrier required as we're transitioning to a state that does not
           involve a closure */
        if (gpr_atm_no_barrier_cas(state, CLOSURE_NOT_READY, CLOSURE_READY)) {
          return; /* early out */
        }
        break; /* retry */
      }
      default: {
        /* 'curr' is either a closure or the fd is shutdown */
        if ((curr & FD_SHUTDOWN_BIT) > 0) {
          /* The fd is shutdown. Do nothing */
          return;
        }
        /* Full cas: acquire pairs with this cas' release in the event of a
           spurious set_ready; release pairs with this or the acquire in
           notify_on (or set_shutdown) */
        else if (gpr_atm_full_cas(state, curr, CLOSURE_NOT_READY)) {
          grpc_closure_sched(exec_ctx, (grpc_closure *)curr, GRPC_ERROR_NONE);
          return;
        }
        /* else the state changed again (only possible by either a racing
           set_ready or set_shutdown functions. In both these cases, the closure
           would have been scheduled for execution. So we are done here */
        return;
      }
    }
  }
}
/* Return the pollset that most recently signalled readability for 'fd'
   (NULL if the fd has never become readable). Acquire-load pairs with the
   release-store in fd_become_readable. */
static grpc_pollset *fd_get_read_notifier_pollset(grpc_exec_ctx *exec_ctx,
                                                  grpc_pollset *fd_unused_sig_keeper);
static grpc_pollset *fd_get_read_notifier_pollset(grpc_exec_ctx *exec_ctx,
                                                  grpc_fd *fd) {
  return (grpc_pollset *)gpr_atm_acq_load(&fd->read_notifier_pollset);
}
/* True iff fd_shutdown has been called on 'fd' (the shutdown-error slot
   carries the FD_SHUTDOWN_BIT tag once shutdown). */
static bool fd_is_shutdown(grpc_fd *fd) {
  intptr_t err_bits = (intptr_t)gpr_atm_acq_load(&fd->shutdown_error);
  return (err_bits & FD_SHUTDOWN_BIT) != 0;
}
/* Might be called multiple times. Shuts down both directions of the socket
   and wakes any pending read/write closures with 'why'. Only the first call
   wins the CAS and performs the shutdown; later calls just drop their ref on
   'why'. */
static void fd_shutdown(grpc_exec_ctx *exec_ctx, grpc_fd *fd, grpc_error *why) {
  /* Store the shutdown error ORed with FD_SHUTDOWN_BIT in fd->shutdown_error */
  if (gpr_atm_rel_cas(&fd->shutdown_error, (gpr_atm)GRPC_ERROR_NONE,
                      (gpr_atm)why | FD_SHUTDOWN_BIT)) {
    shutdown(fd->fd, SHUT_RDWR);
    /* Fail any pending (and all future) read/write notifications */
    set_shutdown(exec_ctx, fd, &fd->read_closure, why);
    set_shutdown(exec_ctx, fd, &fd->write_closure, why);
  } else {
    /* Shutdown already called */
    GRPC_ERROR_UNREF(why);
  }
}
/* Register 'closure' to run when 'fd' becomes readable (or immediately if it
   already is, or with an error if the fd is shutdown). */
static void fd_notify_on_read(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
                              grpc_closure *closure) {
  gpr_atm *read_state = &fd->read_closure;
  notify_on(exec_ctx, fd, read_state, closure);
}
/* Register 'closure' to run when 'fd' becomes writable (or immediately if it
   already is, or with an error if the fd is shutdown). */
static void fd_notify_on_write(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
                               grpc_closure *closure) {
  gpr_atm *write_state = &fd->write_closure;
  notify_on(exec_ctx, fd, write_state, closure);
}
/* Take (and return) a new reference on the workqueue backing this fd's
   polling island. The fd lock guards the po.pi read. */
static grpc_workqueue *fd_get_workqueue(grpc_fd *fd) {
  grpc_workqueue *result;
  gpr_mu_lock(&fd->po.mu);
  result = GRPC_WORKQUEUE_REF((grpc_workqueue *)fd->po.pi, "fd_get_workqueue");
  gpr_mu_unlock(&fd->po.mu);
  return result;
}
/*******************************************************************************
 * Pollset Definitions
 */
/* Thread-local: the pollset this thread is currently polling in, if any
   (consulted by pollset_kick to avoid self-kicks). */
GPR_TLS_DECL(g_current_thread_pollset);
/* Thread-local: the worker struct this thread is currently running as. */
GPR_TLS_DECL(g_current_thread_worker);
/* Thread-local: whether g_orig_sigmask below has been initialized for this
   thread (done lazily on first pollset_work). */
static __thread bool g_initialized_sigmask;
/* Thread-local: the signal mask to use *during* epoll_pwait(), i.e. with
   grpc_wakeup_signal unblocked. */
static __thread sigset_t g_orig_sigmask;
/* Handler for grpc_wakeup_signal. Intentionally (almost) a no-op: the signal
   exists only to interrupt a blocking epoll_pwait() with EINTR. */
static void sig_handler(int sig_num) {
#ifdef GRPC_EPOLL_DEBUG
  gpr_log(GPR_INFO, "Received signal %d", sig_num);
#endif
}
/* Install the (effectively no-op) handler for the wakeup signal so that
   pthread_kill() interrupts epoll_pwait() without terminating the process. */
static void poller_kick_init() {
  signal(grpc_wakeup_signal, sig_handler);
}
/* Global state management */
/* One-time process-wide pollset setup: thread-local slots, the wakeup-signal
   handler, and the global wakeup fd. Returns any error from wakeup-fd init. */
static grpc_error *pollset_global_init(void) {
  gpr_tls_init(&g_current_thread_pollset);
  gpr_tls_init(&g_current_thread_worker);
  poller_kick_init();
  return grpc_wakeup_fd_init(&global_wakeup_fd);
}
/* Tear down the process-wide pollset state created by pollset_global_init. */
static void pollset_global_shutdown(void) {
  grpc_wakeup_fd_destroy(&global_wakeup_fd);
  gpr_tls_destroy(&g_current_thread_pollset);
  gpr_tls_destroy(&g_current_thread_worker);
}
/* Kick a single worker by sending grpc_wakeup_signal to its thread, which
   interrupts its epoll_pwait(). The is_kicked CAS makes kicks idempotent:
   at most one signal is sent per worker per poll cycle. */
static grpc_error *pollset_worker_kick(grpc_pollset_worker *worker) {
  grpc_error *err = GRPC_ERROR_NONE;
  /* Kick the worker only if it was not already kicked */
  if (gpr_atm_no_barrier_cas(&worker->is_kicked, (gpr_atm)0, (gpr_atm)1)) {
    GRPC_POLLING_TRACE(
        "pollset_worker_kick: Kicking worker: %p (thread id: %ld)",
        (void *)worker, worker->pt_id);
    int err_num = pthread_kill(worker->pt_id, grpc_wakeup_signal);
    if (err_num != 0) {
      err = GRPC_OS_ERROR(err_num, "pthread_kill");
    }
  }
  return err;
}
/* Return 1 if the pollset has active threads in pollset_work (pollset must
 * be locked). The worker list is a circular doubly-linked list rooted at
 * root_worker; it is empty exactly when the root points to itself. */
static int pollset_has_workers(grpc_pollset *p) {
  grpc_pollset_worker *root = &p->root_worker;
  return root->next != root;
}
/* Unlink 'worker' from its pollset's circular worker list (p->mu held). */
static void remove_worker(grpc_pollset *p, grpc_pollset_worker *worker) {
  grpc_pollset_worker *prev = worker->prev;
  grpc_pollset_worker *next = worker->next;
  prev->next = next;
  next->prev = prev;
}
/* Remove and return the first worker in the list, or NULL if none
   (p->mu held). */
static grpc_pollset_worker *pop_front_worker(grpc_pollset *p) {
  if (!pollset_has_workers(p)) {
    return NULL;
  }
  grpc_pollset_worker *front = p->root_worker.next;
  remove_worker(p, front);
  return front;
}
/* Append 'worker' at the tail of the circular worker list (p->mu held). */
static void push_back_worker(grpc_pollset *p, grpc_pollset_worker *worker) {
  grpc_pollset_worker *root = &p->root_worker;
  worker->next = root;
  worker->prev = root->prev;
  root->prev->next = worker;
  root->prev = worker;
}
/* Insert 'worker' at the head of the circular worker list (p->mu held). */
static void push_front_worker(grpc_pollset *p, grpc_pollset_worker *worker) {
  grpc_pollset_worker *root = &p->root_worker;
  worker->prev = root;
  worker->next = root->next;
  root->next->prev = worker;
  root->next = worker;
}
/* p->mu must be held before calling this function */
/* Wake up worker(s) blocked in pollset_work on 'p'. specific_worker may be:
   - a concrete worker: kick just that worker (unless it is this thread);
   - GRPC_POLLSET_KICK_BROADCAST: kick every worker, or record
     kicked_without_pollers if there are none;
   - NULL: kick any one worker, rotating it to the back of the list for
     fairness, unless this thread is itself polling 'p'. */
static grpc_error *pollset_kick(grpc_pollset *p,
                                grpc_pollset_worker *specific_worker) {
  GPR_TIMER_BEGIN("pollset_kick", 0);
  grpc_error *error = GRPC_ERROR_NONE;
  const char *err_desc = "Kick Failure";
  grpc_pollset_worker *worker = specific_worker;
  if (worker != NULL) {
    if (worker == GRPC_POLLSET_KICK_BROADCAST) {
      if (pollset_has_workers(p)) {
        GPR_TIMER_BEGIN("pollset_kick.broadcast", 0);
        /* Kick every worker except the calling thread's own (it can absorb
           the kick itself) */
        for (worker = p->root_worker.next; worker != &p->root_worker;
             worker = worker->next) {
          if (gpr_tls_get(&g_current_thread_worker) != (intptr_t)worker) {
            append_error(&error, pollset_worker_kick(worker), err_desc);
          }
        }
        GPR_TIMER_END("pollset_kick.broadcast", 0);
      } else {
        p->kicked_without_pollers = true;
      }
    } else {
      GPR_TIMER_MARK("kicked_specifically", 0);
      if (gpr_tls_get(&g_current_thread_worker) != (intptr_t)worker) {
        append_error(&error, pollset_worker_kick(worker), err_desc);
      }
    }
  } else if (gpr_tls_get(&g_current_thread_pollset) != (intptr_t)p) {
    /* Since worker == NULL, it means that we can kick "any" worker on this
       pollset 'p'. If 'p' happens to be the same pollset this thread is
       currently polling (i.e in pollset_work() function), then there is no need
       to kick any other worker since the current thread can just absorb the
       kick. This is the reason why we enter this case only when
       g_current_thread_pollset is != p */
    GPR_TIMER_MARK("kick_anonymous", 0);
    worker = pop_front_worker(p);
    if (worker != NULL) {
      GPR_TIMER_MARK("finally_kick", 0);
      push_back_worker(p, worker);
      append_error(&error, pollset_worker_kick(worker), err_desc);
    } else {
      GPR_TIMER_MARK("kicked_no_pollers", 0);
      p->kicked_without_pollers = true;
    }
  }
  GPR_TIMER_END("pollset_kick", 0);
  GRPC_LOG_IF_ERROR("pollset_kick", GRPC_ERROR_REF(error));
  return error;
}
/* Wake any thread blocked on the process-wide wakeup fd. */
static grpc_error *kick_poller(void) {
  grpc_error *err = grpc_wakeup_fd_wakeup(&global_wakeup_fd);
  return err;
}
/* Initialize a pollset in-place: no polling island yet, empty circular
   worker list, all shutdown flags cleared. The pollset's mutex is handed
   back through 'mu' so the caller/iomgr can lock it externally. */
static void pollset_init(grpc_pollset *pollset, gpr_mu **mu) {
  gpr_mu_init(&pollset->po.mu);
  *mu = &pollset->po.mu;
  pollset->po.pi = NULL;
#ifdef PO_DEBUG
  pollset->po.obj_type = POLL_OBJ_POLLSET;
#endif
  /* Empty list: root points at itself */
  pollset->root_worker.next = pollset->root_worker.prev = &pollset->root_worker;
  pollset->kicked_without_pollers = false;
  pollset->shutting_down = false;
  pollset->finish_shutdown_called = false;
  pollset->shutdown_done = NULL;
}
/* Convert a timespec to milliseconds:
   - Very small or negative poll times are clamped to zero to do a non-blocking
     poll (which becomes spin polling)
   - Other small values are rounded up to one millisecond
   - Longer than a millisecond polls are rounded up to the next nearest
     millisecond to avoid spinning
   - Infinite timeouts are converted to -1 */
static int poll_deadline_to_millis_timeout(gpr_timespec deadline,
                                           gpr_timespec now) {
  static const int64_t max_spin_polling_us = 10;
  /* Infinite deadline -> block indefinitely in epoll */
  if (gpr_time_cmp(deadline, gpr_inf_future(deadline.clock_type)) == 0) {
    return -1;
  }
  /* Deadline within the spin-polling window (or already past) -> poll
     without blocking */
  gpr_timespec spin_threshold = gpr_time_add(
      now, gpr_time_from_micros(max_spin_polling_us, GPR_TIMESPAN));
  if (gpr_time_cmp(deadline, spin_threshold) <= 0) {
    return 0;
  }
  /* Round the remaining time up to the next whole millisecond */
  gpr_timespec remaining = gpr_time_sub(deadline, now);
  gpr_timespec rounded = gpr_time_add(
      remaining, gpr_time_from_nanos(GPR_NS_PER_MS - 1, GPR_TIMESPAN));
  return gpr_time_to_millis(rounded);
}
/* Called from the poll loop when epoll reports 'fd' readable: fires any
   pending read closure and records 'notifier' as the pollset that observed
   the readiness. */
static void fd_become_readable(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
                               grpc_pollset *notifier) {
  set_ready(exec_ctx, fd, &fd->read_closure);
  /* Note, it is possible that fd_become_readable might be called twice with
     different 'notifier's when an fd becomes readable and it is in two epoll
     sets (This can happen briefly during polling island merges). In such cases
     it does not really matter which notifer is set as the read_notifier_pollset
     (They would both point to the same polling island anyway) */
  /* Use release store to match with acquire load in fd_get_read_notifier */
  gpr_atm_rel_store(&fd->read_notifier_pollset, (gpr_atm)notifier);
}
/* Called from the poll loop when epoll reports 'fd' writable: fires any
   pending write closure. */
static void fd_become_writable(grpc_exec_ctx *exec_ctx, grpc_fd *fd) {
  gpr_atm *write_state = &fd->write_closure;
  set_ready(exec_ctx, fd, write_state);
}
/* Drop the pollset's reference on its polling island (if any) and clear the
   pointer. */
static void pollset_release_polling_island(grpc_exec_ctx *exec_ctx,
                                           grpc_pollset *ps, char *reason) {
  polling_island *pi = ps->po.pi;
  ps->po.pi = NULL;
  if (pi != NULL) {
    PI_UNREF(exec_ctx, pi, reason);
  }
}
/* Final step of pollset shutdown, run exactly once when the last worker has
   left: release the polling island and fire the shutdown_done closure.
   pollset->po.mu must be held. */
static void finish_shutdown_locked(grpc_exec_ctx *exec_ctx,
                                   grpc_pollset *pollset) {
  /* The pollset cannot have any workers if we are at this stage */
  GPR_ASSERT(!pollset_has_workers(pollset));
  pollset->finish_shutdown_called = true;
  /* Release the ref and set pollset->po.pi to NULL */
  pollset_release_polling_island(exec_ctx, pollset, "ps_shutdown");
  grpc_closure_sched(exec_ctx, pollset->shutdown_done, GRPC_ERROR_NONE);
}
/* pollset->po.mu lock must be held by the caller before calling this */
/* Begin shutting down 'pollset': kicks all workers out of their poll loops
   and arranges for 'closure' to run once shutdown completes (immediately if
   no workers are active, otherwise from the last worker's pollset_work). */
static void pollset_shutdown(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
                             grpc_closure *closure) {
  GPR_TIMER_BEGIN("pollset_shutdown", 0);
  GPR_ASSERT(!pollset->shutting_down);
  pollset->shutting_down = true;
  pollset->shutdown_done = closure;
  pollset_kick(pollset, GRPC_POLLSET_KICK_BROADCAST);
  /* If the pollset has any workers, we cannot call finish_shutdown_locked()
     because it would release the underlying polling island. In such a case, we
     let the last worker call finish_shutdown_locked() from pollset_work() */
  if (!pollset_has_workers(pollset)) {
    GPR_ASSERT(!pollset->finish_shutdown_called);
    GPR_TIMER_MARK("pollset_shutdown.finish_shutdown_locked", 0);
    finish_shutdown_locked(exec_ctx, pollset);
  }
  GPR_TIMER_END("pollset_shutdown", 0);
}
/* pollset_shutdown is guaranteed to be called before pollset_destroy. So other
 * than destroying the mutexes, there is nothing special that needs to be done
 * here (the polling-island ref was already dropped in finish_shutdown_locked).
 */
static void pollset_destroy(grpc_pollset *pollset) {
  GPR_ASSERT(!pollset_has_workers(pollset));
  gpr_mu_destroy(&pollset->po.mu);
}
/* Pop and execute at most one closure from the polling island's workqueue.
   Returns true iff a closure was run (the caller then re-checks for work
   before blocking in epoll). Never blocks: if another thread holds the
   workqueue read lock, this bails out and returns false, relying on that
   thread (or a wakeup) to drain the queue. */
static bool maybe_do_workqueue_work(grpc_exec_ctx *exec_ctx,
                                    polling_island *pi) {
  if (gpr_mu_trylock(&pi->workqueue_read_mu)) {
    gpr_mpscq_node *n = gpr_mpscq_pop(&pi->workqueue_items);
    gpr_mu_unlock(&pi->workqueue_read_mu);
    if (n != NULL) {
      /* More items remain: wake another poller to help drain them */
      if (gpr_atm_full_fetch_add(&pi->workqueue_item_count, -1) > 1) {
        workqueue_maybe_wakeup(pi);
      }
      grpc_closure *c = (grpc_closure *)n;
      grpc_error *error = c->error_data.error;
      c->cb(exec_ctx, c->cb_arg, error);
      GRPC_ERROR_UNREF(error);
      return true;
    } else if (gpr_atm_no_barrier_load(&pi->workqueue_item_count) > 0) {
      /* n == NULL might mean there's work but it's not available to be popped
       * yet - try to ensure another workqueue wakes up to check shortly if so
       */
      workqueue_maybe_wakeup(pi);
    }
  }
  return false;
}
#define GRPC_EPOLL_MAX_EVENTS 100
/* Note: sig_mask contains the signal mask to use *during* epoll_wait() */
/* One iteration of the poll loop for 'worker': drains one workqueue item if
   available, otherwise does an epoll_pwait() on the pollset's (latest)
   polling island and dispatches read/write readiness for each returned
   event. Called with pollset->po.mu held; ALWAYS unlocks it before polling
   and returns with it unlocked.
   Fix: the error message built with gpr_asprintf() was leaked; GRPC_OS_ERROR
   copies the message into the grpc_error, so the caller must gpr_free() the
   buffer (as the other gpr_asprintf call sites in this file do). */
static void pollset_work_and_unlock(grpc_exec_ctx *exec_ctx,
                                    grpc_pollset *pollset,
                                    grpc_pollset_worker *worker, int timeout_ms,
                                    sigset_t *sig_mask, grpc_error **error) {
  struct epoll_event ep_ev[GRPC_EPOLL_MAX_EVENTS];
  int epoll_fd = -1;
  int ep_rv;
  polling_island *pi = NULL;
  char *err_msg;
  const char *err_desc = "pollset_work_and_unlock";
  GPR_TIMER_BEGIN("pollset_work_and_unlock", 0);
  /* We need to get the epoll_fd to wait on. The epoll_fd is in inside the
     latest polling island pointed by pollset->po.pi
     Since epoll_fd is immutable, we can read it without obtaining the polling
     island lock. There is however a possibility that the polling island (from
     which we got the epoll_fd) got merged with another island while we are
     in this function. This is still okay because in such a case, we will wakeup
     right-away from epoll_wait() and pick up the latest polling_island the next
     this function (i.e pollset_work_and_unlock()) is called */
  if (pollset->po.pi == NULL) {
    pollset->po.pi = polling_island_create(exec_ctx, NULL, error);
    if (pollset->po.pi == NULL) {
      GPR_TIMER_END("pollset_work_and_unlock", 0);
      return; /* Fatal error. We cannot continue */
    }
    PI_ADD_REF(pollset->po.pi, "ps");
    GRPC_POLLING_TRACE("pollset_work: pollset: %p created new pi: %p",
                       (void *)pollset, (void *)pollset->po.pi);
  }
  pi = polling_island_maybe_get_latest(pollset->po.pi);
  epoll_fd = pi->epoll_fd;
  /* Update the pollset->po.pi since the island being pointed by
     pollset->po.pi maybe older than the one pointed by pi) */
  if (pollset->po.pi != pi) {
    /* Always do PI_ADD_REF before PI_UNREF because PI_UNREF may cause the
       polling island to be deleted */
    PI_ADD_REF(pi, "ps");
    PI_UNREF(exec_ctx, pollset->po.pi, "ps");
    pollset->po.pi = pi;
  }
  /* Add an extra ref so that the island does not get destroyed (which means
     the epoll_fd won't be closed) while we are are doing an epoll_wait() on the
     epoll_fd */
  PI_ADD_REF(pi, "ps_work");
  gpr_mu_unlock(&pollset->po.mu);
  /* If we get some workqueue work to do, it might end up completing an item on
     the completion queue, so there's no need to poll... so we skip that and
     redo the complete loop to verify */
  if (!maybe_do_workqueue_work(exec_ctx, pi)) {
    gpr_atm_no_barrier_fetch_add(&pi->poller_count, 1);
    g_current_thread_polling_island = pi;
    GRPC_SCHEDULING_START_BLOCKING_REGION;
    ep_rv = epoll_pwait(epoll_fd, ep_ev, GRPC_EPOLL_MAX_EVENTS, timeout_ms,
                        sig_mask);
    GRPC_SCHEDULING_END_BLOCKING_REGION;
    if (ep_rv < 0) {
      if (errno != EINTR) {
        gpr_asprintf(&err_msg,
                     "epoll_wait() epoll fd: %d failed with error: %d (%s)",
                     epoll_fd, errno, strerror(errno));
        append_error(error, GRPC_OS_ERROR(errno, err_msg), err_desc);
        /* GRPC_OS_ERROR copies the message; free our gpr_asprintf buffer */
        gpr_free(err_msg);
      } else {
        /* We were interrupted. Save an interation by doing a zero timeout
           epoll_wait to see if there are any other events of interest */
        GRPC_POLLING_TRACE(
            "pollset_work: pollset: %p, worker: %p received kick",
            (void *)pollset, (void *)worker);
        ep_rv = epoll_wait(epoll_fd, ep_ev, GRPC_EPOLL_MAX_EVENTS, 0);
      }
    }
#ifdef GRPC_TSAN
    /* See the definition of g_poll_sync for more details */
    gpr_atm_acq_load(&g_epoll_sync);
#endif /* defined(GRPC_TSAN) */
    for (int i = 0; i < ep_rv; ++i) {
      void *data_ptr = ep_ev[i].data.ptr;
      if (data_ptr == &global_wakeup_fd) {
        append_error(error, grpc_wakeup_fd_consume_wakeup(&global_wakeup_fd),
                     err_desc);
      } else if (data_ptr == &pi->workqueue_wakeup_fd) {
        append_error(error,
                     grpc_wakeup_fd_consume_wakeup(&pi->workqueue_wakeup_fd),
                     err_desc);
        maybe_do_workqueue_work(exec_ctx, pi);
      } else if (data_ptr == &polling_island_wakeup_fd) {
        GRPC_POLLING_TRACE(
            "pollset_work: pollset: %p, worker: %p polling island (epoll_fd: "
            "%d) got merged",
            (void *)pollset, (void *)worker, epoll_fd);
        /* This means that our polling island is merged with a different
           island. We do not have to do anything here since the subsequent call
           to the function pollset_work_and_unlock() will pick up the correct
           epoll_fd */
      } else {
        grpc_fd *fd = data_ptr;
        int cancel = ep_ev[i].events & (EPOLLERR | EPOLLHUP);
        int read_ev = ep_ev[i].events & (EPOLLIN | EPOLLPRI);
        int write_ev = ep_ev[i].events & EPOLLOUT;
        if (read_ev || cancel) {
          fd_become_readable(exec_ctx, fd, pollset);
        }
        if (write_ev || cancel) {
          fd_become_writable(exec_ctx, fd);
        }
      }
    }
    g_current_thread_polling_island = NULL;
    gpr_atm_no_barrier_fetch_add(&pi->poller_count, -1);
  }
  GPR_ASSERT(pi != NULL);
  /* Before leaving, release the extra ref we added to the polling island. It
     is important to use "pi" here (i.e our old copy of pollset->po.pi
     that we got before releasing the polling island lock). This is because
     pollset->po.pi pointer might get udpated in other parts of the
     code when there is an island merge while we are doing epoll_wait() above */
  PI_UNREF(exec_ctx, pi, "ps_work");
  GPR_TIMER_END("pollset_work_and_unlock", 0);
}
/* pollset->po.mu lock must be held by the caller before calling this.
   The function pollset_work() may temporarily release the lock (pollset->po.mu)
   during the course of its execution but it will always re-acquire the lock and
   ensure that it is held by the time the function returns */
/* Run one blocking poll cycle on 'pollset' until 'deadline'. A stack-allocated
   worker struct is linked into the pollset for the duration; *worker_hdl is
   set to it so pollset_kick can target this thread, and reset to NULL before
   returning. */
static grpc_error *pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
                                grpc_pollset_worker **worker_hdl,
                                gpr_timespec now, gpr_timespec deadline) {
  GPR_TIMER_BEGIN("pollset_work", 0);
  grpc_error *error = GRPC_ERROR_NONE;
  int timeout_ms = poll_deadline_to_millis_timeout(deadline, now);
  sigset_t new_mask;
  /* Worker lives on this stack frame; it is removed from the pollset's list
     before this function returns */
  grpc_pollset_worker worker;
  worker.next = worker.prev = NULL;
  worker.pt_id = pthread_self();
  gpr_atm_no_barrier_store(&worker.is_kicked, (gpr_atm)0);
  *worker_hdl = &worker;
  gpr_tls_set(&g_current_thread_pollset, (intptr_t)pollset);
  gpr_tls_set(&g_current_thread_worker, (intptr_t)&worker);
  if (pollset->kicked_without_pollers) {
    /* If the pollset was kicked without pollers, pretend that the current
       worker got the kick and skip polling. A kick indicates that there is some
       work that needs attention like an event on the completion queue or an
       alarm */
    GPR_TIMER_MARK("pollset_work.kicked_without_pollers", 0);
    pollset->kicked_without_pollers = 0;
  } else if (!pollset->shutting_down) {
    /* We use the posix-signal with number 'grpc_wakeup_signal' for waking up
       (i.e 'kicking') a worker in the pollset. A 'kick' is a way to inform the
       worker that there is some pending work that needs immediate attention
       (like an event on the completion queue, or a polling island merge that
       results in a new epoll-fd to wait on) and that the worker should not
       spend time waiting in epoll_pwait().
       A worker can be kicked anytime from the point it is added to the pollset
       via push_front_worker() (or push_back_worker()) to the point it is
       removed via remove_worker().
       If the worker is kicked before/during it calls epoll_pwait(), it should
       immediately exit from epoll_wait(). If the worker is kicked after it
       returns from epoll_wait(), then nothing really needs to be done.
       To accomplish this, we mask 'grpc_wakeup_signal' on this thread at all
       times *except* when it is in epoll_pwait(). This way, the worker never
       misses acting on a kick */
    if (!g_initialized_sigmask) {
      sigemptyset(&new_mask);
      sigaddset(&new_mask, grpc_wakeup_signal);
      pthread_sigmask(SIG_BLOCK, &new_mask, &g_orig_sigmask);
      sigdelset(&g_orig_sigmask, grpc_wakeup_signal);
      g_initialized_sigmask = true;
      /* new_mask:       The new thread mask which blocks 'grpc_wakeup_signal'.
                         This is the mask used at all times *except during
                         epoll_wait()*"
         g_orig_sigmask: The thread mask which allows 'grpc_wakeup_signal' and
                         this is the mask to use *during epoll_wait()*
         The new_mask is set on the worker before it is added to the pollset
         (i.e before it can be kicked) */
    }
    push_front_worker(pollset, &worker); /* Add worker to pollset */
    pollset_work_and_unlock(exec_ctx, pollset, &worker, timeout_ms,
                            &g_orig_sigmask, &error);
    grpc_exec_ctx_flush(exec_ctx);
    gpr_mu_lock(&pollset->po.mu);
    /* Note: There is no need to reset worker.is_kicked to 0 since we are no
       longer going to use this worker */
    remove_worker(pollset, &worker);
  }
  /* If we are the last worker on the pollset (i.e pollset_has_workers() is
     false at this point) and the pollset is shutting down, we may have to
     finish the shutdown process by calling finish_shutdown_locked().
     See pollset_shutdown() for more details.
     Note: Continuing to access pollset here is safe; it is the caller's
     responsibility to not destroy a pollset when it has outstanding calls to
     pollset_work() */
  if (pollset->shutting_down && !pollset_has_workers(pollset) &&
      !pollset->finish_shutdown_called) {
    GPR_TIMER_MARK("pollset_work.finish_shutdown_locked", 0);
    finish_shutdown_locked(exec_ctx, pollset);
    gpr_mu_unlock(&pollset->po.mu);
    grpc_exec_ctx_flush(exec_ctx);
    gpr_mu_lock(&pollset->po.mu);
  }
  *worker_hdl = NULL;
  gpr_tls_set(&g_current_thread_pollset, (intptr_t)0);
  gpr_tls_set(&g_current_thread_worker, (intptr_t)0);
  GPR_TIMER_END("pollset_work", 0);
  GRPC_LOG_IF_ERROR("pollset_work", GRPC_ERROR_REF(error));
  return error;
}
/* Core association operation: ensure that 'item' (an fd, pollset or
   pollset_set) and 'bag' (a pollset or pollset_set) end up pointing at the
   same polling island, creating or merging islands as needed. Locks bag->mu
   then item->mu for the duration; may temporarily drop item->mu (fd case)
   and retry if the state changed underneath. */
static void add_poll_object(grpc_exec_ctx *exec_ctx, poll_obj *bag,
                            poll_obj_type bag_type, poll_obj *item,
                            poll_obj_type item_type) {
  GPR_TIMER_BEGIN("add_poll_object", 0);
#ifdef PO_DEBUG
  GPR_ASSERT(item->obj_type == item_type);
  GPR_ASSERT(bag->obj_type == bag_type);
#endif
  grpc_error *error = GRPC_ERROR_NONE;
  polling_island *pi_new = NULL;
  gpr_mu_lock(&bag->mu);
  gpr_mu_lock(&item->mu);
retry:
  /*
   * 1) If item->pi and bag->pi are both non-NULL and equal, do nothing
   * 2) If item->pi and bag->pi are both NULL, create a new polling island (with
   *    a refcount of 2) and point item->pi and bag->pi to the new island
   * 3) If exactly one of item->pi or bag->pi is NULL, update it to point to
   *    the other's non-NULL pi
   * 4) Finally if item->pi and bag-pi are non-NULL and not-equal, merge the
   *    polling islands and update item->pi and bag->pi to point to the new
   *    island
   */
  /* Early out if we are trying to add an 'fd' to a 'bag' but the fd is already
   * orphaned */
  if (item_type == POLL_OBJ_FD && (FD_FROM_PO(item))->orphaned) {
    gpr_mu_unlock(&item->mu);
    gpr_mu_unlock(&bag->mu);
    return;
  }
  if (item->pi == bag->pi) {
    pi_new = item->pi;
    if (pi_new == NULL) {
      /* GPR_ASSERT(item->pi == bag->pi == NULL) */
      /* If we are adding an fd to a bag (i.e pollset or pollset_set), then
       * we need to do some extra work to make TSAN happy */
      if (item_type == POLL_OBJ_FD) {
        /* Unlock before creating a new polling island: the polling island will
           create a workqueue which creates a file descriptor, and holding an fd
           lock here can eventually cause a loop to appear to TSAN (making it
           unhappy). We don't think it's a real loop (there's an epoch point
           where that loop possibility disappears), but the advantages of
           keeping TSAN happy outweigh any performance advantage we might have
           by keeping the lock held. */
        gpr_mu_unlock(&item->mu);
        pi_new = polling_island_create(exec_ctx, FD_FROM_PO(item), &error);
        gpr_mu_lock(&item->mu);
        /* Need to reverify any assumptions made between the initial lock and
           getting to this branch: if they've changed, we need to throw away our
           work and figure things out again. */
        if (item->pi != NULL) {
          GRPC_POLLING_TRACE(
              "add_poll_object: Raced creating new polling island. pi_new: %p "
              "(fd: %d, %s: %p)",
              (void *)pi_new, FD_FROM_PO(item)->fd, poll_obj_string(bag_type),
              (void *)bag);
          /* No need to lock 'pi_new' here since this is a new polling island
             and no one has a reference to it yet */
          polling_island_remove_all_fds_locked(pi_new, true, &error);
          /* Ref and unref so that the polling island gets deleted during unref
           */
          PI_ADD_REF(pi_new, "dance_of_destruction");
          PI_UNREF(exec_ctx, pi_new, "dance_of_destruction");
          goto retry;
        }
      } else {
        pi_new = polling_island_create(exec_ctx, NULL, &error);
      }
      GRPC_POLLING_TRACE(
          "add_poll_object: Created new polling island. pi_new: %p (%s: %p, "
          "%s: %p)",
          (void *)pi_new, poll_obj_string(item_type), (void *)item,
          poll_obj_string(bag_type), (void *)bag);
    } else {
      GRPC_POLLING_TRACE(
          "add_poll_object: Same polling island. pi: %p (%s, %s)",
          (void *)pi_new, poll_obj_string(item_type),
          poll_obj_string(bag_type));
    }
  } else if (item->pi == NULL) {
    /* GPR_ASSERT(bag->pi != NULL) */
    /* Make pi_new point to latest pi*/
    pi_new = polling_island_lock(bag->pi);
    if (item_type == POLL_OBJ_FD) {
      grpc_fd *fd = FD_FROM_PO(item);
      polling_island_add_fds_locked(pi_new, &fd, 1, true, &error);
    }
    gpr_mu_unlock(&pi_new->mu);
    GRPC_POLLING_TRACE(
        "add_poll_obj: item->pi was NULL. pi_new: %p (item(%s): %p, "
        "bag(%s): %p)",
        (void *)pi_new, poll_obj_string(item_type), (void *)item,
        poll_obj_string(bag_type), (void *)bag);
  } else if (bag->pi == NULL) {
    /* GPR_ASSERT(item->pi != NULL) */
    /* Make pi_new to point to latest pi */
    pi_new = polling_island_lock(item->pi);
    gpr_mu_unlock(&pi_new->mu);
    GRPC_POLLING_TRACE(
        "add_poll_obj: bag->pi was NULL. pi_new: %p (item(%s): %p, "
        "bag(%s): %p)",
        (void *)pi_new, poll_obj_string(item_type), (void *)item,
        poll_obj_string(bag_type), (void *)bag);
  } else {
    pi_new = polling_island_merge(item->pi, bag->pi, &error);
    GRPC_POLLING_TRACE(
        "add_poll_obj: polling islands merged. pi_new: %p (item(%s): %p, "
        "bag(%s): %p)",
        (void *)pi_new, poll_obj_string(item_type), (void *)item,
        poll_obj_string(bag_type), (void *)bag);
  }
  /* At this point, pi_new is the polling island that both item->pi and bag->pi
     MUST be pointing to */
  if (item->pi != pi_new) {
    PI_ADD_REF(pi_new, poll_obj_string(item_type));
    if (item->pi != NULL) {
      PI_UNREF(exec_ctx, item->pi, poll_obj_string(item_type));
    }
    item->pi = pi_new;
  }
  if (bag->pi != pi_new) {
    PI_ADD_REF(pi_new, poll_obj_string(bag_type));
    if (bag->pi != NULL) {
      PI_UNREF(exec_ctx, bag->pi, poll_obj_string(bag_type));
    }
    bag->pi = pi_new;
  }
  gpr_mu_unlock(&item->mu);
  gpr_mu_unlock(&bag->mu);
  GRPC_LOG_IF_ERROR("add_poll_object", error);
  GPR_TIMER_END("add_poll_object", 0);
}
/* Attach 'fd' to 'pollset' by joining their polling islands. */
static void pollset_add_fd(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
                           grpc_fd *fd) {
  add_poll_object(exec_ctx, &pollset->po, POLL_OBJ_POLLSET, &fd->po,
                  POLL_OBJ_FD);
}
/*******************************************************************************
 * Pollset-set Definitions
 */
/* Allocate and initialize an empty pollset_set (no polling island yet). */
static grpc_pollset_set *pollset_set_create(void) {
  grpc_pollset_set *pss = gpr_malloc(sizeof(*pss));
  gpr_mu_init(&pss->po.mu);
  pss->po.pi = NULL;
#ifdef PO_DEBUG
  pss->po.obj_type = POLL_OBJ_POLLSET_SET;
#endif
  return pss;
}
/* Destroy a pollset_set: drop its polling-island ref (if any) and free it. */
static void pollset_set_destroy(grpc_exec_ctx *exec_ctx,
                                grpc_pollset_set *pss) {
  gpr_mu_destroy(&pss->po.mu);
  if (pss->po.pi != NULL) {
    PI_UNREF(exec_ctx, pss->po.pi, "pss_destroy");
  }
  gpr_free(pss);
}
/* Attach 'fd' to pollset_set 'pss' by joining their polling islands. */
static void pollset_set_add_fd(grpc_exec_ctx *exec_ctx, grpc_pollset_set *pss,
                               grpc_fd *fd) {
  add_poll_object(exec_ctx, &pss->po, POLL_OBJ_POLLSET_SET, &fd->po,
                  POLL_OBJ_FD);
}
/* Deliberate no-op: polling islands only grow/merge; fd removal is handled
   when the fd is orphaned. */
static void pollset_set_del_fd(grpc_exec_ctx *exec_ctx, grpc_pollset_set *pss,
                               grpc_fd *fd) {
  /* Nothing to do */
}
/* Attach pollset 'ps' to pollset_set 'pss' by joining their polling islands. */
static void pollset_set_add_pollset(grpc_exec_ctx *exec_ctx,
                                    grpc_pollset_set *pss, grpc_pollset *ps) {
  add_poll_object(exec_ctx, &pss->po, POLL_OBJ_POLLSET_SET, &ps->po,
                  POLL_OBJ_POLLSET);
}
/* Deliberate no-op: island membership is never unwound on deletion. */
static void pollset_set_del_pollset(grpc_exec_ctx *exec_ctx,
                                    grpc_pollset_set *pss, grpc_pollset *ps) {
  /* Nothing to do */
}
/* Attach pollset_set 'item' to pollset_set 'bag' by joining their islands. */
static void pollset_set_add_pollset_set(grpc_exec_ctx *exec_ctx,
                                        grpc_pollset_set *bag,
                                        grpc_pollset_set *item) {
  add_poll_object(exec_ctx, &bag->po, POLL_OBJ_POLLSET_SET, &item->po,
                  POLL_OBJ_POLLSET_SET);
}
/* Deliberate no-op: island membership is never unwound on deletion. */
static void pollset_set_del_pollset_set(grpc_exec_ctx *exec_ctx,
                                        grpc_pollset_set *bag,
                                        grpc_pollset_set *item) {
  /* Nothing to do */
}
/* Test helper functions
 * */
/* Test-only: snapshot the polling island currently attached to 'fd'. */
void *grpc_fd_get_polling_island(grpc_fd *fd) {
  gpr_mu_lock(&fd->po.mu);
  polling_island *result = fd->po.pi;
  gpr_mu_unlock(&fd->po.mu);
  return result;
}
/* Test-only: snapshot the polling island currently attached to 'ps'. */
void *grpc_pollset_get_polling_island(grpc_pollset *ps) {
  gpr_mu_lock(&ps->po.mu);
  polling_island *result = ps->po.pi;
  gpr_mu_unlock(&ps->po.mu);
  return result;
}
/* Test-only: true iff 'p' and 'q' resolve to the same (latest) polling
   island after following their merge chains. */
bool grpc_are_polling_islands_equal(void *p, void *q) {
  polling_island *p1 = p;
  polling_island *p2 = q;
  /* Note: polling_island_lock_pair() may change p1 and p2 to point to the
     latest polling islands in their respective linked lists */
  polling_island_lock_pair(&p1, &p2);
  polling_island_unlock_pair(p1, p2);
  return p1 == p2;
}
/*******************************************************************************
 * Event engine binding
 */
/* Tear down all global state for this engine, mirroring the init order in
   grpc_init_epoll_linux. */
static void shutdown_engine(void) {
  fd_global_shutdown();
  pollset_global_shutdown();
  polling_island_global_shutdown();
}
/* The event-engine vtable through which ev_posix.c dispatches to this
   epoll + signal based implementation. */
static const grpc_event_engine_vtable vtable = {
    .pollset_size = sizeof(grpc_pollset),
    .fd_create = fd_create,
    .fd_wrapped_fd = fd_wrapped_fd,
    .fd_orphan = fd_orphan,
    .fd_shutdown = fd_shutdown,
    .fd_is_shutdown = fd_is_shutdown,
    .fd_notify_on_read = fd_notify_on_read,
    .fd_notify_on_write = fd_notify_on_write,
    .fd_get_read_notifier_pollset = fd_get_read_notifier_pollset,
    .fd_get_workqueue = fd_get_workqueue,
    .pollset_init = pollset_init,
    .pollset_shutdown = pollset_shutdown,
    .pollset_destroy = pollset_destroy,
    .pollset_work = pollset_work,
    .pollset_kick = pollset_kick,
    .pollset_add_fd = pollset_add_fd,
    .pollset_set_create = pollset_set_create,
    .pollset_set_destroy = pollset_set_destroy,
    .pollset_set_add_pollset = pollset_set_add_pollset,
    .pollset_set_del_pollset = pollset_set_del_pollset,
    .pollset_set_add_pollset_set = pollset_set_add_pollset_set,
    .pollset_set_del_pollset_set = pollset_set_del_pollset_set,
    .pollset_set_add_fd = pollset_set_add_fd,
    .pollset_set_del_fd = pollset_set_del_fd,
    .kick_poller = kick_poller,
    .workqueue_ref = workqueue_ref,
    .workqueue_unref = workqueue_unref,
    .workqueue_scheduler = workqueue_scheduler,
    .shutdown_engine = shutdown_engine,
};
/* It is possible that GLIBC has epoll but the underlying kernel doesn't.
 * Create a dummy epoll_fd to make sure epoll support is available */
static bool is_epoll_available() {
  int fd = epoll_create1(EPOLL_CLOEXEC);
  if (fd < 0) {
    /* Fix: report errno (the actual failure cause) instead of the -1 return
       value, which carries no diagnostic information. */
    gpr_log(
        GPR_ERROR,
        "epoll_create1 failed with error: %d (%s). Not using epoll polling "
        "engine",
        errno, strerror(errno));
    return false;
  }
  close(fd);
  return true;
}
/* Entry point for selecting this polling engine.  Returns the epoll vtable
   when the environment supports it, or NULL so the caller can fall back to
   another engine.  Initialization order below matters: fd state, then
   pollset globals, then polling-island globals. */
const grpc_event_engine_vtable *grpc_init_epoll_linux(void) {
  /* If use of signals is disabled, we cannot use epoll engine*/
  if (is_grpc_wakeup_signal_initialized && grpc_wakeup_signal < 0) {
    return NULL;
  }

  if (!grpc_has_wakeup_fd()) {
    return NULL;
  }

  if (!is_epoll_available()) {
    return NULL;
  }

  /* Pick a default realtime signal if the application did not choose one. */
  if (!is_grpc_wakeup_signal_initialized) {
    grpc_use_signal(SIGRTMIN + 6);
  }

  fd_global_init();

  if (!GRPC_LOG_IF_ERROR("pollset_global_init", pollset_global_init())) {
    return NULL;
  }

  if (!GRPC_LOG_IF_ERROR("polling_island_global_init",
                         polling_island_global_init())) {
    return NULL;
  }

  return &vtable;
}
#else /* defined(GRPC_LINUX_EPOLL) */
#if defined(GRPC_POSIX_SOCKET)
#include "src/core/lib/iomgr/ev_posix.h"
/* If GRPC_LINUX_EPOLL is not defined, it means epoll is not available. Return
 * NULL */
const grpc_event_engine_vtable *grpc_init_epoll_linux(void) { return NULL; }
#endif /* defined(GRPC_POSIX_SOCKET) */
/* No-op stub: signals are only used by the epoll engine. */
void grpc_use_signal(int signum) {}
#endif /* !defined(GRPC_LINUX_EPOLL) */
| {
"content_hash": "915b80b7b2c22ae6bed6808adf79887f",
"timestamp": "",
"source": "github",
"line_count": 2129,
"max_line_length": 80,
"avg_line_length": 35.419915453264444,
"alnum_prop": 0.6143961596095957,
"repo_name": "apolcyn/grpc",
"id": "f6372c0f3f6425fc29407d891c73c2756dc12bed",
"size": "76977",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/core/lib/iomgr/ev_epoll_linux.c",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "23184"
},
{
"name": "C",
"bytes": "6803673"
},
{
"name": "C#",
"bytes": "1529121"
},
{
"name": "C++",
"bytes": "2072246"
},
{
"name": "CMake",
"bytes": "418448"
},
{
"name": "DTrace",
"bytes": "147"
},
{
"name": "JavaScript",
"bytes": "323192"
},
{
"name": "M4",
"bytes": "39654"
},
{
"name": "Makefile",
"bytes": "846848"
},
{
"name": "Objective-C",
"bytes": "350426"
},
{
"name": "PHP",
"bytes": "301694"
},
{
"name": "Protocol Buffer",
"bytes": "120452"
},
{
"name": "PureBasic",
"bytes": "147"
},
{
"name": "Python",
"bytes": "1440747"
},
{
"name": "Ruby",
"bytes": "677997"
},
{
"name": "Shell",
"bytes": "58791"
},
{
"name": "Swift",
"bytes": "5418"
}
],
"symlink_target": ""
} |
This guide covers a migration from `redux-resource-prop-types@2.x` to `redux-resource-prop-types@3.0.0`.
v3.0.0 of `redux-resource-prop-types` introduced several breaking changes. The philosophy behind
v3.0.0 is that we want to provide you with better primitive prop types to build more robust
prop types.
The v2 prop types were very basic, and consequently didn't do a very good job of protecting you from
bugs. Using the new prop types in v3, you can have much more confidence that the prop types
that you build are actually helping you find issues with your props.
### New Prop Types
Familiarize yourself with the new prop types. You'll want to use these to build new, better
prop types.
You can read about them on
[the documentation page](https://redux-resource.js.org/docs/extras/redux-resource-prop-types.html).
### Removed Prop Types
#### `resourceIdsPropType`
You can instead use the new prop type, `idPropType`, like so:
```js
import PropTypes from 'prop-types';
import { idPropType } from 'redux-resource-prop-types';
PropTypes.arrayOf(idPropType);
```
#### `resourcesPropType`
If you'd like to continue using the old version, here's the code:
```js
const resourcesPropType = PropTypes.arrayOf(
PropTypes.shape({
id: PropTypes.oneOfType([
PropTypes.string,
PropTypes.number,
])
})
);
```
Instead, we recommend using the new `resourcePropType` (note that that name is singular!) to create
a more robust prop type for your resources. Then, you can use that prop type to build an array prop
type:
```js
const booksPropType = PropTypes.arrayOf(bookPropType);
```
#### `slicePropType`
If you'd like to continue using the old version in your application, then you can copy and paste this
code into your application:
```js
const slicePropType = PropTypes.shape({
resources: PropTypes.object.isRequired,
meta: PropTypes.object.isRequired,
requests: PropTypes.object.isRequired,
lists: PropTypes.object.isRequired
});
```
However, we recommend building a more robust prop type, such as:
```js
import { idsPropType } from 'redux-resource-prop-types';
const booksSlicePropType = PropTypes.shape({
resources: PropTypes.objectOf(booksPropType).isRequired,
meta: PropTypes.objectOf(booksMetaPropType).isRequired,
  requests: PropTypes.objectOf(booksRequestPropType).isRequired,
  lists: PropTypes.objectOf(idsPropType).isRequired,
customStuff: myCustomPropType,
// ...and so on
});
``` | {
"content_hash": "66d1ce64e30b71d8ea395f4a89944598",
"timestamp": "",
"source": "github",
"line_count": 82,
"max_line_length": 104,
"avg_line_length": 29.70731707317073,
"alnum_prop": 0.7536945812807881,
"repo_name": "jmeas/resourceful-redux",
"id": "a49b0ab171b19dea7038b46c4b869e3e5d94c2a5",
"size": "2449",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "packages/redux-resource-prop-types/docs/migration-guides/2-to-3.md",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "985"
},
{
"name": "JavaScript",
"bytes": "163430"
}
],
"symlink_target": ""
} |
/* Styles for the apcDialog widget: base dialog, header/body/footer layout,
   close button, and the modal variant with its backdrop.
   NOTE(review): this looks like compiled SCSS output; several selectors below
   begin with a bare ">" at the top level, which is invalid in plain CSS and
   causes those rules to be dropped by browsers — presumably the ".apcdialog"
   parent was lost during compilation.  Verify against the source stylesheet. */
.apcdialog {
  position: absolute;
  border-radius: 7px;
  box-shadow: 0 5px 10px rgba(0, 0, 0, 0.2); }
  /* NOTE(review): selector starts with ">" — invalid as written; likely
     intended to be ".apcdialog > .apcdialog-container". */
  > .apcdialog-container {
    display: -webkit-box;
    display: -moz-box;
    display: box;
    display: -webkit-flex;
    display: -moz-flex;
    display: -ms-flexbox;
    display: flex;
    -webkit-box-orient: vertical;
    -moz-box-orient: vertical;
    box-orient: vertical;
    -webkit-flex-direction: column;
    -moz-flex-direction: column;
    flex-direction: column;
    -ms-flex-direction: column;
    position: relative;
    border: solid 1px #bab;
    border-radius: 7px;
    background: white; }
  .apcdialog-header {
    border-top-left-radius: 7px;
    border-top-right-radius: 7px;
    padding: 25px 30px 0;
    -webkit-flex-shrink: 0;
    -moz-flex-shrink: 0;
    flex-shrink: 0;
    -ms-flex-negative: 0; }
  .apcdialog-body {
    cursor: default; }
  .apcdialog-footer {
    display: -webkit-box;
    display: -moz-box;
    display: box;
    display: -webkit-flex;
    display: -moz-flex;
    display: -ms-flexbox;
    display: flex;
    -webkit-flex-shrink: 0;
    -moz-flex-shrink: 0;
    flex-shrink: 0;
    -ms-flex-negative: 0;
    border-bottom-left-radius: 7px;
    border-bottom-right-radius: 7px;
    padding: 0 30px 20px;
    cursor: default; }
    .apcdialog-footer > .footer-left {
      -webkit-box-flex: 1;
      -moz-box-flex: 1;
      box-flex: 1;
      -webkit-flex: 1;
      -moz-flex: 1;
      -ms-flex: 1;
      flex: 1;
      -webkit-box-align: center;
      -moz-box-align: center;
      box-align: center;
      -webkit-align-items: center;
      -moz-align-items: center;
      -ms-align-items: center;
      -o-align-items: center;
      align-items: center;
      -ms-flex-align: center; }
  .apcdialog-closebtn {
    font-size: 1.2em;
    position: absolute;
    right: 15px;
    top: 14px;
    cursor: pointer;
    color: #bab; }
  /* Modal variant: full-viewport flex container that centers the dialog
     over a semi-transparent backdrop. */
  .apcdialog.dialog-modal {
    display: -webkit-box;
    display: -moz-box;
    display: box;
    display: -webkit-flex;
    display: -moz-flex;
    display: -ms-flexbox;
    display: flex;
    -webkit-box-pack: center;
    -moz-box-pack: center;
    box-pack: center;
    -webkit-justify-content: center;
    -moz-justify-content: center;
    -ms-justify-content: center;
    -o-justify-content: center;
    justify-content: center;
    -ms-flex-pack: center;
    -webkit-box-align: center;
    -moz-box-align: center;
    box-align: center;
    -webkit-align-items: center;
    -moz-align-items: center;
    -ms-align-items: center;
    -o-align-items: center;
    align-items: center;
    -ms-flex-align: center;
    top: 0;
    left: 0;
    width: 100%;
    height: 100%;
    background-color: rgba(0, 0, 0, 0.5); }
  /* NOTE(review): another bare ">" selector — invalid as written; verify
     intended parent in the source stylesheet. */
  > .apcdialog.dialog-modal-container {
    overflow: auto;
    max-height: 93%;
    max-width: 93%;
    border: solid 1px #777; }
  .apcdialog.dialog-modal-body {
    overflow: auto;
    padding: 20px 30px; }
| {
"content_hash": "071569e7bc6401f8f928a04cf87ef813",
"timestamp": "",
"source": "github",
"line_count": 112,
"max_line_length": 46,
"avg_line_length": 26.169642857142858,
"alnum_prop": 0.6015011941316957,
"repo_name": "apcjs/apcDialog",
"id": "431fc0ab3293f1336e6b390ba963936566dbf105",
"size": "2933",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/apcdialog.css",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "4295"
},
{
"name": "JavaScript",
"bytes": "104"
}
],
"symlink_target": ""
} |
<?php
namespace Onyx\Destiny\Helpers\Network;
use Barryvdh\Debugbar\Facade as DebugBar;
use GuzzleHttp\Client as Guzzle;
class Http
{
    /**
     * Guzzle HTTP client used for all outbound requests.
     *
     * @var \GuzzleHttp\Client
     */
    protected $guzzle;

    public function __construct()
    {
        $this->setupGuzzle();
    }

    /**
     * (Re)creates the Guzzle client instance.
     */
    private function setupGuzzle()
    {
        $this->guzzle = new Guzzle();
    }

    /**
     * Request an URL expecting JSON to be returned.
     *
     * Results are cached (keyed by the md5 of the URL) for $cache minutes
     * when $cache is non-zero.
     *
     * @param string $url
     * @param int    $cache Cache lifetime; 0 disables caching.
     *
     * @throws BungieOfflineException when the API responds with a non-200 status.
     *
     * @return array Decoded JSON payload.
     */
    public function getJson($url, $cache = 0)
    {
        if (!$this->guzzle instanceof Guzzle) {
            $this->setupGuzzle();
        }

        $sum = md5($url);

        if ($cache != 0 && \Cache::has($sum)) {
            return \Cache::get($sum);
        }

        DebugBar::startMeasure($sum, $url);

        $response = $this->guzzle->get($url, [
            'headers' => ['X-API-Key' => env('BUNGIE_KEY')],
        ]);

        DebugBar::stopMeasure($sum);

        if ($response->getStatusCode() != 200) {
            throw new BungieOfflineException();
        }

        // Decode the body once and reuse the result.  The previous code
        // stringified/decoded the PSR-7 body stream twice (once for the
        // cache put, once for the return), which relied on the stream
        // being seekable and rewound between reads.
        $data = json_decode($response->getBody(), true);

        if ($cache != 0) {
            \Cache::put($sum, $data, $cache);
        }

        return $data;
    }
}
/**
 * Thrown when the Bungie API returns a non-200 response (treated as offline).
 */
class BungieOfflineException extends \Exception
{
}
| {
"content_hash": "d5f0bdc2650b780d06fcd666695c9aea",
"timestamp": "",
"source": "github",
"line_count": 69,
"max_line_length": 79,
"avg_line_length": 20.057971014492754,
"alnum_prop": 0.5267341040462428,
"repo_name": "iBotPeaches/PandaLove",
"id": "42bcefaef27bfe2c483fa455c87815d5a0ed05aa",
"size": "1384",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Onyx/Destiny/Helpers/Network/Http.php",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "47450"
},
{
"name": "HTML",
"bytes": "232005"
},
{
"name": "JavaScript",
"bytes": "14390"
},
{
"name": "PHP",
"bytes": "783289"
},
{
"name": "Shell",
"bytes": "550"
}
],
"symlink_target": ""
} |
# Shared examples for AccountBuilder subclasses: verifies that #build wires
# the affiliate / affiliate_other attributes from the builder options.
# Including specs must define `options` (the builder params) and are expected
# to reference the `affiliate` / `affiliate_other` lets declared per context.
RSpec.shared_examples_for "AccountBuilder#build" do
  subject(:account) { builder.build }

  let(:builder) { described_class.new(options) }
  let(:facility) { FactoryBot.build_stubbed(:facility) }
  let(:user) { FactoryBot.build_stubbed(:user) }

  context "when the affiliate_id param is set" do
    let(:affiliate) { Affiliate.create!(name: "New Affiliate") }
    let(:affiliate_other) { "" }

    it "sets the affiliate", :aggregate_failures do
      expect(account.affiliate).to eq(affiliate)
      expect(account.affiliate_other).to be_blank
    end

    context "when the affiliate selected is 'Other'" do
      # NOTE(review): Affiliate.OTHER is presumably a class-level reader for
      # the special "Other" affiliate record — verify in the Affiliate model.
      let(:affiliate) { Affiliate.OTHER }

      context "and the affiliate_other param is set" do
        let(:affiliate_other) { "Other Affiliate" }

        it "sets affiliate_other", :aggregate_failures do
          expect(account.affiliate).to eq(affiliate)
          expect(account.affiliate_other).to eq("Other Affiliate")
        end
      end
    end

    context "when the affiliate supports subaffiliates" do
      before { affiliate.update_attribute(:subaffiliates_enabled, true) }

      context "and the affiliate_other param is set" do
        let(:affiliate_other) { "Affiliate Category" }

        it "sets affiliate_other", :aggregate_failures do
          expect(account.affiliate).to eq(affiliate)
          expect(account.affiliate_other).to eq("Affiliate Category")
        end
      end
    end
  end

  context "when the affiliate_id param is not set" do
    let(:affiliate) { nil }
    let(:affiliate_other) { "" }

    it "does not set the affiliate", :aggregate_failures do
      expect(account.affiliate).to be_blank
      expect(account.affiliate_other).to be_blank
    end
  end
end
"content_hash": "11e006baa2199d35250dd8d7222905f3",
"timestamp": "",
"source": "github",
"line_count": 52,
"max_line_length": 73,
"avg_line_length": 33.15384615384615,
"alnum_prop": 0.6653132250580046,
"repo_name": "tablexi/nucore-open",
"id": "6a2e2b4be10e8d9a4a31b1480d55e789f960af7d",
"size": "1755",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "spec/account_builder_shared_examples.rb",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "674"
},
{
"name": "CoffeeScript",
"bytes": "64006"
},
{
"name": "Dockerfile",
"bytes": "1234"
},
{
"name": "HTML",
"bytes": "13675"
},
{
"name": "Haml",
"bytes": "328929"
},
{
"name": "JavaScript",
"bytes": "70594"
},
{
"name": "Ruby",
"bytes": "2793374"
},
{
"name": "SCSS",
"bytes": "30141"
},
{
"name": "Shell",
"bytes": "2316"
}
],
"symlink_target": ""
} |
import sys
from libcloud.utils.py3 import httplib
from libcloud.utils.py3 import ET
from libcloud.common.dimensiondata import DimensionDataAPIException
from libcloud.common.types import InvalidCredsError
from libcloud.backup.base import BackupTargetJob
from libcloud.backup.drivers.dimensiondata import DimensionDataBackupDriver as DimensionData
from libcloud.backup.drivers.dimensiondata import DEFAULT_BACKUP_PLAN
from libcloud.test import MockHttp, unittest
from libcloud.test.file_fixtures import BackupFileFixtures
from libcloud.test.secrets import DIMENSIONDATA_PARAMS
class DimensionData_v2_3_Tests(unittest.TestCase):
    """Backup-driver tests for DimensionData, pinned to API version 2.3.

    All HTTP traffic is routed through DimensionDataMockHttp, which serves
    canned XML fixtures; ``DimensionDataMockHttp.type`` selects fixture
    variants (e.g. 'FAIL', 'NOCLIENT') per test.
    """

    def setUp(self):
        # Force API v2.3 and swap in the fixture-backed mock connection
        # before constructing the driver.
        DimensionData.connectionCls.active_api_version = '2.3'
        DimensionData.connectionCls.conn_class = DimensionDataMockHttp
        DimensionDataMockHttp.type = None
        self.driver = DimensionData(*DIMENSIONDATA_PARAMS)

    def test_invalid_region(self):
        with self.assertRaises(ValueError):
            self.driver = DimensionData(*DIMENSIONDATA_PARAMS, region='blah')

    def test_invalid_creds(self):
        DimensionDataMockHttp.type = 'UNAUTHORIZED'
        with self.assertRaises(InvalidCredsError):
            self.driver.list_targets()

    def test_list_targets(self):
        targets = self.driver.list_targets()
        self.assertEqual(len(targets), 2)
        self.assertEqual(targets[0].id, '5579f3a7-4c32-4cf5-8a7e-b45c36a35c10')
        self.assertEqual(targets[0].address, 'e75ead52-692f-4314-8725-c8a4f4d13a87')
        self.assertEqual(targets[0].extra['servicePlan'], 'Enterprise')

    def test_create_target(self):
        target = self.driver.create_target(
            'name',
            'e75ead52-692f-4314-8725-c8a4f4d13a87',
            extra={'servicePlan': 'Enterprise'})
        self.assertEqual(target.id, 'ee7c4b64-f7af-4a4f-8384-be362273530f')
        self.assertEqual(target.address, 'e75ead52-692f-4314-8725-c8a4f4d13a87')
        self.assertEqual(target.extra['servicePlan'], 'Enterprise')

    def test_create_target_DEFAULT(self):
        # Without an explicit servicePlan the driver must send the default
        # backup plan (asserted inside the mock's *_DEFAULT handler).
        DimensionDataMockHttp.type = 'DEFAULT'
        target = self.driver.create_target(
            'name',
            'e75ead52-692f-4314-8725-c8a4f4d13a87')
        self.assertEqual(target.id, 'ee7c4b64-f7af-4a4f-8384-be362273530f')
        self.assertEqual(target.address, 'e75ead52-692f-4314-8725-c8a4f4d13a87')

    def test_create_target_EXISTS(self):
        DimensionDataMockHttp.type = 'EXISTS'
        with self.assertRaises(DimensionDataAPIException) as context:
            self.driver.create_target(
                'name',
                'e75ead52-692f-4314-8725-c8a4f4d13a87',
                extra={'servicePlan': 'Enterprise'})
        self.assertEqual(context.exception.code, 'ERROR')
        self.assertEqual(context.exception.msg, 'Cloud backup for this server is already enabled or being enabled (state: NORMAL).')

    def test_update_target(self):
        target = self.driver.list_targets()[0]
        extra = {'servicePlan': 'Essentials'}
        new_target = self.driver.update_target(target, extra=extra)
        self.assertEqual(new_target.extra['servicePlan'], 'Essentials')

    def test_update_target_DEFAULT(self):
        DimensionDataMockHttp.type = 'DEFAULT'
        target = 'e75ead52-692f-4314-8725-c8a4f4d13a87'
        self.driver.update_target(target)

    def test_update_target_STR(self):
        # update_target also accepts a plain target-address string.
        target = 'e75ead52-692f-4314-8725-c8a4f4d13a87'
        extra = {'servicePlan': 'Essentials'}
        new_target = self.driver.update_target(target, extra=extra)
        self.assertEqual(new_target.extra['servicePlan'], 'Essentials')

    def test_delete_target(self):
        target = self.driver.list_targets()[0]
        self.assertTrue(self.driver.delete_target(target))

    def test_ex_add_client_to_target(self):
        target = self.driver.list_targets()[0]
        client = self.driver.ex_list_available_client_types(target)[0]
        storage_policy = self.driver.ex_list_available_storage_policies(target)[0]
        schedule_policy = self.driver.ex_list_available_schedule_policies(target)[0]
        self.assertTrue(
            self.driver.ex_add_client_to_target(target, client, storage_policy,
                                                schedule_policy, 'ON_FAILURE', 'nobody@example.com')
        )

    def test_ex_add_client_to_target_STR(self):
        # String forms are accepted for target, client type and policies.
        self.assertTrue(
            self.driver.ex_add_client_to_target('e75ead52-692f-4314-8725-c8a4f4d13a87', 'FA.Linux', '14 Day Storage Policy',
                                                '12AM - 6AM', 'ON_FAILURE', 'nobody@example.com')
        )

    def test_ex_get_backup_details_for_target(self):
        target = self.driver.list_targets()[0]
        response = self.driver.ex_get_backup_details_for_target(target)
        self.assertEqual(response.service_plan, 'Enterprise')
        client = response.clients[0]
        self.assertEqual(client.id, '30b1ff76-c76d-4d7c-b39d-3b72be0384c8')
        self.assertEqual(client.type.type, 'FA.Linux')
        self.assertEqual(client.running_job.progress, 5)
        self.assertTrue(isinstance(client.running_job, BackupTargetJob))
        self.assertEqual(len(client.alert.notify_list), 2)
        self.assertTrue(isinstance(client.alert.notify_list, list))

    def test_ex_get_backup_details_for_target_NOBACKUP(self):
        target = self.driver.list_targets()[0].address
        DimensionDataMockHttp.type = 'NOBACKUP'
        response = self.driver.ex_get_backup_details_for_target(target)
        self.assertTrue(response is None)

    def test_ex_cancel_target_job(self):
        target = self.driver.list_targets()[0]
        response = self.driver.ex_get_backup_details_for_target(target)
        client = response.clients[0]
        self.assertTrue(isinstance(client.running_job, BackupTargetJob))
        success = client.running_job.cancel()
        self.assertTrue(success)

    def test_ex_cancel_target_job_with_extras(self):
        success = self.driver.cancel_target_job(
            None,
            ex_client='30b1ff76_c76d_4d7c_b39d_3b72be0384c8',
            ex_target='e75ead52_692f_4314_8725_c8a4f4d13a87'
        )
        self.assertTrue(success)

    def test_ex_cancel_target_job_FAIL(self):
        DimensionDataMockHttp.type = 'FAIL'
        with self.assertRaises(DimensionDataAPIException) as context:
            self.driver.cancel_target_job(
                None,
                ex_client='30b1ff76_c76d_4d7c_b39d_3b72be0384c8',
                ex_target='e75ead52_692f_4314_8725_c8a4f4d13a87'
            )
        self.assertEqual(context.exception.code, 'ERROR')

    # NOTE(review): the bare strings below are statements placed before the
    # methods, not docstrings; they read as descriptions of the tests that
    # follow.  Kept as-is.
    """Test a backup info for a target that does not have a client"""
    def test_ex_get_backup_details_for_target_NO_CLIENT(self):
        DimensionDataMockHttp.type = 'NOCLIENT'
        response = self.driver.ex_get_backup_details_for_target('e75ead52-692f-4314-8725-c8a4f4d13a87')
        self.assertEqual(response.service_plan, 'Essentials')
        self.assertEqual(len(response.clients), 0)

    """Test a backup details that has a client, but no alerting or running jobs"""
    def test_ex_get_backup_details_for_target_NO_JOB_OR_ALERT(self):
        DimensionDataMockHttp.type = 'NOJOB'
        response = self.driver.ex_get_backup_details_for_target('e75ead52-692f-4314_8725-c8a4f4d13a87')
        self.assertEqual(response.service_plan, 'Enterprise')
        self.assertTrue(isinstance(response.clients, list))
        self.assertEqual(len(response.clients), 1)
        client = response.clients[0]
        self.assertEqual(client.id, '30b1ff76-c76d-4d7c-b39d-3b72be0384c8')
        self.assertEqual(client.type.type, 'FA.Linux')
        self.assertIsNone(client.running_job)
        self.assertIsNone(client.alert)

    """Test getting backup info for a server that doesn't exist"""
    def test_ex_get_backup_details_for_target_DISABLED(self):
        DimensionDataMockHttp.type = 'DISABLED'
        with self.assertRaises(DimensionDataAPIException) as context:
            self.driver.ex_get_backup_details_for_target('e75ead52-692f-4314-8725-c8a4f4d13a87')
        self.assertEqual(context.exception.code, 'ERROR')
        self.assertEqual(context.exception.msg, 'Server e75ead52-692f-4314-8725-c8a4f4d13a87 has not been provisioned for backup')

    def test_ex_list_available_client_types(self):
        target = self.driver.list_targets()[0]
        answer = self.driver.ex_list_available_client_types(target)
        self.assertEqual(len(answer), 1)
        self.assertEqual(answer[0].type, 'FA.Linux')
        self.assertEqual(answer[0].is_file_system, True)
        self.assertEqual(answer[0].description, 'Linux File system')

    def test_ex_list_available_storage_policies(self):
        target = self.driver.list_targets()[0]
        answer = self.driver.ex_list_available_storage_policies(target)
        self.assertEqual(len(answer), 1)
        self.assertEqual(answer[0].name,
                         '30 Day Storage Policy + Secondary Copy')
        self.assertEqual(answer[0].retention_period, 30)
        self.assertEqual(answer[0].secondary_location, 'Primary')

    def test_ex_list_available_schedule_policies(self):
        target = self.driver.list_targets()[0]
        answer = self.driver.ex_list_available_schedule_policies(target)
        self.assertEqual(len(answer), 1)
        self.assertEqual(answer[0].name, '12AM - 6AM')
        self.assertEqual(answer[0].description, 'Daily backup will start between 12AM - 6AM')

    def test_ex_remove_client_from_target(self):
        target = self.driver.list_targets()[0]
        client = self.driver.ex_get_backup_details_for_target('e75ead52-692f-4314-8725-c8a4f4d13a87').clients[0]
        self.assertTrue(self.driver.ex_remove_client_from_target(target, client))

    def test_ex_remove_client_from_target_STR(self):
        self.assertTrue(
            self.driver.ex_remove_client_from_target(
                'e75ead52-692f-4314-8725-c8a4f4d13a87',
                '30b1ff76-c76d-4d7c-b39d-3b72be0384c8'
            )
        )

    def test_ex_remove_client_from_target_FAIL(self):
        DimensionDataMockHttp.type = 'FAIL'
        with self.assertRaises(DimensionDataAPIException) as context:
            self.driver.ex_remove_client_from_target(
                'e75ead52-692f-4314-8725-c8a4f4d13a87',
                '30b1ff76-c76d-4d7c-b39d-3b72be0384c8'
            )
        self.assertEqual(context.exception.code, 'ERROR')
        self.assertTrue('Backup Client is currently performing another operation' in context.exception.msg)

    def test_priv_target_to_target_address(self):
        target = self.driver.list_targets()[0]
        self.assertEqual(
            self.driver._target_to_target_address(target),
            'e75ead52-692f-4314-8725-c8a4f4d13a87'
        )

    def test_priv_target_to_target_address_STR(self):
        self.assertEqual(
            self.driver._target_to_target_address('e75ead52-692f-4314-8725-c8a4f4d13a87'),
            'e75ead52-692f-4314-8725-c8a4f4d13a87'
        )

    def test_priv_target_to_target_address_TYPEERROR(self):
        with self.assertRaises(TypeError):
            self.driver._target_to_target_address([1, 2, 3])

    def test_priv_client_to_client_id(self):
        client = self.driver.ex_get_backup_details_for_target('e75ead52-692f-4314-8725-c8a4f4d13a87').clients[0]
        self.assertEqual(
            self.driver._client_to_client_id(client),
            '30b1ff76-c76d-4d7c-b39d-3b72be0384c8'
        )

    def test_priv_client_to_client_id_STR(self):
        self.assertEqual(
            self.driver._client_to_client_id('30b1ff76-c76d-4d7c-b39d-3b72be0384c8'),
            '30b1ff76-c76d-4d7c-b39d-3b72be0384c8'
        )

    def test_priv_client_to_client_id_TYPEERROR(self):
        with self.assertRaises(TypeError):
            self.driver._client_to_client_id([1, 2, 3])
class InvalidRequestError(Exception):
    """Raised by the mock HTTP handlers when a request does not match the
    shape the test expects (wrong method, missing/unexpected payload)."""

    def __init__(self, tag):
        message = "Invalid Request - {0}".format(tag)
        super(InvalidRequestError, self).__init__(message)
class DimensionDataMockHttp(MockHttp):
    """Fixture-backed mock HTTP layer for the DimensionData backup driver.

    MockHttp dispatches each request to a method named after the URL path
    (non-alphanumerics mapped to underscores) plus the current ``type``
    suffix, so each handler below corresponds to one endpoint/scenario.
    """

    fixtures = BackupFileFixtures('dimensiondata')

    def _oec_0_9_myaccount_UNAUTHORIZED(self, method, url, body, headers):
        return (httplib.UNAUTHORIZED, "", {}, httplib.responses[httplib.UNAUTHORIZED])

    def _oec_0_9_myaccount(self, method, url, body, headers):
        body = self.fixtures.load('oec_0_9_myaccount.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _oec_0_9_myaccount_EXISTS(self, method, url, body, headers):
        body = self.fixtures.load('oec_0_9_myaccount.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _oec_0_9_myaccount_DEFAULT(self, method, url, body, headers):
        body = self.fixtures.load('oec_0_9_myaccount.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _oec_0_9_myaccount_INPROGRESS(self, method, url, body, headers):
        body = self.fixtures.load('oec_0_9_myaccount.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _oec_0_9_myaccount_FAIL(self, method, url, body, headers):
        body = self.fixtures.load('oec_0_9_myaccount.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _oec_0_9_myaccount_NOCLIENT(self, method, url, body, headers):
        body = self.fixtures.load('oec_0_9_myaccount.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _oec_0_9_myaccount_DISABLED(self, method, url, body, headers):
        body = self.fixtures.load('oec_0_9_myaccount.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _oec_0_9_myaccount_NOJOB(self, method, url, body, headers):
        body = self.fixtures.load('oec_0_9_myaccount.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _caas_2_3_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_server_e75ead52_692f_4314_8725_c8a4f4d13a87(self, method, url, body, headers):
        body = self.fixtures.load(
            'server_server_e75ead52_692f_4314_8725_c8a4f4d13a87.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _caas_2_3_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_server_e75ead52_692f_4314_8725_c8a4f4d13a87_DEFAULT(self, method, url, body, headers):
        body = self.fixtures.load(
            'server_server_e75ead52_692f_4314_8725_c8a4f4d13a87_DEFAULT.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _caas_2_3_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_server_e75ead52_692f_4314_8725_c8a4f4d13a87_NOCLIENT(self, method, url, body, headers):
        body = self.fixtures.load(
            'server_server_e75ead52_692f_4314_8725_c8a4f4d13a87_DEFAULT.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _caas_2_3_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_server_e75ead52_692f_4314_8725_c8a4f4d13a87_NOJOB(self, method, url, body, headers):
        body = self.fixtures.load(
            'server_server_e75ead52_692f_4314_8725_c8a4f4d13a87_DEFAULT.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _caas_2_3_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_server_e75ead52_692f_4314_8725_c8a4f4d13a87_DISABLED(self, method, url, body, headers):
        body = self.fixtures.load(
            'server_server_e75ead52_692f_4314_8725_c8a4f4d13a87_DEFAULT.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _caas_2_3_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_server(self, method, url, body, headers):
        body = self.fixtures.load(
            'server_server.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_e75ead52_692f_4314_8725_c8a4f4d13a87_backup_client_type(self, method, url, body, headers):
        body = self.fixtures.load(
            '_backup_client_type.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_e75ead52_692f_4314_8725_c8a4f4d13a87_backup_client_storagePolicy(
            self, method, url, body, headers):
        body = self.fixtures.load(
            '_backup_client_storagePolicy.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_e75ead52_692f_4314_8725_c8a4f4d13a87_backup_client_schedulePolicy(
            self, method, url, body, headers):
        body = self.fixtures.load(
            '_backup_client_schedulePolicy.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_e75ead52_692f_4314_8725_c8a4f4d13a87_backup_client(
            self, method, url, body, headers):
        if method == 'POST':
            body = self.fixtures.load(
                '_backup_client_SUCCESS_PUT.xml')
            return (httplib.OK, body, {}, httplib.responses[httplib.OK])
        else:
            raise ValueError("Unknown Method {0}".format(method))

    def _oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_e75ead52_692f_4314_8725_c8a4f4d13a87_backup_NOCLIENT(
            self, method, url, body, headers):
        # only gets here are implemented
        # If we get any other method something has gone wrong
        assert(method == 'GET')
        body = self.fixtures.load(
            '_backup_INFO_NOCLIENT.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_e75ead52_692f_4314_8725_c8a4f4d13a87_backup_DISABLED(
            self, method, url, body, headers):
        # only gets here are implemented
        # If we get any other method something has gone wrong
        assert(method == 'GET')
        body = self.fixtures.load(
            '_backup_INFO_DISABLED.xml')
        return (httplib.BAD_REQUEST, body, {}, httplib.responses[httplib.OK])

    def _oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_e75ead52_692f_4314_8725_c8a4f4d13a87_backup_NOJOB(
            self, method, url, body, headers):
        # only gets here are implemented
        # If we get any other method something has gone wrong
        assert(method == 'GET')
        body = self.fixtures.load(
            '_backup_INFO_NOJOB.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_e75ead52_692f_4314_8725_c8a4f4d13a87_backup_DEFAULT(
            self, method, url, body, headers):
        # Asserts that the driver sends the default service plan when the
        # caller did not specify one.
        if method != 'POST':
            raise InvalidRequestError('Only POST is accepted for this test')
        request = ET.fromstring(body)
        service_plan = request.get('servicePlan')
        if service_plan != DEFAULT_BACKUP_PLAN:
            raise InvalidRequestError('The default plan %s should have been passed in.  Not %s' % (DEFAULT_BACKUP_PLAN, service_plan))
        body = self.fixtures.load(
            '_backup_ENABLE.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_e75ead52_692f_4314_8725_c8a4f4d13a87_backup(
            self, method, url, body, headers):
        if method == 'POST':
            body = self.fixtures.load(
                '_backup_ENABLE.xml')
            return (httplib.OK, body, {}, httplib.responses[httplib.OK])
        elif method == 'GET':
            if url.endswith('disable'):
                body = self.fixtures.load(
                    '_backup_DISABLE.xml')
                return (httplib.OK, body, {}, httplib.responses[httplib.OK])
            body = self.fixtures.load(
                '_backup_INFO.xml')
            return (httplib.OK, body, {}, httplib.responses[httplib.OK])
        else:
            raise ValueError("Unknown Method {0}".format(method))

    def _caas_2_3_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_server_e75ead52_692f_4314_8725_c8a4f4d13a87_NOBACKUP(
            self, method, url, body, headers):
        assert(method == 'GET')
        body = self.fixtures.load('server_server_NOBACKUP.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_e75ead52_692f_4314_8725_c8a4f4d13a87_backup_EXISTS(
            self, method, url, body, headers):
        # only POSTs are implemented
        # If we get any other method something has gone wrong
        assert(method == 'POST')
        body = self.fixtures.load(
            '_backup_EXISTS.xml')
        return (httplib.BAD_REQUEST, body, {}, httplib.responses[httplib.OK])

    def _oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_e75ead52_692f_4314_8725_c8a4f4d13a87_backup_modify(
            self, method, url, body, headers):
        request = ET.fromstring(body)
        service_plan = request.get('servicePlan')
        if service_plan != 'Essentials':
            raise InvalidRequestError("Expected Essentials backup plan in request")
        body = self.fixtures.load('_backup_modify.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_e75ead52_692f_4314_8725_c8a4f4d13a87_backup_modify_DEFAULT(
            self, method, url, body, headers):
        request = ET.fromstring(body)
        service_plan = request.get('servicePlan')
        if service_plan != DEFAULT_BACKUP_PLAN:
            raise InvalidRequestError("Expected % backup plan in test" % DEFAULT_BACKUP_PLAN)
        body = self.fixtures.load('_backup_modify.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_e75ead52_692f_4314_8725_c8a4f4d13a87_backup_client_30b1ff76_c76d_4d7c_b39d_3b72be0384c8(
            self, method, url, body, headers):
        # Same endpoint serves both client removal ('disable') and job
        # cancellation ('cancelJob'); pick the fixture by URL suffix.
        if url.endswith('disable'):
            body = self.fixtures.load(
                ('_remove_backup_client.xml')
            )
        elif url.endswith('cancelJob'):
            body = self.fixtures.load(
                (''
                '_backup_client_30b1ff76_c76d_4d7c_b39d_3b72be0384c8_cancelJob.xml')
            )
        else:
            raise ValueError("Unknown URL: %s" % url)
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_e75ead52_692f_4314_8725_c8a4f4d13a87_backup_client_30b1ff76_c76d_4d7c_b39d_3b72be0384c8_FAIL(
            self, method, url, body, headers):
        if url.endswith('disable'):
            body = self.fixtures.load(
                ('_remove_backup_client_FAIL.xml')
            )
        elif url.endswith('cancelJob'):
            body = self.fixtures.load(
                (''
                '_backup_client_30b1ff76_c76d_4d7c_b39d_3b72be0384c8_cancelJob_FAIL.xml')
            )
        else:
            raise ValueError("Unknown URL: %s" % url)
        return (httplib.BAD_REQUEST, body, {}, httplib.responses[httplib.OK])
# Allow running this test module directly from the command line.
if __name__ == '__main__':
    sys.exit(unittest.main())
| {
"content_hash": "e2dd54ff9b98d7b7c84aaee0306ce870",
"timestamp": "",
"source": "github",
"line_count": 482,
"max_line_length": 154,
"avg_line_length": 48.051867219917014,
"alnum_prop": 0.6566642200250421,
"repo_name": "Kami/libcloud",
"id": "13039d4c9c3fc4b5a5b8455345b3dfb8a524d7df",
"size": "23943",
"binary": false,
"copies": "10",
"ref": "refs/heads/trunk",
"path": "libcloud/test/backup/test_dimensiondata_v2_3.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1819"
},
{
"name": "HTML",
"bytes": "2545"
},
{
"name": "PowerShell",
"bytes": "410"
},
{
"name": "Python",
"bytes": "9122888"
},
{
"name": "Shell",
"bytes": "12994"
}
],
"symlink_target": ""
} |
/* $NetBSD: cfgetispeed.c,v 1.8 2012/06/25 22:32:46 abs Exp $ */
/*-
* Copyright (c) 1989, 1993
* The Regents of the University of California. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <sys/cdefs.h>
#include <assert.h>
#include <stdio.h>
#include <termios.h>
/*
 * cfgetispeed --
 *	Return the input baud rate stored in the termios structure.
 */
speed_t
cfgetispeed(const struct termios *t)
{

	_DIAGASSERT(t != NULL);

	return t->c_ispeed;
}
| {
"content_hash": "5a577949ffa5d5970120508ce34e1432",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 77,
"avg_line_length": 45.02439024390244,
"alnum_prop": 0.7497291440953413,
"repo_name": "cahirwpz/wifire-os",
"id": "5caf93842c26ebcf59c274e0199054964a6759a2",
"size": "1846",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "lib/libc/termios/cfgetispeed.c",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "23927"
},
{
"name": "C",
"bytes": "150423"
},
{
"name": "Makefile",
"bytes": "4757"
}
],
"symlink_target": ""
} |
namespace io {

/* Wrap a raw sockaddr in the concrete SocketAddress subclass matching its
 * address family.  Throws UnknownSocketFamilyException for families without
 * a mapping. */
SocketAddress* SocketAddress::toSocketAddress( struct sockaddr* addr, socklen_t len ) {
    (void)len; /* length is implied by the family-specific struct */

    switch( addr->sa_family ) {
    case AF_UNSPEC: return new UnspecifiedAddress();
    case AF_INET:   return new Inet4Address( *(struct sockaddr_in*)addr );
    case AF_INET6:  return new Inet6Address( *(struct sockaddr_in6*)addr );
    case AF_UNIX:   return new UnixAddress( *(struct sockaddr_un*)addr );
    default:        break;
    }

    char message[1024];
    snprintf( message, sizeof(message), "Unknown socket family %d", addr->sa_family );
    throw UnknownSocketFamilyException( message );
}

}
| {
"content_hash": "81024ca2c9b1d557a15ea1e2be93253d",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 87,
"avg_line_length": 27.681818181818183,
"alnum_prop": 0.6535303776683087,
"repo_name": "jrahm/OPLabs",
"id": "4ea1b2ae9d2bdfd6b2a5d9158136dc5f1ae75336",
"size": "791",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "mercury/src/io/SocketAddress.cpp",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "C",
"bytes": "1404"
},
{
"name": "C++",
"bytes": "283402"
},
{
"name": "CSS",
"bytes": "52534"
},
{
"name": "HTML",
"bytes": "32482"
},
{
"name": "Java",
"bytes": "103735"
},
{
"name": "Makefile",
"bytes": "2193"
},
{
"name": "Objective-C",
"bytes": "103995"
},
{
"name": "Python",
"bytes": "70599"
},
{
"name": "Shell",
"bytes": "3126"
}
],
"symlink_target": ""
} |
package com.sshtools.forker.client.impl.jna.win32;
import java.nio.ByteBuffer;
import com.sun.jna.Native;
import com.sun.jna.Pointer;
import com.sun.jna.WString;
import com.sun.jna.ptr.IntByReference;
import com.sun.jna.win32.W32APIOptions;
/**
 * Extends JNA Platform Kernel32 a little.
 * <p>
 * Adds direct mappings for a handful of kernel32 functions that the JNA
 * platform interface either does not expose or that are needed here with
 * different parameter types (for example {@link ByteBuffer} payloads and
 * {@code char[]} command lines).  Semantics follow the documented Win32 API.
 */
public interface Kernel32 extends com.sun.jna.platform.win32.Kernel32 {
	/** Shared instance bound to kernel32.dll with the default W32 API (unicode) options. */
	Kernel32 INSTANCE = Native.load("kernel32", Kernel32.class, W32APIOptions.DEFAULT_OPTIONS);
	/**
	 * Reads data from a file or pipe handle into the supplied buffer.
	 *
	 * @param hFile handle to read from
	 * @param lpBuffer destination buffer
	 * @param nNumberOfBytesToRead maximum number of bytes to read
	 * @param lpNumberOfBytesRead receives the number of bytes actually read
	 * @param lpOverlapped overlapped structure for asynchronous I/O, or null for synchronous reads
	 * @return status (non-zero on success, per Win32 convention)
	 */
	int ReadFile(HANDLE hFile, ByteBuffer lpBuffer, int nNumberOfBytesToRead, IntByReference lpNumberOfBytesRead,
			OVERLAPPED lpOverlapped);
	/**
	 * Writes data from the supplied buffer to a file or pipe handle.
	 *
	 * @param hFile handle to write to
	 * @param lpBuffer source buffer
	 * @param nNumberOfBytesToWrite number of bytes to write
	 * @param lpNumberOfBytesWritten receives the number of bytes actually written
	 * @param lpOverlapped overlapped structure for asynchronous I/O, or null for synchronous writes
	 * @return status (non-zero on success, per Win32 convention)
	 */
	int WriteFile(HANDLE hFile, ByteBuffer lpBuffer, int nNumberOfBytesToWrite, IntByReference lpNumberOfBytesWritten,
			OVERLAPPED lpOverlapped);
	/**
	 * Decrements the suspend count of a thread, resuming it when the count reaches zero.
	 *
	 * @param hThread handle of the thread to resume
	 * @return status (previous suspend count, or -1 cast to DWORD on failure, per Win32 docs)
	 */
	DWORD ResumeThread(HANDLE hThread);
	/**
	 * Creates a new process (unicode variant).  Mapped here with a mutable
	 * {@code char[]} command line because Win32 may modify that buffer.
	 *
	 * @param lpApplicationName module to execute, or null to take it from the command line
	 * @param lpCommandLine command line to execute (mutable buffer)
	 * @param lpProcessAttributes security attributes for the new process, or null
	 * @param lpThreadAttributes security attributes for its primary thread, or null
	 * @param bInheritHandles whether inheritable handles are passed to the child
	 * @param dwCreationFlags process creation flags (e.g. CREATE_SUSPENDED)
	 * @param lpEnvironment environment block, or null to inherit
	 * @param lpCurrentDirectory working directory for the child, or null to inherit
	 * @param lpStartupInfo startup configuration (std handles, window state, ...)
	 * @param lpProcessInformation receives process/thread handles and ids
	 * @return status (true on success)
	 */
	boolean CreateProcessW(WString lpApplicationName, char[] lpCommandLine, SECURITY_ATTRIBUTES lpProcessAttributes,
			SECURITY_ATTRIBUTES lpThreadAttributes, boolean bInheritHandles, DWORD dwCreationFlags, Pointer lpEnvironment,
			char[] lpCurrentDirectory, STARTUPINFO lpStartupInfo, PROCESS_INFORMATION lpProcessInformation);
	/**
	 * Opens or creates a file, pipe or other kernel object.
	 *
	 * @param lpFileName name of the object to open
	 * @param dwDesiredAccess requested access (GENERIC_READ, GENERIC_WRITE, ...)
	 * @param dwShareMode sharing mode (0 for exclusive)
	 * @param lpSecurityAttributes security attributes, or null for defaults
	 * @param dwCreationDisposition action for existing/missing files (OPEN_EXISTING, CREATE_ALWAYS, ...)
	 * @param dwFlagsAndAttributes file attributes and flags (e.g. FILE_FLAG_OVERLAPPED)
	 * @param hTemplateFile template file handle, or null
	 * @return status (a valid handle, or INVALID_HANDLE_VALUE on failure per Win32 docs)
	 */
	HANDLE CreateFile(WString lpFileName, int dwDesiredAccess, int dwShareMode,
			SECURITY_ATTRIBUTES lpSecurityAttributes, int dwCreationDisposition, int dwFlagsAndAttributes, HANDLE hTemplateFile);
	/**
	 * Creates a named pipe server endpoint (unicode variant).
	 *
	 * @param name pipe name of the form \\.\pipe\&lt;name&gt;
	 * @param dwOpenMode access/overlap mode (PIPE_ACCESS_DUPLEX, ...)
	 * @param dwPipeMode type, read and wait modes (PIPE_TYPE_BYTE, ...)
	 * @param nMaxInstances maximum concurrent instances of the pipe
	 * @param nOutBufferSize suggested outbound buffer size in bytes
	 * @param nInBufferSize suggested inbound buffer size in bytes
	 * @param nDefaultTimeOut default wait timeout in milliseconds
	 * @param securityAttributes security attributes, or null for defaults
	 * @return status (a valid pipe handle, or INVALID_HANDLE_VALUE on failure per Win32 docs)
	 */
	HANDLE CreateNamedPipeW(WString name, int dwOpenMode, int dwPipeMode, int nMaxInstances,
			int nOutBufferSize, int nInBufferSize, int nDefaultTimeOut, SECURITY_ATTRIBUTES securityAttributes);
	/** BOOL SetCurrentDirectory( LPCTSTR lpPathName ); sets this process's working directory. */
	int SetCurrentDirectoryW(char[] pathName);
}
| {
"content_hash": "72a4f17cae08d86c365dd469cab2da4b",
"timestamp": "",
"source": "github",
"line_count": 92,
"max_line_length": 120,
"avg_line_length": 29.847826086956523,
"alnum_prop": 0.7611070648215587,
"repo_name": "sshtools/forker",
"id": "28c9fdae9bc13ba928937cbb8075770eb6e8bbcf",
"size": "3379",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "forker-client/src/main/java/com/sshtools/forker/client/impl/jna/win32/Kernel32.java",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "148"
},
{
"name": "Io",
"bytes": "66"
},
{
"name": "Java",
"bytes": "878927"
},
{
"name": "JavaScript",
"bytes": "1157"
}
],
"symlink_target": ""
} |
-- Inventory report: one row per SQL Agent job, joining msdb schedule
-- metadata and rendering each schedule as a human-readable sentence
-- (frequency type, day/week/month pattern, sub-day repetition, time and
-- date bounds), plus the schedule status and computed next run time.
-- Bitmask notes: for weekly schedules (freq_type = 8) freq_interval is a
-- day-of-week bitmask (1=Sunday ... 64=Saturday).
SELECT @@SERVERNAME as [Server], j.[name] as [Job Name], CASE j.[enabled] WHEN 1 THEN 'Enabled' ELSE 'Disabled' END AS [Job Status],
-- Type of Schedule
CASE freq_type
WHEN 1 THEN 'One time, occurs at ' + CONVERT(varchar(15), CONVERT(time, STUFF(STUFF(RIGHT('000000' + CONVERT(varchar(6), active_start_time), 6), 3, 0, ':'), 6, 0, ':')), 100) + ' on ' + CONVERT(varchar, CONVERT(datetime,CONVERT(char(8), s.active_start_date)), 101)
WHEN 64 THEN 'When SQL Server Agent Service starts'
WHEN 128 THEN 'When the Server is idle'
ELSE ''
END +
-- Frequency of type
CASE
WHEN freq_type = 4 THEN 'Occurs every ' +
	CASE s.freq_interval
		WHEN 1 THEN 'day'
		ELSE CONVERT(varchar, s.freq_interval) + ' day(s)'
	END
WHEN freq_type = 8 THEN 'Occurs every ' +
	CASE s.freq_recurrence_factor
		WHEN 1 THEN 'week on '
		ELSE CONVERT(varchar, s.freq_recurrence_factor) + ' week(s) on '
	END +
	REPLACE(RTRIM(
		CASE WHEN s.freq_interval & 1 = 1 THEN 'Sunday ' ELSE '' END +
		CASE WHEN s.freq_interval & 2 = 2 THEN 'Monday ' ELSE '' END +
		CASE WHEN s.freq_interval & 4 = 4 THEN 'Tuesday ' ELSE '' END +
		CASE WHEN s.freq_interval & 8 = 8 THEN 'Wednesday ' ELSE '' END +
		CASE WHEN s.freq_interval & 16 = 16 THEN 'Thursday ' ELSE '' END +
		CASE WHEN s.freq_interval & 32 = 32 THEN 'Friday ' ELSE '' END +
		CASE WHEN s.freq_interval & 64 = 64 THEN 'Saturday ' ELSE '' END), ' ', ', ')
WHEN freq_type = 16 THEN 'Occurs every ' +
	CASE s.freq_recurrence_factor
		WHEN 1 THEN 'month on day '
		ELSE CONVERT(varchar, s.freq_recurrence_factor) + ' month(s) on day '
	END + CONVERT(varchar(2), s.freq_interval)
WHEN freq_type = 32 THEN 'Occurs every ' +
	CASE s.freq_recurrence_factor
		WHEN 1 THEN 'month on the '
		ELSE CONVERT(varchar, s.freq_recurrence_factor) + ' month(s) on the '
	END +
	CASE s.freq_relative_interval WHEN 1 THEN 'first ' WHEN 2 THEN 'second ' WHEN 4 THEN 'third ' WHEN 8 THEN 'fourth ' WHEN 16 THEN 'last ' END +
	CASE s.freq_interval WHEN 1 THEN 'Sunday' WHEN 2 THEN 'Monday' WHEN 3 THEN 'Tuesday' WHEN 4 THEN 'Wednesday' WHEN 5 THEN 'Thursday' WHEN 6 THEN 'Friday' WHEN 7 THEN 'Saturday' WHEN 8 THEN 'day' WHEN 9 THEN 'weekday' WHEN 10 THEN 'weekend' END
ELSE ''
END +
-- Frequency of time
CASE s.freq_subday_type
WHEN 1 THEN ' at ' + CONVERT(varchar(15), CONVERT(time, STUFF(STUFF(RIGHT('000000' + CONVERT(varchar(6), active_start_time), 6), 3, 0, ':'), 6, 0, ':')), 100)
WHEN 2 THEN ', every ' + CONVERT(varchar, freq_subday_interval) + ' second(s)'
WHEN 4 THEN ', every ' + CONVERT(varchar, freq_subday_interval) + ' minute(s)'
WHEN 8 THEN ', every ' + CONVERT(varchar, freq_subday_interval) + ' hour(s)'
ELSE ''
END +
-- Time bounds
CASE s.freq_subday_type
WHEN 0 THEN ''
WHEN 1 THEN ''
ELSE ' between ' + CONVERT(varchar(15), CONVERT(time, STUFF(STUFF(RIGHT('000000' + CONVERT(varchar(6),s.active_start_time),6 ),3,0,':'),6,0,':')), 100) + ' and ' + CONVERT(varchar(15), CONVERT(time, STUFF(STUFF(RIGHT('000000' + CONVERT(varchar(6),active_end_time),6 ),3,0,':'),6,0,':')), 100)
END +
-- Date bounds
'. Schedule will be used starting on ' + CONVERT(varchar, CONVERT(datetime,CONVERT(char(8), s.active_start_date)), 101) +
CASE active_end_date
WHEN '99991231' THEN ''
ELSE ' and ending on ' + CONVERT(varchar, CONVERT(datetime,CONVERT(char(8), s.active_end_date)), 101)
END AS [Schedule],
CASE s.[enabled] WHEN 1 THEN 'Enabled' WHEN 0 THEN 'Disabled' ELSE NULL END AS [Schedule Status],
CASE js.next_run_date WHEN 0 THEN NULL ELSE CONVERT(varchar, msdb.dbo.agent_datetime(js.next_run_date, js.next_run_time), 120) END AS [Next Run Date]
FROM msdb.dbo.sysjobs j
LEFT OUTER JOIN msdb.dbo.sysjobschedules js on j.job_id = js.job_id
LEFT OUTER JOIN msdb.dbo.sysschedules s on js.schedule_id = s.schedule_id
ORDER BY j.name ASC
"content_hash": "fb1d71c4047381f98911ac14823177f5",
"timestamp": "",
"source": "github",
"line_count": 68,
"max_line_length": 293,
"avg_line_length": 56.1764705882353,
"alnum_prop": 0.6664921465968586,
"repo_name": "codedecay/Scripts",
"id": "a41b5d78a520f5daaa87b58b252cd6e4338715e2",
"size": "3898",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "SQL/Inventory/Job_Schedule_Description.sql",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "PowerShell",
"bytes": "267494"
},
{
"name": "SQLPL",
"bytes": "4277"
}
],
"symlink_target": ""
} |
"""
Copyright 2020 Google LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from rest_framework import serializers
from django.contrib.auth.models import User, Group
from main.models import Image, BasicUser, Project, AnnotationsJson
class BasicUserSerializer(serializers.ModelSerializer):
    """Flat representation of a BasicUser plus primary keys of owned objects."""
    # Reverse relations exposed as lists of primary keys; the querysets
    # define which related objects are acceptable on write.
    images_by_user = serializers.PrimaryKeyRelatedField(many=True, queryset=Image.objects.all())
    projects_by_user = serializers.PrimaryKeyRelatedField(many=True, queryset=Project.objects.all())
    annotations_by_user = serializers.PrimaryKeyRelatedField(many=True, queryset=AnnotationsJson.objects.all())
    class Meta:
        model = BasicUser
        fields = ['id', 'display_name', 'email', 'projects_by_user', 'images_by_user', 'annotations_by_user']
def get_authenticated_user(validated_data):
    """Resolve the owning ``User`` from ``validated_data['owner_email']``.

    The ``owner_email`` key is removed from ``validated_data`` as a side
    effect so it is not passed on to the model constructor.  Raises the
    model's ``DoesNotExist`` when no account matches the email.
    """
    return User.objects.get(email=validated_data.pop("owner_email"))
class ProjectSerializer(serializers.ModelSerializer):
    """Serializes a Project; the owner is surfaced read-only as an email."""
    # images = serializers.PrimaryKeyRelatedField(many=True, queryset=Image.objects.all())
    owner = serializers.ReadOnlyField(source='owner.email')
    class Meta:
        model = Project
        fields = ['id', 'name', 'owner', 'labels_json']
    def create(self, validated_data, *args, **kwargs):
        # "owner_email" is not a declared field, so it must be supplied by
        # the caller -- presumably via serializer.save(owner_email=...);
        # TODO confirm against the view.
        owner = get_authenticated_user(validated_data)
        return Project.objects.create(owner=owner, **validated_data)
class ImageSerializer(serializers.ModelSerializer):
    """Serializes an Image with read-only owner email and parent project id."""
    owner = serializers.ReadOnlyField(source='owner.email')
    project_id = serializers.ReadOnlyField(source='part_of_project.id')
    class Meta:
        model = Image
        fields = ['id', 'title', 'description', 'owner', 'image', 'project_id']
    def create(self, validated_data, *args, **kwargs):
        # "owner_email" and "project_id" are not writable declared fields;
        # both must be supplied by the caller -- presumably via
        # serializer.save(owner_email=..., project_id=...); TODO confirm.
        owner = get_authenticated_user(validated_data)
        project_id = validated_data.pop("project_id")
        return Image.objects.create(owner=owner, part_of_project=Project.objects.get(id=project_id), **validated_data)
class AnnotationsJsonSerializer(serializers.ModelSerializer):
    """Serializes annotation JSON with read-only owner email and image id."""
    #images = serializers.PrimaryKeyRelatedField(many=True, queryset=Image.objects.all())
    owner = serializers.ReadOnlyField(source='owner.email')
    image_id = serializers.ReadOnlyField(source='on_image.id')
    class Meta:
        model = AnnotationsJson
        fields = ['id', 'owner', 'content_json', "image_id"]
    def create(self, validated_data, *args, **kwargs):
        # "owner_email" and "image_id" are not writable declared fields;
        # both must be supplied by the caller -- presumably via
        # serializer.save(owner_email=..., image_id=...); TODO confirm.
        owner = get_authenticated_user(validated_data)
        image_id = validated_data.pop("image_id")
        return AnnotationsJson.objects.create(owner=owner, on_image=Image.objects.get(id=image_id), **validated_data)
class GroupSerializer(serializers.ModelSerializer):
    """Serializes Django auth groups (id and name only)."""
    class Meta:
        model = Group
        fields = ['id','name',]
    def create(self, validated_data, *args, **kwargs):
        # Straight pass-through to the ORM.
        return Group.objects.create(**validated_data)
class UserSerializer(serializers.ModelSerializer):
    """Full user representation with nested owned objects.

    Each relation is exposed twice: a nested read-only representation and a
    write-only primary-key list mapped onto the same source attribute.
    """
    images_by_user = ImageSerializer(read_only=True, many=True)
    images_by_user_id = serializers.PrimaryKeyRelatedField(write_only=True, source='images_by_user', many=True, queryset=Image.objects.all())
    projects_by_user = ProjectSerializer(read_only=True, many=True)
    projects_by_user_id = serializers.PrimaryKeyRelatedField(write_only=True, source='projects_by_user', many=True, queryset=Project.objects.all())
    annotations_by_user = AnnotationsJsonSerializer(read_only=True, many=True)
    annotations_by_user_id = serializers.PrimaryKeyRelatedField(write_only=True, source='annotations_by_user', many=True, queryset=AnnotationsJson.objects.all())
    groups = GroupSerializer(many=True)
    class Meta:
        model = User
        fields = ['email', 'projects_by_user', 'projects_by_user_id', 'images_by_user', 'images_by_user_id', 'annotations_by_user', 'annotations_by_user_id', 'groups',]
| {
"content_hash": "6e175d47745b8312f396c0f2a964ef11",
"timestamp": "",
"source": "github",
"line_count": 107,
"max_line_length": 168,
"avg_line_length": 41.63551401869159,
"alnum_prop": 0.7207631874298541,
"repo_name": "kartta-labs/noter-backend",
"id": "1ffad3cff4511a08e683410821a44fe468a54211",
"size": "4455",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "noter_backend/main/serializers.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1889"
},
{
"name": "Python",
"bytes": "56419"
},
{
"name": "Shell",
"bytes": "2057"
}
],
"symlink_target": ""
} |
using Newtonsoft.Json;
namespace BambooTray.Domain.Resources
{
// ReSharper disable ClassNeverInstantiated.Global
// ReSharper disable UnusedAutoPropertyAccessor.Global
// ReSharper disable UnusedMember.Global
    /// <summary>
    /// A single build result as returned by the Bamboo REST API.
    /// </summary>
    public class Result
    {
        /// <summary>Plan this build belongs to.</summary>
        [JsonProperty("plan")]
        public PlanDetailResonse Plan { get; set; }
        /// <summary>Life-cycle state reported by Bamboo for this build.</summary>
        [JsonProperty("lifeCycleState")]
        public string LifeCycleState { get; set; }
        /// <summary>Numeric identifier of the result.</summary>
        [JsonProperty("id")]
        public int Id { get; set; }
        /// <summary>Result key (plan key plus build number).</summary>
        [JsonProperty("key")]
        public string Key { get; set; }
        /// <summary>Build state reported by Bamboo for this build.</summary>
        [JsonProperty("state")]
        public string State { get; set; }
        /// <summary>Build number within the plan.</summary>
        [JsonProperty("number")]
        public int Number { get; set; }
        /// <summary>Extra detail fetched separately; not part of the JSON payload.</summary>
        public ResultDetailResponse Detail { get; set; }
    }
} | {
"content_hash": "a605437684c823aef6cfc3f303bdbf9c",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 58,
"avg_line_length": 27,
"alnum_prop": 0.6012345679012345,
"repo_name": "joebuschmann/bambootray",
"id": "d203527da7fffa8ff43edad032d0a0ff4b76ccf2",
"size": "812",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "BambooTray.Domain/Resources/Result.cs",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C#",
"bytes": "65598"
},
{
"name": "PowerShell",
"bytes": "130"
}
],
"symlink_target": ""
} |
- Added: `keyframe-declaration-no-important` rule.
- Added: `selector-pseudo-class-no-unknown` rule.
- Added: `selector-type-no-unknown` rule.
# 7.0.0
- Added: `at-rule-name-space-after` rule.
- Added: `function-max-empty-lines` rule.
- Added: `no-extra-semicolons` rule.
- Added: `selector-attribute-brackets-space-inside` rule.
- Added: `selector-attribute-operator-space-after` rule.
- Added: `selector-attribute-operator-space-before` rule.
- Added: `selector-max-empty-lines` rule.
- Added: `selector-pseudo-class-parentheses-space-inside` rule.
- Added: `selector-pseudo-element-no-unknown` rule.
- Added: `shorthand-property-no-redundant-values` rule.
# 6.0.0
- Added: `at-rule-name-case` rule.
- Added: `at-rule-semicolon-newline-after` rule.
- Added: `function-name-case` rule.
- Added: `property-case` rule.
- Added: `selector-pseudo-class-case` rule.
- Added: `selector-pseudo-element-case` rule.
- Added: `selector-type-case` rule.
- Added: `unit-case` rule.
- Added: `unit-no-unknown` rule.
# 5.0.0
- Removed: `font-family-name-quotes`, `function-url-quotes` and `string-quotes` rules.
- Added: `declaration-block-no-ignored-properties` rule.
# 4.0.1
- Fixed: include peerDependencies in `package.json` to expose compatibility.
# 4.0.0
- Removed: `stylelint < 4.5.0` compatibility.
- Added: `font-family-name-quotes` rule with `"double-where-recommended"` option.
- Added: `function-linear-gradient-no-nonstandard-direction` rule.
- Added: `media-feature-no-missing-punctuation` rule.
- Added: `no-invalid-double-slash-comments` rule.
- Added: `string-no-newline` rule.
# 3.0.0
- Changed: first-nested at-rules now behave the same as first-nested comments i.e. they can no longer be preceded by an empty line.
# 2.0.0
- Changed: first-nested comments can no longer be preceded by an empty line.
- Fixed: `comment-empty-line-before` now ignores `stylelint` command comments.
# 1.0.0
- Fixed: more forgiving empty lines rules when comments are present i.e. the `rule-non-nested-empty-line-before` and `at-rule-empty-line-before` now make use of the `ignore: ["after-comment"]` option.
# 0.2.0
- Added: `block-no-empty` rule.
# 0.1.0
- Initial release
| {
"content_hash": "6b8d125125132655bc1722253a6e4d07",
"timestamp": "",
"source": "github",
"line_count": 67,
"max_line_length": 200,
"avg_line_length": 32.56716417910448,
"alnum_prop": 0.7236480293308891,
"repo_name": "PanJ/SimplerCityGlide",
"id": "c29d26422dc32bf0ab741fdc06ac84f324a6f7ea",
"size": "2191",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "node_modules/stylelint-config-standard/CHANGELOG.md",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "ApacheConf",
"bytes": "1526"
},
{
"name": "CSS",
"bytes": "5425"
},
{
"name": "HTML",
"bytes": "7506"
},
{
"name": "JavaScript",
"bytes": "124091"
}
],
"symlink_target": ""
} |
package org.apache.carbondata.processing.loading;
import org.apache.carbondata.common.CarbonIterator;
import org.apache.carbondata.common.logging.LogServiceFactory;
import org.apache.carbondata.core.metadata.CarbonTableIdentifier;
import org.apache.carbondata.processing.loading.exception.BadRecordFoundException;
import org.apache.carbondata.processing.loading.exception.CarbonDataLoadingException;
import org.apache.carbondata.processing.loading.exception.NoRetryException;
import org.apache.carbondata.processing.loading.model.CarbonLoadModel;
import org.apache.carbondata.processing.util.CarbonBadRecordUtil;
import org.apache.log4j.Logger;
/**
 * Executes a data load by building and running the configured
 * {@link AbstractDataLoadProcessorStep} pipeline.
 */
public class DataLoadExecutor {

  private static final Logger LOGGER =
      LogServiceFactory.getLogService(DataLoadExecutor.class.getName());

  private AbstractDataLoadProcessorStep loadProcessorStep;

  private boolean isClosed;

  /**
   * Builds the processing pipeline for the given load model and runs it.
   *
   * @param loadModel describes the table and load configuration
   * @param storeLocation store locations used while loading
   * @param inputIterators iterators supplying the input rows
   * @throws NoRetryException when bad records were found (retrying cannot help)
   * @throws CarbonDataLoadingException for any other load failure
   */
  public void execute(CarbonLoadModel loadModel, String[] storeLocation,
      CarbonIterator<Object[]>[] inputIterators) throws Exception {
    try {
      loadProcessorStep =
          new DataLoadProcessBuilder().build(loadModel, storeLocation, inputIterators);
      // 1. initialize
      loadProcessorStep.initialize();
      LOGGER.info("Data Loading is started for table " + loadModel.getTableName());
      // 2. execute the step
      loadProcessorStep.execute();
      // check and remove any bad record key from bad record entry logger static map
      if (CarbonBadRecordUtil.hasBadRecord(loadModel)) {
        LOGGER.error("Data Load is partially success for table " + loadModel.getTableName());
      }
    } catch (CarbonDataLoadingException e) {
      // Bad records are terminal: signal callers not to retry the load.
      if (e instanceof BadRecordFoundException) {
        throw new NoRetryException(e.getMessage());
      }
      throw e;
    } catch (Exception e) {
      LOGGER.error("Data Loading failed for table " + loadModel.getTableName(), e);
      throw new CarbonDataLoadingException(
          "Data Loading failed for table " + loadModel.getTableName(), e);
    }
  }

  /**
   * Returns whether a bad record key is registered for the given table.
   *
   * @param carbonTableIdentifier identifies the table being loaded
   * @return true when the bad record logger holds an entry for the table
   */
  private boolean badRecordFound(CarbonTableIdentifier carbonTableIdentifier) {
    // Simplified from an if/flag sequence to a single boolean expression.
    return null != BadRecordsLogger.hasBadRecord(carbonTableIdentifier.getBadRecordLoggerKey());
  }

  /**
   * Releases pipeline resources; safe to call more than once.
   */
  public void close() {
    if (!isClosed && loadProcessorStep != null) {
      loadProcessorStep.close();
    }
    isClosed = true;
  }
}
| {
"content_hash": "47aec9c6a58f954bb07f2ccc60f6dcd5",
"timestamp": "",
"source": "github",
"line_count": 79,
"max_line_length": 93,
"avg_line_length": 34.56962025316456,
"alnum_prop": 0.7301354815086049,
"repo_name": "ravipesala/incubator-carbondata",
"id": "11841f9eb4e08e0dbf6e1ac26625a0b21dbd4f1b",
"size": "3531",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "processing/src/main/java/org/apache/carbondata/processing/loading/DataLoadExecutor.java",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1639"
},
{
"name": "C++",
"bytes": "104618"
},
{
"name": "CMake",
"bytes": "1555"
},
{
"name": "Java",
"bytes": "7320389"
},
{
"name": "Python",
"bytes": "19915"
},
{
"name": "Scala",
"bytes": "10982356"
},
{
"name": "Shell",
"bytes": "4349"
},
{
"name": "Smalltalk",
"bytes": "86"
},
{
"name": "Thrift",
"bytes": "26649"
}
],
"symlink_target": ""
} |
var app = require('express')();
var wizard = require('hmpo-form-wizard');
var steps = require('./steps');
var fields = require('./fields');
var mixins = require('hmpo-template-mixins');

// Template mixins (translations etc.) must be registered before the wizard
// so step templates can use them.
app.use(mixins(fields, { sharedTranslationKey: 'prototype' }));
app.use(wizard(steps, fields, { templatePath: 'prototype_oix_170518/startpage-oix' }));

module.exports = app;
| {
"content_hash": "aa5adb962cdadaf9cfe4b1d926322f48",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 88,
"avg_line_length": 34.2,
"alnum_prop": 0.672514619883041,
"repo_name": "UKHomeOffice/passports-prototype",
"id": "3e56cf9b80f1abdc910d414b74ac44955dec111d",
"size": "342",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "routes/prototype_oix_170518/startpage-oix/index.js",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "39324"
},
{
"name": "HTML",
"bytes": "6619551"
},
{
"name": "JavaScript",
"bytes": "1250249"
}
],
"symlink_target": ""
} |
<!DOCTYPE html>
<!--
     Any copyright is dedicated to the Public Domain.
     http://creativecommons.org/publicdomain/zero/1.0/
-->
<html>
  <head>
    <meta charset="utf-8">
    <title>CSS Test: 'object-fit: scale-down' on video element, with a SVG image and with various 'object-position' values</title>
    <link rel="author" title="Daniel Holbert" href="mailto:dholbert@mozilla.com">
    <link rel="help" href="http://www.w3.org/TR/css3-images/#sizing">
    <link rel="help" href="http://www.w3.org/TR/css3-images/#the-object-fit">
    <link rel="help" href="http://www.w3.org/TR/css3-images/#the-object-position">
    <link rel="match" href="object-fit-scale-down-svg-001-ref.html">
    <!-- Reftest layout: three rows of <video> posters (wide box, tall box,
         small box), each exercising a different object-position value via
         the two-letter classes defined below. -->
    <style type="text/css">
      video {
        border: 1px dashed gray;
        padding: 1px;
        object-fit: scale-down;
        float: left;
      }
      .bigWide {
        width: 48px;
        height: 32px;
      }
      .bigTall {
        width: 32px;
        height: 48px;
      }
      .small {
        width: 8px;
        height: 8px;
      }
      br { clear: both; }

      .tr { object-position: top right }
      .bl { object-position: bottom left }
      .tl { object-position: top 25% left 25% }
      .br { object-position: bottom 1px right 2px }
      .tc { object-position: top 3px left 50% }
      .cr { object-position: top 50% right 25% }
    </style>
  </head>
  <body>
    <!-- big/wide: -->
    <video poster="support/colors-16x8.svg" class="bigWide tr"></video>
    <video poster="support/colors-16x8.svg" class="bigWide bl"></video>
    <video poster="support/colors-16x8.svg" class="bigWide tl"></video>
    <video poster="support/colors-16x8.svg" class="bigWide br"></video>
    <video poster="support/colors-16x8.svg" class="bigWide tc"></video>
    <video poster="support/colors-16x8.svg" class="bigWide cr"></video>
    <video poster="support/colors-16x8.svg" class="bigWide"></video>
    <br>
    <!-- big/tall: -->
    <video poster="support/colors-16x8.svg" class="bigTall tr"></video>
    <video poster="support/colors-16x8.svg" class="bigTall bl"></video>
    <video poster="support/colors-16x8.svg" class="bigTall tl"></video>
    <video poster="support/colors-16x8.svg" class="bigTall br"></video>
    <video poster="support/colors-16x8.svg" class="bigTall tc"></video>
    <video poster="support/colors-16x8.svg" class="bigTall cr"></video>
    <video poster="support/colors-16x8.svg" class="bigTall"></video>
    <br>
    <!-- small: -->
    <video poster="support/colors-16x8.svg" class="small tr"></video>
    <video poster="support/colors-16x8.svg" class="small bl"></video>
    <video poster="support/colors-16x8.svg" class="small tl"></video>
    <video poster="support/colors-16x8.svg" class="small br"></video>
    <video poster="support/colors-16x8.svg" class="small tc"></video>
    <video poster="support/colors-16x8.svg" class="small cr"></video>
    <video poster="support/colors-16x8.svg" class="small"></video>
    <br>
  </body>
</html>
| {
"content_hash": "d94a496e8fb4649ae8f3b7bb9e299a9f",
"timestamp": "",
"source": "github",
"line_count": 76,
"max_line_length": 130,
"avg_line_length": 39.35526315789474,
"alnum_prop": 0.6258776328986961,
"repo_name": "scheib/chromium",
"id": "ab29b635cc06a05cd1cdeadf929ed5edd3195dd3",
"size": "2991",
"binary": false,
"copies": "31",
"ref": "refs/heads/main",
"path": "third_party/blink/web_tests/external/wpt/css/css-images/object-fit-scale-down-svg-001p.html",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
/* TEMPLATE GENERATED TESTCASE FILE
Filename: CWE194_Unexpected_Sign_Extension__negative_strncpy_84_bad.cpp
Label Definition File: CWE194_Unexpected_Sign_Extension.label.xml
Template File: sources-sink-84_bad.tmpl.cpp
*/
/*
* @description
* CWE: 194 Unexpected Sign Extension
* BadSource: negative Set data to a fixed negative number
* GoodSource: Positive integer
* Sinks: strncpy
* BadSink : Copy strings using strncpy() with the length of data
* Flow Variant: 84 Data flow: data passed to class constructor and destructor by declaring the class object on the heap and deleting it after use
*
* */
#ifndef OMITBAD
#include "std_testcase.h"
#include "CWE194_Unexpected_Sign_Extension__negative_strncpy_84.h"
namespace CWE194_Unexpected_Sign_Extension__negative_strncpy_84
{
/* NOTE(review): this is a Juliet/CWE benchmark "bad" variant.  The defects
 * marked FLAW below are intentional test fixtures and must NOT be fixed. */
/* Constructor is the "source": it deliberately overwrites the supplied
 * value with a fixed negative number. */
CWE194_Unexpected_Sign_Extension__negative_strncpy_84_bad::CWE194_Unexpected_Sign_Extension__negative_strncpy_84_bad(short dataCopy)
{
    data = dataCopy;
    /* FLAW: Use a negative number */
    data = -1;
}
/* Destructor is the "sink": the negative short is sign-extended when passed
 * as strncpy's size_t length argument, producing a huge copy length. */
CWE194_Unexpected_Sign_Extension__negative_strncpy_84_bad::~CWE194_Unexpected_Sign_Extension__negative_strncpy_84_bad()
{
    {
        char source[100];
        char dest[100] = "";
        memset(source, 'A', 100-1);
        source[100-1] = '\0';
        if (data < 100)
        {
            /* POTENTIAL FLAW: data is interpreted as an unsigned int - if its value is negative,
             * the sign extension could result in a very large number */
            strncpy(dest, source, data);
            dest[data] = '\0'; /* strncpy() does not always NULL terminate */
        }
        printLine(dest);
    }
}
}
#endif /* OMITBAD */
| {
"content_hash": "e211cb59f4469b2d70607793e66f8fd3",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 146,
"avg_line_length": 35.166666666666664,
"alnum_prop": 0.6617298578199052,
"repo_name": "maurer/tiamat",
"id": "894bc3f4b25a741d1402cde9bf845fb6975deb7a",
"size": "1688",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "samples/Juliet/testcases/CWE194_Unexpected_Sign_Extension/s02/CWE194_Unexpected_Sign_Extension__negative_strncpy_84_bad.cpp",
"mode": "33188",
"license": "mit",
"language": [],
"symlink_target": ""
} |
import subprocess
from typing import List
import rich_click as click
# Python versions for which CI and prod images are published.
PYTHON_VERSIONS = ["3.7", "3.8", "3.9"]
# Container registry host holding the images.
GHCR_IO_PREFIX = "ghcr.io"
# Image name templates; placeholders are filled per repo/branch/python version.
GHCR_IO_IMAGES = [
    "{prefix}/{repo}/{branch}/ci/python{python_version}:latest",
    "{prefix}/{repo}/{branch}/prod/python{python_version}:latest",
]
# noinspection StrFormat
def pull_push_all_images(
    source_prefix: str,
    target_prefix: str,
    images: List[str],
    source_branch: str,
    source_repo: str,
    target_branch: str,
    target_repo: str,
):
    """Retag every image template for every Python version.

    For each (python_version, template) pair the source image is pulled,
    tagged with the target coordinates and pushed.  Any failing docker
    command aborts the run (``check=True``).
    """
    for python_version in PYTHON_VERSIONS:
        for template in images:
            source_image = template.format(
                prefix=source_prefix, branch=source_branch, repo=source_repo, python_version=python_version
            )
            target_image = template.format(
                prefix=target_prefix, branch=target_branch, repo=target_repo, python_version=python_version
            )
            print(f"Copying image: {source_image} -> {target_image}")
            for command in (
                ["docker", "pull", source_image],
                ["docker", "tag", source_image, target_image],
                ["docker", "push", target_image],
            ):
                subprocess.run(command, check=True)
@click.group(invoke_without_command=True)
@click.option("--source-branch", type=str, default="main", help="Source branch name [main]")
@click.option("--target-branch", type=str, default="main", help="Target branch name [main]")
@click.option("--source-repo", type=str, default="apache/airflow", help="Source repo")
@click.option("--target-repo", type=str, default="apache/airflow", help="Target repo")
def main(
    source_branch: str,
    target_branch: str,
    source_repo: str,
    target_repo: str,
):
    # Retag all ghcr.io images from the source branch/repo to the target one.
    pull_push_all_images(
        GHCR_IO_PREFIX,
        GHCR_IO_PREFIX,
        GHCR_IO_IMAGES,
        source_branch,
        source_repo,
        target_branch,
        target_repo,
    )


if __name__ == "__main__":
    main()
| {
"content_hash": "a2985ce80b7acb21bc45dda59ee6ef03",
"timestamp": "",
"source": "github",
"line_count": 58,
"max_line_length": 110,
"avg_line_length": 32.8448275862069,
"alnum_prop": 0.6409448818897637,
"repo_name": "bolkedebruin/airflow",
"id": "bcb81c55223f83811ce5dc8c8f06553314c88a0a",
"size": "3064",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dev/retag_docker_images.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "25286"
},
{
"name": "Dockerfile",
"bytes": "40459"
},
{
"name": "HCL",
"bytes": "3786"
},
{
"name": "HTML",
"bytes": "157840"
},
{
"name": "JavaScript",
"bytes": "167972"
},
{
"name": "Jinja",
"bytes": "33382"
},
{
"name": "Jupyter Notebook",
"bytes": "2933"
},
{
"name": "Mako",
"bytes": "1339"
},
{
"name": "Python",
"bytes": "19287942"
},
{
"name": "Shell",
"bytes": "645244"
},
{
"name": "TypeScript",
"bytes": "173854"
}
],
"symlink_target": ""
} |
from django import forms
from . import models
class ThoughtForm(forms.ModelForm):
    """ModelForm exposing the ``condition`` and ``notes`` fields of Thought."""

    class Meta:
        model = models.Thought
        fields = ('condition', 'notes')
"content_hash": "fdb32d64b2fe5f85c8337a0f535d12c9",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 39,
"avg_line_length": 19,
"alnum_prop": 0.6666666666666666,
"repo_name": "treehouse/livestream-django-feelings",
"id": "a38eb3232e52925c352e40ed085c1b625f609ec2",
"size": "171",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "feelings/thoughts/forms.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "65"
},
{
"name": "HTML",
"bytes": "18469"
},
{
"name": "JavaScript",
"bytes": "1252960"
},
{
"name": "Python",
"bytes": "38118"
}
],
"symlink_target": ""
} |
<?php
namespace oasis\names\specification\ubl\schema\xsd\CommonBasicComponents_2;
/**
 * UBL CommonBasicComponents-2 "CoordinateSystemCode" element.
 *
 * Empty specialization of CoordinateSystemCodeType: it exists so the XML
 * element name maps onto a dedicated PHP class during (de)serialization.
 *
 * @xmlNamespace urn:oasis:names:specification:ubl:schema:xsd:CommonBasicComponents-2
 * @xmlType CoordinateSystemCodeType
 * @xmlName CoordinateSystemCode
 * @var oasis\names\specification\ubl\schema\xsd\CommonBasicComponents_2\CoordinateSystemCode
 */
class CoordinateSystemCode
    extends CoordinateSystemCodeType
{
} // end class CoordinateSystemCode
| {
"content_hash": "dcfb37f35c8cff4774ef218608abd2c7",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 93,
"avg_line_length": 27.75,
"alnum_prop": 0.8175675675675675,
"repo_name": "emoxie/quickbooks-sdk",
"id": "eaee55d2d19a0f94deb245781767305e66857e6a",
"size": "444",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "src/Dependencies/XSD2PHP/test/data/expected/ubl2.0/oasis/names/specification/ubl/schema/xsd/CommonBasicComponents_2/CoordinateSystemCode.php",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "9685"
},
{
"name": "PHP",
"bytes": "5213526"
},
{
"name": "Shell",
"bytes": "99"
},
{
"name": "XSLT",
"bytes": "11481"
}
],
"symlink_target": ""
} |
package com.meisolsson.githubsdk.model.payload;
import android.os.Parcel;
import android.os.Parcelable;
import android.support.annotation.Nullable;
import com.google.auto.value.AutoValue;
import com.meisolsson.githubsdk.model.ReferenceType;
import com.squareup.moshi.Json;
import com.squareup.moshi.JsonAdapter;
import com.squareup.moshi.Moshi;
/**
 * Payload model for a GitHub delete event (a deleted ref such as a branch
 * or tag). Immutable AutoValue type with Moshi JSON and Parcelable support.
 */
@AutoValue
public abstract class DeletePayload extends GitHubPayload<DeletePayload.Builder> implements Parcelable {
    /** Name of the deleted ref; may be null. */
    @Nullable
    public abstract String ref();
    /** Kind of the deleted ref (serialized as {@code ref_type}); may be null. */
    @Json(name = "ref_type")
    @Nullable
    public abstract ReferenceType refType();
    /** Returns a builder pre-populated with this payload's values. */
    public abstract Builder toBuilder();
    /** Moshi adapter backed by the generated AutoValue implementation. */
    public static JsonAdapter<DeletePayload> jsonAdapter(Moshi moshi) {
        return new AutoValue_DeletePayload.MoshiJsonAdapter(moshi);
    }
    /** Recreates a payload instance from a {@link Parcel}. */
    public static DeletePayload createFromParcel(Parcel in) {
        return AutoValue_DeletePayload.CREATOR.createFromParcel(in);
    }
    /** Creates an empty builder. */
    public static Builder builder() {
        return new AutoValue_DeletePayload.Builder();
    }
    /** Builder for {@link DeletePayload}. */
    @AutoValue.Builder
    public abstract static class Builder extends GitHubPayload.Builder<DeletePayload, Builder> {
        public abstract Builder ref(String ref);
        public abstract Builder refType(ReferenceType refType);
        public abstract DeletePayload build();
    }
}
| {
"content_hash": "6e973f230cf9deba14b0ed756acf8c53",
"timestamp": "",
"source": "github",
"line_count": 47,
"max_line_length": 104,
"avg_line_length": 28.106382978723403,
"alnum_prop": 0.7509462528387585,
"repo_name": "Meisolsson/GitHubSdk",
"id": "636113326b0bd13e5957c2c2370b619545f5021e",
"size": "1916",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "library/src/main/java/com/meisolsson/githubsdk/model/payload/DeletePayload.java",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Java",
"bytes": "411171"
}
],
"symlink_target": ""
} |
<?xml version="1.0" encoding="UTF-8"?>
<rootTag>
<Award>
<AwardTitle>Workshop on Video Games in Engineering and Computer Science Education</AwardTitle>
<AwardEffectiveDate>09/01/2009</AwardEffectiveDate>
<AwardExpirationDate>08/31/2011</AwardExpirationDate>
<AwardAmount>92328</AwardAmount>
<AwardInstrument>
<Value>Standard Grant</Value>
</AwardInstrument>
<Organization>
<Code>11040200</Code>
<Directorate>
<LongName>Direct For Education and Human Resources</LongName>
</Directorate>
<Division>
<LongName>Division Of Undergraduate Education</LongName>
</Division>
</Organization>
<ProgramOfficer>
<SignBlockName>Russell L. Pimmel</SignBlockName>
</ProgramOfficer>
<AbstractNarration>The project is supporting a workshop to explore the opportunities and challenges that are presented by the use of games and game-based solutions in engineering and computer science education. It addresses a critical need to assess the current state of video game use in these disciplines and identify future research directions. The workshop is bringing together a diverse group of twenty-five participants from engineering, computer science, science and instructional design who share a common interest in the use of games and related technologies in education. The workshop is working to identify challenges facing educators and researchers when it comes to the design, development, implementation, and assessment of games and game-based solutions in engineering and computer science education. The investigators are posting workshop materials on a website and preparing conference presentations and a review article for publication.</AbstractNarration>
<MinAmdLetterDate>08/20/2009</MinAmdLetterDate>
<MaxAmdLetterDate>04/07/2011</MaxAmdLetterDate>
<ARRAAmount/>
<AwardID>0938176</AwardID>
<Investigator>
<FirstName>Vinod</FirstName>
<LastName>Srinivasan</LastName>
<EmailAddress>vinod@viz.tamu.edu</EmailAddress>
<StartDate>08/20/2009</StartDate>
<EndDate>04/07/2011</EndDate>
<RoleCode>Former Principal Investigator</RoleCode>
</Investigator>
<Investigator>
<FirstName>Timothy</FirstName>
<LastName>McLaughlin</LastName>
<EmailAddress>timm@viz.tamu.edu</EmailAddress>
<StartDate>04/07/2011</StartDate>
<EndDate/>
<RoleCode>Principal Investigator</RoleCode>
</Investigator>
<Investigator>
<FirstName>Karen</FirstName>
<LastName>Butler-Purry</LastName>
<EmailAddress>klbutler@tamu.edu</EmailAddress>
<StartDate>08/20/2009</StartDate>
<EndDate/>
<RoleCode>Co-Principal Investigator</RoleCode>
</Investigator>
<Institution>
<Name>Texas A&M Research Foundation</Name>
<CityName>College Station</CityName>
<ZipCode>778454321</ZipCode>
<PhoneNumber>9798458600</PhoneNumber>
<StreetAddress>400 Harvey Mitchell Parkway, S</StreetAddress>
<CountryName>United States</CountryName>
<StateName>Texas</StateName>
<StateCode>TX</StateCode>
</Institution>
<ProgramElement>
<Code>7494</Code>
<Text>CCLI-Type 1 (Exploratory)</Text>
</ProgramElement>
<ProgramReference>
<Code>9178</Code>
<Text>UNDERGRADUATE EDUCATION</Text>
</ProgramReference>
<ProgramReference>
<Code>SMET</Code>
<Text>SCIENCE, MATH, ENG & TECH EDUCATION</Text>
</ProgramReference>
</Award>
</rootTag>
| {
"content_hash": "de87cd1793e64bcd175ad0ea4b1cecf9",
"timestamp": "",
"source": "github",
"line_count": 76,
"max_line_length": 978,
"avg_line_length": 46.328947368421055,
"alnum_prop": 0.7216699801192843,
"repo_name": "jallen2/Research-Trend",
"id": "9bb373f24e7bd8c652090adca38e0892ec7fd840",
"size": "3521",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "CU_Funding/2009/0938176.xml",
"mode": "33261",
"license": "mit",
"language": [],
"symlink_target": ""
} |
#ifdef HAVE_CONFIG_H
#include "config.h"
#endif
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <winpr/crt.h>
#include <winpr/path.h>
#include <winpr/file.h>
#include <winpr/string.h>
#include <winpr/synch.h>
#include <winpr/thread.h>
#include <winpr/stream.h>
#include <winpr/environment.h>
#include <winpr/interlocked.h>
#include <winpr/collections.h>
#include <winpr/shell.h>
#include <freerdp/channels/rdpdr.h>
#include "drive_file.h"
typedef struct _DRIVE_DEVICE DRIVE_DEVICE;
/* State for one redirected drive exposed over the RDPDR channel. */
struct _DRIVE_DEVICE
{
    DEVICE device;           /* generic RDPDR device header */
    WCHAR* path;             /* local root path of the share (UTF-16) */
    BOOL automount;          /* TRUE when the drive was auto-mounted */
    UINT32 PathLength;
    wListDictionary* files;  /* open DRIVE_FILEs keyed by 32-bit file id */
    HANDLE thread;           /* worker thread draining IrpQueue */
    wMessageQueue* IrpQueue; /* IRPs posted by the channel, consumed by thread */
    DEVMAN* devman;
    rdpContext* rdpcontext;  /* owning context, used for error reporting */
};
/* Code page used for string conversions (CP_UTF8, or GetACP() on Windows). */
static UINT sys_code_page = 0;
/* Translate a Win32 file-system error code into the NTSTATUS value carried
 * on the wire. Unknown codes are logged and become STATUS_UNSUCCESSFUL. */
static DWORD drive_map_windows_err(DWORD fs_errno)
{
    switch (fs_errno)
    {
        case STATUS_SUCCESS:
            return STATUS_SUCCESS;
        case ERROR_ACCESS_DENIED:
        case ERROR_SHARING_VIOLATION:
            return STATUS_ACCESS_DENIED;
        case ERROR_FILE_NOT_FOUND:
        case ERROR_INVALID_NAME:
            return STATUS_NO_SUCH_FILE;
        case ERROR_BUSY_DRIVE:
            return STATUS_DEVICE_BUSY;
        case ERROR_INVALID_DRIVE:
        case ERROR_NOT_READY:
            return STATUS_NO_SUCH_DEVICE;
        case ERROR_FILE_EXISTS:
        case ERROR_ALREADY_EXISTS:
            return STATUS_OBJECT_NAME_COLLISION;
        case ERROR_INVALID_HANDLE:
            return STATUS_INVALID_HANDLE;
        case ERROR_NO_MORE_FILES:
            return STATUS_NO_MORE_FILES;
        case ERROR_DIRECTORY:
            return STATUS_NOT_A_DIRECTORY;
        case ERROR_PATH_NOT_FOUND:
            return STATUS_OBJECT_PATH_NOT_FOUND;
        default:
            WLog_ERR(TAG, "Error code not found: %" PRIu32 "", fs_errno);
            return STATUS_UNSUCCESSFUL;
    }
}
/* Look up an open file by its RDPDR file id; returns NULL when unknown. */
static DRIVE_FILE* drive_get_file_by_id(DRIVE_DEVICE* drive, UINT32 id)
{
    if (!drive)
        return NULL;

    return (DRIVE_FILE*)ListDictionary_GetItemValue(drive->files, (void*)(size_t)id);
}
/**
 * Handle IRP_MJ_CREATE: open or create a file/directory under the share root
 * and answer with the allocated file id.
 *
 * @return 0 on success, otherwise a Win32 error code
 */
static UINT drive_process_irp_create(DRIVE_DEVICE* drive, IRP* irp)
{
    UINT32 FileId;
    DRIVE_FILE* file;
    BYTE Information;
    UINT32 FileAttributes;
    UINT32 SharedAccess;
    UINT32 DesiredAccess;
    UINT32 CreateDisposition;
    UINT32 CreateOptions;
    UINT32 PathLength;
    UINT64 allocationSize;
    const WCHAR* path;
    if (!drive || !irp || !irp->devman || !irp->Complete)
        return ERROR_INVALID_PARAMETER;
    /* Fixed-size request part: six UINT32 fields plus the 8-byte allocation size. */
    if (Stream_GetRemainingLength(irp->input) < 6 * 4 + 8)
        return ERROR_INVALID_DATA;
    Stream_Read_UINT32(irp->input, DesiredAccess);
    Stream_Read_UINT64(irp->input, allocationSize);
    Stream_Read_UINT32(irp->input, FileAttributes);
    Stream_Read_UINT32(irp->input, SharedAccess);
    Stream_Read_UINT32(irp->input, CreateDisposition);
    Stream_Read_UINT32(irp->input, CreateOptions);
    Stream_Read_UINT32(irp->input, PathLength);
    if (Stream_GetRemainingLength(irp->input) < PathLength)
        return ERROR_INVALID_DATA;
    /* Path bytes follow in-place in the input stream (PathLength bytes, UTF-16). */
    path = (const WCHAR*)Stream_Pointer(irp->input);
    /* Allocate the next file id; it keys the new file in drive->files. */
    FileId = irp->devman->id_sequence++;
    file = drive_file_new(drive->path, path, PathLength, FileId, DesiredAccess, CreateDisposition,
                          CreateOptions, FileAttributes, SharedAccess);
    if (!file)
    {
        /* Open failed: report the mapped NTSTATUS and reply with a zero file id. */
        irp->IoStatus = drive_map_windows_err(GetLastError());
        FileId = 0;
        Information = 0;
    }
    else
    {
        void* key = (void*)(size_t)file->id;
        if (!ListDictionary_Add(drive->files, key, file))
        {
            WLog_ERR(TAG, "ListDictionary_Add failed!");
            return ERROR_INTERNAL_ERROR;
        }
        /* Information byte of the response, derived from the disposition. */
        switch (CreateDisposition)
        {
            case FILE_SUPERSEDE:
            case FILE_OPEN:
            case FILE_CREATE:
            case FILE_OVERWRITE:
                Information = FILE_SUPERSEDED;
                break;
            case FILE_OPEN_IF:
                Information = FILE_OPENED;
                break;
            case FILE_OVERWRITE_IF:
                Information = FILE_OVERWRITTEN;
                break;
            default:
                Information = 0;
                break;
        }
    }
    Stream_Write_UINT32(irp->output, FileId);
    Stream_Write_UINT8(irp->output, Information);
    return irp->Complete(irp);
}
/**
 * Handle IRP_MJ_CLOSE: close an open file and drop it from the open-file
 * table. An unknown file id yields STATUS_UNSUCCESSFUL.
 *
 * @return 0 on success, otherwise a Win32 error code
 */
static UINT drive_process_irp_close(DRIVE_DEVICE* drive, IRP* irp)
{
    void* key;
    DRIVE_FILE* file;
    if (!drive || !irp || !irp->Complete || !irp->output)
        return ERROR_INVALID_PARAMETER;
    file = drive_get_file_by_id(drive, irp->FileId);
    key = (void*)(size_t)irp->FileId;
    if (!file)
        irp->IoStatus = STATUS_UNSUCCESSFUL;
    else
    {
        ListDictionary_Remove(drive->files, key);
        if (drive_file_free(file))
            irp->IoStatus = STATUS_SUCCESS;
        else
            irp->IoStatus = drive_map_windows_err(GetLastError());
    }
    Stream_Zero(irp->output, 5); /* Padding(5) */
    return irp->Complete(irp);
}
/**
 * Handle IRP_MJ_READ: read up to Length bytes at Offset from the file and
 * return them in the response. Errors are reported in irp->IoStatus with a
 * zero-length payload.
 *
 * @return 0 on success, otherwise a Win32 error code
 */
static UINT drive_process_irp_read(DRIVE_DEVICE* drive, IRP* irp)
{
    DRIVE_FILE* file;
    UINT32 Length;
    UINT64 Offset;
    if (!drive || !irp || !irp->output || !irp->Complete)
        return ERROR_INVALID_PARAMETER;
    if (Stream_GetRemainingLength(irp->input) < 12)
        return ERROR_INVALID_DATA;
    Stream_Read_UINT32(irp->input, Length);
    Stream_Read_UINT64(irp->input, Offset);
    file = drive_get_file_by_id(drive, irp->FileId);
    if (!file)
    {
        irp->IoStatus = STATUS_UNSUCCESSFUL;
        Length = 0; /* reply with an empty read */
    }
    else if (!drive_file_seek(file, Offset))
    {
        irp->IoStatus = drive_map_windows_err(GetLastError());
        Length = 0;
    }
    if (!Stream_EnsureRemainingCapacity(irp->output, Length + 4))
    {
        WLog_ERR(TAG, "Stream_EnsureRemainingCapacity failed!");
        return ERROR_INTERNAL_ERROR;
    }
    else if (Length == 0)
        Stream_Write_UINT32(irp->output, 0); /* Length field only, no payload */
    else
    {
        /* Read directly into the output stream, just past the 4-byte Length field. */
        BYTE* buffer = Stream_Pointer(irp->output) + sizeof(UINT32);
        if (!drive_file_read(file, buffer, &Length))
        {
            irp->IoStatus = drive_map_windows_err(GetLastError());
            Stream_Write_UINT32(irp->output, 0);
        }
        else
        {
            /* Length was updated to the number of bytes actually read. */
            Stream_Write_UINT32(irp->output, Length);
            Stream_Seek(irp->output, Length);
        }
    }
    return irp->Complete(irp);
}
/**
 * Handle IRP_MJ_WRITE: write Length bytes from the request payload at
 * Offset. On any failure the response reports zero bytes written.
 *
 * @return 0 on success, otherwise a Win32 error code
 */
static UINT drive_process_irp_write(DRIVE_DEVICE* drive, IRP* irp)
{
    DRIVE_FILE* file;
    UINT32 Length;
    UINT64 Offset;
    void* ptr;
    if (!drive || !irp || !irp->input || !irp->output || !irp->Complete)
        return ERROR_INVALID_PARAMETER;
    if (Stream_GetRemainingLength(irp->input) < 32)
        return ERROR_INVALID_DATA;
    Stream_Read_UINT32(irp->input, Length);
    Stream_Read_UINT64(irp->input, Offset);
    Stream_Seek(irp->input, 20); /* Padding */
    ptr = Stream_Pointer(irp->input);
    /* Ensure the announced payload is fully present before touching it. */
    if (!Stream_SafeSeek(irp->input, Length))
        return ERROR_INVALID_DATA;
    file = drive_get_file_by_id(drive, irp->FileId);
    if (!file)
    {
        irp->IoStatus = STATUS_UNSUCCESSFUL;
        Length = 0;
    }
    else if (!drive_file_seek(file, Offset))
    {
        irp->IoStatus = drive_map_windows_err(GetLastError());
        Length = 0;
    }
    else if (!drive_file_write(file, ptr, Length))
    {
        irp->IoStatus = drive_map_windows_err(GetLastError());
        Length = 0;
    }
    Stream_Write_UINT32(irp->output, Length); /* bytes written */
    Stream_Write_UINT8(irp->output, 0); /* Padding */
    return irp->Complete(irp);
}
/**
 * Handle IRP_MJ_QUERY_INFORMATION: return metadata for an open file in the
 * requested information class.
 *
 * @return 0 on success, otherwise a Win32 error code
 */
static UINT drive_process_irp_query_information(DRIVE_DEVICE* drive, IRP* irp)
{
    UINT32 FsInformationClass;
    DRIVE_FILE* file;

    if (!drive || !irp || !irp->Complete)
        return ERROR_INVALID_PARAMETER;

    if (Stream_GetRemainingLength(irp->input) < 4)
        return ERROR_INVALID_DATA;

    Stream_Read_UINT32(irp->input, FsInformationClass);
    file = drive_get_file_by_id(drive, irp->FileId);

    if (!file)
        irp->IoStatus = STATUS_UNSUCCESSFUL;
    else if (!drive_file_query_information(file, FsInformationClass, irp->output))
        irp->IoStatus = drive_map_windows_err(GetLastError());

    return irp->Complete(irp);
}
/**
 * Handle IRP_MJ_SET_INFORMATION: apply a file-information change (rename,
 * disposition, attributes, ...) to an open file. A non-empty directory is
 * always answered with STATUS_DIRECTORY_NOT_EMPTY.
 *
 * @return 0 on success, otherwise a Win32 error code
 */
static UINT drive_process_irp_set_information(DRIVE_DEVICE* drive, IRP* irp)
{
    DRIVE_FILE* file;
    UINT32 FsInformationClass;
    UINT32 Length;
    if (!drive || !irp || !irp->Complete || !irp->input || !irp->output)
        return ERROR_INVALID_PARAMETER;
    if (Stream_GetRemainingLength(irp->input) < 32)
        return ERROR_INVALID_DATA;
    Stream_Read_UINT32(irp->input, FsInformationClass);
    Stream_Read_UINT32(irp->input, Length);
    Stream_Seek(irp->input, 24); /* Padding */
    file = drive_get_file_by_id(drive, irp->FileId);
    if (!file)
    {
        irp->IoStatus = STATUS_UNSUCCESSFUL;
    }
    else if (!drive_file_set_information(file, FsInformationClass, Length, irp->input))
    {
        irp->IoStatus = drive_map_windows_err(GetLastError());
    }
    /* Overrides the status above whenever the target is a non-empty directory. */
    if (file && file->is_dir && !PathIsDirectoryEmptyW(file->fullpath))
        irp->IoStatus = STATUS_DIRECTORY_NOT_EMPTY;
    Stream_Write_UINT32(irp->output, Length);
    return irp->Complete(irp);
}
/**
 * Handle IRP_MJ_QUERY_VOLUME_INFORMATION: report volume metadata (label,
 * size, attributes, device type) for the redirected drive. The volume label
 * and file-system name are fixed strings ("FREERDP" / "FAT32").
 *
 * @return 0 on success, otherwise a Win32 error code
 */
static UINT drive_process_irp_query_volume_information(DRIVE_DEVICE* drive, IRP* irp)
{
    UINT32 FsInformationClass;
    wStream* output = NULL;
    char* volumeLabel = { "FREERDP" };
    char* diskType = { "FAT32" };
    WCHAR* outStr = NULL;
    int length;
    DWORD lpSectorsPerCluster;
    DWORD lpBytesPerSector;
    DWORD lpNumberOfFreeClusters;
    DWORD lpTotalNumberOfClusters;
    WIN32_FILE_ATTRIBUTE_DATA wfad;
    if (!drive || !irp)
        return ERROR_INVALID_PARAMETER;
    output = irp->output;
    if (Stream_GetRemainingLength(irp->input) < 4)
        return ERROR_INVALID_DATA;
    Stream_Read_UINT32(irp->input, FsInformationClass);
    /* Cluster/sector figures are reused by the size-related classes below. */
    GetDiskFreeSpaceW(drive->path, &lpSectorsPerCluster, &lpBytesPerSector, &lpNumberOfFreeClusters,
                      &lpTotalNumberOfClusters);
    switch (FsInformationClass)
    {
        case FileFsVolumeInformation:
            /* http://msdn.microsoft.com/en-us/library/cc232108.aspx */
            /* length is in bytes: UTF-16 code units * 2, including the terminator. */
            if ((length = ConvertToUnicode(sys_code_page, 0, volumeLabel, -1, &outStr, 0) * 2) <= 0)
            {
                WLog_ERR(TAG, "ConvertToUnicode failed!");
                return CHANNEL_RC_NO_MEMORY;
            }
            Stream_Write_UINT32(output, 17 + length); /* Length */
            if (!Stream_EnsureRemainingCapacity(output, 17 + length))
            {
                WLog_ERR(TAG, "Stream_EnsureRemainingCapacity failed!");
                free(outStr);
                return CHANNEL_RC_NO_MEMORY;
            }
            /* Creation time of the share root doubles as the volume creation time. */
            GetFileAttributesExW(drive->path, GetFileExInfoStandard, &wfad);
            Stream_Write_UINT32(output, wfad.ftCreationTime.dwLowDateTime); /* VolumeCreationTime */
            Stream_Write_UINT32(output,
                                wfad.ftCreationTime.dwHighDateTime); /* VolumeCreationTime */
            Stream_Write_UINT32(output, lpNumberOfFreeClusters & 0xffff); /* VolumeSerialNumber */
            Stream_Write_UINT32(output, length); /* VolumeLabelLength */
            Stream_Write_UINT8(output, 0); /* SupportsObjects */
            /* Reserved(1), MUST NOT be added! */
            Stream_Write(output, outStr, length); /* VolumeLabel (Unicode) */
            free(outStr);
            break;
        case FileFsSizeInformation:
            /* http://msdn.microsoft.com/en-us/library/cc232107.aspx */
            Stream_Write_UINT32(output, 24); /* Length */
            if (!Stream_EnsureRemainingCapacity(output, 24))
            {
                WLog_ERR(TAG, "Stream_EnsureRemainingCapacity failed!");
                return CHANNEL_RC_NO_MEMORY;
            }
            Stream_Write_UINT64(output, lpTotalNumberOfClusters); /* TotalAllocationUnits */
            Stream_Write_UINT64(output, lpNumberOfFreeClusters); /* AvailableAllocationUnits */
            Stream_Write_UINT32(output, lpSectorsPerCluster); /* SectorsPerAllocationUnit */
            Stream_Write_UINT32(output, lpBytesPerSector); /* BytesPerSector */
            break;
        case FileFsAttributeInformation:
            /* http://msdn.microsoft.com/en-us/library/cc232101.aspx */
            if ((length = ConvertToUnicode(sys_code_page, 0, diskType, -1, &outStr, 0) * 2) <= 0)
            {
                WLog_ERR(TAG, "ConvertToUnicode failed!");
                return CHANNEL_RC_NO_MEMORY;
            }
            Stream_Write_UINT32(output, 12 + length); /* Length */
            if (!Stream_EnsureRemainingCapacity(output, 12 + length))
            {
                free(outStr);
                WLog_ERR(TAG, "Stream_EnsureRemainingCapacity failed!");
                return CHANNEL_RC_NO_MEMORY;
            }
            Stream_Write_UINT32(output, FILE_CASE_SENSITIVE_SEARCH | FILE_CASE_PRESERVED_NAMES |
                                            FILE_UNICODE_ON_DISK); /* FileSystemAttributes */
            Stream_Write_UINT32(output, MAX_PATH); /* MaximumComponentNameLength */
            Stream_Write_UINT32(output, length); /* FileSystemNameLength */
            Stream_Write(output, outStr, length); /* FileSystemName (Unicode) */
            free(outStr);
            break;
        case FileFsFullSizeInformation:
            /* http://msdn.microsoft.com/en-us/library/cc232104.aspx */
            Stream_Write_UINT32(output, 32); /* Length */
            if (!Stream_EnsureRemainingCapacity(output, 32))
            {
                WLog_ERR(TAG, "Stream_EnsureRemainingCapacity failed!");
                return CHANNEL_RC_NO_MEMORY;
            }
            Stream_Write_UINT64(output, lpTotalNumberOfClusters); /* TotalAllocationUnits */
            Stream_Write_UINT64(output,
                                lpNumberOfFreeClusters); /* CallerAvailableAllocationUnits */
            Stream_Write_UINT64(output, lpNumberOfFreeClusters); /* AvailableAllocationUnits */
            Stream_Write_UINT32(output, lpSectorsPerCluster); /* SectorsPerAllocationUnit */
            Stream_Write_UINT32(output, lpBytesPerSector); /* BytesPerSector */
            break;
        case FileFsDeviceInformation:
            /* http://msdn.microsoft.com/en-us/library/cc232109.aspx */
            Stream_Write_UINT32(output, 8); /* Length */
            if (!Stream_EnsureRemainingCapacity(output, 8))
            {
                WLog_ERR(TAG, "Stream_EnsureRemainingCapacity failed!");
                return CHANNEL_RC_NO_MEMORY;
            }
            Stream_Write_UINT32(output, FILE_DEVICE_DISK); /* DeviceType */
            Stream_Write_UINT32(output, 0); /* Characteristics */
            break;
        default:
            /* Unsupported class: fail the IRP with an empty body. */
            irp->IoStatus = STATUS_UNSUCCESSFUL;
            Stream_Write_UINT32(output, 0); /* Length */
            break;
    }
    return irp->Complete(irp);
}
/* http://msdn.microsoft.com/en-us/library/cc241518.aspx */
/**
 * Accept an IRP without performing any action and reply with an empty
 * result (used for IRP_MJ_LOCK_CONTROL, see dispatch in drive_process_irp).
 *
 * @return 0 on success, otherwise a Win32 error code
 */
static UINT drive_process_irp_silent_ignore(DRIVE_DEVICE* drive, IRP* irp)
{
    UINT32 FsInformationClass;
    if (!drive || !irp || !irp->output || !irp->Complete)
        return ERROR_INVALID_PARAMETER;
    if (Stream_GetRemainingLength(irp->input) < 4)
        return ERROR_INVALID_DATA;
    Stream_Read_UINT32(irp->input, FsInformationClass);
    Stream_Write_UINT32(irp->output, 0); /* Length */
    return irp->Complete(irp);
}
/**
 * Handle IRP_MN_QUERY_DIRECTORY: enumerate directory entries matching the
 * supplied path pattern. InitialQuery restarts the enumeration.
 *
 * @return 0 on success, otherwise a Win32 error code
 */
static UINT drive_process_irp_query_directory(DRIVE_DEVICE* drive, IRP* irp)
{
    const WCHAR* path;
    DRIVE_FILE* file;
    BYTE InitialQuery;
    UINT32 PathLength;
    UINT32 FsInformationClass;
    if (!drive || !irp || !irp->Complete)
        return ERROR_INVALID_PARAMETER;
    if (Stream_GetRemainingLength(irp->input) < 32)
        return ERROR_INVALID_DATA;
    Stream_Read_UINT32(irp->input, FsInformationClass);
    Stream_Read_UINT8(irp->input, InitialQuery);
    Stream_Read_UINT32(irp->input, PathLength);
    Stream_Seek(irp->input, 23); /* Padding */
    /* Pattern bytes follow in-place in the input stream (PathLength bytes). */
    path = (WCHAR*)Stream_Pointer(irp->input);
    file = drive_get_file_by_id(drive, irp->FileId);
    if (file == NULL)
    {
        irp->IoStatus = STATUS_UNSUCCESSFUL;
        Stream_Write_UINT32(irp->output, 0); /* Length */
    }
    else if (!drive_file_query_directory(file, FsInformationClass, InitialQuery, path, PathLength,
                                         irp->output))
    {
        irp->IoStatus = drive_map_windows_err(GetLastError());
    }
    return irp->Complete(irp);
}
/**
 * Dispatch IRP_MJ_DIRECTORY_CONTROL by minor function. Directory change
 * notification is not implemented yet and is silently discarded.
 *
 * @return 0 on success, otherwise a Win32 error code
 */
static UINT drive_process_irp_directory_control(DRIVE_DEVICE* drive, IRP* irp)
{
    if (!drive || !irp)
        return ERROR_INVALID_PARAMETER;

    if (irp->MinorFunction == IRP_MN_QUERY_DIRECTORY)
        return drive_process_irp_query_directory(drive, irp);

    if (irp->MinorFunction == IRP_MN_NOTIFY_CHANGE_DIRECTORY) /* TODO */
        return irp->Discard(irp);

    irp->IoStatus = STATUS_NOT_SUPPORTED;
    Stream_Write_UINT32(irp->output, 0); /* Length */
    return irp->Complete(irp);
}
/**
 * Handle IRP_MJ_DEVICE_CONTROL: no IOCTLs are implemented, so reply with an
 * empty output buffer.
 *
 * @return 0 on success, otherwise a Win32 error code
 */
static UINT drive_process_irp_device_control(DRIVE_DEVICE* drive, IRP* irp)
{
    if (!drive || !irp)
        return ERROR_INVALID_PARAMETER;
    Stream_Write_UINT32(irp->output, 0); /* OutputBufferLength */
    return irp->Complete(irp);
}
/**
 * Dispatch a single IRP to the handler for its major function. Unsupported
 * majors are completed with STATUS_NOT_SUPPORTED.
 *
 * @return 0 on success, otherwise a Win32 error code
 */
static UINT drive_process_irp(DRIVE_DEVICE* drive, IRP* irp)
{
    UINT error;
    if (!drive || !irp)
        return ERROR_INVALID_PARAMETER;
    /* Handlers overwrite IoStatus on failure; start from success. */
    irp->IoStatus = STATUS_SUCCESS;
    switch (irp->MajorFunction)
    {
        case IRP_MJ_CREATE:
            error = drive_process_irp_create(drive, irp);
            break;
        case IRP_MJ_CLOSE:
            error = drive_process_irp_close(drive, irp);
            break;
        case IRP_MJ_READ:
            error = drive_process_irp_read(drive, irp);
            break;
        case IRP_MJ_WRITE:
            error = drive_process_irp_write(drive, irp);
            break;
        case IRP_MJ_QUERY_INFORMATION:
            error = drive_process_irp_query_information(drive, irp);
            break;
        case IRP_MJ_SET_INFORMATION:
            error = drive_process_irp_set_information(drive, irp);
            break;
        case IRP_MJ_QUERY_VOLUME_INFORMATION:
            error = drive_process_irp_query_volume_information(drive, irp);
            break;
        case IRP_MJ_LOCK_CONTROL:
            error = drive_process_irp_silent_ignore(drive, irp);
            break;
        case IRP_MJ_DIRECTORY_CONTROL:
            error = drive_process_irp_directory_control(drive, irp);
            break;
        case IRP_MJ_DEVICE_CONTROL:
            error = drive_process_irp_device_control(drive, irp);
            break;
        default:
            irp->IoStatus = STATUS_NOT_SUPPORTED;
            error = irp->Complete(irp);
            break;
    }
    return error;
}
/* Worker thread: blocks on the device's IRP queue and processes requests
 * until a WMQ_QUIT message arrives or an error occurs. Errors are reported
 * to the owning RDP context before the thread exits. */
static DWORD WINAPI drive_thread_func(LPVOID arg)
{
    IRP* irp;
    wMessage message;
    DRIVE_FILE* file;
    DRIVE_DEVICE* drive = (DRIVE_DEVICE*)arg;
    UINT error = CHANNEL_RC_OK;
    if (!drive)
    {
        error = ERROR_INVALID_PARAMETER;
        goto fail;
    }
    while (1)
    {
        if (!MessageQueue_Wait(drive->IrpQueue))
        {
            WLog_ERR(TAG, "MessageQueue_Wait failed!");
            error = ERROR_INTERNAL_ERROR;
            break;
        }
        /* TRUE removes the message from the queue. */
        if (!MessageQueue_Peek(drive->IrpQueue, &message, TRUE))
        {
            WLog_ERR(TAG, "MessageQueue_Peek failed!");
            error = ERROR_INTERNAL_ERROR;
            break;
        }
        if (message.id == WMQ_QUIT)
            break;
        irp = (IRP*)message.wParam;
        if (irp)
        {
            if ((error = drive_process_irp(drive, irp)))
            {
                WLog_ERR(TAG, "drive_process_irp failed with error %" PRIu32 "!", error);
                break;
            }
        }
    }
fail:
    if (error && drive && drive->rdpcontext)
        setChannelError(drive->rdpcontext, error, "drive_thread_func reported an error");
    ExitThread(error);
    return error;
}
/**
 * IRPRequest callback: hand an incoming IRP to the worker thread by posting
 * it on the device's message queue.
 *
 * @return 0 on success, otherwise a Win32 error code
 */
static UINT drive_irp_request(DEVICE* device, IRP* irp)
{
    DRIVE_DEVICE* drive = (DRIVE_DEVICE*)device;
    if (!drive)
        return ERROR_INVALID_PARAMETER;
    if (!MessageQueue_Post(drive->IrpQueue, NULL, 0, (void*)irp, NULL))
    {
        WLog_ERR(TAG, "MessageQueue_Post failed!");
        return ERROR_INTERNAL_ERROR;
    }
    return CHANNEL_RC_OK;
}
/* Release every resource owned by a drive device. Callers must already have
 * stopped (or never started) the worker thread. */
static UINT drive_free_int(DRIVE_DEVICE* drive)
{
    if (!drive)
        return ERROR_INVALID_PARAMETER;

    CloseHandle(drive->thread);
    ListDictionary_Free(drive->files);
    MessageQueue_Free(drive->IrpQueue);
    Stream_Free(drive->device.data, TRUE);
    free(drive->path);
    free(drive);
    return CHANNEL_RC_OK;
}
/**
 * Free callback: ask the worker thread to quit, wait for it to finish, then
 * release all device resources.
 *
 * @return 0 on success, otherwise a Win32 error code
 */
static UINT drive_free(DEVICE* device)
{
    DRIVE_DEVICE* drive = (DRIVE_DEVICE*)device;
    UINT error = CHANNEL_RC_OK;
    if (!drive)
        return ERROR_INVALID_PARAMETER;
    /* NOTE(review): if PostQuit fails, resources are freed without waiting
     * for the worker thread -- confirm this cannot race with a live thread. */
    if (MessageQueue_PostQuit(drive->IrpQueue, 0) &&
        (WaitForSingleObject(drive->thread, INFINITE) == WAIT_FAILED))
    {
        error = GetLastError();
        WLog_ERR(TAG, "WaitForSingleObject failed with error %" PRIu32 "", error);
        return error;
    }
    return drive_free_int(drive);
}
/**
 * Helper function used for freeing list dictionary value objects
 * (DRIVE_FILE entries in drive->files).
 */
static void drive_file_objfree(void* obj)
{
    drive_file_free((DRIVE_FILE*)obj);
}
/**
 * Create and register one drive device exposing the local directory `path`
 * under the announced name `name`. Characters forbidden by the protocol's
 * device-announce header are replaced with '_' in the announced name. A
 * worker thread is started to service the device's IRPs.
 *
 * @return 0 on success, otherwise a Win32 error code
 */
static UINT drive_register_drive_path(PDEVICE_SERVICE_ENTRY_POINTS pEntryPoints, const char* name,
                                      const char* path, BOOL automount)
{
    size_t i, length;
    DRIVE_DEVICE* drive;
    UINT error = ERROR_INTERNAL_ERROR;
    if (!pEntryPoints || !name || !path)
    {
        WLog_ERR(TAG, "[%s] Invalid parameters: pEntryPoints=%p, name=%p, path=%p", pEntryPoints,
                 name, path);
        return ERROR_INVALID_PARAMETER;
    }
    /* Both name and path must be non-empty; otherwise silently do nothing. */
    if (name[0] && path[0])
    {
        size_t pathLength = strnlen(path, MAX_PATH);
        drive = (DRIVE_DEVICE*)calloc(1, sizeof(DRIVE_DEVICE));
        if (!drive)
        {
            WLog_ERR(TAG, "calloc failed!");
            return CHANNEL_RC_NO_MEMORY;
        }
        drive->device.type = RDPDR_DTYP_FILESYSTEM;
        drive->device.IRPRequest = drive_irp_request;
        drive->device.Free = drive_free;
        drive->rdpcontext = pEntryPoints->rdpcontext;
        drive->automount = automount;
        length = strlen(name);
        /* device.data carries the sanitized announced name (NUL-terminated). */
        drive->device.data = Stream_New(NULL, length + 1);
        if (!drive->device.data)
        {
            WLog_ERR(TAG, "Stream_New failed!");
            error = CHANNEL_RC_NO_MEMORY;
            goto out_error;
        }
        for (i = 0; i < length; i++)
        {
            /* Filter 2.2.1.3 Device Announce Header (DEVICE_ANNOUNCE) forbidden symbols */
            switch (name[i])
            {
                case ':':
                case '<':
                case '>':
                case '\"':
                case '/':
                case '\\':
                case '|':
                case ' ':
                    Stream_Write_UINT8(drive->device.data, '_');
                    break;
                default:
                    Stream_Write_UINT8(drive->device.data, (BYTE)name[i]);
                    break;
            }
        }
        Stream_Write_UINT8(drive->device.data, '\0');
        drive->device.name = (const char*)Stream_Buffer(drive->device.data);
        if (!drive->device.name)
            goto out_error;
        /* Drop a single trailing '/' (but keep a bare "/"). */
        if ((pathLength > 1) && (path[pathLength - 1] == '/'))
            pathLength--;
        if (ConvertToUnicode(sys_code_page, 0, path, pathLength, &drive->path, 0) <= 0)
        {
            WLog_ERR(TAG, "ConvertToUnicode failed!");
            error = CHANNEL_RC_NO_MEMORY;
            goto out_error;
        }
        drive->files = ListDictionary_New(TRUE);
        if (!drive->files)
        {
            WLog_ERR(TAG, "ListDictionary_New failed!");
            error = CHANNEL_RC_NO_MEMORY;
            goto out_error;
        }
        /* Remaining open files are closed automatically when the dictionary is freed. */
        ListDictionary_ValueObject(drive->files)->fnObjectFree = drive_file_objfree;
        drive->IrpQueue = MessageQueue_New(NULL);
        if (!drive->IrpQueue)
        {
            WLog_ERR(TAG, "ListDictionary_New failed!");
            error = CHANNEL_RC_NO_MEMORY;
            goto out_error;
        }
        if ((error = pEntryPoints->RegisterDevice(pEntryPoints->devman, (DEVICE*)drive)))
        {
            WLog_ERR(TAG, "RegisterDevice failed with error %" PRIu32 "!", error);
            goto out_error;
        }
        /* NOTE(review): if CreateThread fails after RegisterDevice succeeded,
         * out_error frees a drive the device manager may still reference --
         * confirm the unregister path. */
        if (!(drive->thread =
                  CreateThread(NULL, 0, drive_thread_func, drive, CREATE_SUSPENDED, NULL)))
        {
            WLog_ERR(TAG, "CreateThread failed!");
            goto out_error;
        }
        ResumeThread(drive->thread);
    }
    return CHANNEL_RC_OK;
out_error:
    drive_free_int(drive);
    return error;
}
/* When built into the library the entry point is renamed to avoid symbol
 * clashes with other builtin channels; as a plugin it is exported. */
#ifdef BUILTIN_CHANNELS
#define DeviceServiceEntry drive_DeviceServiceEntry
#else
#define DeviceServiceEntry FREERDP_API DeviceServiceEntry
#endif
/**
 * Channel entry point: register one or more drive devices according to the
 * configured path.
 *
 * Special paths:
 *   "*" - export all drives (on Windows, every logical drive above B:)
 *   "%" - export the user's home/profile directory
 *
 * @return 0 on success, otherwise a Win32 error code
 */
UINT DeviceServiceEntry(PDEVICE_SERVICE_ENTRY_POINTS pEntryPoints)
{
    RDPDR_DRIVE* drive;
    /* Fix: initialize so the Windows "*" branch cannot return an
     * indeterminate value when no eligible drive (> B:) is found. */
    UINT error = CHANNEL_RC_OK;
#ifdef WIN32
    char* dev;
    int len;
    char devlist[512], buf[512];
    char* bufdup;
    char* devdup;
#endif
    drive = (RDPDR_DRIVE*)pEntryPoints->device;
#ifndef WIN32
    sys_code_page = CP_UTF8;
    if (strcmp(drive->Path, "*") == 0)
    {
        /* all drives: export the filesystem root */
        free(drive->Path);
        drive->Path = _strdup("/");
        if (!drive->Path)
        {
            WLog_ERR(TAG, "_strdup failed!");
            return CHANNEL_RC_NO_MEMORY;
        }
    }
    else if (strcmp(drive->Path, "%") == 0)
    {
        free(drive->Path);
        drive->Path = GetKnownPath(KNOWN_PATH_HOME);
        if (!drive->Path)
        {
            WLog_ERR(TAG, "_strdup failed!");
            return CHANNEL_RC_NO_MEMORY;
        }
    }
    error = drive_register_drive_path(pEntryPoints, drive->Name, drive->Path, drive->automount);
#else
    sys_code_page = GetACP();
    /* Special case: path[0] == '*' -> export all drives */
    /* Special case: path[0] == '%' -> user home dir */
    if (strcmp(drive->Path, "%") == 0)
    {
        GetEnvironmentVariableA("USERPROFILE", buf, sizeof(buf));
        PathCchAddBackslashA(buf, sizeof(buf));
        free(drive->Path);
        drive->Path = _strdup(buf);
        if (!drive->Path)
        {
            WLog_ERR(TAG, "_strdup failed!");
            return CHANNEL_RC_NO_MEMORY;
        }
        error = drive_register_drive_path(pEntryPoints, drive->Name, drive->Path, drive->automount);
    }
    else if (strcmp(drive->Path, "*") == 0)
    {
        int i;
        /* Enumerate all devices: */
        GetLogicalDriveStringsA(sizeof(devlist) - 1, devlist);
        for (dev = devlist, i = 0; *dev; dev += 4, i++)
        {
            if (*dev > 'B')
            {
                /* Suppress disk drives A and B to avoid pesky messages */
                len = sprintf_s(buf, sizeof(buf) - 4, "%s", drive->Name);
                buf[len] = '_';
                buf[len + 1] = dev[0];
                buf[len + 2] = 0;
                buf[len + 3] = 0;
                if (!(bufdup = _strdup(buf)))
                {
                    WLog_ERR(TAG, "_strdup failed!");
                    return CHANNEL_RC_NO_MEMORY;
                }
                if (!(devdup = _strdup(dev)))
                {
                    WLog_ERR(TAG, "_strdup failed!");
                    return CHANNEL_RC_NO_MEMORY;
                }
                if ((error = drive_register_drive_path(pEntryPoints, bufdup, devdup, TRUE)))
                {
                    break;
                }
            }
        }
    }
    else
    {
        error = drive_register_drive_path(pEntryPoints, drive->Name, drive->Path, drive->automount);
    }
#endif
    return error;
}
| {
"content_hash": "1cd4733e7890fb049b525c398813bc6f",
"timestamp": "",
"source": "github",
"line_count": 1091,
"max_line_length": 98,
"avg_line_length": 23.627864344637945,
"alnum_prop": 0.6693692295756071,
"repo_name": "chipitsine/FreeRDP",
"id": "1b542252258728efccff55ec9ca4e9a8a2c23cf0",
"size": "26710",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "channels/drive/client/drive_main.c",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "11973910"
},
{
"name": "C#",
"bytes": "9809"
},
{
"name": "C++",
"bytes": "169678"
},
{
"name": "CMake",
"bytes": "721640"
},
{
"name": "CSS",
"bytes": "5696"
},
{
"name": "HTML",
"bytes": "99139"
},
{
"name": "Java",
"bytes": "419683"
},
{
"name": "Makefile",
"bytes": "1585"
},
{
"name": "Objective-C",
"bytes": "1141082"
},
{
"name": "Perl",
"bytes": "8044"
},
{
"name": "Python",
"bytes": "3318"
},
{
"name": "Roff",
"bytes": "3708"
},
{
"name": "Shell",
"bytes": "26802"
}
],
"symlink_target": ""
} |
<?php
namespace LibSSH2;
/**
* Terminal class.
*
* Setter/getter class for terminal environment.
*
* @package LibSSH2
*/
class Terminal
{
    /**
     * Pseudo-tty for interactive connections.
     *
     * @var string
     */
    private $pty = NULL;

    /**
     * Environmental variables (associative array).
     *
     * @var array
     */
    private $env = [];

    /**
     * Width of the virtual terminal, in columns.
     *
     * @var int
     */
    private $width = 80;

    /**
     * Height of the virtual terminal, in rows.
     *
     * @var int
     */
    private $height = 25;

    /**
     * Sets the pseudo-tty used for interactive connections.
     *
     * @param string $pty pseudo-tty
     * @return object this instance, for method chaining
     */
    final public function set_pty($pty)
    {
        $this->pty = $pty;
        return $this;
    }

    /**
     * Returns the pseudo-tty used for interactive connections.
     *
     * @return string
     */
    final public function get_pty()
    {
        return $this->pty;
    }

    /**
     * Sets the environmental variables.
     *
     * @param array $env environmental variables
     * @return object this instance, for method chaining
     */
    final public function set_env($env)
    {
        $this->env = $env;
        return $this;
    }

    /**
     * Returns the environmental variables.
     *
     * @return array
     */
    final public function get_env()
    {
        return $this->env;
    }

    /**
     * Sets the width of the virtual terminal.
     *
     * @param int $width width in columns
     * @return object this instance, for method chaining
     */
    final public function set_width($width)
    {
        $this->width = $width;
        return $this;
    }

    /**
     * Returns the width of the virtual terminal.
     *
     * @return int
     */
    final public function get_width()
    {
        return $this->width;
    }

    /**
     * Sets the height of the virtual terminal.
     *
     * @param int $height height in rows
     * @return object this instance, for method chaining
     */
    final public function set_height($height)
    {
        $this->height = $height;
        return $this;
    }

    /**
     * Returns the height of the virtual terminal.
     *
     * @return int
     */
    final public function get_height()
    {
        return $this->height;
    }
}
| {
"content_hash": "250917f6e5b4b13c009cd0ec6696870e",
"timestamp": "",
"source": "github",
"line_count": 129,
"max_line_length": 56,
"avg_line_length": 17.124031007751938,
"alnum_prop": 0.5061113626075147,
"repo_name": "degagne/libssh2",
"id": "892e661025924d9ca6ca09f7841abf78aee9522e",
"size": "2209",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/Terminal.php",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "PHP",
"bytes": "48553"
}
],
"symlink_target": ""
} |
'use strict';
const fs = require('fs-extra');
const path = require('path');
const request = require('request-promise');
const {ciBuildSha} = require('../common/ci');
const {cyan} = require('kleur/colors');
const {getLoggingPrefix, logWithoutTimestamp} = require('../common/logging');
const {replaceUrls: replaceUrlsAppUtil} = require('../server/app-utils');
const hostNamePrefix = 'https://storage.googleapis.com/amp-test-website-1';
/**
* @param {string} dest
* @return {Promise<string[]>}
*/
/**
 * Recursively collects the paths of all regular files under a directory.
 * @param {string} dest directory to walk
 * @return {Promise<string[]>} flat list of file paths
 */
async function walk(dest) {
  const filelist = [];
  const files = await fs.readdir(dest);
  for (const name of files) {
    const file = `${dest}/${name}`;
    // Fix: use the promise-based stat for consistency with fs.readdir
    // above; the original mixed in a blocking fs.statSync call.
    const stats = await fs.stat(file);
    if (stats.isDirectory()) {
      filelist.push(...(await walk(file)));
    } else {
      filelist.push(file);
    }
  }
  return filelist;
}
/**
 * Returns the base URL of the uploaded nomodule build for the current
 * CI commit.
 * @return {string}
 */
function getBaseUrl() {
  return hostNamePrefix + '/amp_nomodule_' + ciBuildSha();
}
/**
 * Rewrites the URLs inside a single HTML file so they point at the
 * uploaded build for this commit.
 * @param {string} filePath
 * @return {Promise<void>}
 */
async function replace(filePath) {
  const contents = await fs.readFile(filePath, 'utf8');
  const rewritten = replaceUrlsAppUtil(
    'compiled',
    contents,
    getBaseUrl(),
    /* inabox */ false,
    /* storyV1 */ true
  );
  await fs.writeFile(filePath, rewritten, 'utf8');
}
/**
 * Rewrites URLs in every .html file found under a directory tree.
 * @param {string} dir
 * @return {Promise<void>}
 */
async function replaceUrls(dir) {
  const allFiles = await walk(dir);
  const htmlFiles = allFiles.filter((f) => path.extname(f) === '.html');
  await Promise.all(htmlFiles.map((f) => replace(f)));
}
/**
 * Reports the outcome of a PR deploy upload to the pr-deploy GitHub App.
 * @param {string} result
 * @return {Promise<void>}
 */
async function signalPrDeployUpload(result) {
  logWithoutTimestamp(
    `${getLoggingPrefix()} Reporting`,
    cyan(result),
    'to the pr-deploy GitHub App...'
  );
  const url =
    'https://amp-pr-deploy-bot.appspot.com/v0/pr-deploy/' +
    `headshas/${ciBuildSha()}/${result}`;
  await request.post(url);
}
// Public API consumed by the CI build/deploy tasks.
module.exports = {
  getBaseUrl,
  replaceUrls,
  signalPrDeployUpload,
};
| {
"content_hash": "0c830bc22ee12f0214f050f0f08a7799",
"timestamp": "",
"source": "github",
"line_count": 93,
"max_line_length": 77,
"avg_line_length": 23.032258064516128,
"alnum_prop": 0.65406162464986,
"repo_name": "voyagegroup/amphtml",
"id": "5ecf27415d68c298ca7894632c7ab8c6bcea812e",
"size": "2769",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "build-system/tasks/pr-deploy-bot-utils.js",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "1602462"
},
{
"name": "CSS",
"bytes": "532090"
},
{
"name": "Go",
"bytes": "7573"
},
{
"name": "HTML",
"bytes": "2044560"
},
{
"name": "JavaScript",
"bytes": "19029249"
},
{
"name": "Python",
"bytes": "65270"
},
{
"name": "Shell",
"bytes": "21662"
},
{
"name": "Starlark",
"bytes": "30908"
},
{
"name": "TypeScript",
"bytes": "17646"
},
{
"name": "Yacc",
"bytes": "28669"
}
],
"symlink_target": ""
} |
import pytest
import sqlalchemy as sa
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import scoped_session, sessionmaker
from cleancat import Integer, Schema, StopValidation, String, ValidationError
from cleancat.sqla import SQLAEmbeddedReference, SQLAReference, object_as_dict
Base = declarative_base()
class Person(Base):
    # Minimal SQLAlchemy model used as the referenced object in the
    # SQLAReference / SQLAEmbeddedReference tests below.
    __tablename__ = 'cleancattest'
    id = sa.Column(sa.Integer, primary_key=True)
    name = sa.Column(sa.String)
    age = sa.Column(sa.Integer)
@pytest.fixture
def sqla_session():
    """Set up an SQLA connection, create all tables, and return a session."""
    # An in-memory SQLite database keeps the tests self-contained and fast.
    engine = sa.create_engine('sqlite:///:memory:')
    Base.metadata.create_all(engine)
    session = scoped_session(sessionmaker(bind=engine))
    # Attach a query property so the fields under test can run
    # Person.query-style lookups against this session.
    Person.query = session.query_property()
    return session
def test_object_as_dict():
    """object_as_dict() should map column names to attribute values."""
    person = Person(name='Steve', age=30)
    expected = {'id': None, 'age': 30, 'name': 'Steve'}
    assert object_as_dict(person) == expected
@pytest.mark.usefixtures('sqla_session')
class TestSQLAReferenceField:
    """Behavior of SQLAReference when cleaning object IDs."""

    def test_it_updates_an_existing_instance(self, sqla_session):
        person = Person(name='Steve', age=30)
        sqla_session.add(person)
        sqla_session.commit()
        cleaned = SQLAReference(Person).clean(str(person.id))
        assert isinstance(cleaned, Person)
        assert cleaned.id == person.id

    def test_updating_missing_instance_fails(self):
        with pytest.raises(ValidationError, match='Object does not exist.'):
            SQLAReference(Person).clean('id-that-does-not-exist')

    def test_it_can_be_optional(self):
        optional_field = SQLAReference(Person, required=False)
        with pytest.raises(StopValidation) as excinfo:
            optional_field.clean(None)
        assert excinfo.value.args[0] is None
@pytest.mark.usefixtures('sqla_session')
class TestSchemaWithSQLAEmbeddedReference:
    """Behavior of SQLAEmbeddedReference embedded inside a Schema."""

    @pytest.fixture
    def book_schema_cls(self):
        """Return a Schema class with an optional embedded Person author."""
        class PersonSchema(Schema):
            name = String()
            age = Integer()

        class BookSchema(Schema):
            author = SQLAEmbeddedReference(
                Person, PersonSchema, required=False
            )
            title = String(required=False)

        return BookSchema

    def test_it_creates_a_new_instance(self, book_schema_cls):
        payload = {'author': {'name': 'New Author', 'age': 30}}
        cleaned = book_schema_cls(payload).full_clean()
        author = cleaned['author']
        assert isinstance(author, Person)
        assert not author.id
        assert author.name == 'New Author'
        assert author.age == 30

    def test_it_updates_an_existing_instance(
        self, book_schema_cls, sqla_session
    ):
        person = Person(name='Steve', age=30)
        sqla_session.add(person)
        sqla_session.commit()
        payload = {
            'author': {'id': str(person.id), 'name': 'Updated', 'age': 50}
        }
        cleaned = book_schema_cls(payload).full_clean()
        author = cleaned['author']
        assert isinstance(author, Person)
        assert author.id == person.id
        assert author.name == 'Updated'
        assert author.age == 50

    def test_updating_missing_instance_fails(self, book_schema_cls):
        schema = book_schema_cls(
            {'author': {'id': 123456789, 'name': 'Arbitrary Non-existent ID'}}
        )
        pytest.raises(ValidationError, schema.full_clean)
        assert schema.field_errors == {'author': 'Object does not exist.'}

    def test_it_can_be_optional(self, book_schema_cls):
        schema = book_schema_cls(
            {'title': 'Book without an author', 'author': None}
        )
        assert schema.full_clean() == {
            'title': 'Book without an author',
            'author': None,
        }
| {
"content_hash": "b74f75147e5134d22334750466519be5",
"timestamp": "",
"source": "github",
"line_count": 112,
"max_line_length": 79,
"avg_line_length": 33.669642857142854,
"alnum_prop": 0.635905595332803,
"repo_name": "closeio/cleancat",
"id": "3916bc564c76cf88ce4dbb25a04c99fde1efd255",
"size": "3771",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_sqla.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "90101"
}
],
"symlink_target": ""
} |
from django.conf.urls import patterns, url
from django.contrib import admin
# Authentication routes for the account app; view callables are resolved
# by name relative to the 'account.views' module prefix.
# NOTE(review): patterns() with string view names is a Django < 1.8 idiom
# removed in Django 1.10 -- confirm the target Django version before
# upgrading this project.
urlpatterns = patterns('account.views',
    url(r'^login/$', 'login', name='login'),
    url(r'^logout/$', 'logout', name='logout'),
)
| {
"content_hash": "f49ac8c8d2bfc00e31880597368cd25b",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 47,
"avg_line_length": 30.285714285714285,
"alnum_prop": 0.6745283018867925,
"repo_name": "gdgand/Festi",
"id": "42a2d4b05daeaaa1d305580b5b2b63757a28c278",
"size": "212",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "festi/account/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "367769"
},
{
"name": "CoffeeScript",
"bytes": "15698"
},
{
"name": "Erlang",
"bytes": "2128"
},
{
"name": "HTML",
"bytes": "97067"
},
{
"name": "JavaScript",
"bytes": "71030"
},
{
"name": "Python",
"bytes": "36611"
},
{
"name": "Ruby",
"bytes": "583"
},
{
"name": "Shell",
"bytes": "1176"
}
],
"symlink_target": ""
} |
/************************************
 * Default Syntax Highlighter theme.
 *
 * Interface elements.
 *
 * Every declaration carries !important so the theme wins over any
 * conflicting styles on the host page.
 ************************************/
.syntaxhighlighter { background-color: #fff !important; }

/* Highlighted line number */
.syntaxhighlighter .line.highlighted .number { color: black !important; }

/* Highlighted line (both alternating stripes) */
.syntaxhighlighter .line.highlighted.alt1,
.syntaxhighlighter .line.highlighted.alt2 { background-color: #e0e0e0 !important; }

/* Gutter line numbers */
.syntaxhighlighter .line .number { color: #afafaf !important; }

/* Green left border on each code line */
.syntaxhighlighter .line .content {
	border-left: 3px solid #6CE26C !important;
	color: #000 !important;
}
.syntaxhighlighter.printing .line .content { border: 0 !important; }

/* Alternating line backgrounds (both white in this theme) */
.syntaxhighlighter .line.alt1 { background-color: #fff !important; }
.syntaxhighlighter .line.alt2 { background-color: #fff !important; }

/* Toolbar chrome */
.syntaxhighlighter .toolbar {
	background-color: #F8F8F8 !important;
	border: #E7E5DC solid 1px !important;
}
.syntaxhighlighter .toolbar a { color: #a0a0a0 !important; }
.syntaxhighlighter .toolbar a:hover { color: red !important; }

/************************************
 * Actual syntax highlighter colors.
 ************************************/
.syntaxhighlighter .plain,
.syntaxhighlighter .plain a { color: #000 !important; }

.syntaxhighlighter .comments,
.syntaxhighlighter .comments a { color: #008200 !important; }

.syntaxhighlighter .string,
.syntaxhighlighter .string a { color: blue !important; }

.syntaxhighlighter .keyword {
	color: #069 !important;
	font-weight: bold !important;
}

.syntaxhighlighter .preprocessor { color: gray !important; }
.syntaxhighlighter .variable { color: #a70 !important; }
.syntaxhighlighter .value { color: #090 !important; }
.syntaxhighlighter .functions { color: #ff1493 !important; }
.syntaxhighlighter .constants { color: #0066CC !important; }
.syntaxhighlighter .script { background-color: yellow !important; }

.syntaxhighlighter .color1,
.syntaxhighlighter .color1 a { color: #808080 !important; }

.syntaxhighlighter .color2,
.syntaxhighlighter .color2 a { color: #ff1493 !important; }

.syntaxhighlighter .color3,
.syntaxhighlighter .color3 a { color: red !important; }
| {
"content_hash": "12fb5d6a51d18b8c3d730373b180ab7d",
"timestamp": "",
"source": "github",
"line_count": 145,
"max_line_length": 44,
"avg_line_length": 16.93103448275862,
"alnum_prop": 0.6338085539714867,
"repo_name": "Feitianyuan/vienna-rss",
"id": "1655b424b5ae429d3aedef8c2f6937510f6106c7",
"size": "3526",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "SyntaxHighlighter/shThemeDefault.css",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "51746"
},
{
"name": "HTML",
"bytes": "521489"
},
{
"name": "Makefile",
"bytes": "1005"
},
{
"name": "Objective-C",
"bytes": "1230314"
},
{
"name": "Ruby",
"bytes": "1219"
},
{
"name": "Shell",
"bytes": "16834"
}
],
"symlink_target": ""
} |
package module6;
import de.fhpotsdam.unfolding.data.PointFeature;
import jogamp.opengl.glu.mipmap.Image;
import processing.core.PGraphics;
import processing.core.PImage;
/** Implements a visual marker for ocean earthquakes on an earthquake map
*
* @author UC San Diego Intermediate Software Development MOOC team
*
*/
public class OceanQuakeMarker extends EarthquakeMarker {

	/** Icon drawn at the quake's map location. */
	PImage img;

	public OceanQuakeMarker(PointFeature quake, PImage img) {
		super(quake);
		this.img = img;
		// setting field in earthquake marker
		isOnLand = false;
	}

	/**
	 * Draws the ocean-quake icon centered on (x, y).
	 * The fill color is intentionally not set here; EarthquakeMarker sets
	 * it to indicate the depth of the earthquake.
	 */
	@Override
	public void drawEarthquake(PGraphics pg, float x, float y) {
		// Bug fix: the icon was previously drawn at a fixed (x-15, y-15)
		// offset while being sized by radius, so it was only centered when
		// radius == 15. Offset by the radius (as the commented-out rect in
		// the original did) so the image stays centered for any marker size.
		pg.image(img, x - radius, y - radius, 2 * radius, 2 * radius);
	}
}
| {
"content_hash": "e0c41f4e23040515d694b78e07273310",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 73,
"avg_line_length": 28.076923076923077,
"alnum_prop": 0.7397260273972602,
"repo_name": "Astred/Java-1",
"id": "666e5779e38115d65a8ce8741e7b65d2815014a9",
"size": "1095",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/module6/OceanQuakeMarker.java",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Java",
"bytes": "95080"
}
],
"symlink_target": ""
} |
<?php
class View {

    /** @var string Template name, without the .php extension. */
    private $_template;

    /** @var array Variables exposed to the template via extract(). */
    private $_params;

    /** @var string Base directory that templates are loaded from. */
    public static $tplDir = "tmpl";

    public function __construct($template, $params = array()) {
        $this->_template = $template;
        $this->setParams($params);
    }

    /**
     * Renders the template directly to the output buffer. If a 'platform'
     * parameter is set and a platform-specific template file exists, that
     * file is preferred over the generic one.
     */
    public function display() {
        extract($this->_params);
        $tplpath = self::$tplDir . '/';
        if (!empty($this->_params['platform']) && file_exists($tplpath . $this->_params['platform'] . '/' . $this->_template . ".php")) {
            $tplpath = $tplpath . $this->_params['platform'] . '/';
        }
        include $tplpath . $this->_template . ".php";
    }

    /**
     * Renders the template and returns the output as a string instead of
     * emitting it.
     */
    public function fetch() {
        ob_start();
        $this->display();
        return ob_get_clean();
    }

    public function setParams($params) {
        $this->_params = $params;
    }

    public function setParam($name, $value) {
        $this->_params[$name] = $value;
    }

    /**
     * Returns a single parameter. Uses isset() (not array_key_exists), so a
     * stored NULL value falls back to the default -- this mirrors the
     * original behavior.
     */
    public function getParam($name, $default = null) {
        return isset($this->_params[$name]) ? $this->_params[$name] : $default;
    }
}
| {
"content_hash": "c373f9ab8eeae4b3e92346f19bf0bb82",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 131,
"avg_line_length": 25.289473684210527,
"alnum_prop": 0.6024973985431842,
"repo_name": "mmoroz/morozProject",
"id": "2aefb75c7439b1bb3290bd71a7224f7c03165c5d",
"size": "961",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "import/lib/View.php",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ApacheConf",
"bytes": "817"
},
{
"name": "Batchfile",
"bytes": "1030"
},
{
"name": "CSS",
"bytes": "70082"
},
{
"name": "JavaScript",
"bytes": "17881"
},
{
"name": "PHP",
"bytes": "592411"
}
],
"symlink_target": ""
} |
<?xml version="1.0" encoding="UTF-8"?>
<project>
<modelVersion>4.0.0</modelVersion>
<groupId>org.phpmaven.test</groupId>
<artifactId>test-failing</artifactId>
<packaging>php</packaging>
<name>Sample PHP 5 library project</name>
<version>0.0.1</version>
<build>
<sourceDirectory>src/main/php</sourceDirectory>
<testSourceDirectory>src/test/php</testSourceDirectory>
</build>
<dependencies>
<dependency>
<groupId>de.phpunit</groupId>
<artifactId>PHPUnit</artifactId>
<version>3.6.10</version>
<type>phar</type>
<scope>test</scope>
</dependency>
</dependencies>
</project> | {
"content_hash": "7a122e86cd8f02a68ece6945e592021b",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 57,
"avg_line_length": 26.695652173913043,
"alnum_prop": 0.7019543973941368,
"repo_name": "teosoft123/maven-php-plugin",
"id": "d71908ef33cbc4c8dcd08f6b65fe5243b724a409",
"size": "614",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "maven-plugins/maven-php-plugin/src/test/resources/org/phpmaven/test/projects/mojos-phpunit/test-failing/pom.xml",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ApacheConf",
"bytes": "193"
},
{
"name": "Batchfile",
"bytes": "96"
},
{
"name": "CSS",
"bytes": "144"
},
{
"name": "DIGITAL Command Language",
"bytes": "1"
},
{
"name": "HTML",
"bytes": "1820"
},
{
"name": "Java",
"bytes": "1312573"
},
{
"name": "JavaScript",
"bytes": "643"
},
{
"name": "PHP",
"bytes": "52703"
},
{
"name": "Shell",
"bytes": "160"
}
],
"symlink_target": ""
} |
using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
#if UNENG
namespace UnEngine
#else
namespace UnityEngine
#endif
{
    /// <summary>
    /// Stand-in for Unity's Gizmos API. Every Draw* call here is a no-op;
    /// only the static color/matrix state is kept.
    /// </summary>
    public sealed class Gizmos
    {
        /// <summary>Color applied to subsequently drawn gizmos.</summary>
        public static Color color { get; set; }
        /// <summary>Transform matrix applied to subsequently drawn gizmos.</summary>
        public static Matrix4x4 matrix { get; set; }
        public static void DrawCube(Vector3 center, Vector3 size) { }
        public static void DrawFrustum(Vector3 center, float fov, float maxRange, float minRange, float aspect) { }
        public static void DrawGUITexture(Rect screenRect, Texture texture) { }
        public static void DrawGUITexture(Rect screenRect, Texture texture, Material mat) { }
        public static void DrawGUITexture(Rect screenRect, Texture texture, int leftBorder, int rightBorder, int topBorder, int bottomBorder) { }
        public static void DrawGUITexture(Rect screenRect, Texture texture, int leftBorder, int rightBorder, int topBorder, int bottomBorder, Material mat) { }
        public static void DrawIcon(Vector3 center, string name) { }
        public static void DrawIcon(Vector3 center, string name, bool allowScaling) { }
        public static void DrawLine(Vector3 from, Vector3 to) { }
        public static void DrawRay(Ray r) { }
        public static void DrawRay(Vector3 from, Vector3 direction) { }
        public static void DrawSphere(Vector3 center, float radius) { }
        public static void DrawWireCube(Vector3 center, Vector3 size) { }
        public static void DrawWireSphere(Vector3 center, float radius) { }
    }
}
| {
"content_hash": "137b74db03f197d6f14ad0a27418a190",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 153,
"avg_line_length": 40.8,
"alnum_prop": 0.7626050420168067,
"repo_name": "jbruening/UnEngine",
"id": "1a32951940700e21690aa60fa58be783b2a30556",
"size": "1430",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/UnEngine/Utils/Gizmos.cs",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C#",
"bytes": "244180"
}
],
"symlink_target": ""
} |
<?php
/**
* Created by PhpStorm.
* User: Maxime
* Date: 29/02/2016
* Time: 23:21
*/
namespace App\general;
class fonction
{
    /**
     * Builds an identification token for an action performed by a client.
     *
     * NOTE(review): the token is built from str_shuffle()/sha1(), which are
     * not cryptographically secure; confirm whether these tokens must be
     * unguessable before relying on them for security decisions.
     *
     * @param mixed $idclient identifier of the client performing the action
     * @return string token of the form "<time>_<sha1>_<sha1(ip)>_<id>"
     */
    public function gen_token($idclient)
    {
        // Assumes a web SAPI: $_SERVER['REMOTE_ADDR'] is undefined under CLI.
        $ip_client = sha1($_SERVER['REMOTE_ADDR']);
        $heure = strtotime(date("H:i"));
        $salt = "_";
        $caractere = "azertyuiopqsdfghjklmwxcvbnAZERTYUIOPQSDFGHJKLMWXCVBN0123456789";
        $random = substr(str_shuffle($caractere), 0, 10);
        return $heure.$salt.sha1($random).$salt.$ip_client.$salt.$idclient;
    }

    /**
     * Builds a username from a client's name.
     *
     * @param string $nom_client    last name
     * @param string $prenom_client first name
     * @return string e.g. "jdoe" for John Doe
     */
    public function gen_username($nom_client, $prenom_client){
        return $prenom_client[0].$nom_client;
    }

    /**
     * Generates a random 6-character alphanumeric password.
     *
     * NOTE(review): str_shuffle() is not a CSPRNG and never repeats a
     * character; consider random_int() if stronger passwords are required.
     *
     * @return string
     */
    public function gen_password()
    {
        $caractere = "AZERTUIOPQSDFGHJLMWXCVBNazertyuiopqsdfghjklmwxcvbn0123456789";
        return substr(str_shuffle($caractere), 0, 6);
    }

    /**
     * Formats an amount for display.
     *
     * @param float $chiffre amount in standard format (0.00)
     * @return string amount formatted as "0,00 €"
     */
    public function number_decimal($chiffre)
    {
        return number_format($chiffre, 2, ',', ' ')." €";
    }

    /**
     * Streams a file to the client as an attachment, then terminates the
     * request.
     *
     * @param string $nom       file name offered to the client
     * @param string $read_file direct path to the file on disk
     */
    public function download_file($nom, $read_file)
    {
        header("Content-Type: application/octet-stream");
        header("Content-disposition: attachment; filename=".$nom);
        header('Pragma: no-cache');
        header('Cache-Control: no-store, no-cache, must-revalidate, post-check=0, pre-check=0');
        header('Expires: 0');
        readfile($read_file);
        exit();
    }

    /**
     * Redirects to an application URL built from the given parts.
     *
     * @param string $view    first segment (view/)
     * @param string $sub     second segment (view->sub)
     * @param string $data    third segment (sub->data)
     * @param string $type    one of: success, warning, error, info
     * @param string $service service invoked, e.g. add-user
     * @param string $text    message returned by the service
     */
    public function redirect($view = null, $sub = null, $data = null, $type = null, $service = null, $text = null){
        $constante = new constante();
        // Bug fix: $redirect was previously undefined when $view was empty,
        // raising a PHP notice and relying on implicit "" coercion.
        $redirect = "";
        if(!empty($view)){$redirect = "index.php?view=".$view;}
        if(!empty($sub)){$redirect .= "&sub=".$sub;}
        if(!empty($data)){$redirect .= "&data=".$data;}
        if(!empty($type)){$redirect .= "&".$type."=".$service."&text=".$text;}
        header("Location: ".$constante->getUrl(array(), false).$redirect);
    }

    /**
     * Returns true when the current request was made via XMLHttpRequest.
     *
     * @return bool
     */
    public function is_ajax(){
        return isset($_SERVER['HTTP_X_REQUESTED_WITH']) && strtolower($_SERVER['HTTP_X_REQUESTED_WITH']) == 'xmlhttprequest';
    }
}
"content_hash": "01ecece3bbc3712ce14acf40df7e0ec3",
"timestamp": "",
"source": "github",
"line_count": 99,
"max_line_length": 125,
"avg_line_length": 33.313131313131315,
"alnum_prop": 0.5745906610066707,
"repo_name": "CRIDIP-SWD/GESTCOM",
"id": "acc3e1938256f6802beade1b8fa71ab6391785d8",
"size": "3317",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "application/general/fonction.php",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ASP",
"bytes": "290"
},
{
"name": "ActionScript",
"bytes": "15982"
},
{
"name": "ApacheConf",
"bytes": "107"
},
{
"name": "CSS",
"bytes": "3197474"
},
{
"name": "CoffeeScript",
"bytes": "83631"
},
{
"name": "HTML",
"bytes": "6305637"
},
{
"name": "JavaScript",
"bytes": "11371222"
},
{
"name": "PHP",
"bytes": "487623"
},
{
"name": "Ruby",
"bytes": "5494"
},
{
"name": "Shell",
"bytes": "10442"
}
],
"symlink_target": ""
} |
@rem Run Composer inside the docker-compose "composer" service, forwarding
@rem all command-line arguments (%*).
@rem pushd/popd (instead of cd / cd ..) restores the caller's original
@rem directory even if it was not the script's parent directory.
pushd docker
docker-compose run --rm composer %*
popd
| {
"content_hash": "60233062ca16206e47931a6c1d3e8a1e",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 35,
"avg_line_length": 17.333333333333332,
"alnum_prop": 0.6923076923076923,
"repo_name": "mattKendon/traws",
"id": "4c95a779dc95b08724c695a127e91f22eee11c4c",
"size": "52",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "docker/bin-windows/composer.bat",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "509"
},
{
"name": "CSS",
"bytes": "997945"
},
{
"name": "JavaScript",
"bytes": "1222016"
},
{
"name": "PHP",
"bytes": "3289200"
},
{
"name": "Shell",
"bytes": "1893"
}
],
"symlink_target": ""
} |
package uiotest
import (
"io"
"testing"
"github.com/u-root/u-root/pkg/testutil"
"github.com/u-root/u-root/pkg/uio"
)
// TestLineWriter is an io.Writer that logs full lines of serial to tb.
// A non-empty prefix is prepended to every logged line.
func TestLineWriter(tb testing.TB, prefix string) io.WriteCloser {
	if prefix == "" {
		return uio.FullLineWriter(&testLineWriter{tb: tb})
	}
	return uio.FullLineWriter(&testLinePrefixWriter{tb: tb, prefix: prefix})
}
// testLinePrefixWriter logs each complete line of serial output to tb,
// prepended with a fixed prefix.
type testLinePrefixWriter struct {
	tb     testing.TB
	prefix string
}

// OneLine logs the single full line p with a timestamp and the prefix.
func (w *testLinePrefixWriter) OneLine(p []byte) {
	w.tb.Logf("%s %s: %s", testutil.NowLog(), w.prefix, p)
}
// testLineWriter logs each complete line of serial output to tb.
type testLineWriter struct {
	tb testing.TB
}

// OneLine logs the single full line p with a timestamp.
func (w *testLineWriter) OneLine(p []byte) {
	w.tb.Logf("%s: %s", testutil.NowLog(), p)
}
| {
"content_hash": "9977ad84b6a57cb73ae983e75742c259",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 77,
"avg_line_length": 24.916666666666668,
"alnum_prop": 0.7168338907469343,
"repo_name": "hugelgupf/u-root",
"id": "826bc5195196e7718b6c18e53f86d07fdec6d63a",
"size": "1060",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "pkg/uio/uiotest/uiotest.go",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Assembly",
"bytes": "2717"
},
{
"name": "C",
"bytes": "598"
},
{
"name": "Dockerfile",
"bytes": "11562"
},
{
"name": "Go",
"bytes": "3924400"
},
{
"name": "Makefile",
"bytes": "185"
},
{
"name": "Python",
"bytes": "5194"
},
{
"name": "Shell",
"bytes": "952"
}
],
"symlink_target": ""
} |
using System;
namespace Bog.Web.Api.Areas.HelpPage.ModelDescriptions
{
    /// <summary>
    /// Describes a type model.
    /// </summary>
    public abstract class ModelDescription
    {
        /// <summary>Human-readable documentation for the model, if any.</summary>
        public string Documentation { get; set; }
        /// <summary>The CLR type being described.</summary>
        public Type ModelType { get; set; }
        /// <summary>Display name of the model.</summary>
        public string Name { get; set; }
    }
} | {
"content_hash": "87b7ee0358711c69cfd6883cd510eab2",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 54,
"avg_line_length": 20.875,
"alnum_prop": 0.6137724550898204,
"repo_name": "BankOfGiving/Bog.net",
"id": "66850376d9fcf695ffdc5cc5da4d57662e1c59fe",
"size": "334",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Bog.Web.Api/Areas/HelpPage/ModelDescriptions/ModelDescription.cs",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "ASP",
"bytes": "211"
},
{
"name": "C#",
"bytes": "492174"
},
{
"name": "CSS",
"bytes": "5332"
},
{
"name": "JavaScript",
"bytes": "641050"
}
],
"symlink_target": ""
} |
import unittest
from vehicle import Vehicle
# NOTE(review): this file uses Python 2 syntax (the bare `print f`
# statement below) -- it will not run under Python 3 as-is.
class UtDemo(unittest.TestCase):
    '''A Unit Test Demo'''
    def setUp(self):
        "Create a list of test files"
        # NOTE(review): the list mixes str and int entries -- the third item
        # is unquoted; presumably all three should be strings. Confirm.
        self.time_list=['20120912072912','20120913072230',20120912073312]
        for f in self.time_list:
            print f
    def test_int(self):
        # Trivial sanity check; assertEquals is a deprecated alias of
        # assertEqual.
        self.assertEquals(2,2,'number not equals')
    def test_vehicle(self):
        # Smoke test: constructs a Vehicle and calls display() -- no
        # assertions are made on the output.
        v = Vehicle('Corolla')
        v.display()
# Allow running this file directly as a script.
if __name__=='__main__': unittest.main()
| {
"content_hash": "730f7487ea4d40ff972543d6859e5821",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 73,
"avg_line_length": 24.9,
"alnum_prop": 0.6024096385542169,
"repo_name": "vollov/py-lab",
"id": "a960771067f3064aa34cee6b5f73f7c43b0d9d21",
"size": "516",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/oo/utdemo.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "22"
},
{
"name": "JavaScript",
"bytes": "685"
},
{
"name": "PLSQL",
"bytes": "6838"
},
{
"name": "Python",
"bytes": "254226"
},
{
"name": "Shell",
"bytes": "734"
},
{
"name": "Smarty",
"bytes": "1829"
}
],
"symlink_target": ""
} |
//------------------------------------------------------------------------------
// <auto-generated>
// This code was generated by a tool.
// Runtime Version:4.0.30319.42000
//
// Changes to this file may cause incorrect behavior and will be lost if
// the code is regenerated.
// </auto-generated>
//------------------------------------------------------------------------------
namespace ExpenseManager.Resources.ManageResources {
using System;
/// <summary>
/// A strongly-typed resource class, for looking up localized strings, etc.
/// </summary>
// This class was auto-generated by the StronglyTypedResourceBuilder
// class via a tool like ResGen or Visual Studio.
// To add or remove a member, edit your .ResX file then rerun ResGen
// with the /str option, or rebuild your VS project.
    /// <summary>
    /// Strongly-typed accessors for the localized "Manage account" resource
    /// strings. Generated by StronglyTypedResourceBuilder (see the
    /// GeneratedCodeAttribute below); regenerate through the resource designer
    /// rather than editing by hand.
    /// </summary>
    [global::System.CodeDom.Compiler.GeneratedCodeAttribute("System.Resources.Tools.StronglyTypedResourceBuilder", "4.0.0.0")]
    [global::System.Diagnostics.DebuggerNonUserCodeAttribute()]
    [global::System.Runtime.CompilerServices.CompilerGeneratedAttribute()]
    public class ManageResource {
        // Lazily-created ResourceManager shared by every lookup; see the
        // ResourceManager property for initialization.
        private static global::System.Resources.ResourceManager resourceMan;
        // Optional culture override for lookups; null means the thread's
        // current UI culture is used.
        private static global::System.Globalization.CultureInfo resourceCulture;
        [global::System.Diagnostics.CodeAnalysis.SuppressMessageAttribute("Microsoft.Performance", "CA1811:AvoidUncalledPrivateCode")]
        internal ManageResource() {
        }
        /// <summary>
        /// Returns the cached ResourceManager instance used by this class.
        /// </summary>
        [global::System.ComponentModel.EditorBrowsableAttribute(global::System.ComponentModel.EditorBrowsableState.Advanced)]
        public static global::System.Resources.ResourceManager ResourceManager {
            get {
                // Lazy init; ReferenceEquals avoids any overloaded == operator.
                if (object.ReferenceEquals(resourceMan, null)) {
                    global::System.Resources.ResourceManager temp = new global::System.Resources.ResourceManager("ExpenseManager.Resources.ManageResources.ManageResource", typeof(ManageResource).Assembly);
                    resourceMan = temp;
                }
                return resourceMan;
            }
        }
        /// <summary>
        /// Overrides the current thread's CurrentUICulture property for all
        /// resource lookups using this strongly typed resource class.
        /// </summary>
        [global::System.ComponentModel.EditorBrowsableAttribute(global::System.ComponentModel.EditorBrowsableState.Advanced)]
        public static global::System.Globalization.CultureInfo Culture {
            get {
                return resourceCulture;
            }
            set {
                resourceCulture = value;
            }
        }
        /// <summary>
        /// Looks up a localized string similar to Change password.
        /// </summary>
        public static string ChangePassword {
            get {
                return ResourceManager.GetString("ChangePassword", resourceCulture);
            }
        }
        /// <summary>
        /// Looks up a localized string similar to Change your password.
        /// </summary>
        public static string ChangeYourPassword {
            get {
                return ResourceManager.GetString("ChangeYourPassword", resourceCulture);
            }
        }
        /// <summary>
        /// Looks up a localized string similar to Create local login.
        /// </summary>
        public static string CreateLocalLogin {
            get {
                return ResourceManager.GetString("CreateLocalLogin", resourceCulture);
            }
        }
        /// <summary>
        /// Looks up a localized string similar to Create password.
        /// </summary>
        public static string CreatePassword {
            get {
                return ResourceManager.GetString("CreatePassword", resourceCulture);
            }
        }
        /// <summary>
        /// Looks up a localized string similar to An error has occurred.
        /// </summary>
        public static string ErrorOccured {
            get {
                return ResourceManager.GetString("ErrorOccured", resourceCulture);
            }
        }
        /// <summary>
        /// Looks up a localized string similar to The external login was removed.
        /// </summary>
        public static string ExternalLoginRemoved {
            get {
                return ResourceManager.GetString("ExternalLoginRemoved", resourceCulture);
            }
        }
        /// <summary>
        /// Looks up a localized string similar to External Logins.
        /// </summary>
        public static string ExternalLogins {
            get {
                return ResourceManager.GetString("ExternalLogins", resourceCulture);
            }
        }
        /// <summary>
        /// Looks up a localized string similar to Log in using your {0} account.
        /// </summary>
        public static string LogInWithExternalAccount {
            get {
                return ResourceManager.GetString("LogInWithExternalAccount", resourceCulture);
            }
        }
        /// <summary>
        /// Looks up a localized string similar to Manage account.
        /// </summary>
        public static string ManageAccount {
            get {
                return ResourceManager.GetString("ManageAccount", resourceCulture);
            }
        }
        /// <summary>
        /// Looks up a localized string similar to Manage your external logins.
        /// </summary>
        public static string ManageExternalLogins {
            get {
                return ResourceManager.GetString("ManageExternalLogins", resourceCulture);
            }
        }
        /// <summary>
        /// Looks up a localized string similar to There are no external authentication services configured. See <a href="http://go.microsoft.com/fwlink/?LinkId=313242">this article</a> for details on setting up this ASP.NET application to support logging in via external services..
        /// </summary>
        public static string NoExternalAuthServices {
            get {
                return ResourceManager.GetString("NoExternalAuthServices", resourceCulture);
            }
        }
        /// <summary>
        /// Looks up a localized string similar to You do not have a local Username/password for this site. Add a local account so you can log in without an external login.
        /// </summary>
        public static string NoLocalUsername {
            get {
                return ResourceManager.GetString("NoLocalUsername", resourceCulture);
            }
        }
        /// <summary>
        /// Looks up a localized string similar to Password.
        /// </summary>
        public static string Password {
            get {
                return ResourceManager.GetString("Password", resourceCulture);
            }
        }
        /// <summary>
        /// Looks up a localized string similar to Your password has been changed.
        /// </summary>
        public static string PasswordChanged {
            get {
                return ResourceManager.GetString("PasswordChanged", resourceCulture);
            }
        }
        /// <summary>
        /// Looks up a localized string similar to Your password has been set.
        /// </summary>
        public static string PasswordSet {
            get {
                return ResourceManager.GetString("PasswordSet", resourceCulture);
            }
        }
        /// <summary>
        /// Looks up a localized string similar to Your phone number was added.
        /// </summary>
        public static string PhoneNumberAdded {
            get {
                return ResourceManager.GetString("PhoneNumberAdded", resourceCulture);
            }
        }
        /// <summary>
        /// Looks up a localized string similar to Your phone number was removed.
        /// </summary>
        public static string PhoneNumberRemoved {
            get {
                return ResourceManager.GetString("PhoneNumberRemoved", resourceCulture);
            }
        }
        /// <summary>
        /// Looks up a localized string similar to Registered logins.
        /// </summary>
        public static string RegisteredLogins {
            get {
                return ResourceManager.GetString("RegisteredLogins", resourceCulture);
            }
        }
        /// <summary>
        /// Looks up a localized string similar to Remove this {0} login from your account.
        /// </summary>
        public static string RemoveLoginProvider {
            get {
                return ResourceManager.GetString("RemoveLoginProvider", resourceCulture);
            }
        }
        /// <summary>
        /// Looks up a localized string similar to Set password.
        /// </summary>
        public static string SetPassword {
            get {
                return ResourceManager.GetString("SetPassword", resourceCulture);
            }
        }
        /// <summary>
        /// Looks up a localized string similar to Your two-factor authentication provider has been set.
        /// </summary>
        public static string TwoFactorAuthProviderSet {
            get {
                return ResourceManager.GetString("TwoFactorAuthProviderSet", resourceCulture);
            }
        }
    }
}
| {
"content_hash": "23bf8375ac483f80edef515c1d8b7e3d",
"timestamp": "",
"source": "github",
"line_count": 252,
"max_line_length": 306,
"avg_line_length": 38.3531746031746,
"alnum_prop": 0.566063114330057,
"repo_name": "tiltom/PV247-Expense-manager",
"id": "3c09eba02ab8b50c6098225d35ec47f65df4d930",
"size": "9667",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ExpenseManager/ExpenseManager.Resources/ManageResources/ManageResource.Designer.cs",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "ASP",
"bytes": "109"
},
{
"name": "C#",
"bytes": "559606"
},
{
"name": "CSS",
"bytes": "57769"
},
{
"name": "HTML",
"bytes": "5127"
},
{
"name": "JavaScript",
"bytes": "51106"
}
],
"symlink_target": ""
} |
package io.agrest.cayenne;
import io.agrest.SimpleResponse;
import io.agrest.cayenne.cayenne.main.E2;
import io.agrest.cayenne.cayenne.main.E3;
import io.agrest.cayenne.cayenne.main.E4;
import io.agrest.cayenne.unit.AgCayenneTester;
import io.agrest.cayenne.unit.DbTest;
import io.agrest.jaxrs2.AgJaxrs;
import io.agrest.meta.AgEntity;
import io.bootique.junit5.BQTestTool;
import org.junit.jupiter.api.Test;
import javax.ws.rs.DELETE;
import javax.ws.rs.Path;
import javax.ws.rs.PathParam;
import javax.ws.rs.core.Configuration;
import javax.ws.rs.core.Context;
/**
 * Integration tests for DELETE authorizers on {@code E2}: one authorizer
 * installed in the Agrest stack via an entity overlay, and one supplied
 * per-request by the resource method. Each variant is exercised on both the
 * allowed and the blocked path.
 */
public class DELETE_AuthorizerIT extends DbTest {
    // Shared test stack. The E2 overlay blocks deletion of any object whose
    // name is "dont_delete".
    @BQTestTool
    static final AgCayenneTester tester = tester(Resource.class)
            .entities(E2.class, E3.class, E4.class)
            .agCustomizer(ab -> ab
                    .entityOverlay(AgEntity.overlay(E2.class).deleteAuthorizer(o -> !"dont_delete".equals(o.getName())))
            ).build();
    // Stack-level authorizer permits the delete: row 1 is removed, row 2 stays.
    @Test
    public void testInStack_Allowed() {
        tester.e2().insertColumns("id_", "name")
                .values(1, "a")
                .values(2, "b")
                .exec();
        tester.target("/e2_stack_authorizer/1").delete().wasOk();
        tester.e2().matcher().assertMatches(1);
        tester.e2().matcher().eq("name", "b").assertOneMatch();
    }
    // Stack-level authorizer rejects the delete: 403 is returned and both rows
    // remain in the table.
    @Test
    public void testInStack_Blocked() {
        tester.e2().insertColumns("id_", "name")
                .values(1, "dont_delete")
                .values(2, "b")
                .exec();
        tester.target("/e2_stack_authorizer/1")
                .delete()
                .wasForbidden();
        tester.e2().matcher().assertMatches(2);
        tester.e2().matcher().eq("name", "dont_delete").assertOneMatch();
        tester.e2().matcher().eq("name", "b").assertOneMatch();
    }
    // Per-request authorizer (blocks objects whose name equals the path
    // parameter) permits the delete because row 1's name differs.
    @Test
    public void testInRequestAndStack_Allowed() {
        tester.e2().insertColumns("id_", "name")
                .values(1, "a")
                .values(2, "b")
                .exec();
        tester.target("/e2_request_and_stack_authorizer/1/can_delete")
                .delete()
                .wasOk();
        tester.e2().matcher().assertMatches(1);
        tester.e2().matcher().eq("name", "b").assertOneMatch();
    }
    // Per-request authorizer rejects the delete: the path parameter matches
    // row 1's name, so 403 is returned and nothing is removed.
    @Test
    public void testInRequestAndStack_Blocked() {
        tester.e2().insertColumns("id_", "name")
                .values(1, "dont_delete_this_either")
                .values(2, "b")
                .exec();
        tester.target("/e2_request_and_stack_authorizer/1/dont_delete_this_either")
                .delete()
                .wasForbidden();
        tester.e2().matcher().assertMatches(2);
        tester.e2().matcher().eq("name", "dont_delete_this_either").assertOneMatch();
        tester.e2().matcher().eq("name", "b").assertOneMatch();
    }
    /** JAX-RS endpoints under test. */
    @Path("")
    public static class Resource {
        @Context
        private Configuration config;
        // Deletes an E2 by id; only the stack-level (overlay) authorizer applies.
        // NOTE(review): the method names say "put" but these are DELETE
        // endpoints -- consider renaming for clarity (JAX-RS dispatch does not
        // depend on the method name).
        @DELETE
        @Path("e2_stack_authorizer/{id}")
        public SimpleResponse putE2StackFilter(@PathParam("id") int id) {
            return AgJaxrs.delete(E2.class, config)
                    .byId(id)
                    .sync();
        }
        // Deletes an E2 by id with an additional per-request authorizer that
        // blocks any object whose name equals the {name} path parameter.
        @DELETE
        @Path("e2_request_and_stack_authorizer/{id}/{name}")
        public SimpleResponse putE2RequestAndStackFilter(
                @PathParam("name") String name,
                @PathParam("id") int id) {
            return AgJaxrs.delete(E2.class, config)
                    .byId(id)
                    .authorizer(o -> !name.equals(o.getName()))
                    .sync();
        }
    }
}
| {
"content_hash": "dc69aa15b4a2bae475a5bb7e519558a6",
"timestamp": "",
"source": "github",
"line_count": 117,
"max_line_length": 120,
"avg_line_length": 30.76068376068376,
"alnum_prop": 0.5618227285357044,
"repo_name": "nhl/link-rest",
"id": "5c601ac7c9af10d196fa9dacff621239104c877f",
"size": "3599",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "agrest-cayenne/src/test/java/io/agrest/cayenne/DELETE_AuthorizerIT.java",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "2660"
},
{
"name": "Java",
"bytes": "1290059"
},
{
"name": "XSLT",
"bytes": "16456"
}
],
"symlink_target": ""
} |
/**
 * Entry point for a world page: parses the page, then asks the main add-on
 * code (via self.port) for that world's player list and updates the page
 * with the result. All progress and errors go through TosPage.debug.
 */
function TosPage(page) {
  page.parse(function(err) {
    if (err) {
      TosPage.debug(err);
      return;
    }
    TosPage.debug(page.toString());
    self.port.once('query:world', function(res) {
      if (res.error) {
        TosPage.debug(res.error);
        return;
      }
      TosPage.debug(res.players);
      page.update(res.players);
      TosPage.debug('Done');
    });
    self.port.emit('query:world', page.world);
  });
}
/**
 * Conditional console logger: queries the add-on options and prints the
 * message only when the debugOutput option is enabled.
 */
TosPage.debug = function(message) {
  var logIfEnabled = function(options) {
    if (options.debugOutput) {
      console.log('Tibia Online Status:', message);
    }
  };
  self.port.once('query:options', logIfEnabled);
  self.port.emit('query:options');
};
| {
"content_hash": "3472f8aa9cbf2cf245dcb63ce7a73989",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 56,
"avg_line_length": 24.35483870967742,
"alnum_prop": 0.5695364238410596,
"repo_name": "gpedro/tibia_online_status",
"id": "d3ae9635e7e07c163dff4a791da9ae0785055665",
"size": "755",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/firefox/data/firefox_page.js",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "700165"
},
{
"name": "JavaScript",
"bytes": "218477"
}
],
"symlink_target": ""
} |
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8">
<title>BuildConfiguration - FAKE - F# Make</title>
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<meta name="description" content="">
<meta name="author" content="Steffen Forkmann, Mauricio Scheffer, Colin Bull">
<script src="https://code.jquery.com/jquery-1.8.0.js"></script>
<script src="https://code.jquery.com/ui/1.8.23/jquery-ui.js"></script>
<script src="https://netdna.bootstrapcdn.com/twitter-bootstrap/2.2.1/js/bootstrap.min.js"></script>
<script type="text/javascript" src="http://cdn.mathjax.org/mathjax/latest/MathJax.js?config=TeX-AMS-MML_HTMLorMML"></script>
<link href="https://netdna.bootstrapcdn.com/twitter-bootstrap/2.2.1/css/bootstrap-combined.min.css" rel="stylesheet">
<link type="text/css" rel="stylesheet" href="http://fsharp.github.io/FAKE/content/style.css" />
<script type="text/javascript" src="http://fsharp.github.io/FAKE/content/tips.js"></script>
<!-- HTML5 shim, for IE6-8 support of HTML5 elements -->
<!--[if lt IE 9]>
<script src="http://html5shim.googlecode.com/svn/trunk/html5.js"></script>
<![endif]-->
</head>
<body>
<div class="container">
<div class="masthead">
<ul class="nav nav-pills pull-right">
<li><a href="http://fsharp.org">fsharp.org</a></li>
<li><a href="http://github.com/fsharp/fake">github page</a></li>
</ul>
<h3 class="muted"><a href="http://fsharp.github.io/FAKE/index.html">FAKE - F# Make</a></h3>
</div>
<hr />
<div class="row">
<div class="span9" id="main">
<h1>BuildConfiguration</h1>
<div class="xmldoc">
<p>Record type which stores Build configuration properties</p>
</div>
<h3>Record Fields</h3>
<table class="table table-bordered member-list">
<thead>
<tr><td>Record Field</td><td>Description</td></tr>
</thead>
<tbody>
<tr>
<td class="member-name">
<code onmouseout="hideTip(event, '1469', 1469)" onmouseover="showTip(event, '1469', 1469)">
Builds
</code>
<div class="tip" id="1469">
<strong>Signature:</strong> seq<Build><br />
</div>
</td>
<td class="xmldoc">
<a href="https://github.com/fsharp/FAKE/blob/master/src/app/FakeLib/TeamCityRESTHelper.fs#L36-36" class="github-link">
<img src="../content/img/github.png" class="normal" />
<img src="../content/img/github-blue.png" class="hover" />
</a>
</td>
</tr>
<tr>
<td class="member-name">
<code onmouseout="hideTip(event, '1470', 1470)" onmouseover="showTip(event, '1470', 1470)">
Description
</code>
<div class="tip" id="1470">
<strong>Signature:</strong> string<br />
</div>
</td>
<td class="xmldoc">
<a href="https://github.com/fsharp/FAKE/blob/master/src/app/FakeLib/TeamCityRESTHelper.fs#L35-35" class="github-link">
<img src="../content/img/github.png" class="normal" />
<img src="../content/img/github-blue.png" class="hover" />
</a>
</td>
</tr>
<tr>
<td class="member-name">
<code onmouseout="hideTip(event, '1471', 1471)" onmouseover="showTip(event, '1471', 1471)">
ID
</code>
<div class="tip" id="1471">
<strong>Signature:</strong> string<br />
</div>
</td>
<td class="xmldoc">
<a href="https://github.com/fsharp/FAKE/blob/master/src/app/FakeLib/TeamCityRESTHelper.fs#L30-30" class="github-link">
<img src="../content/img/github.png" class="normal" />
<img src="../content/img/github-blue.png" class="hover" />
</a>
</td>
</tr>
<tr>
<td class="member-name">
<code onmouseout="hideTip(event, '1472', 1472)" onmouseover="showTip(event, '1472', 1472)">
Name
</code>
<div class="tip" id="1472">
<strong>Signature:</strong> string<br />
</div>
</td>
<td class="xmldoc">
<a href="https://github.com/fsharp/FAKE/blob/master/src/app/FakeLib/TeamCityRESTHelper.fs#L31-31" class="github-link">
<img src="../content/img/github.png" class="normal" />
<img src="../content/img/github-blue.png" class="hover" />
</a>
</td>
</tr>
<tr>
<td class="member-name">
<code onmouseout="hideTip(event, '1473', 1473)" onmouseover="showTip(event, '1473', 1473)">
Paused
</code>
<div class="tip" id="1473">
<strong>Signature:</strong> bool<br />
</div>
</td>
<td class="xmldoc">
<a href="https://github.com/fsharp/FAKE/blob/master/src/app/FakeLib/TeamCityRESTHelper.fs#L34-34" class="github-link">
<img src="../content/img/github.png" class="normal" />
<img src="../content/img/github-blue.png" class="hover" />
</a>
</td>
</tr>
<tr>
<td class="member-name">
<code onmouseout="hideTip(event, '1474', 1474)" onmouseover="showTip(event, '1474', 1474)">
ProjectID
</code>
<div class="tip" id="1474">
<strong>Signature:</strong> string<br />
</div>
</td>
<td class="xmldoc">
<a href="https://github.com/fsharp/FAKE/blob/master/src/app/FakeLib/TeamCityRESTHelper.fs#L33-33" class="github-link">
<img src="../content/img/github.png" class="normal" />
<img src="../content/img/github-blue.png" class="hover" />
</a>
</td>
</tr>
<tr>
<td class="member-name">
<code onmouseout="hideTip(event, '1475', 1475)" onmouseover="showTip(event, '1475', 1475)">
WebURL
</code>
<div class="tip" id="1475">
<strong>Signature:</strong> string<br />
</div>
</td>
<td class="xmldoc">
<a href="https://github.com/fsharp/FAKE/blob/master/src/app/FakeLib/TeamCityRESTHelper.fs#L32-32" class="github-link">
<img src="../content/img/github.png" class="normal" />
<img src="../content/img/github-blue.png" class="hover" />
</a>
</td>
</tr>
</tbody>
</table>
</div>
<div class="span3">
<a href="http://fsharp.github.io/FAKE/index.html">
<img src="http://fsharp.github.io/FAKE/pics/logo.png" style="width:140px;height:140px;margin:10px 0px 0px 35px;border-style:none;" />
</a>
<ul class="nav nav-list" id="menu">
<li class="nav-header">FAKE - F# Make</li>
<li class="divider"></li>
<li><a href="http://fsharp.github.io/FAKE/index.html">Home page</a></li>
<li class="divider"></li>
<li><a href="https://www.nuget.org/packages/FAKE">Get FAKE - F# Make via NuGet</a></li>
<li><a href="http://github.com/fsharp/fake">Source Code on GitHub</a></li>
<li><a href="http://github.com/fsharp/fake/blob/master/License.txt">License (MS-PL)</a></li>
<li><a href="http://fsharp.github.io/FAKE/RELEASE_NOTES.html">Release Notes</a></li>
<li><a href="http://fsharp.github.io/FAKE//contributing.html">Contributing to FAKE - F# Make</a></li>
<li><a href="http://fsharp.github.io/FAKE/users.html">Who is using FAKE?</a></li>
<li><a href="http://stackoverflow.com/questions/tagged/f%23-fake">Ask a question</a></li>
<li class="nav-header">Tutorials</li>
<li><a href="http://fsharp.github.io/FAKE/gettingstarted.html">Getting started</a></li>
<li class="divider"></li>
<li><a href="http://fsharp.github.io/FAKE/nuget.html">NuGet package restore</a></li>
<li><a href="http://fsharp.github.io/FAKE/fxcop.html">Using FxCop in a build</a></li>
<li><a href="http://fsharp.github.io/FAKE/assemblyinfo.html">Generating AssemblyInfo</a></li>
<li><a href="http://fsharp.github.io/FAKE/create-nuget-package.html">Create NuGet packages</a></li>
<li><a href="http://fsharp.github.io/FAKE/specifictargets.html">Running specific targets</a></li>
<li><a href="http://fsharp.github.io/FAKE/commandline.html">Running FAKE from command line</a></li>
<li><a href="http://fsharp.github.io/FAKE/fsc.html">Using the F# compiler from FAKE</a></li>
<li><a href="http://fsharp.github.io/FAKE/customtasks.html">Creating custom tasks</a></li>
<li><a href="http://fsharp.github.io/FAKE/teamcity.html">TeamCity integration</a></li>
<li><a href="http://fsharp.github.io/FAKE/canopy.html">Running canopy tests</a></li>
<li><a href="http://fsharp.github.io/FAKE/octopusdeploy.html">Octopus Deploy</a></li>
<li><a href="http://fsharp.github.io/FAKE/typescript.html">TypeScript support</a></li>
<li class="divider"></li>
<li><a href="http://fsharp.github.io/FAKE/deploy.html">Fake.Deploy</a></li>
<li class="nav-header">Reference</li>
<li><a href="http://fsharp.github.io/FAKE/apidocs/index.html">API Reference</a></li>
</ul>
</div>
</div>
</div>
<a href="http://github.com/fsharp/fake"><img style="position: absolute; top: 0; right: 0; border: 0;" src="https://s3.amazonaws.com/github/ribbons/forkme_right_gray_6d6d6d.png" alt="Fork me on GitHub"></a>
</body>
</html>
| {
"content_hash": "66a19dd7609df91a7a4e5dcafe11ca51",
"timestamp": "",
"source": "github",
"line_count": 226,
"max_line_length": 209,
"avg_line_length": 44.017699115044245,
"alnum_prop": 0.5524728588661038,
"repo_name": "mwissman/MingleTransitionMonitor",
"id": "8bc065cc4bbefb2eded430deae476b1b425ff68c",
"size": "9948",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "MingleTransitionMonitor/packages/FAKE.3.9.8/docs/apidocs/fake-teamcityresthelper-buildconfiguration.html",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C#",
"bytes": "86697"
},
{
"name": "CSS",
"bytes": "4203"
},
{
"name": "F#",
"bytes": "4301"
},
{
"name": "JavaScript",
"bytes": "1295"
},
{
"name": "Shell",
"bytes": "183"
},
{
"name": "XSLT",
"bytes": "33710"
}
],
"symlink_target": ""
} |
package scala.concurrent.duration
/** Package object whose sole purpose is to expose the members of
  * `DurationImplicits` through `import scala.concurrent.duration.ops._`.
  */
package object ops extends DurationImplicits
| {
"content_hash": "97b3ce59f34c0b216a33ff22fa90870e",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 44,
"avg_line_length": 26.666666666666668,
"alnum_prop": 0.875,
"repo_name": "AudaxHealthInc/play-json-ops",
"id": "f3c46e54597b3ba22e4db916a1940eeb1d5e8e22",
"size": "80",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "play23-json-ops/src/main/scala/scala/concurrent/duration/ops/package.scala",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Scala",
"bytes": "210263"
}
],
"symlink_target": ""
} |
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
<parent>
<groupId>org.xcolab</groupId>
<artifactId>microservice-services</artifactId>
<version>1.0-SNAPSHOT</version>
</parent>
<artifactId>user-service</artifactId>
<name>user-service</name>
<description>The members service for XColab platform</description>
<packaging>jar</packaging>
<dependencies>
<dependency>
<groupId>org.apache.httpcomponents</groupId>
<artifactId>httpclient</artifactId>
</dependency>
<dependency>
<groupId>org.json</groupId>
<artifactId>json</artifactId>
</dependency>
<dependency>
<groupId>org.apache.commons</groupId>
<artifactId>commons-collections4</artifactId>
</dependency>
<dependency>
<groupId>org.xcolab</groupId>
<artifactId>admin-client</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>org.xcolab</groupId>
<artifactId>user-client</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>org.xcolab</groupId>
<artifactId>email-client</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>org.xcolab</groupId>
<artifactId>tracking-client</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>org.xcolab</groupId>
<artifactId>contest-client</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>org.xcolab</groupId>
<artifactId>activity-client</artifactId>
<version>${project.version}</version>
</dependency>
</dependencies>
<build>
<plugins>
<plugin>
<groupId>it.ozimov</groupId>
<artifactId>yaml-properties-maven-plugin</artifactId>
</plugin>
<plugin>
<groupId>org.codehaus.mojo</groupId>
<artifactId>properties-maven-plugin</artifactId>
</plugin>
<plugin>
<groupId>org.flywaydb</groupId>
<artifactId>flyway-maven-plugin</artifactId>
</plugin>
<plugin>
<groupId>org.jooq</groupId>
<artifactId>jooq-codegen-maven</artifactId>
<version>${jooq.version}</version>
<configuration combine.self="append">
<generator>
<database>
<!-- 16 tables owned by this service -->
<includes>
user__.+
<!-- TODO: This service does not own these tables and shouldn't be using them -->
| contest__points
| activity__activity_entry
| contest__contest_team_member
<!-- User/Role/Team entities (8) -->
| members_Member
| Users_Roles | Role_ | xcolab_MemberCategory
| members_RoleGroupRoles | members_RoleGroup
| xcolab_PlatformTeam | xcolab_PlatformTeamMember
<!-- Messaging entities (3)-->
| xcolab_Message | xcolab_MessageRecipientStatus
| xcolab_MessagingUserPreferences
<!-- TODO: This could be moved to the content service -->
| xcolab_StaffMember
<!-- TODO: loginLog should be in tracking-service -->
| xcolab_LoginLog
<!-- TODO: is analyticsUserEvent used? If yes -> tracking service -->
| xcolab_AnalyticsUserEvent
</includes>
</database>
</generator>
</configuration>
</plugin>
<plugin>
<groupId>org.springframework.boot</groupId>
<artifactId>spring-boot-maven-plugin</artifactId>
</plugin>
</plugins>
</build>
</project>
| {
"content_hash": "4cfb9c3e8f83a186e85ddea11540810b",
"timestamp": "",
"source": "github",
"line_count": 129,
"max_line_length": 113,
"avg_line_length": 35.48062015503876,
"alnum_prop": 0.5274197072318112,
"repo_name": "CCI-MIT/XCoLab",
"id": "bf7cdb9dbccbbbace29f1d93944c72007cecfca6",
"size": "4577",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "microservices/services/user-service/pom.xml",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "22697"
},
{
"name": "HTML",
"bytes": "69904"
},
{
"name": "Java",
"bytes": "4089581"
},
{
"name": "JavaScript",
"bytes": "1172118"
},
{
"name": "SCSS",
"bytes": "201019"
},
{
"name": "Shell",
"bytes": "13707"
}
],
"symlink_target": ""
} |
# Run a command repeatedly: _times COUNT CMD [ARG...]
# Executes CMD with its arguments COUNT times.
_times () {
    local count=$1 i=0
    shift
    while (( i < count )); do
        "$@"
        i=$(( i + 1 ))
    done
}
# Print the decoration that goes above a header, e.g. _habove 70 1.
# Level 1: a leading blank line, then WIDTH asterisks.
# Level 2: WIDTH dots (no leading blank line).
# Any other level prints nothing.
_habove () {
    local width="$1" level="$2"
    if [ "$level" = 1 ]; then
        echo
        _times "$width" echo -n '*'
        echo
    elif [ "$level" = 2 ]; then
        _times "$width" echo -n '.'
        echo
    fi
}
# Print the decoration that goes below a header. Currently identical output
# for both levels except the header rule character; kept as a separate
# function for future expansion.
_hbelow () {
    local width="$1" level="$2"
    if [ "$level" = 1 ]; then
        _times "$width" echo -n '*'
        echo
    elif [ "$level" = 2 ]; then
        _times "$width" echo -n '.'
        echo
    fi
}
# e.g. header 1 This is some text
# Print LEVEL-styled decoration above and below the remaining arguments,
# centering the message when it is narrower than the fixed 70-column width.
header () {
    local width=70 level="$1"
    shift
    local message="$*"
    echo
    _habove $width $level
    # if the message is narrower than width, center it
    # (pad with half of the leftover columns; integer division rounds down)
    if [ ${#message} -lt $width ]; then
        _times $(( ( $width - ${#message} ) / 2 )) echo -n " "
    fi
    echo "$message"
    _hbelow $width $level
    echo
}
# parameters: database, piece of sql
# Runs the SQL against the given database as the postgres OS user, with
# psql's default aligned, headed output.
# NOTE(review): the SQL is interpolated into a double-quoted command string,
# so statements containing single quotes or shell metacharacters will break
# the inner quoting -- confirm all call sites pass simple statements.
AS_PG_IN () {
    db="$1"; shift; runuser postgres -c "psql -d '$db' <<<'$*'"
}
# same, but no text alignment, column headers, or row count
# (-A unaligned, -t tuples only, -q quiet), suitable for machine consumption.
RAW_AS_PG_IN () {
    db="$1"; shift; runuser postgres -c "psql -Atq -d '$db' <<<'$*'"
}
# no parameters; lists connectable databases, one name per line
databases () {
    # template0 does not allow connections; do not list it
    RAW_AS_PG_IN postgres 'select datname from pg_database where datallowconn'
}
# Report, in order: cluster-wide roles, per-database ACLs, then object
# privileges inside every connectable database.
header 1 "Roles:"
AS_PG_IN postgres '\du'
header 1 "Databases and database-level privileges:"
# do not show encodings, which \l does
AS_PG_IN postgres 'select datname, datacl from pg_database'
header 1 "Privileges inside each database:"
for db in $(databases); do
    header 2 "$db"
    AS_PG_IN "$db" '\dp'
done
| {
"content_hash": "17e08a9c0c4e008c7b89f66846aa4dd3",
"timestamp": "",
"source": "github",
"line_count": 62,
"max_line_length": 78,
"avg_line_length": 26.06451612903226,
"alnum_prop": 0.5853960396039604,
"repo_name": "jaredjennings/puppet-cmits-postgresql",
"id": "7e0f643b52bb85babbfc066aa724f16638bc3552",
"size": "1657",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "files/privs-report.sh",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Puppet",
"bytes": "26037"
},
{
"name": "Ruby",
"bytes": "24729"
},
{
"name": "Shell",
"bytes": "1657"
}
],
"symlink_target": ""
} |
package com.hadlink.lay_s.presenter;
import android.os.Bundle;
import com.hadlink.lay_s.delegate.ImageDetailDelegate;
import com.hadlink.library.base.BaseActivity;
/**
 * Presenter for the full-screen image detail view. The launching activity
 * passes the image URL and the thumbnail's on-screen position/size through
 * intent extras; the delegate uses them, presumably to animate the view from
 * the thumbnail's original location (see setOriginalInfo/transformOut).
 *
 * @author Created by lyao on 2016/3/7.
 */
public class ImageDetailPresenter extends BaseActivity<ImageDetailDelegate> {
    // Intent extra keys: the image URL plus the thumbnail's x/y position and
    // width/height on screen.
    public static final String INTENT_IMAGE_URL_TAG = "INTENT_IMAGE_URL_TAG";
    public static final String INTENT_IMAGE_X_TAG = "INTENT_IMAGE_X_TAG";
    public static final String INTENT_IMAGE_Y_TAG = "INTENT_IMAGE_Y_TAG";
    public static final String INTENT_IMAGE_W_TAG = "INTENT_IMAGE_W_TAG";
    public static final String INTENT_IMAGE_H_TAG = "INTENT_IMAGE_H_TAG";
    // This screen shows no toolbar.
    @Override protected boolean getToolbarAvailable() {
        return false;
    }
    @Override protected Class<ImageDetailDelegate> getDelegateClass() {
        return ImageDetailDelegate.class;
    }
    /**
     * Reads the image URL and thumbnail geometry from the intent extras and
     * hands them to the delegate, showing the loading view until the second
     * callback reports the image is ready.
     */
    @Override protected void getBundleExtras(Bundle extras) {
        String mImageUrl = extras.getString(INTENT_IMAGE_URL_TAG);
        int mLocationX = extras.getInt(INTENT_IMAGE_X_TAG);
        int mLocationY = extras.getInt(INTENT_IMAGE_Y_TAG);
        int mWidth = extras.getInt(INTENT_IMAGE_W_TAG);
        int mHeight = extras.getInt(INTENT_IMAGE_H_TAG);
        varyViewHelper.showLoadingView();
        viewDelegate.setOriginalInfo(mImageUrl, mWidth, mHeight, mLocationX, mLocationY, new Runnable() {
            @Override public void run() {
                // When the image itself is tapped, close this screen.
                finish();
            }
        },new Runnable(){
            @Override public void run() {
                // Image finished loading: swap the loading view for the content.
                varyViewHelper.showDataView();
            }
        });
    }
    @Override
    public void onBackPressed() {
        // Play the delegate's exit transform only when the image has loaded
        // successfully; otherwise just finish immediately.
        if (viewDelegate.isLoadSuccess())
            viewDelegate.transformOut();
        else
            finish();
    }
    @Override
    protected void onPause() {
        super.onPause();
        if (isFinishing()) {
            // Suppress the default activity close animation so only the
            // delegate's own transition is visible.
            overridePendingTransition(0, 0);
        }
    }
}
| {
"content_hash": "871351dfdefc33f8af2c4245007fec51",
"timestamp": "",
"source": "github",
"line_count": 67,
"max_line_length": 105,
"avg_line_length": 29.55223880597015,
"alnum_prop": 0.6373737373737374,
"repo_name": "vihuela/Lay-s",
"id": "4693fb0ce52fcd779b4650635e64d217c357945c",
"size": "2739",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lay-s/src/main/java/com/hadlink/lay_s/presenter/ImageDetailPresenter.java",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Java",
"bytes": "526000"
}
],
"symlink_target": ""
} |
package hudson.plugins.git;
import com.cloudbees.plugins.credentials.CredentialsMatchers;
import com.cloudbees.plugins.credentials.CredentialsProvider;
import com.cloudbees.plugins.credentials.common.StandardUsernameCredentials;
import com.cloudbees.plugins.credentials.domains.URIRequirementBuilder;
import com.google.common.collect.Iterables;
import edu.umd.cs.findbugs.annotations.CheckForNull;
import edu.umd.cs.findbugs.annotations.NonNull;
import hudson.*;
import hudson.init.Initializer;
import hudson.matrix.MatrixBuild;
import hudson.matrix.MatrixRun;
import hudson.model.*;
import hudson.model.Descriptor.FormException;
import hudson.model.Hudson.MasterComputer;
import hudson.plugins.git.browser.GitRepositoryBrowser;
import hudson.plugins.git.extensions.GitClientConflictException;
import hudson.plugins.git.extensions.GitClientType;
import hudson.plugins.git.extensions.GitSCMExtension;
import hudson.plugins.git.extensions.GitSCMExtensionDescriptor;
import hudson.plugins.git.extensions.impl.AuthorInChangelog;
import hudson.plugins.git.extensions.impl.BuildChooserSetting;
import hudson.plugins.git.extensions.impl.ChangelogToBranch;
import hudson.plugins.git.extensions.impl.PreBuildMerge;
import hudson.plugins.git.opt.PreBuildMergeOptions;
import hudson.plugins.git.util.Build;
import hudson.plugins.git.util.*;
import hudson.remoting.Channel;
import hudson.scm.*;
import hudson.security.ACL;
import hudson.tasks.Builder;
import hudson.tasks.Publisher;
import hudson.triggers.SCMTrigger;
import hudson.util.DescribableList;
import hudson.util.FormValidation;
import hudson.util.IOException2;
import hudson.util.ListBoxModel;
import jenkins.model.Jenkins;
import net.sf.json.JSONObject;
import org.eclipse.jgit.lib.Config;
import org.eclipse.jgit.lib.ObjectId;
import org.eclipse.jgit.transport.RefSpec;
import org.eclipse.jgit.transport.RemoteConfig;
import org.eclipse.jgit.transport.URIish;
import org.jenkinsci.plugins.gitclient.ChangelogCommand;
import org.jenkinsci.plugins.gitclient.CheckoutCommand;
import org.jenkinsci.plugins.gitclient.CloneCommand;
import org.jenkinsci.plugins.gitclient.FetchCommand;
import org.jenkinsci.plugins.gitclient.Git;
import org.jenkinsci.plugins.gitclient.GitClient;
import org.jenkinsci.plugins.gitclient.JGitTool;
import org.kohsuke.stapler.DataBoundConstructor;
import org.kohsuke.stapler.Stapler;
import org.kohsuke.stapler.StaplerRequest;
import org.kohsuke.stapler.export.Exported;
import javax.servlet.ServletException;
import java.io.File;
import java.io.IOException;
import java.io.OutputStreamWriter;
import java.io.PrintStream;
import java.io.Serializable;
import java.io.Writer;
import java.text.MessageFormat;
import java.util.*;
import java.util.logging.Level;
import java.util.logging.Logger;
import static hudson.Util.*;
import static hudson.init.InitMilestone.JOB_LOADED;
import static hudson.init.InitMilestone.PLUGINS_STARTED;
import hudson.plugins.git.browser.GithubWeb;
import static hudson.scm.PollingResult.*;
import hudson.util.IOUtils;
import hudson.util.LogTaskListener;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import static org.apache.commons.lang.StringUtils.isBlank;
/**
* Git SCM.
*
* @author Nigel Magnay
* @author Andrew Bayer
* @author Nicolas Deloof
* @author Kohsuke Kawaguchi
* ... and many others
*/
public class GitSCM extends GitSCMBackwardCompatibility {
/**
* Store a config version so we're able to migrate config on various
* functionality upgrades.
*/
private Long configVersion;
/**
* All the remote repositories that we know about.
*/
private List<UserRemoteConfig> userRemoteConfigs;
private transient List<RemoteConfig> remoteRepositories;
/**
* All the branches that we wish to care about building.
*/
private List<BranchSpec> branches;
private boolean doGenerateSubmoduleConfigurations;
public String gitTool = null;
private GitRepositoryBrowser browser;
private Collection<SubmoduleConfig> submoduleCfg;
public static final String GIT_BRANCH = "GIT_BRANCH";
public static final String GIT_COMMIT = "GIT_COMMIT";
public static final String GIT_PREVIOUS_COMMIT = "GIT_PREVIOUS_COMMIT";
public static final String GIT_PREVIOUS_SUCCESSFUL_COMMIT = "GIT_PREVIOUS_SUCCESSFUL_COMMIT";
/**
* All the configured extensions attached to this.
*/
private DescribableList<GitSCMExtension,GitSCMExtensionDescriptor> extensions;
    /**
     * Returns the submodule configurations attached to this SCM.
     */
    public Collection<SubmoduleConfig> getSubmoduleCfg() {
        return submoduleCfg;
    }
    /**
     * Replaces the submodule configurations attached to this SCM.
     */
    public void setSubmoduleCfg(Collection<SubmoduleConfig> submoduleCfg) {
        this.submoduleCfg = submoduleCfg;
    }
/**
 * Wraps a single repository URL in the list form expected by the main constructor.
 * The returned list is mutable, as callers may later modify it.
 */
static private List<UserRemoteConfig> createRepoList(String url) {
    List<UserRemoteConfig> singleRepo = new ArrayList<UserRemoteConfig>(1);
    singleRepo.add(new UserRemoteConfig(url, null, null, null));
    return singleRepo;
}
/**
 * A convenience constructor that sets everything to default.
 *
 * @param repositoryUrl
 *      Repository URL to clone from.
 */
public GitSCM(String repositoryUrl) {
this(
createRepoList(repositoryUrl),
// single empty-name BranchSpec; getSingleBranch() expands "" to "**" downstream
Collections.singletonList(new BranchSpec("")),
false, Collections.<SubmoduleConfig>emptyList(),
null, null, null);
}
// @Restricted(NoExternalUse.class) // because this keeps changing
/**
 * Databound constructor. Normalizes null/empty inputs to usable defaults
 * (a single "*&#47;master" branch spec, empty submodule list) and derives the
 * internal JGit remote configuration from the user-supplied remotes.
 */
@DataBoundConstructor
public GitSCM(
List<UserRemoteConfig> userRemoteConfigs,
List<BranchSpec> branches,
Boolean doGenerateSubmoduleConfigurations,
Collection<SubmoduleConfig> submoduleCfg,
GitRepositoryBrowser browser,
String gitTool,
List<GitSCMExtension> extensions) {
// moved from createBranches
if (branches == null) {
branches = new ArrayList<BranchSpec>();
}
if (branches.isEmpty()) {
branches.add(new BranchSpec("*/master"));
}
this.branches = branches;
this.userRemoteConfigs = userRemoteConfigs;
// builds remoteRepositories from userRemoteConfigs
updateFromUserData();
// TODO: getBrowserFromRequest
this.browser = browser;
// emulate bindJSON behavior here
if (doGenerateSubmoduleConfigurations != null) {
this.doGenerateSubmoduleConfigurations = doGenerateSubmoduleConfigurations;
} else {
this.doGenerateSubmoduleConfigurations = false;
}
if (submoduleCfg == null) {
submoduleCfg = new ArrayList<SubmoduleConfig>();
}
this.submoduleCfg = submoduleCfg;
// current persisted-config version; readResolve() migrates older versions
this.configVersion = 2L;
this.gitTool = gitTool;
this.extensions = new DescribableList<GitSCMExtension, GitSCMExtensionDescriptor>(Saveable.NOOP,Util.fixNull(extensions));
getBuildChooser(); // set the gitSCM field.
}
/**
 * All the configured extensions attached to this {@link GitSCM}.
 *
 * Going forward this is primarily how we'll support esoteric use cases.
 *
 * @return the live extension list (never null after construction/readResolve)
 * @since 1.EXTENSION
 */
public DescribableList<GitSCMExtension, GitSCMExtensionDescriptor> getExtensions() {
return extensions;
}
/**
 * Rebuilds {@link #remoteRepositories} from {@link #userRemoteConfigs}
 * (what newInstance used to do directly from the request data).
 *
 * @throws GitException if the repository configurations cannot be created
 */
private void updateFromUserData() throws GitException {
    // do what newInstance used to do directly from the request data
    try {
        int count = userRemoteConfigs.size();
        String[] urls = new String[count];
        String[] names = new String[count];
        String[] refs = new String[count];
        for (int i = 0; i < count; i++) {
            UserRemoteConfig cfg = userRemoteConfigs.get(i);
            urls[i] = cfg.getUrl();
            names[i] = cfg.getName();
            refs[i] = cfg.getRefspec();
        }
        this.remoteRepositories = DescriptorImpl.createRepositoryConfigurations(urls, names, refs);
        // TODO: replace with new repositories
    } catch (IOException e1) {
        throw new GitException("Error creating repositories", e1);
    }
}
/**
 * Deserialization hook: migrates configuration persisted by older plugin
 * versions to the current object model.
 *
 * @return this instance, after in-place migration
 */
public Object readResolve() throws IOException {
// Migrate data
// Default unspecified to v0
if (configVersion == null) {
configVersion = 0L;
}
// Legacy single source/branch fields (presumably from GitSCMBackwardCompatibility;
// not declared in this class - confirm) become one "origin" remote.
if (source != null) {
remoteRepositories = new ArrayList<RemoteConfig>();
branches = new ArrayList<BranchSpec>();
doGenerateSubmoduleConfigurations = false;
List<RefSpec> rs = new ArrayList<RefSpec>();
rs.add(new RefSpec("+refs/heads/*:refs/remotes/origin/*"));
remoteRepositories.add(newRemoteConfig("origin", source, rs.toArray(new RefSpec[0])));
if (branch != null) {
branches.add(new BranchSpec(branch));
} else {
branches.add(new BranchSpec("*/master"));
}
}
if (configVersion < 1 && branches != null) {
// Migrate the branch specs from
// single * wildcard, to ** wildcard.
for (BranchSpec branchSpec : branches) {
String name = branchSpec.getName();
name = name.replace("*", "**");
branchSpec.setName(name);
}
}
// Reconstruct the user-facing remote list from the internal JGit configuration.
if (remoteRepositories != null && userRemoteConfigs == null) {
userRemoteConfigs = new ArrayList<UserRemoteConfig>();
for(RemoteConfig cfg : remoteRepositories) {
// converted as in config.jelly
String url = "";
if (cfg.getURIs().size() > 0 && cfg.getURIs().get(0) != null)
url = cfg.getURIs().get(0).toPrivateString();
String refspec = "";
if (cfg.getFetchRefSpecs().size() > 0 && cfg.getFetchRefSpecs().get(0) != null)
refspec = cfg.getFetchRefSpecs().get(0).toString();
userRemoteConfigs.add(new UserRemoteConfig(url, cfg.getName(), refspec, null));
}
}
// patch internal objects from user data
// if (configVersion == 2) {
if (remoteRepositories == null) {
// if we don't catch GitException here, the whole job fails to load
try {
updateFromUserData();
} catch (GitException e) {
LOGGER.log(Level.WARNING, "Failed to load SCM data", e);
}
}
if (extensions==null)
extensions = new DescribableList<GitSCMExtension, GitSCMExtensionDescriptor>(Saveable.NOOP);
readBackExtensionsFromLegacy();
// Map the legacy choosingStrategy string onto a BuildChooser extension,
// but only if no non-default chooser is already configured.
if (choosingStrategy != null && getBuildChooser().getClass()==DefaultBuildChooser.class) {
for (BuildChooserDescriptor d : BuildChooser.all()) {
if (choosingStrategy.equals(d.getLegacyId())) {
try {
setBuildChooser(d.clazz.newInstance());
} catch (InstantiationException e) {
LOGGER.log(Level.WARNING, "Failed to instantiate the build chooser", e);
} catch (IllegalAccessException e) {
LOGGER.log(Level.WARNING, "Failed to instantiate the build chooser", e);
}
}
}
}
getBuildChooser(); // set the gitSCM field.
return this;
}
/**
 * Returns the explicitly configured repository browser, or null if none is set.
 */
@Override
public GitRepositoryBrowser getBrowser() {
return browser;
}
/**
 * Guesses a repository browser for the common GitHub case. A guess is only
 * attempted when exactly one remote with exactly one URI is configured and
 * that URI matches a github.com HTTPS or SSH clone URL.
 *
 * @return a {@link GithubWeb} browser, or null when no guess can be made
 */
@Override public RepositoryBrowser<?> guessBrowser() {
    if (remoteRepositories == null || remoteRepositories.size() != 1) {
        return null;
    }
    List<URIish> uris = remoteRepositories.get(0).getURIs();
    if (uris.size() != 1) {
        return null;
    }
    String uri = uris.get(0).toString();
    // TODO make extensible by introducing an abstract GitRepositoryBrowserDescriptor
    Matcher https = Pattern.compile("(https://github[.]com/[^/]+/[^/]+)[.]git").matcher(uri);
    if (https.matches()) {
        return new GithubWeb(https.group(1) + "/");
    }
    Matcher ssh = Pattern.compile("git@github[.]com:([^/]+/[^/]+)[.]git").matcher(uri);
    if (ssh.matches()) {
        return new GithubWeb("https://github.com/" + ssh.group(1) + "/");
    }
    return null;
}
/**
 * Whether the global descriptor setting to create user accounts based on
 * committer e-mail is enabled; false when the descriptor is unavailable.
 */
public boolean isCreateAccountBasedOnEmail() {
DescriptorImpl gitDescriptor = getDescriptor();
return (gitDescriptor != null && gitDescriptor.isCreateAccountBasedOnEmail());
}
/**
 * Returns the build chooser configured via a {@link BuildChooserSetting}
 * extension, or a fresh {@link DefaultBuildChooser} when none is configured.
 * In either case the chooser's gitSCM back-reference is pointed at this instance.
 */
public BuildChooser getBuildChooser() {
    BuildChooserSetting setting = getExtensions().get(BuildChooserSetting.class);
    BuildChooser chooser = (setting != null) ? setting.getBuildChooser() : new DefaultBuildChooser();
    chooser.gitSCM = this;
    return chooser;
}
/**
 * Installs the given build chooser as a {@link BuildChooserSetting} extension.
 * Selecting the default chooser removes the extension instead, so the default
 * is represented by absence.
 */
public void setBuildChooser(BuildChooser buildChooser) throws IOException {
    if (buildChooser.getClass() != DefaultBuildChooser.class) {
        getExtensions().replace(new BuildChooserSetting(buildChooser));
    } else {
        getExtensions().remove(BuildChooserSetting.class);
    }
}
/**
 * @deprecated use {@link #getParamLocalBranch(Run, TaskListener)} so the caller
 *      controls where log output goes; this variant logs via the plugin logger.
 */
@Deprecated
public String getParamLocalBranch(Run<?, ?> build) throws IOException, InterruptedException {
return getParamLocalBranch(build, new LogTaskListener(LOGGER, Level.INFO));
}
/**
 * Gets the parameter-expanded effective value in the context of the current build.
 *
 * @param build the build whose environment supplies parameter values
 * @param listener used when computing the build environment
 * @return the configured local branch with build parameters expanded;
 *      may be null when no local branch is configured
 */
public String getParamLocalBranch(Run<?, ?> build, TaskListener listener) throws IOException, InterruptedException {
    String branch = getLocalBranch();
    // substitute build parameters if available
    // (dropped the original "branch != null ? branch : null", which was a no-op identity conditional)
    return getParameterString(branch, build.getEnvironment(listener));
}
/**
 * @deprecated use {@link #getParamExpandedRepos(Run, TaskListener)} so the caller
 *      controls where log output goes; this variant logs via the plugin logger.
 */
@Deprecated
public List<RemoteConfig> getParamExpandedRepos(Run<?, ?> build) throws IOException, InterruptedException {
return getParamExpandedRepos(build, new LogTaskListener(LOGGER, Level.INFO));
}
/**
 * Expand parameters in {@link #remoteRepositories} with the parameter values provided in the given build
 * and return them.
 *
 * @return can be empty but never null.
 */
public List<RemoteConfig> getParamExpandedRepos(Run<?, ?> build, TaskListener listener) throws IOException, InterruptedException {
    List<RemoteConfig> result = new ArrayList<RemoteConfig>();
    EnvVars environment = build.getEnvironment(listener);
    for (RemoteConfig repo : Util.fixNull(remoteRepositories)) {
        String expandedName = getParameterString(repo.getName(), environment);
        // note: only the first URI of each remote is expanded, mirroring configuration shape
        String expandedUrl = getParameterString(repo.getURIs().get(0).toPrivateString(), environment);
        RefSpec[] expandedSpecs = getRefSpecs(repo, environment).toArray(new RefSpec[0]);
        result.add(newRemoteConfig(expandedName, expandedUrl, expandedSpecs));
    }
    return result;
}
/**
 * Looks up a configured remote by its name.
 *
 * @param repoName name of the remote, e.g. "origin"
 * @return the matching remote configuration, or null if none has that name
 */
public RemoteConfig getRepositoryByName(String repoName) {
    for (RemoteConfig candidate : getRepositories()) {
        if (candidate.getName().equals(repoName)) {
            return candidate;
        }
    }
    return null;
}
/**
 * Returns the user-configured remotes as an unmodifiable view.
 */
@Exported
public List<UserRemoteConfig> getUserRemoteConfigs() {
return Collections.unmodifiableList(userRemoteConfigs);
}
/**
 * Returns the internal remote configurations.
 * Handles a null field to ensure backwards-compatibility, i.e. project
 * configuration missing the &lt;repositories/&gt; XML element.
 *
 * @return the configured remotes, or a fresh empty list; never null
 */
public List<RemoteConfig> getRepositories() {
    return (remoteRepositories == null) ? new ArrayList<RemoteConfig>() : remoteRepositories;
}
/**
 * Returns the configured GitTool installation name; null means the default tool.
 */
public String getGitTool() {
return gitTool;
}
/**
 * Expands build-parameter references in the given string using the supplied environment.
 */
public static String getParameterString(String original, EnvVars env) {
return env.expand(original);
}
/**
 * Returns the fetch refspecs of the given remote with build parameters expanded.
 */
private List<RefSpec> getRefSpecs(RemoteConfig repo, EnvVars env) {
    List<RefSpec> expanded = new ArrayList<RefSpec>();
    for (RefSpec spec : repo.getFetchRefSpecs()) {
        String expandedSpec = getParameterString(spec.toString(), env);
        expanded.add(new RefSpec(expandedSpec));
    }
    return expanded;
}
/**
 * If the configuration is such that we are tracking just one branch of one repository
 * return that branch specifier (in the form of something like "origin/master" or a SHA1-hash).
 *
 * Otherwise return null.
 */
private String getSingleBranch(EnvVars env) {
// if we have multiple branches skip to advanced usecase
if (getBranches().size() != 1 || getRepositories().size() != 1) {
return null;
}
String branch = getBranches().get(0).getName();
String repository = getRepositories().get(0).getName();
// replace repository wildcard with repository name
if (branch.startsWith("*/")) {
branch = repository + branch.substring(1);
}
// if the branch name contains more wildcards then the simple usecase
// does not apply and we need to skip to the advanced usecase
if (branch.contains("*")) {
return null;
}
// substitute build parameters if available
branch = getParameterString(branch, env);
// Check for empty string - replace with "**" when seen.
if (branch.equals("")) {
branch = "**";
}
return branch;
}
/**
 * This SCM does not compute a polling baseline from a build; polling relies on
 * {@link BuildData} actions instead, so NONE is always returned.
 */
@Override
public SCMRevisionState calcRevisionsFromBuild(Run<?, ?> abstractBuild, FilePath workspace, Launcher launcher, TaskListener taskListener) throws IOException, InterruptedException {
return SCMRevisionState.NONE;
}
/**
 * A workspace is needed for polling when any extension demands it, or when
 * the configuration does not narrow down to a single branch of a single
 * repository (fast remote polling only works for the single-branch case).
 */
@Override
public boolean requiresWorkspaceForPolling() {
    for (GitSCMExtension ext : getExtensions()) {
        if (ext.requiresWorkspaceForPolling()) {
            return true;
        }
    }
    boolean hasSingleBranch = getSingleBranch(new EnvVars()) != null;
    return !hasSingleBranch;
}
/**
 * Entry point for polling; delegates to {@link #compareRemoteRevisionWithImpl}
 * and rewraps any {@link GitException} as an {@link IOException} so callers
 * see the exception type the SCM API contract declares.
 */
@Override
public PollingResult compareRemoteRevisionWith(Job<?, ?> project, Launcher launcher, FilePath workspace, final TaskListener listener, SCMRevisionState baseline) throws IOException, InterruptedException {
    try {
        return compareRemoteRevisionWithImpl( project, launcher, workspace, listener);
    } catch (GitException e){
        // IOException has accepted a cause since Java 6; the deprecated
        // hudson.util.IOException2 (a subclass of IOException) is no longer needed.
        throw new IOException(e);
    }
}
/**
 * Maps a workspace back to the node it lives on by matching remoting channels.
 * Falls back to the Jenkins master when the workspace is local or no computer
 * matches. Assumes Jenkins.getInstance() is non-null here - TODO confirm for
 * the shutdown window.
 */
private static Node workspaceToNode(FilePath workspace) { // TODO https://trello.com/c/doFFMdUm/46-filepath-getcomputer
Jenkins j = Jenkins.getInstance();
if (workspace != null && workspace.isRemote()) {
// A remote workspace's channel identifies the agent it belongs to.
for (Computer c : j.getComputers()) {
if (c.getChannel() == workspace.getChannel()) {
Node n = c.getNode();
if (n != null) {
return n;
}
}
}
}
return j;
}
/**
 * Core polling logic: decides whether there are unbuilt revisions.
 * Uses fast remote head comparison when no workspace is required, otherwise
 * fetches into the existing workspace and asks the build chooser for
 * candidate revisions.
 */
private PollingResult compareRemoteRevisionWithImpl(Job<?, ?> project, Launcher launcher, FilePath workspace, final TaskListener listener) throws IOException, InterruptedException {
// Poll for changes. Are there any unbuilt revisions that Hudson ought to build ?
listener.getLogger().println("Using strategy: " + getBuildChooser().getDisplayName());
final Run lastBuild = project.getLastBuild();
if (lastBuild == null) {
// If we've never been built before, well, gotta build!
listener.getLogger().println("[poll] No previous build, so forcing an initial build.");
return BUILD_NOW;
}
final BuildData buildData = fixNull(getBuildData(lastBuild));
if (buildData.lastBuild != null) {
listener.getLogger().println("[poll] Last Built Revision: " + buildData.lastBuild.revision);
}
final String singleBranch = getSingleBranch(lastBuild.getEnvironment(listener));
// fast remote polling needs a single branch and an existing last build
if (!requiresWorkspaceForPolling() && buildData.lastBuild != null && buildData.lastBuild.getMarked() != null) {
// FIXME this should not be a specific case, but have BuildChooser tell us if it can poll without workspace.
final EnvVars environment = project instanceof AbstractProject ? GitUtils.getPollEnvironment((AbstractProject) project, workspace, launcher, listener, false) : new EnvVars();
// null working directory: the client only needs to run ls-remote-style queries
GitClient git = createClient(listener, environment, project, Jenkins.getInstance(), null);
String gitRepo = getParamExpandedRepos(lastBuild, listener).get(0).getURIs().get(0).toString();
ObjectId head = git.getHeadRev(gitRepo, getBranches().get(0).getName());
if (head != null){
listener.getLogger().println("[poll] Latest remote head revision is: " + head.getName());
// compare against the marked (pre-merge) revision of the last build
if (buildData.lastBuild.getMarked().getSha1().equals(head)) {
return NO_CHANGES;
} else {
return BUILD_NOW;
}
} else {
listener.getLogger().println("[poll] Couldn't get remote head revision");
return BUILD_NOW;
}
}
final EnvVars environment = project instanceof AbstractProject ? GitUtils.getPollEnvironment((AbstractProject) project, workspace, launcher, listener) : new EnvVars();
FilePath workingDirectory = workingDirectory(project,workspace,environment,listener);
// (Re)build if the working directory doesn't exist
if (workingDirectory == null || !workingDirectory.exists()) {
return BUILD_NOW;
}
GitClient git = createClient(listener, environment, project, workspaceToNode(workspace), workingDirectory);
if (git.hasGitRepo()) {
// Repo is there - do a fetch
listener.getLogger().println("Fetching changes from the remote Git repositories");
// Fetch updates
for (RemoteConfig remoteRepository : getParamExpandedRepos(lastBuild, listener)) {
fetchFrom(git, listener, remoteRepository);
}
listener.getLogger().println("Polling for changes in");
Collection<Revision> candidates = getBuildChooser().getCandidateRevisions(
true, singleBranch, git, listener, buildData, new BuildChooserContextImpl(project, null, environment));
// any non-excluded candidate constitutes a significant change
for (Revision c : candidates) {
if (!isRevExcluded(git, c, listener, buildData)) {
return PollingResult.SIGNIFICANT;
}
}
return NO_CHANGES;
} else {
listener.getLogger().println("No Git repository yet, an initial checkout is required");
return PollingResult.SIGNIFICANT;
}
}
/**
 * Allows {@link Builder}s and {@link Publisher}s to access a configured {@link GitClient} object to
 * perform additional git operations.
 *
 * @return a client rooted at the build's working directory (created if missing)
 */
public GitClient createClient(TaskListener listener, EnvVars environment, Run<?,?> build, FilePath workspace) throws IOException, InterruptedException {
FilePath ws = workingDirectory(build.getParent(), workspace, environment, listener);
/* ws will be null if the node which ran the build is offline */
if (ws != null) {
ws.mkdirs(); // ensure it exists
}
return createClient(listener,environment, build.getParent(), workspaceToNode(workspace), ws);
}
/**
 * Builds a {@link GitClient} for the given node/directory: resolves the git
 * executable, lets every extension decorate the client, and registers the
 * credentials configured on each remote.
 */
/*package*/ GitClient createClient(TaskListener listener, EnvVars environment, Job project, Node n, FilePath ws) throws IOException, InterruptedException {
String gitExe = getGitExe(n, listener);
Git git = Git.with(listener, environment).in(ws).using(gitExe);
GitClient c = git.getClient();
// extensions may wrap or reconfigure the client (e.g. to change implementations)
for (GitSCMExtension ext : extensions) {
c = ext.decorate(this,c);
}
// attach per-remote credentials, matched by credentials id and URI requirements
for (UserRemoteConfig uc : getUserRemoteConfigs()) {
if (uc.getCredentialsId() != null) {
String url = uc.getUrl();
StandardUsernameCredentials credentials = CredentialsMatchers
.firstOrNull(
CredentialsProvider.lookupCredentials(StandardUsernameCredentials.class, project,
ACL.SYSTEM, URIRequirementBuilder.fromUri(url).build()),
CredentialsMatchers.allOf(CredentialsMatchers.withId(uc.getCredentialsId()),
GitClient.CREDENTIALS_MATCHER));
if (credentials != null) {
c.addCredentials(url, credentials);
}
}
}
// TODO add default credentials
return c;
}
/**
 * Null-safe BuildData accessor: substitutes an empty placeholder when a build
 * has no recorded Git data yet.
 */
private BuildData fixNull(BuildData bd) {
return bd != null ? bd : new BuildData(getScmName(), getUserRemoteConfigs()) /*dummy*/;
}
/**
 * Fetch information from a particular remote repository.
 *
 * The first URI replaces the remote's URL, later URIs are added as extra URLs,
 * and the fetch itself is decorated by all configured extensions.
 *
 * @param git client to fetch with
 * @param listener receives log output via extension decoration
 * @param remoteRepository remote whose URIs and refspecs to fetch
 * @throws InterruptedException if the remote call is interrupted
 * @throws IOException on underlying I/O failures
 * @throws GitException wrapping any git-level fetch failure with the URL
 */
private void fetchFrom(GitClient git,
TaskListener listener,
RemoteConfig remoteRepository) throws InterruptedException, IOException {
boolean first = true;
for (URIish url : remoteRepository.getURIs()) {
try {
if (first) {
git.setRemoteUrl(remoteRepository.getName(), url.toPrivateASCIIString());
first = false;
} else {
git.addRemoteUrl(remoteRepository.getName(), url.toPrivateASCIIString());
}
FetchCommand fetch = git.fetch_().from(url, remoteRepository.getFetchRefSpecs());
for (GitSCMExtension extension : extensions) {
extension.decorateFetchCommand(this, git, listener, fetch);
}
fetch.execute();
} catch (GitException ex) {
throw new GitException("Failed to fetch from "+url.toString(), ex);
}
}
}
/**
 * Builds a JGit {@link RemoteConfig} from a remote name, URL and fetch refspecs
 * by writing them into a throwaway {@link Config} and reading the result back.
 *
 * @throws GitException wrapping any failure while assembling the configuration
 */
private RemoteConfig newRemoteConfig(String name, String refUrl, RefSpec... refSpec) {
    try {
        // Make up a repo config from the request parameters
        Config syntheticConfig = new Config();
        syntheticConfig.setString("remote", name, "url", refUrl);
        List<String> fetchSpecs = new ArrayList<String>();
        if (refSpec != null) {
            for (RefSpec spec : refSpec) {
                fetchSpecs.add(spec.toString());
            }
        }
        syntheticConfig.setStringList("remote", name, "fetch", fetchSpecs);
        return RemoteConfig.getAllRemoteConfigs(syntheticConfig).get(0);
    } catch (Exception ex) {
        throw new GitException("Error trying to create JGit configuration", ex);
    }
}
/**
 * Resolves the configured {@link GitTool} installation by name.
 * Falls back to the default installation when no tool name is configured or
 * the configured name no longer matches an existing installation.
 */
public GitTool resolveGitTool(TaskListener listener) {
    if (gitTool == null) return GitTool.getDefaultInstallation();
    GitTool git = Jenkins.getInstance().getDescriptorByType(GitTool.DescriptorImpl.class).getInstallation(gitTool);
    if (git == null) {
        // message grammar fixed ("does not exists" -> "does not exist")
        listener.getLogger().println("Selected Git installation does not exist. Using Default");
        git = GitTool.getDefaultInstallation();
    }
    return git;
}
/**
 * Convenience overload of {@link #getGitExe(Node, EnvVars, TaskListener)} with no environment.
 */
public String getGitExe(Node builtOn, TaskListener listener) {
return getGitExe(builtOn, null, listener);
}
/**
 * Exposing so that we can get this from GitPublisher.
 *
 * Determines the git executable to use: if the configured extensions jointly
 * require JGit, returns the JGit marker name; otherwise resolves the GitTool
 * installation for the given node and environment.
 *
 * @param builtOn node to localize the tool for; may be null
 * @param env environment used for tool variable expansion; may be null
 * @throws RuntimeException if two extensions demand incompatible client types
 */
public String getGitExe(Node builtOn, EnvVars env, TaskListener listener) {
GitClientType client = GitClientType.ANY;
for (GitSCMExtension ext : extensions) {
try {
client = client.combine(ext.getRequiredClient());
} catch (GitClientConflictException e) {
throw new RuntimeException(ext.getDescriptor().getDisplayName() + " extended Git behavior is incompatible with other behaviors");
}
}
if (client == GitClientType.JGIT) return JGitTool.MAGIC_EXENAME;
GitTool tool = resolveGitTool(listener);
if (builtOn != null) {
try {
tool = tool.forNode(builtOn, listener);
} catch (IOException e) {
// fall through with the unlocalized tool; the executable may still work
listener.getLogger().println("Failed to get git executable");
} catch (InterruptedException e) {
listener.getLogger().println("Failed to get git executable");
}
}
if (env != null) {
tool = tool.forEnvironment(env);
}
return tool.getGitExe();
}
/**
 * Web-bound method to let people look up a build by their SHA1 commit.
 *
 * @return the first build whose last-built revision matches the SHA1, or null
 */
public AbstractBuild<?,?> getBySHA1(String sha1) {
    AbstractProject<?,?> project = Stapler.getCurrentRequest().findAncestorObject(AbstractProject.class);
    for (AbstractBuild candidate : project.getBuilds()) {
        BuildData data = candidate.getAction(BuildData.class);
        if (data == null || data.lastBuild == null) {
            continue;
        }
        if (data.lastBuild.isFor(sha1)) {
            return candidate;
        }
    }
    return null;
}
/**
 * Serializable context handed to {@link BuildChooser}s. When sent across a
 * remoting channel it is replaced by a channel-exported proxy (see
 * writeReplace) whose callbacks execute back on the originating side.
 */
/*package*/ static class BuildChooserContextImpl implements BuildChooserContext, Serializable {
final Job project;
final Run build;
final EnvVars environment;
BuildChooserContextImpl(Job project, Run build, EnvVars environment) {
this.project = project;
this.build = build;
this.environment = environment;
}
// Local (master-side) invocation: runs the callable on the master channel.
public <T> T actOnBuild(ContextCallable<Run<?,?>, T> callable) throws IOException, InterruptedException {
return callable.invoke(build,Hudson.MasterComputer.localChannel);
}
public <T> T actOnProject(ContextCallable<Job<?,?>, T> callable) throws IOException, InterruptedException {
return callable.invoke(project, MasterComputer.localChannel);
}
public Run<?, ?> getBuild() {
return build;
}
public EnvVars getEnvironment() {
return environment;
}
// Serialization hook: exports a remoting proxy instead of serializing the
// Job/Run objects themselves; invocations travel back over the current channel.
private Object writeReplace() {
return Channel.current().export(BuildChooserContext.class,new BuildChooserContext() {
public <T> T actOnBuild(ContextCallable<Run<?,?>, T> callable) throws IOException, InterruptedException {
return callable.invoke(build,Channel.current());
}
public <T> T actOnProject(ContextCallable<Job<?,?>, T> callable) throws IOException, InterruptedException {
return callable.invoke(project,Channel.current());
}
public Run<?, ?> getBuild() {
return build;
}
public EnvVars getEnvironment() {
return environment;
}
});
}
}
/**
 * Determines the commit to be built in this round, updating the working tree accordingly,
 * and return the information about the selected commit.
 *
 * <p>
 * For robustness, this method shouldn't assume too much about the state of the working tree when this method
 * is called. In a general case, a working tree is a left-over from the previous build, so it can be quite
 * messed up (such as HEAD pointing to a random branch.) It is expected that this method brings it back
 * to the predictable clean state by the time this method returns.
 */
private @NonNull Build determineRevisionToBuild(final Run build,
final BuildData buildData,
final EnvVars environment,
final GitClient git,
final TaskListener listener) throws IOException, InterruptedException {
PrintStream log = listener.getLogger();
Collection<Revision> candidates = Collections.EMPTY_LIST;
// every MatrixRun should build the same marked commit ID
if (build instanceof MatrixRun) {
MatrixBuild parentBuild = ((MatrixRun) build).getParentBuild();
if (parentBuild != null) {
BuildData parentBuildData = getBuildData(parentBuild);
if (parentBuildData != null) {
Build lastBuild = parentBuildData.lastBuild;
if (lastBuild!=null)
candidates = Collections.singleton(lastBuild.getMarked());
}
}
}
// parameter forcing the commit ID to build
if (candidates.isEmpty() ) {
final RevisionParameterAction rpa = build.getAction(RevisionParameterAction.class);
if (rpa != null) {
candidates = Collections.singleton(rpa.toRevision(git));
}
}
// normal case: ask the build chooser for candidates
if (candidates.isEmpty() ) {
final String singleBranch = environment.expand( getSingleBranch(environment) );
final BuildChooserContext context = new BuildChooserContextImpl(build.getParent(), build, environment);
candidates = getBuildChooser().getCandidateRevisions(
false, singleBranch, git, listener, buildData, context);
}
if (candidates.isEmpty()) {
// getBuildCandidates should make the last item the last build, so a re-build
// will build the last built thing.
throw new AbortException("Couldn't find any revision to build. Verify the repository and branch configuration for this job.");
}
// "marked" is the chosen upstream commit; extensions (e.g. merge) may move
// the actually built revision away from it
Revision marked = candidates.iterator().next();
Revision rev = marked;
// Modify the revision based on extensions
for (GitSCMExtension ext : extensions) {
rev = ext.decorateRevisionToBuild(this,build,git,listener,rev);
}
Build revToBuild = new Build(marked, rev, build.getNumber(), null);
buildData.saveBuild(revToBuild);
// if several candidates remain, schedule a follow-up build to catch up
if (candidates.size() > 1) {
log.println("Multiple candidate revisions");
Job<?, ?> job = build.getParent();
if (job instanceof AbstractProject) {
AbstractProject project = (AbstractProject) job;
if (!project.isDisabled()) {
log.println("Scheduling another build to catch up with " + project.getFullDisplayName());
if (!project.scheduleBuild(0, new SCMTrigger.SCMTriggerCause("This build was triggered by build "
+ build.getNumber() + " because more than one build candidate was found."))) {
log.println("WARNING: multiple candidate revisions, but unable to schedule build of " + project.getFullDisplayName());
}
}
}
}
return revToBuild;
}
/**
 * Retrieve Git objects from the specified remotes by doing the likes of clone/fetch/pull/etc.
 *
 * By the end of this method, remote refs are updated to include all the commits found in the remote servers.
 */
private void retrieveChanges(Run build, GitClient git, TaskListener listener) throws IOException, InterruptedException {
final PrintStream log = listener.getLogger();
List<RemoteConfig> repos = getParamExpandedRepos(build, listener);
if (repos.isEmpty()) return; // defensive check even though this is an invalid configuration
if (git.hasGitRepo()) {
// It's an update
if (repos.size() == 1)
log.println("Fetching changes from the remote Git repository");
else
log.println(MessageFormat.format("Fetching changes from {0} remote Git repositories", repos.size()));
} else {
// No repository yet: clone from the first remote, letting extensions
// decorate the clone (shallow, reference repo, etc.)
log.println("Cloning the remote Git repository");
RemoteConfig rc = repos.get(0);
try {
CloneCommand cmd = git.clone_().url(rc.getURIs().get(0).toPrivateString()).repositoryName(rc.getName());
for (GitSCMExtension ext : extensions) {
ext.decorateCloneCommand(this, build, git, listener, cmd);
}
cmd.execute();
} catch (GitException ex) {
ex.printStackTrace(listener.error("Error cloning remote repo '%s'", rc.getName()));
throw new AbortException();
}
}
// fetch from every remote, even right after a clone
for (RemoteConfig remoteRepository : repos) {
fetchFrom(git, listener, remoteRepository);
}
}
/**
 * Performs the actual checkout for a build: carries BuildData forward from the
 * previous build, fetches/clones, picks the revision via the build chooser,
 * checks it out (with extension decoration at every step), records a
 * GitTagAction, and computes the changelog.
 */
@Override
public void checkout(Run<?, ?> build, Launcher launcher, FilePath workspace, TaskListener listener, File changelogFile, SCMRevisionState baseline)
throws IOException, InterruptedException {
if (VERBOSE)
listener.getLogger().println("Using strategy: " + getBuildChooser().getDisplayName());
BuildData previousBuildData = getBuildData(build.getPreviousBuild()); // read only
// working copy of the previous build's data, attached to this build
BuildData buildData = copyBuildData(build.getPreviousBuild());
build.addAction(buildData);
if (VERBOSE && buildData.lastBuild != null) {
listener.getLogger().println("Last Built Revision: " + buildData.lastBuild.revision);
}
EnvVars environment = build.getEnvironment(listener);
GitClient git = createClient(listener, environment, build, workspace);
for (GitSCMExtension ext : extensions) {
ext.beforeCheckout(this, build, git, listener);
}
retrieveChanges(build, git, listener);
Build revToBuild = determineRevisionToBuild(build, buildData, environment, git, listener);
// export the chosen revision to the build environment
environment.put(GIT_COMMIT, revToBuild.revision.getSha1String());
Branch branch = Iterables.getFirst(revToBuild.revision.getBranches(),null);
if (branch!=null) { // null for a detached HEAD
environment.put(GIT_BRANCH, getBranchName(branch));
}
listener.getLogger().println("Checking out " + revToBuild.revision);
CheckoutCommand checkoutCommand = git.checkout().branch(getParamLocalBranch(build, listener)).ref(revToBuild.revision.getSha1String()).deleteBranchIfExist(true);
for (GitSCMExtension ext : this.getExtensions()) {
ext.decorateCheckoutCommand(this, build, git, listener, checkoutCommand);
}
try {
checkoutCommand.execute();
} catch(GitLockFailedException e) {
// Rethrow IOException so the retry will be able to catch it
throw new IOException("Could not checkout " + revToBuild.revision.getSha1String(), e);
}
build.addAction(new GitTagAction(build, workspace, buildData));
if (changelogFile != null) {
computeChangeLog(git, revToBuild.revision, listener, previousBuildData, new FilePath(changelogFile),
new BuildChooserContextImpl(build.getParent(), build, environment));
}
for (GitSCMExtension ext : extensions) {
ext.onCheckoutCompleted(this, build, git,listener);
}
}
/**
 * Build up change log from all the branches that we've merged into {@code revToBuild}.
 *
 * <p>
 * Intuitively, a changelog is a list of commits that's added since the "previous build" to the current build.
 * However, because of the multiple branch support in Git, this notion is ambiguous. For example, consider the
 * following commit graph where M1...M4 belongs to branch M, B1..B2 belongs to branch B, and so on:
 *
 * <pre>
 * M1 -> M2 -> M3 -> M4
 *  /        \     \     \
 * S -> B1 -> B2    \
 *  \                \
 *   C1 ---------------> C2
 * </pre>
 *
 * <p>
 * If Jenkins built B1, C1, B2, C2 in that order, then one'd prefer that the changelog of B2 only shows
 * just B1..B2, not C1..B2. To do this, we attribute every build to specific branches, and when we say
 * "since the previous build", what we really mean is "since the last build that built the same branch".
 *
 * <p>
 * TODO: if a branch merge is configured, then the first build will end up listing all the changes
 * in the upstream branch, which may be too many. To deal with this nicely, BuildData needs to remember
 * when we started merging this branch so that we can properly detect if the current build is the
 * first build that's merging a new branch.
 *
 * Another possibly sensible option is to always exclude all the commits that are happening in the remote branch.
 * Picture yourself developing a feature branch that closely tracks a busy mainline, then you might
 * not really care the changes going on in the main line. In this way, the changelog only lists your changes,
 * so "notify those who break the build" will not spam upstream developers, too.
 *
 * @param git
 *      Used for invoking Git
 * @param revToBuild
 *      Points to the revision we'll be building. This includes all the branches we've merged.
 * @param listener
 *      Used for writing to build console
 * @param previousBuildData
 *      Information that captures what we did during the last build. We need this for changelog,
 *      or else we won't know where to stop.
 */
private void computeChangeLog(GitClient git, Revision revToBuild, TaskListener listener, BuildData previousBuildData, FilePath changelogFile, BuildChooserContext context) throws IOException, InterruptedException {
Writer out = new OutputStreamWriter(changelogFile.write(),"UTF-8");
boolean executed = false;
ChangelogCommand changelog = git.changelog();
changelog.includes(revToBuild.getSha1());
try {
boolean exclusion = false;
ChangelogToBranch changelogToBranch = getExtensions().get(ChangelogToBranch.class);
if (changelogToBranch != null) {
listener.getLogger().println("Using 'Changelog to branch' strategy.");
changelog.excludes(changelogToBranch.getOptions().getRef());
exclusion = true;
} else {
// exclude, per built branch, the revision the previous build of that branch used
for (Branch b : revToBuild.getBranches()) {
Build lastRevWas = getBuildChooser().prevBuildForChangelog(b.getName(), previousBuildData, git, context);
if (lastRevWas != null && git.isCommitInRepo(lastRevWas.getSHA1())) {
changelog.excludes(lastRevWas.getSHA1());
exclusion = true;
}
}
}
if (!exclusion) {
// this is the first time we are building this branch, so there's no base line to compare against.
// if we force the changelog, it'll contain all the changes in the repo, which is not what we want.
listener.getLogger().println("First time build. Skipping changelog.");
} else {
changelog.to(out).max(MAX_CHANGELOG).execute();
executed = true;
}
} catch (GitException ge) {
ge.printStackTrace(listener.error("Unable to retrieve changeset"));
} finally {
// abort() releases the command's resources when it was never executed
if (!executed) changelog.abort();
IOUtils.closeQuietly(out);
}
}
/**
 * Contributes Git-related variables to the build environment: GIT_BRANCH,
 * GIT_COMMIT, GIT_PREVIOUS_COMMIT, GIT_PREVIOUS_SUCCESSFUL_COMMIT, and
 * GIT_URL (or GIT_URL_1..n for multiple remotes); then lets the descriptor
 * and every configured extension add their own variables.
 */
@Override
public void buildEnvVars(AbstractBuild<?, ?> build, java.util.Map<String, String> env) {
    super.buildEnvVars(build, env);
    Revision rev = fixNull(getBuildData(build)).getLastBuiltRevision();
    if (rev!=null) {
        Branch branch = Iterables.getFirst(rev.getBranches(), null);
        if (branch!=null) { // null for a detached HEAD
            env.put(GIT_BRANCH, getBranchName(branch));
            String prevCommit = getLastBuiltCommitOfBranch(build, branch);
            if (prevCommit != null) {
                env.put(GIT_PREVIOUS_COMMIT, prevCommit);
            }
            String prevSuccessfulCommit = getLastSuccessfulBuiltCommitOfBranch(build, branch);
            if (prevSuccessfulCommit != null) {
                env.put(GIT_PREVIOUS_SUCCESSFUL_COMMIT, prevSuccessfulCommit);
            }
        }
        env.put(GIT_COMMIT, fixEmpty(rev.getSha1String()));
    }
    // single remote exports GIT_URL; multiple remotes export GIT_URL_1, GIT_URL_2, ...
    if (userRemoteConfigs.size()==1){
        env.put("GIT_URL", userRemoteConfigs.get(0).getUrl());
    } else {
        int count=1;
        for(UserRemoteConfig config:userRemoteConfigs) {
            env.put("GIT_URL_"+count, config.getUrl());
            count++;
        }
    }
    getDescriptor().populateEnvironmentVariables(env);
    for (GitSCMExtension ext : extensions) {
        ext.populateEnvironmentVariables(this, env);
    }
}
/**
 * Normalizes a branch name for the environment variables: the
 * "refs/remotes/" prefix is stripped to restore the historical
 * "remote/branch" form.
 */
private String getBranchName(Branch branch)
{
    String remotePrefix = "refs/remotes/";
    String name = branch.getName();
    if (name.startsWith(remotePrefix)) {
        //Restore expected previous behaviour
        name = name.substring(remotePrefix.length());
    }
    return name;
}
/**
 * Looks up the SHA1 last built on the given branch by this job's previous (completed) build.
 *
 * @param build  the build whose predecessor's recorded build data is inspected
 * @param branch the branch to look up
 * @return SHA1 string of the branch's last built revision, or null when there is no
 *         previous build, no record for the branch, or no revision in that record
 */
private String getLastBuiltCommitOfBranch(AbstractBuild<?, ?> build, Branch branch) {
    String prevCommit = null;
    if (build.getPreviousBuiltBuild() != null) {
        final Build lastBuildOfBranch = fixNull(getBuildData(build.getPreviousBuiltBuild())).getLastBuildOfBranch(branch.getName());
        if (lastBuildOfBranch != null) {
            Revision previousRev = lastBuildOfBranch.getRevision();
            if (previousRev != null) {
                prevCommit = previousRev.getSha1String();
            }
        }
    }
    return prevCommit;
}
/**
 * Looks up the SHA1 last built on the given branch by this job's previous *successful* build.
 *
 * @param build  the build whose last successful predecessor is inspected
 * @param branch the branch to look up
 * @return SHA1 string of the branch's last successfully built revision, or null when
 *         there is no successful build or no record for the branch
 */
private String getLastSuccessfulBuiltCommitOfBranch(AbstractBuild<?, ?> build, Branch branch) {
    String prevCommit = null;
    if (build.getPreviousSuccessfulBuild() != null) {
        final Build lastSuccessfulBuildOfBranch = fixNull(getBuildData(build.getPreviousSuccessfulBuild())).getLastBuildOfBranch(branch.getName());
        if (lastSuccessfulBuildOfBranch != null) {
            Revision previousRev = lastSuccessfulBuildOfBranch.getRevision();
            if (previousRev != null) {
                prevCommit = previousRev.getSha1String();
            }
        }
    }
    return prevCommit;
}
@Override
public ChangeLogParser createChangeLogParser() {
    // Honor the author (rather than committer) identity when the AuthorInChangelog extension is configured.
    boolean useAuthor = getExtensions().get(AuthorInChangelog.class) != null;
    return new GitChangeLogParser(useAuthor);
}
@Extension
public static final class DescriptorImpl extends SCMDescriptor<GitSCM> {

    // Path of the git executable from the legacy (pre-GitTool) configuration; kept only for migration.
    private String gitExe;
    // Global value for "git config user.name"; null/blank means "not configured".
    private String globalConfigName;
    // Global value for "git config user.email"; null/blank means "not configured".
    private String globalConfigEmail;
    // When true, Jenkins accounts are created based on commit author e-mail addresses.
    private boolean createAccountBasedOnEmail;
//        private GitClientType defaultClientType = GitClientType.GITCLI;

    public DescriptorImpl() {
        super(GitSCM.class, GitRepositoryBrowser.class);
        // Restore the persisted global configuration from disk.
        load();
    }

    /** Name shown in the SCM selection UI. */
    public String getDisplayName() {
        return "Git";
    }

    /** Git can be used with any job type. */
    @Override public boolean isApplicable(Job project) {
        return true;
    }

    /** @return descriptors of all registered GitSCM extensions. */
    public List<GitSCMExtensionDescriptor> getExtensionDescriptors() {
        return GitSCMExtensionDescriptor.all();
    }

    /** @return true when more than one git tool is installed, so the UI shows a tool picker. */
    public boolean showGitToolOptions() {
        return Jenkins.getInstance().getDescriptorByType(GitTool.DescriptorImpl.class).getInstallations().length>1;
    }

    /**
     * Lists available tool installations.
     * @return list of available git tools
     */
    public List<GitTool> getGitTools() {
        GitTool[] gitToolInstallations = Hudson.getInstance().getDescriptorByType(GitTool.DescriptorImpl.class).getInstallations();
        return Arrays.asList(gitToolInstallations);
    }

    /** Fills the git tool drop-down with the names of all installations. */
    public ListBoxModel doFillGitToolItems() {
        ListBoxModel r = new ListBoxModel();
        for (GitTool git : getGitTools()) {
            r.add(git.getName());
        }
        return r;
    }

    /**
     * Path to git executable.
     * @deprecated
     * @see GitTool
     */
    @Deprecated
    public String getGitExe() {
        return gitExe;
    }

    /**
     * Global setting to be used in call to "git config user.name".
     */
    public String getGlobalConfigName() {
        return fixEmptyAndTrim(globalConfigName);
    }

    public void setGlobalConfigName(String globalConfigName) {
        this.globalConfigName = globalConfigName;
    }

    /**
     * Global setting to be used in call to "git config user.email".
     */
    public String getGlobalConfigEmail() {
        return fixEmptyAndTrim(globalConfigEmail);
    }

    public void setGlobalConfigEmail(String globalConfigEmail) {
        this.globalConfigEmail = globalConfigEmail;
    }

    /** @return true when accounts should be created from commit author e-mail addresses. */
    public boolean isCreateAccountBasedOnEmail() {
        return createAccountBasedOnEmail;
    }

    public void setCreateAccountBasedOnEmail(boolean createAccountBasedOnEmail) {
        this.createAccountBasedOnEmail = createAccountBasedOnEmail;
    }
    /**
     * Old configuration of git executable - exposed so that we can
     * migrate this setting to GitTool without deprecation warnings.
     */
    public String getOldGitExe() {
        return gitExe;
    }

    /**
     * Determine the browser from the scmData contained in the {@link StaplerRequest}.
     *
     * @param req the request carrying the submitted form
     * @param scmData JSON form data; may carry a "browser" sub-object
     * @return browser based on request scmData, or null when none was submitted
     */
    private GitRepositoryBrowser getBrowserFromRequest(final StaplerRequest req, final JSONObject scmData) {
        if (scmData.containsKey("browser")) {
            return req.bindJSON(GitRepositoryBrowser.class, scmData.getJSONObject("browser"));
        } else {
            return null;
        }
    }

    /**
     * Builds jgit remote configurations from parallel arrays of URL / name / refspec form values.
     * Null URLs are skipped; blank refspecs default to "+refs/heads/*:refs/remotes/NAME/*".
     */
    public static List<RemoteConfig> createRepositoryConfigurations(String[] urls,
                                                                    String[] repoNames,
                                                                    String[] refs) throws IOException {
        List<RemoteConfig> remoteRepositories;
        Config repoConfig = new Config();
        // Make up a repo config from the request parameters
        String[] names = repoNames;
        names = GitUtils.fixupNames(names, urls);
        for (int i = 0; i < names.length; i++) {
            String url = urls[i];
            if (url == null) {
                continue;
            }
            String name = names[i];
            // remote names must not contain spaces
            name = name.replace(' ', '_');
            if (isBlank(refs[i])) {
                refs[i] = "+refs/heads/*:refs/remotes/" + name + "/*";
            }
            repoConfig.setString("remote", name, "url", url);
            repoConfig.setStringList("remote", name, "fetch", new ArrayList<String>(Arrays.asList(refs[i].split("\\s+"))));
        }
        try {
            remoteRepositories = RemoteConfig.getAllRemoteConfigs(repoConfig);
        } catch (Exception e) {
            throw new GitException("Error creating repositories", e);
        }
        return remoteRepositories;
    }

    /**
     * Resolves the merge remote/target/strategy of a {@link UserMergeOptions} bean against the
     * configured repositories. An empty remote name falls back to the first repository.
     *
     * @throws FormException when the named merge remote does not exist
     */
    public static PreBuildMergeOptions createMergeOptions(UserMergeOptions mergeOptionsBean,
                                                          List<RemoteConfig> remoteRepositories)
            throws FormException {
        PreBuildMergeOptions mergeOptions = new PreBuildMergeOptions();
        if (mergeOptionsBean != null) {
            RemoteConfig mergeRemote = null;
            String mergeRemoteName = mergeOptionsBean.getMergeRemote().trim();
            if (mergeRemoteName.length() == 0) {
                mergeRemote = remoteRepositories.get(0);
            } else {
                for (RemoteConfig remote : remoteRepositories) {
                    if (remote.getName().equals(mergeRemoteName)) {
                        mergeRemote = remote;
                        break;
                    }
                }
            }
            if (mergeRemote == null) {
                throw new FormException("No remote repository configured with name '" + mergeRemoteName + "'", "git.mergeRemote");
            }
            mergeOptions.setMergeRemote(mergeRemote);
            mergeOptions.setMergeTarget(mergeOptionsBean.getMergeTarget());
            mergeOptions.setMergeStrategy(mergeOptionsBean.getMergeStrategy());
        }
        return mergeOptions;
    }

    /**
     * Form validation for the merge-remote name field: the name must match one of the
     * configured remotes (or be empty when validating a merge configuration).
     */
    public FormValidation doGitRemoteNameCheck(StaplerRequest req)
            throws IOException, ServletException {
        String mergeRemoteName = req.getParameter("value");
        boolean isMerge = req.getParameter("isMerge") != null;
        // Added isMerge because we don't want to allow empty remote names for tag/branch pushes.
        if (mergeRemoteName.length() == 0 && isMerge) {
            return FormValidation.ok();
        }
        String[] urls = req.getParameterValues("repo.url");
        String[] names = req.getParameterValues("repo.name");
        if (urls != null && names != null)
            for (String name : GitUtils.fixupNames(names, urls))
                if (name.equals(mergeRemoteName))
                    return FormValidation.ok();
        return FormValidation.error("No remote repository configured with name '" + mergeRemoteName + "'");
    }
    @Override
    public boolean configure(StaplerRequest req, JSONObject formData) throws FormException {
        // Data-bind the submitted global configuration onto this descriptor and persist it.
        req.bindJSON(this, formData);
        save();
        return true;
    }

    /**
     * Fill in the environment variables for launching git
     */
    public void populateEnvironmentVariables(Map<String,String> env) {
        String name = getGlobalConfigName();
        if (name!=null) {
            env.put("GIT_COMMITTER_NAME", name);
            env.put("GIT_AUTHOR_NAME", name);
        }
        String email = getGlobalConfigEmail();
        if (email!=null) {
            env.put("GIT_COMMITTER_EMAIL", email);
            env.put("GIT_AUTHOR_EMAIL", email);
        }
    }

//        public GitClientType getDefaultClientType() {
//            return defaultClientType;
//        }
//
//        public void setDefaultClientType(String defaultClientType) {
//            this.defaultClientType = GitClientType.valueOf(defaultClientType);
//        }
}
// Serialization compatibility marker for persisted job configurations.
private static final long serialVersionUID = 1L;

/** @return whether submodule configurations should be generated for this SCM. */
public boolean isDoGenerateSubmoduleConfigurations() {
    return this.doGenerateSubmoduleConfigurations;
}

/** @return the branch specs this SCM is configured to build. */
@Exported
public List<BranchSpec> getBranches() {
    return branches;
}
/**
 * Returns the user-assigned SCM name when one is configured; otherwise derives a
 * stable key of the form "git URI [URI ...]" from every configured repository.
 */
@Override public String getKey() {
    String scmName = getScmName();
    if (scmName != null) {
        return scmName;
    }
    StringBuilder key = new StringBuilder("git");
    for (RemoteConfig config : getRepositories()) {
        for (URIish uri : config.getURIs()) {
            key.append(' ').append(uri.toString());
        }
    }
    return key.toString();
}
/**
 * Use {@link PreBuildMerge}.
 *
 * @deprecated replaced by the PreBuildMerge extension; kept for API compatibility.
 * @throws FormException when the configured merge remote cannot be resolved
 */
@Exported
@Deprecated
public PreBuildMergeOptions getMergeOptions() throws FormException {
    return DescriptorImpl.createMergeOptions(getUserMergeOptions(), remoteRepositories);
}
/**
 * Whether the given build data was produced against any of this SCM's configured remote URLs.
 *
 * @param bd recorded build data from some earlier build
 * @return true when the data references one of our remotes
 */
private boolean isRelevantBuildData(BuildData bd) {
    for(UserRemoteConfig c : getUserRemoteConfigs()) {
        if(bd.hasBeenReferenced(c.getUrl())) {
            return true;
        }
    }
    return false;
}
/**
 * @deprecated use {@link #getBuildData(Run)} or {@link #copyBuildData(Run)} directly.
 * @param clone when true, returns a defensive copy suitable for mutation
 */
public BuildData getBuildData(Run build, boolean clone) {
    return clone ? copyBuildData(build) : getBuildData(build);
}
/**
 * Like {@link #getBuildData(Run)}, but copy the data into a new object,
 * which is used as the first step for updating the data for the next build.
 * When no prior data exists, a fresh record is started.
 */
public BuildData copyBuildData(Run build) {
    BuildData base = getBuildData(build);
    return (base == null)
            ? new BuildData(getScmName(), getUserRemoteConfigs())
            : base.clone();
}
/**
 * Find the build log (BuildData) recorded with the last build that completed. BuildData
 * may not be recorded if an exception occurs in the plugin logic.
 *
 * @param build the starting build; its history is walked backwards
 * @return the most recent relevant build data, or null when none is found
 */
public @CheckForNull BuildData getBuildData(Run build) {
    // Walk backwards through the build history; the first BuildData that references
    // one of our configured remotes wins.
    for (Run r = build; r != null; r = r.getPreviousBuild()) {
        for (BuildData bd : r.getActions(BuildData.class)) {
            if (bd != null && isRelevantBuildData(bd)) {
                return bd;
            }
        }
    }
    return null;
}
/**
 * Given the workspace, gets the working directory, which will be the workspace
 * if no relative target dir is specified. Otherwise, it'll be "workspace/relativeTargetDir".
 *
 * @param workspace the build workspace; may be null (JENKINS-10880)
 * @return working directory or null if workspace is null
 */
protected FilePath workingDirectory(Job<?,?> context, FilePath workspace, EnvVars environment, TaskListener listener) throws IOException, InterruptedException {
    // JENKINS-10880: workspace can be null
    if (workspace == null) {
        return null;
    }
    // The first extension that supplies a working directory wins.
    for (GitSCMExtension ext : extensions) {
        FilePath r = ext.getWorkingDirectory(this, context, workspace, environment, listener);
        if (r!=null) return r;
    }
    return workspace;
}
/**
 * Given a Revision "r", check whether the list of revisions "COMMITS_WE_HAVE_BUILT..r" are to be entirely excluded given the exclusion rules
 *
 * @param git GitClient object
 * @param r Revision object
 * @param listener build console listener
 * @param buildData previously recorded build data; bounds the revision range when available
 * @return true if any exclusion files are matched, false otherwise.
 */
private boolean isRevExcluded(GitClient git, Revision r, TaskListener listener, BuildData buildData) throws IOException, InterruptedException {
    try {
        List<String> revShow;
        if (buildData != null && buildData.lastBuild != null) {
            // Only inspect the range since the last build we know about.
            revShow = git.showRevision(buildData.lastBuild.revision.getSha1(), r.getSha1());
        } else {
            revShow = git.showRevision(r.getSha1());
        }
        // Appending a trailing "commit " line makes the loop below flush the final real commit chunk.
        revShow.add("commit "); // sentinel value
        int start=0, idx=0;
        for (String line : revShow) {
            // Each "commit " header closes the chunk [start, idx) describing the previous commit.
            if (line.startsWith("commit ") && idx!=0) {
                GitChangeSet change = new GitChangeSet(revShow.subList(start,idx), getExtensions().get(AuthorInChangelog.class)!=null);
                // The first extension with an opinion (non-null) decides for this commit.
                Boolean excludeThisCommit=null;
                for (GitSCMExtension ext : extensions) {
                    excludeThisCommit = ext.isRevExcluded(this, git, change, listener, buildData);
                    if (excludeThisCommit!=null)
                        break;
                }
                if (excludeThisCommit==null || !excludeThisCommit)
                    return false; // this sequence of commits have one commit that we want to build
                start = idx;
            }
            idx++;
        }
        assert start==revShow.size()-1;
        // every commit got excluded
        return true;
    } catch (GitException e) {
        e.printStackTrace(listener.error("Failed to determine if we want to exclude " + r.getSha1String()));
        return false; // for historical reason this is not considered a fatal error.
    }
}
/**
 * Warns on startup when the deprecated gitExe setting is still in use and
 * differs from the default GitTool installation.
 */
@Initializer(after=PLUGINS_STARTED)
public static void onLoaded() {
    DescriptorImpl desc = Jenkins.getInstance().getDescriptorByType(DescriptorImpl.class);
    String oldExe = desc.getOldGitExe();
    if (oldExe == null) {
        return;
    }
    // Nothing to warn about when the legacy value already matches the default tool.
    if (!oldExe.equals(GitTool.getDefaultInstallation().getGitExe())) {
        System.err.println("[WARNING] you're using deprecated gitexe attribute to configure git plugin. Use Git installations");
    }
}
/**
 * Registers XStream converters/aliases so that build records and job configurations
 * written by older plugin versions (including the org.spearce jgit package name)
 * still deserialize correctly.
 */
@Initializer(before=JOB_LOADED)
public static void configureXtream() {
    Run.XSTREAM.registerConverter(new ObjectIdConverter());
    Items.XSTREAM.registerConverter(new RemoteConfigConverter(Items.XSTREAM));
    Items.XSTREAM.alias("org.spearce.jgit.transport.RemoteConfig", RemoteConfig.class);
}
// Class-wide logger for this SCM implementation.
private static final Logger LOGGER = Logger.getLogger(GitSCM.class.getName());

/**
 * Set to true to enable more logging to build's {@link TaskListener}.
 * Used by various classes in this package.
 */
public static boolean VERBOSE = Boolean.getBoolean(GitSCM.class.getName() + ".verbose");

/**
 * To avoid pointlessly large changelog, we'll limit the number of changes up to this.
 * Overridable via the system property "<this class name>.maxChangelog".
 */
public static final int MAX_CHANGELOG = Integer.getInteger(GitSCM.class.getName()+".maxChangelog",1024);
}
| {
"content_hash": "08089cbbd6b3f25dcefd0aa9644b2d4c",
"timestamp": "",
"source": "github",
"line_count": 1556,
"max_line_length": 217,
"avg_line_length": 39.65167095115681,
"alnum_prop": 0.6100683976790171,
"repo_name": "oleg-nenashev/git-plugin",
"id": "e45a9041e6abc2337f662fe63e20a1f6fe8d8d6b",
"size": "61698",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/main/java/hudson/plugins/git/GitSCM.java",
"mode": "33188",
"license": "mit",
"language": [],
"symlink_target": ""
} |
<?php
if (!defined('ELK'))
die('No access...');
/**
* Attempts, through an outrageous set of assumptions, to reflow/format an email message
* that was previously wrapped by an email client (so it would look good on
* a 80x24 screen)
*
* Removes extra spaces and newlines
* Fixes some common punctuation errors seen in emails
* Joins lines back together, where needed, to undo the 78 - 80 char wrap in email
*
* Really this is built on a house of cards and should generally be viewed
* as an unfortunate evil if you want a post to *not* look like it's an email.
* It's always the innocent bystanders who suffer most.
*
* Load class
* Initiate as
* - $formatter = new Email_Format();
*
* Make the call, accepts a string of data and returns it formatted
* - $body = $formatter->reflow($body, '', $html);
*
* @package Maillist
*/
class Email_Format
{
	/**
	 * The full message section we will return
	 * @var string
	 */
	private $_body = null;

	/**
	 * The full message section broken in to parts, one entry per input line
	 * @var mixed[]
	 */
	private $_body_array = array();

	/**
	 * Holds the current [quote] nesting depth we are in
	 * @var int
	 */
	private $_in_quote = 0;

	/**
	 * Holds the current [code] block nesting depth we are in
	 * @var int
	 */
	private $_in_code = 0;

	/**
	 * Holds the nesting level of bbc [list] we are in
	 * @var int
	 */
	private $_in_bbclist = 0;

	/**
	 * Running count of plain-text list items seen (a) 1. etc.)
	 * @var int
	 */
	private $_in_plainlist = 0;

	/**
	 * Count of plain-text list items while actively inside a list, 0 when not in one
	 * @var int
	 */
	private $_in_list = 0;

	/**
	 * Set if we have entered an area of the message that is a signature block
	 * @var boolean
	 */
	private $_found_sig = false;

	/**
	 * Holds the members display name, used for signature check etc.
	 * @var string
	 */
	private $_real_name = null;

	/**
	 * Tuning value (fudge) used to decide if a line is short
	 * change with care, used to help figure out wrapping decisions
	 * @var int
	 */
	private $_maillist_short_line = null;

	/**
	 * Extra items to removed, defined in the acp
	 * @var string[]
	 */
	private $_maillist_leftover_remove = null;

	/**
	 * Items that may indicate the start of a signature line, defined in the acp
	 * @var string[]
	 */
	private $_maillist_sig_keys = null;

	/**
	 * Tuning delta value (fudge) to help indicate the last line in a paragraph
	 * change with care
	 * @var int
	 */
	private $_para_check = 25;

	/**
	 * tuning value used to define a long line in a signature line
	 * change with care
	 * @var int
	 */
	private $_sig_longline = 67;
	/**
	 * Main routine, calls the needed functions in the order needed
	 *
	 * - Returns a formatted string
	 *
	 * @param string $data raw message body to reflow
	 * @param boolean $html true when the source was an html email (currently unused here)
	 * @param string $real_name the posting member's display name, used for signature detection
	 * @param string $charset character set of the message, used for cp1252 clean-up
	 * @param boolean $bbc_br when true, [br] tags are converted back to newlines before processing
	 * @return string the reflowed message body
	 */
	public function reflow($data, $html = false, $real_name = '', $charset = 'UTF-8', $bbc_br = true)
	{
		global $modSettings;

		// load some acp settings in to the class
		$this->_maillist_short_line = empty($modSettings['maillist_short_line']) ? 33 : $modSettings['maillist_short_line'];
		$this->_maillist_leftover_remove = empty($modSettings['maillist_leftover_remove']) ? '' : $modSettings['maillist_leftover_remove'];
		$this->_maillist_sig_keys = empty($modSettings['maillist_sig_keys']) ? '' : $modSettings['maillist_sig_keys'];
		$this->_real_name = $real_name;

		$this->_prep_data($data, $bbc_br);
		$this->_fix_body();
		$this->_clean_up($charset);

		return $this->_body;
	}
	/**
	 * Takes a string of data and creates a line by line array broken on newlines
	 *
	 * - Builds all needed details for each array element, including length, if its
	 * in a quote (&depth) code (&depth) or list (bbc or plain) etc.
	 *
	 * @param string $data
	 * @param boolean $bbc_br when true, [br] tags are first converted to newlines
	 */
	private function _prep_data($data, $bbc_br)
	{
		// Un-wordwrap the email, create a line by line array broken on the newlines
		if ($bbc_br === true)
			$data = str_replace('[br]', "\n", $data);
		$temp = explode("\n", $data);

		// Remove any 'stuck' whitespace using the trim value function on all lines
		array_walk($temp, array($this, '_trim_value'));

		// Get some processing details for each line
		for ($i = 0, $num = count($temp); $i < $num; $i++)
		{
			$this->_body_array[$i]['content'] = $temp[$i];
			$this->_body_array[$i]['length'] = Util::strlen($temp[$i]);

			// Text lists a) 1. etc
			$this->_body_array[$i]['list_item'] = $this->_in_plainlist($temp[$i]);

			// [quote]
			$this->_in_quote($temp[$i]);
			$this->_body_array[$i]['in_quote'] = $this->_in_quote;

			// [code]
			$this->_in_code($temp[$i]);
			$this->_body_array[$i]['in_code'] = $this->_in_code;

			// [list]
			$this->_in_bbclist($temp[$i]);
			$this->_body_array[$i]['in_bbclist'] = $this->_in_bbclist;
		}

		// Reset our index values
		$this->_in_bbclist = 0;
		$this->_in_code = 0;
		$this->_in_quote = 0;
		$this->_in_list = 0;
	}
	/**
	 * Goes through the message array and only inserts line feeds (breaks) where
	 * they are needed, allowing all other text to flow in one line.
	 *
	 * - Inserts breaks at blank lines, around bbc quote/code/list, text lists,
	 * signature lines and end of paragraphs ... all assuming it can figure or
	 * best guess those areas.
	 */
	private function _fix_body()
	{
		// Go line by line and put in line breaks *only* where (we often erroneously assume) they are needed
		for ($i = 0, $num = count($this->_body_array); $i < $num; $i++)
		{
			// We are already in a text list, and this current line does not start the next list item
			if ($this->_in_list && !$this->_body_array[$i]['list_item'])
			{
				// Are we at the last known list item?, if so we can turn wrapping off
				if (isset($this->_body_array[$i + 1]) && $this->_in_list === $this->_in_plainlist)
				{
					$this->_body_array[$i - 1]['content'] = $this->_body_array[$i - 1]['content'] . "\n";
					$this->_in_list = 0;
				}
				else
					$this->_body_array[$i]['content'] = ' ' . trim($this->_body_array[$i]['content']);
			}

			// Long line in a sig ... but not a link then lets bail out might be a ps or something
			if ($this->_found_sig && ($this->_body_array[$i]['length'] > $this->_sig_longline) && (substr($this->_body_array[$i]['content'], 0, 4) !== 'www.'))
				$this->_found_sig = false;

			// Blank line, if its not two in a row and not the start of a bbc code then insert a newline
			if ($this->_body_array[$i]['content'] == '')
			{
				if ((isset($this->_body_array[$i - 1])) && ($this->_body_array[$i - 1]['content'] !== "\n") && (substr($this->_body_array[$i - 1]['content'], 0, 1) !== '[') && ($this->_body_array[$i - 1]['length'] > $this->_maillist_short_line))
					$this->_body_array[$i]['content'] = "\n";
			}
			// Lists like a. a) 1. 1)
			elseif ($this->_body_array[$i]['list_item'])
			{
				$this->_in_list++;
				$this->_body_array[$i]['content'] = "\n" . $this->_body_array[$i]['content'];
			}
			// Signature line start as defined in the ACP, i.e. best, regards, thanks
			elseif ($this->_in_sig($i))
			{
				$this->_body_array[$i]['content'] = "\n\n\n" . $this->_body_array[$i]['content'];
				$this->_found_sig = true;
			}
			// Message stuff which should not be here any longer (as defined in the ACP) i.e. To: From: Subject:
			elseif (!empty($this->_maillist_leftover_remove) && preg_match('~^((\[b\]){0,2}(' . $this->_maillist_leftover_remove . ')(\[\/b\]){0,2})~', $this->_body_array[$i]['content']))
			{
				if ($this->_in_quote)
					$this->_body_array[$i]['content'] = "\n";
				else
					$this->_body_array[$i]['content'] = $this->_body_array[$i]['content'] . "\n";
			}
			// Line starts with a link .....
			elseif (in_array(substr($this->_body_array[$i]['content'], 0, 4), array('www.', 'WWW.', 'http', 'HTTP')))
			{
				$this->_body_array[$i]['content'] = "\n" . $this->_body_array[$i]['content'];
			}
			// Previous line ended in a break already
			elseif (isset($this->_body_array[$i - 1]['content']) && substr(trim($this->_body_array[$i - 1]['content']), -4) == '[br]')
			{
				$this->_body_array[$i]['content'] = $this->_body_array[$i]['content'];
			}
			// OK, we can't seem to think of other obvious reasons this should not be on the same line
			// and these numbers are quite frankly subjective, but so is how we got here, final "check"
			else
			{
				// Its a wrap ... maybe; delta of this line's length vs the previous line's
				if ($i > 0)
					$para_check = $this->_body_array[$i]['length'] - $this->_body_array[$i - 1]['length'];
				else
					$para_check = 1;

				// If this line is longer than the line above it we need to do some extra checks
				if (($i > 0) && ($this->_body_array[$i - 1]['length'] > $this->_maillist_short_line) && !$this->_found_sig && !$this->_in_code && !$this->_in_bbclist)
				{
					// If the previous short line did not end in a period or it did and the next line does not start
					// with a capital and passes para check then it wraps
					if ((substr($this->_body_array[$i - 1]['content'], -1) !== '.') || (substr($this->_body_array[$i - 1]['content'], -1) === '.' && $para_check < $this->_para_check && ($this->_body_array[$i]['content'][0] !== strtoupper($this->_body_array[$i]['content'][0]))))
						$this->_body_array[$i]['content'] = $this->_body_array[$i]['content'];
					else
						$this->_body_array[$i]['content'] = "\n" . $this->_body_array[$i]['content'];
				}
				elseif ($para_check < 5)
					$this->_body_array[$i]['content'] = "\n" . $this->_body_array[$i]['content'];
				// A very short line (but not a empty one) followed by a very long line
				elseif (isset($this->_body_array[$i - 1]) && !empty($this->_body_array[$i - 1]['content']) && $para_check > $this->_sig_longline && $this->_body_array[$i - 1]['length'] < 3)
					$this->_body_array[$i]['content'] = $this->_body_array[$i]['content'];
				else
					$this->_body_array[$i]['content'] = "\n\n" . $this->_body_array[$i]['content'];
			}
		}

		// Close any open quotes we may have left behind
		for ($quotes = 1; $quotes <= $this->_in_quote; $quotes++)
			$this->_body_array[$i + $quotes]['content'] = '[/quote]';

		// Join the message back together while dropping null index's
		$temp = array();
		foreach ($this->_body_array as $key => $values)
			$temp[] = $values['content'];
		$this->_body = trim(implode(' ', array_values($temp)));
	}
	/**
	 * Repairs common problems either caused by the reflow or just things found
	 * in emails.
	 *
	 * @param string $charset character set of the message; non UTF-8 input gets cp1252 fix-ups
	 */
	private function _clean_up($charset)
	{
		// Remove any chitta chatta from either end
		$tag = '(>([^a-zA-Z0-9_\[\s]){0,3}){1}';
		$this->_body = preg_replace("~\n" . $tag . '~', "\n", $this->_body);

		// Clean up double breaks found between bbc formatting tags, msoffice loves to do this
		$this->_body = preg_replace('~\]\s*\[br\]\s*\[br\]\s*\[~s', '][br][', $this->_body);

		// Repair the non-breaking space in its various states and any other chaff
		$this->_body = strtr($this->_body, array(' ' => ' ', ' ' => ' ', "\xc2\xa0" => ' ', "\xe2\x80\xa8" => "\n", "\xA0" => ' '));

		// Trailing space before an end quote
		$this->_body = preg_replace('~\s*\n\s*\[/quote\]~', '[/quote]', $this->_body);

		// Any number of spaces (including none), followed by newlines, followed by any number of spaces (including none),
		$this->_body = preg_replace("~(\s*[\n]\s*){2,}~", "\n\n", $this->_body);

		// Whats with multiple commas ??
		$this->_body = preg_replace('~(\s*[,]\s*){2,}~', ', ', $this->_body);

		// commas ,in ,the ,wrong ,place? ... find a space then a word starting with a comma broken on word boundary's
		$this->_body = preg_replace('~(?:^|\s),(\w+)\b~', ', $1', $this->_body);

		// Punctuation missing a space like about.This ... should be about. This or about,this should be about, this
		// ... did no one go to school? OK it probably is from our parser :P ...
		// Look for a word boundary, any number of word characters, a single lower case, a period a single uppercase
		// any number of word characters and a boundary
		$this->_body = preg_replace('~(\b\w+[a-z])\.([A-Z]\w+)\b~', '$1. $2', $this->_body);
		$this->_body = preg_replace('~(\b\w+[A-z])\,([A-z]\w+)\b~', '$1, $2', $this->_body);
		$this->_body = preg_replace('~(\b\w+[A-z])\,([A-Z]\w+)\b~', '$1, $2', $this->_body);
		$this->_body = preg_replace('~(\b\w+[a-z])\,([a-z]\w+)\b~', '$1, $2', $this->_body);
		$this->_body = preg_replace('~(\b\w+[a-z])\s\.([A-Z]\w+)\b~', '$1. $2', $this->_body);

		// Some tags often end up as just dummy tags, bla bla bla you have read this before yes?
		$this->_body = preg_replace('~\[[bisu]\]\s*\[/[bisu]\]~', '', $this->_body);
		$this->_body = preg_replace('~\[quote\]\s*\[/quote\]~', '', $this->_body);

		// Make sure an email did not end up as the authors name .... [quote author=Joe Blow [email]joeblow@gmail.com[/email]]
		$this->_body = preg_replace('~\[quote (author=.*)\[email].*\[/email\]\]~', '[quote $1]', $this->_body);

		// Any htmlenties that we want to remove, like ms smart ones?
		if (preg_match('~“|”|–|—|‘|’~', $this->_body))
			$this->_body = html_entity_decode($this->_body, ENT_QUOTES, 'UTF-8');

		// Avoid double encoding later on
		$this->_body = htmlspecialchars_decode($this->_body, ENT_QUOTES);

		// Convert other characters like MS "smart" quotes both uf8
		$this->_body = strtr($this->_body, array("\xe2\x80\x98" => "'", "\xe2\x80\x99" => "'", "\xe2\x80\x9c" => '"', "\xe2\x80\x9d" => '"', "\xe2\x80\x93" => '-', "\xe2\x80\x94" => '--', "\xe2\x80\xa6" => '...'));

		// And its 1252 variants
		if ($charset !== 'UTF-8')
			$this->_body = strtr($this->_body, array(chr(145) => "'", chr(146) => "'", chr(147) => '"', chr(148) => '"', chr(150) => '-', chr(151) => '--', chr(133) => '...'));
	}
/**
* Checks if a string is the start or end of a bbc [quote] line
*
* - Keeps track of the tag depth
*
* @param string $var
*/
private function _in_quote($var)
{
// In a quote?
if (preg_match('~\[quote( author=.*)?\]?~', $var))
{
// Make sure it is not a single line quote
if (!preg_match('~\[/quote\]?~', $var))
$this->_in_quote++;
}
elseif (preg_match('~\[/quote\]?~', $var))
$this->_in_quote--;
}
	/**
	 * Checks if a string is potentially the start of a signature line
	 *
	 * @param int $i index of the line to inspect in the body array
	 * @return boolean true when the line looks like the start of a signature block
	 */
	private function _in_sig($i)
	{
		// Not in a sig yet, the line starts with a sig key as defined by the ACP, and its a short line of text
		if (!$this->_found_sig && !empty($this->_maillist_sig_keys) && (preg_match('~^(' . $this->_maillist_sig_keys . ')~i', $this->_body_array[$i]['content']) && ($this->_body_array[$i]['length'] < $this->_maillist_short_line)))
			return true;
		// The line is simply just their name
		elseif (($this->_body_array[$i]['content'] === $this->_real_name) && !$this->_found_sig)
			return true;
		// check for universal sig dashes
		elseif (!$this->_found_sig && preg_match('~^-- \n~m', $this->_body_array[$i]['content']))
			return true;

		return false;
	}
/**
* Checks if a string is the start or end of a bbc [code] tag
*
* - Keeps track of the tag depth
*
* @param string $var
*/
private function _in_code($var)
{
// In a code block?
if (preg_match('~\[code\]?~', $var))
{
// Make sure it is not a single line code
if (!preg_match('~\[/code\]?~', $var))
$this->_in_code++;
}
elseif (preg_match('~\[/code\]?~', $var))
$this->_in_code--;
}
/**
* Checks if a string is the start or end of a bbc [list] tag
*
* - Keeps track of the tag depth
*
* @param string $var
*/
private function _in_bbclist($var)
{
// Starting a bbc list
if (preg_match('~\[list\]?~', $var))
$this->_in_bbclist++;
// Ending a bbc list
elseif (preg_match('~\[\/list\]?~', $var))
$this->_in_bbclist--;
}
/**
* Checks if a string starts with a plain list tag
* like 1) 1. a) b.
*
* @param string $var
*/
private function _in_plainlist($var)
{
// Starting a list like a) 1. 1) etc ...
$temp = $this->_in_plainlist;
if (preg_match('~^[a-j](\.|\)|-)\s~i', $var) || preg_match('~^[1-9](\.|\)|-)\s?~', $var) || preg_match('~' . chr(187) . '~', $var))
$this->_in_plainlist++;
return $this->_in_plainlist !== $temp;
}
/**
* Callback function for array_walk to remove spaces
*
* - can be translated to 0xA0, or in UTF8 as chr(0xC2).chr(0xA0)
* this function looks to remove all of those in any form. Needed because
* email is often has its character set mangled.
*
* @param string $value
*/
private function _trim_value(&$value)
{
$value = trim($value);
$value = trim($value, chr(0xC2) . chr(0xA0));
$value = trim($value, "\xA0");
$value = trim($value);
}
} | {
"content_hash": "d976ae886d71c40810248b33a746177a",
"timestamp": "",
"source": "github",
"line_count": 472,
"max_line_length": 263,
"avg_line_length": 35.04025423728814,
"alnum_prop": 0.5844972489267791,
"repo_name": "Ant59/Elkarte",
"id": "fb3e2615435ec502a0792bcde319b49d2ae3001e",
"size": "16788",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sources/subs/EmailFormat.class.php",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ApacheConf",
"bytes": "279"
},
{
"name": "CSS",
"bytes": "327853"
},
{
"name": "HTML",
"bytes": "506968"
},
{
"name": "JavaScript",
"bytes": "503893"
},
{
"name": "PHP",
"bytes": "8637604"
}
],
"symlink_target": ""
} |
package de.larscheidschmitzhermes.collections;
import de.larscheidschmitzhermes.collections.interfaces.CollectionSize;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import javax.annotation.Priority;
import javax.ws.rs.container.ContainerRequestContext;
import javax.ws.rs.container.ContainerResponseContext;
import javax.ws.rs.container.ContainerResponseFilter;
import javax.ws.rs.ext.Provider;
import java.io.IOException;
import java.lang.annotation.Annotation;
import java.util.Arrays;
@Provider
@Priority(1000)
@CollectionSize
public class CollectionSizeFilter implements ContainerResponseFilter {

    private Logger log = LoggerFactory.getLogger(CollectionSizeFilter.class);

    /**
     * Adds a response header carrying the size of the entity: the collection size for
     * collections, 1 for any other non-null entity. Null entities pass through untouched.
     * The header name comes from the {@link CollectionSize} annotation on the resource method.
     */
    @Override
    public void filter(ContainerRequestContext requestContext, ContainerResponseContext responseContext) throws IOException {
        Object entity = responseContext.getEntity();
        if (entity == null) {
            log.debug("Entity is null, not appending anything!");
            return;
        }
        String headerName = extractHeaderNameFromAnnotation(responseContext.getEntityAnnotations());
        responseContext.getHeaders().put(headerName, Arrays.asList(extractSizeFromEntity(entity)));
    }

    /** Finds the configured header name on the @CollectionSize annotation of the resource. */
    private String extractHeaderNameFromAnnotation(Annotation[] annotations) {
        for (Annotation candidate : annotations) {
            if (candidate instanceof CollectionSize) {
                return ((CollectionSize) candidate).headerName();
            }
        }
        // This point is technically unreachable (otherwise there is a problem with jax-rs);
        // the exception keeps the compiler happy.
        throw new IllegalStateException("Missing required @CollectionSize annotation - this should not be possible.");
    }

    /** Collection entities report their size; anything else counts as a single item. */
    private Integer extractSizeFromEntity(Object entity) {
        if (entity instanceof java.util.Collection) {
            return ((java.util.Collection) entity).size();
        }
        log.debug("Entity is {} and no collection. Returning size 1.", entity);
        return 1;
    }
}
| {
"content_hash": "9cf5a92d8c2728e527e086c25fdebda8",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 181,
"avg_line_length": 40.372549019607845,
"alnum_prop": 0.7294803302574066,
"repo_name": "larscheid-schmitzhermes/collection-size-filter",
"id": "d45a9707372f4512bde5ae1b432664d72528ab40",
"size": "2059",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/main/java/de/larscheidschmitzhermes/collections/CollectionSizeFilter.java",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Java",
"bytes": "23514"
}
],
"symlink_target": ""
} |