| patch (string, 17-31.2k chars) | y (int64, always 1) | oldf (string, 0-2.21M chars) | idx (int64, always 1) | id (int64, 4.29k-68.4k) | msg (string, 8-843 chars) | proj (string, 212 classes) | lang (string, 9 classes) |
---|---|---|---|---|---|---|---|
@@ -53,7 +53,7 @@ public class ProtocGapicPluginGeneratorTest {
model.getFiles().stream().map(ProtoFile::getProto).collect(Collectors.toList()))
// Only the file to generate a client for (don't generate dependencies)
.addFileToGenerate("multiple_services.proto")
- .setParameter("language=java")
+ .setParameter("language=java,transport=grpc")
.build();
CodeGeneratorResponse response = ProtocGeneratorMain.generate(codeGeneratorRequest); | 1 | /* Copyright 2019 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.api.codegen.gapic;
import com.google.api.codegen.CodegenTestUtil;
import com.google.api.codegen.ProtocGeneratorMain;
import com.google.api.codegen.protoannotations.GapicCodeGeneratorAnnotationsTest;
import com.google.api.tools.framework.model.Model;
import com.google.api.tools.framework.model.ProtoFile;
import com.google.api.tools.framework.model.testing.TestDataLocator;
import com.google.common.truth.Truth;
import com.google.protobuf.compiler.PluginProtos.CodeGeneratorRequest;
import com.google.protobuf.compiler.PluginProtos.CodeGeneratorResponse;
import java.util.stream.Collectors;
import org.junit.BeforeClass;
import org.junit.ClassRule;
import org.junit.Test;
import org.junit.rules.TemporaryFolder;
public class ProtocGapicPluginGeneratorTest {
private static String[] protoFiles = {"multiple_services.proto"};
private static TestDataLocator testDataLocator;
private static Model model;
@ClassRule public static TemporaryFolder tempDir = new TemporaryFolder();
@BeforeClass
public static void startUp() {
testDataLocator = TestDataLocator.create(GapicCodeGeneratorAnnotationsTest.class);
testDataLocator.addTestDataSource(CodegenTestUtil.class, "testsrc/common");
model = CodegenTestUtil.readModel(testDataLocator, tempDir, protoFiles, new String[] {});
}
@Test
public void testGenerator() {
CodeGeneratorRequest codeGeneratorRequest =
CodeGeneratorRequest.newBuilder()
// All proto files, including dependencies
.addAllProtoFile(
model.getFiles().stream().map(ProtoFile::getProto).collect(Collectors.toList()))
// Only the file to generate a client for (don't generate dependencies)
.addFileToGenerate("multiple_services.proto")
.setParameter("language=java")
.build();
CodeGeneratorResponse response = ProtocGeneratorMain.generate(codeGeneratorRequest);
// TODO(andrealin): Look into setting these up as baseline files.
Truth.assertThat(response).isNotNull();
Truth.assertThat(response.getError()).isEmpty();
Truth.assertThat(response.getFileCount()).isEqualTo(15);
Truth.assertThat(response.getFile(0).getContent()).contains("DecrementerServiceClient");
}
@Test
public void testFailingGenerator() {
CodeGeneratorRequest codeGeneratorRequest =
CodeGeneratorRequest.newBuilder()
.addAllProtoFile(
model.getFiles().stream().map(ProtoFile::getProto).collect(Collectors.toList()))
// File does not exist.
.addFileToGenerate("fuuuuudge.proto")
.build();
CodeGeneratorResponse response = ProtocGeneratorMain.generate(codeGeneratorRequest);
Truth.assertThat(response).isNotNull();
Truth.assertThat(response.getError()).isNotEmpty();
}
}
| 1 | 30,879 | can we also test for `transport=rest`? | googleapis-gapic-generator | java |
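A sketch of the companion test the reviewer is asking for, assuming the generator accepts `transport=rest` the same way it accepts `transport=grpc` in the patch above; the test name and the omission of file-count assertions are illustrative choices, not verified behavior:

```java
// Hypothetical test within the same ProtocGapicPluginGeneratorTest class.
@Test
public void testRestTransportGenerator() {
  CodeGeneratorRequest codeGeneratorRequest =
      CodeGeneratorRequest.newBuilder()
          .addAllProtoFile(
              model.getFiles().stream().map(ProtoFile::getProto).collect(Collectors.toList()))
          .addFileToGenerate("multiple_services.proto")
          .setParameter("language=java,transport=rest")
          .build();
  CodeGeneratorResponse response = ProtocGeneratorMain.generate(codeGeneratorRequest);
  Truth.assertThat(response).isNotNull();
  Truth.assertThat(response.getError()).isEmpty();
}
```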
@@ -182,7 +182,9 @@ abstract class AbstractSolrBackendFactory implements FactoryInterface
*/
protected function createBackend(Connector $connector)
{
+ $config = $this->config->get($this->mainConfig);
$backend = new $this->backendClass($connector);
+ $backend->setPageSize($config->Index->record_batch_size);
$backend->setQueryBuilder($this->createQueryBuilder());
$backend->setSimilarBuilder($this->createSimilarBuilder());
if ($this->logger) { | 1 | <?php
/**
* Abstract factory for SOLR backends.
*
* PHP version 7
*
* Copyright (C) Villanova University 2013.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2,
* as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*
* @category VuFind
* @package Search
* @author David Maus <maus@hab.de>
* @license http://opensource.org/licenses/gpl-2.0.php GNU General Public License
* @link https://vufind.org Main Site
*/
namespace VuFind\Search\Factory;
use Interop\Container\ContainerInterface;
use Laminas\Config\Config;
use Laminas\ServiceManager\Factory\FactoryInterface;
use VuFind\Search\Solr\DeduplicationListener;
use VuFind\Search\Solr\FilterFieldConversionListener;
use VuFind\Search\Solr\HideFacetValueListener;
use VuFind\Search\Solr\HierarchicalFacetListener;
use VuFind\Search\Solr\InjectConditionalFilterListener;
use VuFind\Search\Solr\InjectHighlightingListener;
use VuFind\Search\Solr\InjectSpellingListener;
use VuFind\Search\Solr\MultiIndexListener;
use VuFind\Search\Solr\V3\ErrorListener as LegacyErrorListener;
use VuFind\Search\Solr\V4\ErrorListener;
use VuFindSearch\Backend\BackendInterface;
use VuFindSearch\Backend\Solr\Backend;
use VuFindSearch\Backend\Solr\Connector;
use VuFindSearch\Backend\Solr\HandlerMap;
use VuFindSearch\Backend\Solr\LuceneSyntaxHelper;
use VuFindSearch\Backend\Solr\QueryBuilder;
use VuFindSearch\Backend\Solr\SimilarBuilder;
/**
* Abstract factory for SOLR backends.
*
* @category VuFind
* @package Search
* @author David Maus <maus@hab.de>
* @license http://opensource.org/licenses/gpl-2.0.php GNU General Public License
* @link https://vufind.org Main Site
*/
abstract class AbstractSolrBackendFactory implements FactoryInterface
{
/**
* Logger.
*
* @var \Laminas\Log\LoggerInterface
*/
protected $logger;
/**
* Superior service manager.
*
* @var ContainerInterface
*/
protected $serviceLocator;
/**
* Primary configuration file identifier.
*
* @var string
*/
protected $mainConfig = 'config';
/**
* Search configuration file identifier.
*
* @var string
*/
protected $searchConfig;
/**
* Facet configuration file identifier.
*
* @var string
*/
protected $facetConfig;
/**
* YAML searchspecs filename.
*
* @var string
*/
protected $searchYaml;
/**
* VuFind configuration reader
*
* @var \VuFind\Config\PluginManager
*/
protected $config;
/**
* Solr core name
*
* @var string
*/
protected $solrCore = '';
/**
* Solr field used to store unique identifiers
*
* @var string
*/
protected $uniqueKey = 'id';
/**
* Solr connector class
*
* @var string
*/
protected $connectorClass = Connector::class;
/**
* Solr backend class
*
* @var string
*/
protected $backendClass = Backend::class;
/**
* Constructor
*/
public function __construct()
{
}
/**
* Create service
*
* @param ContainerInterface $sm Service manager
* @param string $name Requested service name (unused)
* @param array $options Extra options (unused)
*
* @return Backend
*
* @SuppressWarnings(PHPMD.UnusedFormalParameter)
*/
public function __invoke(ContainerInterface $sm, $name, array $options = null)
{
$this->serviceLocator = $sm;
$this->config = $this->serviceLocator
->get(\VuFind\Config\PluginManager::class);
if ($this->serviceLocator->has(\VuFind\Log\Logger::class)) {
$this->logger = $this->serviceLocator->get(\VuFind\Log\Logger::class);
}
$connector = $this->createConnector();
$backend = $this->createBackend($connector);
$this->createListeners($backend);
return $backend;
}
/**
* Create the SOLR backend.
*
* @param Connector $connector Connector
*
* @return Backend
*/
protected function createBackend(Connector $connector)
{
$backend = new $this->backendClass($connector);
$backend->setQueryBuilder($this->createQueryBuilder());
$backend->setSimilarBuilder($this->createSimilarBuilder());
if ($this->logger) {
$backend->setLogger($this->logger);
}
return $backend;
}
/**
* Create listeners.
*
* @param Backend $backend Backend
*
* @return void
*/
protected function createListeners(Backend $backend)
{
$events = $this->serviceLocator->get('SharedEventManager');
// Load configurations:
$config = $this->config->get($this->mainConfig);
$search = $this->config->get($this->searchConfig);
$facet = $this->config->get($this->facetConfig);
// Highlighting
$this->getInjectHighlightingListener($backend, $search)->attach($events);
// Conditional Filters
if (isset($search->ConditionalHiddenFilters)
&& $search->ConditionalHiddenFilters->count() > 0
) {
$this->getInjectConditionalFilterListener($search)->attach($events);
}
// Spellcheck
if ($config->Spelling->enabled ?? true) {
$dictionaries = ($config->Spelling->simple ?? false)
? ['basicSpell'] : ['default', 'basicSpell'];
$spellingListener = new InjectSpellingListener($backend, $dictionaries);
$spellingListener->attach($events);
}
// Apply field stripping if applicable:
if (isset($search->StripFields) && isset($search->IndexShards)) {
$strip = $search->StripFields->toArray();
foreach ($strip as $k => $v) {
$strip[$k] = array_map('trim', explode(',', $v));
}
$mindexListener = new MultiIndexListener(
$backend,
$search->IndexShards->toArray(),
$strip,
$this->loadSpecs()
);
$mindexListener->attach($events);
}
// Apply deduplication if applicable:
if (isset($search->Records->deduplication)) {
$this->getDeduplicationListener(
$backend, $search->Records->deduplication
)->attach($events);
}
// Attach hierarchical facet listener:
$this->getHierarchicalFacetListener($backend)->attach($events);
// Apply legacy filter conversion if necessary:
$facets = $this->config->get($this->facetConfig);
if (!empty($facets->LegacyFields)) {
$filterFieldConversionListener = new FilterFieldConversionListener(
$facets->LegacyFields->toArray()
);
$filterFieldConversionListener->attach($events);
}
// Attach hide facet value listener:
if ($hfvListener = $this->getHideFacetValueListener($backend, $facet)) {
$hfvListener->attach($events);
}
// Attach error listeners for Solr 3.x and Solr 4.x (for backward
// compatibility with VuFind 1.x instances).
$legacyErrorListener = new LegacyErrorListener($backend);
$legacyErrorListener->attach($events);
$errorListener = new ErrorListener($backend);
$errorListener->attach($events);
}
/**
* Get the Solr core.
*
* @return string
*/
protected function getSolrCore()
{
return $this->solrCore;
}
/**
* Get the Solr URL.
*
* @param string $config name of configuration file (null for default)
*
* @return string|array
*/
protected function getSolrUrl($config = null)
{
$url = $this->config->get($config ?? $this->mainConfig)->Index->url;
$core = $this->getSolrCore();
if (is_object($url)) {
return array_map(
function ($value) use ($core) {
return "$value/$core";
},
$url->toArray()
);
}
return "$url/$core";
}
/**
* Get all hidden filter settings.
*
* @return array
*/
protected function getHiddenFilters()
{
$search = $this->config->get($this->searchConfig);
$hf = [];
// Hidden filters
if (isset($search->HiddenFilters)) {
foreach ($search->HiddenFilters as $field => $value) {
$hf[] = sprintf('%s:"%s"', $field, $value);
}
}
// Raw hidden filters
if (isset($search->RawHiddenFilters)) {
foreach ($search->RawHiddenFilters as $filter) {
$hf[] = $filter;
}
}
return $hf;
}
/**
* Create the SOLR connector.
*
* @return Connector
*/
protected function createConnector()
{
$config = $this->config->get($this->mainConfig);
$searchConfig = $this->config->get($this->searchConfig);
$defaultFields = $searchConfig->General->default_record_fields ?? '*';
$handlers = [
'select' => [
'fallback' => true,
'defaults' => ['fl' => $defaultFields],
'appends' => ['fq' => []],
],
'terms' => [
'functions' => ['terms'],
],
];
foreach ($this->getHiddenFilters() as $filter) {
array_push($handlers['select']['appends']['fq'], $filter);
}
$connector = new $this->connectorClass(
$this->getSolrUrl(), new HandlerMap($handlers), $this->uniqueKey
);
$connector->setTimeout(
isset($config->Index->timeout) ? $config->Index->timeout : 30
);
if ($this->logger) {
$connector->setLogger($this->logger);
}
if ($this->serviceLocator->has(\VuFindHttp\HttpService::class)) {
$connector->setProxy(
$this->serviceLocator->get(\VuFindHttp\HttpService::class)
);
}
return $connector;
}
/**
* Create the query builder.
*
* @return QueryBuilder
*/
protected function createQueryBuilder()
{
$specs = $this->loadSpecs();
$config = $this->config->get($this->mainConfig);
$defaultDismax = isset($config->Index->default_dismax_handler)
? $config->Index->default_dismax_handler : 'dismax';
$builder = new QueryBuilder($specs, $defaultDismax);
// Configure builder:
$search = $this->config->get($this->searchConfig);
$caseSensitiveBooleans
= isset($search->General->case_sensitive_bools)
? $search->General->case_sensitive_bools : true;
$caseSensitiveRanges
= isset($search->General->case_sensitive_ranges)
? $search->General->case_sensitive_ranges : true;
$helper = new LuceneSyntaxHelper(
$caseSensitiveBooleans, $caseSensitiveRanges
);
$builder->setLuceneHelper($helper);
return $builder;
}
/**
* Create the similar records query builder.
*
* @return SimilarBuilder
*/
protected function createSimilarBuilder()
{
return new SimilarBuilder(
$this->config->get($this->searchConfig), $this->uniqueKey
);
}
/**
* Load the search specs.
*
* @return array
*/
protected function loadSpecs()
{
return $this->serviceLocator->get(\VuFind\Config\SearchSpecsReader::class)
->get($this->searchYaml);
}
/**
* Get a deduplication listener for the backend
*
* @param BackendInterface $backend Search backend
* @param bool $enabled Whether deduplication is enabled
*
* @return DeduplicationListener
*/
protected function getDeduplicationListener(BackendInterface $backend, $enabled)
{
return new DeduplicationListener(
$backend,
$this->serviceLocator,
$this->searchConfig,
'datasources',
$enabled
);
}
/**
* Get a hide facet value listener for the backend
*
* @param BackendInterface $backend Search backend
* @param Config $facet Configuration of facets
*
* @return mixed null|HideFacetValueListener
*/
protected function getHideFacetValueListener(
BackendInterface $backend,
Config $facet
) {
if (!isset($facet->HideFacetValue)
|| ($facet->HideFacetValue->count()) == 0
) {
return null;
}
return new HideFacetValueListener(
$backend,
$facet->HideFacetValue->toArray()
);
}
/**
* Get a hierarchical facet listener for the backend
*
* @param BackendInterface $backend Search backend
*
* @return HierarchicalFacetListener
*/
protected function getHierarchicalFacetListener(BackendInterface $backend)
{
return new HierarchicalFacetListener(
$backend,
$this->serviceLocator,
$this->facetConfig
);
}
/**
* Get a highlighting listener for the backend
*
* @param BackendInterface $backend Search backend
* @param Config $search Search configuration
*
* @return InjectHighlightingListener
*/
protected function getInjectHighlightingListener(BackendInterface $backend,
Config $search
) {
$fl = isset($search->General->highlighting_fields)
? $search->General->highlighting_fields : '*';
return new InjectHighlightingListener($backend, $fl);
}
/**
* Get a Conditional Filter Listener
*
* @param Config $search Search configuration
*
* @return InjectConditionalFilterListener
*/
protected function getInjectConditionalFilterListener(Config $search)
{
$listener = new InjectConditionalFilterListener(
$search->ConditionalHiddenFilters->toArray()
);
$listener->setAuthorizationService(
$this->serviceLocator
->get(\LmcRbacMvc\Service\AuthorizationService::class)
);
return $listener;
}
}
| 1 | 30,188 | If record_batch_size is not set in config.ini, this code will trigger a notice about an undefined value. I would suggest either wrapping the setPageSize() call in an `if (!empty(...)) {` check, or else providing a default value in the set call (i.e. `$config->Index->record_batch_size ?? 100`). | vufind-org-vufind | php |
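A minimal sketch of the reviewer's second option applied to the patched `createBackend()`; the fallback of 100 is the illustrative value from the review comment, not a documented VuFind default:

```php
protected function createBackend(Connector $connector)
{
    $config = $this->config->get($this->mainConfig);
    $backend = new $this->backendClass($connector);
    // Null-coalesce so an unset record_batch_size does not trigger a notice.
    $backend->setPageSize($config->Index->record_batch_size ?? 100);
    $backend->setQueryBuilder($this->createQueryBuilder());
    $backend->setSimilarBuilder($this->createSimilarBuilder());
    if ($this->logger) {
        $backend->setLogger($this->logger);
    }
    return $backend;
}
```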
@@ -1,6 +1,6 @@
<script type="text/javascript">
window.analytics||(window.analytics=[]),window.analytics.methods=["identify","track","trackLink","trackForm","trackClick","trackSubmit","page","pageview","ab","alias","ready","group","on","once","off"],window.analytics.factory=function(t){return function(){var a=Array.prototype.slice.call(arguments);return a.unshift(t),window.analytics.push(a),window.analytics}};for(var i=0;i<window.analytics.methods.length;i++){var method=window.analytics.methods[i];window.analytics[method]=window.analytics.factory(method)}window.analytics.load=function(t){var a=document.createElement("script");a.type="text/javascript",a.async=!0,a.src=("https:"===document.location.protocol?"https://":"http://")+"d2dq2ahtl5zl1z.cloudfront.net/analytics.js/v1/"+t+"/analytics.min.js";var n=document.getElementsByTagName("script")[0];n.parentNode.insertBefore(a,n)},window.analytics.SNIPPET_VERSION="2.0.8",
- window.analytics.load("2nexpdgku3");
+ window.analytics.load(<%= ENV['SEGMENT_KEY']%>);
window.analytics.page();
</script>
| 1 | <script type="text/javascript">
window.analytics||(window.analytics=[]),window.analytics.methods=["identify","track","trackLink","trackForm","trackClick","trackSubmit","page","pageview","ab","alias","ready","group","on","once","off"],window.analytics.factory=function(t){return function(){var a=Array.prototype.slice.call(arguments);return a.unshift(t),window.analytics.push(a),window.analytics}};for(var i=0;i<window.analytics.methods.length;i++){var method=window.analytics.methods[i];window.analytics[method]=window.analytics.factory(method)}window.analytics.load=function(t){var a=document.createElement("script");a.type="text/javascript",a.async=!0,a.src=("https:"===document.location.protocol?"https://":"http://")+"d2dq2ahtl5zl1z.cloudfront.net/analytics.js/v1/"+t+"/analytics.min.js";var n=document.getElementsByTagName("script")[0];n.parentNode.insertBefore(a,n)},window.analytics.SNIPPET_VERSION="2.0.8",
window.analytics.load("2nexpdgku3");
window.analytics.page();
</script>
<% if signed_in? %>
<%= render 'shared/signed_in_analytics' %>
<% end %>
| 1 | 9,132 | I didn't realize we were hardcoding this, thanks for moving it to an env value. | thoughtbot-upcase | rb |
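One detail worth flagging in this patch: the hardcoded key was quoted, but the ERB interpolation is not, so the rendered JavaScript would contain a bare identifier and fail to parse. A sketch of the quoted form, assuming SEGMENT_KEY holds a plain string:

```erb
window.analytics.load("<%= ENV['SEGMENT_KEY'] %>");
```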
@@ -99,7 +99,7 @@ module.exports = function fileItem (props) {
}
</h4>
<div class="UppyDashboardItem-status">
- ${file.data.size && html`<div class="UppyDashboardItem-statusSize">${prettyBytes(file.data.size)}</div>`}
+ ${isNaN(file.data.size) ? '' : html`<div class="UppyDashboardItem-statusSize">${prettyBytes(file.data.size)}</div>`}
${file.source && html`<div class="UppyDashboardItem-sourceIcon">
${acquirers.map(acquirer => {
if (acquirer.id === file.source) return html`<span title="${props.i18n('fileSource')}: ${acquirer.name}">${acquirer.icon()}</span>` | 1 | const html = require('yo-yo')
const { getETA,
getSpeed,
prettyETA,
getFileNameAndExtension,
truncateString,
copyToClipboard } = require('../../core/Utils')
const prettyBytes = require('prettier-bytes')
const FileItemProgress = require('./FileItemProgress')
const getFileTypeIcon = require('./getFileTypeIcon')
const { iconEdit, iconCopy, iconRetry } = require('./icons')
module.exports = function fileItem (props) {
const file = props.file
const acquirers = props.acquirers
const isProcessing = file.progress.preprocess || file.progress.postprocess
const isUploaded = file.progress.uploadComplete && !isProcessing && !file.error
const uploadInProgressOrComplete = file.progress.uploadStarted || isProcessing
const uploadInProgress = (file.progress.uploadStarted && !file.progress.uploadComplete) || isProcessing
const isPaused = file.isPaused || false
const error = file.error || false
const fileName = getFileNameAndExtension(file.meta.name).name
const truncatedFileName = props.isWide ? truncateString(fileName, 15) : fileName
const onPauseResumeCancelRetry = (ev) => {
if (isUploaded) return
if (error) {
props.retryUpload(file.id)
return
}
if (props.resumableUploads) {
props.pauseUpload(file.id)
} else {
props.cancelUpload(file.id)
}
}
return html`<li class="UppyDashboardItem
${uploadInProgress ? 'is-inprogress' : ''}
${isProcessing ? 'is-processing' : ''}
${isUploaded ? 'is-complete' : ''}
${isPaused ? 'is-paused' : ''}
${error ? 'is-error' : ''}
${props.resumableUploads ? 'is-resumable' : ''}"
id="uppy_${file.id}"
title="${file.meta.name}">
<div class="UppyDashboardItem-preview">
<div class="UppyDashboardItem-previewInnerWrap" style="background-color: ${getFileTypeIcon(file.type).color}">
${file.preview
? html`<img alt="${file.name}" src="${file.preview}">`
: html`<div class="UppyDashboardItem-previewIconWrap">
<span class="UppyDashboardItem-previewIcon" style="color: ${getFileTypeIcon(file.type).color}">${getFileTypeIcon(file.type).icon}</span>
<svg class="UppyDashboardItem-previewIconBg" width="72" height="93" viewBox="0 0 72 93"><g><path d="M24.08 5h38.922A2.997 2.997 0 0 1 66 8.003v74.994A2.997 2.997 0 0 1 63.004 86H8.996A2.998 2.998 0 0 1 6 83.01V22.234L24.08 5z" fill="#FFF"/><path d="M24 5L6 22.248h15.007A2.995 2.995 0 0 0 24 19.244V5z" fill="#E4E4E4"/></g></svg>
</div>`
}
</div>
<div class="UppyDashboardItem-progress">
<button class="UppyDashboardItem-progressBtn"
type="button"
title="${isUploaded
? 'upload complete'
: props.resumableUploads
? file.isPaused
? 'resume upload'
: 'pause upload'
: 'cancel upload'
}"
onclick=${onPauseResumeCancelRetry}>
${error
? iconRetry()
: FileItemProgress({
progress: file.progress.percentage,
fileID: file.id
})
}
</button>
${props.showProgressDetails
? html`<div class="UppyDashboardItem-progressInfo"
title="${props.i18n('fileProgress')}"
aria-label="${props.i18n('fileProgress')}">
${!file.isPaused && !isUploaded
? html`<span>${prettyETA(getETA(file.progress))} ・ ↑ ${prettyBytes(getSpeed(file.progress))}/s</span>`
: null
}
</div>`
: null
}
</div>
</div>
<div class="UppyDashboardItem-info">
<h4 class="UppyDashboardItem-name" title="${fileName}">
${file.uploadURL
? html`<a href="${file.uploadURL}" target="_blank">
${file.extension ? truncatedFileName + '.' + file.extension : truncatedFileName}
</a>`
: file.extension ? truncatedFileName + '.' + file.extension : truncatedFileName
}
</h4>
<div class="UppyDashboardItem-status">
${file.data.size && html`<div class="UppyDashboardItem-statusSize">${prettyBytes(file.data.size)}</div>`}
${file.source && html`<div class="UppyDashboardItem-sourceIcon">
${acquirers.map(acquirer => {
if (acquirer.id === file.source) return html`<span title="${props.i18n('fileSource')}: ${acquirer.name}">${acquirer.icon()}</span>`
})}
</div>`
}
</div>
${!uploadInProgressOrComplete
? html`<button class="UppyDashboardItem-edit"
type="button"
aria-label="Edit file"
title="Edit file"
onclick=${(e) => props.showFileCard(file.id)}>
${iconEdit()}</button>`
: null
}
${file.uploadURL
? html`<button class="UppyDashboardItem-copyLink"
type="button"
aria-label="Copy link"
title="Copy link"
onclick=${() => {
copyToClipboard(file.uploadURL, props.i18n('copyLinkToClipboardFallback'))
.then(() => {
props.log('Link copied to clipboard.')
props.info(props.i18n('copyLinkToClipboardSuccess'), 'info', 3000)
})
.catch(props.log)
}}>${iconCopy()}</button>`
: null
}
</div>
<div class="UppyDashboardItem-action">
${!isUploaded
? html`<button class="UppyDashboardItem-remove"
type="button"
aria-label="Remove file"
title="Remove file"
onclick=${() => props.removeFile(file.id)}>
<svg class="UppyIcon" width="22" height="21" viewBox="0 0 18 17">
<ellipse cx="8.62" cy="8.383" rx="8.62" ry="8.383"/>
<path stroke="#FFF" fill="#FFF" d="M11 6.147L10.85 6 8.5 8.284 6.15 6 6 6.147 8.35 8.43 6 10.717l.15.146L8.5 8.578l2.35 2.284.15-.146L8.65 8.43z"/>
</svg>
</button>`
: null
}
</div>
</li>`
}
| 1 | 10,142 | We are trying to support IE 10-11, so we'll need a polyfill for this one, I think. | transloadit-uppy | js |
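For context on the polyfill concern: the patch uses the global `isNaN()`, which dates back to ES1 and is available in IE 10-11, so no polyfill is needed for it. A polyfill would only come into play if this were switched to `Number.isNaN()`; a minimal sketch of that polyfill:

```js
// Number.isNaN polyfill sketch for older browsers such as IE 10-11.
// NaN is the only JavaScript value that is not equal to itself.
if (typeof Number.isNaN !== 'function') {
  Number.isNaN = function (value) {
    return typeof value === 'number' && value !== value
  }
}
```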
@@ -38,6 +38,9 @@ const { useSelect, useDispatch } = Data;
function ResetButton( { children } ) {
const postResetURL = useSelect( ( select ) => select( CORE_SITE ).getAdminURL( 'googlesitekit-splash', { notification: 'reset_success' } ) );
+ const isNavigating = useSelect( ( select ) => select( CORE_LOCATION ).isNavigating() );
+ const navigatingURL = useSelect( ( select ) => select( CORE_LOCATION ).getNavigateURL() );
+
const [ dialogActive, setDialogActive ] = useState( false );
useEffect( () => { | 1 | /**
* ResetButton component.
*
* Site Kit by Google, Copyright 2021 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* WordPress dependencies
*/
import { __ } from '@wordpress/i18n';
import { Fragment, useState, useEffect, useCallback, createInterpolateElement } from '@wordpress/element';
import { ESCAPE } from '@wordpress/keycodes';
/**
* Internal dependencies
*/
import Data from 'googlesitekit-data';
import { clearWebStorage } from '../util';
import Dialog from './Dialog';
import Modal from './Modal';
import Link from './Link';
import { CORE_SITE } from '../googlesitekit/datastore/site/constants';
import { CORE_LOCATION } from '../googlesitekit/datastore/location/constants';
const { useSelect, useDispatch } = Data;
function ResetButton( { children } ) {
const postResetURL = useSelect( ( select ) => select( CORE_SITE ).getAdminURL( 'googlesitekit-splash', { notification: 'reset_success' } ) );
const [ dialogActive, setDialogActive ] = useState( false );
useEffect( () => {
const handleCloseModal = ( event ) => {
if ( ESCAPE === event.keyCode ) {
// Only close the modal if the "Escape" key is pressed.
setDialogActive( false );
}
};
if ( dialogActive ) {
// When the dialogActive changes and it is set to true(has opened), add the event listener.
global.addEventListener( 'keyup', handleCloseModal, false );
}
// Remove the event listener when the dialog is removed; there's no need
// to have it attached when it won't be used.
return () => {
if ( dialogActive ) {
// When the dialogActive is true(is open) and its value changes, remove the event listener.
global.removeEventListener( 'keyup', handleCloseModal );
}
};
}, [ dialogActive ] );
const { reset } = useDispatch( CORE_SITE );
const { navigateTo } = useDispatch( CORE_LOCATION );
const handleUnlinkConfirm = useCallback( async () => {
await reset();
clearWebStorage();
navigateTo( postResetURL );
}, [ reset, postResetURL ] );
const toggleDialogActive = useCallback( () => {
setDialogActive( ! dialogActive );
}, [ dialogActive ] );
const openDialog = useCallback( () => {
setDialogActive( true );
}, [] );
return (
<Fragment>
<Link
className="googlesitekit-reset-button"
onClick={ openDialog }
inherit
>
{ children || __( 'Reset Site Kit', 'google-site-kit' ) }
</Link>
<Modal>
<Dialog
dialogActive={ dialogActive }
handleConfirm={ handleUnlinkConfirm }
handleDialog={ toggleDialogActive }
title={ __( 'Reset Site Kit', 'google-site-kit' ) }
subtitle={ createInterpolateElement(
__( `Resetting will disconnect all users and remove all Site Kit settings and data within WordPress. <br />You and any other users who wish to use Site Kit will need to reconnect to restore access.`, 'google-site-kit' ),
{
br: <br />,
} ) }
confirmButton={ __( 'Reset', 'google-site-kit' ) }
danger
/>
</Modal>
</Fragment>
);
}
export default ResetButton;
| 1 | 35,547 | It looks like there's a new `isNavigatingTo( url )` selector for this very purpose so let's use this here instead. This way we just need to use the one selector rather than two. Let's assign that to a similar-named variable here (e.g. `isNavigatingToPostResetURL`) rather than the prop it's used with. | google-site-kit-wp | js |
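A sketch of the replacement the reviewer describes, collapsing the two CORE_LOCATION selectors into the single `isNavigatingTo( url )` selector named in the comment:

```js
// One selector in place of isNavigating() + getNavigateURL().
const isNavigatingToPostResetURL = useSelect(
	( select ) => select( CORE_LOCATION ).isNavigatingTo( postResetURL )
);
```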
@@ -29,8 +29,8 @@
namespace lbann {
-void im2col(const Mat& im,
- Mat& col,
+void im2col(const AbsMat& im,
+ AbsMat& col,
const int num_channels,
const int im_num_dims,
const int * im_dims, | 1 | ////////////////////////////////////////////////////////////////////////////////
// Copyright (c) 2014-2016, Lawrence Livermore National Security, LLC.
// Produced at the Lawrence Livermore National Laboratory.
// Written by the LBANN Research Team (B. Van Essen, et al.) listed in
// the CONTRIBUTORS file. <lbann-dev@llnl.gov>
//
// LLNL-CODE-697807.
// All rights reserved.
//
// This file is part of LBANN: Livermore Big Artificial Neural Network
// Toolkit. For details, see http://software.llnl.gov/LBANN or
// https://github.com/LLNL/LBANN.
//
// Licensed under the Apache License, Version 2.0 (the "License"); you
// may not use this file except in compliance with the License. You may
// obtain a copy of the License at:
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
// implied. See the License for the specific language governing
// permissions and limitations under the license.
////////////////////////////////////////////////////////////////////////////////
#include "lbann/utils/im2col.hpp"
#include "lbann/utils/exception.hpp"
namespace lbann {
void im2col(const Mat& im,
Mat& col,
const int num_channels,
const int im_num_dims,
const int * im_dims,
const int * im_pads,
const int * window_dims,
const int * window_strides) {
// Input and output parameters
const int col_height = col.Height();
const int col_width = col.Width();
const DataType *__restrict__ im_buffer = im.LockedBuffer();
DataType *__restrict__ col_buffer = col.Buffer();
// im2col parameters
std::vector<int> offset_start(im_num_dims);
std::vector<int> offset_end(im_num_dims);
std::vector<int> offset_stride(im_num_dims);
std::vector<int> offset_num(im_num_dims);
for(int d = 0; d < im_num_dims; ++d) {
offset_start[d] = -im_pads[d];
offset_end[d] = im_dims[d] + im_pads[d] - window_dims[d] + 1;
offset_stride[d] = window_strides[d];
offset_num[d] = (offset_end[d] - offset_start[d] + offset_stride[d] - 1) / offset_stride[d];
}
#ifdef LBANN_DEBUG
const int im_size = im.Height();
// Check matrix dimensions
const int expected_im_size = std::accumulate(im_dims,
im_dims + im_num_dims,
num_channels,
std::multiplies<int>());
const int expected_col_height = std::accumulate(window_dims,
window_dims + im_num_dims,
num_channels,
std::multiplies<int>());
const int expected_col_width = std::accumulate(offset_num.begin(),
offset_num.end(),
1,
std::multiplies<int>());
if(im_size != expected_im_size || im.Width() != 1) {
std::stringstream ss;
ss << "im2col: im matrix has invalid dimensions "
<< "(expected " << expected_im_size << " x " << 1 << ", "
<< "found " << im_size << " x " << im.Width() << ")";
throw lbann_exception(ss.str());
}
if(col_height != expected_col_height
|| col_width != expected_col_width) {
std::stringstream ss;
ss << "im2col: col matrix has invalid dimensions "
<< "(expected " << expected_col_height << " x " << expected_col_width << ", "
<< "found " << col_height << " x " << col_width << ")";
throw lbann_exception(ss.str());
}
#endif // LBANN_DEBUG
// Call optimized routine for 1x1 im2col
std::vector<int> zeros(im_num_dims, 0), ones(im_num_dims, 1);
if(std::equal(im_pads, im_pads + im_num_dims, zeros.begin())
&& std::equal(window_dims, window_dims + im_num_dims, ones.begin())
&& std::equal(window_strides, window_strides + im_num_dims, ones.begin())) {
im2col_1x1(im_buffer, col_buffer, num_channels, im_num_dims, im_dims);
return;
}
// Call optimized routine for 2D data
if(im_num_dims == 2) {
im2col_2d(im_buffer, col_buffer,
im_dims[1], im_dims[0], im_pads[1], im_pads[0], num_channels,
window_dims[1], window_dims[0],
window_strides[1], window_strides[0]);
return;
}
// Iterate through col matrix columns
#pragma omp parallel for
for(int col_col = 0; col_col < col_width; ++col_col) {
// Initialize arrays
std::vector<int> offset_pos(im_num_dims);
std::vector<int> window_pos(im_num_dims);
// Get position of current offset
int col_col_remainder = col_col;
for(int d = im_num_dims-1; d >= 0; --d) {
const int offset = col_col_remainder % offset_num[d];
offset_pos[d] = offset_start[d] + offset * offset_stride[d];
col_col_remainder /= offset_num[d];
}
// Iterate through col matrix entries
for(int col_row = 0; col_row < col_height; ++col_row) {
const int col_index = col_row + col_col * col_height;
// Get position in window and channel
int col_row_remainder = col_row;
for(int d = im_num_dims-1; d >= 0; --d) {
window_pos[d] = col_row_remainder % window_dims[d];
col_row_remainder /= window_dims[d];
}
const int channel = col_row_remainder;
// Get im matrix entry
bool im_pos_valid = true;
int im_index = channel;
for(int d = 0; d < im_num_dims; ++d) {
const int im_pos = offset_pos[d] + window_pos[d];
im_pos_valid = im_pos_valid && 0 <= im_pos && im_pos < im_dims[d];
im_index = im_pos + im_index * im_dims[d];
}
// Copy im matrix entry to col matrix if valid
col_buffer[col_index] = (im_pos_valid ?
im_buffer[im_index] : DataType(0));
}
}
}
void col2im(const Mat& col,
Mat& im,
const int num_channels,
const int im_num_dims,
const int * im_dims,
const int * im_pads,
const int * window_dims,
const int * window_strides) {
// Input and output parameters
const DataType *__restrict__ col_buffer = col.LockedBuffer();
DataType *__restrict__ im_buffer = im.Buffer();
// col2im parameters
std::vector<int> offset_start(im_num_dims);
std::vector<int> offset_end(im_num_dims);
std::vector<int> offset_stride(im_num_dims);
std::vector<int> offset_num(im_num_dims);
for(int d = 0; d < im_num_dims; ++d) {
offset_start[d] = -im_pads[d];
offset_end[d] = im_dims[d] + im_pads[d] - window_dims[d] + 1;
offset_stride[d] = window_strides[d];
offset_num[d] = (offset_end[d] - offset_start[d] + offset_stride[d] - 1) / offset_stride[d];
}
#ifdef LBANN_DEBUG
const int im_size = im.Height();
const int col_height = col.Height();
const int col_width = col.Width();
// Check matrix dimensions
const int expected_im_size = std::accumulate(im_dims,
im_dims + im_num_dims,
num_channels,
std::multiplies<int>());
const int expected_col_height = std::accumulate(window_dims,
window_dims + im_num_dims,
num_channels,
std::multiplies<int>());
const int expected_col_width = std::accumulate(offset_num.begin(),
offset_num.end(),
1,
std::multiplies<int>());
if(im_size != expected_im_size || im.Width() != 1) {
std::stringstream ss;
ss << "im2col: im matrix has invalid dimensions "
<< "(expected " << expected_im_size << " x " << 1 << ", "
<< "found " << im_size << " x " << im.Width() << ")";
throw lbann_exception(ss.str());
}
if(col_height != expected_col_height
|| col_width != expected_col_width) {
std::stringstream ss;
ss << "im2col: col matrix has invalid dimensions "
<< "(expected " << expected_col_height << " x " << expected_col_width << ", "
<< "found " << col_height << " x " << col_width << ")";
throw lbann_exception(ss.str());
}
#endif // LBANN_DEBUG
// Call optimized routine for 1x1 col2im
std::vector<int> zeros(im_num_dims, 0), ones(im_num_dims, 1);
if(std::equal(im_pads, im_pads + im_num_dims, zeros.begin())
&& std::equal(window_dims, window_dims + im_num_dims, ones.begin())
&& std::equal(window_strides, window_strides + im_num_dims, ones.begin())) {
col2im_1x1(col_buffer, im_buffer, num_channels, im_num_dims, im_dims);
return;
}
// Call optimized routine for 2D data
if(im_num_dims == 2) {
col2im_2d(col_buffer, im_buffer,
im_dims[1], im_dims[0], im_pads[1], im_pads[0], num_channels,
window_dims[1], window_dims[0],
window_strides[1], window_strides[0]);
return;
}
// Default algorithm
col2im(col, im, num_channels, im_num_dims,
im_dims, im_pads, window_dims, window_strides,
std::plus<DataType>());
}
void col2im(const Mat& col,
Mat& im,
const int num_channels,
const int im_num_dims,
const int * im_dims,
const int * im_pads,
const int * window_dims,
const int * window_strides,
std::function<DataType(const DataType&,const DataType&)> reduction_op) {
// Input and output parameters
const int col_height = col.Height();
const int im_size = im.Height();
const DataType *__restrict__ col_buffer = col.LockedBuffer();
DataType *__restrict__ im_buffer = im.Buffer();
// im2col parameters
std::vector<int> offset_start(im_num_dims);
std::vector<int> offset_end(im_num_dims);
std::vector<int> offset_stride(im_num_dims);
std::vector<int> offset_num(im_num_dims);
for(int d = 0; d < im_num_dims; ++d) {
offset_start[d] = -im_pads[d];
offset_end[d] = im_dims[d] + im_pads[d] - window_dims[d] + 1;
offset_stride[d] = window_strides[d];
offset_num[d] = (offset_end[d] - offset_start[d] + offset_stride[d] - 1) / offset_stride[d];
}
// Call optimized routine for 1x1 col2im
std::vector<int> zeros(im_num_dims, 0), ones(im_num_dims, 1);
if(std::equal(im_pads, im_pads + im_num_dims, zeros.begin())
&& std::equal(window_dims, window_dims + im_num_dims, ones.begin())
&& std::equal(window_strides, window_strides + im_num_dims, ones.begin())) {
col2im_1x1(col_buffer, im_buffer, num_channels, im_num_dims, im_dims);
return;
}
// Iterate through im matrix entries
#pragma omp parallel for
for(int im_index = 0; im_index < im_size; ++im_index) {
// Initialize arrays
std::vector<int> im_pos(im_num_dims);
std::vector<int> first_offset(im_num_dims);
std::vector<int> last_offset(im_num_dims);
std::vector<int> offset(im_num_dims);
// Get position of im matrix entry
int im_index_remainder = im_index;
for(int d = im_num_dims-1; d >= 0; --d) {
im_pos[d] = im_index_remainder % im_dims[d];
im_index_remainder /= im_dims[d];
}
const int channel = im_index_remainder;
// Initialize im matrix entry
DataType im_entry = 0;
bool im_entry_initialized = false;
bool offsets_finished = false;
// Get window offsets containing im matrix entry
for(int d = 0; d < im_num_dims; ++d) {
first_offset[d] = (im_pos[d] - offset_start[d] - window_dims[d] + offset_stride[d]) / offset_stride[d];
first_offset[d] = std::max(first_offset[d], 0);
last_offset[d] = (im_pos[d] - offset_start[d]) / offset_stride[d];
last_offset[d] = std::min(last_offset[d], offset_num[d] - 1);
offset[d] = first_offset[d];
if(first_offset[d] > last_offset[d]) {
offsets_finished = true;
}
}
// Iterate through window offsets containing im matrix entry
while(!offsets_finished) {
// Get col matrix entry corresponding to im matrix entry
int col_row = channel;
int col_col = 0;
for(int d = 0; d < im_num_dims; ++d) {
const int window_pos = im_pos[d] - (offset_start[d] + offset[d] * offset_stride[d]);
col_row = window_pos + col_row * window_dims[d];
col_col = offset[d] + col_col * offset_num[d];
}
const int col_index = col_row + col_col * col_height;
// Add col matrix entry to im matrix entry
const DataType col_entry = col_buffer[col_index];
im_entry = (im_entry_initialized ?
reduction_op(im_entry, col_entry) :
col_entry);
im_entry_initialized = true;
// Move to next window offset
++offset[im_num_dims-1];
for(int d = im_num_dims-1; d >= 1; --d) {
if(offset[d] > last_offset[d]) {
offset[d] = first_offset[d];
++offset[d-1];
}
}
offsets_finished = offset[0] > last_offset[0];
}
// Update output entry
im_buffer[im_index] = im_entry;
}
}
void im2col_1x1(const DataType * input_buffer,
DataType * output_buffer,
const int num_channels,
const int num_input_dims,
const int * input_dims) {
const int spatial_size = std::accumulate(input_dims,
input_dims + num_input_dims,
1,
std::multiplies<int>());
const Mat input_matrix(spatial_size, num_channels, input_buffer, spatial_size);
Mat output_matrix(num_channels, spatial_size, output_buffer, num_channels);
El::Transpose(input_matrix, output_matrix);
}
void im2col_2d(const DataType *__restrict__ input_buffer,
DataType *__restrict__ output_buffer,
const int input_dim_x,
const int input_dim_y,
const int input_pad_x,
const int input_pad_y,
const int num_channels,
const int window_dim_x,
const int window_dim_y,
const int offset_stride_x,
const int offset_stride_y) {
// im2col parameters
const int offset_start_x = -input_pad_x;
const int offset_start_y = -input_pad_y;
const int offset_end_x = input_dim_x + input_pad_x - window_dim_x + 1;
const int offset_end_y = input_dim_y + input_pad_y - window_dim_y + 1;
const int offset_num_x = (offset_end_x - offset_start_x + offset_stride_x - 1) / offset_stride_x;
const int offset_num_y = (offset_end_y - offset_start_y + offset_stride_y - 1) / offset_stride_y;
const int output_height = num_channels * window_dim_x * window_dim_y;
// Iterate through output matrix entries
#pragma omp parallel for collapse(5)
for(int offset_y = 0; offset_y < offset_num_y; ++offset_y) {
for(int offset_x = 0; offset_x < offset_num_x; ++offset_x) {
for(int channel = 0; channel < num_channels; ++channel) {
for(int window_pos_y = 0;
window_pos_y < window_dim_y;
++window_pos_y) {
for(int window_pos_x = 0;
window_pos_x < window_dim_x;
++window_pos_x) {
// Get input entry
const int offset_pos_y = offset_start_y + offset_y * offset_stride_y;
const int offset_pos_x = offset_start_x + offset_x * offset_stride_x;
const int input_pos_y = offset_pos_y + window_pos_y;
const int input_pos_x = offset_pos_x + window_pos_x;
const int input_index = (input_pos_x
+ input_pos_y * input_dim_x
+ channel * input_dim_x * input_dim_y);
const bool input_pos_valid = (0 <= input_pos_y
&& input_pos_y < input_dim_y
&& 0 <= input_pos_x
&& input_pos_x < input_dim_x);
// Get output entry
const int output_row = (window_pos_x
+ window_pos_y * window_dim_x
+ channel * window_dim_x * window_dim_y);
const int output_col = offset_x + offset_y * offset_num_x;
const int output_index = output_row + output_col * output_height;
// Copy input entry to output entry if valid
output_buffer[output_index]
= input_pos_valid ? input_buffer[input_index] : DataType(0);
}
}
}
}
}
}
void col2im_1x1(const DataType * input_buffer,
DataType * output_buffer,
const int num_channels,
const int num_output_dims,
const int * output_dims) {
const int spatial_size = std::accumulate(output_dims,
output_dims + num_output_dims,
1,
std::multiplies<int>());
const Mat input_matrix(num_channels, spatial_size, input_buffer, num_channels);
Mat output_matrix(spatial_size, num_channels, output_buffer, spatial_size);
El::Transpose(input_matrix, output_matrix);
}
void col2im_2d(const DataType *__restrict__ input_buffer,
DataType *__restrict__ output_buffer,
const int output_dim_x,
const int output_dim_y,
const int output_pad_x,
const int output_pad_y,
const int num_channels,
const int window_dim_x,
const int window_dim_y,
const int offset_stride_x,
const int offset_stride_y) {
// col2im parameters
const int offset_start_x = -output_pad_x;
const int offset_start_y = -output_pad_y;
const int offset_end_x = output_dim_x + output_pad_x - window_dim_x + 1;
const int offset_end_y = output_dim_y + output_pad_y - window_dim_y + 1;
const int offset_num_x = (offset_end_x - offset_start_x + offset_stride_x - 1) / offset_stride_x;
const int offset_num_y = (offset_end_y - offset_start_y + offset_stride_y - 1) / offset_stride_y;
const int input_height = num_channels * window_dim_x * window_dim_y;
// Iterate through output entries
#pragma omp parallel for collapse(3)
for(int channel = 0; channel < num_channels; ++channel) {
for(int output_pos_y = 0;
output_pos_y < output_dim_y;
++output_pos_y) {
for(int output_pos_x = 0;
output_pos_x < output_dim_x;
++output_pos_x) {
// Get output entry
const int output_index = (output_pos_x
+ output_pos_y * output_dim_x
+ channel * output_dim_x * output_dim_y);
DataType output_entry = 0;
// Get window offsets containing output entry
const int offset_x_lower = (output_pos_x - offset_start_x - window_dim_x + offset_stride_x) / offset_stride_x;
const int offset_y_lower = (output_pos_y - offset_start_y - window_dim_y + offset_stride_y) / offset_stride_y;
const int offset_x_upper = (output_pos_x - offset_start_x) / offset_stride_x;
const int offset_y_upper = (output_pos_y - offset_start_y) / offset_stride_y;
const int first_offset_x = std::max(offset_x_lower, 0);
const int first_offset_y = std::max(offset_y_lower, 0);
const int last_offset_x = std::min(offset_x_upper, offset_num_x - 1);
const int last_offset_y = std::min(offset_y_upper, offset_num_y - 1);
// Iterate through window offsets
for(int offset_y = first_offset_y;
offset_y <= last_offset_y;
++offset_y) {
const int window_pos_y = output_pos_y - (offset_start_y + offset_y * offset_stride_y);
for(int offset_x = first_offset_x;
offset_x <= last_offset_x;
++offset_x) {
const int window_pos_x = output_pos_x - (offset_start_x + offset_x * offset_stride_x);
// Get input entry
const int input_row = (window_pos_x
+ window_pos_y * window_dim_x
+ channel * window_dim_x * window_dim_y);
const int input_col = offset_x + offset_y * offset_num_x;
const int input_index = input_row + input_col * input_height;
// Add input entry to output entry
output_entry += input_buffer[input_index];
}
}
// Update output entry
output_buffer[output_index] = output_entry;
}
}
}
}
} // namespace lbann
| 1 | 12,526 | I think im2col should only accommodate CPUMat. | LLNL-lbann | cpp |
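What the reviewer's suggestion might look like as a follow-up diff, assuming `CPUMat` is LBANN's CPU-resident matrix typedef:

```diff
-void im2col(const AbsMat& im,
-            AbsMat& col,
+void im2col(const CPUMat& im,
+            CPUMat& col,
             const int num_channels,
```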
@@ -24,6 +24,8 @@ from google.cloud.forseti.notifier.notifiers import cscc_notifier
from google.cloud.forseti.notifier.notifiers.inventory_summary import InventorySummary
from google.cloud.forseti.services.inventory.storage import DataAccess
from google.cloud.forseti.services.scanner import dao as scanner_dao
+from google.cloud.forseti.common.util.email.email_factory import EmailFactory
+from google.cloud.forseti.notifier.notifiers import email_violations
# pylint: enable=line-too-long
| 1 | # Copyright 2017 The Forseti Security Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Notifier runner."""
import importlib
import inspect
# pylint: disable=line-too-long
from google.cloud.forseti.common.util import logger
from google.cloud.forseti.common.util import string_formats
from google.cloud.forseti.notifier.notifiers.base_notification import BaseNotification
from google.cloud.forseti.notifier.notifiers import cscc_notifier
from google.cloud.forseti.notifier.notifiers.inventory_summary import InventorySummary
from google.cloud.forseti.services.inventory.storage import DataAccess
from google.cloud.forseti.services.scanner import dao as scanner_dao
# pylint: enable=line-too-long
LOGGER = logger.get_logger(__name__)
# pylint: disable=inconsistent-return-statements
def find_notifiers(notifier_name):
"""Get the first class in the given sub module
Args:
notifier_name (str): Name of the notifier.
Returns:
class: The class in the sub module
"""
try:
module = importlib.import_module(
'google.cloud.forseti.notifier.notifiers.{0}'.format(
notifier_name))
for filename in dir(module):
obj = getattr(module, filename)
if inspect.isclass(obj) \
and issubclass(obj, BaseNotification) \
and obj is not BaseNotification:
return obj
except ImportError:
LOGGER.exception('Can\'t import notifier %s', notifier_name)
# pylint: enable=inconsistent-return-statements
def convert_to_timestamp(violations):
"""Convert violation created_at_datetime to timestamp string.
Args:
violations (dict): List of violations as dict with
created_at_datetime.
Returns:
list: List of violations as dict with created_at_datetime
converted to timestamp string.
"""
for violation in violations:
violation['created_at_datetime'] = (
violation['created_at_datetime'].strftime(
string_formats.TIMESTAMP_TIMEZONE))
return violations
# pylint: disable=too-many-branches,too-many-statements
def run(inventory_index_id,
scanner_index_id,
progress_queue,
service_config=None):
"""Run the notifier.
Entry point when the notifier is run as a library.
Args:
inventory_index_id (int64): Inventory index id.
scanner_index_id (int64): Scanner index id.
progress_queue (Queue): The progress queue.
service_config (ServiceConfig): Forseti 2.0 service configs.
Returns:
int: Status code.
"""
# pylint: disable=too-many-locals
global_configs = service_config.get_global_config()
notifier_configs = service_config.get_notifier_config()
with service_config.scoped_session() as session:
if scanner_index_id:
inventory_index_id = (
DataAccess.get_inventory_index_id_by_scanner_index_id(
session,
scanner_index_id))
else:
if not inventory_index_id:
inventory_index_id = (
DataAccess.get_latest_inventory_index_id(session))
scanner_index_id = scanner_dao.get_latest_scanner_index_id(
session, inventory_index_id)
if not scanner_index_id:
LOGGER.error(
'No success or partial success scanner index found for '
'inventory index: "%s".', str(inventory_index_id))
else:
# get violations
violation_access = scanner_dao.ViolationAccess(session)
violations = violation_access.list(
scanner_index_id=scanner_index_id)
violations_as_dict = []
for violation in violations:
violations_as_dict.append(
scanner_dao.convert_sqlalchemy_object_to_dict(violation))
violations_as_dict = convert_to_timestamp(violations_as_dict)
violation_map = scanner_dao.map_by_resource(violations_as_dict)
for retrieved_v in violation_map:
log_message = (
'Retrieved {} violations for resource \'{}\''.format(
len(violation_map[retrieved_v]), retrieved_v))
LOGGER.info(log_message)
progress_queue.put(log_message)
# build notification notifiers
notifiers = []
for resource in notifier_configs['resources']:
if violation_map.get(resource['resource']) is None:
log_message = 'Resource \'{}\' has no violations'.format(
resource['resource'])
progress_queue.put(log_message)
LOGGER.info(log_message)
continue
if not resource['should_notify']:
LOGGER.debug('Not notifying for: %s', resource['resource'])
continue
for notifier in resource['notifiers']:
log_message = (
'Running \'{}\' notifier for resource \'{}\''.format(
notifier['name'], resource['resource']))
progress_queue.put(log_message)
LOGGER.info(log_message)
chosen_pipeline = find_notifiers(notifier['name'])
notifiers.append(chosen_pipeline(
resource['resource'], inventory_index_id,
violation_map[resource['resource']], global_configs,
notifier_configs, notifier['configuration']))
# Run the notifiers.
for notifier in notifiers:
notifier.run()
# Run the CSCC notifier.
violation_configs = notifier_configs.get('violation')
if violation_configs:
if violation_configs.get('cscc').get('enabled'):
source_id = violation_configs.get('cscc').get('source_id')
if source_id:
# beta mode
LOGGER.debug(
'Running CSCC notifier with beta API. source_id: '
'%s', source_id)
(cscc_notifier.CsccNotifier(inventory_index_id)
.run(violations_as_dict, source_id=source_id))
else:
# alpha mode
LOGGER.debug('Running CSCC notifier with alpha API.')
gcs_path = (
violation_configs.get('cscc').get('gcs_path'))
mode = violation_configs.get('cscc').get('mode')
organization_id = (
violation_configs.get('cscc').get(
'organization_id'))
(cscc_notifier.CsccNotifier(inventory_index_id)
.run(violations_as_dict, gcs_path, mode,
organization_id))
InventorySummary(service_config, inventory_index_id).run()
log_message = 'Notification completed!'
progress_queue.put(log_message)
progress_queue.put(None)
LOGGER.info(log_message)
return 0
# pylint: enable=too-many-branches,too-many-statements
| 1 | 32,995 | alpha sort the imports | forseti-security-forseti-security | py |
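A sketch of the alphabetically sorted import block the reviewer is asking for, keeping the pylint pragmas that wrap it in the patch:

```python
# pylint: disable=line-too-long
from google.cloud.forseti.common.util import logger
from google.cloud.forseti.common.util import string_formats
from google.cloud.forseti.common.util.email.email_factory import EmailFactory
from google.cloud.forseti.notifier.notifiers import cscc_notifier
from google.cloud.forseti.notifier.notifiers import email_violations
from google.cloud.forseti.notifier.notifiers.base_notification import BaseNotification
from google.cloud.forseti.notifier.notifiers.inventory_summary import InventorySummary
from google.cloud.forseti.services.inventory.storage import DataAccess
from google.cloud.forseti.services.scanner import dao as scanner_dao
# pylint: enable=line-too-long
```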
@@ -1067,7 +1067,7 @@ fpga_result mmio_error(struct RASCommandLine *rasCmdLine)
if ( rasCmdLine->function >0 )
function = rasCmdLine->bus;
- snprintf(sysfs_path, sizeof(sysfs_path),
+ snprintf_s_iiii(sysfs_path, sizeof(sysfs_path),
DEVICEID_PATH,0,bus,device,function);
result = sysfs_read_u64(sysfs_path, &value); | 1 | // Copyright(c) 2017, Intel Corporation
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of Intel Corporation nor the names of its contributors
// may be used to endorse or promote products derived from this software
// without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
#include <errno.h>
#include <stdbool.h>
#include <malloc.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <time.h>
#include <unistd.h>
#include <fcntl.h>
#include <sys/mman.h>
#include <unistd.h>
#include <uuid/uuid.h>
#include "safe_string/safe_string.h"
#include "opae/fpga.h"
#include "types_int.h"
#include "common_int.h"
// SYSFS FME Errors
#define FME_SYSFS_FME_ERRORS "errors/fme-errors/errors"
#define FME_SYSFS_PCIE0_ERRORS "errors/pcie0_errors"
#define FME_SYSFS_PCIE1_ERRORS "errors/pcie1_errors"
#define FME_SYSFS_BBS_ERRORS "errors/bbs_errors"
#define FME_SYSFS_GBS_ERRORS "errors/gbs_errors"
#define FME_SYSFS_WARNING_ERRORS "errors/warning_errors"
#define FME_SYSFS_NONFATAL_ERRORS "errors/nonfatal_errors"
#define FME_SYSFS_CATFATAL_ERRORS "errors/catfatal_errors"
#define FME_SYSFS_INJECT_ERROR "errors/inject_error"
#define FME_SYSFS_ERR_REVISION "errors/revision"
#define PORT_SYSFS_ERR "errors/errors"
#define PORT_SYSFS_ERR_CLEAR "errors/clear"
// SYSFS Thermal
#define FME_SYSFS_THERMAL_MGMT_TEMP "thermal_mgmt/temperature"
#define FME_SYSFS_THERMAL_MGMT_THRESHOLD_TRIP "thermal_mgmt/threshold_trip"
// SYSFS Power
#define FME_SYSFS_POWER_MGMT_CONSUMED "power_mgmt/consumed"
// MMIO scratchpad
#define PORT_SCRATCHPAD0 0x0028
#define NLB_CSR_SCRATCHPAD (0x40000 + 0x0104 )
#define PORT_MMIO_LEN (0x40000 + 0x0512 )
#define MMO_WRITE64_VALUE 0xF1F1F1F1F1F1F1F1
#define MMO_WRITE32_VALUE 0xF1F1F1
#define FPGA_CSR_LEN 64
#define DEVICEID_PATH "/sys/bus/pci/devices/%04x:%02x:%02x.%d/device"
#define FPGA_PORT_RES_PATH "/sys/bus/pci/devices/%04x:%02x:%02x.%d/resource2"
#define FPGA_SET_BIT(val, index) val |= (1 << index)
#define FPGA_CLEAR_BIT(val, index) val &= ~(1 << index)
#define FPGA_TOGGLE_BIT(val, index) val ^= (1 << index)
#define FPGA_BIT_IS_SET(val, index) (((val) >> (index)) & 1)
/* Type definitions */
typedef struct {
uint32_t uint[16];
} cache_line;
int usleep(unsigned);
#ifndef CL
# define CL(x) ((x) * 64)
#endif // CL
#ifndef LOG2_CL
# define LOG2_CL 6
#endif // LOG2_CL
#ifndef MB
# define MB(x) ((x) * 1024 * 1024)
#endif // MB
#define CACHELINE_ALIGNED_ADDR(p) ((p) >> LOG2_CL)
#define LPBK1_BUFFER_SIZE MB(1)
#define LPBK1_BUFFER_ALLOCATION_SIZE MB(2)
#define LPBK1_DSM_SIZE MB(2)
#define CSR_SRC_ADDR 0x0120
#define CSR_DST_ADDR 0x0128
#define CSR_CTL 0x0138
#define CSR_CFG 0x0140
#define CSR_NUM_LINES 0x0130
#define DSM_STATUS_TEST_COMPLETE 0x40
#define CSR_AFU_DSM_BASEL 0x0110
#define CSR_AFU_DSM_BASEH 0x0114
/* SKX-P NLB0 AFU_ID */
#define SKX_P_NLB0_AFUID "D8424DC4-A4A3-C413-F89E-433683F9040B"
static const char * const FME_ERROR[] = {
"Fabric error detected", \
"Fabric fifo under / overflow error detected", \
"KTI CDC Parity Error detected", \
"KTI CDC Parity Error detected", \
"IOMMU Parity error detected", \
"AFU PF/VF access mismatch detected", \
"Indicates an MBP event error detected", \
};
static const char * const PCIE0_ERROR[] = {
"TLP format/type error detected", \
"TTLP MW address error detected", \
"TLP MW length error detected", \
"TLP MR address error detected", \
"TLP MR length error detected", \
"TLP CPL tag error detected", \
"TLP CPL status error detected", \
"TLP CPL timeout error detected", \
"CCI bridge parity error detected", \
"TLP with EP error detected", \
};
static const char * const PCIE1_ERROR[] = {
"TLP format/type error detected", \
"TTLP MW address error detected", \
"TLP MW length error detected", \
"TLP MR address error detected", \
"TLP MR length error detected", \
"TLP CPL tag error detected", \
"TLP CPL status error detected", \
"TLP CPL timeout error detected", \
"CCI bridge parity error detected", \
"TLP with EP error detected", \
};
static const char * const RAS_NONFATAL_ERROR [] = {
"Temperature threshold triggered AP1 detected", \
"Temperature threshold triggered AP2 detected", \
"PCIe error detected", \
"AFU port Fatal error detected", \
"ProcHot event error detected", \
"AFU PF/VF access mismatch error detected", \
"Injected Warning Error detected", \
"Reserved", \
"Reserved", \
"Temperature threshold triggered AP6 detected", \
"Power threshold triggered AP1 error detected", \
"Power threshold triggered AP2 error detected", \
"MBP event error detected", \
};
static const char * const RAS_CATFATAL_ERROR[] = {
"KTI link layer error detected.", \
"tag-n-cache error detected.", \
"CCI error detected.", \
"KTI protocol error detected.", \
"Fatal DRAM error detected", \
"IOMMU fatal parity error detected.", \
"Fabric fatal error detected", \
"Poison error from any of PCIe ports detected", \
"Injected Fatal Error detected", \
"Catastrophic CRC error detected", \
"Catastrophic thermal runaway event detected", \
"Injected Catastrophic Error detected", \
};
static const char * const RAS_INJECT_ERROR[] = {
"Set Catastrophic error .", \
"Set Fatal error.", \
"Ser Non-fatal error .", \
};
static const char * const RAS_GBS_ERROR [] = {
"Temperature threshold triggered AP1 detected", \
"Temperature threshold triggered AP2 detected", \
"PCIe error detected", \
"AFU port Fatal error detected", \
"ProcHot event error detected", \
"AFU PF/VF access mismatch error detected", \
"Injected Warning Error detected", \
"Poison error from any of PCIe ports detected", \
"GBS CRC errordetected ", \
"Temperature threshold triggered AP6 detected", \
"Power threshold triggered AP1 error detected", \
"Power threshold triggered AP2 error detected", \
"MBP event error detected", \
};
static const char * const RAS_BBS_ERROR[] = {
"KTI link layer error detected.", \
"tag-n-cache error detected.", \
"CCI error detected.", \
"KTI protocol error detected.", \
"Fatal DRAM error detected", \
"IOMMU fatal parity error detected.", \
"Fabric fatal error detected", \
"Poison error from any of PCIe ports detected", \
"Injected Fatal Error detected", \
"Catastrophic CRC error detected", \
"Catastrophic thermal runaway event detected", \
"Injected Catastrophic Error detected", \
};
static const char * const RAS_WARNING_ERROR[] = {
"Green bitstream fatal event error detected.", \
};
static const char * const PORT_ERROR[] = {
"Tx Channel 0 overflow error detected.", \
"Tx Channel 0 invalid request encodingr error detected.", \
"Tx Channel 0 cl_len=3 not supported error detected.", \
"Tx Channel 0 request with cl_len=2 does NOT have a 2CL aligned address error detected.", \
"Tx Channel 0 request with cl_len=4 does NOT have a 4CL aligned address error detected.", \
"RSVD.", "RSVD.", "RSVD.","RSVD.",\
"AFU MMIO RD received while PORT is in reset error detected", \
"AFU MMIO WR received while PORT is in reset error detected", \
"RSVD.", "RSVD.", "RSVD.", "RSVD.", "RSVD.",\
"Tx Channel 1 invalid request encoding error detected", \
"Tx Channel 1 cl_len=3 not supported error detected.", \
"Tx Channel 1 request with cl_len=2 does NOT have a 2CL aligned address error detected", \
"Tx Channel 1 request with cl_len=4 does NOT have a 4CL aligned address error detected", \
"Tx Channel 1 insufficient data payload Error detected", \
"Tx Channel 1 data payload overrun error detected", \
"Tx Channel 1 incorrect address on subsequent payloads error detected", \
"Tx Channel 1 Non-zero SOP detected for requests!=WrLine_* error detected", \
"Tx Channel 1 Illegal VC_SEL. Atomic request is only supported on VL0 error detected", \
"RSVD.", "RSVD.", "RSVD.", "RSVD.", "RSVD.", "RSVD.", "RSVD.",\
"MMIO TimedOut error detected", \
"Tx Channel 2 fifo overflo error detected", \
"MMIO Read response received, with no matching request pending error detected", \
"RSVD.", "RSVD.", "RSVD.", "RSVD.", "RSVD.", \
"Number of pending requests: counter overflow error detected", \
"Request with Address violating SMM range error detected", \
"Request with Address violating second SMM range error detected", \
"Request with Address violating ME stolen range", \
"Request with Address violating Generic protected range error detected ", \
"Request with Address violating Legacy Range Low error detected", \
"Request with Address violating Legacy Range High error detected", \
"Request with Address violating VGA memory range error detected", \
"Page Fault error detected", \
"PMR Erro error detected", \
"AP6 event detected ", \
"VF FLR detected on port when PORT configured in PF access mode error detected ", \
};
// RAS Error Inject CSR
struct ras_inject_error {
union {
uint64_t csr;
struct {
/* Catastrophic error */
			uint64_t catastrophic_error : 1;
/* Fatal error */
uint64_t fatal_error : 1;
/* Non-fatal error */
uint64_t nonfatal_error : 1;
/* Reserved */
uint64_t rsvd : 61;
};
};
};
#define GETOPT_STRING ":hB:D:F:S:PQRNTCEGHIO"
struct option longopts[] = {
{"help", no_argument, NULL, 'h'},
{"bus-number", required_argument, NULL, 'B'},
{"device-number", required_argument, NULL, 'D'},
{"function-number", required_argument, NULL, 'F'},
{"socket-number", required_argument, NULL, 'S'},
{"print-error", no_argument, NULL, 'P'},
{"catast-error", no_argument, NULL, 'Q'},
{"fatal-error", no_argument, NULL, 'R'},
{"nofatal-error", no_argument, NULL, 'N'},
{"thermal-trip", no_argument, NULL, 'T'},
{"clearinj-error", no_argument, NULL, 'C'},
{"mwaddress-error", no_argument, NULL, 'E'},
{"mraddress-error", no_argument, NULL, 'G'},
{"mwlength-error", no_argument, NULL, 'H'},
{"mrlength-error", no_argument, NULL, 'I'},
{"pagefault-error", no_argument, NULL, 'O'},
{0,0,0,0}
};
// RAS Command line struct
struct RASCommandLine
{
uint32_t flags;
#define RASAPP_CMD_FLAG_HELP 0x00000001
#define RASAPP_CMD_FLAG_VERSION 0x00000002
#define RASAPP_CMD_PARSE_ERROR 0x00000003
#define RASAPP_CMD_FLAG_BUS 0x00000008
#define RASAPP_CMD_FLAG_DEV 0x00000010
#define RASAPP_CMD_FLAG_FUNC 0x00000020
#define RASAPP_CMD_FLAG_SOCKET 0x00000040
int bus;
int device;
int function;
int socket;
bool print_error;
bool catast_error;
bool fatal_error;
bool nonfatal_error;
bool clear_injerror;
bool mwaddress_error;
bool mraddress_error;
bool mwlength_error;
bool mrlength_error;
bool pagefault_error;
};
struct RASCommandLine rasCmdLine = { 0, -1, -1, -1, -1, false,
		false, false, false, false,
false, false, false, false, false};
// RAS Command line input help
void RASAppShowHelp()
{
printf("Usage:\n");
printf("./ras \n");
printf("<Bus> --bus=<BUS NUMBER> "
"OR -B=<BUS NUMBER>\n");
printf("<Device> --device=<DEVICE NUMBER> "
"OR -D=<DEVICE NUMBER>\n");
printf("<Function> --function=<FUNCTION NUMBER> "
"OR -F=<FUNCTION NUMBER>\n");
printf("<Socket> --socket=<socket NUMBER> "
" OR -S=<SOCKET NUMBER>\n");
printf("<Print Error> --print-error OR -P \n");
printf("<Catast Error> --catast-error OR -Q \n");
printf("<Fatal Error> --fatal-error OR -R \n");
printf("<NoFatal Error> --nofatal-error OR -N \n");
printf("<Clear Inj Error> --clearinj-error OR -C \n");
printf("<MW Address error> --mwaddress-error OR -E \n");
printf("<MR Address error> --mwaddress-error OR -G \n");
printf("<MW Length error> --mwlength-error OR -H \n");
printf("<MR Length error> --mrlength-error OR -I \n");
printf("<Page Fault Error> --pagefault-error OR -O \n");
printf("\n");
}
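/*
 * Example invocation (hypothetical PCIe location): print the current FME and
 * port errors and inject a fatal error on the device at bus 0x5e:
 *     ./ras -B 0x5e -D 0 -F 0 -P -R
 */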
/*
* macro to check return codes, print error message, and goto cleanup label
* NOTE: this changes the program flow (uses goto)!
*/
#define ON_ERR_GOTO(res, label, desc) \
do { \
if ((res) != FPGA_OK) { \
print_err((desc), (res)); \
goto label; \
} \
} while (0)
void print_err(const char *s, fpga_result res)
{
fprintf(stderr, "Error %s: %s\n", s, fpgaErrStr(res));
}
fpga_result print_ras_errors(fpga_token token);
fpga_result print_pwr_temp(fpga_token token);
fpga_result clear_inject_ras_errors(fpga_token token,
struct RASCommandLine *rasCmdLine);
fpga_result inject_ras_errors(fpga_token token,
struct RASCommandLine *rasCmdLine);
fpga_result mmio_error(struct RASCommandLine *rasCmdLine);
fpga_result print_port_errors(fpga_token token);
fpga_result clear_port_errors(fpga_token token);
fpga_result page_fault_errors();
int ParseCmds(struct RASCommandLine *rasCmdLine, int argc, char *argv[]);
int main( int argc, char** argv )
{
fpga_result result = 0;
fpga_properties filter = NULL;
fpga_token fme_token ;
uint32_t num_matches = 1;
// Parse command line
if ( argc < 2 ) {
RASAppShowHelp();
return 1;
	} else if ( 0 != ParseCmds(&rasCmdLine, argc, argv) ) {
		FPGA_ERR("Error scanning command line.\n");
return 2;
}
printf(" ------- Command line Input Start ---- \n \n");
printf(" Bus : %d\n", rasCmdLine.bus);
printf(" Device : %d \n", rasCmdLine.device);
printf(" Function : %d \n", rasCmdLine.function);
printf(" Socket : %d \n", rasCmdLine.socket);
printf(" Print Error : %d \n", rasCmdLine.print_error);
printf(" Catas Error : %d \n", rasCmdLine.catast_error);
printf(" Fatal Error : %d \n", rasCmdLine.fatal_error);
printf(" NonFatal Error : %d \n", rasCmdLine.nonfatal_error);
printf(" Clear Error : %d \n", rasCmdLine.clear_injerror);
printf(" MW Address Error : %d \n", rasCmdLine.mwaddress_error);
printf(" MR Address Error : %d \n", rasCmdLine.mraddress_error);
printf(" MW Length Error : %d \n", rasCmdLine.mwlength_error);
printf(" MR Length Error : %d \n", rasCmdLine.mrlength_error);
printf(" Page Fault Error : %d \n", rasCmdLine.pagefault_error);
printf(" ------- Command line Input END ---- \n\n");
// Enum FPGA device
result = fpgaGetProperties(NULL, &filter);
ON_ERR_GOTO(result, out_exit, "creating properties object");
result = fpgaPropertiesSetObjectType(filter, FPGA_DEVICE);
ON_ERR_GOTO(result, out_destroy_prop, "setting object type");
if (rasCmdLine.bus >0){
result = fpgaPropertiesSetBus(filter, rasCmdLine.bus);
ON_ERR_GOTO(result, out_destroy_prop, "setting bus");
}
if (rasCmdLine.device >0) {
result = fpgaPropertiesSetDevice(filter, rasCmdLine.device);
ON_ERR_GOTO(result, out_destroy_prop, "setting device");
}
if (rasCmdLine.function >0){
result = fpgaPropertiesSetFunction(filter, rasCmdLine.function);
ON_ERR_GOTO(result, out_destroy_prop, "setting function");
}
if (rasCmdLine.socket >0){
result = fpgaPropertiesSetSocketID(filter, rasCmdLine.socket);
ON_ERR_GOTO(result, out_destroy_prop, "setting socket");
}
result = fpgaEnumerate(&filter, 1, &fme_token,1, &num_matches);
ON_ERR_GOTO(result, out_destroy_prop, "enumerating FPGAs");
if (num_matches < 1) {
fprintf(stderr, "FPGA Resource not found.\n");
result = fpgaDestroyProperties(&filter);
return FPGA_INVALID_PARAM;
}
fprintf(stderr, "FME Resource found.\n");
// Inject error
if (rasCmdLine.catast_error ||
rasCmdLine.fatal_error ||
rasCmdLine.nonfatal_error) {
// Inject RAS ERROR
result = inject_ras_errors(fme_token,&rasCmdLine);
if (result != FPGA_OK) {
FPGA_ERR("Failed to print fme errors");
goto out_destroy_prop;
}
}
// inject MMIO error
if ( (rasCmdLine.mwaddress_error == true) ||
(rasCmdLine.mraddress_error == true) ||
(rasCmdLine.mwlength_error == true) ||
(rasCmdLine.mrlength_error == true) ) {
result = mmio_error(&rasCmdLine);
if (result != FPGA_OK) {
FPGA_ERR("Failed set MMIO errors");
goto out_destroy_prop;
}
}
// Clear Inject Error
if (rasCmdLine.clear_injerror ) {
// clear RAS ERROR
result = clear_inject_ras_errors(fme_token,&rasCmdLine);
if (result != FPGA_OK) {
FPGA_ERR("Failed to clear inject errors");
goto out_destroy_prop;
}
// clear Port ERROR
result = clear_port_errors(fme_token);
if (result != FPGA_OK) {
FPGA_ERR("Failed to clear port errors");
goto out_destroy_prop;
}
}
if (rasCmdLine.pagefault_error) {
// Page fault error
result = page_fault_errors();
if (result != FPGA_OK) {
FPGA_ERR("Failed to trigger page fault errors");
goto out_destroy_prop;
}
}
sleep(1);
if (rasCmdLine.print_error) {
// Print RAS Error
result = print_ras_errors(fme_token);
if (result != FPGA_OK) {
FPGA_ERR("Failed to print fme errors");
goto out_destroy_prop;
}
// Print port Error
result = print_port_errors(fme_token);
if (result != FPGA_OK) {
FPGA_ERR("Failed to print port errors");
goto out_destroy_prop;
}
// Print power and temp
result = print_pwr_temp(fme_token);
if (result != FPGA_OK) {
FPGA_ERR("Failed to get power and temp");
goto out_destroy_prop;
}
}
/* Destroy properties object */
out_destroy_prop:
result = fpgaDestroyProperties(&filter);
ON_ERR_GOTO(result, out_exit, "destroying properties object");
out_exit:
return result;
}
// Print Error
fpga_result print_errors(fpga_token token,
const char * err_path,
const char * const* err_strings,
int size)
{
struct _fpga_token *_token = 0;
int i = 0;
uint64_t value = 0;
	char sysfs_path[SYSFS_PATH_MAX] = {0};
fpga_result result = FPGA_OK;
_token = (struct _fpga_token*)token;
if (_token == NULL) {
FPGA_ERR("Token not found");
return FPGA_INVALID_PARAM;
}
if(err_path == NULL ||
err_strings == NULL) {
FPGA_ERR("Invalid input sting");
return FPGA_INVALID_PARAM;
}
	snprintf_s_ss(sysfs_path, sizeof(sysfs_path), "%s/%s",
			_token->sysfspath,
			err_path);
// Read error.
	result = sysfs_read_u64(sysfs_path, &value);
if (result != FPGA_OK) {
FPGA_ERR("Failed to get errors");
return result;
}
printf(" CSR : 0x%lx \n", value);
for (i = 0; i < FPGA_CSR_LEN; i++) {
if ((i < size) && FPGA_BIT_IS_SET(value, i)) {
printf("\t %s \n", err_strings[i]);
}
}
return result;
}
// prints RAS errors
fpga_result print_ras_errors(fpga_token token)
{
struct _fpga_token *_token = 0;
uint64_t revision = 0;
	char sysfs_path[SYSFS_PATH_MAX] = {0};
fpga_result result = FPGA_OK;
_token = (struct _fpga_token*)token;
if (_token == NULL) {
FPGA_ERR("Token not found");
return FPGA_INVALID_PARAM;
}
printf("\n ==========================================\n");
printf(" ----------- PRINT FME ERROR START-------- \n \n");
// get revision
	snprintf_s_ss(sysfs_path, sizeof(sysfs_path), "%s/%s",
			_token->sysfspath,
			FME_SYSFS_ERR_REVISION);
// Read revision.
	result = sysfs_read_u64(sysfs_path, &revision);
	if (result != FPGA_OK) {
		FPGA_ERR("Failed to get fme revision");
		return result;
	}
	printf(" fme error revision : %lu \n", revision);
	// Revision 1
if( revision == 1 ) {
// Non Fatal Error
printf("\n ------- Non Fatal error ------------ \n");
result = print_errors(token,
FME_SYSFS_NONFATAL_ERRORS,
RAS_NONFATAL_ERROR,
sizeof(RAS_NONFATAL_ERROR) /sizeof(RAS_NONFATAL_ERROR[0]));
if (result != FPGA_OK) {
FPGA_ERR("Failed to get fme non fatal errors");
return result;
}
// Fatal Error
printf("\n ------- Fatal error ------------ \n");
result = print_errors(token,
FME_SYSFS_CATFATAL_ERRORS,
RAS_CATFATAL_ERROR,
sizeof(RAS_CATFATAL_ERROR) /sizeof(RAS_CATFATAL_ERROR[0]));
if (result != FPGA_OK) {
FPGA_ERR("Failed to get fme fatal errors");
return result;
}
// Injected error
printf("\n ------- Injected error ------------ \n");
result = print_errors(token,
FME_SYSFS_INJECT_ERROR,
RAS_INJECT_ERROR,
sizeof(RAS_INJECT_ERROR) /sizeof(RAS_INJECT_ERROR[0]));
if (result != FPGA_OK) {
FPGA_ERR("Failed to get fme Injected errors");
return result;
}
// FME error
printf("\n ------- FME error ------------ \n");
result = print_errors(token,
FME_SYSFS_FME_ERRORS,
FME_ERROR,
sizeof(FME_ERROR) /sizeof(FME_ERROR[0]));
if (result != FPGA_OK) {
FPGA_ERR("Failed to get fme errors");
return result;
}
// PCIe0 error
printf("\n ------- PCIe0 error ------------ \n");
result = print_errors(token,
FME_SYSFS_PCIE0_ERRORS,
PCIE0_ERROR,
sizeof(PCIE0_ERROR) /sizeof(PCIE0_ERROR[0]));
if (result != FPGA_OK) {
FPGA_ERR("Failed to get pcie0 errors");
return result;
}
// PCIe1 error
printf("\n ------- PCIe1 error ------------ \n");
result = print_errors(token,
FME_SYSFS_PCIE1_ERRORS,
PCIE1_ERROR,
sizeof(PCIE1_ERROR) /sizeof(PCIE1_ERROR[0]));
if (result != FPGA_OK) {
FPGA_ERR("Failed to get pcie1 errors");
return result;
}
// Revision 0
} else if( revision == 0){
// GBS Error
printf("\n ------- GBS error ------------ \n");
result = print_errors(token,
FME_SYSFS_GBS_ERRORS,
RAS_GBS_ERROR,
sizeof(RAS_GBS_ERROR) /sizeof(RAS_GBS_ERROR[0]));
if (result != FPGA_OK) {
FPGA_ERR("Failed to get fme gbs errors");
return result;
}
// BBS Error
printf("\n ------- BBS error ------------ \n");
result = print_errors(token,
FME_SYSFS_BBS_ERRORS,
RAS_BBS_ERROR,
sizeof(RAS_BBS_ERROR) /sizeof(RAS_BBS_ERROR[0]));
if (result != FPGA_OK) {
FPGA_ERR("Failed to get fme bbs errors");
return result;
}
// Injected error
printf("\n ------- Injected error ------------ \n");
result = print_errors(token,
FME_SYSFS_INJECT_ERROR,
RAS_INJECT_ERROR,
sizeof(RAS_INJECT_ERROR) /sizeof(RAS_INJECT_ERROR[0]));
if (result != FPGA_OK) {
FPGA_ERR("Failed to get fme Injected errors");
return result;
}
// FME error
printf("\n ------- FME error ------------ \n");
result = print_errors(token,
FME_SYSFS_FME_ERRORS,
FME_ERROR,
sizeof(FME_ERROR) /sizeof(FME_ERROR[0]));
if (result != FPGA_OK) {
FPGA_ERR("Failed to get fme errors");
return result;
}
// PCIe0 error
printf("\n ------- PCIe0 error ------------ \n");
result = print_errors(token,
FME_SYSFS_PCIE0_ERRORS,
PCIE0_ERROR,
sizeof(PCIE0_ERROR) /sizeof(PCIE0_ERROR[0]));
if (result != FPGA_OK) {
FPGA_ERR("Failed to get pcie0 errors");
return result;
}
// PCIe1 error
printf("\n ------- PCIe1 error ------------ \n");
result = print_errors(token,
FME_SYSFS_PCIE1_ERRORS,
PCIE1_ERROR,
sizeof(PCIE1_ERROR) /sizeof(PCIE1_ERROR[0]));
if (result != FPGA_OK) {
FPGA_ERR("Failed to get pcie1 errors");
return result;
}
} else {
printf("\n Invalid FME Error Revision \n");
}
printf("\n ----------- PRINT FME ERROR END----------\n");
printf(" ========================================== \n \n");
return result;
}
// prints PORT errors
fpga_result print_port_errors(fpga_token token)
{
struct _fpga_token *_token = 0;
int i = 0;
uint64_t value = 0;
int size = 0;
char sysfs_port[SYSFS_PATH_MAX] = {0};
fpga_result result = FPGA_OK;
char *p = 0;
int device_id = 0;
_token = (struct _fpga_token*)token;
if (_token == NULL) {
FPGA_ERR("Token not found");
return FPGA_INVALID_PARAM;
}
printf("\n ==========================================\n");
printf(" ----------- PRINT PORT ERROR START-------- \n \n");
p = strstr(_token->sysfspath, FPGA_SYSFS_FME);
if (NULL == p)
return FPGA_INVALID_PARAM;
p = strrchr(_token->sysfspath, '.');
if (NULL == p)
return FPGA_INVALID_PARAM;
device_id = atoi(p + 1);
snprintf_s_iis(sysfs_port, SYSFS_PATH_MAX,
SYSFS_FPGA_CLASS_PATH SYSFS_AFU_PATH_FMT"/%s",
device_id, device_id,PORT_SYSFS_ERR);
// Read port error.
result = sysfs_read_u64(sysfs_port, &value);
if (result != FPGA_OK) {
FPGA_ERR("Failed to get fme errors");
return result;
}
printf("\n \n Port error CSR : 0x%lx \n", value);
size = sizeof(PORT_ERROR) /sizeof(PORT_ERROR[0]);
for (i = 0; i < 64; i++) {
if ( FPGA_BIT_IS_SET(value, i) && (i < size)) {
printf("\t %s \n", PORT_ERROR[i]);
}
}
printf("\n ----------- PRINT PORT ERROR END----------\n");
printf(" ========================================== \n \n");
return result;
}
// clear PORT errors
fpga_result clear_port_errors(fpga_token token)
{
struct _fpga_token *_token = 0;
uint64_t value = 0;
char sysfs_port[SYSFS_PATH_MAX] = {0};
fpga_result result = FPGA_OK;
char *p = 0;
int device_id = 0;
_token = (struct _fpga_token*)token;
if (_token == NULL) {
FPGA_ERR("Token not found");
return FPGA_INVALID_PARAM;
}
printf(" ----------- Clear port error-------- \n \n");
p = strstr(_token->sysfspath, FPGA_SYSFS_FME);
if (NULL == p)
return FPGA_INVALID_PARAM;
p = strrchr(_token->sysfspath, '.');
if (NULL == p)
return FPGA_INVALID_PARAM;
device_id = atoi(p + 1);
snprintf_s_iis(sysfs_port, SYSFS_PATH_MAX,
SYSFS_FPGA_CLASS_PATH SYSFS_AFU_PATH_FMT"/%s",
device_id, device_id,PORT_SYSFS_ERR);
// Read port error.
result = sysfs_read_u64(sysfs_port, &value);
if (result != FPGA_OK) {
FPGA_ERR("Failed to get port errors");
return result;
}
printf("\n \n Port error CSR : 0x%lx \n", value);
snprintf_s_iis(sysfs_port, SYSFS_PATH_MAX,
SYSFS_FPGA_CLASS_PATH SYSFS_AFU_PATH_FMT"/%s",
device_id, device_id,PORT_SYSFS_ERR_CLEAR);
result = sysfs_write_u64(sysfs_port, value);
if (result != FPGA_OK) {
FPGA_ERR("Failed to write errors");
}
return result;
}
// Inject RAS errors
fpga_result inject_ras_errors(fpga_token token,
struct RASCommandLine *rasCmdLine)
{
struct _fpga_token *_token = NULL;
struct ras_inject_error inj_error = {{0}};
char sysfs_path[SYSFS_PATH_MAX] = {0};
fpga_result result = FPGA_OK;
_token = (struct _fpga_token*)token;
if (_token == NULL) {
FPGA_ERR("Token not found");
return FPGA_INVALID_PARAM;
}
printf("----------- INJECT ERROR START -------- \n \n");
snprintf_s_ss(sysfs_path, sizeof(sysfs_path), "%s/%s",
_token->sysfspath,
FME_SYSFS_INJECT_ERROR);
result = sysfs_read_u64(sysfs_path, &inj_error.csr);
if (result != FPGA_OK) {
FPGA_ERR("Failed to get fme errors");
return result;
}
printf("inj_error.csr: %ld \n", inj_error.csr);
if (rasCmdLine->catast_error ) {
		inj_error.catastrophic_error = 1;
}
if (rasCmdLine->fatal_error ) {
inj_error.fatal_error = 1;
}
if (rasCmdLine->nonfatal_error ) {
inj_error.nonfatal_error = 1;
}
printf("inj_error.csr: %ld \n", inj_error.csr);
result = sysfs_write_u64(sysfs_path ,inj_error.csr);
if (result != FPGA_OK) {
FPGA_ERR("Failed to write RAS inject errors");
return result;
}
printf("----------- INJECT ERROR END-------- \n \n");
return result;
}
// Clear Inject RAS errors
fpga_result clear_inject_ras_errors(fpga_token token,
struct RASCommandLine *rasCmdLine)
{
struct _fpga_token *_token = NULL;
struct ras_inject_error inj_error = {{0}};
char sysfs_path[SYSFS_PATH_MAX] = {0};
fpga_result result = FPGA_OK;
UNUSED_PARAM(rasCmdLine);
_token = (struct _fpga_token*)token;
if (_token == NULL) {
FPGA_ERR("Token not found");
return FPGA_INVALID_PARAM;
}
snprintf_s_ss(sysfs_path, sizeof(sysfs_path), "%s/%s",
_token->sysfspath,
FME_SYSFS_INJECT_ERROR);
result = sysfs_read_u64(sysfs_path, &inj_error.csr);
if (result != FPGA_OK) {
FPGA_ERR("Failed to read inject error");
return result;
}
printf(" Clear inj_error.csr: 0x%lx \n", inj_error.csr);
result = sysfs_write_u64(sysfs_path ,0x0);
if (result != FPGA_OK) {
FPGA_ERR("Failed to clear inject errors");
return result;
}
return result;
}
// Print FPGA power and temperature
fpga_result print_pwr_temp(fpga_token token)
{
struct _fpga_token *_token = 0;
uint64_t value = 0;
char sysfs_path[SYSFS_PATH_MAX] = {0};
fpga_result result = FPGA_OK;
_token = (struct _fpga_token*)token;
if (_token == NULL) {
FPGA_ERR("Token not found");
return FPGA_INVALID_PARAM;
}
printf("\n ----------- POWER & THERMAL -------------\n");
printf(" ========================================== \n \n");
snprintf_s_ss(sysfs_path, sizeof(sysfs_path), "%s/%s",
_token->sysfspath,
FME_SYSFS_POWER_MGMT_CONSUMED);
result = sysfs_read_u64(sysfs_path, &value);
if (result != FPGA_OK) {
FPGA_ERR("Failed to get power consumed");
return result;
}
printf(" Power consumed : %lu watts \n",value);
snprintf_s_ss(sysfs_path, sizeof(sysfs_path), "%s/%s",
_token->sysfspath,
FME_SYSFS_THERMAL_MGMT_TEMP);
result = sysfs_read_u64(sysfs_path, &value);
if (result != FPGA_OK) {
FPGA_ERR("Failed to get temperature");
return result;
}
printf(" Temperature : %lu Centigrade \n",value );
snprintf_s_ss(sysfs_path, sizeof(sysfs_path), "%s/%s",
_token->sysfspath,
FME_SYSFS_THERMAL_MGMT_THRESHOLD_TRIP);
result = sysfs_read_u64(sysfs_path, &value);
if (result != FPGA_OK) {
FPGA_ERR("Failed to get temperature");
return result;
}
printf(" Thermal Trip : %lu Centigrade \n",value );
printf("\n ----------- POWER & THERMAL -------------\n");
printf(" ========================================== \n \n");
return result;
}
// MMIO errors
fpga_result mmio_error(struct RASCommandLine *rasCmdLine)
{
char sysfs_path[SYSFS_PATH_MAX] = {0};
fpga_result result = FPGA_OK;
int bus = 0;
int device = 0;
int function = 0;
uint64_t value = 0;
int fd = 0;
uint8_t *ptr = 0;
if (rasCmdLine == NULL ) {
FPGA_ERR("Invalid input ");
return FPGA_INVALID_PARAM;
}
if ( rasCmdLine->bus >0 )
bus = rasCmdLine->bus;
	if ( rasCmdLine->device >0 )
		device = rasCmdLine->device;
	if ( rasCmdLine->function >0 )
		function = rasCmdLine->function;
snprintf(sysfs_path, sizeof(sysfs_path),
DEVICEID_PATH,0,bus,device,function);
result = sysfs_read_u64(sysfs_path, &value);
if (result != FPGA_OK) {
FPGA_ERR("Failed to read Device id");
return result;
}
if(value != FPGA_INTEGRATED_DEVICEID) {
FPGA_ERR("Failed to read Device id");
return FPGA_NOT_SUPPORTED;
}
snprintf(sysfs_path, sizeof(sysfs_path),
FPGA_PORT_RES_PATH,0,bus,device,function);
fd = open(sysfs_path, O_RDWR);
if (fd < 0) {
FPGA_ERR("Failed to open FPGA PCIE BAR2");
return FPGA_EXCEPTION;
}
ptr = mmap(NULL, PORT_MMIO_LEN,
PROT_READ|PROT_WRITE,MAP_SHARED, fd, 0);
if (ptr == MAP_FAILED ) {
FPGA_ERR("Failed to map FPGA PCIE BAR2");
result = FPGA_EXCEPTION;
goto out_close ;
}
// Memory Write length error
if(rasCmdLine->mwlength_error) {
FPGA_DBG("Memory Write length error \n");
*((volatile uint64_t *) (ptr + PORT_SCRATCHPAD0+3))
= (uint16_t)MMO_WRITE64_VALUE;
}
// Memory Read length error
if(rasCmdLine->mrlength_error) {
FPGA_DBG(" Memory Read length error \n");
value = *((volatile uint64_t *) (ptr + PORT_SCRATCHPAD0+3));
FPGA_DBG(" Memory Read length value %lx\n",value);
}
	// Memory Read address error
	if(rasCmdLine->mraddress_error) {
		FPGA_DBG("Memory Read address error \n");
		value = *((volatile uint16_t *) (ptr + NLB_CSR_SCRATCHPAD +3));
		FPGA_DBG("Memory Read address value %lx\n",value);
		value = *((volatile uint64_t *) (ptr + PORT_SCRATCHPAD0+3));
		FPGA_DBG("Memory Read address value %lx\n",value);
}
	// Memory Write address error
	if(rasCmdLine->mwaddress_error) {
		FPGA_DBG("Memory Write address error \n");
*((volatile uint16_t *) (ptr + NLB_CSR_SCRATCHPAD +3))
= (uint16_t)MMO_WRITE32_VALUE;
}
if(ptr)
munmap(ptr, PORT_MMIO_LEN);
out_close:
if(fd >=0)
close(fd);
return result;
}
// page fault errors
fpga_result page_fault_errors()
{
fpga_properties filter = NULL;
fpga_token accelerator_token;
fpga_handle accelerator_handle;
fpga_guid guid;
uint32_t num_matches;
volatile uint64_t *dsm_ptr = NULL;
volatile uint64_t *input_ptr = NULL;
volatile uint64_t *output_ptr = NULL;
uint64_t dsm_wsid;
uint64_t input_wsid;
uint64_t output_wsid;
fpga_result res = FPGA_OK;
if (uuid_parse(SKX_P_NLB0_AFUID, guid) < 0) {
fprintf(stderr, "Error parsing guid '%s'\n", SKX_P_NLB0_AFUID);
goto out_exit;
}
/* Look for accelerator with MY_ACCELERATOR_ID */
res = fpgaGetProperties(NULL, &filter);
ON_ERR_GOTO(res, out_exit, "creating properties object");
res = fpgaPropertiesSetObjectType(filter, FPGA_ACCELERATOR);
ON_ERR_GOTO(res, out_destroy_prop, "setting object type");
res = fpgaPropertiesSetGUID(filter, guid);
ON_ERR_GOTO(res, out_destroy_prop, "setting GUID");
if (rasCmdLine.bus >0){
res = fpgaPropertiesSetBus(filter, rasCmdLine.bus);
ON_ERR_GOTO(res, out_destroy_prop, "setting bus");
}
if (rasCmdLine.device >0) {
res = fpgaPropertiesSetDevice(filter, rasCmdLine.device);
ON_ERR_GOTO(res, out_destroy_prop, "setting device");
}
if (rasCmdLine.function >0){
res = fpgaPropertiesSetFunction(filter, rasCmdLine.function);
ON_ERR_GOTO(res, out_destroy_prop, "setting function");
}
res = fpgaEnumerate(&filter, 1, &accelerator_token, 1, &num_matches);
ON_ERR_GOTO(res, out_destroy_prop, "enumerating accelerators");
if (num_matches < 1) {
fprintf(stderr, "accelerator not found.\n");
res = fpgaDestroyProperties(&filter);
return FPGA_INVALID_PARAM;
}
/* Open accelerator and map MMIO */
res = fpgaOpen(accelerator_token, &accelerator_handle, FPGA_OPEN_SHARED);
ON_ERR_GOTO(res, out_destroy_tok, "opening accelerator");
res = fpgaMapMMIO(accelerator_handle, 0, NULL);
ON_ERR_GOTO(res, out_close, "mapping MMIO space");
/* Allocate buffers */
res = fpgaPrepareBuffer(accelerator_handle, LPBK1_DSM_SIZE,
(void **)&dsm_ptr, &dsm_wsid, 0);
ON_ERR_GOTO(res, out_close, "allocating DSM buffer");
res = fpgaPrepareBuffer(accelerator_handle, LPBK1_BUFFER_ALLOCATION_SIZE,
(void **)&input_ptr, &input_wsid, 0);
ON_ERR_GOTO(res, out_free_dsm, "allocating input buffer");
res = fpgaPrepareBuffer(accelerator_handle, LPBK1_BUFFER_ALLOCATION_SIZE,
(void **)&output_ptr, &output_wsid, 0);
ON_ERR_GOTO(res, out_free_input, "allocating output buffer");
printf("Running Test\n");
/* Initialize buffers */
memset((void *)dsm_ptr, 0, LPBK1_DSM_SIZE);
memset((void *)input_ptr, 0xAF, LPBK1_BUFFER_SIZE);
memset((void *)output_ptr, 0xBE, LPBK1_BUFFER_SIZE);
cache_line *cl_ptr = (cache_line *)input_ptr;
for (uint32_t i = 0; i < LPBK1_BUFFER_SIZE / CL(1); ++i) {
cl_ptr[i].uint[15] = i+1; /* set the last uint in every cacheline */
}
/* Reset accelerator */
res = fpgaReset(accelerator_handle);
ON_ERR_GOTO(res, out_free_output, "resetting accelerator");
/* Program DMA addresses */
uint64_t iova;
res = fpgaGetIOAddress(accelerator_handle, dsm_wsid, &iova);
ON_ERR_GOTO(res, out_free_output, "getting DSM IOVA");
res = fpgaWriteMMIO64(accelerator_handle, 0, CSR_AFU_DSM_BASEL, iova);
ON_ERR_GOTO(res, out_free_output, "writing CSR_AFU_DSM_BASEL");
	res = fpgaWriteMMIO32(accelerator_handle, 0, CSR_CTL, 0);
	ON_ERR_GOTO(res, out_free_output, "writing CSR_CTL");
	res = fpgaWriteMMIO32(accelerator_handle, 0, CSR_CTL, 1);
	ON_ERR_GOTO(res, out_free_output, "writing CSR_CTL");
res = fpgaGetIOAddress(accelerator_handle, input_wsid, &iova);
ON_ERR_GOTO(res, out_free_output, "getting input IOVA");
	// Release the input buffer before programming CSR_SRC_ADDR, so the AFU's read of it triggers a page fault
res = fpgaReleaseBuffer(accelerator_handle, input_wsid);
res = fpgaWriteMMIO64(accelerator_handle, 0, CSR_SRC_ADDR, CACHELINE_ALIGNED_ADDR(iova));
ON_ERR_GOTO(res, out_free_output, "writing CSR_SRC_ADDR");
res = fpgaGetIOAddress(accelerator_handle, output_wsid, &iova);
ON_ERR_GOTO(res, out_free_output, "getting output IOVA");
res = fpgaWriteMMIO64(accelerator_handle, 0, CSR_DST_ADDR, CACHELINE_ALIGNED_ADDR(iova));
ON_ERR_GOTO(res, out_free_output, "writing CSR_DST_ADDR");
res = fpgaWriteMMIO32(accelerator_handle, 0, CSR_NUM_LINES, LPBK1_BUFFER_SIZE / CL(1));
ON_ERR_GOTO(res, out_free_output, "writing CSR_NUM_LINES");
res = fpgaWriteMMIO32(accelerator_handle, 0, CSR_CFG, 0x42000);
ON_ERR_GOTO(res, out_free_output, "writing CSR_CFG");
/* Start the test */
res = fpgaWriteMMIO32(accelerator_handle, 0, CSR_CTL, 3);
ON_ERR_GOTO(res, out_free_output, "writing CSR_CFG");
/* Wait for test completion */
usleep(10000);
/* Stop the device */
res = fpgaWriteMMIO32(accelerator_handle, 0, CSR_CTL, 7);
ON_ERR_GOTO(res, out_free_output, "writing CSR_CFG");
printf("Done Running Test\n");
/* Release buffers */
out_free_output:
res = fpgaReleaseBuffer(accelerator_handle, output_wsid);
ON_ERR_GOTO(res, out_free_input, "releasing output buffer");
out_free_input:
// res = fpgaReleaseBuffer(accelerator_handle, input_wsid);
// ON_ERR_GOTO(res, out_free_dsm, "releasing input buffer");
out_free_dsm:
res = fpgaReleaseBuffer(accelerator_handle, dsm_wsid);
ON_ERR_GOTO(res, out_unmap, "releasing DSM buffer");
/* Unmap MMIO space */
out_unmap:
res = fpgaUnmapMMIO(accelerator_handle, 0);
ON_ERR_GOTO(res, out_close, "unmapping MMIO space");
/* Release accelerator */
out_close:
res = fpgaClose(accelerator_handle);
ON_ERR_GOTO(res, out_destroy_tok, "closing accelerator");
/* Destroy token */
out_destroy_tok:
res = fpgaDestroyToken(&accelerator_token);
ON_ERR_GOTO(res, out_destroy_prop, "destroying token");
/* Destroy properties object */
out_destroy_prop:
res = fpgaDestroyProperties(&filter);
ON_ERR_GOTO(res, out_exit, "destroying properties object");
out_exit:
return res;
}
// parse Input command line
int ParseCmds(struct RASCommandLine *rasCmdLine, int argc, char *argv[])
{
int getopt_ret = 0;
int option_index = 0;
char *endptr = NULL;
while( -1 != ( getopt_ret = getopt_long(argc, argv,
GETOPT_STRING,
longopts,
&option_index))){
const char *tmp_optarg = optarg;
if ((optarg) &&
('=' == *tmp_optarg)){
++tmp_optarg;
}
switch(getopt_ret){
case 'h':
// Command line help
RASAppShowHelp();
return -2;
break;
case 'B':
// bus number
if (tmp_optarg == NULL ) break;
endptr = NULL;
rasCmdLine->bus = strtol(tmp_optarg, &endptr, 0);
break;
case 'D':
// Device number
if (tmp_optarg == NULL ) break;
endptr = NULL;
rasCmdLine->device = strtol(tmp_optarg, &endptr, 0);
break;
case 'F':
// Function number
if (tmp_optarg == NULL ) break;
endptr = NULL;
rasCmdLine->function = strtol(tmp_optarg, &endptr, 0);
break;
case 'S':
// Socket number
if (tmp_optarg == NULL ) break;
endptr = NULL;
rasCmdLine->socket = strtol(tmp_optarg, &endptr, 0);
break;
case 'P':
// Print Errors
rasCmdLine->print_error = true;
break;
case 'Q':
			// Set Catastrophic error
rasCmdLine->catast_error = true;
break;
case 'R':
// Set Fatal error
rasCmdLine->fatal_error = true;
break;
case 'O':
// Set page fault error
rasCmdLine->pagefault_error = true;
break;
case 'N':
// Set Non Fatal error
rasCmdLine->nonfatal_error = true;
break;
case 'C':
// Clear Injected Error
rasCmdLine->clear_injerror = true;
break;
case 'E':
// Set MW Address error
rasCmdLine->mwaddress_error = true;
break;
case 'G':
// Set MR Address error
rasCmdLine->mraddress_error = true;
break;
case 'H':
// Set MW Length error
rasCmdLine->mwlength_error = true;
break;
case 'I':
// Set MR Length error
rasCmdLine->mrlength_error = true;
break;
case ':': /* missing option argument */
printf("Missing option argument.\n");
return -1;
case '?':
default: /* invalid option */
printf("Invalid cmdline options.\n");
return -1;
}
}
return 0;
}
| 1 | 14,917 | Can you explain why is this necessary? Is `snprintf()` with four integer arguments unsafe? | OPAE-opae-sdk | c |
@@ -219,7 +219,19 @@ def internal_keyDownEvent(vkCode,scanCode,extended,injected):
for k in range(256):
keyStates[k]=ctypes.windll.user32.GetKeyState(k)
charBuf=ctypes.create_unicode_buffer(5)
+ # First try getting the keyboard layout from the thread with the focus (input thread)
hkl=ctypes.windll.user32.GetKeyboardLayout(focus.windowThreadID)
+ if not hkl:
+ log.debug("Failed to fetch keyboard layout from focus, trying layout from last detected change")
+		# Some threads, such as those for Windows consoles,
+		# do not allow GetKeyboardLayout to work.
+ # Therefore, use the cached keyboard layout from the last inputLangChange detected by NVDA
+ # on the foreground object.
+ hkl = getattr(api.getForegroundObject(), '_lastDetectedKeyboardLayoutChange', 0)
+ if not hkl:
+ log.debug("No layout cached, falling back to layout of NVDA main thread")
+ # As a last resort, use the keyboard layout of NVDA's main thread.
+ hkl = ctypes.windll.user32.GetKeyboardLayout(core.mainThreadId)
# In previous Windows builds, calling ToUnicodeEx would destroy keyboard buffer state and therefore cause the app to not produce the right WM_CHAR message.
# However, ToUnicodeEx now can take a new flag of 0x4, which stops it from destroying keyboard state, thus allowing us to safely call it here.
res=ctypes.windll.user32.ToUnicodeEx(vkCode,scanCode,keyStates,charBuf,len(charBuf),0x4,hkl) | 1 | # -*- coding: UTF-8 -*-
#keyboardHandler.py
#A part of NonVisual Desktop Access (NVDA)
#This file is covered by the GNU General Public License.
#See the file COPYING for more details.
#Copyright (C) 2006-2017 NV Access Limited, Peter Vágner, Aleksey Sadovoy, Babbage B.V.
"""Keyboard support"""
import ctypes
import sys
import time
import re
import wx
import winVersion
import winUser
import vkCodes
import eventHandler
import speech
import ui
from keyLabels import localizedKeyLabels
from logHandler import log
import queueHandler
import config
import api
import winInputHook
import inputCore
import tones
import core
from contextlib import contextmanager
import threading
ignoreInjected=False
# Fake vk codes.
# These constants should be assigned to the name that NVDA will use for the key.
VK_WIN = "windows"
VK_NVDA = "NVDA"
#: Keys which have been trapped by NVDA and should not be passed to the OS.
trappedKeys=set()
#: Tracks the number of keys passed through by request of the user.
#: If -1, pass through is disabled.
#: If 0 or higher then key downs and key ups will be passed straight through.
passKeyThroughCount=-1
#: The last key down passed through by request of the user.
lastPassThroughKeyDown = None
#: The last NVDA modifier key that was pressed with no subsequent key presses.
lastNVDAModifier = None
#: When the last NVDA modifier key was released.
lastNVDAModifierReleaseTime = None
#: Indicates that the NVDA modifier's special functionality should be bypassed until a key is next released.
bypassNVDAModifier = False
#: The modifiers currently being pressed.
currentModifiers = set()
#: A counter which is incremented each time a key is pressed.
#: Note that this may be removed in future, so reliance on it should generally be avoided.
#: @type: int
keyCounter = 0
#: The current sticky NVDA modifier key.
stickyNVDAModifier = None
#: Whether the sticky NVDA modifier is locked.
stickyNVDAModifierLocked = False
_ignoreInjectionLock = threading.Lock()
@contextmanager
def ignoreInjection():
"""Context manager that allows ignoring injected keys temporarily by using a with statement."""
global ignoreInjected
with _ignoreInjectionLock:
ignoreInjected=True
yield
ignoreInjected=False
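# A minimal usage sketch: KeyboardInputGesture.send (below) wraps its
# synthetic winUser.keybd_event calls in this context manager so that the
# hooks in this module do not re-process NVDA's own injected input, e.g.:
#     with ignoreInjection():
#         winUser.keybd_event(vk, scan, flags, 0)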
def passNextKeyThrough():
global passKeyThroughCount
if passKeyThroughCount==-1:
passKeyThroughCount=0
def isNVDAModifierKey(vkCode,extended):
if config.conf["keyboard"]["useNumpadInsertAsNVDAModifierKey"] and vkCode==winUser.VK_INSERT and not extended:
return True
elif config.conf["keyboard"]["useExtendedInsertAsNVDAModifierKey"] and vkCode==winUser.VK_INSERT and extended:
return True
elif config.conf["keyboard"]["useCapsLockAsNVDAModifierKey"] and vkCode==winUser.VK_CAPITAL:
return True
else:
return False
SUPPORTED_NVDA_MODIFIER_KEYS = ("capslock", "numpadinsert", "insert")
def getNVDAModifierKeys():
keys=[]
if config.conf["keyboard"]["useExtendedInsertAsNVDAModifierKey"]:
keys.append(vkCodes.byName["insert"])
if config.conf["keyboard"]["useNumpadInsertAsNVDAModifierKey"]:
keys.append(vkCodes.byName["numpadinsert"])
if config.conf["keyboard"]["useCapsLockAsNVDAModifierKey"]:
keys.append(vkCodes.byName["capslock"])
return keys
def internal_keyDownEvent(vkCode,scanCode,extended,injected):
"""Event called by winInputHook when it receives a keyDown.
"""
gestureExecuted=False
try:
global lastNVDAModifier, lastNVDAModifierReleaseTime, bypassNVDAModifier, passKeyThroughCount, lastPassThroughKeyDown, currentModifiers, keyCounter, stickyNVDAModifier, stickyNVDAModifierLocked
# Injected keys should be ignored in some cases.
if injected and (ignoreInjected or not config.conf['keyboard']['handleInjectedKeys']):
return True
keyCode = (vkCode, extended)
if passKeyThroughCount >= 0:
# We're passing keys through.
if lastPassThroughKeyDown != keyCode:
# Increment the pass key through count.
# We only do this if this isn't a repeat of the previous key down, as we don't receive key ups for repeated key downs.
passKeyThroughCount += 1
lastPassThroughKeyDown = keyCode
return True
keyCounter += 1
stickyKeysFlags = winUser.getSystemStickyKeys().dwFlags
if stickyNVDAModifier and not stickyKeysFlags & winUser.SKF_STICKYKEYSON:
# Sticky keys has been disabled,
# so clear the sticky NVDA modifier.
currentModifiers.discard(stickyNVDAModifier)
stickyNVDAModifier = None
stickyNVDAModifierLocked = False
gesture = KeyboardInputGesture(currentModifiers, vkCode, scanCode, extended)
if not (stickyKeysFlags & winUser.SKF_STICKYKEYSON) and (bypassNVDAModifier or (keyCode == lastNVDAModifier and lastNVDAModifierReleaseTime and time.time() - lastNVDAModifierReleaseTime < 0.5)):
# The user wants the key to serve its normal function instead of acting as an NVDA modifier key.
# There may be key repeats, so ensure we do this until they stop.
bypassNVDAModifier = True
gesture.isNVDAModifierKey = False
lastNVDAModifierReleaseTime = None
if gesture.isNVDAModifierKey:
lastNVDAModifier = keyCode
if stickyKeysFlags & winUser.SKF_STICKYKEYSON:
if keyCode == stickyNVDAModifier:
if stickyKeysFlags & winUser.SKF_TRISTATE and not stickyNVDAModifierLocked:
# The NVDA modifier is being locked.
stickyNVDAModifierLocked = True
if stickyKeysFlags & winUser.SKF_AUDIBLEFEEDBACK:
tones.beep(1984, 60)
return False
else:
# The NVDA modifier is being unlatched/unlocked.
stickyNVDAModifier = None
stickyNVDAModifierLocked = False
if stickyKeysFlags & winUser.SKF_AUDIBLEFEEDBACK:
tones.beep(496, 60)
return False
else:
# The NVDA modifier is being latched.
if stickyNVDAModifier:
# Clear the previous sticky NVDA modifier.
currentModifiers.discard(stickyNVDAModifier)
stickyNVDAModifierLocked = False
stickyNVDAModifier = keyCode
if stickyKeysFlags & winUser.SKF_AUDIBLEFEEDBACK:
tones.beep(1984, 60)
else:
# Another key was pressed after the last NVDA modifier key, so it should not be passed through on the next press.
lastNVDAModifier = None
if gesture.isModifier:
if gesture.speechEffectWhenExecuted in (gesture.SPEECHEFFECT_PAUSE, gesture.SPEECHEFFECT_RESUME) and keyCode in currentModifiers:
# Ignore key repeats for the pause speech key to avoid speech stuttering as it continually pauses and resumes.
return True
currentModifiers.add(keyCode)
elif stickyNVDAModifier and not stickyNVDAModifierLocked:
# A non-modifier was pressed, so unlatch the NVDA modifier.
currentModifiers.discard(stickyNVDAModifier)
stickyNVDAModifier = None
try:
inputCore.manager.executeGesture(gesture)
gestureExecuted=True
trappedKeys.add(keyCode)
if canModifiersPerformAction(gesture.generalizedModifiers):
# #3472: These modifiers can perform an action if pressed alone
# and we've just consumed the main key.
			# Send the special reserved vkCode (0xff) to at least notify the app's key state that something happened.
# This allows alt and windows to be bound to scripts and
# stops control+shift from switching keyboard layouts in cursorManager selection scripts.
KeyboardInputGesture((),0xff,0,False).send()
return False
except inputCore.NoInputGestureAction:
if gesture.isNVDAModifierKey:
# Never pass the NVDA modifier key to the OS.
trappedKeys.add(keyCode)
return False
except:
log.error("internal_keyDownEvent", exc_info=True)
finally:
# #6017: handle typed characters in Win10 RS2 and above where we can't detect typed characters in-process
# This code must be in the 'finally' block as code above returns in several places yet we still want to execute this particular code.
focus=api.getFocusObject()
from NVDAObjects.behaviors import KeyboardHandlerBasedTypedCharSupport
if (
# This is only possible in Windows 10 1607 and above
winVersion.isWin10(1607)
# And we only want to do this if the gesture did not result in an executed action
and not gestureExecuted
# and not if this gesture is a modifier key
and not isNVDAModifierKey(vkCode,extended) and not vkCode in KeyboardInputGesture.NORMAL_MODIFIER_KEYS
and ( # Either of
				# We couldn't inject in-process, and it's not a legacy console window without keyboard support.
				# Console windows have their own specific typed character support.
(not focus.appModule.helperLocalBindingHandle and focus.windowClassName!='ConsoleWindowClass')
# or the focus is within a UWP app, where WM_CHAR never gets sent
or focus.windowClassName.startswith('Windows.UI.Core')
#Or this is a console with keyboard support, where WM_CHAR messages are doubled
or isinstance(focus, KeyboardHandlerBasedTypedCharSupport)
)
):
keyStates=(ctypes.c_byte*256)()
for k in range(256):
keyStates[k]=ctypes.windll.user32.GetKeyState(k)
charBuf=ctypes.create_unicode_buffer(5)
hkl=ctypes.windll.user32.GetKeyboardLayout(focus.windowThreadID)
# In previous Windows builds, calling ToUnicodeEx would destroy keyboard buffer state and therefore cause the app to not produce the right WM_CHAR message.
# However, ToUnicodeEx now can take a new flag of 0x4, which stops it from destroying keyboard state, thus allowing us to safely call it here.
res=ctypes.windll.user32.ToUnicodeEx(vkCode,scanCode,keyStates,charBuf,len(charBuf),0x4,hkl)
if res>0:
for ch in charBuf[:res]:
eventHandler.queueEvent("typedCharacter",focus,ch=ch)
return True
def internal_keyUpEvent(vkCode,scanCode,extended,injected):
"""Event called by winInputHook when it receives a keyUp.
"""
try:
global lastNVDAModifier, lastNVDAModifierReleaseTime, bypassNVDAModifier, passKeyThroughCount, lastPassThroughKeyDown, currentModifiers
# Injected keys should be ignored in some cases.
if injected and (ignoreInjected or not config.conf['keyboard']['handleInjectedKeys']):
return True
keyCode = (vkCode, extended)
if passKeyThroughCount >= 1:
if lastPassThroughKeyDown == keyCode:
# This key has been released.
lastPassThroughKeyDown = None
passKeyThroughCount -= 1
if passKeyThroughCount == 0:
passKeyThroughCount = -1
return True
if lastNVDAModifier and keyCode == lastNVDAModifier:
# The last pressed NVDA modifier key is being released and there were no key presses in between.
# The user may want to press it again quickly to pass it through.
lastNVDAModifierReleaseTime = time.time()
# If we were bypassing the NVDA modifier, stop doing so now, as there will be no more repeats.
bypassNVDAModifier = False
if keyCode != stickyNVDAModifier:
currentModifiers.discard(keyCode)
# help inputCore manage its sayAll state for keyboard modifiers -- inputCore itself has no concept of key releases
if not currentModifiers:
inputCore.manager.lastModifierWasInSayAll=False
if keyCode in trappedKeys:
trappedKeys.remove(keyCode)
return False
except:
log.error("", exc_info=True)
return True
#Register internal key press event with operating system
def initialize():
"""Initialises keyboard support."""
winInputHook.initialize()
winInputHook.setCallbacks(keyDown=internal_keyDownEvent,keyUp=internal_keyUpEvent)
def terminate():
winInputHook.terminate()
def getInputHkl():
"""Obtain the hkl currently being used for input.
This retrieves the hkl from the thread of the focused window.
"""
focus = api.getFocusObject()
if focus:
thread = focus.windowThreadID
else:
thread = 0
return winUser.user32.GetKeyboardLayout(thread)
def canModifiersPerformAction(modifiers):
"""Determine whether given generalized modifiers can perform an action if pressed alone.
For example, alt activates the menu bar if it isn't modifying another key.
"""
if inputCore.manager.isInputHelpActive:
return False
control = shift = other = False
for vk, ext in modifiers:
if vk in (winUser.VK_MENU, VK_WIN):
# Alt activates the menu bar.
# Windows activates the Start Menu.
return True
elif vk == winUser.VK_CONTROL:
control = True
elif vk == winUser.VK_SHIFT:
shift = True
elif (vk, ext) not in trappedKeys :
# Trapped modifiers aren't relevant.
other = True
if control and shift and not other:
# Shift+control switches keyboard layouts.
return True
return False
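# For example (hypothetical calls): a lone alt press can activate the menu bar,
# so canModifiersPerformAction({(winUser.VK_MENU, False)}) returns True,
# whereas an untrapped control alone returns False.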
class KeyboardInputGesture(inputCore.InputGesture):
"""A key pressed on the traditional system keyboard.
"""
#: All normal modifier keys, where modifier vk codes are mapped to a more general modifier vk code or C{None} if not applicable.
#: @type: dict
NORMAL_MODIFIER_KEYS = {
winUser.VK_LCONTROL: winUser.VK_CONTROL,
winUser.VK_RCONTROL: winUser.VK_CONTROL,
winUser.VK_CONTROL: None,
winUser.VK_LSHIFT: winUser.VK_SHIFT,
winUser.VK_RSHIFT: winUser.VK_SHIFT,
winUser.VK_SHIFT: None,
winUser.VK_LMENU: winUser.VK_MENU,
winUser.VK_RMENU: winUser.VK_MENU,
winUser.VK_MENU: None,
winUser.VK_LWIN: VK_WIN,
winUser.VK_RWIN: VK_WIN,
VK_WIN: None,
}
#: All possible toggle key vk codes.
#: @type: frozenset
TOGGLE_KEYS = frozenset((winUser.VK_CAPITAL, winUser.VK_NUMLOCK, winUser.VK_SCROLL))
#: All possible keyboard layouts, where layout names are mapped to localised layout names.
#: @type: dict
LAYOUTS = {
# Translators: One of the keyboard layouts for NVDA.
"desktop": _("desktop"),
# Translators: One of the keyboard layouts for NVDA.
"laptop": _("laptop"),
}
@classmethod
def getVkName(cls, vkCode, isExtended):
if isinstance(vkCode, str):
return vkCode
name = vkCodes.byCode.get((vkCode, isExtended))
if not name and isExtended is not None:
# Whether the key is extended doesn't matter for many keys, so try None.
name = vkCodes.byCode.get((vkCode, None))
return name if name else ""
def __init__(self, modifiers, vkCode, scanCode, isExtended):
#: The keyboard layout in which this gesture was created.
#: @type: str
self.layout = config.conf["keyboard"]["keyboardLayout"]
self.modifiers = modifiers = set(modifiers)
# Don't double up if this is a modifier key repeat.
modifiers.discard((vkCode, isExtended))
if vkCode in (winUser.VK_DIVIDE, winUser.VK_MULTIPLY, winUser.VK_SUBTRACT, winUser.VK_ADD) and winUser.getKeyState(winUser.VK_NUMLOCK) & 1:
# Some numpad keys have the same vkCode regardless of numlock.
# For these keys, treat numlock as a modifier.
modifiers.add((winUser.VK_NUMLOCK, False))
self.generalizedModifiers = set((self.NORMAL_MODIFIER_KEYS.get(mod) or mod, extended) for mod, extended in modifiers)
self.vkCode = vkCode
self.scanCode = scanCode
self.isExtended = isExtended
super(KeyboardInputGesture, self).__init__()
def _get_bypassInputHelp(self):
		# #4226: Numlock must always be handled normally, otherwise the keyboard controller and Windows can get out of sync with each other in regard to this key state.
return self.vkCode==winUser.VK_NUMLOCK
def _get_isNVDAModifierKey(self):
return isNVDAModifierKey(self.vkCode, self.isExtended)
def _get_isModifier(self):
return self.vkCode in self.NORMAL_MODIFIER_KEYS or self.isNVDAModifierKey
def _get_mainKeyName(self):
if self.isNVDAModifierKey:
return "NVDA"
name = self.getVkName(self.vkCode, self.isExtended)
if name:
return name
if 32 < self.vkCode < 128:
return chr(self.vkCode).lower()
if self.vkCode == vkCodes.VK_PACKET:
# Unicode character from non-keyboard input.
return chr(self.scanCode)
vkChar = winUser.user32.MapVirtualKeyExW(self.vkCode, winUser.MAPVK_VK_TO_CHAR, getInputHkl())
if vkChar>0:
if vkChar == 43: # "+"
# A gesture identifier can't include "+" except as a separator.
return "plus"
return chr(vkChar).lower()
if self.vkCode == 0xFF:
# #3468: This key is unknown to Windows.
# GetKeyNameText often returns something inappropriate in these cases
# due to disregarding the extended flag.
return "unknown_%02x" % self.scanCode
return winUser.getKeyNameText(self.scanCode, self.isExtended)
def _get_modifierNames(self):
modTexts = []
for modVk, modExt in self.generalizedModifiers:
if isNVDAModifierKey(modVk, modExt):
modTexts.append("NVDA")
else:
modTexts.append(self.getVkName(modVk, None))
return modTexts
def _get__keyNamesInDisplayOrder(self):
return tuple(self.modifierNames) + (self.mainKeyName,)
def _get_displayName(self):
return "+".join(
# Translators: Reported for an unknown key press.
# %s will be replaced with the key code.
_("unknown %s") % key[8:] if key.startswith("unknown_")
else localizedKeyLabels.get(key.lower(), key) for key in self._keyNamesInDisplayOrder)
def _get_identifiers(self):
keyName = "+".join(self._keyNamesInDisplayOrder)
return (
u"kb({layout}):{key}".format(layout=self.layout, key=keyName),
u"kb:{key}".format(key=keyName)
)
def _get_shouldReportAsCommand(self):
if self.isExtended and winUser.VK_VOLUME_MUTE <= self.vkCode <= winUser.VK_VOLUME_UP:
# Don't report volume controlling keys.
return False
if self.vkCode == 0xFF:
# #3468: This key is unknown to Windows.
# This could be for an event such as gyroscope movement,
# so don't report it.
return False
if self.vkCode in self.TOGGLE_KEYS:
			# #5490: Don't report for keys that toggle on and off.
# This is to avoid them from being reported twice: once by the 'speak command keys' feature,
# and once to announce that the state has changed.
return False
return not self.isCharacter
def _get_isCharacter(self):
# Aside from space, a key name of more than 1 character is a potential command and therefore is not a character.
if self.vkCode != winUser.VK_SPACE and len(self.mainKeyName) > 1:
return False
# If this key has modifiers other than shift, it is a command and not a character; e.g. shift+f is a character, but control+f is a command.
modifiers = self.generalizedModifiers
if modifiers and (len(modifiers) > 1 or tuple(modifiers)[0][0] != winUser.VK_SHIFT):
return False
return True
def _get_speechEffectWhenExecuted(self):
if inputCore.manager.isInputHelpActive:
return self.SPEECHEFFECT_CANCEL
if self.isExtended and winUser.VK_VOLUME_MUTE <= self.vkCode <= winUser.VK_VOLUME_UP:
return None
if self.vkCode == 0xFF:
# #3468: This key is unknown to Windows.
# This could be for an event such as gyroscope movement,
# so don't interrupt speech.
return None
if not config.conf['keyboard']['speechInterruptForCharacters'] and (not self.shouldReportAsCommand or self.vkCode in (winUser.VK_SHIFT, winUser.VK_LSHIFT, winUser.VK_RSHIFT)):
return None
if self.vkCode==winUser.VK_RETURN and not config.conf['keyboard']['speechInterruptForEnter']:
return None
if self.vkCode in (winUser.VK_SHIFT, winUser.VK_LSHIFT, winUser.VK_RSHIFT):
return self.SPEECHEFFECT_RESUME if speech.isPaused else self.SPEECHEFFECT_PAUSE
return self.SPEECHEFFECT_CANCEL
def reportExtra(self):
if self.vkCode in self.TOGGLE_KEYS:
core.callLater(30, self._reportToggleKey)
def _reportToggleKey(self):
toggleState = winUser.getKeyState(self.vkCode) & 1
key = self.mainKeyName
ui.message(u"{key} {state}".format(
key=localizedKeyLabels.get(key.lower(), key),
state=_("on") if toggleState else _("off")))
def send(self):
keys = []
for vk, ext in self.generalizedModifiers:
if vk == VK_WIN:
if winUser.getKeyState(winUser.VK_LWIN) & 32768 or winUser.getKeyState(winUser.VK_RWIN) & 32768:
# Already down.
continue
vk = winUser.VK_LWIN
elif winUser.getKeyState(vk) & 32768:
# Already down.
continue
keys.append((vk, 0, ext))
keys.append((self.vkCode, self.scanCode, self.isExtended))
with ignoreInjection():
if winUser.getKeyState(self.vkCode) & 32768:
# This key is already down, so send a key up for it first.
winUser.keybd_event(self.vkCode, self.scanCode, self.isExtended + 2, 0)
# Send key down events for these keys.
for vk, scan, ext in keys:
winUser.keybd_event(vk, scan, ext, 0)
# Send key up events for the keys in reverse order.
for vk, scan, ext in reversed(keys):
winUser.keybd_event(vk, scan, ext + 2, 0)
if not queueHandler.isPendingItems(queueHandler.eventQueue):
			# We want to guarantee that by the time
			# this function returns, the keyboard input generated
			# has been injected and NVDA has received and processed it.
time.sleep(0.01)
wx.Yield()
@classmethod
def fromName(cls, name):
"""Create an instance given a key name.
@param name: The key name.
@type name: str
@return: A gesture for the specified key.
@rtype: L{KeyboardInputGesture}
"""
keyNames = name.split("+")
keys = []
for keyName in keyNames:
if keyName == "plus":
# A key name can't include "+" except as a separator.
keyName = "+"
if keyName == VK_WIN:
vk = winUser.VK_LWIN
ext = False
elif keyName.lower() == VK_NVDA.lower():
vk, ext = getNVDAModifierKeys()[0]
elif len(keyName) == 1:
ext = False
requiredMods, vk = winUser.VkKeyScanEx(keyName, getInputHkl())
if requiredMods & 1:
keys.append((winUser.VK_SHIFT, False))
if requiredMods & 2:
keys.append((winUser.VK_CONTROL, False))
if requiredMods & 4:
keys.append((winUser.VK_MENU, False))
# Not sure whether we need to support the Hankaku modifier (& 8).
else:
vk, ext = vkCodes.byName[keyName.lower()]
if ext is None:
ext = False
keys.append((vk, ext))
if not keys:
raise ValueError
return cls(keys[:-1], vk, 0, ext)
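	# A minimal usage sketch (hypothetical): build and send control+f
	# programmatically:
	#     KeyboardInputGesture.fromName("control+f").send()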
RE_IDENTIFIER = re.compile(r"^kb(?:\((.+?)\))?:(.*)$")
@classmethod
def getDisplayTextForIdentifier(cls, identifier):
layout, keys = cls.RE_IDENTIFIER.match(identifier).groups()
dispSource = None
if layout:
try:
# Translators: Used when describing keys on the system keyboard with a particular layout.
# %s is replaced with the layout name.
# For example, in English, this might produce "laptop keyboard".
dispSource = _("%s keyboard") % cls.LAYOUTS[layout]
except KeyError:
pass
if not dispSource:
# Translators: Used when describing keys on the system keyboard applying to all layouts.
dispSource = _("keyboard, all layouts")
keys = set(keys.split("+"))
names = []
main = None
try:
# If present, the NVDA key should appear first.
keys.remove("nvda")
names.append("NVDA")
except KeyError:
pass
for key in keys:
try:
# vkCodes.byName values are (vk, ext)
vk = vkCodes.byName[key][0]
except KeyError:
# This could be a fake vk.
vk = key
label = localizedKeyLabels.get(key, key)
if vk in cls.NORMAL_MODIFIER_KEYS:
names.append(label)
else:
# The main key must be last, so handle that outside the loop.
main = label
names.append(main)
return dispSource, "+".join(names)
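# Illustrative sketch (hypothetical identifier; the exact labels depend on
# localizedKeyLabels and the active translation):
#
#   KeyboardInputGesture.getDisplayTextForIdentifier("kb(laptop):nvda+control+t")
#   # -> a ("laptop keyboard", "NVDA+control+t")-style tuple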
inputCore.registerGestureSource("kb", KeyboardInputGesture)
def injectRawKeyboardInput(isPress, code, isExtended):
"""Inject raw input from a system keyboard that is not handled natively by Windows.
For example, this might be used for input from a QWERTY keyboard on a braille display.
NVDA will treat the key as if it had been pressed on a normal system keyboard.
If it is not handled by NVDA, it will be sent to the operating system.
@param isPress: Whether the key is being pressed.
@type isPress: bool
@param code: The scan code (PC set 1) of the key.
@type code: int
@param isExtended: Whether this is an extended key.
@type isExtended: bool
"""
mapScan = code
if isExtended:
# Change what we pass to MapVirtualKeyEx, but don't change what NVDA gets.
mapScan |= 0xE000
vkCode = winUser.user32.MapVirtualKeyExW(mapScan, winUser.MAPVK_VSC_TO_VK_EX, getInputHkl())
if isPress:
shouldSend = internal_keyDownEvent(vkCode, code, isExtended, False)
else:
shouldSend = internal_keyUpEvent(vkCode, code, isExtended, False)
if shouldSend:
flags = 0
if not isPress:
flags |= 2
if isExtended:
flags |= 1
with ignoreInjection():
winUser.keybd_event(vkCode, code, flags, None)
wx.Yield()
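# Sketch of a hypothetical caller, e.g. a braille display driver forwarding a
# QWERTY key press and release (0x1E is the PC set 1 scan code for "A"):
#
#   injectRawKeyboardInput(True, 0x1E, False)
#   injectRawKeyboardInput(False, 0x1E, False)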
| 1 | 28,140 | How likely would it be that the keyboard layout for the NVDA main thread differs from the keyboard layout of the currently focused app? | nvaccess-nvda | py |
@@ -198,6 +198,10 @@ class Driver extends webdriver.WebDriver {
* @return {!Driver} A new driver instance.
*/
static createSession(options, service = getDefaultService()) {
+ if (!service) {
+ service = getDefaultService();
+ }
+
let client = service.start().then(url => new http.HttpClient(url));
let executor = new http.Executor(client);
| 1 | // Licensed to the Software Freedom Conservancy (SFC) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The SFC licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
/**
* @fileoverview Defines a {@linkplain Driver WebDriver} client for
* Microsoft's Edge web browser. Before using this module,
* you must download and install the latest
* [MicrosoftEdgeDriver](http://go.microsoft.com/fwlink/?LinkId=619687) server.
* Ensure that the MicrosoftEdgeDriver is on your
* [PATH](http://en.wikipedia.org/wiki/PATH_%28variable%29).
*
* There are three primary classes exported by this module:
*
* 1. {@linkplain ServiceBuilder}: configures the
* {@link ./remote.DriverService remote.DriverService}
* that manages the [MicrosoftEdgeDriver] child process.
*
* 2. {@linkplain Options}: defines configuration options for each new
* MicrosoftEdgeDriver session, such as which
* {@linkplain Options#setProxy proxy} to use when starting the browser.
*
* 3. {@linkplain Driver}: the WebDriver client; each new instance will control
* a unique browser session.
*
* __Customizing the MicrosoftEdgeDriver Server__ <a id="custom-server"></a>
*
* By default, every MicrosoftEdge session will use a single driver service,
* which is started the first time a {@link Driver} instance is created and
* terminated when this process exits. The default service will inherit its
* environment from the current process.
* You may obtain a handle to this default service using
* {@link #getDefaultService getDefaultService()} and change its configuration
* with {@link #setDefaultService setDefaultService()}.
*
* You may also create a {@link Driver} with its own driver service. This is
* useful if you need to capture the server's log output for a specific session:
*
* var edge = require('selenium-webdriver/edge');
*
* var service = new edge.ServiceBuilder()
* .setPort(55555)
* .build();
*
* var options = new edge.Options();
* // configure browser options ...
*
* var driver = edge.Driver.createSession(options, service);
*
* Users should only instantiate the {@link Driver} class directly when they
* need a custom driver service configuration (as shown above). For normal
* operation, users should start MicrosoftEdge using the
* {@link ./builder.Builder selenium-webdriver.Builder}.
*
* [MicrosoftEdgeDriver]: https://msdn.microsoft.com/en-us/library/mt188085(v=vs.85).aspx
*/
'use strict';
const fs = require('fs');
const util = require('util');
const http = require('./http');
const io = require('./io');
const portprober = require('./net/portprober');
const promise = require('./lib/promise');
const remote = require('./remote');
const Symbols = require('./lib/symbols');
const webdriver = require('./lib/webdriver');
const {Browser, Capabilities} = require('./lib/capabilities');
const EDGEDRIVER_EXE = 'MicrosoftWebDriver.exe';
/**
* _Synchronously_ attempts to locate the edge driver executable on the current
* system.
*
* @return {?string} the located executable, or `null`.
*/
function locateSynchronously() {
return process.platform === 'win32'
? io.findInPath(EDGEDRIVER_EXE, true) : null;
}
/**
* Class for managing MicrosoftEdgeDriver-specific options.
*/
class Options extends Capabilities {
/**
* @param {(Capabilities|Map<string, ?>|Object)=} other Another set of
* capabilities to initialize this instance from.
*/
constructor(other = undefined) {
super(other);
this.setBrowserName(Browser.EDGE);
}
}
/**
* Creates {@link remote.DriverService} instances that manage a
* MicrosoftEdgeDriver server in a child process.
*/
class ServiceBuilder extends remote.DriverService.Builder {
/**
* @param {string=} opt_exe Path to the server executable to use. If omitted,
* the builder will attempt to locate the MicrosoftEdgeDriver on the current
* PATH.
* @throws {Error} If the provided executable does not exist, or the
* MicrosoftEdgeDriver cannot be found on the PATH.
*/
constructor(opt_exe) {
let exe = opt_exe || locateSynchronously();
if (!exe) {
throw Error(
'The ' + EDGEDRIVER_EXE + ' could not be found on the current PATH. ' +
'Please download the latest version of the MicrosoftEdgeDriver from ' +
'https://www.microsoft.com/en-us/download/details.aspx?id=48212 and ' +
'ensure it can be found on your PATH.');
}
super(exe);
// Binding to the loopback address will fail if not running with
// administrator privileges. Since we cannot test for that in script
// (or can we?), force the DriverService to use "localhost".
this.setHostname('localhost');
}
/**
* Enables verbose logging.
* @return {!ServiceBuilder} A self reference.
*/
enableVerboseLogging() {
return this.addArguments('--verbose');
}
}
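// A small usage sketch (assumes MicrosoftWebDriver.exe is on the PATH, as the
// constructor above requires):
//
//   let service = new ServiceBuilder()
//       .enableVerboseLogging()
//       .build();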
/** @type {remote.DriverService} */
var defaultService = null;
/**
* Sets the default service to use for new MicrosoftEdgeDriver instances.
* @param {!remote.DriverService} service The service to use.
* @throws {Error} If the default service is currently running.
*/
function setDefaultService(service) {
if (defaultService && defaultService.isRunning()) {
throw Error(
'The previously configured EdgeDriver service is still running. ' +
'You must shut it down before you may adjust its configuration.');
}
defaultService = service;
}
/**
* Returns the default MicrosoftEdgeDriver service. If such a service has
* not been configured, one will be constructed using the default configuration
* for a MicrosoftEdgeDriver executable found on the system PATH.
* @return {!remote.DriverService} The default MicrosoftEdgeDriver service.
*/
function getDefaultService() {
if (!defaultService) {
defaultService = new ServiceBuilder().build();
}
return defaultService;
}
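// Sketch of configuring the default service before any Driver is created
// (the port is illustrative; compare the example in the file overview):
//
//   setDefaultService(new ServiceBuilder().setPort(55555).build());
//   let driver = Driver.createSession(new Options());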
/**
* Creates a new WebDriver client for Microsoft's Edge.
*/
class Driver extends webdriver.WebDriver {
/**
* Creates a new browser session for Microsoft's Edge browser.
*
* @param {(Capabilities|Options)=} options The configuration options.
* @param {remote.DriverService=} service The session to use; will use
* the {@linkplain #getDefaultService default service} by default.
* @return {!Driver} A new driver instance.
*/
static createSession(options, service = getDefaultService()) {
let client = service.start().then(url => new http.HttpClient(url));
let executor = new http.Executor(client);
options = options || new Options();
return /** @type {!Driver} */(super.createSession(
executor, options, () => service.kill()));
}
/**
* This function is a no-op as file detectors are not supported by this
* implementation.
* @override
*/
setFileDetector() {}
}
// PUBLIC API
exports.Driver = Driver;
exports.Options = Options;
exports.ServiceBuilder = ServiceBuilder;
exports.getDefaultService = getDefaultService;
exports.setDefaultService = setDefaultService;
exports.locateSynchronously = locateSynchronously;
| 1 | 15,395 | Would you mind removing the default parameter above? (I doubt I'll ever use defaults again since you still have to protect against callers explicitly passing `null` or `undefined`) | SeleniumHQ-selenium | rb |
@@ -2186,7 +2186,7 @@ class WebElement {
if (!this.driver_.fileDetector_) {
return this.schedule_(
new command.Command(command.Name.SEND_KEYS_TO_ELEMENT).
- setParameter('text', keys).
+ setParameter('text', keys.then(keys => keys.join(''))).
setParameter('value', keys),
'WebElement.sendKeys()');
} | 1 | // Licensed to the Software Freedom Conservancy (SFC) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The SFC licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
/**
* @fileoverview The heart of the WebDriver JavaScript API.
*/
'use strict';
const actions = require('./actions');
const by = require('./by');
const Capabilities = require('./capabilities').Capabilities;
const command = require('./command');
const error = require('./error');
const input = require('./input');
const logging = require('./logging');
const {Session} = require('./session');
const Symbols = require('./symbols');
const promise = require('./promise');
/**
* Defines a condition for use with WebDriver's {@linkplain WebDriver#wait wait
* command}.
*
* @template OUT
*/
class Condition {
/**
* @param {string} message A descriptive error message. Should complete the
* sentence "Waiting [...]"
* @param {function(!WebDriver): OUT} fn The condition function to
* evaluate on each iteration of the wait loop.
*/
constructor(message, fn) {
/** @private {string} */
this.description_ = 'Waiting ' + message;
/** @type {function(!WebDriver): OUT} */
this.fn = fn;
}
/** @return {string} A description of this condition. */
description() {
return this.description_;
}
}
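// For illustration, a hand-rolled condition (the names are hypothetical):
//
//   let titleHasFoo = new Condition(
//       'for title to contain "foo"',
//       driver => driver.getTitle().then(t => t.indexOf('foo') !== -1));
//   // later: driver.wait(titleHasFoo, 5000);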
/**
* Defines a condition that will result in a {@link WebElement}.
*
* @extends {Condition<!(WebElement|IThenable<!WebElement>)>}
*/
class WebElementCondition extends Condition {
/**
* @param {string} message A descriptive error message. Should complete the
* sentence "Waiting [...]"
* @param {function(!WebDriver): !(WebElement|IThenable<!WebElement>)}
* fn The condition function to evaluate on each iteration of the wait
* loop.
*/
constructor(message, fn) {
super(message, fn);
}
}
//////////////////////////////////////////////////////////////////////////////
//
// WebDriver
//
//////////////////////////////////////////////////////////////////////////////
/**
* Translates a command to its wire-protocol representation before passing it
* to the given `executor` for execution.
* @param {!command.Executor} executor The executor to use.
* @param {!command.Command} command The command to execute.
* @return {!Promise} A promise that will resolve with the command response.
*/
function executeCommand(executor, command) {
return toWireValue(command.getParameters()).
then(function(parameters) {
command.setParameters(parameters);
return executor.execute(command);
});
}
/**
* Converts an object to its JSON representation in the WebDriver wire protocol.
* When converting values of type object, the following steps will be taken:
* <ol>
* <li>if the object is a WebElement, the return value will be the element's
* server ID
* <li>if the object defines a {@link Symbols.serialize} method, this algorithm
* will be recursively applied to the object's serialized representation
* <li>if the object provides a "toJSON" function, this algorithm will
* recursively be applied to the result of that function
* <li>otherwise, the value of each key will be recursively converted according
* to the rules above.
* </ol>
*
* @param {*} obj The object to convert.
* @return {!Promise<?>} A promise that will resolve to the input value's JSON
* representation.
*/
function toWireValue(obj) {
if (promise.isPromise(obj)) {
return Promise.resolve(obj).then(toWireValue);
}
return Promise.resolve(convertValue(obj));
}
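// Illustrative sketch (hypothetical values): toWireValue({n: 1, el: element})
// resolves to {n: 1, el: <the element's wire-protocol ID object>}, following
// the conversion rules documented above.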
function convertValue(value) {
if (value === void 0 || value === null) {
return value;
}
if (typeof value === 'boolean'
|| typeof value === 'number'
|| typeof value === 'string') {
return value;
}
if (Array.isArray(value)) {
return convertKeys(value);
}
if (typeof value === 'function') {
return '' + value;
}
if (typeof value[Symbols.serialize] === 'function') {
return toWireValue(value[Symbols.serialize]());
} else if (typeof value.toJSON === 'function') {
return toWireValue(value.toJSON());
}
return convertKeys(value);
}
function convertKeys(obj) {
const isArray = Array.isArray(obj);
const numKeys = isArray ? obj.length : Object.keys(obj).length;
const ret = isArray ? new Array(numKeys) : {};
if (!numKeys) {
return Promise.resolve(ret);
}
let numResolved = 0;
function forEachKey(obj, fn) {
if (Array.isArray(obj)) {
for (let i = 0, n = obj.length; i < n; i++) {
fn(obj[i], i);
}
} else {
for (let key in obj) {
fn(obj[key], key);
}
}
}
return new Promise(function(done, reject) {
forEachKey(obj, function(value, key) {
if (promise.isPromise(value)) {
value.then(toWireValue).then(setValue, reject);
} else {
value = convertValue(value);
if (promise.isPromise(value)) {
value.then(toWireValue).then(setValue, reject);
} else {
setValue(value);
}
}
function setValue(value) {
ret[key] = value;
maybeFulfill();
}
});
function maybeFulfill() {
if (++numResolved === numKeys) {
done(ret);
}
}
});
}
/**
* Converts a value from its JSON representation according to the WebDriver wire
* protocol. Any JSON object that defines a WebElement ID will be decoded to a
* {@link WebElement} object. All other values will be passed through as is.
*
* @param {!WebDriver} driver The driver to use as the parent of any unwrapped
* {@link WebElement} values.
* @param {*} value The value to convert.
* @return {*} The converted value.
*/
function fromWireValue(driver, value) {
if (Array.isArray(value)) {
value = value.map(v => fromWireValue(driver, v));
} else if (WebElement.isId(value)) {
let id = WebElement.extractId(value);
value = new WebElement(driver, id);
} else if (value && typeof value === 'object') {
let result = {};
for (let key in value) {
if (value.hasOwnProperty(key)) {
result[key] = fromWireValue(driver, value[key]);
}
}
value = result;
}
return value;
}
/**
* Structural interface for a WebDriver client.
*
* @record
*/
class IWebDriver {
/** @return {!promise.ControlFlow} The control flow used by this instance. */
controlFlow() {}
/**
* Schedules a {@link command.Command} to be executed by this driver's
* {@link command.Executor}.
*
* @param {!command.Command} command The command to schedule.
* @param {string} description A description of the command for debugging.
* @return {!promise.Thenable<T>} A promise that will be resolved
* with the command result.
* @template T
*/
schedule(command, description) {}
/**
* Sets the {@linkplain input.FileDetector file detector} that should be
* used with this instance.
* @param {input.FileDetector} detector The detector to use or {@code null}.
*/
setFileDetector(detector) {}
/**
* @return {!command.Executor} The command executor used by this instance.
*/
getExecutor() {}
/**
* @return {!promise.Thenable<!Session>} A promise for this client's session.
*/
getSession() {}
/**
* @return {!promise.Thenable<!Capabilities>} A promise that will resolve with
* the this instance's capabilities.
*/
getCapabilities() {}
/**
* Terminates the browser session. After calling quit, this instance will be
* invalidated and may no longer be used to issue commands against the
* browser.
*
* @return {!promise.Thenable<void>} A promise that will be resolved when the
* command has completed.
*/
quit() {}
/**
* Creates a new action sequence using this driver. The sequence will not be
* scheduled for execution until {@link actions.ActionSequence#perform} is
* called. Example:
*
* driver.actions().
* mouseDown(element1).
* mouseMove(element2).
* mouseUp().
* perform();
*
* @return {!actions.ActionSequence} A new action sequence for this instance.
*/
actions() {}
/**
* Creates a new touch sequence using this driver. The sequence will not be
* scheduled for execution until {@link actions.TouchSequence#perform} is
* called. Example:
*
* driver.touchActions().
* tap(element1).
* doubleTap(element2).
* perform();
*
* @return {!actions.TouchSequence} A new touch sequence for this instance.
*/
touchActions() {}
/**
* Schedules a command to execute JavaScript in the context of the currently
* selected frame or window. The script fragment will be executed as the body
* of an anonymous function. If the script is provided as a function object,
* that function will be converted to a string for injection into the target
* window.
*
* Any arguments provided in addition to the script will be included as script
* arguments and may be referenced using the {@code arguments} object.
* Arguments may be a boolean, number, string, or {@linkplain WebElement}.
* Arrays and objects may also be used as script arguments as long as each item
* adheres to the types previously mentioned.
*
* The script may refer to any variables accessible from the current window.
* Furthermore, the script will execute in the window's context, thus
* {@code document} may be used to refer to the current document. Any local
* variables will not be available once the script has finished executing,
* though global variables will persist.
*
* If the script has a return value (i.e. if the script contains a return
* statement), then the following steps will be taken for resolving this
* function's return value:
*
* - For an HTML element, the value will resolve to a {@linkplain WebElement}
* - Null and undefined return values will resolve to null
* - Booleans, numbers, and strings will resolve as is
* - Functions will resolve to their string representation
* - For arrays and objects, each member item will be converted according to
* the rules above
*
* @param {!(string|Function)} script The script to execute.
* @param {...*} var_args The arguments to pass to the script.
* @return {!promise.Thenable<T>} A promise that will resolve to the
* script's return value.
* @template T
*/
executeScript(script, var_args) {}
/**
* Schedules a command to execute asynchronous JavaScript in the context of the
* currently selected frame or window. The script fragment will be executed as
* the body of an anonymous function. If the script is provided as a function
* object, that function will be converted to a string for injection into the
* target window.
*
* Any arguments provided in addition to the script will be included as script
* arguments and may be referenced using the {@code arguments} object.
* Arguments may be a boolean, number, string, or {@code WebElement}.
* Arrays and objects may also be used as script arguments as long as each item
* adheres to the types previously mentioned.
*
* Unlike executing synchronous JavaScript with {@link #executeScript},
* scripts executed with this function must explicitly signal they are finished
* by invoking the provided callback. This callback will always be injected
* into the executed function as the last argument, and thus may be referenced
* with {@code arguments[arguments.length - 1]}. The following steps will be
* taken for resolving this function's return value against the first argument
* to the script's callback function:
*
* - For an HTML element, the value will resolve to a
* {@link WebElement}
* - Null and undefined return values will resolve to null
* - Booleans, numbers, and strings will resolve as is
* - Functions will resolve to their string representation
* - For arrays and objects, each member item will be converted according to
* the rules above
*
* __Example #1:__ Performing a sleep that is synchronized with the currently
* selected window:
*
* var start = new Date().getTime();
* driver.executeAsyncScript(
* 'window.setTimeout(arguments[arguments.length - 1], 500);').
* then(function() {
* console.log(
* 'Elapsed time: ' + (new Date().getTime() - start) + ' ms');
* });
*
* __Example #2:__ Synchronizing a test with an AJAX application:
*
* var button = driver.findElement(By.id('compose-button'));
* button.click();
* driver.executeAsyncScript(
* 'var callback = arguments[arguments.length - 1];' +
* 'mailClient.getComposeWindowWidget().onload(callback);');
* driver.switchTo().frame('composeWidget');
* driver.findElement(By.id('to')).sendKeys('dog@example.com');
*
* __Example #3:__ Injecting a XMLHttpRequest and waiting for the result. In
* this example, the inject script is specified with a function literal. When
* using this format, the function is converted to a string for injection, so it
* should not reference any symbols not defined in the scope of the page under
* test.
*
* driver.executeAsyncScript(function() {
* var callback = arguments[arguments.length - 1];
* var xhr = new XMLHttpRequest();
* xhr.open("GET", "/resource/data.json", true);
* xhr.onreadystatechange = function() {
* if (xhr.readyState == 4) {
* callback(xhr.responseText);
* }
* };
* xhr.send('');
* }).then(function(str) {
* console.log(JSON.parse(str)['food']);
* });
*
* @param {!(string|Function)} script The script to execute.
* @param {...*} var_args The arguments to pass to the script.
* @return {!promise.Thenable<T>} A promise that will resolve to the
* scripts return value.
* @template T
*/
executeAsyncScript(script, var_args) {}
/**
* Schedules a command to execute a custom function.
* @param {function(...): (T|IThenable<T>)} fn The function to execute.
* @param {Object=} opt_scope The object in whose scope to execute the function.
* @param {...*} var_args Any arguments to pass to the function.
* @return {!promise.Thenable<T>} A promise that will be resolved
* with the function's result.
* @template T
*/
call(fn, opt_scope, var_args) {}
/**
* Schedules a command to wait for a condition to hold. The condition may be
* specified by a {@link Condition}, as a custom function, or as any
* promise-like thenable.
*
* For a {@link Condition} or function, the wait will repeatedly
* evaluate the condition until it returns a truthy value. If any errors occur
* while evaluating the condition, they will be allowed to propagate. In the
* event a condition returns a {@link promise.Promise promise}, the polling
* loop will wait for it to be resolved and use the resolved value for whether
* the condition has been satisfied. Note the resolution time for a promise
* is factored into whether a wait has timed out.
*
* Note, if the provided condition is a {@link WebElementCondition}, then
* the wait will return a {@link WebElementPromise} that will resolve to the
* element that satisfied the condition.
*
* _Example:_ waiting up to 10 seconds for an element to be present on the
* page.
*
* var button = driver.wait(until.elementLocated(By.id('foo')), 10000);
* button.click();
*
* This function may also be used to block the command flow on the resolution
* of any thenable promise object. When given a promise, the command will
* simply wait for its resolution before completing. A timeout may be provided
* to fail the command if the promise does not resolve before the timeout
* expires.
*
* _Example:_ Suppose you have a function, `startTestServer`, that returns a
* promise for when a server is ready for requests. You can block a WebDriver
* client on this promise with:
*
* var started = startTestServer();
* driver.wait(started, 5 * 1000, 'Server should start within 5 seconds');
* driver.get(getServerUrl());
*
* @param {!(IThenable<T>|
* Condition<T>|
* function(!WebDriver): T)} condition The condition to
* wait on, defined as a promise, condition object, or a function to
* evaluate as a condition.
* @param {number=} opt_timeout How long to wait for the condition to be true.
* @param {string=} opt_message An optional message to use if the wait times
* out.
* @return {!(promise.Thenable<T>|WebElementPromise)} A promise that will be
* resolved with the first truthy value returned by the condition
* function, or rejected if the condition times out. If the input
* condition is an instance of a {@link WebElementCondition},
* the returned value will be a {@link WebElementPromise}.
* @throws {TypeError} if the provided `condition` is not a valid type.
* @template T
*/
wait(condition, opt_timeout, opt_message) {}
/**
* Schedules a command to make the driver sleep for the given amount of time.
* @param {number} ms The amount of time, in milliseconds, to sleep.
* @return {!promise.Thenable<void>} A promise that will be resolved
* when the sleep has finished.
*/
sleep(ms) {}
/**
* Schedules a command to retrieve the current window handle.
* @return {!promise.Thenable<string>} A promise that will be
* resolved with the current window handle.
*/
getWindowHandle() {}
/**
* Schedules a command to retrieve the current list of available window handles.
* @return {!promise.Thenable<!Array<string>>} A promise that will
* be resolved with an array of window handles.
*/
getAllWindowHandles() {}
/**
* Schedules a command to retrieve the current page's source. The page source
* returned is a representation of the underlying DOM: do not expect it to be
* formatted or escaped in the same way as the response sent from the web
* server.
* @return {!promise.Thenable<string>} A promise that will be
* resolved with the current page source.
*/
getPageSource() {}
/**
* Schedules a command to close the current window.
* @return {!promise.Thenable<void>} A promise that will be resolved
* when this command has completed.
*/
close() {}
/**
* Schedules a command to navigate to the given URL.
* @param {string} url The fully qualified URL to open.
* @return {!promise.Thenable<void>} A promise that will be resolved
* when the document has finished loading.
*/
get(url) {}
/**
* Schedules a command to retrieve the URL of the current page.
* @return {!promise.Thenable<string>} A promise that will be
* resolved with the current URL.
*/
getCurrentUrl() {}
/**
* Schedules a command to retrieve the current page's title.
* @return {!promise.Thenable<string>} A promise that will be
* resolved with the current page's title.
*/
getTitle() {}
/**
* Schedule a command to find an element on the page. If the element cannot be
* found, a {@link bot.ErrorCode.NO_SUCH_ELEMENT} result will be returned
* by the driver. Unlike other commands, this error cannot be suppressed. In
* other words, scheduling a command to find an element doubles as an assert
* that the element is present on the page. To test whether an element is
* present on the page, use {@link #findElements}:
*
* driver.findElements(By.id('foo'))
* .then(found => console.log('Element found? %s', !!found.length));
*
* The search criteria for an element may be defined using one of the
* factories in the {@link webdriver.By} namespace, or as a short-hand
* {@link webdriver.By.Hash} object. For example, the following two statements
* are equivalent:
*
* var e1 = driver.findElement(By.id('foo'));
* var e2 = driver.findElement({id:'foo'});
*
* You may also provide a custom locator function, which takes as input this
* instance and returns a {@link WebElement}, or a promise that will resolve
* to a WebElement. If the returned promise resolves to an array of
* WebElements, WebDriver will use the first element. For example, to find the
* first visible link on a page, you could write:
*
* var link = driver.findElement(firstVisibleLink);
*
* function firstVisibleLink(driver) {
* var links = driver.findElements(By.tagName('a'));
* return promise.filter(links, function(link) {
* return link.isDisplayed();
* });
* }
*
* @param {!(by.By|Function)} locator The locator to use.
* @return {!WebElementPromise} A WebElement that can be used to issue
* commands against the located element. If the element is not found, the
* element will be invalidated and all scheduled commands aborted.
*/
findElement(locator) {}
/**
* Schedule a command to search for multiple elements on the page.
*
* @param {!(by.By|Function)} locator The locator to use.
* @return {!promise.Thenable<!Array<!WebElement>>} A
* promise that will resolve to an array of WebElements.
*/
findElements(locator) {}
/**
* Schedule a command to take a screenshot. The driver makes a best effort to
* return a screenshot of the following, in order of preference:
*
* 1. Entire page
* 2. Current window
* 3. Visible portion of the current frame
* 4. The entire display containing the browser
*
* @return {!promise.Thenable<string>} A promise that will be
* resolved to the screenshot as a base-64 encoded PNG.
*/
takeScreenshot() {}
/**
* @return {!Options} The options interface for this instance.
*/
manage() {}
/**
* @return {!Navigation} The navigation interface for this instance.
*/
navigate() {}
/**
* @return {!TargetLocator} The target locator interface for this
* instance.
*/
switchTo() {}
}
/**
* Each WebDriver instance provides automated control over a browser session.
*
* @implements {IWebDriver}
*/
class WebDriver {
/**
* @param {!(Session|IThenable<!Session>)} session Either a known session or a
* promise that will be resolved to a session.
* @param {!command.Executor} executor The executor to use when sending
* commands to the browser.
* @param {promise.ControlFlow=} opt_flow The flow to
* schedule commands through. Defaults to the active flow object.
* @param {(function(this: void): ?)=} opt_onQuit A function to call, if any,
* when the session is terminated.
*/
constructor(session, executor, opt_flow, opt_onQuit) {
/** @private {!promise.ControlFlow} */
this.flow_ = opt_flow || promise.controlFlow();
/** @private {!promise.Thenable<!Session>} */
this.session_ = this.flow_.promise(resolve => resolve(session));
/** @private {!command.Executor} */
this.executor_ = executor;
/** @private {input.FileDetector} */
this.fileDetector_ = null;
/** @private @const {(function(this: void): ?|undefined)} */
this.onQuit_ = opt_onQuit;
}
/**
* Creates a new WebDriver client for an existing session.
* @param {!command.Executor} executor Command executor to use when querying
* for session details.
* @param {string} sessionId ID of the session to attach to.
* @param {promise.ControlFlow=} opt_flow The control flow all
* driver commands should execute under. Defaults to the
* {@link promise.controlFlow() currently active} control flow.
* @return {!WebDriver} A new client for the specified session.
*/
static attachToSession(executor, sessionId, opt_flow) {
let flow = opt_flow || promise.controlFlow();
let cmd = new command.Command(command.Name.DESCRIBE_SESSION)
.setParameter('sessionId', sessionId);
let session = flow.execute(
() => executeCommand(executor, cmd).catch(err => {
// The DESCRIBE_SESSION command is not supported by the W3C spec, so
// if we get back an unknown command, just return a session with
// unknown capabilities.
if (err instanceof error.UnknownCommandError) {
return new Session(sessionId, new Capabilities);
}
throw err;
}),
'WebDriver.attachToSession()');
return new WebDriver(session, executor, flow);
}
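// Usage sketch (the executor and session ID are hypothetical):
//
//   let driver = WebDriver.attachToSession(existingExecutor, 'abc-123');
//   driver.getTitle().then(title => console.log(title));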
/**
* Creates a new WebDriver session.
*
* By default, the requested session `capabilities` are merely "desired" and
* the remote end will still create a new session even if it cannot satisfy
* all of the requested capabilities. You can query which capabilities a
* session actually has using the
* {@linkplain #getCapabilities() getCapabilities()} method on the returned
* WebDriver instance.
*
* To define _required capabilities_, provide the `capabilities` as an object
* literal with `required` and `desired` keys. The `desired` key may be
* omitted if all capabilities are required, and vice versa. If the server
* cannot create a session with all of the required capabilities, it will
* return an {@linkplain error.SessionNotCreatedError}.
*
* let required = new Capabilities().set('browserName', 'firefox');
* let desired = new Capabilities().set('version', '45');
* let driver = WebDriver.createSession(executor, {required, desired});
*
* This function will always return a WebDriver instance. If there is an error
* creating the session, such as the aforementioned SessionNotCreatedError,
* the driver will have a rejected {@linkplain #getSession session} promise.
* It is recommended that this promise is left _unhandled_ so it will
* propagate through the {@linkplain promise.ControlFlow control flow} and
* cause subsequent commands to fail.
*
* let required = Capabilities.firefox();
* let driver = WebDriver.createSession(executor, {required});
*
*     // If the createSession operation failed, then this command will
*     // also fail, propagating the creation failure.
* driver.get('http://www.google.com').catch(e => console.log(e));
*
* @param {!command.Executor} executor The executor to create the new session
* with.
* @param {(!Capabilities|
* {desired: (Capabilities|undefined),
* required: (Capabilities|undefined)})} capabilities The desired
* capabilities for the new session.
* @param {promise.ControlFlow=} opt_flow The control flow all driver
* commands should execute under, including the initial session creation.
* Defaults to the {@link promise.controlFlow() currently active}
* control flow.
* @param {(function(new: WebDriver,
* !IThenable<!Session>,
* !command.Executor,
* promise.ControlFlow=))=} opt_ctor
* A reference to the constructor of the specific type of WebDriver client
* to instantiate. Will create a vanilla {@linkplain WebDriver} instance
* if a constructor is not provided.
* @param {(function(this: void): ?)=} opt_onQuit A callback to invoke when
* the newly created session is terminated. This should be used to clean
* up any resources associated with the session.
* @return {!WebDriver} The driver for the newly created session.
*/
static createSession(
executor, capabilities, opt_flow, opt_ctor, opt_onQuit) {
let flow = opt_flow || promise.controlFlow();
let cmd = new command.Command(command.Name.NEW_SESSION);
if (capabilities && (capabilities.desired || capabilities.required)) {
cmd.setParameter('desiredCapabilities', capabilities.desired);
cmd.setParameter('requiredCapabilities', capabilities.required);
} else {
cmd.setParameter('desiredCapabilities', capabilities);
}
let session = flow.execute(
() => executeCommand(executor, cmd),
'WebDriver.createSession()');
if (typeof opt_onQuit === 'function') {
session = session.catch(err => {
return Promise.resolve(opt_onQuit.call(void 0)).then(_ => {throw err;});
});
}
const ctor = opt_ctor || WebDriver;
return new ctor(session, executor, flow, opt_onQuit);
}
/** @override */
controlFlow() {
return this.flow_;
}
/** @override */
schedule(command, description) {
command.setParameter('sessionId', this.session_);
// If any of the command parameters are rejected promises, those
// rejections may be reported as unhandled before the control flow
// attempts to execute the command. To ensure parameters errors
// propagate through the command itself, we resolve all of the
// command parameters now, but suppress any errors until the ControlFlow
// actually executes the command. This addresses scenarios like catching
// an element not found error in:
//
// driver.findElement(By.id('foo')).click().catch(function(e) {
// if (e instanceof NoSuchElementError) {
// // Do something.
// }
// });
var prepCommand = toWireValue(command.getParameters());
prepCommand.catch(function() {});
var flow = this.flow_;
var executor = this.executor_;
return flow.execute(() => {
// Retrieve resolved command parameters; any previously suppressed errors
// will now propagate up through the control flow as part of the command
// execution.
return prepCommand.then(function(parameters) {
command.setParameters(parameters);
return executor.execute(command);
}).then(value => fromWireValue(this, value));
}, description);
}
/** @override */
setFileDetector(detector) {
this.fileDetector_ = detector;
}
/** @override */
getExecutor() {
return this.executor_;
}
/** @override */
getSession() {
return this.session_;
}
/** @override */
getCapabilities() {
return this.session_.then(s => s.getCapabilities());
}
/** @override */
quit() {
var result = this.schedule(
new command.Command(command.Name.QUIT),
'WebDriver.quit()');
// Delete our session ID when the quit command finishes; this will allow us
// to throw an error when attempting to use a driver post-quit.
return /** @type {!promise.Thenable} */(promise.finally(result, () => {
this.session_ = this.flow_.promise((_, reject) => {
reject(new error.NoSuchSessionError(
'This driver instance does not have a valid session ID ' +
'(did you call WebDriver.quit()?) and may no longer be used.'));
});
// Only want the session rejection to bubble if accessed.
this.session_.catch(function() {});
if (this.onQuit_) {
return this.onQuit_.call(void 0);
}
}));
}
/** @override */
actions() {
return new actions.ActionSequence(this);
}
/** @override */
touchActions() {
return new actions.TouchSequence(this);
}
/** @override */
executeScript(script, var_args) {
if (typeof script === 'function') {
script = 'return (' + script + ').apply(null, arguments);';
}
let args =
arguments.length > 1 ? Array.prototype.slice.call(arguments, 1) : [];
return this.schedule(
new command.Command(command.Name.EXECUTE_SCRIPT).
setParameter('script', script).
setParameter('args', args),
'WebDriver.executeScript()');
}
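// e.g. (sketch): a string return value resolves as-is, per the rules above:
//
//   driver.executeScript('return document.title;')
//       .then(title => console.log(title));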
/** @override */
executeAsyncScript(script, var_args) {
if (typeof script === 'function') {
script = 'return (' + script + ').apply(null, arguments);';
}
let args = Array.prototype.slice.call(arguments, 1);
return this.schedule(
new command.Command(command.Name.EXECUTE_ASYNC_SCRIPT).
setParameter('script', script).
setParameter('args', args),
'WebDriver.executeAsyncScript()');
}
/** @override */
call(fn, opt_scope, var_args) {
let args = Array.prototype.slice.call(arguments, 2);
return this.flow_.execute(function() {
return promise.fullyResolved(args).then(function(args) {
if (promise.isGenerator(fn)) {
args.unshift(fn, opt_scope);
return promise.consume.apply(null, args);
}
return fn.apply(opt_scope, args);
});
}, 'WebDriver.call(' + (fn.name || 'function') + ')');
}
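// Usage sketch (the function is hypothetical): schedule arbitrary work on the
// control flow between driver commands:
//
//   driver.call(() => console.log('runs between scheduled commands'));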
/** @override */
wait(condition, opt_timeout, opt_message) {
if (promise.isPromise(condition)) {
return this.flow_.wait(
/** @type {!IThenable} */(condition),
opt_timeout, opt_message);
}
var message = opt_message;
var fn = /** @type {!Function} */(condition);
if (condition instanceof Condition) {
message = message || condition.description();
fn = condition.fn;
}
if (typeof fn !== 'function') {
throw TypeError(
'Wait condition must be a promise-like object, function, or a '
+ 'Condition object');
}
var driver = this;
var result = this.flow_.wait(function() {
if (promise.isGenerator(fn)) {
return promise.consume(fn, null, [driver]);
}
return fn(driver);
}, opt_timeout, message);
if (condition instanceof WebElementCondition) {
result = new WebElementPromise(this, result.then(function(value) {
if (!(value instanceof WebElement)) {
throw TypeError(
'WebElementCondition did not resolve to a WebElement: '
+ Object.prototype.toString.call(value));
}
return value;
}));
}
return result;
}
/** @override */
sleep(ms) {
return this.flow_.timeout(ms, 'WebDriver.sleep(' + ms + ')');
}
/** @override */
getWindowHandle() {
return this.schedule(
new command.Command(command.Name.GET_CURRENT_WINDOW_HANDLE),
'WebDriver.getWindowHandle()');
}
/** @override */
getAllWindowHandles() {
return this.schedule(
new command.Command(command.Name.GET_WINDOW_HANDLES),
'WebDriver.getAllWindowHandles()');
}
/** @override */
getPageSource() {
return this.schedule(
new command.Command(command.Name.GET_PAGE_SOURCE),
'WebDriver.getPageSource()');
}
/** @override */
close() {
return this.schedule(new command.Command(command.Name.CLOSE),
'WebDriver.close()');
}
/** @override */
get(url) {
return this.navigate().to(url);
}
/** @override */
getCurrentUrl() {
return this.schedule(
new command.Command(command.Name.GET_CURRENT_URL),
'WebDriver.getCurrentUrl()');
}
/** @override */
getTitle() {
return this.schedule(new command.Command(command.Name.GET_TITLE),
'WebDriver.getTitle()');
}
/** @override */
findElement(locator) {
let id;
locator = by.checkedLocator(locator);
if (typeof locator === 'function') {
id = this.findElementInternal_(locator, this);
} else {
let cmd = new command.Command(command.Name.FIND_ELEMENT).
setParameter('using', locator.using).
setParameter('value', locator.value);
id = this.schedule(cmd, 'WebDriver.findElement(' + locator + ')');
}
return new WebElementPromise(this, id);
}
/**
* @param {!Function} locatorFn The locator function to use.
* @param {!(WebDriver|WebElement)} context The search
* context.
* @return {!promise.Thenable<!WebElement>} A
* promise that will resolve to a single WebElement.
* @private
*/
findElementInternal_(locatorFn, context) {
return this.call(() => locatorFn(context)).then(function(result) {
if (Array.isArray(result)) {
result = result[0];
}
if (!(result instanceof WebElement)) {
throw new TypeError('Custom locator did not return a WebElement');
}
return result;
});
}
/** @override */
findElements(locator) {
locator = by.checkedLocator(locator);
if (typeof locator === 'function') {
return this.findElementsInternal_(locator, this);
} else {
let cmd = new command.Command(command.Name.FIND_ELEMENTS).
setParameter('using', locator.using).
setParameter('value', locator.value);
let res = this.schedule(cmd, 'WebDriver.findElements(' + locator + ')');
return res.catch(function(e) {
if (e instanceof error.NoSuchElementError) {
return [];
}
throw e;
});
}
}
/**
* @param {!Function} locatorFn The locator function to use.
* @param {!(WebDriver|WebElement)} context The search context.
* @return {!promise.Thenable<!Array<!WebElement>>} A promise that
* will resolve to an array of WebElements.
* @private
*/
findElementsInternal_(locatorFn, context) {
return this.call(() => locatorFn(context)).then(function(result) {
if (result instanceof WebElement) {
return [result];
}
if (!Array.isArray(result)) {
return [];
}
return result.filter(function(item) {
return item instanceof WebElement;
});
});
}
/** @override */
takeScreenshot() {
return this.schedule(new command.Command(command.Name.SCREENSHOT),
'WebDriver.takeScreenshot()');
}
/** @override */
manage() {
return new Options(this);
}
/** @override */
navigate() {
return new Navigation(this);
}
/** @override */
switchTo() {
return new TargetLocator(this);
}
}
/**
* Interface for navigating back and forth in the browser history.
*
* This class should never be instantiated directly. Instead, obtain an instance
* with
*
* webdriver.navigate()
*
* @see WebDriver#navigate()
*/
class Navigation {
/**
* @param {!WebDriver} driver The parent driver.
* @private
*/
constructor(driver) {
/** @private {!WebDriver} */
this.driver_ = driver;
}
/**
* Schedules a command to navigate to a new URL.
* @param {string} url The URL to navigate to.
* @return {!promise.Thenable<void>} A promise that will be resolved
* when the URL has been loaded.
*/
to(url) {
return this.driver_.schedule(
new command.Command(command.Name.GET).
setParameter('url', url),
'WebDriver.navigate().to(' + url + ')');
}
/**
* Schedules a command to move backwards in the browser history.
* @return {!promise.Thenable<void>} A promise that will be resolved
* when the navigation event has completed.
*/
back() {
return this.driver_.schedule(
new command.Command(command.Name.GO_BACK),
'WebDriver.navigate().back()');
}
/**
* Schedules a command to move forwards in the browser history.
* @return {!promise.Thenable<void>} A promise that will be resolved
* when the navigation event has completed.
*/
forward() {
return this.driver_.schedule(
new command.Command(command.Name.GO_FORWARD),
'WebDriver.navigate().forward()');
}
/**
* Schedules a command to refresh the current page.
* @return {!promise.Thenable<void>} A promise that will be resolved
* when the navigation event has completed.
*/
refresh() {
return this.driver_.schedule(
new command.Command(command.Name.REFRESH),
'WebDriver.navigate().refresh()');
}
}
/**
* Provides methods for managing browser and driver state.
*
* This class should never be instantiated directly. Instead, obtain an instance
* with {@linkplain WebDriver#manage() webdriver.manage()}.
*/
class Options {
/**
* @param {!WebDriver} driver The parent driver.
* @private
*/
constructor(driver) {
/** @private {!WebDriver} */
this.driver_ = driver;
}
/**
* Schedules a command to add a cookie.
*
* __Sample Usage:__
*
* // Set a basic cookie.
* driver.options().addCookie({name: 'foo', value: 'bar'});
*
* // Set a cookie that expires in 10 minutes.
* let expiry = new Date(Date.now() + (10 * 60 * 1000));
* driver.options().addCookie({name: 'foo', value: 'bar', expiry});
*
* // The cookie expiration may also be specified in seconds since epoch.
* driver.options().addCookie({
* name: 'foo',
* value: 'bar',
* expiry: Math.floor(Date.now() / 1000)
* });
*
* @param {!Options.Cookie} spec Defines the cookie to add.
* @return {!promise.Thenable<void>} A promise that will be resolved
* when the cookie has been added to the page.
* @throws {error.InvalidArgumentError} if any of the cookie parameters are
* invalid.
* @throws {TypeError} if `spec` is not a cookie object.
*/
addCookie(spec) {
if (!spec || typeof spec !== 'object') {
throw TypeError('addCookie called with non-cookie parameter');
}
// We do not allow '=' or ';' in the name.
let name = spec.name;
if (/[;=]/.test(name)) {
throw new error.InvalidArgumentError(
'Invalid cookie name "' + name + '"');
}
// We do not allow ';' in value.
let value = spec.value;
if (/;/.test(value)) {
throw new error.InvalidArgumentError(
'Invalid cookie value "' + value + '"');
}
let cookieString = name + '=' + value +
(spec.domain ? ';domain=' + spec.domain : '') +
(spec.path ? ';path=' + spec.path : '') +
(spec.secure ? ';secure' : '');
let expiry;
if (typeof spec.expiry === 'number') {
expiry = Math.floor(spec.expiry);
cookieString += ';expires=' + new Date(spec.expiry * 1000).toUTCString();
} else if (spec.expiry instanceof Date) {
let date = /** @type {!Date} */(spec.expiry);
expiry = Math.floor(date.getTime() / 1000);
cookieString += ';expires=' + date.toUTCString();
}
return this.driver_.schedule(
new command.Command(command.Name.ADD_COOKIE).
setParameter('cookie', {
'name': name,
'value': value,
'path': spec.path,
'domain': spec.domain,
'secure': !!spec.secure,
'expiry': expiry
}),
'WebDriver.manage().addCookie(' + cookieString + ')');
}
/**
* Schedules a command to delete all cookies visible to the current page.
* @return {!promise.Thenable<void>} A promise that will be resolved
* when all cookies have been deleted.
*/
deleteAllCookies() {
return this.driver_.schedule(
new command.Command(command.Name.DELETE_ALL_COOKIES),
'WebDriver.manage().deleteAllCookies()');
}
/**
* Schedules a command to delete the cookie with the given name. This command
* is a no-op if there is no cookie with the given name visible to the current
* page.
* @param {string} name The name of the cookie to delete.
* @return {!promise.Thenable<void>} A promise that will be resolved
* when the cookie has been deleted.
*/
deleteCookie(name) {
return this.driver_.schedule(
new command.Command(command.Name.DELETE_COOKIE).
setParameter('name', name),
'WebDriver.manage().deleteCookie(' + name + ')');
}
/**
* Schedules a command to retrieve all cookies visible to the current page.
* Each cookie will be returned as a JSON object as described by the WebDriver
* wire protocol.
* @return {!promise.Thenable<!Array<!Options.Cookie>>} A promise that will be
* resolved with the cookies visible to the current browsing context.
*/
getCookies() {
return this.driver_.schedule(
new command.Command(command.Name.GET_ALL_COOKIES),
'WebDriver.manage().getCookies()');
}
/**
* Schedules a command to retrieve the cookie with the given name. Returns null
* if there is no such cookie. The cookie will be returned as a JSON object as
* described by the WebDriver wire protocol.
*
* @param {string} name The name of the cookie to retrieve.
* @return {!promise.Thenable<?Options.Cookie>} A promise that will be resolved
* with the named cookie, or `null` if there is no such cookie.
*/
getCookie(name) {
return this.getCookies().then(function(cookies) {
for (let cookie of cookies) {
if (cookie && cookie['name'] === name) {
return cookie;
}
}
return null;
});
}
/**
* Schedules a command to fetch the timeouts currently configured for the
* current session.
*
* @return {!promise.Thenable<{script: number,
* pageLoad: number,
* implicit: number}>} A promise that will be
* resolved with the timeouts currently configured for the current
* session.
* @see #setTimeouts()
*/
getTimeouts() {
return this.driver_.schedule(
new command.Command(command.Name.GET_TIMEOUT),
`WebDriver.manage().getTimeouts()`)
}
/**
* Schedules a command to set timeout durations associated with the current
* session.
*
* The following timeouts are supported (all timeouts are specified in
* milliseconds):
*
* - `implicit` specifies the maximum amount of time to wait for an element
* locator to succeed when {@linkplain WebDriver#findElement locating}
* {@linkplain WebDriver#findElements elements} on the page.
* Defaults to 0 milliseconds.
*
* - `pageLoad` specifies the maximum amount of time to wait for a page to
* finish loading. Defaults to 300000 milliseconds.
*
* - `script` specifies the maximum amount of time to wait for an
* {@linkplain WebDriver#executeScript evaluated script} to run. If set to
* `null`, the script timeout will be indefinite.
* Defaults to 30000 milliseconds.
*
* @param {{script: (number|null|undefined),
* pageLoad: (number|null|undefined),
* implicit: (number|null|undefined)}} conf
* The desired timeout configuration.
* @return {!promise.Thenable<void>} A promise that will be resolved when the
* timeouts have been set.
* @throws {!TypeError} if an invalid options object is provided.
* @see #getTimeouts()
* @see <https://w3c.github.io/webdriver/webdriver-spec.html#dfn-set-timeouts>
*/
setTimeouts({script, pageLoad, implicit} = {}) {
let cmd = new command.Command(command.Name.SET_TIMEOUT);
let valid = false;
function setParam(key, value) {
if (value === null || typeof value === 'number') {
valid = true;
cmd.setParameter(key, value);
} else if (typeof value !== 'undefined') {
throw TypeError(
'invalid timeouts configuration:'
+ ` expected "${key}" to be a number, got ${typeof value}`);
}
}
setParam('implicit', implicit);
setParam('pageLoad', pageLoad);
setParam('script', script);
if (valid) {
return this.driver_.schedule(cmd, `WebDriver.manage().setTimeouts()`)
.catch(() => {
// Fallback to the legacy method.
let cmds = [];
if (typeof script === 'number') {
cmds.push(legacyTimeout(this.driver_, 'script', script));
}
if (typeof implicit === 'number') {
cmds.push(legacyTimeout(this.driver_, 'implicit', implicit));
}
if (typeof pageLoad === 'number') {
cmds.push(legacyTimeout(this.driver_, 'page load', pageLoad));
}
return Promise.all(cmds);
});
}
throw TypeError('no timeouts specified');
}
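// e.g. (sketch): set all three timeouts, here to their documented defaults:
//
//   driver.manage().setTimeouts({implicit: 0, pageLoad: 300000, script: 30000});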
/**
* @return {!Logs} The interface for managing driver
* logs.
*/
logs() {
return new Logs(this.driver_);
}
/**
* @return {!Timeouts} The interface for managing driver timeouts.
* @deprecated Use {@link #setTimeouts()} instead.
*/
timeouts() {
return new Timeouts(this.driver_);
}
/**
* @return {!Window} The interface for managing the current window.
*/
window() {
return new Window(this.driver_);
}
}
/**
* @param {!WebDriver} driver
* @param {string} type
* @param {number} ms
* @return {!promise.Thenable<void>}
*/
function legacyTimeout(driver, type, ms) {
return driver.schedule(
new command.Command(command.Name.SET_TIMEOUT)
.setParameter('type', type)
.setParameter('ms', ms),
`WebDriver.manage().setTimeouts({${type}: ${ms}})`);
}
/**
* A record object describing a browser cookie.
*
* @record
*/
Options.Cookie = function() {};
/**
* The name of the cookie.
*
* @type {string}
*/
Options.Cookie.prototype.name;
/**
* The cookie value.
*
* @type {string}
*/
Options.Cookie.prototype.value;
/**
* The cookie path. Defaults to "/" when adding a cookie.
*
* @type {(string|undefined)}
*/
Options.Cookie.prototype.path;
/**
* The domain the cookie is visible to. Defaults to the current browsing
* context's document's URL when adding a cookie.
*
* @type {(string|undefined)}
*/
Options.Cookie.prototype.domain;
/**
* Whether the cookie is a secure cookie. Defaults to false when adding a new
* cookie.
*
* @type {(boolean|undefined)}
*/
Options.Cookie.prototype.secure;
/**
* Whether the cookie is an HTTP only cookie. Defaults to false when adding a
* new cookie.
*
* @type {(boolean|undefined)}
*/
Options.Cookie.prototype.httpOnly;
/**
* When the cookie expires.
*
* When {@linkplain Options#addCookie() adding a cookie}, this may be specified
* in _seconds_ since Unix epoch (January 1, 1970). The expiry will default to
* 20 years in the future if omitted.
*
* The expiry is always returned in seconds since epoch when
* {@linkplain Options#getCookies() retrieving cookies} from the browser.
*
* @type {(!Date|number|undefined)}
*/
Options.Cookie.prototype.expiry;
/**
* An interface for managing timeout behavior for WebDriver instances.
*
* This class should never be instantiated directly. Instead, obtain an instance
* with
*
* webdriver.manage().timeouts()
*
* @deprecated This has been deprecated in favor of
* {@link Options#setTimeouts()}, which supports setting multiple timeouts
* at once.
* @see WebDriver#manage()
* @see Options#timeouts()
*/
class Timeouts {
/**
* @param {!WebDriver} driver The parent driver.
* @private
*/
constructor(driver) {
/** @private {!WebDriver} */
this.driver_ = driver;
}
/**
* Specifies the amount of time the driver should wait when searching for an
* element if it is not immediately present.
*
* When searching for a single element, the driver should poll the page
* until the element has been found, or this timeout expires before failing
* with a {@link bot.ErrorCode.NO_SUCH_ELEMENT} error. When searching
* for multiple elements, the driver should poll the page until at least one
* element has been found or this timeout has expired.
*
* Setting the wait timeout to 0 (its default value) disables implicit
* waiting.
*
* Increasing the implicit wait timeout should be used judiciously as it
* will have an adverse effect on test run time, especially when used with
* slower location strategies like XPath.
*
* @param {number} ms The amount of time to wait, in milliseconds.
* @return {!promise.Thenable<void>} A promise that will be resolved
* when the implicit wait timeout has been set.
* @deprecated Use {@link Options#setTimeouts()
* driver.manage().setTimeouts({implicit: ms})}.
*/
implicitlyWait(ms) {
return this.driver_.manage().setTimeouts({implicit: ms});
}
/**
* Sets the amount of time to wait, in milliseconds, for an asynchronous
* script to finish execution before returning an error. If the timeout is
* less than or equal to 0, the script will be allowed to run indefinitely.
*
* @param {number} ms The amount of time to wait, in milliseconds.
* @return {!promise.Thenable<void>} A promise that will be resolved
* when the script timeout has been set.
* @deprecated Use {@link Options#setTimeouts()
* driver.manage().setTimeouts({script: ms})}.
*/
setScriptTimeout(ms) {
return this.driver_.manage().setTimeouts({script: ms});
}
/**
* Sets the amount of time to wait for a page load to complete before
* returning an error. If the timeout is negative, page loads may be
* indefinite.
*
* @param {number} ms The amount of time to wait, in milliseconds.
* @return {!promise.Thenable<void>} A promise that will be resolved
* when the timeout has been set.
* @deprecated Use {@link Options#setTimeouts()
* driver.manage().setTimeouts({pageLoad: ms})}.
*/
pageLoadTimeout(ms) {
return this.driver_.manage().setTimeouts({pageLoad: ms});
}
}
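// Since Timeouts is deprecated, a sketch of the replacement API that its
// methods delegate to above (the millisecond values are arbitrary examples):
//
//   driver.manage().setTimeouts({implicit: 0, pageLoad: 30000, script: 30000});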
/**
* An interface for managing the current window.
*
* This class should never be instantiated directly. Instead, obtain an instance
* with
*
* webdriver.manage().window()
*
* @see WebDriver#manage()
* @see Options#window()
*/
class Window {
/**
* @param {!WebDriver} driver The parent driver.
* @private
*/
constructor(driver) {
/** @private {!WebDriver} */
this.driver_ = driver;
}
/**
* Retrieves the window's current position, relative to the top left corner of
* the screen.
* @return {!promise.Thenable<{x: number, y: number}>} A promise
* that will be resolved with the window's position in the form of a
* {x:number, y:number} object literal.
*/
getPosition() {
return this.driver_.schedule(
new command.Command(command.Name.GET_WINDOW_POSITION).
setParameter('windowHandle', 'current'),
'WebDriver.manage().window().getPosition()');
}
/**
* Repositions the current window.
* @param {number} x The desired horizontal position, relative to the left
* side of the screen.
   * @param {number} y The desired vertical position, relative to the top of
   *     the screen.
* @return {!promise.Thenable<void>} A promise that will be resolved
* when the command has completed.
*/
setPosition(x, y) {
return this.driver_.schedule(
new command.Command(command.Name.SET_WINDOW_POSITION).
setParameter('windowHandle', 'current').
setParameter('x', x).
setParameter('y', y),
'WebDriver.manage().window().setPosition(' + x + ', ' + y + ')');
}
/**
* Retrieves the window's current size.
* @return {!promise.Thenable<{width: number, height: number}>} A
* promise that will be resolved with the window's size in the form of a
* {width:number, height:number} object literal.
*/
getSize() {
return this.driver_.schedule(
new command.Command(command.Name.GET_WINDOW_SIZE).
setParameter('windowHandle', 'current'),
'WebDriver.manage().window().getSize()');
}
/**
* Resizes the current window.
* @param {number} width The desired window width.
* @param {number} height The desired window height.
* @return {!promise.Thenable<void>} A promise that will be resolved
* when the command has completed.
*/
setSize(width, height) {
return this.driver_.schedule(
new command.Command(command.Name.SET_WINDOW_SIZE).
setParameter('windowHandle', 'current').
setParameter('width', width).
setParameter('height', height),
'WebDriver.manage().window().setSize(' + width + ', ' + height + ')');
}
/**
* Maximizes the current window.
* @return {!promise.Thenable<void>} A promise that will be resolved
* when the command has completed.
*/
maximize() {
return this.driver_.schedule(
new command.Command(command.Name.MAXIMIZE_WINDOW).
setParameter('windowHandle', 'current'),
'WebDriver.manage().window().maximize()');
}
}
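// Illustrative use of the Window interface above; the coordinates and
// dimensions are arbitrary examples.
//
//   driver.manage().window().setPosition(0, 0);
//   driver.manage().window().setSize(1280, 800);
//   driver.manage().window().maximize();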
/**
* Interface for managing WebDriver log records.
*
* This class should never be instantiated directly. Instead, obtain an
* instance with
*
* webdriver.manage().logs()
*
* @see WebDriver#manage()
* @see Options#logs()
*/
class Logs {
/**
* @param {!WebDriver} driver The parent driver.
* @private
*/
constructor(driver) {
/** @private {!WebDriver} */
this.driver_ = driver;
}
/**
* Fetches available log entries for the given type.
*
* Note that log buffers are reset after each call, meaning that available
* log entries correspond to those entries not yet returned for a given log
* type. In practice, this means that this call will return the available log
* entries since the last call, or from the start of the session.
*
* @param {!logging.Type} type The desired log type.
* @return {!promise.Thenable<!Array.<!logging.Entry>>} A
* promise that will resolve to a list of log entries for the specified
* type.
*/
get(type) {
let cmd = new command.Command(command.Name.GET_LOG).
setParameter('type', type);
return this.driver_.schedule(
cmd, 'WebDriver.manage().logs().get(' + type + ')').
then(function(entries) {
return entries.map(function(entry) {
if (!(entry instanceof logging.Entry)) {
return new logging.Entry(
entry['level'], entry['message'], entry['timestamp'],
entry['type']);
}
return entry;
});
});
}
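  // A sketch of fetching browser console entries with get() above. Assumes
  // `logging` refers to this library's logging module.
  //
  //   driver.manage().logs().get(logging.Type.BROWSER).then(entries => {
  //     entries.forEach(e => console.log(e.level.name, e.message));
  //   });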
/**
* Retrieves the log types available to this driver.
* @return {!promise.Thenable<!Array<!logging.Type>>} A
* promise that will resolve to a list of available log types.
*/
getAvailableLogTypes() {
return this.driver_.schedule(
new command.Command(command.Name.GET_AVAILABLE_LOG_TYPES),
'WebDriver.manage().logs().getAvailableLogTypes()');
}
}
/**
* An interface for changing the focus of the driver to another frame or window.
*
* This class should never be instantiated directly. Instead, obtain an
* instance with
*
* webdriver.switchTo()
*
* @see WebDriver#switchTo()
*/
class TargetLocator {
/**
* @param {!WebDriver} driver The parent driver.
* @private
*/
constructor(driver) {
/** @private {!WebDriver} */
this.driver_ = driver;
}
/**
   * Schedules a command to retrieve the {@code document.activeElement} element
   * on the current document, or {@code document.body} if activeElement is not
   * available.
* @return {!WebElementPromise} The active element.
*/
activeElement() {
var id = this.driver_.schedule(
new command.Command(command.Name.GET_ACTIVE_ELEMENT),
'WebDriver.switchTo().activeElement()');
return new WebElementPromise(this.driver_, id);
}
/**
* Schedules a command to switch focus of all future commands to the topmost
* frame on the page.
* @return {!promise.Thenable<void>} A promise that will be resolved
* when the driver has changed focus to the default content.
*/
defaultContent() {
return this.driver_.schedule(
new command.Command(command.Name.SWITCH_TO_FRAME).
setParameter('id', null),
'WebDriver.switchTo().defaultContent()');
}
/**
* Schedules a command to switch the focus of all future commands to another
* frame on the page. The target frame may be specified as one of the
* following:
*
* - A number that specifies a (zero-based) index into [window.frames](
* https://developer.mozilla.org/en-US/docs/Web/API/Window.frames).
   * - A {@link WebElement} reference, which corresponds to a `frame` or `iframe`
* DOM element.
* - The `null` value, to select the topmost frame on the page. Passing `null`
* is the same as calling {@link #defaultContent defaultContent()}.
*
   * If the specified frame cannot be found, the returned promise will be
* rejected with a {@linkplain error.NoSuchFrameError}.
*
* @param {(number|WebElement|null)} id The frame locator.
* @return {!promise.Thenable<void>} A promise that will be resolved
* when the driver has changed focus to the specified frame.
*/
frame(id) {
return this.driver_.schedule(
new command.Command(command.Name.SWITCH_TO_FRAME).
setParameter('id', id),
'WebDriver.switchTo().frame(' + id + ')');
}
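  // Illustrative frame switching with frame() above. The locator is
  // hypothetical and `By` is assumed to be this library's locator factory.
  //
  //   driver.switchTo().frame(0);                                     // by index
  //   driver.switchTo().frame(driver.findElement(By.css('iframe')));  // by element
  //   driver.switchTo().frame(null);                  // back to the topmost frame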
/**
* Schedules a command to switch the focus of all future commands to another
* window. Windows may be specified by their {@code window.name} attribute or
   * by their handle (as returned by {@link WebDriver#getWindowHandles}).
*
* If the specified window cannot be found, the returned promise will be
* rejected with a {@linkplain error.NoSuchWindowError}.
*
* @param {string} nameOrHandle The name or window handle of the window to
* switch focus to.
* @return {!promise.Thenable<void>} A promise that will be resolved
* when the driver has changed focus to the specified window.
*/
window(nameOrHandle) {
return this.driver_.schedule(
new command.Command(command.Name.SWITCH_TO_WINDOW).
// "name" supports the legacy drivers. "handle" is the W3C
// compliant parameter.
setParameter('name', nameOrHandle).
setParameter('handle', nameOrHandle),
'WebDriver.switchTo().window(' + nameOrHandle + ')');
}
/**
* Schedules a command to change focus to the active modal dialog, such as
* those opened by `window.alert()`, `window.confirm()`, and
* `window.prompt()`. The returned promise will be rejected with a
* {@linkplain error.NoSuchAlertError} if there are no open alerts.
*
* @return {!AlertPromise} The open alert.
*/
alert() {
var text = this.driver_.schedule(
new command.Command(command.Name.GET_ALERT_TEXT),
'WebDriver.switchTo().alert()');
var driver = this.driver_;
return new AlertPromise(driver, text.then(function(text) {
return new Alert(driver, text);
}));
}
}
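// A sketch of handling a dialog through TargetLocator#alert() above. This
// assumes an alert is currently open; otherwise the returned promise is
// rejected with an error.NoSuchAlertError.
//
//   driver.switchTo().alert().getText().then(msg => console.log(msg));
//   driver.switchTo().alert().accept();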
//////////////////////////////////////////////////////////////////////////////
//
// WebElement
//
//////////////////////////////////////////////////////////////////////////////
const LEGACY_ELEMENT_ID_KEY = 'ELEMENT';
const ELEMENT_ID_KEY = 'element-6066-11e4-a52e-4f735466cecf';
/**
* Represents a DOM element. WebElements can be found by searching from the
* document root using a {@link WebDriver} instance, or by searching
* under another WebElement:
*
* driver.get('http://www.google.com');
* var searchForm = driver.findElement(By.tagName('form'));
* var searchBox = searchForm.findElement(By.name('q'));
* searchBox.sendKeys('webdriver');
*/
class WebElement {
/**
* @param {!WebDriver} driver the parent WebDriver instance for this element.
* @param {(!IThenable<string>|string)} id The server-assigned opaque ID for
* the underlying DOM element.
*/
constructor(driver, id) {
/** @private {!WebDriver} */
this.driver_ = driver;
/** @private {!promise.Thenable<string>} */
this.id_ = driver.controlFlow().promise(resolve => resolve(id));
}
/**
* @param {string} id The raw ID.
* @param {boolean=} opt_noLegacy Whether to exclude the legacy element key.
* @return {!Object} The element ID for use with WebDriver's wire protocol.
*/
static buildId(id, opt_noLegacy) {
return opt_noLegacy
? {[ELEMENT_ID_KEY]: id}
: {[ELEMENT_ID_KEY]: id, [LEGACY_ELEMENT_ID_KEY]: id};
}
/**
* Extracts the encoded WebElement ID from the object.
*
* @param {?} obj The object to extract the ID from.
* @return {string} the extracted ID.
* @throws {TypeError} if the object is not a valid encoded ID.
*/
static extractId(obj) {
if (obj && typeof obj === 'object') {
if (typeof obj[ELEMENT_ID_KEY] === 'string') {
return obj[ELEMENT_ID_KEY];
} else if (typeof obj[LEGACY_ELEMENT_ID_KEY] === 'string') {
return obj[LEGACY_ELEMENT_ID_KEY];
}
}
throw new TypeError('object is not a WebElement ID');
}
/**
* @param {?} obj the object to test.
* @return {boolean} whether the object is a valid encoded WebElement ID.
*/
static isId(obj) {
return obj && typeof obj === 'object'
&& (typeof obj[ELEMENT_ID_KEY] === 'string'
|| typeof obj[LEGACY_ELEMENT_ID_KEY] === 'string');
}
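  // For reference, the two encodings accepted by extractId()/isId() above,
  // using a made-up raw ID:
  //
  //   WebElement.extractId({'element-6066-11e4-a52e-4f735466cecf': 'abc'});  // 'abc'
  //   WebElement.extractId({'ELEMENT': 'abc'});                              // 'abc'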
/**
* Compares two WebElements for equality.
*
* @param {!WebElement} a A WebElement.
* @param {!WebElement} b A WebElement.
* @return {!promise.Thenable<boolean>} A promise that will be
* resolved to whether the two WebElements are equal.
*/
static equals(a, b) {
if (a === b) {
return a.driver_.controlFlow().promise(resolve => resolve(true));
}
let ids = [a.getId(), b.getId()];
return promise.all(ids).then(function(ids) {
      // If the two elements have the same ID, they should be considered
// equal. Otherwise, they may still be equivalent, but we'll need to
// ask the server to check for us.
if (ids[0] === ids[1]) {
return true;
}
let cmd = new command.Command(command.Name.ELEMENT_EQUALS);
cmd.setParameter('id', ids[0]);
cmd.setParameter('other', ids[1]);
return a.driver_.schedule(cmd, 'WebElement.equals()');
});
}
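  // Illustrative comparison via equals() above. The locators are hypothetical
  // and `By` is assumed to be this library's locator factory.
  //
  //   let a = driver.findElement(By.id('foo'));
  //   let b = driver.findElement(By.css('#foo'));
  //   WebElement.equals(a, b).then(same => console.log('same element?', same));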
/** @return {!WebDriver} The parent driver for this instance. */
getDriver() {
return this.driver_;
}
/**
* @return {!promise.Thenable<string>} A promise that resolves to
* the server-assigned opaque ID assigned to this element.
*/
getId() {
return this.id_;
}
/**
* @return {!Object} Returns the serialized representation of this WebElement.
*/
[Symbols.serialize]() {
return this.getId().then(WebElement.buildId);
}
/**
* Schedules a command that targets this element with the parent WebDriver
* instance. Will ensure this element's ID is included in the command
* parameters under the "id" key.
*
* @param {!command.Command} command The command to schedule.
* @param {string} description A description of the command for debugging.
* @return {!promise.Thenable<T>} A promise that will be resolved
* with the command result.
* @template T
* @see WebDriver#schedule
* @private
*/
schedule_(command, description) {
command.setParameter('id', this);
return this.driver_.schedule(command, description);
}
/**
   * Schedules a command to find a descendant of this element. If the element
* cannot be found, the returned promise will be rejected with a
* {@linkplain error.NoSuchElementError NoSuchElementError}.
*
* The search criteria for an element may be defined using one of the static
* factories on the {@link by.By} class, or as a short-hand
* {@link ./by.ByHash} object. For example, the following two statements
* are equivalent:
*
* var e1 = element.findElement(By.id('foo'));
* var e2 = element.findElement({id:'foo'});
*
* You may also provide a custom locator function, which takes as input this
* instance and returns a {@link WebElement}, or a promise that will resolve
* to a WebElement. If the returned promise resolves to an array of
* WebElements, WebDriver will use the first element. For example, to find the
* first visible link on a page, you could write:
*
* var link = element.findElement(firstVisibleLink);
*
* function firstVisibleLink(element) {
* var links = element.findElements(By.tagName('a'));
* return promise.filter(links, function(link) {
* return link.isDisplayed();
* });
* }
*
* @param {!(by.By|Function)} locator The locator strategy to use when
* searching for the element.
* @return {!WebElementPromise} A WebElement that can be used to issue
* commands against the located element. If the element is not found, the
* element will be invalidated and all scheduled commands aborted.
*/
findElement(locator) {
locator = by.checkedLocator(locator);
let id;
if (typeof locator === 'function') {
id = this.driver_.findElementInternal_(locator, this);
} else {
let cmd = new command.Command(
command.Name.FIND_CHILD_ELEMENT).
setParameter('using', locator.using).
setParameter('value', locator.value);
id = this.schedule_(cmd, 'WebElement.findElement(' + locator + ')');
}
return new WebElementPromise(this.driver_, id);
}
/**
* Schedules a command to find all of the descendants of this element that
* match the given search criteria.
*
* @param {!(by.By|Function)} locator The locator strategy to use when
* searching for the element.
* @return {!promise.Thenable<!Array<!WebElement>>} A
* promise that will resolve to an array of WebElements.
*/
findElements(locator) {
locator = by.checkedLocator(locator);
let id;
if (typeof locator === 'function') {
return this.driver_.findElementsInternal_(locator, this);
} else {
var cmd = new command.Command(
command.Name.FIND_CHILD_ELEMENTS).
setParameter('using', locator.using).
setParameter('value', locator.value);
return this.schedule_(cmd, 'WebElement.findElements(' + locator + ')');
}
}
/**
* Schedules a command to click on this element.
* @return {!promise.Thenable<void>} A promise that will be resolved
* when the click command has completed.
*/
click() {
return this.schedule_(
new command.Command(command.Name.CLICK_ELEMENT),
'WebElement.click()');
}
/**
* Schedules a command to type a sequence on the DOM element represented by
* this instance.
*
* Modifier keys (SHIFT, CONTROL, ALT, META) are stateful; once a modifier is
* processed in the key sequence, that key state is toggled until one of the
* following occurs:
*
* - The modifier key is encountered again in the sequence. At this point the
* state of the key is toggled (along with the appropriate keyup/down
* events).
* - The {@link input.Key.NULL} key is encountered in the sequence. When
   *   this key is encountered, all modifier keys currently in the down state are
* released (with accompanying keyup events). The NULL key can be used to
* simulate common keyboard shortcuts:
*
* element.sendKeys("text was",
* Key.CONTROL, "a", Key.NULL,
* "now text is");
* // Alternatively:
* element.sendKeys("text was",
* Key.chord(Key.CONTROL, "a"),
* "now text is");
*
* - The end of the key sequence is encountered. When there are no more keys
* to type, all depressed modifier keys are released (with accompanying
* keyup events).
*
* If this element is a file input ({@code <input type="file">}), the
* specified key sequence should specify the path to the file to attach to
* the element. This is analogous to the user clicking "Browse..." and entering
* the path into the file select dialog.
*
* var form = driver.findElement(By.css('form'));
* var element = form.findElement(By.css('input[type=file]'));
* element.sendKeys('/path/to/file.txt');
* form.submit();
*
* For uploads to function correctly, the entered path must reference a file
* on the _browser's_ machine, not the local machine running this script. When
* running against a remote Selenium server, a {@link input.FileDetector}
* may be used to transparently copy files to the remote machine before
* attempting to upload them in the browser.
*
* __Note:__ On browsers where native keyboard events are not supported
* (e.g. Firefox on OS X), key events will be synthesized. Special
* punctuation keys will be synthesized according to a standard QWERTY en-us
* keyboard layout.
*
* @param {...(number|string|!IThenable<(number|string)>)} var_args The
* sequence of keys to type. Number keys may be referenced numerically or
* by string (1 or '1'). All arguments will be joined into a single
* sequence.
* @return {!promise.Thenable<void>} A promise that will be resolved
* when all keys have been typed.
*/
sendKeys(var_args) {
let keys = Promise.all(Array.prototype.slice.call(arguments, 0)).
then(keys => {
let ret = [];
keys.forEach(key => {
let type = typeof key;
if (type === 'number') {
key = String(key);
} else if (type !== 'string') {
throw TypeError(
                  'each key must be a number or string; got ' + type);
}
// The W3C protocol requires keys to be specified as an array where
// each element is a single key.
ret.push.apply(ret, key.split(''));
});
return ret;
});
if (!this.driver_.fileDetector_) {
return this.schedule_(
new command.Command(command.Name.SEND_KEYS_TO_ELEMENT).
setParameter('text', keys).
setParameter('value', keys),
'WebElement.sendKeys()');
}
// Suppress unhandled rejection errors until the flow executes the command.
keys.catch(function() {});
var element = this;
return this.getDriver().controlFlow().execute(function() {
return keys.then(function(keys) {
return element.driver_.fileDetector_
.handleFile(element.driver_, keys.join(''));
}).then(function(keys) {
return element.schedule_(
new command.Command(command.Name.SEND_KEYS_TO_ELEMENT).
setParameter('text', keys).
setParameter('value', keys.split('')),
'WebElement.sendKeys()');
});
}, 'WebElement.sendKeys()');
}
/**
* Schedules a command to query for the tag/node name of this element.
* @return {!promise.Thenable<string>} A promise that will be
* resolved with the element's tag name.
*/
getTagName() {
return this.schedule_(
new command.Command(command.Name.GET_ELEMENT_TAG_NAME),
'WebElement.getTagName()');
}
/**
* Schedules a command to query for the computed style of the element
* represented by this instance. If the element inherits the named style from
* its parent, the parent will be queried for its value. Where possible, color
* values will be converted to their hex representation (e.g. #00ff00 instead
* of rgb(0, 255, 0)).
*
* _Warning:_ the value returned will be as the browser interprets it, so
* it may be tricky to form a proper assertion.
*
* @param {string} cssStyleProperty The name of the CSS style property to look
* up.
* @return {!promise.Thenable<string>} A promise that will be
* resolved with the requested CSS value.
*/
getCssValue(cssStyleProperty) {
var name = command.Name.GET_ELEMENT_VALUE_OF_CSS_PROPERTY;
return this.schedule_(
new command.Command(name).
setParameter('propertyName', cssStyleProperty),
'WebElement.getCssValue(' + cssStyleProperty + ')');
}
/**
* Schedules a command to query for the value of the given attribute of the
* element. Will return the current value, even if it has been modified after
* the page has been loaded. More exactly, this method will return the value
* of the given attribute, unless that attribute is not present, in which case
* the value of the property with the same name is returned. If neither value
* is set, null is returned (for example, the "value" property of a textarea
   * element). The "style" attribute is converted, as best as possible, to a
   * text representation with a trailing semi-colon. The following are deemed to
* be "boolean" attributes and will return either "true" or null:
*
* async, autofocus, autoplay, checked, compact, complete, controls, declare,
* defaultchecked, defaultselected, defer, disabled, draggable, ended,
* formnovalidate, hidden, indeterminate, iscontenteditable, ismap, itemscope,
* loop, multiple, muted, nohref, noresize, noshade, novalidate, nowrap, open,
* paused, pubdate, readonly, required, reversed, scoped, seamless, seeking,
* selected, spellcheck, truespeed, willvalidate
*
* Finally, the following commonly mis-capitalized attribute/property names
* are evaluated as expected:
*
* - "class"
* - "readonly"
*
* @param {string} attributeName The name of the attribute to query.
* @return {!promise.Thenable<?string>} A promise that will be
* resolved with the attribute's value. The returned value will always be
* either a string or null.
*/
getAttribute(attributeName) {
return this.schedule_(
new command.Command(command.Name.GET_ELEMENT_ATTRIBUTE).
setParameter('name', attributeName),
'WebElement.getAttribute(' + attributeName + ')');
}
/**
* Get the visible (i.e. not hidden by CSS) innerText of this element,
* including sub-elements, without any leading or trailing whitespace.
*
* @return {!promise.Thenable<string>} A promise that will be
* resolved with the element's visible text.
*/
getText() {
return this.schedule_(
new command.Command(command.Name.GET_ELEMENT_TEXT),
'WebElement.getText()');
}
/**
* Schedules a command to compute the size of this element's bounding box, in
* pixels.
* @return {!promise.Thenable<{width: number, height: number}>} A
* promise that will be resolved with the element's size as a
* {@code {width:number, height:number}} object.
*/
getSize() {
return this.schedule_(
new command.Command(command.Name.GET_ELEMENT_SIZE),
'WebElement.getSize()');
}
/**
* Schedules a command to compute the location of this element in page space.
* @return {!promise.Thenable<{x: number, y: number}>} A promise that
* will be resolved to the element's location as a
* {@code {x:number, y:number}} object.
*/
getLocation() {
return this.schedule_(
new command.Command(command.Name.GET_ELEMENT_LOCATION),
'WebElement.getLocation()');
}
/**
* Schedules a command to query whether the DOM element represented by this
* instance is enabled, as dictated by the {@code disabled} attribute.
* @return {!promise.Thenable<boolean>} A promise that will be
* resolved with whether this element is currently enabled.
*/
isEnabled() {
return this.schedule_(
new command.Command(command.Name.IS_ELEMENT_ENABLED),
'WebElement.isEnabled()');
}
/**
* Schedules a command to query whether this element is selected.
* @return {!promise.Thenable<boolean>} A promise that will be
* resolved with whether this element is currently selected.
*/
isSelected() {
return this.schedule_(
new command.Command(command.Name.IS_ELEMENT_SELECTED),
'WebElement.isSelected()');
}
/**
* Schedules a command to submit the form containing this element (or this
* element if it is a FORM element). This command is a no-op if the element is
* not contained in a form.
* @return {!promise.Thenable<void>} A promise that will be resolved
* when the form has been submitted.
*/
submit() {
return this.schedule_(
new command.Command(command.Name.SUBMIT_ELEMENT),
'WebElement.submit()');
}
/**
* Schedules a command to clear the `value` of this element. This command has
* no effect if the underlying DOM element is neither a text INPUT element
* nor a TEXTAREA element.
* @return {!promise.Thenable<void>} A promise that will be resolved
* when the element has been cleared.
*/
clear() {
return this.schedule_(
new command.Command(command.Name.CLEAR_ELEMENT),
'WebElement.clear()');
}
/**
* Schedules a command to test whether this element is currently displayed.
* @return {!promise.Thenable<boolean>} A promise that will be
* resolved with whether this element is currently visible on the page.
*/
isDisplayed() {
return this.schedule_(
new command.Command(command.Name.IS_ELEMENT_DISPLAYED),
'WebElement.isDisplayed()');
}
/**
* Take a screenshot of the visible region encompassed by this element's
* bounding rectangle.
*
* @param {boolean=} opt_scroll Optional argument that indicates whether the
* element should be scrolled into view before taking a screenshot.
* Defaults to false.
* @return {!promise.Thenable<string>} A promise that will be
* resolved to the screenshot as a base-64 encoded PNG.
*/
takeScreenshot(opt_scroll) {
var scroll = !!opt_scroll;
return this.schedule_(
new command.Command(command.Name.TAKE_ELEMENT_SCREENSHOT)
.setParameter('scroll', scroll),
'WebElement.takeScreenshot(' + scroll + ')');
}
}
/**
* WebElementPromise is a promise that will be fulfilled with a WebElement.
* This serves as a forward proxy on WebElement, allowing calls to be
 * scheduled directly on this instance before the underlying
* WebElement has been fulfilled. In other words, the following two statements
* are equivalent:
*
* driver.findElement({id: 'my-button'}).click();
* driver.findElement({id: 'my-button'}).then(function(el) {
* return el.click();
* });
*
* @implements {promise.CancellableThenable<!WebElement>}
* @final
*/
class WebElementPromise extends WebElement {
/**
* @param {!WebDriver} driver The parent WebDriver instance for this
* element.
* @param {!promise.Thenable<!WebElement>} el A promise
* that will resolve to the promised element.
*/
constructor(driver, el) {
super(driver, 'unused');
/**
* Cancel operation is only supported if the wrapped thenable is also
* cancellable.
* @param {(string|Error)=} opt_reason
* @override
*/
this.cancel = function(opt_reason) {
if (promise.CancellableThenable.isImplementation(el)) {
/** @type {!promise.CancellableThenable} */(el).cancel(opt_reason);
}
};
/** @override */
this.then = el.then.bind(el);
/** @override */
this.catch = el.catch.bind(el);
/**
* Defers returning the element ID until the wrapped WebElement has been
* resolved.
* @override
*/
this.getId = function() {
return el.then(function(el) {
return el.getId();
});
};
}
}
promise.CancellableThenable.addImplementation(WebElementPromise);
//////////////////////////////////////////////////////////////////////////////
//
// Alert
//
//////////////////////////////////////////////////////////////////////////////
/**
* Represents a modal dialog such as {@code alert}, {@code confirm}, or
* {@code prompt}. Provides functions to retrieve the message displayed with
* the alert, accept or dismiss the alert, and set the response text (in the
* case of {@code prompt}).
*/
class Alert {
/**
* @param {!WebDriver} driver The driver controlling the browser this alert
* is attached to.
* @param {string} text The message text displayed with this alert.
*/
constructor(driver, text) {
/** @private {!WebDriver} */
this.driver_ = driver;
/** @private {!promise.Thenable<string>} */
this.text_ = driver.controlFlow().promise(resolve => resolve(text));
}
/**
* Retrieves the message text displayed with this alert. For instance, if the
* alert were opened with alert("hello"), then this would return "hello".
*
* @return {!promise.Thenable<string>} A promise that will be
* resolved to the text displayed with this alert.
*/
getText() {
return this.text_;
}
/**
* Sets the username and password in an alert prompting for credentials (such
* as a Basic HTTP Auth prompt). This method will implicitly
* {@linkplain #accept() submit} the dialog.
*
* @param {string} username The username to send.
* @param {string} password The password to send.
* @return {!promise.Thenable<void>} A promise that will be resolved when this
* command has completed.
*/
authenticateAs(username, password) {
return this.driver_.schedule(
new command.Command(command.Name.SET_ALERT_CREDENTIALS),
'WebDriver.switchTo().alert()'
+ `.authenticateAs("${username}", "${password}")`);
}
/**
* Accepts this alert.
*
* @return {!promise.Thenable<void>} A promise that will be resolved
* when this command has completed.
*/
accept() {
return this.driver_.schedule(
new command.Command(command.Name.ACCEPT_ALERT),
'WebDriver.switchTo().alert().accept()');
}
/**
* Dismisses this alert.
*
* @return {!promise.Thenable<void>} A promise that will be resolved
* when this command has completed.
*/
dismiss() {
return this.driver_.schedule(
new command.Command(command.Name.DISMISS_ALERT),
'WebDriver.switchTo().alert().dismiss()');
}
/**
* Sets the response text on this alert. This command will return an error if
* the underlying alert does not support response text (e.g. window.alert and
* window.confirm).
*
* @param {string} text The text to set.
* @return {!promise.Thenable<void>} A promise that will be resolved
* when this command has completed.
*/
sendKeys(text) {
return this.driver_.schedule(
new command.Command(command.Name.SET_ALERT_TEXT).
setParameter('text', text),
'WebDriver.switchTo().alert().sendKeys(' + text + ')');
}
}
/**
* AlertPromise is a promise that will be fulfilled with an Alert. This promise
* serves as a forward proxy on an Alert, allowing calls to be scheduled
* directly on this instance before the underlying Alert has been fulfilled. In
* other words, the following two statements are equivalent:
*
* driver.switchTo().alert().dismiss();
* driver.switchTo().alert().then(function(alert) {
* return alert.dismiss();
* });
*
* @implements {promise.CancellableThenable<!webdriver.Alert>}
* @final
*/
class AlertPromise extends Alert {
/**
* @param {!WebDriver} driver The driver controlling the browser this
* alert is attached to.
* @param {!promise.Thenable<!Alert>} alert A thenable
* that will be fulfilled with the promised alert.
*/
constructor(driver, alert) {
super(driver, 'unused');
/**
* Cancel operation is only supported if the wrapped thenable is also
* cancellable.
* @param {(string|Error)=} opt_reason
* @override
*/
this.cancel = function(opt_reason) {
if (promise.CancellableThenable.isImplementation(alert)) {
/** @type {!promise.CancellableThenable} */(alert).cancel(opt_reason);
}
};
/** @override */
this.then = alert.then.bind(alert);
/** @override */
this.catch = alert.catch.bind(alert);
/**
     * Defers returning text until the promised alert has been resolved.
* @override
*/
this.getText = function() {
return alert.then(function(alert) {
return alert.getText();
});
};
/**
* Defers action until the alert has been located.
* @override
*/
this.authenticateAs = function(username, password) {
return alert.then(function(alert) {
return alert.authenticateAs(username, password);
});
};
/**
* Defers action until the alert has been located.
* @override
*/
this.accept = function() {
return alert.then(function(alert) {
return alert.accept();
});
};
/**
* Defers action until the alert has been located.
* @override
*/
this.dismiss = function() {
return alert.then(function(alert) {
return alert.dismiss();
});
};
/**
* Defers action until the alert has been located.
* @override
*/
this.sendKeys = function(text) {
return alert.then(function(alert) {
return alert.sendKeys(text);
});
};
}
}
promise.CancellableThenable.addImplementation(AlertPromise);
// PUBLIC API
module.exports = {
Alert: Alert,
AlertPromise: AlertPromise,
Condition: Condition,
Logs: Logs,
Navigation: Navigation,
Options: Options,
TargetLocator: TargetLocator,
Timeouts: Timeouts,
IWebDriver: IWebDriver,
WebDriver: WebDriver,
WebElement: WebElement,
WebElementCondition: WebElementCondition,
WebElementPromise: WebElementPromise,
Window: Window
};
| 1 | 14,518 | Also update line 2205 below | SeleniumHQ-selenium | java |
@@ -39,6 +39,7 @@ func (p *Provisioner) ProvisionHostPath(opts pvController.VolumeOptions, volumeC
name := opts.PVName
stgType := volumeConfig.GetStorageType()
saName := getOpenEBSServiceAccountName()
+ shared := volumeConfig.GetSharedMountValue()
path, err := volumeConfig.GetPath()
if err != nil { | 1 | /*
Copyright 2019 The OpenEBS Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package app
import (
"github.com/openebs/maya/pkg/alertlog"
"github.com/pkg/errors"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/klog"
pvController "sigs.k8s.io/sig-storage-lib-external-provisioner/controller"
//pvController "github.com/kubernetes-sigs/sig-storage-lib-external-provisioner/controller"
mconfig "github.com/openebs/maya/pkg/apis/openebs.io/v1alpha1"
persistentvolume "github.com/openebs/maya/pkg/kubernetes/persistentvolume/v1alpha1"
)
// ProvisionHostPath is invoked by the Provisioner, which expects a HostPath PV
// to be provisioned and a valid PV spec returned.
func (p *Provisioner) ProvisionHostPath(opts pvController.VolumeOptions, volumeConfig *VolumeConfig) (*v1.PersistentVolume, error) {
pvc := opts.PVC
nodeHostname := GetNodeHostname(opts.SelectedNode)
taints := GetTaints(opts.SelectedNode)
name := opts.PVName
stgType := volumeConfig.GetStorageType()
saName := getOpenEBSServiceAccountName()
path, err := volumeConfig.GetPath()
if err != nil {
alertlog.Logger.Errorw("",
"eventcode", "cstor.local.pv.provision.failure",
"msg", "Failed to provision CStor Local PV",
"rname", opts.PVName,
"reason", "Unable to get volume config",
"storagetype", stgType,
)
return nil, err
}
klog.Infof("Creating volume %v at %v:%v", name, nodeHostname, path)
	// Before using the path for local PV, make sure it is created.
initCmdsForPath := []string{"mkdir", "-m", "0777", "-p"}
podOpts := &HelperPodOptions{
cmdsForPath: initCmdsForPath,
name: name,
path: path,
nodeHostname: nodeHostname,
serviceAccountName: saName,
selectedNodeTaints: taints,
}
iErr := p.createInitPod(podOpts)
if iErr != nil {
klog.Infof("Initialize volume %v failed: %v", name, iErr)
alertlog.Logger.Errorw("",
"eventcode", "cstor.local.pv.provision.failure",
"msg", "Failed to provision CStor Local PV",
"rname", opts.PVName,
"reason", "Volume initialization failed",
"storagetype", stgType,
)
return nil, iErr
}
// VolumeMode will always be specified as Filesystem for host path volume,
// and the value passed in from the PVC spec will be ignored.
fs := v1.PersistentVolumeFilesystem
// It is possible that the HostPath doesn't already exist on the node.
// Set the Local PV to create it.
//hostPathType := v1.HostPathDirectoryOrCreate
// TODO initialize the Labels and annotations
// Use annotations to specify the context using which the PV was created.
//volAnnotations := make(map[string]string)
//volAnnotations[string(v1alpha1.CASTypeKey)] = casVolume.Spec.CasType
//fstype := casVolume.Spec.FSType
labels := make(map[string]string)
labels[string(mconfig.CASTypeKey)] = "local-" + stgType
//labels[string(v1alpha1.StorageClassKey)] = *className
//TODO Change the following to a builder pattern
pvObj, err := persistentvolume.NewBuilder().
WithName(name).
WithLabels(labels).
WithReclaimPolicy(opts.PersistentVolumeReclaimPolicy).
WithAccessModes(pvc.Spec.AccessModes).
WithVolumeMode(fs).
WithCapacityQty(pvc.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)]).
WithLocalHostDirectory(path).
WithNodeAffinity(nodeHostname).
Build()
if err != nil {
alertlog.Logger.Errorw("",
"eventcode", "cstor.local.pv.provision.failure",
"msg", "Failed to provision CStor Local PV",
"rname", opts.PVName,
"reason", "failed to build persistent volume",
"storagetype", stgType,
)
return nil, err
}
alertlog.Logger.Infow("",
"eventcode", "cstor.local.pv.provision.success",
"msg", "Successfully provisioned CStor Local PV",
"rname", opts.PVName,
"storagetype", stgType,
)
return pvObj, nil
}
// GetNodeObjectFromHostName returns the Node Object with matching NodeHostName.
func (p *Provisioner) GetNodeObjectFromHostName(hostName string) (*v1.Node, error) {
labelSelector := metav1.LabelSelector{MatchLabels: map[string]string{persistentvolume.KeyNode: hostName}}
listOptions := metav1.ListOptions{
LabelSelector: labels.Set(labelSelector.MatchLabels).String(),
Limit: 1,
}
	nodeList, err := p.kubeClient.CoreV1().Nodes().List(listOptions)
	if err != nil {
		return nil, errors.Wrapf(err, "unable to get the Node with the NodeHostName %s", hostName)
	}
	// Guard against an empty list before indexing into it.
	if len(nodeList.Items) == 0 {
		return nil, errors.Errorf("no Node found with the NodeHostName %s", hostName)
	}
	return &nodeList.Items[0], nil
}
// DeleteHostPath is invoked by the PVC controller to perform clean-up
// activities before deleting the PV object. If reclaim policy is
// set to not-retain, then this function will create a helper pod
// to delete the host path from the node.
func (p *Provisioner) DeleteHostPath(pv *v1.PersistentVolume) (err error) {
defer func() {
err = errors.Wrapf(err, "failed to delete volume %v", pv.Name)
}()
saName := getOpenEBSServiceAccountName()
	// Determine the path and node of the Local PV.
pvObj := persistentvolume.NewForAPIObject(pv)
path := pvObj.GetPath()
if path == "" {
return errors.Errorf("no HostPath set")
}
hostname := pvObj.GetAffinitedNodeHostname()
if hostname == "" {
return errors.Errorf("cannot find affinited node hostname")
}
alertlog.Logger.Infof("Get the Node Object from hostName: %v", hostname)
	// Get the node Object once again to get updated Taints.
nodeObject, err := p.GetNodeObjectFromHostName(hostname)
if err != nil {
return err
}
taints := GetTaints(nodeObject)
	// Initiate clean up only when reclaim policy is not retain.
klog.Infof("Deleting volume %v at %v:%v", pv.Name, hostname, path)
cleanupCmdsForPath := []string{"rm", "-rf"}
podOpts := &HelperPodOptions{
cmdsForPath: cleanupCmdsForPath,
name: pv.Name,
path: path,
nodeHostname: hostname,
serviceAccountName: saName,
selectedNodeTaints: taints,
}
if err := p.createCleanupPod(podOpts); err != nil {
return errors.Wrapf(err, "clean up volume %v failed", pv.Name)
}
return nil
}
| 1 | 18,028 | n_: It is a good practice to name the variable to indicate what they contain. In this case since `shared` is supposed to have boolean, calling it: `isShared` can help in the readability of the code. | openebs-maya | go |
@@ -85,7 +85,10 @@ export class ManualColumnFreeze extends BasePlugin {
}
/**
- * Freezes the given column (add it to fixed columns).
+ * Freezes the specified column (i.e. adds it to fixed columns).
+ *
+ * `freezeColumn()` doesn't re-render the table,
+ * so you need to call the `render()` method afterward.
*
* @param {number} column Visual column index.
*/ | 1 | import { BasePlugin } from '../base';
import freezeColumnItem from './contextMenuItem/freezeColumn';
import unfreezeColumnItem from './contextMenuItem/unfreezeColumn';
import './manualColumnFreeze.css';
export const PLUGIN_KEY = 'manualColumnFreeze';
export const PLUGIN_PRIORITY = 110;
const privatePool = new WeakMap();
/**
* @plugin ManualColumnFreeze
* @class ManualColumnFreeze
*
* @description
 * This plugin allows you to manually "freeze" and "unfreeze" a column, using an entry in the Context Menu or the API.
 * You can enable it by setting the {@link options#manualcolumnfreeze Options#manualColumnFreeze} property to `true`.
*
* @example
* ```js
* // Enables the plugin
* manualColumnFreeze: true,
* ```
*/
export class ManualColumnFreeze extends BasePlugin {
static get PLUGIN_KEY() {
return PLUGIN_KEY;
}
static get PLUGIN_PRIORITY() {
return PLUGIN_PRIORITY;
}
constructor(hotInstance) {
super(hotInstance);
privatePool.set(this, {
afterFirstUse: false,
});
}
/**
   * Checks if the plugin is enabled in the Handsontable settings. This method is executed in the {@link Hooks#beforeInit}
   * hook and if it returns `true` then the {@link ManualColumnFreeze#enablePlugin} method is called.
*
* @returns {boolean}
*/
isEnabled() {
return !!this.hot.getSettings()[PLUGIN_KEY];
}
/**
* Enables the plugin functionality for this Handsontable instance.
*/
enablePlugin() {
if (this.enabled) {
return;
}
this.addHook('afterContextMenuDefaultOptions', options => this.addContextMenuEntry(options));
this.addHook('beforeColumnMove', (columns, finalIndex) => this.onBeforeColumnMove(columns, finalIndex));
super.enablePlugin();
}
/**
* Disables the plugin functionality for this Handsontable instance.
*/
disablePlugin() {
const priv = privatePool.get(this);
priv.afterFirstUse = false;
super.disablePlugin();
}
/**
* Updates the plugin state. This method is executed when {@link Core#updateSettings} is invoked.
*/
updatePlugin() {
this.disablePlugin();
this.enablePlugin();
super.updatePlugin();
}
/**
* Freezes the given column (add it to fixed columns).
*
* @param {number} column Visual column index.
*/
freezeColumn(column) {
const priv = privatePool.get(this);
const settings = this.hot.getSettings();
if (!priv.afterFirstUse) {
priv.afterFirstUse = true;
}
if (settings.fixedColumnsLeft === this.hot.countCols() || column <= settings.fixedColumnsLeft - 1) {
return; // already fixed
}
this.hot.columnIndexMapper.moveIndexes(column, settings.fixedColumnsLeft);
settings.fixedColumnsLeft += 1;
}
/**
   * Unfreezes the given column (i.e. removes it from fixed columns and restores it to its previous position).
*
* @param {number} column Visual column index.
*/
unfreezeColumn(column) {
const priv = privatePool.get(this);
const settings = this.hot.getSettings();
if (!priv.afterFirstUse) {
priv.afterFirstUse = true;
}
if (settings.fixedColumnsLeft <= 0 || (column > settings.fixedColumnsLeft - 1)) {
return; // not fixed
}
settings.fixedColumnsLeft -= 1;
this.hot.columnIndexMapper.moveIndexes(column, settings.fixedColumnsLeft);
}
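  // Illustrative usage of the two methods above. Assumes `hot` is a
  // Handsontable instance created with `manualColumnFreeze: true`.
  //
  //   const plugin = hot.getPlugin('manualColumnFreeze');
  //   plugin.freezeColumn(2);
  //   hot.render(); // freezeColumn() does not re-render the table
  //   plugin.unfreezeColumn(2);
  //   hot.render();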
/**
* Adds the manualColumnFreeze context menu entries.
*
* @private
* @param {object} options Context menu options.
*/
addContextMenuEntry(options) {
options.items.push(
{ name: '---------' },
freezeColumnItem(this),
unfreezeColumnItem(this)
);
}
/**
   * Prevents moving the columns from/to the fixed area.
*
* @private
* @param {Array} columns Array of visual column indexes to be moved.
* @param {number} finalIndex Visual column index, being a start index for the moved columns. Points to where the elements will be placed after the moving action.
* @returns {boolean|undefined}
*/
onBeforeColumnMove(columns, finalIndex) {
const priv = privatePool.get(this);
if (priv.afterFirstUse) {
const freezeLine = this.hot.getSettings().fixedColumnsLeft;
// Moving any column before the "freeze line" isn't possible.
if (finalIndex < freezeLine) {
return false;
}
// Moving frozen column isn't possible.
if (columns.some(column => column < freezeLine)) {
return false;
}
}
}
}
| 1 | 19,759 | I suppose it's a false-positive error. Maybe there is a way to configure the `eslint-*` package to accept `i.e. .... lower case` syntax | handsontable-handsontable | js |
@@ -133,7 +133,7 @@ namespace OpenTelemetry.Trace
private void RunGetRequestedDataOtherSampler(Activity activity)
{
ActivityContext parentContext;
- if (string.IsNullOrEmpty(activity.ParentId))
+ if (string.IsNullOrEmpty(activity.ParentId) || activity.ParentSpanId.ToHexString().Equals("0000000000000000"))
{
parentContext = default;
} | 1 | // <copyright file="ActivitySourceAdapter.cs" company="OpenTelemetry Authors">
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// </copyright>
using System;
using System.Diagnostics;
using System.Linq.Expressions;
using OpenTelemetry.Resources;
namespace OpenTelemetry.Trace
{
/// <summary>
    /// This class encapsulates the logic for performing ActivitySource actions
    /// on Activities that are created using the default ActivitySource.
    /// All activities created without using ActivitySource will have a
    /// default ActivitySource assigned to them, with their name as an empty string.
    /// This class is to be used by instrumentation adapters which convert/augment
    /// activities created without ActivitySource into something which closely
    /// matches the one created using ActivitySource.
/// </summary>
/// <remarks>
/// This class is meant to be only used when writing new Instrumentation for
/// libraries which are already instrumented with DiagnosticSource/Activity
/// following this doc:
/// https://github.com/dotnet/runtime/blob/master/src/libraries/System.Diagnostics.DiagnosticSource/src/ActivityUserGuide.md.
/// </remarks>
public class ActivitySourceAdapter
{
private static readonly Action<Activity, ActivityKind> SetKindProperty = CreateActivityKindSetter();
private readonly Sampler sampler;
private readonly Resource resource;
private ActivityProcessor activityProcessor;
private Action<Activity> getRequestedDataAction;
internal ActivitySourceAdapter(Sampler sampler, ActivityProcessor activityProcessor, Resource resource)
{
if (sampler == null)
{
throw new ArgumentNullException(nameof(sampler));
}
if (resource == null)
{
throw new ArgumentNullException(nameof(resource));
}
this.sampler = sampler;
if (this.sampler is AlwaysOnSampler)
{
this.getRequestedDataAction = this.RunGetRequestedDataAlwaysOnSampler;
}
else if (this.sampler is AlwaysOffSampler)
{
this.getRequestedDataAction = this.RunGetRequestedDataAlwaysOffSampler;
}
else
{
this.getRequestedDataAction = this.RunGetRequestedDataOtherSampler;
}
this.activityProcessor = activityProcessor;
this.resource = resource;
}
private ActivitySourceAdapter()
{
}
/// <summary>
/// Method that starts an <see cref="Activity"/>.
/// </summary>
/// <param name="activity"><see cref="Activity"/> to be started.</param>
        /// <param name="kind">ActivityKind to be set on the activity.</param>
public void Start(Activity activity, ActivityKind kind)
{
SetKindProperty(activity, kind);
this.getRequestedDataAction(activity);
if (activity.IsAllDataRequested)
{
activity.SetResource(this.resource);
this.activityProcessor?.OnStart(activity);
}
}
/// <summary>
/// Method that stops an <see cref="Activity"/>.
/// </summary>
/// <param name="activity"><see cref="Activity"/> to be stopped.</param>
public void Stop(Activity activity)
{
if (activity?.IsAllDataRequested ?? false)
{
this.activityProcessor?.OnEnd(activity);
}
}
internal void UpdateProcessor(ActivityProcessor processor)
{
this.activityProcessor = processor;
}
private static Action<Activity, ActivityKind> CreateActivityKindSetter()
{
ParameterExpression instance = Expression.Parameter(typeof(Activity), "instance");
ParameterExpression propertyValue = Expression.Parameter(typeof(ActivityKind), "propertyValue");
var body = Expression.Assign(Expression.Property(instance, "Kind"), propertyValue);
return Expression.Lambda<Action<Activity, ActivityKind>>(body, instance, propertyValue).Compile();
}
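        // For reference, the compiled delegate above behaves like
        // `(activity, kind) => activity.Kind = kind`. An expression tree is
        // used here, presumably because Activity.Kind does not expose a
        // public setter for direct assignment.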
private void RunGetRequestedDataAlwaysOnSampler(Activity activity)
{
activity.IsAllDataRequested = true;
activity.ActivityTraceFlags |= ActivityTraceFlags.Recorded;
}
private void RunGetRequestedDataAlwaysOffSampler(Activity activity)
{
activity.IsAllDataRequested = false;
}
private void RunGetRequestedDataOtherSampler(Activity activity)
{
ActivityContext parentContext;
if (string.IsNullOrEmpty(activity.ParentId))
{
parentContext = default;
}
else if (activity.Parent != null)
{
parentContext = activity.Parent.Context;
}
else
{
parentContext = new ActivityContext(
activity.TraceId,
activity.ParentSpanId,
activity.ActivityTraceFlags,
activity.TraceStateString,
isRemote: true);
}
var samplingParameters = new SamplingParameters(
parentContext,
activity.TraceId,
activity.DisplayName,
activity.Kind,
activity.TagObjects,
activity.Links);
var samplingResult = this.sampler.ShouldSample(samplingParameters);
switch (samplingResult.Decision)
{
case SamplingDecision.NotRecord:
activity.IsAllDataRequested = false;
break;
case SamplingDecision.Record:
activity.IsAllDataRequested = true;
break;
case SamplingDecision.RecordAndSampled:
activity.IsAllDataRequested = true;
activity.ActivityTraceFlags |= ActivityTraceFlags.Recorded;
break;
}
if (samplingResult.Decision != SamplingDecision.NotRecord)
{
foreach (var att in samplingResult.Attributes)
{
activity.SetTag(att.Key, att.Value);
}
}
}
}
}
| 1 | 17,151 | this maynot be a perf issue, if ToHexString() is not actually allocating a string, but returns the caches string value. to be confirmed. | open-telemetry-opentelemetry-dotnet | .cs |
@@ -114,11 +114,16 @@ func (w *watcher) run(ctx context.Context, provider imageprovider.Provider, inte
updates = append(updates, u...)
}
if len(updates) == 0 {
- w.logger.Info("no image to be updated")
+ w.logger.Info("no image to be updated",
+ zap.String("image-provider", provider.Name()),
+ )
continue
}
if err := update(updates); err != nil {
- w.logger.Error("failed to update image", zap.Error(err))
+ w.logger.Error("failed to update image", zap.String("image-provider",
+ provider.Name()),
+ zap.Error(err),
+ )
continue
}
} | 1 | // Copyright 2020 The PipeCD Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package imagewatcher provides a piped component
// that periodically checks the container registry and updates
// the image if there are differences with Git.
package imagewatcher
import (
"context"
"fmt"
"io/ioutil"
"path/filepath"
"sync"
"time"
"go.uber.org/zap"
"github.com/pipe-cd/pipe/pkg/app/piped/imageprovider"
"github.com/pipe-cd/pipe/pkg/config"
"github.com/pipe-cd/pipe/pkg/git"
"github.com/pipe-cd/pipe/pkg/yamlprocessor"
)
type Watcher interface {
Run(context.Context) error
}
type gitClient interface {
Clone(ctx context.Context, repoID, remote, branch, destination string) (git.Repo, error)
}
type watcher struct {
config *config.PipedSpec
gitClient gitClient
logger *zap.Logger
wg sync.WaitGroup
mu sync.Mutex
// Indexed by repo id.
gitRepos map[string]git.Repo
}
func NewWatcher(cfg *config.PipedSpec, gitClient gitClient, logger *zap.Logger) Watcher {
	return &watcher{
		config:    cfg,
		gitClient: gitClient,
		logger:    logger.Named("image-watcher"),
		// Initialize the map up front; Run assigns into it per repository,
		// which would panic on a nil map.
		gitRepos: make(map[string]git.Repo),
	}
}
// Run spawns goroutines for each image provider. They periodically pull the image
// from the container registry to compare the image with one in the git repository.
func (w *watcher) Run(ctx context.Context) error {
// Pre-clone to cache the registered git repositories.
for _, r := range w.config.Repositories {
repo, err := w.gitClient.Clone(ctx, r.RepoID, r.Remote, r.Branch, "")
if err != nil {
w.logger.Error("failed to clone repository",
zap.String("repo-id", r.RepoID),
zap.Error(err),
)
return err
}
w.gitRepos[r.RepoID] = repo
}
for _, cfg := range w.config.ImageProviders {
p, err := imageprovider.NewProvider(&cfg, w.logger)
if err != nil {
return err
}
w.wg.Add(1)
go w.run(ctx, p, cfg.PullInterval.Duration())
}
w.wg.Wait()
return nil
}
// run periodically compares the images stored in the given provider with those
// stored in git, and then pushes the ones with differences.
func (w *watcher) run(ctx context.Context, provider imageprovider.Provider, interval time.Duration) {
defer w.wg.Done()
ticker := time.NewTicker(interval)
defer ticker.Stop()
for {
select {
case <-ctx.Done():
return
case <-ticker.C:
updates := make([]config.ImageWatcherTarget, 0)
for id, repo := range w.gitRepos {
u, err := w.determineUpdates(ctx, id, repo, provider)
if err != nil {
w.logger.Error("failed to determine images to be updated",
zap.String("repo-id", id),
zap.Error(err),
)
continue
}
updates = append(updates, u...)
}
if len(updates) == 0 {
w.logger.Info("no image to be updated")
continue
}
if err := update(updates); err != nil {
w.logger.Error("failed to update image", zap.Error(err))
continue
}
}
}
}
// determineUpdates gives back target images to be updated for a given repo.
func (w *watcher) determineUpdates(ctx context.Context, repoID string, repo git.Repo, provider imageprovider.Provider) ([]config.ImageWatcherTarget, error) {
branch := repo.GetClonedBranch()
w.mu.Lock()
err := repo.Pull(ctx, branch)
w.mu.Unlock()
if err != nil {
return nil, fmt.Errorf("failed to fetch from and integrate with a local branch: %w", err)
}
// Load Image Watcher Config for the given repo.
includes := make([]string, 0)
excludes := make([]string, 0)
for _, target := range w.config.ImageWatcher.Repos {
if target.RepoID != repoID {
continue
}
includes = append(includes, target.Includes...)
excludes = append(excludes, target.Excludes...)
}
cfg, ok, err := config.LoadImageWatcher(repo.GetPath(), includes, excludes)
if err != nil {
return nil, fmt.Errorf("failed to load configuration file for Image Watcher: %w", err)
}
if !ok {
return nil, fmt.Errorf("configuration file for Image Watcher not found: %w", err)
}
updates := make([]config.ImageWatcherTarget, 0)
for _, target := range cfg.Targets {
if provider.Name() != target.Provider {
continue
}
outdated, err := checkOutdated(ctx, target, repo, provider)
if err != nil {
return nil, fmt.Errorf("failed to check the image is outdated: %w", err)
}
if outdated {
updates = append(updates, target)
}
}
return updates, nil
}
// checkOutdated checks if the image defined in the given target is identical to the one in image provider.
func checkOutdated(ctx context.Context, target config.ImageWatcherTarget, repo git.Repo, provider imageprovider.Provider) (bool, error) {
i, err := provider.ParseImage(target.Image)
if err != nil {
return false, err
}
// TODO: Control not to reach the rate limit
imageRef, err := provider.GetLatestImage(ctx, i)
if err != nil {
return false, err
}
yml, err := ioutil.ReadFile(filepath.Join(repo.GetPath(), target.FilePath))
if err != nil {
return false, err
}
value, err := yamlprocessor.GetValue(yml, target.Field)
if err != nil {
return false, err
}
v, ok := value.(string)
if !ok {
return false, fmt.Errorf("unknown value is defined at %s in %s", target.FilePath, target.Field)
}
return imageRef.String() != v, nil
}
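// For illustration, a target as consumed by checkOutdated above might look
// like this in the watcher's configuration file. The keys mirror the fields
// accessed in the code, but the exact syntax and values are hypothetical:
//
//   targets:
//     - provider: my-registry
//       image: gcr.io/my-project/my-app
//       filePath: k8s/deployment.yaml
//       field: $.spec.template.spec.containers[0].image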
func update(targets []config.ImageWatcherTarget) error {
// TODO: Make it possible to push outdated images to Git
return nil
}
| 1 | 12,203 | `zap.String("image-provider", provider.Name())` should be in a same line. | pipe-cd-pipe | go |
@@ -1,8 +1,6 @@
/*exported DqElement */
function truncate(str, maxLength) {
- 'use strict';
-
maxLength = maxLength || 300;
if (str.length > maxLength) { | 1 | /*exported DqElement */
function truncate(str, maxLength) {
'use strict';
maxLength = maxLength || 300;
if (str.length > maxLength) {
var index = str.indexOf('>');
str = str.substring(0, index + 1);
}
return str;
}
function getSource (element) {
'use strict';
var source = element.outerHTML;
if (!source && typeof XMLSerializer === 'function') {
source = new XMLSerializer().serializeToString(element);
}
return truncate(source || '');
}
/**
* "Serialized" `HTMLElement`. It will calculate the CSS selector,
* grab the source (outerHTML) and offer an array for storing frame paths
* @param {HTMLElement} element The element to serialize
 * @param {Object} spec Properties to use in place of the element when it is instantiated for elements from other frames
*/
function DqElement(element, spec) {
'use strict';
this._fromFrame = !!spec;
this.spec = spec || {};
/**
* The generated HTML source code of the element
* @type {String}
*/
this.source = this.spec.source !== undefined ? this.spec.source : getSource(element);
/**
* The element which this object is based off or the containing frame, used for sorting.
* Excluded in toJSON method.
* @type {HTMLElement}
*/
this._element = element;
}
DqElement.prototype = {
/**
* A unique CSS selector for the element
* @return {String}
*/
get selector() {
return this.spec.selector || [axe.utils.getSelector(this.element)];
},
/**
* Xpath to the element
* @return {String}
*/
get xpath() {
return this.spec.xpath || [axe.utils.getXpath(this.element)];
},
/**
   * Direct reference to the `HTMLElement` wrapped by this `DqElement`.
*/
get element() {
return this._element;
},
get fromFrame() {
return this._fromFrame;
},
toJSON: function() {
'use strict';
return {
selector: this.selector,
source: this.source,
xpath: this.xpath
};
}
};
DqElement.fromFrame = function (node, frame) {
node.selector.unshift(frame.selector);
node.xpath.unshift(frame.xpath);
return new axe.utils.DqElement(frame.element, node);
};
axe.utils.DqElement = DqElement;
| 1 | 11,101 | Why this deletion? | dequelabs-axe-core | js |
@@ -419,7 +419,10 @@ class S3KeyTest(unittest.TestCase):
remote_metadata = check._get_remote_metadata()
# TODO: investigate whether encoding ' ' as '%20' makes sense
- self.assertEqual(check.cache_control, 'public,%20max-age=500')
+ self.assertIn(
+ check.cache_control,
+ ('public,%20max-age=500', 'public, max-age=500')
+ )
self.assertEqual(remote_metadata['cache-control'], 'public,%20max-age=500')
self.assertEqual(check.get_metadata('test-plus'), 'A plus (+)')
self.assertEqual(check.content_disposition, 'filename=Sch%C3%B6ne%20Zeit.txt') | 1 | # -*- coding: utf-8 -*-
# Copyright (c) 2012 Mitch Garnaat http://garnaat.org/
# All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
"""
Some unit tests for S3 Key
"""
from tests.unit import unittest
import time
import random
import boto.s3
from boto.compat import six, StringIO, urllib
from boto.s3.connection import S3Connection
from boto.s3.key import Key
from boto.exception import S3ResponseError
class S3KeyTest(unittest.TestCase):
s3 = True
def setUp(self):
self.conn = S3Connection()
random.seed()
self.bucket_name = 'keytest-%d-%d' % (
time.time(), random.randint(1, 99999999))
self.bucket = self.conn.create_bucket(self.bucket_name)
def tearDown(self):
for key in self.bucket:
key.delete()
self.bucket.delete()
def test_set_contents_from_file_dataloss(self):
# Create an empty stringio and write to it.
content = "abcde"
sfp = StringIO()
sfp.write(content)
# Try set_contents_from_file() without rewinding sfp
k = self.bucket.new_key("k")
try:
k.set_contents_from_file(sfp)
self.fail("forgot to rewind so should fail.")
except AttributeError:
pass
# call with rewind and check if we wrote 5 bytes
k.set_contents_from_file(sfp, rewind=True)
self.assertEqual(k.size, 5)
# check actual contents by getting it.
kn = self.bucket.new_key("k")
ks = kn.get_contents_as_string().decode('utf-8')
self.assertEqual(ks, content)
# finally, try with a 0 length string
sfp = StringIO()
k = self.bucket.new_key("k")
k.set_contents_from_file(sfp)
self.assertEqual(k.size, 0)
# check actual contents by getting it.
kn = self.bucket.new_key("k")
ks = kn.get_contents_as_string().decode('utf-8')
self.assertEqual(ks, "")
def test_set_contents_as_file(self):
content="01234567890123456789"
sfp = StringIO(content)
# fp is set at 0 for just opened (for read) files.
# set_contents should write full content to key.
k = self.bucket.new_key("k")
k.set_contents_from_file(sfp)
self.assertEqual(k.size, 20)
kn = self.bucket.new_key("k")
ks = kn.get_contents_as_string().decode('utf-8')
self.assertEqual(ks, content)
# set fp to 5 and set contents. this should
# set "567890123456789" to the key
sfp.seek(5)
k = self.bucket.new_key("k")
k.set_contents_from_file(sfp)
self.assertEqual(k.size, 15)
kn = self.bucket.new_key("k")
ks = kn.get_contents_as_string().decode('utf-8')
self.assertEqual(ks, content[5:])
# set fp to 5 and only set 5 bytes. this should
# write the value "56789" to the key.
sfp.seek(5)
k = self.bucket.new_key("k")
k.set_contents_from_file(sfp, size=5)
self.assertEqual(k.size, 5)
self.assertEqual(sfp.tell(), 10)
kn = self.bucket.new_key("k")
ks = kn.get_contents_as_string().decode('utf-8')
self.assertEqual(ks, content[5:10])
def test_set_contents_with_md5(self):
content="01234567890123456789"
sfp = StringIO(content)
# fp is set at 0 for just opened (for read) files.
# set_contents should write full content to key.
k = self.bucket.new_key("k")
good_md5 = k.compute_md5(sfp)
k.set_contents_from_file(sfp, md5=good_md5)
kn = self.bucket.new_key("k")
ks = kn.get_contents_as_string().decode('utf-8')
self.assertEqual(ks, content)
# set fp to 5 and only set 5 bytes. this should
# write the value "56789" to the key.
sfp.seek(5)
k = self.bucket.new_key("k")
good_md5 = k.compute_md5(sfp, size=5)
k.set_contents_from_file(sfp, size=5, md5=good_md5)
self.assertEqual(sfp.tell(), 10)
kn = self.bucket.new_key("k")
ks = kn.get_contents_as_string().decode('utf-8')
self.assertEqual(ks, content[5:10])
# let's try a wrong md5 by just altering it.
k = self.bucket.new_key("k")
sfp.seek(0)
hexdig, base64 = k.compute_md5(sfp)
bad_md5 = (hexdig, base64[3:])
try:
k.set_contents_from_file(sfp, md5=bad_md5)
self.fail("should fail with bad md5")
except S3ResponseError:
pass
def test_get_contents_with_md5(self):
content="01234567890123456789"
sfp = StringIO(content)
k = self.bucket.new_key("k")
k.set_contents_from_file(sfp)
kn = self.bucket.new_key("k")
s = kn.get_contents_as_string().decode('utf-8')
self.assertEqual(kn.md5, k.md5)
self.assertEqual(s, content)
def test_file_callback(self):
def callback(wrote, total):
self.my_cb_cnt += 1
self.assertNotEqual(wrote, self.my_cb_last, "called twice with same value")
self.my_cb_last = wrote
# Zero bytes written => 1 call
self.my_cb_cnt = 0
self.my_cb_last = None
k = self.bucket.new_key("k")
k.BufferSize = 2
sfp = StringIO("")
k.set_contents_from_file(sfp, cb=callback, num_cb=10)
self.assertEqual(self.my_cb_cnt, 1)
self.assertEqual(self.my_cb_last, 0)
sfp.close()
# Read back zero bytes => 1 call
self.my_cb_cnt = 0
self.my_cb_last = None
s = k.get_contents_as_string(cb=callback)
self.assertEqual(self.my_cb_cnt, 1)
self.assertEqual(self.my_cb_last, 0)
content="01234567890123456789"
sfp = StringIO(content)
# expect 2 calls due start/finish
self.my_cb_cnt = 0
self.my_cb_last = None
k = self.bucket.new_key("k")
k.set_contents_from_file(sfp, cb=callback, num_cb=10)
self.assertEqual(self.my_cb_cnt, 2)
self.assertEqual(self.my_cb_last, 20)
# Read back all bytes => 2 calls
self.my_cb_cnt = 0
self.my_cb_last = None
s = k.get_contents_as_string(cb=callback).decode('utf-8')
self.assertEqual(self.my_cb_cnt, 2)
self.assertEqual(self.my_cb_last, 20)
self.assertEqual(s, content)
# rewind sfp and try upload again. -1 should call
# for every read/write so that should make 11 when bs=2
sfp.seek(0)
self.my_cb_cnt = 0
self.my_cb_last = None
k = self.bucket.new_key("k")
k.BufferSize = 2
k.set_contents_from_file(sfp, cb=callback, num_cb=-1)
self.assertEqual(self.my_cb_cnt, 11)
self.assertEqual(self.my_cb_last, 20)
# Read back all bytes => 11 calls
self.my_cb_cnt = 0
self.my_cb_last = None
s = k.get_contents_as_string(cb=callback, num_cb=-1).decode('utf-8')
self.assertEqual(self.my_cb_cnt, 11)
self.assertEqual(self.my_cb_last, 20)
self.assertEqual(s, content)
# no more than 1 times => 2 times
# last time always 20 bytes
sfp.seek(0)
self.my_cb_cnt = 0
self.my_cb_last = None
k = self.bucket.new_key("k")
k.BufferSize = 2
k.set_contents_from_file(sfp, cb=callback, num_cb=1)
self.assertTrue(self.my_cb_cnt <= 2)
self.assertEqual(self.my_cb_last, 20)
# no more than 1 times => 2 times
self.my_cb_cnt = 0
self.my_cb_last = None
s = k.get_contents_as_string(cb=callback, num_cb=1).decode('utf-8')
self.assertTrue(self.my_cb_cnt <= 2)
self.assertEqual(self.my_cb_last, 20)
self.assertEqual(s, content)
# no more than 2 times
# last time always 20 bytes
sfp.seek(0)
self.my_cb_cnt = 0
self.my_cb_last = None
k = self.bucket.new_key("k")
k.BufferSize = 2
k.set_contents_from_file(sfp, cb=callback, num_cb=2)
self.assertTrue(self.my_cb_cnt <= 2)
self.assertEqual(self.my_cb_last, 20)
# no more than 2 times
self.my_cb_cnt = 0
self.my_cb_last = None
s = k.get_contents_as_string(cb=callback, num_cb=2).decode('utf-8')
self.assertTrue(self.my_cb_cnt <= 2)
self.assertEqual(self.my_cb_last, 20)
self.assertEqual(s, content)
# no more than 3 times
# last time always 20 bytes
sfp.seek(0)
self.my_cb_cnt = 0
self.my_cb_last = None
k = self.bucket.new_key("k")
k.BufferSize = 2
k.set_contents_from_file(sfp, cb=callback, num_cb=3)
self.assertTrue(self.my_cb_cnt <= 3)
self.assertEqual(self.my_cb_last, 20)
# no more than 3 times
self.my_cb_cnt = 0
self.my_cb_last = None
s = k.get_contents_as_string(cb=callback, num_cb=3).decode('utf-8')
self.assertTrue(self.my_cb_cnt <= 3)
self.assertEqual(self.my_cb_last, 20)
self.assertEqual(s, content)
# no more than 4 times
# last time always 20 bytes
sfp.seek(0)
self.my_cb_cnt = 0
self.my_cb_last = None
k = self.bucket.new_key("k")
k.BufferSize = 2
k.set_contents_from_file(sfp, cb=callback, num_cb=4)
self.assertTrue(self.my_cb_cnt <= 4)
self.assertEqual(self.my_cb_last, 20)
# no more than 4 times
self.my_cb_cnt = 0
self.my_cb_last = None
s = k.get_contents_as_string(cb=callback, num_cb=4).decode('utf-8')
self.assertTrue(self.my_cb_cnt <= 4)
self.assertEqual(self.my_cb_last, 20)
self.assertEqual(s, content)
# no more than 6 times
# last time always 20 bytes
sfp.seek(0)
self.my_cb_cnt = 0
self.my_cb_last = None
k = self.bucket.new_key("k")
k.BufferSize = 2
k.set_contents_from_file(sfp, cb=callback, num_cb=6)
self.assertTrue(self.my_cb_cnt <= 6)
self.assertEqual(self.my_cb_last, 20)
# no more than 6 times
self.my_cb_cnt = 0
self.my_cb_last = None
s = k.get_contents_as_string(cb=callback, num_cb=6).decode('utf-8')
self.assertTrue(self.my_cb_cnt <= 6)
self.assertEqual(self.my_cb_last, 20)
self.assertEqual(s, content)
# no more than 10 times
# last time always 20 bytes
sfp.seek(0)
self.my_cb_cnt = 0
self.my_cb_last = None
k = self.bucket.new_key("k")
k.BufferSize = 2
k.set_contents_from_file(sfp, cb=callback, num_cb=10)
self.assertTrue(self.my_cb_cnt <= 10)
self.assertEqual(self.my_cb_last, 20)
# no more than 10 times
self.my_cb_cnt = 0
self.my_cb_last = None
s = k.get_contents_as_string(cb=callback, num_cb=10).decode('utf-8')
self.assertTrue(self.my_cb_cnt <= 10)
self.assertEqual(self.my_cb_last, 20)
self.assertEqual(s, content)
# no more than 1000 times
# last time always 20 bytes
sfp.seek(0)
self.my_cb_cnt = 0
self.my_cb_last = None
k = self.bucket.new_key("k")
k.BufferSize = 2
k.set_contents_from_file(sfp, cb=callback, num_cb=1000)
self.assertTrue(self.my_cb_cnt <= 1000)
self.assertEqual(self.my_cb_last, 20)
# no more than 1000 times
self.my_cb_cnt = 0
self.my_cb_last = None
s = k.get_contents_as_string(cb=callback, num_cb=1000).decode('utf-8')
self.assertTrue(self.my_cb_cnt <= 1000)
self.assertEqual(self.my_cb_last, 20)
self.assertEqual(s, content)
def test_website_redirects(self):
self.bucket.configure_website('index.html')
key = self.bucket.new_key('redirect-key')
self.assertTrue(key.set_redirect('http://www.amazon.com/'))
self.assertEqual(key.get_redirect(), 'http://www.amazon.com/')
self.assertTrue(key.set_redirect('http://aws.amazon.com/'))
self.assertEqual(key.get_redirect(), 'http://aws.amazon.com/')
def test_website_redirect_none_configured(self):
key = self.bucket.new_key('redirect-key')
key.set_contents_from_string('')
self.assertEqual(key.get_redirect(), None)
def test_website_redirect_with_bad_value(self):
self.bucket.configure_website('index.html')
key = self.bucket.new_key('redirect-key')
with self.assertRaises(key.provider.storage_response_error):
# Must start with a / or http
key.set_redirect('ftp://ftp.example.org')
with self.assertRaises(key.provider.storage_response_error):
# Must start with a / or http
key.set_redirect('')
def test_setting_date(self):
key = self.bucket.new_key('test_date')
# This should actually set x-amz-meta-date & not fail miserably.
key.set_metadata('date', '20130524T155935Z')
key.set_contents_from_string('Some text here.')
check = self.bucket.get_key('test_date')
self.assertEqual(check.get_metadata('date'), u'20130524T155935Z')
self.assertTrue('x-amz-meta-date' in check._get_remote_metadata())
def test_header_casing(self):
key = self.bucket.new_key('test_header_case')
# Using anything but CamelCase on ``Content-Type`` or ``Content-MD5``
# used to cause a signature error (when using ``s3`` for signing).
key.set_metadata('Content-type', 'application/json')
key.set_metadata('Content-md5', 'XmUKnus7svY1frWsVskxXg==')
key.set_contents_from_string('{"abc": 123}')
check = self.bucket.get_key('test_header_case')
self.assertEqual(check.content_type, 'application/json')
def test_header_encoding(self):
key = self.bucket.new_key('test_header_encoding')
key.set_metadata('Cache-control', u'public, max-age=500')
key.set_metadata('Test-Plus', u'A plus (+)')
key.set_metadata('Content-disposition', u'filename=Schöne Zeit.txt')
key.set_metadata('Content-Encoding', 'gzip')
key.set_metadata('Content-Language', 'de')
key.set_metadata('Content-Type', 'application/pdf')
self.assertEqual(key.content_type, 'application/pdf')
key.set_metadata('X-Robots-Tag', 'all')
key.set_metadata('Expires', u'Thu, 01 Dec 1994 16:00:00 GMT')
key.set_contents_from_string('foo')
check = self.bucket.get_key('test_header_encoding')
remote_metadata = check._get_remote_metadata()
# TODO: investigate whether encoding ' ' as '%20' makes sense
self.assertEqual(check.cache_control, 'public,%20max-age=500')
self.assertEqual(remote_metadata['cache-control'], 'public,%20max-age=500')
self.assertEqual(check.get_metadata('test-plus'), 'A plus (+)')
self.assertEqual(check.content_disposition, 'filename=Sch%C3%B6ne%20Zeit.txt')
self.assertEqual(remote_metadata['content-disposition'], 'filename=Sch%C3%B6ne%20Zeit.txt')
self.assertEqual(check.content_encoding, 'gzip')
self.assertEqual(remote_metadata['content-encoding'], 'gzip')
self.assertEqual(check.content_language, 'de')
self.assertEqual(remote_metadata['content-language'], 'de')
self.assertEqual(check.content_type, 'application/pdf')
self.assertEqual(remote_metadata['content-type'], 'application/pdf')
self.assertEqual(check.x_robots_tag, 'all')
self.assertEqual(remote_metadata['x-robots-tag'], 'all')
self.assertEqual(check.expires, 'Thu,%2001%20Dec%201994%2016:00:00%20GMT')
self.assertEqual(remote_metadata['expires'], 'Thu,%2001%20Dec%201994%2016:00:00%20GMT')
expected = u'filename=Schöne Zeit.txt'
if six.PY2:
# Newer versions of python default to unicode strings, but python 2
# requires encoding to UTF-8 to compare the two properly
expected = expected.encode('utf-8')
self.assertEqual(
urllib.parse.unquote(check.content_disposition),
expected
)
def test_set_contents_with_sse_c(self):
content="01234567890123456789"
# the plain text of customer key is "01testKeyToSSEC!"
header = {
"x-amz-server-side-encryption-customer-algorithm" :
"AES256",
"x-amz-server-side-encryption-customer-key" :
"MAAxAHQAZQBzAHQASwBlAHkAVABvAFMAUwBFAEMAIQA=",
"x-amz-server-side-encryption-customer-key-MD5" :
"fUgCZDDh6bfEMuP2bN38mg=="
}
# upload and download content with AWS specified headers
k = self.bucket.new_key("testkey_for_sse_c")
k.set_contents_from_string(content, headers=header)
kn = self.bucket.new_key("testkey_for_sse_c")
ks = kn.get_contents_as_string(headers=header)
self.assertEqual(ks, content.encode('utf-8'))
class S3KeySigV4Test(unittest.TestCase):
def setUp(self):
self.conn = boto.s3.connect_to_region('eu-central-1')
self.bucket_name = 'boto-sigv4-key-%d' % int(time.time())
self.bucket = self.conn.create_bucket(self.bucket_name,
location='eu-central-1')
def tearDown(self):
for key in self.bucket:
key.delete()
self.bucket.delete()
def test_put_get_with_non_string_headers_key(self):
k = Key(self.bucket)
k.key = 'foobar'
body = 'This is a test of S3'
# A content-length header will be added to this request since it
# has a body.
k.set_contents_from_string(body)
# Set a header that has an integer. This checks for a bug where
# the sigv4 signer assumes that all of the headers are strings.
headers = {'Content-Length': 0}
from_s3_key = self.bucket.get_key('foobar', headers=headers)
self.assertEqual(from_s3_key.get_contents_as_string().decode('utf-8'),
body)
def test_head_put_get_with_non_ascii_key(self):
k = Key(self.bucket)
k.key = u'''pt-Olá_ch-你好_ko-안녕_ru-Здравствуйте%20,.<>~`!@#$%^&()_-+='"'''
body = 'This is a test of S3'
k.set_contents_from_string(body)
from_s3_key = self.bucket.get_key(k.key, validate=True)
self.assertEqual(from_s3_key.get_contents_as_string().decode('utf-8'),
body)
keys = self.bucket.get_all_keys(prefix=k.key, max_keys=1)
self.assertEqual(1, len(keys))
class S3KeyVersionCopyTest(unittest.TestCase):
def setUp(self):
self.conn = S3Connection()
self.bucket_name = 'boto-key-version-copy-%d' % int(time.time())
self.bucket = self.conn.create_bucket(self.bucket_name)
self.bucket.configure_versioning(True)
def tearDown(self):
for key in self.bucket.list_versions():
key.delete()
self.bucket.delete()
def test_key_overwrite_and_copy(self):
first_content = b"abcdefghijklm"
second_content = b"nopqrstuvwxyz"
k = Key(self.bucket, 'testkey')
k.set_contents_from_string(first_content)
# Wait for S3's eventual consistency (may not be necessary)
while self.bucket.get_key('testkey') is None:
time.sleep(5)
# Get the first version_id
first_key = self.bucket.get_key('testkey')
first_version_id = first_key.version_id
# Overwrite the key
k = Key(self.bucket, 'testkey')
k.set_contents_from_string(second_content)
# Wait for eventual consistency
while True:
second_key = self.bucket.get_key('testkey')
if second_key is None or second_key.version_id == first_version_id:
time.sleep(5)
else:
break
# Copy first key (no longer the current version) to a new key
source_key = self.bucket.get_key('testkey',
version_id=first_version_id)
source_key.copy(self.bucket, 'copiedkey')
while self.bucket.get_key('copiedkey') is None:
time.sleep(5)
copied_key = self.bucket.get_key('copiedkey')
copied_key_contents = copied_key.get_contents_as_string()
self.assertEqual(first_content, copied_key_contents)
| 1 | 12,012 | This change looks unrelated to the CL description? | boto-boto | py |
@@ -18,7 +18,7 @@ class DedupTest : public QueryTestBase {
void SetUp() override { QueryTestBase::SetUp(); }
};
-#define DEDUP_RESUTL_CHECK(inputName, outputName, sentence, expected) \
+#define DEDUP_RESULT_CHECK(inputName, outputName, sentence, expected) \
do { \
qctx_->symTable()->newVariable(outputName); \
auto yieldSentence = getYieldSentence(sentence, qctx_.get()); \ | 1 | /* Copyright (c) 2020 vesoft inc. All rights reserved.
*
* This source code is licensed under Apache 2.0 License.
*/
#include <gtest/gtest.h>
#include "graph/context/QueryContext.h"
#include "graph/executor/query/DedupExecutor.h"
#include "graph/executor/query/ProjectExecutor.h"
#include "graph/executor/test/QueryTestBase.h"
#include "graph/planner/plan/Query.h"
namespace nebula {
namespace graph {
class DedupTest : public QueryTestBase {
public:
void SetUp() override { QueryTestBase::SetUp(); }
};
#define DEDUP_RESUTL_CHECK(inputName, outputName, sentence, expected) \
do { \
qctx_->symTable()->newVariable(outputName); \
auto yieldSentence = getYieldSentence(sentence, qctx_.get()); \
auto* dedupNode = Dedup::make(qctx_.get(), nullptr); \
dedupNode->setInputVar(inputName); \
dedupNode->setOutputVar(outputName); \
auto dedupExec = std::make_unique<DedupExecutor>(dedupNode, qctx_.get()); \
if (!expected.colNames.empty()) { \
EXPECT_TRUE(dedupExec->execute().get().ok()); \
} else { \
EXPECT_FALSE(dedupExec->execute().get().ok()); \
return; \
} \
auto& dedupResult = qctx_->ectx()->getResult(dedupNode->outputVar()); \
EXPECT_EQ(dedupResult.state(), Result::State::kSuccess); \
\
dedupNode->setInputVar(outputName); \
auto* project = Project::make(qctx_.get(), nullptr, yieldSentence->yieldColumns()); \
project->setInputVar(dedupNode->outputVar()); \
auto colNames = expected.colNames; \
project->setColNames(std::move(colNames)); \
\
auto proExe = std::make_unique<ProjectExecutor>(project, qctx_.get()); \
EXPECT_TRUE(proExe->execute().get().ok()); \
auto& proSesult = qctx_->ectx()->getResult(project->outputVar()); \
\
EXPECT_EQ(proSesult.value().getDataSet(), expected); \
EXPECT_EQ(proSesult.state(), Result::State::kSuccess); \
} while (false)
TEST_F(DedupTest, TestSequential) {
DataSet expected({"vid", "name", "age", "dst", "start", "end"});
expected.emplace_back(Row({"Ann", "Ann", 18, "School1", 2010, 2014}));
expected.emplace_back(Row({"Joy", "Joy", Value::kNullValue, "School2", 2009, 2012}));
expected.emplace_back(Row({"Tom", "Tom", 20, "School2", 2008, 2012}));
expected.emplace_back(Row({"Kate", "Kate", 19, "School2", 2009, 2013}));
expected.emplace_back(Row({"Lily", "Lily", 20, "School2", 2009, 2012}));
auto sentence =
"YIELD DISTINCT $-.vid as vid, $-.v_name as name, $-.v_age as age, "
"$-.v_dst as dst, $-.e_start_year as start, $-.e_end_year as end";
DEDUP_RESUTL_CHECK("input_sequential", "dedup_sequential", sentence, expected);
}
TEST_F(DedupTest, TestEmpty) {
DataSet expected({"name"});
DEDUP_RESUTL_CHECK("empty", "dedup_sequential", "YIELD DISTINCT $-.v_dst as name", expected);
}
TEST_F(DedupTest, WrongTypeIterator) {
DataSet expected;
DEDUP_RESUTL_CHECK(
"input_neighbor", "dedup_sequential", "YIELD DISTINCT $-.v_dst as name", expected);
}
} // namespace graph
} // namespace nebula
| 1 | 31,964 | I'm not certain that `res`+`utl` isn't a thing. | vesoft-inc-nebula | cpp |
@@ -182,6 +182,7 @@ public class FetchActiveFlowDao {
+ " LEFT JOIN "
+ " executors et ON ex.executor_id = et.id"
+ " WHERE ex.status NOT IN ("
+ + Status.EXECUTION_STOPPED.getNumVal() + ", "
+ Status.SUCCEEDED.getNumVal() + ", "
+ Status.KILLED.getNumVal() + ", "
+ Status.FAILED.getNumVal() + ")"; | 1 | /*
* Copyright 2017 LinkedIn Corp.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package azkaban.executor;
import azkaban.db.DatabaseOperator;
import azkaban.db.EncodingType;
import azkaban.flow.Flow;
import azkaban.project.Project;
import azkaban.utils.GZIPUtils;
import azkaban.utils.Pair;
import com.google.common.annotations.VisibleForTesting;
import java.io.IOException;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import javax.inject.Inject;
import javax.inject.Singleton;
import org.apache.commons.dbutils.ResultSetHandler;
import org.apache.log4j.Logger;
@Singleton
public class FetchActiveFlowDao {
private static final Logger logger = Logger.getLogger(FetchActiveFlowDao.class);
private final DatabaseOperator dbOperator;
@Inject
public FetchActiveFlowDao(final DatabaseOperator dbOperator) {
this.dbOperator = dbOperator;
}
private static Pair<ExecutionReference, ExecutableFlow> getExecutableFlowHelper(
final ResultSet rs) throws SQLException {
final int id = rs.getInt("exec_id");
final int encodingType = rs.getInt("enc_type");
final byte[] data = rs.getBytes("flow_data");
final int status = rs.getInt("status");
if (data == null) {
logger.warn("Execution id " + id + " has flow_data = null. To clean up, update status to "
+ "FAILED manually, eg. "
+ "SET status = " + Status.FAILED.getNumVal() + " WHERE id = " + id);
} else {
final EncodingType encType = EncodingType.fromInteger(encodingType);
final ExecutableFlow exFlow;
try {
exFlow = ExecutableFlow.createExecutableFlow(
GZIPUtils.transformBytesToObject(data, encType), Status.fromInteger(status));
} catch (final IOException e) {
throw new SQLException("Error retrieving flow data " + id, e);
}
return getPairWithExecutorInfo(rs, exFlow);
}
return null;
}
private static Pair<ExecutionReference, ExecutableFlow> getPairWithExecutorInfo(
final ResultSet rs, final ExecutableFlow exFlow) throws SQLException {
final int executorId = rs.getInt("executorId");
final String host = rs.getString("host");
final int port = rs.getInt("port");
final Executor executor;
if (host == null) {
logger.warn("Executor id " + executorId + " (on execution " +
exFlow.getExecutionId() + ") wasn't found");
executor = null;
} else {
final boolean executorStatus = rs.getBoolean("executorStatus");
executor = new Executor(executorId, host, port, executorStatus);
}
final ExecutionReference ref = new ExecutionReference(exFlow.getExecutionId(), executor, exFlow.getDispatchMethod());
return new Pair<>(ref, exFlow);
}
private static Pair<ExecutionReference, ExecutableFlow> getExecutableFlowMetadataHelper(
final ResultSet rs) throws SQLException {
final Flow flow = new Flow(rs.getString("flow_id"));
final Project project = new Project(rs.getInt("project_id"), null);
project.setVersion(rs.getInt("version"));
final ExecutableFlow exFlow = new ExecutableFlow(project, flow);
exFlow.setExecutionId(rs.getInt("exec_id"));
exFlow.setStatus(Status.fromInteger(rs.getInt("status")));
exFlow.setSubmitTime(rs.getLong("submit_time"));
exFlow.setStartTime(rs.getLong("start_time"));
exFlow.setEndTime(rs.getLong("end_time"));
exFlow.setSubmitUser(rs.getString("submit_user"));
return getPairWithExecutorInfo(rs, exFlow);
}
/**
* Fetch flows that are not in finished status, including both dispatched and non-dispatched
* flows.
*
* @return unfinished flows map
* @throws ExecutorManagerException the executor manager exception
*/
Map<Integer, Pair<ExecutionReference, ExecutableFlow>> fetchUnfinishedFlows()
throws ExecutorManagerException {
try {
return this.dbOperator.query(FetchActiveExecutableFlows.FETCH_UNFINISHED_EXECUTABLE_FLOWS,
new FetchActiveExecutableFlows());
} catch (final SQLException e) {
throw new ExecutorManagerException("Error fetching unfinished flows", e);
}
}
/**
* Fetch unfinished flows similar to {@link #fetchUnfinishedFlows}, excluding flow data.
*
* @return unfinished flows map
* @throws ExecutorManagerException the executor manager exception
*/
public Map<Integer, Pair<ExecutionReference, ExecutableFlow>> fetchUnfinishedFlowsMetadata()
throws ExecutorManagerException {
try {
return this.dbOperator.query(FetchUnfinishedFlowsMetadata.FETCH_UNFINISHED_FLOWS_METADATA,
new FetchUnfinishedFlowsMetadata());
} catch (final SQLException e) {
throw new ExecutorManagerException("Error fetching unfinished flows metadata", e);
}
}
/**
* Fetch flows that are dispatched and not yet finished.
*
* @return active flows map
* @throws ExecutorManagerException the executor manager exception
*/
Map<Integer, Pair<ExecutionReference, ExecutableFlow>> fetchActiveFlows()
throws ExecutorManagerException {
try {
return this.dbOperator.query(FetchActiveExecutableFlows.FETCH_ACTIVE_EXECUTABLE_FLOWS,
new FetchActiveExecutableFlows());
} catch (final SQLException e) {
throw new ExecutorManagerException("Error fetching active flows", e);
}
}
/**
* Fetch the flow that is dispatched and not yet finished by execution id.
*
* @return active flow pair
* @throws ExecutorManagerException the executor manager exception
*/
Pair<ExecutionReference, ExecutableFlow> fetchActiveFlowByExecId(final int execId)
throws ExecutorManagerException {
try {
return this.dbOperator.query(FetchActiveExecutableFlow
.FETCH_ACTIVE_EXECUTABLE_FLOW_BY_EXEC_ID,
new FetchActiveExecutableFlow(), execId);
} catch (final SQLException e) {
throw new ExecutorManagerException("Error fetching active flow by exec id" + execId, e);
}
}
@VisibleForTesting
static class FetchActiveExecutableFlows implements
ResultSetHandler<Map<Integer, Pair<ExecutionReference, ExecutableFlow>>> {
// Select flows that are not in finished status
private static final String FETCH_UNFINISHED_EXECUTABLE_FLOWS =
"SELECT ex.exec_id exec_id, ex.enc_type enc_type, ex.flow_data flow_data, ex.status status,"
+ " et.host host, et.port port, ex.executor_id executorId, et.active executorStatus"
+ " FROM execution_flows ex"
+ " LEFT JOIN "
+ " executors et ON ex.executor_id = et.id"
+ " WHERE ex.status NOT IN ("
+ Status.SUCCEEDED.getNumVal() + ", "
+ Status.KILLED.getNumVal() + ", "
+ Status.FAILED.getNumVal() + ")";
// Select flows that are dispatched and not in finished status
private static final String FETCH_ACTIVE_EXECUTABLE_FLOWS =
"SELECT ex.exec_id exec_id, ex.enc_type enc_type, ex.flow_data flow_data, ex.status status,"
+ " et.host host, et.port port, ex.executor_id executorId, et.active executorStatus"
+ " FROM execution_flows ex"
+ " LEFT JOIN "
+ " executors et ON ex.executor_id = et.id"
+ " WHERE ex.status NOT IN ("
+ Status.SUCCEEDED.getNumVal() + ", "
+ Status.KILLED.getNumVal() + ", "
+ Status.FAILED.getNumVal() + ")"
// exclude queued flows that haven't been assigned yet -- this is the opposite of
// the condition in ExecutionFlowDao#FETCH_QUEUED_EXECUTABLE_FLOW
+ " AND NOT ("
+ " ex.executor_id IS NULL"
+ " AND ex.status = " + Status.PREPARING.getNumVal()
+ " )";
@Override
public Map<Integer, Pair<ExecutionReference, ExecutableFlow>> handle(
final ResultSet rs) throws SQLException {
if (!rs.next()) {
return Collections.emptyMap();
}
final Map<Integer, Pair<ExecutionReference, ExecutableFlow>> execFlows =
new HashMap<>();
do {
final Pair<ExecutionReference, ExecutableFlow> exFlow =
FetchActiveFlowDao.getExecutableFlowHelper(rs);
if (exFlow != null) {
execFlows.put(rs.getInt("exec_id"), exFlow);
}
} while (rs.next());
return execFlows;
}
}
@VisibleForTesting
static class FetchUnfinishedFlowsMetadata implements
ResultSetHandler<Map<Integer, Pair<ExecutionReference, ExecutableFlow>>> {
// Select flows that are not in finished status
private static final String FETCH_UNFINISHED_FLOWS_METADATA =
"SELECT ex.exec_id exec_id, ex.project_id project_id, ex.version version, "
+ "ex.flow_id flow_id, et.host host, et.port port, ex.executor_id executorId, "
+ "ex.status status, ex.submit_time submit_time, ex.start_time start_time, "
+ "ex.end_time end_time, ex.submit_user submit_user, et.active executorStatus"
+ " FROM execution_flows ex"
+ " LEFT JOIN "
+ " executors et ON ex.executor_id = et.id"
+ " Where ex.status NOT IN ("
+ Status.SUCCEEDED.getNumVal() + ", "
+ Status.KILLED.getNumVal() + ", "
+ Status.FAILED.getNumVal() + ")";
@Override
public Map<Integer, Pair<ExecutionReference, ExecutableFlow>> handle(
final ResultSet rs) throws SQLException {
if (!rs.next()) {
return Collections.emptyMap();
}
final Map<Integer, Pair<ExecutionReference, ExecutableFlow>> execFlows =
new HashMap<>();
do {
final Pair<ExecutionReference, ExecutableFlow> exFlow =
FetchActiveFlowDao.getExecutableFlowMetadataHelper(rs);
if (exFlow != null) {
execFlows.put(rs.getInt("exec_id"), exFlow);
}
} while (rs.next());
return execFlows;
}
}
private static class FetchActiveExecutableFlow implements
ResultSetHandler<Pair<ExecutionReference, ExecutableFlow>> {
// Select the flow that is dispatched and not in finished status by execution id
private static final String FETCH_ACTIVE_EXECUTABLE_FLOW_BY_EXEC_ID =
"SELECT ex.exec_id exec_id, ex.enc_type enc_type, ex.flow_data flow_data, ex.status status,"
+ " et.host host, et.port port, ex.executor_id executorId, et.active executorStatus"
+ " FROM execution_flows ex"
+ " LEFT JOIN "
+ " executors et ON ex.executor_id = et.id"
+ " WHERE ex.exec_id = ? AND ex.status NOT IN ("
+ Status.SUCCEEDED.getNumVal() + ", "
+ Status.KILLED.getNumVal() + ", "
+ Status.FAILED.getNumVal() + ")"
// exclude queued flows that haven't been assigned yet -- this is the opposite of
// the condition in ExecutionFlowDao#FETCH_QUEUED_EXECUTABLE_FLOW
+ " AND NOT ("
+ " ex.executor_id IS NULL"
+ " AND ex.status = " + Status.PREPARING.getNumVal()
+ " )";
@Override
public Pair<ExecutionReference, ExecutableFlow> handle(
final ResultSet rs) throws SQLException {
if (!rs.next()) {
return null;
}
return FetchActiveFlowDao.getExecutableFlowHelper(rs);
}
}
}
| 1 | 22,718 | This approach to building the query string can cause bugs. Why not keep a list of the finalized statuses and join them with commas? That will give you the correct value. With the Apache Commons Lang library: `List<Integer> list = Arrays.asList(1,2,3,4,5,6,7); String str = org.apache.commons.lang.StringUtils.join(list, ",");`. You can apply this at the multiple places shown in this diff (see the sketch below). | azkaban-azkaban | java
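
A minimal sketch of the joining approach suggested above, assuming Commons Lang 2.x; `TERMINAL_STATUSES` and its numeric values are hypothetical stand-ins for `Status.SUCCEEDED.getNumVal()` and the other finalized statuses, not actual Azkaban code:

```java
import java.util.Arrays;
import java.util.List;
import org.apache.commons.lang.StringUtils;

public class StatusClauseSketch {
  // Hypothetical stand-ins for the numeric values of the finalized statuses.
  private static final List<Integer> TERMINAL_STATUSES = Arrays.asList(50, 60, 70, 80);

  public static void main(String[] args) {
    // Join the list once instead of hand-concatenating each
    // Status.X.getNumVal() into every query string.
    String clause = " WHERE ex.status NOT IN ("
        + StringUtils.join(TERMINAL_STATUSES, ", ") + ")";
    System.out.println(clause); // " WHERE ex.status NOT IN (50, 60, 70, 80)"
  }
}
```

This keeps the set of terminal statuses in one place, so adding a status such as EXECUTION_STOPPED touches one list instead of every query constant.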
@@ -23,7 +23,10 @@ void wlr_input_device_init(struct wlr_input_device *dev,
}
void wlr_input_device_destroy(struct wlr_input_device *dev) {
- if (!dev) return;
+ if (!dev) {
+ return;
+ }
+
if (dev->_device) {
switch (dev->type) {
case WLR_INPUT_DEVICE_KEYBOARD: | 1 | #define _XOPEN_SOURCE 500
#include <stdlib.h>
#include <string.h>
#include <wayland-server.h>
#include <wlr/types/wlr_input_device.h>
#include <wlr/interfaces/wlr_input_device.h>
#include <wlr/interfaces/wlr_keyboard.h>
#include <wlr/interfaces/wlr_pointer.h>
#include <wlr/interfaces/wlr_touch.h>
#include <wlr/interfaces/wlr_tablet_tool.h>
#include <wlr/interfaces/wlr_tablet_pad.h>
#include <wlr/util/log.h>
void wlr_input_device_init(struct wlr_input_device *dev,
enum wlr_input_device_type type,
struct wlr_input_device_impl *impl,
const char *name, int vendor, int product) {
dev->type = type;
dev->impl = impl;
dev->name = strdup(name);
dev->vendor = vendor;
dev->product = product;
}
void wlr_input_device_destroy(struct wlr_input_device *dev) {
if (!dev) return;
if (dev->_device) {
switch (dev->type) {
case WLR_INPUT_DEVICE_KEYBOARD:
wlr_keyboard_destroy(dev->keyboard);
break;
case WLR_INPUT_DEVICE_POINTER:
wlr_pointer_destroy(dev->pointer);
break;
case WLR_INPUT_DEVICE_TOUCH:
wlr_touch_destroy(dev->touch);
break;
case WLR_INPUT_DEVICE_TABLET_TOOL:
wlr_tablet_tool_destroy(dev->tablet_tool);
break;
case WLR_INPUT_DEVICE_TABLET_PAD:
wlr_tablet_pad_destroy(dev->tablet_pad);
break;
default:
wlr_log(L_DEBUG, "Warning: leaking memory %p %p %d",
dev->_device, dev, dev->type);
break;
}
}
free(dev->name);
if (dev->impl && dev->impl->destroy) {
dev->impl->destroy(dev);
} else {
free(dev);
}
}
| 1 | 7,712 | Merge this check into the next condition | swaywm-wlroots | c
@@ -348,6 +348,7 @@ type appResourcesGetter interface {
type taskDeployer interface {
DeployTask(input *deploy.CreateTaskResourcesInput, opts ...cloudformation.StackOption) error
+ DeleteTask(task deploy.TaskStackInfo) error
}
type taskRunner interface { | 1 | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package cli
import (
"encoding"
"io"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/copilot-cli/internal/pkg/aws/cloudformation"
"github.com/aws/copilot-cli/internal/pkg/aws/codepipeline"
"github.com/aws/copilot-cli/internal/pkg/aws/ecs"
"github.com/aws/copilot-cli/internal/pkg/config"
"github.com/aws/copilot-cli/internal/pkg/deploy"
"github.com/aws/copilot-cli/internal/pkg/deploy/cloudformation/stack"
"github.com/aws/copilot-cli/internal/pkg/describe"
"github.com/aws/copilot-cli/internal/pkg/exec"
"github.com/aws/copilot-cli/internal/pkg/initialize"
"github.com/aws/copilot-cli/internal/pkg/logging"
"github.com/aws/copilot-cli/internal/pkg/repository"
"github.com/aws/copilot-cli/internal/pkg/task"
"github.com/aws/copilot-cli/internal/pkg/term/command"
termprogress "github.com/aws/copilot-cli/internal/pkg/term/progress"
"github.com/aws/copilot-cli/internal/pkg/term/prompt"
"github.com/aws/copilot-cli/internal/pkg/term/selector"
"github.com/aws/copilot-cli/internal/pkg/workspace"
)
// actionCommand is the interface that every command that creates a resource implements.
type actionCommand interface {
// Validate returns an error if a flag's value is invalid.
Validate() error
// Ask prompts for flag values that are required but not passed in.
Ask() error
// Execute runs the command after collecting all required options.
Execute() error
// RecommendedActions returns a list of follow-up suggestions users can run once the command executes successfully.
RecommendedActions() []string
}
// SSM store interfaces.
type serviceStore interface {
CreateService(svc *config.Workload) error
GetService(appName, svcName string) (*config.Workload, error)
ListServices(appName string) ([]*config.Workload, error)
DeleteService(appName, svcName string) error
}
type jobStore interface {
CreateJob(job *config.Workload) error
GetJob(appName, jobName string) (*config.Workload, error)
ListJobs(appName string) ([]*config.Workload, error)
DeleteJob(appName, jobName string) error
}
type wlStore interface {
ListWorkloads(appName string) ([]*config.Workload, error)
GetWorkload(appName, name string) (*config.Workload, error)
}
type workloadListWriter interface {
Write(appName string) error
}
type applicationStore interface {
applicationCreator
applicationGetter
applicationLister
applicationDeleter
}
type applicationCreator interface {
CreateApplication(app *config.Application) error
}
type applicationGetter interface {
GetApplication(appName string) (*config.Application, error)
}
type applicationLister interface {
ListApplications() ([]*config.Application, error)
}
type applicationDeleter interface {
DeleteApplication(name string) error
}
type environmentStore interface {
environmentCreator
environmentGetter
environmentLister
environmentDeleter
}
type environmentCreator interface {
CreateEnvironment(env *config.Environment) error
}
type environmentGetter interface {
GetEnvironment(appName string, environmentName string) (*config.Environment, error)
}
type environmentLister interface {
ListEnvironments(appName string) ([]*config.Environment, error)
}
type environmentDeleter interface {
DeleteEnvironment(appName, environmentName string) error
}
type store interface {
applicationStore
environmentStore
serviceStore
jobStore
wlStore
}
type deployedEnvironmentLister interface {
ListEnvironmentsDeployedTo(appName, svcName string) ([]string, error)
ListDeployedServices(appName, envName string) ([]string, error)
IsServiceDeployed(appName, envName string, svcName string) (bool, error)
}
// Secretsmanager interface.
type secretsManager interface {
secretCreator
secretDeleter
}
type secretCreator interface {
CreateSecret(secretName, secretString string) (string, error)
}
type secretDeleter interface {
DeleteSecret(secretName string) error
}
type imageBuilderPusher interface {
BuildAndPush(docker repository.ContainerLoginBuildPusher, args *exec.BuildArguments) error
}
type repositoryURIGetter interface {
URI() string
}
type repositoryService interface {
repositoryURIGetter
imageBuilderPusher
}
type logEventsWriter interface {
WriteLogEvents(opts logging.WriteLogEventsOpts) error
}
type templater interface {
Template() (string, error)
}
type stackSerializer interface {
templater
SerializedParameters() (string, error)
}
type runner interface {
Run(name string, args []string, options ...command.Option) error
}
type eventsWriter interface {
WriteEventsUntilStopped() error
}
type defaultSessionProvider interface {
Default() (*session.Session, error)
}
type regionalSessionProvider interface {
DefaultWithRegion(region string) (*session.Session, error)
}
type sessionFromRoleProvider interface {
FromRole(roleARN string, region string) (*session.Session, error)
}
type sessionFromStaticProvider interface {
FromStaticCreds(accessKeyID, secretAccessKey, sessionToken string) (*session.Session, error)
}
type sessionFromProfileProvider interface {
FromProfile(name string) (*session.Session, error)
}
type sessionProvider interface {
defaultSessionProvider
regionalSessionProvider
sessionFromRoleProvider
sessionFromProfileProvider
sessionFromStaticProvider
}
type describer interface {
Describe() (describe.HumanJSONStringer, error)
}
type wsFileDeleter interface {
DeleteWorkspaceFile() error
}
type svcManifestReader interface {
ReadServiceManifest(svcName string) ([]byte, error)
}
type jobManifestReader interface {
ReadJobManifest(jobName string) ([]byte, error)
}
type copilotDirGetter interface {
CopilotDirPath() (string, error)
}
type wsPipelineManifestReader interface {
ReadPipelineManifest() ([]byte, error)
}
type wsPipelineWriter interface {
WritePipelineBuildspec(marshaler encoding.BinaryMarshaler) (string, error)
WritePipelineManifest(marshaler encoding.BinaryMarshaler) (string, error)
}
type wsServiceLister interface {
ServiceNames() ([]string, error)
}
type wsSvcReader interface {
wsServiceLister
svcManifestReader
}
type wsSvcDirReader interface {
wsSvcReader
copilotDirGetter
}
type wsJobLister interface {
JobNames() ([]string, error)
}
type wsJobReader interface {
jobManifestReader
wsJobLister
}
type wsWlReader interface {
WorkloadNames() ([]string, error)
}
type wsJobDirReader interface {
wsJobReader
copilotDirGetter
}
type wsWlDirReader interface {
wsJobReader
wsSvcReader
copilotDirGetter
wsWlReader
ListDockerfiles() ([]string, error)
Summary() (*workspace.Summary, error)
}
type wsPipelineReader interface {
wsPipelineManifestReader
WorkloadNames() ([]string, error)
}
type wsAppManager interface {
Create(appName string) error
Summary() (*workspace.Summary, error)
}
type wsAddonManager interface {
WriteAddon(f encoding.BinaryMarshaler, svc, name string) (string, error)
wsWlReader
}
type artifactUploader interface {
PutArtifact(bucket, fileName string, data io.Reader) (string, error)
}
type bucketEmptier interface {
EmptyBucket(bucket string) error
}
// Interfaces for deploying resources through CloudFormation. Facilitates mocking.
type environmentDeployer interface {
DeployAndRenderEnvironment(out termprogress.FileWriter, env *deploy.CreateEnvironmentInput) error
DeleteEnvironment(appName, envName, cfnExecRoleARN string) error
GetEnvironment(appName, envName string) (*config.Environment, error)
EnvironmentTemplate(appName, envName string) (string, error)
UpdateEnvironmentTemplate(appName, envName, templateBody, cfnExecRoleARN string) error
}
type wlDeleter interface {
DeleteWorkload(in deploy.DeleteWorkloadInput) error
}
type svcRemoverFromApp interface {
RemoveServiceFromApp(app *config.Application, svcName string) error
}
type jobRemoverFromApp interface {
RemoveJobFromApp(app *config.Application, jobName string) error
}
type imageRemover interface {
ClearRepository(repoName string) error // implemented by ECR Service
}
type pipelineDeployer interface {
CreatePipeline(env *deploy.CreatePipelineInput) error
UpdatePipeline(env *deploy.CreatePipelineInput) error
PipelineExists(env *deploy.CreatePipelineInput) (bool, error)
DeletePipeline(pipelineName string) error
AddPipelineResourcesToApp(app *config.Application, region string) error
appResourcesGetter
// TODO: Add StreamPipelineCreation method
}
type appDeployer interface {
DeployApp(in *deploy.CreateAppInput) error
AddServiceToApp(app *config.Application, svcName string) error
AddJobToApp(app *config.Application, jobName string) error
AddEnvToApp(app *config.Application, env *config.Environment) error
DelegateDNSPermissions(app *config.Application, accountID string) error
DeleteApp(name string) error
}
type appResourcesGetter interface {
GetAppResourcesByRegion(app *config.Application, region string) (*stack.AppRegionalResources, error)
GetRegionalAppResources(app *config.Application) ([]*stack.AppRegionalResources, error)
}
type taskDeployer interface {
DeployTask(input *deploy.CreateTaskResourcesInput, opts ...cloudformation.StackOption) error
}
type taskRunner interface {
Run() ([]*task.Task, error)
}
type defaultClusterGetter interface {
HasDefaultCluster() (bool, error)
}
type deployer interface {
environmentDeployer
appDeployer
pipelineDeployer
}
type domainValidator interface {
DomainExists(domainName string) (bool, error)
}
type dockerfileParser interface {
GetExposedPorts() ([]uint16, error)
GetHealthCheck() (*exec.HealthCheck, error)
}
type statusDescriber interface {
Describe() (*describe.ServiceStatusDesc, error)
}
type envDescriber interface {
Describe() (*describe.EnvDescription, error)
}
type versionGetter interface {
Version() (string, error)
}
type envTemplater interface {
EnvironmentTemplate(appName, envName string) (string, error)
}
type envUpgrader interface {
UpgradeEnvironment(in *deploy.CreateEnvironmentInput) error
}
type legacyEnvUpgrader interface {
UpgradeLegacyEnvironment(in *deploy.CreateEnvironmentInput, lbWebServices ...string) error
envTemplater
}
type envTemplateUpgrader interface {
envUpgrader
legacyEnvUpgrader
}
type pipelineGetter interface {
GetPipeline(pipelineName string) (*codepipeline.Pipeline, error)
ListPipelineNamesByTags(tags map[string]string) ([]string, error)
GetPipelinesByTags(tags map[string]string) ([]*codepipeline.Pipeline, error)
}
type executor interface {
Execute() error
}
type deletePipelineRunner interface {
Run() error
}
type executeAsker interface {
Ask() error
executor
}
type appSelector interface {
Application(prompt, help string, additionalOpts ...string) (string, error)
}
type appEnvSelector interface {
appSelector
Environment(prompt, help, app string, additionalOpts ...string) (string, error)
}
type configSelector interface {
appEnvSelector
Service(prompt, help, app string) (string, error)
}
type deploySelector interface {
appSelector
DeployedService(prompt, help string, app string, opts ...selector.GetDeployedServiceOpts) (*selector.DeployedService, error)
}
type pipelineSelector interface {
Environments(prompt, help, app string, finalMsgFunc func(int) prompt.Option) ([]string, error)
}
type wsSelector interface {
appEnvSelector
Service(prompt, help string) (string, error)
Job(prompt, help string) (string, error)
Workload(msg, help string) (string, error)
}
type initJobSelector interface {
dockerfileSelector
Schedule(scheduleTypePrompt, scheduleTypeHelp string, scheduleValidator, rateValidator prompt.ValidatorFunc) (string, error)
}
type dockerfileSelector interface {
Dockerfile(selPrompt, notFoundPrompt, selHelp, notFoundHelp string, pv prompt.ValidatorFunc) (string, error)
}
type ec2Selector interface {
VPC(prompt, help string) (string, error)
PublicSubnets(prompt, help, vpcID string) ([]string, error)
PrivateSubnets(prompt, help, vpcID string) ([]string, error)
}
type credsSelector interface {
Creds(prompt, help string) (*session.Session, error)
}
type ec2Client interface {
HasDNSSupport(vpcID string) (bool, error)
}
type jobInitializer interface {
Job(props *initialize.JobProps) (string, error)
}
type svcInitializer interface {
Service(props *initialize.ServiceProps) (string, error)
}
type roleDeleter interface {
DeleteRole(string) error
}
type activeWorkloadTasksLister interface {
ListActiveWorkloadTasks(app, env, workload string) (clusterARN string, taskARNs []string, err error)
}
type tasksStopper interface {
StopTasks(tasks []string, opts ...ecs.StopTasksOpts) error
}
type serviceLinkedRoleCreator interface {
CreateECSServiceLinkedRole() error
}
| 1 | 16,144 | Maybe add it when it is used. | aws-copilot-cli | go |
@@ -36,3 +36,17 @@ TWO_ENABLED = {'scanners': [
{'name': 'cloudsql_acl', 'enabled': False},
{'name': 'iam_policy', 'enabled': True}
]}
+
+NONEXIST_ENABLED = {'scanners': [
+ {'name': 'bigquery', 'enabled': False},
+ {'name': 'bucket_acl', 'enabled': True},
+ {'name': 'cloudsql_acl', 'enabled': False},
+ {'name': 'non_exist_scanner', 'enabled': True}
+]}
+
+ALL_EXIST = {'scanners': [
+ {'name': 'bigquery', 'enabled': True},
+ {'name': 'bucket_acl', 'enabled': True},
+ {'name': 'cloudsql_acl', 'enabled': True},
+ {'name': 'iam_policy', 'enabled': True}
+]} | 1 | # Copyright 2017 The Forseti Security Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Fake runnable scanners."""
ALL_ENABLED = {'scanners': [
{'name': 'bigquery', 'enabled': True},
{'name': 'bucket_acl', 'enabled': True},
{'name': 'cloudsql_acl', 'enabled': True},
{'name': 'iam_policy', 'enabled': True}
]}
ALL_DISABLED = {'scanners': []}
ONE_ENABLED = {'scanners': [
{'name': 'bigquery', 'enabled': False},
{'name': 'bucket_acl', 'enabled': False},
{'name': 'cloudsql_acl', 'enabled': False},
{'name': 'iam_policy', 'enabled': True}
]}
TWO_ENABLED = {'scanners': [
{'name': 'bigquery', 'enabled': False},
{'name': 'bucket_acl', 'enabled': True},
{'name': 'cloudsql_acl', 'enabled': False},
{'name': 'iam_policy', 'enabled': True}
]}
| 1 | 32,431 | Clearer naming: NONEXISTENT_ENABLED | forseti-security-forseti-security | py
@@ -282,10 +282,10 @@ public class TypeUtil {
switch (from.typeId()) {
case INTEGER:
- return to == Types.LongType.get();
+ return to.equals(Types.LongType.get());
case FLOAT:
- return to == Types.DoubleType.get();
+ return to.equals(Types.DoubleType.get());
case DECIMAL:
Types.DecimalType fromDecimal = (Types.DecimalType) from; | 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.iceberg.types;
import java.util.Collections;
import java.util.List;
import java.util.Locale;
import java.util.Map;
import java.util.Objects;
import java.util.Set;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.function.Predicate;
import java.util.function.Supplier;
import java.util.stream.Collectors;
import org.apache.iceberg.Schema;
import org.apache.iceberg.relocated.com.google.common.base.Preconditions;
import org.apache.iceberg.relocated.com.google.common.collect.ImmutableList;
import org.apache.iceberg.relocated.com.google.common.collect.ImmutableMap;
import org.apache.iceberg.relocated.com.google.common.collect.ImmutableSet;
import org.apache.iceberg.relocated.com.google.common.collect.Iterables;
import org.apache.iceberg.relocated.com.google.common.collect.Lists;
import org.apache.iceberg.relocated.com.google.common.collect.Maps;
public class TypeUtil {
private TypeUtil() {
}
/**
* Project extracts particular fields from a schema by ID.
* <p>
* Unlike {@link TypeUtil#select(Schema, Set)}, project will pick out only the fields enumerated. Structs that are
* explicitly projected are empty unless sub-fields are explicitly projected. Maps and lists cannot be explicitly
* selected in fieldIds.
* @param schema to project fields from
* @param fieldIds list of explicit fields to extract
* @return the schema with all fields fields not selected removed
*/
public static Schema project(Schema schema, Set<Integer> fieldIds) {
Preconditions.checkNotNull(schema, "Schema cannot be null");
Types.StructType result = project(schema.asStruct(), fieldIds);
if (schema.asStruct().equals(result)) {
return schema;
} else if (result != null) {
if (schema.getAliases() != null) {
return new Schema(result.fields(), schema.getAliases());
} else {
return new Schema(result.fields());
}
}
return new Schema(Collections.emptyList(), schema.getAliases());
}
public static Types.StructType project(Types.StructType struct, Set<Integer> fieldIds) {
Preconditions.checkNotNull(struct, "Struct cannot be null");
Preconditions.checkNotNull(fieldIds, "Field ids cannot be null");
Type result = visit(struct, new PruneColumns(fieldIds, false));
if (struct.equals(result)) {
return struct;
} else if (result != null) {
return result.asStructType();
}
return Types.StructType.of();
}
public static Schema select(Schema schema, Set<Integer> fieldIds) {
Preconditions.checkNotNull(schema, "Schema cannot be null");
Types.StructType result = select(schema.asStruct(), fieldIds);
if (Objects.equals(schema.asStruct(), result)) {
return schema;
} else if (result != null) {
if (schema.getAliases() != null) {
return new Schema(result.fields(), schema.getAliases());
} else {
return new Schema(result.fields());
}
}
return new Schema(ImmutableList.of(), schema.getAliases());
}
public static Types.StructType select(Types.StructType struct, Set<Integer> fieldIds) {
Preconditions.checkNotNull(struct, "Struct cannot be null");
Preconditions.checkNotNull(fieldIds, "Field ids cannot be null");
Type result = visit(struct, new PruneColumns(fieldIds, true));
if (struct.equals(result)) {
return struct;
} else if (result != null) {
return result.asStructType();
}
return Types.StructType.of();
}
public static Set<Integer> getProjectedIds(Schema schema) {
return ImmutableSet.copyOf(getIdsInternal(schema.asStruct(), true));
}
public static Set<Integer> getProjectedIds(Type type) {
if (type.isPrimitiveType()) {
return ImmutableSet.of();
}
return ImmutableSet.copyOf(getIdsInternal(type, true));
}
private static Set<Integer> getIdsInternal(Type type, boolean includeStructIds) {
return visit(type, new GetProjectedIds(includeStructIds));
}
public static Types.StructType selectNot(Types.StructType struct, Set<Integer> fieldIds) {
Set<Integer> projectedIds = getIdsInternal(struct, false);
projectedIds.removeAll(fieldIds);
return project(struct, projectedIds);
}
public static Schema selectNot(Schema schema, Set<Integer> fieldIds) {
Set<Integer> projectedIds = getIdsInternal(schema.asStruct(), false);
projectedIds.removeAll(fieldIds);
return project(schema, projectedIds);
}
public static Schema join(Schema left, Schema right) {
List<Types.NestedField> joinedColumns = Lists.newArrayList();
joinedColumns.addAll(left.columns());
joinedColumns.addAll(right.columns());
return new Schema(joinedColumns);
}
public static Map<String, Integer> indexByName(Types.StructType struct) {
IndexByName indexer = new IndexByName();
visit(struct, indexer);
return indexer.byName();
}
public static Map<Integer, String> indexNameById(Types.StructType struct) {
IndexByName indexer = new IndexByName();
visit(struct, indexer);
return indexer.byId();
}
public static Map<String, Integer> indexByLowerCaseName(Types.StructType struct) {
Map<String, Integer> indexByLowerCaseName = Maps.newHashMap();
indexByName(struct).forEach((name, integer) ->
indexByLowerCaseName.put(name.toLowerCase(Locale.ROOT), integer));
return indexByLowerCaseName;
}
public static Map<Integer, Types.NestedField> indexById(Types.StructType struct) {
return visit(struct, new IndexById());
}
public static Map<Integer, Integer> indexParents(Types.StructType struct) {
return ImmutableMap.copyOf(visit(struct, new IndexParents()));
}
/**
* Assigns fresh ids from the {@link NextID nextId function} for all fields in a type.
*
* @param type a type
* @param nextId an id assignment function
   * @return a structurally identical type with new ids assigned by the nextId function
*/
public static Type assignFreshIds(Type type, NextID nextId) {
return TypeUtil.visit(type, new AssignFreshIds(nextId));
}
/**
* Assigns fresh ids from the {@link NextID nextId function} for all fields in a schema.
*
* @param schema a schema
* @param nextId an id assignment function
* @return a structurally identical schema with new ids assigned by the nextId function
*/
public static Schema assignFreshIds(Schema schema, NextID nextId) {
Types.StructType struct = TypeUtil.visit(schema.asStruct(), new AssignFreshIds(nextId)).asStructType();
return new Schema(struct.fields(), refreshIdentifierFields(struct, schema));
}
/**
* Assigns fresh ids from the {@link NextID nextId function} for all fields in a schema.
*
* @param schemaId an ID assigned to this schema
* @param schema a schema
* @param nextId an id assignment function
* @return a structurally identical schema with new ids assigned by the nextId function
*/
public static Schema assignFreshIds(int schemaId, Schema schema, NextID nextId) {
Types.StructType struct = TypeUtil.visit(schema.asStruct(), new AssignFreshIds(nextId)).asStructType();
return new Schema(schemaId, struct.fields(), refreshIdentifierFields(struct, schema));
}
/**
* Assigns ids to match a given schema, and fresh ids from the {@link NextID nextId function} for all other fields.
*
* @param schema a schema
* @param baseSchema a schema with existing IDs to copy by name
* @param nextId an id assignment function
* @return a structurally identical schema with ids copied from baseSchema by name where present, and fresh ids from nextId elsewhere
*/
public static Schema assignFreshIds(Schema schema, Schema baseSchema, NextID nextId) {
Types.StructType struct = TypeUtil
.visit(schema.asStruct(), new AssignFreshIds(schema, baseSchema, nextId))
.asStructType();
return new Schema(struct.fields(), refreshIdentifierFields(struct, schema));
}
/**
* Get the identifier fields in the fresh schema based on the identifier fields in the base schema.
* @param freshSchema fresh schema
* @param baseSchema base schema
* @return identifier fields in the fresh schema
*/
public static Set<Integer> refreshIdentifierFields(Types.StructType freshSchema, Schema baseSchema) {
Map<String, Integer> nameToId = TypeUtil.indexByName(freshSchema);
Set<String> identifierFieldNames = baseSchema.identifierFieldNames();
identifierFieldNames.forEach(name -> Preconditions.checkArgument(nameToId.containsKey(name),
"Cannot find ID for identifier field %s in schema %s", name, freshSchema));
return identifierFieldNames.stream().map(nameToId::get).collect(Collectors.toSet());
}
/**
* Assigns strictly increasing fresh ids for all fields in a schema, starting from 1.
*
* @param schema a schema
* @return a structurally identical schema with new ids assigned strictly increasing from 1
*/
public static Schema assignIncreasingFreshIds(Schema schema) {
AtomicInteger lastColumnId = new AtomicInteger(0);
return TypeUtil.assignFreshIds(schema, lastColumnId::incrementAndGet);
}
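// Illustrative usage (not in the original source): a schema whose fields
// carry ids {10: id, 12: data} is rewritten to {1: id, 2: data}; the
// AtomicInteger's incrementAndGet serves as the NextID function, so assigned
// ids are strictly increasing from 1.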
/**
* Reassigns ids in a schema from another schema.
* <p>
* Ids are determined by field names. If a field in the schema cannot be found in the source
* schema, this will throw IllegalArgumentException.
* <p>
* This will not alter a schema's structure, nullability, or types.
*
* @param schema the schema to have ids reassigned
* @param idSourceSchema the schema from which field ids will be used
* @return a structurally identical schema with field ids matching the source schema
* @throws IllegalArgumentException if a field cannot be found (by name) in the source schema
*/
public static Schema reassignIds(Schema schema, Schema idSourceSchema) {
Types.StructType struct = visit(schema, new ReassignIds(idSourceSchema)).asStructType();
return new Schema(struct.fields(), refreshIdentifierFields(struct, schema));
}
public static Type find(Schema schema, Predicate<Type> predicate) {
return visit(schema, new FindTypeVisitor(predicate));
}
public static boolean isPromotionAllowed(Type from, Type.PrimitiveType to) {
// Warning! Before changing this function, make sure that the type change doesn't introduce
// compatibility problems in partitioning.
if (from.equals(to)) {
return true;
}
switch (from.typeId()) {
case INTEGER:
return to == Types.LongType.get();
case FLOAT:
return to == Types.DoubleType.get();
case DECIMAL:
Types.DecimalType fromDecimal = (Types.DecimalType) from;
if (to.typeId() != Type.TypeID.DECIMAL) {
return false;
}
Types.DecimalType toDecimal = (Types.DecimalType) to;
return fromDecimal.scale() == toDecimal.scale() &&
fromDecimal.precision() <= toDecimal.precision();
}
return false;
}
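// Examples of the promotion rules above (illustrative): int -> long and
// float -> double are allowed; decimal(9, 2) -> decimal(18, 2) is allowed
// because the scale is unchanged and precision only grows, while
// decimal(9, 2) -> decimal(9, 3) is rejected because the scale changes.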
/**
* Check whether the Iceberg table can be written with the user-provided write schema.
*
* @param tableSchema the table schema stored in Iceberg metadata.
* @param writeSchema the user-provided write schema.
* @param checkNullability if true, writing optional values to a required field is not allowed.
* @param checkOrdering if true, the input schema may not order its columns differently than the table schema.
*/
public static void validateWriteSchema(Schema tableSchema, Schema writeSchema,
Boolean checkNullability, Boolean checkOrdering) {
List<String> errors;
if (checkNullability) {
errors = CheckCompatibility.writeCompatibilityErrors(tableSchema, writeSchema, checkOrdering);
} else {
errors = CheckCompatibility.typeCompatibilityErrors(tableSchema, writeSchema, checkOrdering);
}
if (!errors.isEmpty()) {
StringBuilder sb = new StringBuilder();
sb.append("Cannot write incompatible dataset to table with schema:\n")
.append(tableSchema)
.append("\nwrite schema:")
.append(writeSchema)
.append("\nProblems:");
for (String error : errors) {
sb.append("\n* ").append(error);
}
throw new IllegalArgumentException(sb.toString());
}
}
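// Illustrative failure (not in the original source): writing an optional
// column into a required table column with checkNullability = true raises
// an IllegalArgumentException whose message lists both schemas followed by
// one "* ..." bullet per incompatibility, as assembled above.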
/**
* Interface for passing a function that assigns column IDs.
*/
public interface NextID {
int get();
}
public static class SchemaVisitor<T> {
public void beforeField(Types.NestedField field) {
}
public void afterField(Types.NestedField field) {
}
public void beforeListElement(Types.NestedField elementField) {
beforeField(elementField);
}
public void afterListElement(Types.NestedField elementField) {
afterField(elementField);
}
public void beforeMapKey(Types.NestedField keyField) {
beforeField(keyField);
}
public void afterMapKey(Types.NestedField keyField) {
afterField(keyField);
}
public void beforeMapValue(Types.NestedField valueField) {
beforeField(valueField);
}
public void afterMapValue(Types.NestedField valueField) {
afterField(valueField);
}
public T schema(Schema schema, T structResult) {
return null;
}
public T struct(Types.StructType struct, List<T> fieldResults) {
return null;
}
public T field(Types.NestedField field, T fieldResult) {
return null;
}
public T list(Types.ListType list, T elementResult) {
return null;
}
public T map(Types.MapType map, T keyResult, T valueResult) {
return null;
}
public T primitive(Type.PrimitiveType primitive) {
return null;
}
}
public static <T> T visit(Schema schema, SchemaVisitor<T> visitor) {
return visitor.schema(schema, visit(schema.asStruct(), visitor));
}
public static <T> T visit(Type type, SchemaVisitor<T> visitor) {
switch (type.typeId()) {
case STRUCT:
Types.StructType struct = type.asNestedType().asStructType();
List<T> results = Lists.newArrayListWithExpectedSize(struct.fields().size());
for (Types.NestedField field : struct.fields()) {
visitor.beforeField(field);
T result;
try {
result = visit(field.type(), visitor);
} finally {
visitor.afterField(field);
}
results.add(visitor.field(field, result));
}
return visitor.struct(struct, results);
case LIST:
Types.ListType list = type.asNestedType().asListType();
T elementResult;
Types.NestedField elementField = list.field(list.elementId());
visitor.beforeListElement(elementField);
try {
elementResult = visit(list.elementType(), visitor);
} finally {
visitor.afterListElement(elementField);
}
return visitor.list(list, elementResult);
case MAP:
Types.MapType map = type.asNestedType().asMapType();
T keyResult;
T valueResult;
Types.NestedField keyField = map.field(map.keyId());
visitor.beforeMapKey(keyField);
try {
keyResult = visit(map.keyType(), visitor);
} finally {
visitor.afterMapKey(keyField);
}
Types.NestedField valueField = map.field(map.valueId());
visitor.beforeMapValue(valueField);
try {
valueResult = visit(map.valueType(), visitor);
} finally {
visitor.afterMapValue(valueField);
}
return visitor.map(map, keyResult, valueResult);
default:
return visitor.primitive(type.asPrimitiveType());
}
}
public static class CustomOrderSchemaVisitor<T> {
public T schema(Schema schema, Supplier<T> structResult) {
return null;
}
public T struct(Types.StructType struct, Iterable<T> fieldResults) {
return null;
}
public T field(Types.NestedField field, Supplier<T> fieldResult) {
return null;
}
public T list(Types.ListType list, Supplier<T> elementResult) {
return null;
}
public T map(Types.MapType map, Supplier<T> keyResult, Supplier<T> valueResult) {
return null;
}
public T primitive(Type.PrimitiveType primitive) {
return null;
}
}
private static class VisitFuture<T> implements Supplier<T> {
private final Type type;
private final CustomOrderSchemaVisitor<T> visitor;
private VisitFuture(Type type, CustomOrderSchemaVisitor<T> visitor) {
this.type = type;
this.visitor = visitor;
}
@Override
public T get() {
return visit(type, visitor);
}
}
private static class VisitFieldFuture<T> implements Supplier<T> {
private final Types.NestedField field;
private final CustomOrderSchemaVisitor<T> visitor;
private VisitFieldFuture(Types.NestedField field, CustomOrderSchemaVisitor<T> visitor) {
this.field = field;
this.visitor = visitor;
}
@Override
public T get() {
return visitor.field(field, new VisitFuture<>(field.type(), visitor));
}
}
public static <T> T visit(Schema schema, CustomOrderSchemaVisitor<T> visitor) {
return visitor.schema(schema, new VisitFuture<>(schema.asStruct(), visitor));
}
/**
* Used to traverse types with traversals other than pre-order.
* <p>
* This passes a {@link Supplier} to each {@link CustomOrderSchemaVisitor visitor} method that
* returns the result of traversing child types. Structs are passed an {@link Iterable} that
* traverses child fields during iteration.
* <p>
* An example use is assigning column IDs, which should be done with a post-order traversal.
*
* @param type a type to traverse with a visitor
* @param visitor a custom order visitor
* @param <T> the type returned by the visitor
* @return the result of traversing the given type with the visitor
*/
public static <T> T visit(Type type, CustomOrderSchemaVisitor<T> visitor) {
switch (type.typeId()) {
case STRUCT:
Types.StructType struct = type.asNestedType().asStructType();
List<VisitFieldFuture<T>> results = Lists
.newArrayListWithExpectedSize(struct.fields().size());
for (Types.NestedField field : struct.fields()) {
results.add(
new VisitFieldFuture<>(field, visitor));
}
return visitor.struct(struct, Iterables.transform(results, VisitFieldFuture::get));
case LIST:
Types.ListType list = type.asNestedType().asListType();
return visitor.list(list, new VisitFuture<>(list.elementType(), visitor));
case MAP:
Types.MapType map = type.asNestedType().asMapType();
return visitor.map(map,
new VisitFuture<>(map.keyType(), visitor),
new VisitFuture<>(map.valueType(), visitor));
default:
return visitor.primitive(type.asPrimitiveType());
}
}
static int decimalMaxPrecision(int numBytes) {
Preconditions.checkArgument(numBytes >= 0 && numBytes < 24,
"Unsupported decimal length: %s", numBytes);
return MAX_PRECISION[numBytes];
}
public static int decimalRequiredBytes(int precision) {
Preconditions.checkArgument(precision >= 0 && precision < 40,
"Unsupported decimal precision: %s", precision);
return REQUIRED_LENGTH[precision];
}
private static final int[] MAX_PRECISION = new int[24];
private static final int[] REQUIRED_LENGTH = new int[40];
static {
// for each length, calculate the max precision
for (int len = 0; len < MAX_PRECISION.length; len += 1) {
MAX_PRECISION[len] = (int) Math.floor(Math.log10(Math.pow(2, 8 * len - 1) - 1));
}
// for each precision, find the first length that can hold it
for (int precision = 0; precision < REQUIRED_LENGTH.length; precision += 1) {
REQUIRED_LENGTH[precision] = -1;
for (int len = 0; len < MAX_PRECISION.length; len += 1) {
// find the first length that can hold the precision
if (precision <= MAX_PRECISION[len]) {
REQUIRED_LENGTH[precision] = len;
break;
}
}
if (REQUIRED_LENGTH[precision] < 0) {
throw new IllegalStateException(
"Could not find required length for precision " + precision);
}
}
}
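// Worked example for the tables above (illustrative): for len = 4,
// MAX_PRECISION[4] = floor(log10(2^31 - 1)) = 9, so any decimal with
// precision <= 9 fits in 4 bytes and REQUIRED_LENGTH[9] resolves to 4.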
}
| 1 | 43,990 | why is this change necessary? | apache-iceberg | java |
@@ -178,9 +178,8 @@ public class SchemaTypeTable implements ImportTypeTable, SchemaTypeFormatter {
@Override
public String getFullNameFor(TypeModel type) {
- // TODO(andrealin): Remove this hack when null response types are implemented.
- if (type == null) {
- return "nullFullName";
+ if (type.isEmptyType()) {
+ return "java.lang.Void";
}
if (type instanceof DiscoveryRequestType) {
Method method = ((DiscoveryRequestType) type).parentMethod().getDiscoMethod(); | 1 | /* Copyright 2017 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.api.codegen.transformer;
import com.google.api.codegen.config.DiscoveryField;
import com.google.api.codegen.config.DiscoveryRequestType;
import com.google.api.codegen.config.FieldConfig;
import com.google.api.codegen.config.FieldModel;
import com.google.api.codegen.config.InterfaceModel;
import com.google.api.codegen.config.TypeModel;
import com.google.api.codegen.discogapic.transformer.DiscoGapicNamer;
import com.google.api.codegen.discovery.Method;
import com.google.api.codegen.discovery.Schema;
import com.google.api.codegen.transformer.SchemaTypeNameConverter.BoxingBehavior;
import com.google.api.codegen.util.TypeAlias;
import com.google.api.codegen.util.TypeName;
import com.google.api.codegen.util.TypeTable;
import com.google.api.codegen.util.TypedValue;
import java.util.Map;
/**
* A SchemaTypeTable manages the imports for a set of fully-qualified type names, and provides
* helper methods for importing instances of Schema.
*/
public class SchemaTypeTable implements ImportTypeTable, SchemaTypeFormatter {
private SchemaTypeFormatterImpl typeFormatter;
private TypeTable typeTable;
private SchemaTypeNameConverter typeNameConverter;
private DiscoGapicNamer discoGapicNamer;
private SurfaceNamer languageNamer;
public SchemaTypeTable(
TypeTable typeTable, SchemaTypeNameConverter typeNameConverter, SurfaceNamer languageNamer) {
this(typeTable, typeNameConverter, languageNamer, new DiscoGapicNamer());
}
private SchemaTypeTable(
TypeTable typeTable,
SchemaTypeNameConverter typeNameConverter,
SurfaceNamer languageNamer,
DiscoGapicNamer discoGapicNamer) {
this.typeFormatter = new SchemaTypeFormatterImpl(typeNameConverter);
this.typeTable = typeTable;
this.typeNameConverter = typeNameConverter;
this.languageNamer = languageNamer;
this.discoGapicNamer = discoGapicNamer;
}
public DiscoGapicNamer getDiscoGapicNamer() {
return discoGapicNamer;
}
@Override
public SchemaTypeNameConverter getTypeNameConverter() {
return typeNameConverter;
}
@Override
public String renderPrimitiveValue(Schema type, String value) {
return typeFormatter.renderPrimitiveValue(type, value);
}
@Override
public String getNicknameFor(Schema type) {
return typeNameConverter.getTypeName(type).getNickname();
}
@Override
public String getFullNameFor(Schema type) {
return typeFormatter.getFullNameFor(type);
}
@Override
public String getImplicitPackageFullNameFor(String shortName) {
return typeFormatter.getImplicitPackageFullNameFor(shortName);
}
@Override
public String getInnerTypeNameFor(Schema schema) {
return typeFormatter.getInnerTypeNameFor(schema);
}
@Override
public String getEnumValue(FieldModel type, String value) {
return getNotImplementedString("SchemaTypeTable.getFullNameFor(FieldModel type, String value)");
}
@Override
public String getEnumValue(TypeModel type, String value) {
// TODO(andrealin): implement.
return getNotImplementedString("SchemaTypeTable.getEnumValue(TypeModel type, String value)");
}
@Override
public String getAndSaveNicknameFor(TypeModel type) {
return typeTable.getAndSaveNicknameFor(typeNameConverter.getTypeName(type));
}
/** Creates a new SchemaTypeTable of the same concrete type, but with an empty import set. */
@Override
public SchemaTypeTable cloneEmpty() {
return new SchemaTypeTable(
typeTable.cloneEmpty(), typeNameConverter, languageNamer, discoGapicNamer);
}
@Override
public SchemaTypeTable cloneEmpty(String packageName) {
return new SchemaTypeTable(
typeTable.cloneEmpty(packageName), typeNameConverter, languageNamer, discoGapicNamer);
}
/** Compute the nickname for the given fullName and save it in the import set. */
@Override
public void saveNicknameFor(String fullName) {
getAndSaveNicknameFor(fullName);
}
/**
* Computes the nickname for the given full name, adds the full name to the import set, and
* returns the nickname.
*/
@Override
public String getAndSaveNicknameFor(String fullName) {
return typeTable.getAndSaveNicknameFor(fullName);
}
/** Adds the given type alias to the import set, and returns the nickname. */
@Override
public String getAndSaveNicknameFor(TypeAlias typeAlias) {
return typeTable.getAndSaveNicknameFor(typeAlias);
}
/**
* Computes the nickname for the given container full name and inner type short name, adds the
* full inner type name to the static import set, and returns the nickname.
*/
@Override
public String getAndSaveNicknameForInnerType(
String containerFullName, String innerTypeShortName) {
return typeTable.getAndSaveNicknameForInnerType(containerFullName, innerTypeShortName);
}
/**
* Computes the nickname for the given type, adds the full name to the import set, and returns the
* nickname.
*/
public String getAndSaveNicknameFor(Schema schema) {
return typeTable.getAndSaveNicknameFor(
typeNameConverter.getTypeName(schema, BoxingBehavior.BOX_PRIMITIVES));
}
public String getFullNameForElementType(Schema type) {
return typeFormatter.getFullNameFor(type);
}
/** Get the full name for the given type. */
@Override
public String getFullNameFor(FieldModel type) {
return getFullNameFor(((DiscoveryField) type).getDiscoveryField());
}
@Override
public String getFullNameFor(InterfaceModel type) {
return type.getFullName();
}
@Override
public String getFullNameFor(TypeModel type) {
// TODO(andrealin): Remove this hack when null response types are implemented.
if (type == null) {
return "nullFullName";
}
if (type instanceof DiscoveryRequestType) {
Method method = ((DiscoveryRequestType) type).parentMethod().getDiscoMethod();
return discoGapicNamer.getRequestTypeName(method, languageNamer).getFullName();
}
Schema schema = null;
if (!type.isEmptyType()) {
schema = ((DiscoveryField) type).getDiscoveryField();
}
return getFullNameFor(schema);
}
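// Sketch of the follow-up suggested in review (hypothetical --
// SchemaTypeNameConverter has no getTypeNameForEmptyType() at this point):
//   if (type.isEmptyType()) {
//     return typeNameConverter.getTypeNameForEmptyType().getFullName();
//   }
// would replace the inline empty-type handling with a single converter call.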
@Override
public String getFullNameForMessageType(TypeModel type) {
return getFullNameFor(type);
}
/** Get the full name for the element type of the given type. */
@Override
public String getFullNameForElementType(FieldModel type) {
return getFullNameForElementType(((DiscoveryField) type).getDiscoveryField());
}
/** Get the full name for the element type of the given type. */
@Override
public String getFullNameForElementType(TypeModel type) {
return getFullNameForElementType(((DiscoveryField) type).getDiscoveryField());
}
@Override
public String getAndSaveNicknameForElementType(TypeModel type) {
return getAndSaveNicknameForElementType(((FieldModel) type));
}
/** Returns the nickname for the given type (without adding the full name to the import set). */
@Override
public String getNicknameFor(FieldModel type) {
return typeFormatter.getNicknameFor(type);
}
@Override
public String getNicknameFor(TypeModel type) {
return typeFormatter.getNicknameFor(type);
}
/** Renders the primitive value of the given type. */
@Override
public String renderPrimitiveValue(FieldModel type, String key) {
return renderPrimitiveValue(((DiscoveryField) type).getDiscoveryField(), key);
}
@Override
public String renderPrimitiveValue(TypeModel type, String key) {
return renderPrimitiveValue(((DiscoveryField) type).getDiscoveryField(), key);
}
@Override
public String renderValueAsString(String key) {
return typeNameConverter.renderValueAsString(key);
}
/**
* Computes the nickname for the given type, adds the full name to the import set, and returns the
* nickname.
*/
@Override
public String getAndSaveNicknameFor(FieldModel type) {
return typeTable.getAndSaveNicknameFor(
typeNameConverter.getTypeName(((DiscoveryField) type).getDiscoveryField()));
}
/*
* Computes the nickname for the given FieldConfig, and ResourceName. Adds the full name to
* the import set, and returns the nickname.
*/
@Override
public String getAndSaveNicknameForTypedResourceName(
FieldConfig fieldConfig, String typedResourceShortName) {
return typeTable.getAndSaveNicknameFor(
typeNameConverter.getTypeNameForTypedResourceName(fieldConfig, typedResourceShortName));
}
/*
* Computes the nickname for the element type given FieldConfig, and ResourceName. Adds the full
* name to the import set, and returns the nickname.
*/
@Override
public String getAndSaveNicknameForResourceNameElementType(
FieldConfig fieldConfig, String typedResourceShortName) {
return typeTable.getAndSaveNicknameFor(
typeNameConverter.getTypeNameForResourceNameElementType(
fieldConfig, typedResourceShortName));
}
@Override
public String getAndSaveNicknameForElementType(FieldModel type) {
return typeTable.getAndSaveNicknameFor(typeNameConverter.getTypeNameForElementType(type));
}
@Override
public String getAndSaveNicknameForContainer(
String containerFullName, String... elementFullNames) {
TypeName completeTypeName = typeTable.getContainerTypeName(containerFullName, elementFullNames);
return typeTable.getAndSaveNicknameFor(completeTypeName);
}
@Override
public String getSnippetZeroValueAndSaveNicknameFor(FieldModel type) {
return typeNameConverter.getSnippetZeroValue(type).getValueAndSaveTypeNicknameIn(typeTable);
}
@Override
public String getSnippetZeroValueAndSaveNicknameFor(TypeModel type) {
Schema schema = null;
if (!type.isEmptyType()) {
schema = ((DiscoveryField) type).getDiscoveryField();
}
TypedValue typedValue = typeNameConverter.getSnippetZeroValue(schema);
return typedValue.getValueAndSaveTypeNicknameIn(typeTable);
}
@Override
public String getImplZeroValueAndSaveNicknameFor(FieldModel type) {
return typeNameConverter.getImplZeroValue(type).getValueAndSaveTypeNicknameIn(typeTable);
}
/** Returns the imports accumulated so far. */
@Override
public Map<String, TypeAlias> getImports() {
return typeTable.getImports();
}
@Override
public TypeTable getTypeTable() {
return typeTable;
}
public String getNotImplementedString(String feature) {
return "$ NOT IMPLEMENTED: " + feature + " $";
}
}
| 1 | 25,087 | make a SchemaTypeNameConverter.getTypeNameForEmptyType() and call that here. | googleapis-gapic-generator | java |
@@ -26,7 +26,10 @@ class TinyMCELanguage extends AbstractSmartyPlugin
public function __construct(Request $request)
{
- $this->locale = $request->getSession()->getLang()->getLocale();
+ if($request->getSession() != null)
+ $this->locale = $request->getSession()->getLang()->getLocale();
+ else
+ $this->locale = Lang::getDefaultLanguage()->getLocale();
}
public function guessTinyMCELanguage($params, \Smarty_Internal_Template $template) | 1 | <?php
/*************************************************************************************/
/* This file is part of the Thelia package. */
/* */
/* Copyright (c) OpenStudio */
/* email : dev@thelia.net */
/* web : http://www.thelia.net */
/* */
/* For the full copyright and license information, please view the LICENSE.txt */
/* file that was distributed with this source code. */
/*************************************************************************************/
namespace Tinymce\Smarty;
use Symfony\Component\Finder\Finder;
use Symfony\Component\Finder\SplFileInfo;
use Thelia\Core\HttpFoundation\Request;
use Thelia\Core\HttpFoundation\Session\Session;
use Thelia\Core\Template\Smarty\AbstractSmartyPlugin;
use Thelia\Core\Template\Smarty\SmartyPluginDescriptor;
class TinyMCELanguage extends AbstractSmartyPlugin
{
/** @var string $locale */
private $locale;
public function __construct(Request $request)
{
$this->locale = $request->getSession()->getLang()->getLocale();
}
public function guessTinyMCELanguage($params, \Smarty_Internal_Template $template)
{
// Find TinyMCE available languages
$finder = new Finder();
$files = $finder->in(__DIR__.DS."..".DS."Resources".DS.'js'.DS.'tinymce'.DS.'langs')->sortByName();
$miniLocale = substr($this->locale, 0, 2);
// Find the best matching language
/** @var SplFileInfo $file */
foreach($files as $file) {
$lang = str_replace('.js', '', $file->getFilename());
if ($lang == $this->locale || $lang == $miniLocale) {
return $lang;
}
}
return '';
}
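// Illustrative example (not in the original source): with locale "fr_FR",
// the first file in sorted order whose basename equals "fr_FR" or "fr"
// wins; if neither langs/fr_FR.js nor langs/fr.js exists, '' is returned
// and the template is left to use TinyMCE's default language.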
/**
* Define the various Smarty plugins handled by this class
*
* @return array an array of smarty plugin descriptors
*/
public function getPluginDescriptors()
{
return array(
new SmartyPluginDescriptor('function', 'tinymce_lang', $this, 'guessTinyMCELanguage'),
);
}
} | 1 | 10,490 | Use braces on your conditional structures please | thelia-thelia | php |
@@ -16,6 +16,7 @@
DECLARE_string(u);
DECLARE_string(p);
+DEFINE_bool(enable_history, false, "Whether to force saving the command history");
namespace nebula {
namespace graph { | 1 | /* Copyright (c) 2018 vesoft inc. All rights reserved.
*
* This source code is licensed under Apache 2.0 License,
* attached with Common Clause Condition 1.0, found in the LICENSES directory.
*/
#include "base/Base.h"
#include "base/Status.h"
#include <termios.h>
#include <unistd.h>
#include <readline/readline.h>
#include <readline/history.h>
#include "console/CliManager.h"
#include "client/cpp/GraphClient.h"
#include "fs/FileUtils.h"
DECLARE_string(u);
DECLARE_string(p);
namespace nebula {
namespace graph {
const int32_t kMaxAuthInfoRetries = 3;
const int32_t kMaxUsernameLen = 16;
const int32_t kMaxPasswordLen = 24;
const int32_t kMaxCommandLineLen = 1024;
CliManager::CliManager() {
::using_history();
initAutoCompletion();
}
bool CliManager::connect(const std::string& addr,
uint16_t port,
const std::string& username,
const std::string& password) {
char user[kMaxUsernameLen + 1];
char pass[kMaxPasswordLen + 1];
strncpy(user, username.c_str(), kMaxUsernameLen);
user[kMaxUsernameLen] = '\0';
strncpy(pass, password.c_str(), kMaxPasswordLen);
pass[kMaxPasswordLen] = '\0';
// Make sure username is not empty
if (FLAGS_u.empty()) {
for (int32_t i = 0; i < kMaxAuthInfoRetries && !strlen(user); i++) {
// Need to interactively get the username
std::cout << "Username: ";
std::cin.getline(user, kMaxUsernameLen);
user[kMaxUsernameLen] = '\0';
}
} else {
strcpy(user, FLAGS_u.c_str()); // NOLINT
}
if (!strlen(user)) {
std::cout << "Authentication failed: "
"Need a valid username to authenticate\n\n";
return false;
}
// Make sure password is not empty
if (FLAGS_p.empty()) {
for (int32_t i = 0; i < kMaxAuthInfoRetries && !strlen(pass); i++) {
// Need to interactively get the password
std::cout << "Password: ";
termios oldTerminal;
tcgetattr(STDIN_FILENO, &oldTerminal);
termios newTerminal = oldTerminal;
newTerminal.c_lflag &= ~ECHO;
tcsetattr(STDIN_FILENO, TCSANOW, &newTerminal);
std::cin.getline(pass, kMaxPasswordLen);
pass[kMaxPasswordLen] = '\0';
tcsetattr(STDIN_FILENO, TCSANOW, &oldTerminal);
}
} else {
strcpy(pass, FLAGS_p.c_str()); // NOLINT
}
if (!strlen(pass)) {
std::cout << "Authentication failed: "
"Need a valid password\n\n";
return false;
}
addr_ = addr;
port_ = port;
username_ = user;
auto client = std::make_unique<GraphClient>(addr_, port_);
cpp2::ErrorCode res = client->connect(user, pass);
if (res == cpp2::ErrorCode::SUCCEEDED) {
std::cerr << "\nWelcome to Nebula Graph (Version 0.1)\n\n";
cmdProcessor_ = std::make_unique<CmdProcessor>(std::move(client));
return true;
} else {
// There is an error
std::cout << "Connection failed\n";
return false;
}
}
void CliManager::batch(const std::string& filename) {
UNUSED(filename);
}
void CliManager::loop() {
// TODO(dutor) Detect if `stdin' is being attached to a TTY
std::string cmd;
loadHistory();
while (true) {
std::string line;
if (!readLine(line, !cmd.empty())) {
break;
}
if (line.empty()) {
cmd.clear();
continue;
}
if (line.back() == '\\') {
line.resize(line.size() - 1);
if (cmd.empty()) {
cmd = line;
} else if (cmd.back() == ' ') {
cmd += line;
} else {
cmd = cmd + " " + line;
}
continue;
}
cmd += line;
if (!cmdProcessor_->process(cmd)) {
break;
}
cmd.clear();
}
saveHistory();
}
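// Example of the continuation handling above (illustrative): entering
// "SHOW \" and then "SPACES;" accumulates the single command "SHOW SPACES;"
// before it is handed to cmdProcessor_->process(); a blank line resets the
// partially accumulated command instead.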
bool CliManager::readLine(std::string &line, bool linebreak) {
auto ok = true;
char prompt[256];
static auto color = 0u;
::snprintf(prompt, sizeof(prompt),
"\001" // RL_PROMPT_START_IGNORE
"\033[1;%um" // color codes start
"\002" // RL_PROMPT_END_IGNORE
"nebula> " // prompt
"\001" // RL_PROMPT_START_IGNORE
"\033[0m" // restore color code
"\002", // RL_PROMPT_END_IGNORE
color++ % 6 + 31);
auto *input = ::readline(linebreak ? "": prompt);
do {
// EOF
if (input == nullptr) {
fprintf(stdout, "\n");
ok = false;
break;
}
// Empty line
if (input[0] == '\0') {
line.clear();
break;
}
line = folly::trimWhitespace(input).str();
if (!line.empty()) {
// Update command history
updateHistory(input);
}
} while (false);
::free(input);
return ok;
}
void CliManager::updateHistory(const char *line) {
auto **hists = ::history_list();
auto i = 0;
// Search in history
for (; i < ::history_length; i++) {
auto *hist = hists[i];
if (::strcmp(line, hist->line) == 0) {
break;
}
}
// New command
if (i == ::history_length) {
::add_history(line);
return;
}
// Found in history, move it to the latest position
auto *hist = hists[i];
for (; i < ::history_length - 1; i++) {
hists[i] = hists[i + 1];
}
hists[i] = hist;
}
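// Illustrative walkthrough (not in the original source): with history
// [A, B, C] and B entered again, B is found at index 1, the tail shifts
// left to [A, C, _], and B is reinserted at the end, giving [A, C, B] --
// a simple move-to-latest deduplication of the readline history.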
void CliManager::saveHistory() {
std::string histfile;
histfile += ::getenv("HOME");
histfile += "/.nebula_history";
auto *file = ::fopen(histfile.c_str(), "w+");
if (file == nullptr) {
return; // fail silently
}
auto **hists = ::history_list();
for (auto i = 0; i < ::history_length; i++) {
fprintf(file, "%s\n", hists[i]->line);
}
::fflush(file);
::fclose(file);
}
void CliManager::loadHistory() {
std::string histfile;
histfile += ::getenv("HOME");
histfile += "/.nebula_history";
auto *file = ::fopen(histfile.c_str(), "r");
if (file == nullptr) {
return; // fail silently
}
char *line = nullptr;
size_t size = 0;
ssize_t read = 0;
while ((read = ::getline(&line, &size, file)) != -1) {
line[read - 1] = '\0'; // remove the trailing newline
updateHistory(line);
}
::free(line);
::fclose(file);
}
struct StringCaseEqual {
bool operator()(const std::string &lhs, const std::string &rhs) const {
return ::strcasecmp(lhs.c_str(), rhs.c_str()) == 0;
}
};
struct StringCaseHash {
size_t operator()(const std::string &lhs) const {
std::string upper;
upper.resize(lhs.size());
auto toupper = [] (auto c) { return ::toupper(c); };
std::transform(lhs.begin(), lhs.end(), upper.begin(), toupper);
return std::hash<std::string>()(upper);
}
};
// Primary keywords, like `GO' `CREATE', etc.
static std::vector<std::string> primaryKeywords;
// Keywords along with their sub-keywords, like `SHOW': `TAGS', `SPACES'
static std::unordered_map<std::string, std::vector<std::string>,
StringCaseHash, StringCaseEqual> subKeywords;
// Typenames, like `int', `double', `string', etc.
static std::vector<std::string> typeNames;
// To fill the containers above from a json file.
static Status loadCompletions();
static Status parseKeywordsFromJson(const folly::dynamic &json);
// To retrieve matches from within the `primaryKeywords'
static std::vector<std::string>
matchFromPrimaryKeywords(const std::string &text);
// To retrieve matches from within the `subKeywords'
static std::vector<std::string> matchFromSubKeywords(const std::string &text,
const std::string &primaryKeyword);
// Given a collection of keywords, retrieve matches that prefixed with `text'
static std::vector<std::string> matchFromKeywords(const std::string &text,
const std::vector<std::string> &keywords);
// To tell if the current `text' is at the start position of a statement.
// If so, we should do completion with primary keywords.
// Otherwise, the primary keyword of the current statement
// will be set, thus we will do completion with its sub keywords.
static bool isStartOfStatement(std::string &primaryKeyword);
// Given the prefix and a collection of keywords, retrieve the longest common prefix
// e.g. given `u' as the prefix and [USE, USER, USERS] as the collection, will return `USE'
static auto longestCommonPrefix(std::string prefix,
const std::vector<std::string>& words);
// Callback by realine if an auto completion is triggered
static char** completer(const char *text, int start, int end);
auto longestCommonPrefix(std::string prefix,
const std::vector<std::string>& words) {
if (words.size() == 1) {
return words[0];
}
while (true) {
char nextChar = 0;
for (auto &word : words) {
if (word.size() <= prefix.size()) {
return word;
}
if (nextChar == 0) {
nextChar = word[prefix.size()];
continue;
}
if (::toupper(nextChar) != ::toupper(word[prefix.size()])) {
return word.substr(0, prefix.size());
}
}
prefix = words[0].substr(0, prefix.size() + 1);
}
}
char** completer(const char *text, int start, int end) {
UNUSED(start);
UNUSED(end);
// Don't do filename completion even if there is no match.
::rl_attempted_completion_over = 1;
// Don't do completion inside quotes
if (::rl_completion_quote_character != 0) {
return nullptr;
}
std::vector<std::string> matches;
std::string primaryKeyword; // The current primary keyword
if (isStartOfStatement(primaryKeyword)) {
matches = matchFromPrimaryKeywords(text);
} else {
matches = matchFromSubKeywords(text, primaryKeyword);
}
if (matches.empty()) {
return nullptr;
}
char **results = reinterpret_cast<char**>(malloc((2 + matches.size()) * sizeof(char*)));
// Get the longest common prefix of all matches as the echo back of this completion action
results[0] = ::strdup(longestCommonPrefix(text, matches).c_str());
auto i = 1;
for (auto &word : matches) {
results[i++] = ::strdup(word.c_str());
}
results[i] = nullptr;
return results;
}
bool isStartOfStatement(std::string &primaryKeyword) {
// If there is no input
if (::rl_line_buffer == nullptr || *::rl_line_buffer == '\0') {
return true;
}
std::string line = ::rl_line_buffer;
auto piece = folly::trimWhitespace(line);
// If the inputs are all white spaces
if (piece.empty()) {
return true;
}
// If the inputs are terminated with ';' or '|', i.e. complete statements
// Additionally, there is an incomplete primary keyword for the next statement
{
static const std::regex pattern(R"((\s*\w+[^;|]*[;|]\s*)*(\w+)?)");
std::smatch result;
if (std::regex_match(line, result, pattern)) {
return true;
}
}
// The same to the occasion above, except that the primary keyword is complete
// This is where sub keywords shall be completed
{
static const std::regex pattern(R"((\s*\w+[^;|]*[;|]\s*)*(\w+)[^;|]+)");
std::smatch result;
if (std::regex_match(line, result, pattern)) {
primaryKeyword = result[result.size() - 1].str();
return false;
}
}
// TODO(dutor) There are still many scenarios we cannot cover with regular expressions.
// We have to accomplish this with the help of the actual parser.
return false;
}
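// Examples for the two patterns above (illustrative): with the buffer
// "SHOW SPACES; CRE" the first regex matches, so "CRE" is completed against
// primary keywords; with "SHOW SPA" the second regex matches and sets
// primaryKeyword = "SHOW", so completion uses SHOW's sub-keywords instead.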
std::vector<std::string> matchFromPrimaryKeywords(const std::string &text) {
return matchFromKeywords(text, primaryKeywords);
}
std::vector<std::string> matchFromSubKeywords(const std::string &text,
const std::string &primaryKeyword) {
std::vector<std::string> matches = typeNames;
auto iter = subKeywords.find(primaryKeyword);
if (iter != subKeywords.end()) {
matches.insert(matches.end(), iter->second.begin(), iter->second.end());
}
return matchFromKeywords(text, matches);
}
std::vector<std::string>
matchFromKeywords(const std::string &text, const std::vector<std::string> &keywords) {
if (keywords.empty()) {
return {};
}
std::vector<std::string> matches;
for (auto &word : keywords) {
if (text.size() > word.size()) {
continue;
}
if (::strncasecmp(text.c_str(), word.c_str(), text.size()) == 0) {
matches.emplace_back(word);
}
}
return matches;
}
Status loadCompletions() {
using fs::FileUtils;
auto dir = FileUtils::readLink("/proc/self/exe").value();
dir = FileUtils::dirname(dir.c_str()) + "/../share/resources";
std::string file = dir + "/" + "completion.json";
auto status = Status::OK();
int fd = -1;
do {
fd = ::open(file.c_str(), O_RDONLY);
if (fd == -1) {
status = Status::Error("Failed to open `%s': %s",
file.c_str(), ::strerror(errno));
break;
}
auto len = ::lseek(fd, 0, SEEK_END);
if (len == 0) {
status = Status::Error("File `%s' is empty", file.c_str());
break;
}
auto buffer = std::make_unique<char[]>(len + 1);
::lseek(fd, 0, SEEK_SET);
auto ll = ::read(fd, buffer.get(), len);
UNUSED(ll);
buffer[len] = '\0';
std::string content;
content.assign(buffer.get(), len);
try {
status = parseKeywordsFromJson(folly::parseJson(content));
} catch (const std::exception &e) {
status = Status::Error("Illegal json `%s': %s", file.c_str(), e.what());
break;
}
if (!status.ok()) {
break;
}
} while (false);
if (fd != -1) {
::close(fd);
}
return status;
}
Status parseKeywordsFromJson(const folly::dynamic &json) {
auto iter = json.find("keywords");
if (iter == json.items().end()) {
fprintf(stderr, "completions: no `keywords' found\n");
return Status::OK();
}
for (auto &pair : iter->second.items()) {
auto &pkw = pair.first;
primaryKeywords.emplace_back(pkw.asString());
auto subIter = pair.second.find("sub_keywords");
if (subIter == pair.second.items().end()) {
continue;
}
if (!subIter->second.isArray()) {
fprintf(stderr, "sub-keywords for `%s' should be an array\n",
pkw.asString().c_str());
continue;
}
for (auto &subKey : subIter->second) {
if (!subKey.isString()) {
fprintf(stderr, "keyword name should be of type string\n");
break;
}
subKeywords[pkw.asString()].emplace_back(subKey.asString());
}
}
iter = json.find("typenames");
if (iter == json.items().end()) {
fprintf(stderr, "completions: no `typenames' found\n");
return Status::OK();
}
for (auto &tname : iter->second) {
typeNames.emplace_back(tname.asString());
}
return Status::OK();
}
void CliManager::initAutoCompletion() {
// The completion function
::rl_attempted_completion_function = completer;
// Characters that indicates begin or end of a quote
::rl_completer_quote_characters = "\"";
// Allow conditional parsing of the ~/.inputrc file
::rl_readline_name = "nebula-graph";
auto status = loadCompletions();
if (!status.ok()) {
fprintf(stderr, "%s\n", status.toString().c_str());
}
}
} // namespace graph
} // namespace nebula
| 1 | 19,463 | Great God, I have a question. This ".nebula_history" file is used to save history commands. Is there a file size limit? When the file is large, does it take a long time to start initialization (loadHistory)? How did you think about this? Thx. | vesoft-inc-nebula | cpp |
@@ -1004,8 +1004,9 @@ static void parseRecord (tokenInfo *const token)
*/
if (!isType (token, TOKEN_OPEN_PAREN))
readToken (token);
+ if (!isType (token, TOKEN_OPEN_PAREN))
+ return;
- Assert (isType (token, TOKEN_OPEN_PAREN));
do
{
if (isType (token, TOKEN_COMMA) || | 1 | /*
* Copyright (c) 2002-2003, Darren Hiebert
*
* This source code is released for free distribution under the terms of the
* GNU General Public License.
*
* This module contains functions for generating tags for PL/SQL language
* files.
*/
/*
* INCLUDE FILES
*/
#include "general.h" /* must always come first */
#include <ctype.h> /* to define isalpha () */
#ifdef DEBUG
#include <stdio.h>
#endif
#include "debug.h"
#include "entry.h"
#include "keyword.h"
#include "parse.h"
#include "read.h"
#include "routines.h"
#include "vstring.h"
/*
* On-line "Oracle Database PL/SQL Language Reference":
* http://download.oracle.com/docs/cd/B28359_01/appdev.111/b28370/toc.htm
*
* Sample PL/SQL code is available from:
* http://www.orafaq.com/faqscrpt.htm#GENPLSQL
*
* On-line SQL Anywhere Documentation
* http://www.ianywhere.com/developer/product_manuals/sqlanywhere/index.html
*/
/*
* MACROS
*/
#define isType(token,t) (boolean) ((token)->type == (t))
#define isKeyword(token,k) (boolean) ((token)->keyword == (k))
/*
* DATA DECLARATIONS
*/
typedef enum eException { ExceptionNone, ExceptionEOF } exception_t;
/*
* Used to specify type of keyword.
*/
typedef enum eKeywordId {
KEYWORD_NONE = -1,
KEYWORD_is,
KEYWORD_begin,
KEYWORD_body,
KEYWORD_cursor,
KEYWORD_declare,
KEYWORD_end,
KEYWORD_function,
KEYWORD_if,
KEYWORD_else,
KEYWORD_elseif,
KEYWORD_endif,
KEYWORD_loop,
KEYWORD_while,
KEYWORD_case,
KEYWORD_for,
KEYWORD_do,
KEYWORD_call,
KEYWORD_package,
KEYWORD_pragma,
KEYWORD_procedure,
KEYWORD_record,
KEYWORD_object,
KEYWORD_ref,
KEYWORD_rem,
KEYWORD_return,
KEYWORD_returns,
KEYWORD_subtype,
KEYWORD_table,
KEYWORD_trigger,
KEYWORD_type,
KEYWORD_index,
KEYWORD_event,
KEYWORD_publication,
KEYWORD_service,
KEYWORD_domain,
KEYWORD_datatype,
KEYWORD_result,
KEYWORD_url,
KEYWORD_internal,
KEYWORD_external,
KEYWORD_when,
KEYWORD_then,
KEYWORD_variable,
KEYWORD_exception,
KEYWORD_at,
KEYWORD_on,
KEYWORD_primary,
KEYWORD_references,
KEYWORD_unique,
KEYWORD_check,
KEYWORD_constraint,
KEYWORD_foreign,
KEYWORD_ml_table,
KEYWORD_ml_table_lang,
KEYWORD_ml_table_dnet,
KEYWORD_ml_table_java,
KEYWORD_ml_table_chk,
KEYWORD_ml_conn,
KEYWORD_ml_conn_lang,
KEYWORD_ml_conn_dnet,
KEYWORD_ml_conn_java,
KEYWORD_ml_conn_chk,
KEYWORD_ml_prop,
KEYWORD_local,
KEYWORD_temporary,
KEYWORD_drop,
KEYWORD_view,
KEYWORD_synonym,
KEYWORD_handler,
KEYWORD_comment,
KEYWORD_create,
KEYWORD_go
} keywordId;
/*
* Used to determine whether keyword is valid for the token language and
* what its ID is.
*/
typedef struct sKeywordDesc {
const char *name;
keywordId id;
} keywordDesc;
typedef enum eTokenType {
TOKEN_UNDEFINED,
TOKEN_EOF,
TOKEN_BLOCK_LABEL_BEGIN,
TOKEN_BLOCK_LABEL_END,
TOKEN_CHARACTER,
TOKEN_CLOSE_PAREN,
TOKEN_COLON,
TOKEN_SEMICOLON,
TOKEN_COMMA,
TOKEN_IDENTIFIER,
TOKEN_KEYWORD,
TOKEN_OPEN_PAREN,
TOKEN_OPERATOR,
TOKEN_OTHER,
TOKEN_STRING,
TOKEN_PERIOD,
TOKEN_OPEN_CURLY,
TOKEN_CLOSE_CURLY,
TOKEN_OPEN_SQUARE,
TOKEN_CLOSE_SQUARE,
TOKEN_TILDE,
TOKEN_FORWARD_SLASH,
TOKEN_EQUAL
} tokenType;
typedef struct sTokenInfoSQL {
tokenType type;
keywordId keyword;
vString * string;
vString * scope;
int scopeKind;
int begin_end_nest_lvl;
unsigned long lineNumber;
fpos_t filePosition;
} tokenInfo;
/*
* DATA DEFINITIONS
*/
static langType Lang_sql;
typedef enum {
SQLTAG_CURSOR,
SQLTAG_PROTOTYPE,
SQLTAG_FUNCTION,
SQLTAG_FIELD,
SQLTAG_LOCAL_VARIABLE,
SQLTAG_BLOCK_LABEL,
SQLTAG_PACKAGE,
SQLTAG_PROCEDURE,
SQLTAG_RECORD,
SQLTAG_SUBTYPE,
SQLTAG_TABLE,
SQLTAG_TRIGGER,
SQLTAG_VARIABLE,
SQLTAG_INDEX,
SQLTAG_EVENT,
SQLTAG_PUBLICATION,
SQLTAG_SERVICE,
SQLTAG_DOMAIN,
SQLTAG_VIEW,
SQLTAG_SYNONYM,
SQLTAG_MLTABLE,
SQLTAG_MLCONN,
SQLTAG_MLPROP,
SQLTAG_COUNT
} sqlKind;
static kindOption SqlKinds [] = {
{ TRUE, 'c', "cursor", "cursors" },
{ FALSE, 'd', "prototype", "prototypes" },
{ TRUE, 'f', "function", "functions" },
{ TRUE, 'F', "field", "record fields" },
{ FALSE, 'l', "local", "local variables" },
{ TRUE, 'L', "label", "block label" },
{ TRUE, 'P', "package", "packages" },
{ TRUE, 'p', "procedure", "procedures" },
{ FALSE, 'r', "record", "records" },
{ TRUE, 's', "subtype", "subtypes" },
{ TRUE, 't', "table", "tables" },
{ TRUE, 'T', "trigger", "triggers" },
{ TRUE, 'v', "variable", "variables" },
{ TRUE, 'i', "index", "indexes" },
{ TRUE, 'e', "event", "events" },
{ TRUE, 'U', "publication", "publications" },
{ TRUE, 'R', "service", "services" },
{ TRUE, 'D', "domain", "domains" },
{ TRUE, 'V', "view", "views" },
{ TRUE, 'n', "synonym", "synonyms" },
{ TRUE, 'x', "mltable", "MobiLink Table Scripts" },
{ TRUE, 'y', "mlconn", "MobiLink Conn Scripts" },
{ TRUE, 'z', "mlprop", "MobiLink Properties " }
};
static const keywordDesc SqlKeywordTable [] = {
/* keyword keyword ID */
{ "as", KEYWORD_is },
{ "is", KEYWORD_is },
{ "begin", KEYWORD_begin },
{ "body", KEYWORD_body },
{ "cursor", KEYWORD_cursor },
{ "declare", KEYWORD_declare },
{ "end", KEYWORD_end },
{ "function", KEYWORD_function },
{ "if", KEYWORD_if },
{ "else", KEYWORD_else },
{ "elseif", KEYWORD_elseif },
{ "endif", KEYWORD_endif },
{ "loop", KEYWORD_loop },
{ "while", KEYWORD_while },
{ "case", KEYWORD_case },
{ "for", KEYWORD_for },
{ "do", KEYWORD_do },
{ "call", KEYWORD_call },
{ "package", KEYWORD_package },
{ "pragma", KEYWORD_pragma },
{ "procedure", KEYWORD_procedure },
{ "record", KEYWORD_record },
{ "object", KEYWORD_object },
{ "ref", KEYWORD_ref },
{ "rem", KEYWORD_rem },
{ "return", KEYWORD_return },
{ "returns", KEYWORD_returns },
{ "subtype", KEYWORD_subtype },
{ "table", KEYWORD_table },
{ "trigger", KEYWORD_trigger },
{ "type", KEYWORD_type },
{ "index", KEYWORD_index },
{ "event", KEYWORD_event },
{ "publication", KEYWORD_publication },
{ "service", KEYWORD_service },
{ "domain", KEYWORD_domain },
{ "datatype", KEYWORD_datatype },
{ "result", KEYWORD_result },
{ "url", KEYWORD_url },
{ "internal", KEYWORD_internal },
{ "external", KEYWORD_external },
{ "when", KEYWORD_when },
{ "then", KEYWORD_then },
{ "variable", KEYWORD_variable },
{ "exception", KEYWORD_exception },
{ "at", KEYWORD_at },
{ "on", KEYWORD_on },
{ "primary", KEYWORD_primary },
{ "references", KEYWORD_references },
{ "unique", KEYWORD_unique },
{ "check", KEYWORD_check },
{ "constraint", KEYWORD_constraint },
{ "foreign", KEYWORD_foreign },
{ "ml_add_table_script", KEYWORD_ml_table },
{ "ml_add_lang_table_script", KEYWORD_ml_table_lang },
{ "ml_add_dnet_table_script", KEYWORD_ml_table_dnet },
{ "ml_add_java_table_script", KEYWORD_ml_table_java },
{ "ml_add_lang_table_script_chk", KEYWORD_ml_table_chk },
{ "ml_add_connection_script", KEYWORD_ml_conn },
{ "ml_add_lang_connection_script", KEYWORD_ml_conn_lang },
{ "ml_add_dnet_connection_script", KEYWORD_ml_conn_dnet },
{ "ml_add_java_connection_script", KEYWORD_ml_conn_java },
{ "ml_add_lang_conn_script_chk", KEYWORD_ml_conn_chk },
{ "ml_add_property", KEYWORD_ml_prop },
{ "local", KEYWORD_local },
{ "temporary", KEYWORD_temporary },
{ "drop", KEYWORD_drop },
{ "view", KEYWORD_view },
{ "synonym", KEYWORD_synonym },
{ "handler", KEYWORD_handler },
{ "comment", KEYWORD_comment },
{ "create", KEYWORD_create },
{ "go", KEYWORD_go }
};
/*
* FUNCTION DECLARATIONS
*/
/* Recursive calls */
static void parseBlock (tokenInfo *const token, const boolean local);
static void parseDeclare (tokenInfo *const token, const boolean local);
static void parseKeywords (tokenInfo *const token);
static tokenType parseSqlFile (tokenInfo *const token);
/*
* FUNCTION DEFINITIONS
*/
static boolean isIdentChar1 (const int c)
{
/*
* Other databases are less restrictive on the first character of
* an identifier.
* isIdentChar1 is used to identify the first character of an
* identifier, so we are removing some restrictions.
*/
return (boolean)
(isalpha (c) || c == '@' || c == '_' );
}
static boolean isIdentChar (const int c)
{
return (boolean)
(isalpha (c) || isdigit (c) || c == '$' ||
c == '@' || c == '_' || c == '#');
}
static boolean isCmdTerm (tokenInfo *const token)
{
DebugStatement (
debugPrintf (DEBUG_PARSE
, "\n isCmdTerm: token same tt:%d tk:%d\n"
, token->type
, token->keyword
);
);
/*
* Based on the various customer sites I have been at
* the most common command delimiters are
* ;
* ~
* /
* go
* This routine will check for any of these, more
* can easily be added by modifying readToken and
* either adding the character to:
* enum eTokenType
* or adding a keyword (as with "go"), and then
* extending the check in this routine.
*/
return (isType (token, TOKEN_SEMICOLON) ||
isType (token, TOKEN_TILDE) ||
isType (token, TOKEN_FORWARD_SLASH) ||
isKeyword (token, KEYWORD_go));
}
static boolean isMatchedEnd(tokenInfo *const token, int nest_lvl)
{
boolean terminated = FALSE;
/*
* Since different forms of SQL allow the use of
* BEGIN
* ...
* END
* blocks, some statements may not be terminated using
* the standard delimiters:
* ;
* ~
* /
* go
* This routine will check to see if we encounter and END
* for the matching nest level of BEGIN ... END statements.
* If we find one, then we can assume, the statement was terminated
* since we have fallen through to the END statement of the BEGIN
* block.
*/
if ( nest_lvl > 0 && isKeyword (token, KEYWORD_end) )
{
if ( token->begin_end_nest_lvl == nest_lvl )
terminated = TRUE;
}
return terminated;
}
static void buildSqlKeywordHash (void)
{
const size_t count = sizeof (SqlKeywordTable) /
sizeof (SqlKeywordTable [0]);
size_t i;
for (i = 0 ; i < count ; ++i)
{
const keywordDesc* const p = &SqlKeywordTable [i];
addKeyword (p->name, Lang_sql, (int) p->id);
}
}
static tokenInfo *newToken (void)
{
tokenInfo *const token = xMalloc (1, tokenInfo);
token->type = TOKEN_UNDEFINED;
token->keyword = KEYWORD_NONE;
token->string = vStringNew ();
token->scope = vStringNew ();
token->scopeKind = SQLTAG_COUNT;
token->begin_end_nest_lvl = 0;
token->lineNumber = getSourceLineNumber ();
token->filePosition = getInputFilePosition ();
return token;
}
static void deleteToken (tokenInfo *const token)
{
vStringDelete (token->string);
vStringDelete (token->scope);
eFree (token);
}
/*
* Tag generation functions
*/
static void makeSqlTag (tokenInfo *const token, const sqlKind kind)
{
if (SqlKinds [kind].enabled)
{
const char *const name = vStringValue (token->string);
tagEntryInfo e;
initTagEntry (&e, name);
e.lineNumber = token->lineNumber;
e.filePosition = token->filePosition;
e.kindName = SqlKinds [kind].name;
e.kind = SqlKinds [kind].letter;
if (vStringLength (token->scope) > 0)
{
Assert (token->scopeKind < SQLTAG_COUNT);
e.extensionFields.scope[0] = SqlKinds [token->scopeKind].name;
e.extensionFields.scope[1] = vStringValue (token->scope);
}
makeTagEntry (&e);
}
}
/*
* Parsing functions
*/
static void parseString (vString *const string, const int delimiter)
{
boolean end = FALSE;
while (! end)
{
int c = fileGetc ();
if (c == EOF)
end = TRUE;
/*
else if (c == '\\')
{
c = fileGetc(); // This maybe a ' or ". //
vStringPut(string, c);
}
*/
else if (c == delimiter)
end = TRUE;
else
vStringPut (string, c);
}
vStringTerminate (string);
}
/* Read a C identifier beginning with "firstChar" and place it into "name".
*/
static void parseIdentifier (vString *const string, const int firstChar)
{
int c = firstChar;
Assert (isIdentChar1 (c));
do
{
vStringPut (string, c);
c = fileGetc ();
} while (isIdentChar (c));
vStringTerminate (string);
if (!isspace (c))
fileUngetc (c); /* unget non-identifier character */
}
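/* Illustrative trace (not in the original source): for the input
 * "emp_id#2 int", the loop consumes "emp_id#2" into the vString; the
 * terminating space is dropped, whereas a non-space terminator such as
 * '(' would be pushed back for the next readToken to consume. */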
static void readToken (tokenInfo *const token)
{
int c;
token->type = TOKEN_UNDEFINED;
token->keyword = KEYWORD_NONE;
vStringClear (token->string);
getNextChar:
do
{
c = fileGetc ();
token->lineNumber = getSourceLineNumber ();
token->filePosition = getInputFilePosition ();
/*
* Added " to the list of ignores, not sure what this
* might break but it gets by this issue:
* create table "t1" (...)
*
* Darren, the code passes all my tests for both
* Oracle and SQL Anywhere, but maybe you can tell me
* what this may affect.
*/
}
while (c == '\t' || c == ' ' || c == '\n');
switch (c)
{
case EOF: token->type = TOKEN_EOF; break;
case '(': token->type = TOKEN_OPEN_PAREN; break;
case ')': token->type = TOKEN_CLOSE_PAREN; break;
case ':': token->type = TOKEN_COLON; break;
case ';': token->type = TOKEN_SEMICOLON; break;
case '.': token->type = TOKEN_PERIOD; break;
case ',': token->type = TOKEN_COMMA; break;
case '{': token->type = TOKEN_OPEN_CURLY; break;
case '}': token->type = TOKEN_CLOSE_CURLY; break;
case '~': token->type = TOKEN_TILDE; break;
case '[': token->type = TOKEN_OPEN_SQUARE; break;
case ']': token->type = TOKEN_CLOSE_SQUARE; break;
case '=': token->type = TOKEN_EQUAL; break;
case '\'':
case '"':
token->type = TOKEN_STRING;
parseString (token->string, c);
token->lineNumber = getSourceLineNumber ();
token->filePosition = getInputFilePosition ();
break;
case '-':
c = fileGetc ();
if (c == '-') /* -- is this the start of a comment? */
{
fileSkipToCharacter ('\n');
goto getNextChar;
}
else
{
if (!isspace (c))
fileUngetc (c);
token->type = TOKEN_OPERATOR;
}
break;
case '<':
case '>':
{
const int initial = c;
int d = fileGetc ();
if (d == initial)
{
if (initial == '<')
token->type = TOKEN_BLOCK_LABEL_BEGIN;
else
token->type = TOKEN_BLOCK_LABEL_END;
}
else
{
fileUngetc (d);
token->type = TOKEN_UNDEFINED;
}
break;
}
case '\\':
c = fileGetc ();
if (c != '\\' && c != '"' && c != '\'' && !isspace (c))
fileUngetc (c);
token->type = TOKEN_CHARACTER;
token->lineNumber = getSourceLineNumber ();
token->filePosition = getInputFilePosition ();
break;
case '/':
{
int d = fileGetc ();
if ((d != '*') && /* is this the start of a comment? */
(d != '/')) /* is this a one-line comment? */
{
token->type = TOKEN_FORWARD_SLASH;
fileUngetc (d);
}
else
{
if (d == '*')
{
do
{
fileSkipToCharacter ('*');
c = fileGetc ();
if (c == '/')
break;
else
fileUngetc (c);
} while (c != EOF && c != '\0');
goto getNextChar;
}
else if (d == '/') /* is this the start of a comment? */
{
fileSkipToCharacter ('\n');
goto getNextChar;
}
}
break;
}
default:
if (! isIdentChar1 (c))
token->type = TOKEN_UNDEFINED;
else
{
parseIdentifier (token->string, c);
token->lineNumber = getSourceLineNumber ();
token->filePosition = getInputFilePosition ();
token->keyword = analyzeToken (token->string, Lang_sql);
if (isKeyword (token, KEYWORD_rem))
{
vStringClear (token->string);
fileSkipToCharacter ('\n');
goto getNextChar;
}
else if (isKeyword (token, KEYWORD_NONE))
token->type = TOKEN_IDENTIFIER;
else
token->type = TOKEN_KEYWORD;
}
break;
}
}
/*
* reads an identifier, possibly quoted:
* identifier
* "identifier"
* [identifier]
*/
static void readIdentifier (tokenInfo *const token)
{
readToken (token);
if (isType (token, TOKEN_OPEN_SQUARE))
{
tokenInfo *const close_square = newToken ();
readToken (token);
/* eat the closing square bracket */
readToken (close_square);
deleteToken (close_square);
}
}
/*
* Token parsing functions
*/
/*
* static void addContext (tokenInfo* const parent, const tokenInfo* const child)
* {
* if (vStringLength (parent->string) > 0)
* {
* vStringCatS (parent->string, ".");
* }
* vStringCatS (parent->string, vStringValue(child->string));
* vStringTerminate(parent->string);
* }
*/
static void addToScope (tokenInfo* const token, vString* const extra, sqlKind kind)
{
if (vStringLength (token->scope) > 0)
{
vStringCatS (token->scope, ".");
}
vStringCatS (token->scope, vStringValue(extra));
vStringTerminate(token->scope);
token->scopeKind = kind;
}
/*
* Scanning functions
*/
static void findToken (tokenInfo *const token, const tokenType type)
{
while (! isType (token, type) &&
! isType (token, TOKEN_EOF))
{
readToken (token);
}
}
static void findCmdTerm (tokenInfo *const token, const boolean check_first)
{
int begin_end_nest_lvl = token->begin_end_nest_lvl;
if (check_first)
{
if (isCmdTerm(token))
return;
}
do
{
readToken (token);
} while (! isCmdTerm(token) &&
! isMatchedEnd(token, begin_end_nest_lvl) &&
! isType (token, TOKEN_EOF));
}
static void skipToMatched(tokenInfo *const token)
{
int nest_level = 0;
tokenType open_token;
tokenType close_token;
switch (token->type)
{
case TOKEN_OPEN_PAREN:
open_token = TOKEN_OPEN_PAREN;
close_token = TOKEN_CLOSE_PAREN;
break;
case TOKEN_OPEN_CURLY:
open_token = TOKEN_OPEN_CURLY;
close_token = TOKEN_CLOSE_CURLY;
break;
case TOKEN_OPEN_SQUARE:
open_token = TOKEN_OPEN_SQUARE;
close_token = TOKEN_CLOSE_SQUARE;
break;
default:
return;
}
/*
* This routine will skip to a matching closing token.
* It will also handle nested tokens like the (, ) below.
* ( name varchar(30), text binary(10) )
*/
if (isType (token, open_token))
{
nest_level++;
while (nest_level > 0 && !isType (token, TOKEN_EOF))
{
readToken (token);
if (isType (token, open_token))
{
nest_level++;
}
if (isType (token, close_token))
{
if (nest_level > 0)
{
nest_level--;
}
}
}
readToken (token);
}
}
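/* Illustrative trace (not in the original source): for the input
 * "( name varchar(30), text binary(10) )", nest_level rises to 2 at each
 * inner "(", returns to 0 at the final ")", and one more token is then
 * read so the caller continues past the closing parenthesis. */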
static void copyToken (tokenInfo *const dest, tokenInfo *const src)
{
dest->lineNumber = src->lineNumber;
dest->filePosition = src->filePosition;
dest->type = src->type;
dest->keyword = src->keyword;
vStringCopy(dest->string, src->string);
vStringCopy(dest->scope, src->scope);
dest->scopeKind = src->scopeKind;
}
static void skipArgumentList (tokenInfo *const token)
{
/*
* Other databases can have arguments with fully declared
* datatypes:
* ( name varchar(30), text binary(10) )
* So we must check for nested open and closing parentheses
*/
if (isType (token, TOKEN_OPEN_PAREN)) /* arguments? */
{
skipToMatched (token);
}
}
static void parseSubProgram (tokenInfo *const token)
{
tokenInfo *const name = newToken ();
vString * saveScope = vStringNew ();
sqlKind saveScopeKind;
/*
* This must handle both prototypes and the body of
* the procedures.
*
* Prototype:
* FUNCTION func_name RETURN integer;
* PROCEDURE proc_name( parameters );
* Procedure
* FUNCTION GET_ML_USERNAME RETURN VARCHAR2
* IS
* BEGIN
* RETURN v_sync_user_id;
* END GET_ML_USERNAME;
*
* PROCEDURE proc_name( parameters )
* IS
* BEGIN
* END;
* CREATE PROCEDURE proc_name( parameters )
* EXTERNAL NAME ... ;
* CREATE PROCEDURE proc_name( parameters )
* BEGIN
* END;
*
* CREATE FUNCTION f_GetClassName(
* IN @object VARCHAR(128)
* ,IN @code VARCHAR(128)
* )
* RETURNS VARCHAR(200)
* DETERMINISTIC
* BEGIN
*
* IF( @object = 'user_state' ) THEN
* SET something = something;
* END IF;
*
* RETURN @name;
* END;
*
* Note, a Package adds scope to the items within.
* create or replace package demo_pkg is
* test_var number;
* function test_func return varchar2;
* function more.test_func2 return varchar2;
* end demo_pkg;
* So the tags generated here, contain the package name:
* demo_pkg.test_var
* demo_pkg.test_func
* demo_pkg.more.test_func2
*/
const sqlKind kind = isKeyword (token, KEYWORD_function) ?
SQLTAG_FUNCTION : SQLTAG_PROCEDURE;
Assert (isKeyword (token, KEYWORD_function) ||
isKeyword (token, KEYWORD_procedure));
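/* Save the enclosing scope so it can be restored on exit; a nested
 * sub-program temporarily extends token->scope while it is parsed. */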
vStringCopy(saveScope, token->scope);
saveScopeKind = token->scopeKind;
readToken (token);
copyToken (name, token);
readToken (token);
if (isType (token, TOKEN_PERIOD))
{
/*
* If this is an Oracle package, then the token->scope should
* already be set. If this is the case, also add this value to the
* scope.
* If this is not an Oracle package, chances are the scope should be
* blank and the value just read is the OWNER or CREATOR of the
* function and should not be considered part of the scope.
*/
if (vStringLength(saveScope) > 0)
{
addToScope(token, name->string, kind);
}
readToken (token);
copyToken (name, token);
readToken (token);
}
if (isType (token, TOKEN_OPEN_PAREN))
{
/* Reads to the next token after the TOKEN_CLOSE_PAREN */
skipArgumentList(token);
}
if (kind == SQLTAG_FUNCTION)
{
if (isKeyword (token, KEYWORD_return) ||
isKeyword (token, KEYWORD_returns))
{
/* Read datatype */
readToken (token);
/*
* Read the next token, which could be the
* command terminator (if this is a prototype)
* or an open parenthesis
*/
readToken (token);
if (isType (token, TOKEN_OPEN_PAREN))
{
/* Reads to the next token after the TOKEN_CLOSE_PAREN */
skipArgumentList(token);
}
}
}
if (isCmdTerm (token))
{
makeSqlTag (name, SQLTAG_PROTOTYPE);
}
else
{
while (! isKeyword (token, KEYWORD_is) &&
! isKeyword (token, KEYWORD_begin) &&
! isKeyword (token, KEYWORD_at) &&
! isKeyword (token, KEYWORD_internal) &&
! isKeyword (token, KEYWORD_external) &&
! isKeyword (token, KEYWORD_url) &&
! isType (token, TOKEN_EQUAL) &&
! isType (token, TOKEN_EOF) &&
! isCmdTerm (token))
{
if (isKeyword (token, KEYWORD_result))
{
readToken (token);
if (isType (token, TOKEN_OPEN_PAREN))
{
/* Reads to the next token after the TOKEN_CLOSE_PAREN */
skipArgumentList(token);
}
} else {
readToken (token);
}
}
if (isKeyword (token, KEYWORD_at) ||
isKeyword (token, KEYWORD_url) ||
isKeyword (token, KEYWORD_internal) ||
isKeyword (token, KEYWORD_external))
{
addToScope(token, name->string, kind);
if (isType (name, TOKEN_IDENTIFIER) ||
isType (name, TOKEN_STRING) ||
isType (name, TOKEN_KEYWORD))
{
makeSqlTag (name, kind);
}
vStringClear (token->scope);
token->scopeKind = SQLTAG_COUNT;
}
if (isType (token, TOKEN_EQUAL))
readToken (token);
if (isKeyword (token, KEYWORD_declare))
parseDeclare (token, FALSE);
if (isKeyword (token, KEYWORD_is) ||
isKeyword (token, KEYWORD_begin))
{
addToScope(token, name->string, kind);
if (isType (name, TOKEN_IDENTIFIER) ||
isType (name, TOKEN_STRING) ||
isType (name, TOKEN_KEYWORD))
{
makeSqlTag (name, kind);
}
parseBlock (token, TRUE);
vStringClear (token->scope);
token->scopeKind = SQLTAG_COUNT;
}
}
vStringCopy(token->scope, saveScope);
token->scopeKind = saveScopeKind;
deleteToken (name);
vStringDelete(saveScope);
}
static void parseRecord (tokenInfo *const token)
{
/*
* Make it a bit forgiving, this is called from
* multiple functions, parseTable, parseType
*/
if (!isType (token, TOKEN_OPEN_PAREN))
readToken (token);
Assert (isType (token, TOKEN_OPEN_PAREN));
do
{
if (isType (token, TOKEN_COMMA) ||
isType (token, TOKEN_OPEN_PAREN))
{
readToken (token);
}
/*
* Create table statements can end with various constraints
* which must be excluded from the SQLTAG_FIELD.
* create table t1 (
* c1 integer,
* c2 char(30),
* c3 numeric(10,5),
* c4 integer,
* constraint whatever,
* primary key(c1),
* foreign key (),
* check ()
* )
*/
if (! isKeyword(token, KEYWORD_primary) &&
! isKeyword(token, KEYWORD_references) &&
! isKeyword(token, KEYWORD_unique) &&
! isKeyword(token, KEYWORD_check) &&
! isKeyword(token, KEYWORD_constraint) &&
! isKeyword(token, KEYWORD_foreign))
{
/* keyword test above is redundant as only a TOKEN_KEYWORD could
* match any isKeyword() anyway */
if (isType (token, TOKEN_IDENTIFIER) ||
isType (token, TOKEN_STRING))
{
makeSqlTag (token, SQLTAG_FIELD);
}
}
while (! isType (token, TOKEN_COMMA) &&
! isType (token, TOKEN_CLOSE_PAREN) &&
! isType (token, TOKEN_OPEN_PAREN) &&
! isType (token, TOKEN_EOF))
{
readToken (token);
/*
* A table structure can look like this:
* create table t1 (
* c1 integer,
* c2 char(30),
* c3 numeric(10,5),
* c4 integer
* )
* We can't just look for a COMMA or CLOSE_PAREN
* since that will not deal with the numeric(10,5)
* case. So we need to skip the argument list
* when we find an open paren.
*/
if (isType (token, TOKEN_OPEN_PAREN))
{
/* Reads to the next token after the TOKEN_CLOSE_PAREN */
skipArgumentList(token);
}
}
} while (! isType (token, TOKEN_CLOSE_PAREN) &&
! isType (token, TOKEN_EOF));
}
static void parseType (tokenInfo *const token)
{
tokenInfo *const name = newToken ();
vString * saveScope = vStringNew ();
sqlKind saveScopeKind;
vStringCopy(saveScope, token->scope);
/* If a scope has been set, add it to the name */
addToScope (name, token->scope, token->scopeKind);
saveScopeKind = token->scopeKind;
readToken (name);
if (isType (name, TOKEN_IDENTIFIER))
{
readToken (token);
if (isKeyword (token, KEYWORD_is))
{
readToken (token);
switch (token->keyword)
{
case KEYWORD_record:
case KEYWORD_object:
makeSqlTag (name, SQLTAG_RECORD);
addToScope (token, name->string, SQLTAG_RECORD);
parseRecord (token);
break;
case KEYWORD_table:
makeSqlTag (name, SQLTAG_TABLE);
break;
case KEYWORD_ref:
readToken (token);
if (isKeyword (token, KEYWORD_cursor))
makeSqlTag (name, SQLTAG_CURSOR);
break;
default: break;
}
vStringClear (token->scope);
token->scopeKind = SQLTAG_COUNT;
}
}
vStringCopy(token->scope, saveScope);
token->scopeKind = saveScopeKind;
deleteToken (name);
vStringDelete(saveScope);
}
static void parseSimple (tokenInfo *const token, const sqlKind kind)
{
/* This will simply make the tagname from the first word found */
readToken (token);
if (isType (token, TOKEN_IDENTIFIER) ||
isType (token, TOKEN_STRING))
{
makeSqlTag (token, kind);
}
}
static void parseDeclare (tokenInfo *const token, const boolean local)
{
/*
* PL/SQL declares are of this format:
* IS|AS
* [declare]
* CURSOR curname ...
* varname1 datatype;
* varname2 datatype;
* varname3 datatype;
* begin
*/
if (isKeyword (token, KEYWORD_declare))
readToken (token);
while (! isKeyword (token, KEYWORD_begin) &&
! isKeyword (token, KEYWORD_end) &&
! isType (token, TOKEN_EOF))
{
switch (token->keyword)
{
case KEYWORD_cursor: parseSimple (token, SQLTAG_CURSOR); break;
case KEYWORD_function: parseSubProgram (token); break;
case KEYWORD_procedure: parseSubProgram (token); break;
case KEYWORD_subtype: parseSimple (token, SQLTAG_SUBTYPE); break;
case KEYWORD_trigger: parseSimple (token, SQLTAG_TRIGGER); break;
case KEYWORD_type: parseType (token); break;
default:
if (isType (token, TOKEN_IDENTIFIER))
{
if (local)
{
makeSqlTag (token, SQLTAG_LOCAL_VARIABLE);
}
else
{
makeSqlTag (token, SQLTAG_VARIABLE);
}
}
break;
}
findToken (token, TOKEN_SEMICOLON);
readToken (token);
}
}
static void parseDeclareANSI (tokenInfo *const token, const boolean local)
{
tokenInfo *const type = newToken ();
/*
* ANSI declares are of this format:
* BEGIN
* DECLARE varname1 datatype;
* DECLARE varname2 datatype;
* ...
*
* This differs from PL/SQL, where DECLARE precedes the BEGIN block
* and the DECLARE keyword is not repeated.
*/
while (isKeyword (token, KEYWORD_declare))
{
readToken (token);
readToken (type);
if (isKeyword (type, KEYWORD_cursor))
makeSqlTag (token, SQLTAG_CURSOR);
else if (isKeyword (token, KEYWORD_local) &&
isKeyword (type, KEYWORD_temporary))
{
/*
* DECLARE LOCAL TEMPORARY TABLE table_name (
* c1 int,
* c2 int
* );
*/
readToken (token);
if (isKeyword (token, KEYWORD_table))
{
readToken (token);
if (isType(token, TOKEN_IDENTIFIER) ||
isType(token, TOKEN_STRING))
{
makeSqlTag (token, SQLTAG_TABLE);
}
}
}
else if (isType (token, TOKEN_IDENTIFIER) ||
isType (token, TOKEN_STRING))
{
if (local)
makeSqlTag (token, SQLTAG_LOCAL_VARIABLE);
else
makeSqlTag (token, SQLTAG_VARIABLE);
}
findToken (token, TOKEN_SEMICOLON);
readToken (token);
}
deleteToken (type);
}
static void parseLabel (tokenInfo *const token)
{
/*
* A label has this format:
* <<tobacco_dependency>>
* DECLARE
* v_senator VARCHAR2(100) := 'THURMOND, JESSE';
* BEGIN
* IF total_contributions (v_senator, 'TOBACCO') > 25000
* THEN
* <<alcohol_dependency>>
* DECLARE
* v_senator VARCHAR2(100) := 'WHATEVERIT, TAKES';
* BEGIN
* ...
*/
Assert (isType (token, TOKEN_BLOCK_LABEL_BEGIN));
readToken (token);
if (isType (token, TOKEN_IDENTIFIER))
{
makeSqlTag (token, SQLTAG_BLOCK_LABEL);
readToken (token); /* read end of label */
}
}
static void parseStatements (tokenInfo *const token, const boolean exit_on_endif )
{
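/* exit_on_endif: when set, return as soon as a bare ENDIF keyword is
 * seen, so the caller (the IF handler) can consume it itself. */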
/* boolean isAnsi = TRUE; */
boolean stmtTerm = FALSE;
do
{
if (isType (token, TOKEN_BLOCK_LABEL_BEGIN))
parseLabel (token);
else
{
switch (token->keyword)
{
case KEYWORD_exception:
/*
* EXCEPTION
* <exception handler>;
*
* Where an exception handler could be:
* BEGIN
* WHEN OTHERS THEN
* x := x + 3;
* END;
* In this case we need to skip this keyword and
* move on to the next token without reading until
* TOKEN_SEMICOLON;
*/
readToken (token);
continue;
case KEYWORD_when:
/*
* WHEN statements can be used in exception clauses
* and CASE statements. The CASE handling below already
* skips these, since we skip ahead to an END statement.
* But for an exception clause, we can have:
* EXCEPTION
* WHEN OTHERS THEN
* BEGIN
* x := x + 3;
* END;
* If we skip to the TOKEN_SEMICOLON, we miss the begin
* of a nested BEGIN END block. So read the next token
* after the THEN and restart the LOOP.
*/
while (! isKeyword (token, KEYWORD_then) &&
! isType (token, TOKEN_EOF))
readToken (token);
readToken (token);
continue;
case KEYWORD_if:
/*
* We do not want to look for a ; since for an empty
* IF block, it would skip over the END.
* IF...THEN
* END IF;
*
* IF...THEN
* ELSE
* END IF;
*
* IF...THEN
* ELSEIF...THEN
* ELSE
* END IF;
*
* or non-ANSI
* IF ...
* BEGIN
* END
*/
while (! isKeyword (token, KEYWORD_then) &&
! isKeyword (token, KEYWORD_begin) &&
! isType (token, TOKEN_EOF))
{
readToken (token);
}
if (isKeyword (token, KEYWORD_begin))
{
/* isAnsi = FALSE; */
parseBlock(token, FALSE);
/*
* Handle the non-ANSI IF blocks.
* parseBlock consumes the END, so if the next
* token is a command terminator (like GO)
* we know we are done with this statement.
*/
if (isCmdTerm (token))
stmtTerm = TRUE;
}
else
{
readToken (token);
while (! isKeyword (token, KEYWORD_end) &&
! isKeyword (token, KEYWORD_endif) &&
! isType (token, TOKEN_EOF))
{
if (isKeyword (token, KEYWORD_else) ||
isKeyword (token, KEYWORD_elseif))
{
readToken (token);
}
parseStatements (token, TRUE);
if (isCmdTerm(token))
readToken (token);
}
/*
* parseStatements returns when it finds an END; an IF
* should follow the END for ANSI syntax anyway.
* IF...THEN
* END IF;
*/
if (isKeyword (token, KEYWORD_end))
readToken (token);
if (isKeyword (token, KEYWORD_if) ||
isKeyword (token, KEYWORD_endif))
{
readToken (token);
if (isCmdTerm(token))
stmtTerm = TRUE;
}
else
{
/*
* Well we need to do something here.
* There are lots of different END statements
* END;
* END CASE;
* ENDIF;
* ENDCASE;
*/
}
}
break;
case KEYWORD_loop:
case KEYWORD_case:
case KEYWORD_for:
/*
* LOOP...
* END LOOP;
*
* CASE
* WHEN '1' THEN
* END CASE;
*
* FOR loop_name AS cursor_name CURSOR FOR ...
* DO
* END FOR;
*/
if (isKeyword (token, KEYWORD_for))
{
/* loop name */
readToken (token);
/* AS */
readToken (token);
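/* Note: the loop body below returns on its first pass, so this
 * "while" effectively acts as a single "if" guard. */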
while (! isKeyword (token, KEYWORD_is) &&
! isType (token, TOKEN_EOF))
{
/*
* If this is not an AS keyword this is
* not a proper FOR statement and should
* simply be ignored
*/
return;
}
while (! isKeyword (token, KEYWORD_do) &&
! isType (token, TOKEN_EOF))
readToken (token);
}
readToken (token);
while (! isKeyword (token, KEYWORD_end) &&
! isType (token, TOKEN_EOF))
{
/*
if ( isKeyword (token, KEYWORD_else) ||
isKeyword (token, KEYWORD_elseif) )
readToken (token);
*/
parseStatements (token, FALSE);
if (isCmdTerm(token))
readToken (token);
}
if (isKeyword (token, KEYWORD_end ))
readToken (token);
/*
* Typically ended with
* END LOOP [loop name];
* END CASE
* END FOR [loop name];
*/
if (isKeyword (token, KEYWORD_loop) ||
isKeyword (token, KEYWORD_case) ||
isKeyword (token, KEYWORD_for))
{
readToken (token);
}
if (isCmdTerm(token))
stmtTerm = TRUE;
break;
case KEYWORD_create:
readToken (token);
parseKeywords(token);
break;
case KEYWORD_declare:
case KEYWORD_begin:
parseBlock (token, TRUE);
break;
case KEYWORD_end:
break;
default:
readToken (token);
break;
}
/*
* Not all statements must end in a semi-colon
* begin
* if current publisher <> 'publish' then
* signal UE_FailStatement
* end if
* end;
* The last statement prior to an end ("signal" above) does
* not need a semi-colon, nor does the end if, since it is
* also the last statement prior to the end of the block.
*
* So we must read to the first semi-colon or an END block
*/
while (! stmtTerm &&
! isKeyword (token, KEYWORD_end) &&
! isCmdTerm(token) &&
! isType(token, TOKEN_EOF))
{
if (exit_on_endif && isKeyword (token, KEYWORD_endif))
return;
if (isType (token, TOKEN_COLON) )
{
/*
* A : can signal a loop name
* myloop:
* LOOP
* LEAVE myloop;
* END LOOP;
* Unfortunately, labels do not have a
* cmd terminator, therefore we have to check
* if the next token is a keyword and process
* it accordingly.
*/
readToken (token);
if (isKeyword (token, KEYWORD_loop) ||
isKeyword (token, KEYWORD_while) ||
isKeyword (token, KEYWORD_for))
{
/* parseStatements (token); */
return;
}
}
readToken (token);
if (isType (token, TOKEN_OPEN_PAREN) ||
isType (token, TOKEN_OPEN_CURLY) ||
isType (token, TOKEN_OPEN_SQUARE))
{
skipToMatched (token);
}
/*
* Since we know how to parse various statements
* if we detect them, parse them to completion
*/
if (isType (token, TOKEN_BLOCK_LABEL_BEGIN) ||
isKeyword (token, KEYWORD_exception) ||
isKeyword (token, KEYWORD_loop) ||
isKeyword (token, KEYWORD_case) ||
isKeyword (token, KEYWORD_for) ||
isKeyword (token, KEYWORD_begin))
{
parseStatements (token, FALSE);
}
else if (isKeyword (token, KEYWORD_if))
parseStatements (token, TRUE);
}
}
/*
* We assumed earlier that all statements end with a command terminator.
* See the comment above; here we only flag the statement as
* terminated if the current token is a command terminator.
*/
if (isCmdTerm(token) && ! stmtTerm)
stmtTerm = TRUE;
} while (! isKeyword (token, KEYWORD_end) &&
! (exit_on_endif && isKeyword (token, KEYWORD_endif) ) &&
! isType (token, TOKEN_EOF) &&
! stmtTerm );
}
static void parseBlock (tokenInfo *const token, const boolean local)
{
if (isType (token, TOKEN_BLOCK_LABEL_BEGIN))
{
parseLabel (token);
readToken (token);
}
if (! isKeyword (token, KEYWORD_begin))
{
readToken (token);
/*
* These are Oracle style declares which generally come
* between an IS/AS and BEGIN block.
*/
parseDeclare (token, local);
}
if (isKeyword (token, KEYWORD_begin))
{
readToken (token);
/*
* Check for ANSI declarations which always follow
* a BEGIN statement. This routine will not advance
* the token if none are found.
*/
parseDeclareANSI (token, local);
token->begin_end_nest_lvl++;
while (! isKeyword (token, KEYWORD_end) &&
! isType (token, TOKEN_EOF))
{
parseStatements (token, FALSE);
if (isCmdTerm(token))
readToken (token);
}
token->begin_end_nest_lvl--;
/*
* Read the next token (we will assume
* it is the command delimiter)
*/
readToken (token);
/*
* Check if the END block is terminated
*/
if (! isCmdTerm (token))
{
/*
* Not sure what to do here at the moment.
* I think the routine that calls parseBlock
* must expect the next token has already
* been read since it is possible this
* token is not a command delimiter.
*/
/* findCmdTerm (token, FALSE); */
}
}
}
static void parsePackage (tokenInfo *const token)
{
/*
* Packages can be specified in a number of ways:
* CREATE OR REPLACE PACKAGE pkg_name AS
* or
* CREATE OR REPLACE PACKAGE owner.pkg_name AS
* or by specifying a package body
* CREATE OR REPLACE PACKAGE BODY pkg_name AS
* CREATE OR REPLACE PACKAGE BODY owner.pkg_name AS
*/
tokenInfo *const name = newToken ();
readIdentifier (name);
if (isKeyword (name, KEYWORD_body))
{
/*
* Ignore the BODY tag since we will process
* the body or prototypes in the same manner
*/
readIdentifier (name);
}
/* Check for owner.pkg_name */
while (! isKeyword (token, KEYWORD_is) &&
! isType (token, TOKEN_EOF))
{
readToken (token);
if ( isType(token, TOKEN_PERIOD) )
{
readIdentifier (name);
}
}
if (isKeyword (token, KEYWORD_is))
{
if (isType (name, TOKEN_IDENTIFIER) ||
isType (name, TOKEN_STRING))
{
makeSqlTag (name, SQLTAG_PACKAGE);
}
addToScope (token, name->string, SQLTAG_PACKAGE);
parseBlock (token, FALSE);
vStringClear (token->scope);
token->scopeKind = SQLTAG_COUNT;
}
findCmdTerm (token, FALSE);
deleteToken (name);
}
static void parseTable (tokenInfo *const token)
{
tokenInfo *const name = newToken ();
/*
* This deals with these formats:
* create table t1 (c1 int);
* create global temporary table t2 (c1 int);
* create table "t3" (c1 int);
* create table bob.t4 (c1 int);
* create table bob."t5" (c1 int);
* create table "bob"."t6" (c1 int);
* create table bob."t7" (c1 int);
* Proxy tables use this format:
* create existing table bob."t7" AT '...';
* SQL Server and Sybase formats
* create table OnlyTable (
* create table dbo.HasOwner (
* create table [dbo].[HasOwnerSquare] (
* create table master.dbo.HasDb (
* create table master..HasDbNoOwner (
* create table [master].dbo.[HasDbAndOwnerSquare] (
* create table [master]..[HasDbNoOwnerSquare] (
*/
/* This could be a database, owner or table name */
readIdentifier (name);
readToken (token);
if (isType (token, TOKEN_PERIOD))
{
/*
* This could be an owner or a table name.
* But this is also a special case since the table can be
* referenced with a blank owner:
* dbname..tablename
*/
readIdentifier (name);
/* Check if a blank name was provided */
if (isType (name, TOKEN_PERIOD))
{
readIdentifier (name);
}
readToken (token);
if (isType (token, TOKEN_PERIOD))
{
/* This can only be the table name */
readIdentifier (name);
readToken (token);
}
}
if (isType (token, TOKEN_OPEN_PAREN))
{
if (isType (name, TOKEN_IDENTIFIER) ||
isType (name, TOKEN_STRING))
{
makeSqlTag (name, SQLTAG_TABLE);
vStringCopy(token->scope, name->string);
token->scopeKind = SQLTAG_TABLE;
parseRecord (token);
vStringClear (token->scope);
token->scopeKind = SQLTAG_COUNT;
}
}
else if (isKeyword (token, KEYWORD_at))
{
if (isType (name, TOKEN_IDENTIFIER))
{
makeSqlTag (name, SQLTAG_TABLE);
}
}
findCmdTerm (token, FALSE);
deleteToken (name);
}
static void parseIndex (tokenInfo *const token)
{
tokenInfo *const name = newToken ();
tokenInfo *const owner = newToken ();
/*
* This deals with these formats
* create index i1 on t1(c1)
* create index "i2" on t1(c1)
* create virtual unique clustered index "i3" on t1(c1)
* create unique clustered index "i4" on t1(c1)
* create clustered index "i5" on t1(c1)
* create bitmap index "i6" on t1(c1)
*/
readIdentifier (name);
readToken (token);
if (isType (token, TOKEN_PERIOD))
{
readIdentifier (name);
readToken (token);
}
if (isKeyword (token, KEYWORD_on) &&
(isType (name, TOKEN_IDENTIFIER) ||
isType (name, TOKEN_STRING)))
{
readIdentifier (owner);
readToken (token);
if (isType (token, TOKEN_PERIOD))
{
readIdentifier (owner);
readToken (token);
}
addToScope(name, owner->string, SQLTAG_TABLE /* FIXME? */);
makeSqlTag (name, SQLTAG_INDEX);
}
findCmdTerm (token, FALSE);
deleteToken (name);
deleteToken (owner);
}
static void parseEvent (tokenInfo *const token)
{
tokenInfo *const name = newToken ();
/*
* This deals with these formats
* create event e1 handler begin end;
* create event "e2" handler begin end;
* create event dba."e3" handler begin end;
* create event "dba"."e4" handler begin end;
*/
readIdentifier (name);
readToken (token);
if (isType (token, TOKEN_PERIOD))
{
readIdentifier (name);
}
while (! isKeyword (token, KEYWORD_handler) &&
! isType (token, TOKEN_SEMICOLON) &&
! isType (token, TOKEN_EOF))
{
readToken (token);
}
if (isKeyword (token, KEYWORD_handler) ||
isType (token, TOKEN_SEMICOLON))
{
makeSqlTag (name, SQLTAG_EVENT);
}
if (isKeyword (token, KEYWORD_handler))
{
readToken (token);
if (isKeyword (token, KEYWORD_begin))
{
parseBlock (token, TRUE);
}
findCmdTerm (token, TRUE);
}
deleteToken (name);
}
static void parseTrigger (tokenInfo *const token)
{
tokenInfo *const name = newToken ();
tokenInfo *const table = newToken ();
/*
* This deals with these formats
* create or replace trigger tr1 begin end;
* create trigger "tr2" begin end;
* drop trigger "droptr1";
* create trigger "tr3" CALL sp_something();
* create trigger "owner"."tr4" begin end;
* create trigger "tr5" not valid;
* create trigger "tr6" begin end;
*/
readIdentifier (name);
readToken (token);
if (isType (token, TOKEN_PERIOD))
{
readIdentifier (name);
readToken (token);
}
while (! isKeyword (token, KEYWORD_on) &&
! isType (token, TOKEN_EOF) &&
! isCmdTerm (token))
{
readToken (token);
}
/*if (! isType (token, TOKEN_SEMICOLON) ) */
if (! isCmdTerm (token))
{
readToken (table);
readToken (token);
if (isType (token, TOKEN_PERIOD))
{
readToken (table);
readToken (token);
}
while (! isKeyword (token, KEYWORD_begin) &&
! isKeyword (token, KEYWORD_call) &&
! isCmdTerm (token) &&
! isType (token, TOKEN_EOF))
{
if (isKeyword (token, KEYWORD_declare))
{
addToScope(token, name->string, SQLTAG_TRIGGER);
parseDeclare(token, TRUE);
vStringClear(token->scope);
token->scopeKind = SQLTAG_COUNT;
}
else
readToken (token);
}
if (isKeyword (token, KEYWORD_begin) ||
isKeyword (token, KEYWORD_call))
{
addToScope(name, table->string, SQLTAG_TABLE);
makeSqlTag (name, SQLTAG_TRIGGER);
addToScope(token, table->string, SQLTAG_TABLE);
if (isKeyword (token, KEYWORD_begin))
{
parseBlock (token, TRUE);
}
vStringClear(token->scope);
token->scopeKind = SQLTAG_COUNT;
}
}
findCmdTerm (token, TRUE);
deleteToken (name);
deleteToken (table);
}
static void parsePublication (tokenInfo *const token)
{
tokenInfo *const name = newToken ();
/*
* This deals with these formats
* create or replace publication pu1 ()
* create publication "pu2" ()
* create publication dba."pu3" ()
* create publication "dba"."pu4" ()
*/
readIdentifier (name);
readToken (token);
if (isType (token, TOKEN_PERIOD))
{
readIdentifier (name);
readToken (token);
}
if (isType (token, TOKEN_OPEN_PAREN))
{
if (isType (name, TOKEN_IDENTIFIER) ||
isType (name, TOKEN_STRING))
{
makeSqlTag (name, SQLTAG_PUBLICATION);
}
}
findCmdTerm (token, FALSE);
deleteToken (name);
}
static void parseService (tokenInfo *const token)
{
tokenInfo *const name = newToken ();
/*
* This deals with these formats
* CREATE SERVICE s1 TYPE 'HTML'
* AUTHORIZATION OFF USER DBA AS
* SELECT *
* FROM SYS.SYSTABLE;
* CREATE SERVICE "s2" TYPE 'HTML'
* AUTHORIZATION OFF USER DBA AS
* CALL sp_Something();
*/
readIdentifier (name);
readToken (token);
if (isKeyword (token, KEYWORD_type))
{
if (isType (name, TOKEN_IDENTIFIER) ||
isType (name, TOKEN_STRING))
{
makeSqlTag (name, SQLTAG_SERVICE);
}
}
findCmdTerm (token, FALSE);
deleteToken (name);
}
static void parseDomain (tokenInfo *const token)
{
tokenInfo *const name = newToken ();
/*
* This deals with these formats
* CREATE DOMAIN|DATATYPE [AS] your_name ...;
*/
readIdentifier (name);
if (isKeyword (name, KEYWORD_is))
{
readIdentifier (name);
}
readToken (token);
if (isType (name, TOKEN_IDENTIFIER) ||
isType (name, TOKEN_STRING))
{
makeSqlTag (name, SQLTAG_DOMAIN);
}
findCmdTerm (token, FALSE);
deleteToken (name);
}
static void parseDrop (tokenInfo *const token)
{
/*
* This deals with these formats
* DROP TABLE|PROCEDURE|DOMAIN|DATATYPE name;
*
* Just simply skip over these statements.
* They are often confused with PROCEDURE prototypes
* since the syntax is similar; skipping them here effectively
* deals with the issue for all types.
*/
findCmdTerm (token, FALSE);
}
static void parseVariable (tokenInfo *const token)
{
tokenInfo *const name = newToken ();
/*
* This deals with these formats
* create variable varname1 integer;
* create variable @varname2 integer;
* create variable "varname3" integer;
* drop variable @varname3;
*/
readIdentifier (name);
readToken (token);
if (! isType (token, TOKEN_SEMICOLON) &&
(isType (name, TOKEN_IDENTIFIER) ||
isType (name, TOKEN_STRING)))
{
makeSqlTag (name, SQLTAG_VARIABLE);
}
findCmdTerm (token, TRUE);
deleteToken (name);
}
static void parseSynonym (tokenInfo *const token)
{
tokenInfo *const name = newToken ();
/*
* This deals with these formats
* create synonym syn1 for mytable;
* create synonym "syn2" for dba.mytable;
*/
readIdentifier (name);
readToken (token);
if (isKeyword (token, KEYWORD_for) &&
(isType (name, TOKEN_IDENTIFIER) ||
isType (name, TOKEN_STRING)))
{
makeSqlTag (name, SQLTAG_SYNONYM);
}
findCmdTerm (token, TRUE);
deleteToken (name);
}
static void parseView (tokenInfo *const token)
{
tokenInfo *const name = newToken ();
/*
* This deals with these formats
* create view v1 as select * from t1;
* create view dba.v2 (c1, c2) as select * from t1;
*/
readIdentifier (name);
readToken (token);
if (isType (token, TOKEN_PERIOD))
{
readIdentifier (name);
readToken (token);
}
if (isType (token, TOKEN_OPEN_PAREN))
{
skipArgumentList(token);
}
while (! isKeyword (token, KEYWORD_is) &&
! isType (token, TOKEN_SEMICOLON) &&
! isType (token, TOKEN_EOF))
{
readToken (token);
}
if (isKeyword (token, KEYWORD_is) &&
(isType (name, TOKEN_IDENTIFIER) ||
isType (name, TOKEN_STRING)))
{
makeSqlTag (name, SQLTAG_VIEW);
}
findCmdTerm (token, TRUE);
deleteToken (name);
}
static void parseMLTable (tokenInfo *const token)
{
tokenInfo *const version = newToken ();
tokenInfo *const table = newToken ();
tokenInfo *const event = newToken ();
/*
* This deals with these formats
* call dbo.ml_add_table_script( 'version', 'table_name', 'event',
* 'some SQL statement'
* );
*/
readToken (token);
if (isType (token, TOKEN_OPEN_PAREN))
{
readToken (version);
readToken (token);
while (! isType (token, TOKEN_COMMA) &&
! isType (token, TOKEN_CLOSE_PAREN) &&
! isType (token, TOKEN_EOF))
{
readToken (token);
}
if (isType (token, TOKEN_COMMA))
{
readToken (table);
readToken (token);
while (! isType (token, TOKEN_COMMA) &&
! isType (token, TOKEN_CLOSE_PAREN) &&
! isType (token, TOKEN_EOF))
{
readToken (token);
}
if (isType (token, TOKEN_COMMA))
{
readToken (event);
if (isType (version, TOKEN_STRING) &&
isType (table, TOKEN_STRING) &&
isType (event, TOKEN_STRING))
{
addToScope(version, table->string, SQLTAG_TABLE);
addToScope(version, event->string, SQLTAG_EVENT);
makeSqlTag (version, SQLTAG_MLTABLE);
}
}
if (! isType (token, TOKEN_CLOSE_PAREN))
findToken (token, TOKEN_CLOSE_PAREN);
}
}
findCmdTerm (token, TRUE);
deleteToken (version);
deleteToken (table);
deleteToken (event);
}
static void parseMLConn (tokenInfo *const token)
{
tokenInfo *const version = newToken ();
tokenInfo *const event = newToken ();
/*
* This deals with these formats
* call ml_add_connection_script( 'version', 'event',
* 'some SQL statement'
* );
*/
readToken (token);
if (isType (token, TOKEN_OPEN_PAREN))
{
readToken (version);
readToken (token);
while (! isType (token, TOKEN_COMMA) &&
! isType (token, TOKEN_CLOSE_PAREN) &&
! isType (token, TOKEN_EOF))
{
readToken (token);
}
if (isType (token, TOKEN_COMMA))
{
readToken (event);
if (isType (version, TOKEN_STRING) &&
isType (event, TOKEN_STRING))
{
addToScope(version, event->string, SQLTAG_EVENT);
makeSqlTag (version, SQLTAG_MLCONN);
}
}
if (! isType (token, TOKEN_CLOSE_PAREN))
findToken (token, TOKEN_CLOSE_PAREN);
}
findCmdTerm (token, TRUE);
deleteToken (version);
deleteToken (event);
}
static void parseMLProp (tokenInfo *const token)
{
tokenInfo *const component = newToken ();
tokenInfo *const prop_set_name = newToken ();
tokenInfo *const prop_name = newToken ();
/*
* This deals with these formats
* ml_add_property (
* 'comp_name',
* 'prop_set_name',
* 'prop_name',
* 'prop_value'
* )
*/
readToken (token);
if (isType (token, TOKEN_OPEN_PAREN))
{
readToken (component);
readToken (token);
while (! isType (token, TOKEN_COMMA) &&
! isType (token, TOKEN_CLOSE_PAREN) &&
! isType (token, TOKEN_EOF))
{
readToken (token);
}
if (isType (token, TOKEN_COMMA))
{
readToken (prop_set_name);
readToken (token);
while (! isType (token, TOKEN_COMMA) &&
! isType (token, TOKEN_CLOSE_PAREN) &&
! isType (token, TOKEN_EOF))
{
readToken (token);
}
if (isType (token, TOKEN_COMMA))
{
readToken (prop_name);
if (isType (component, TOKEN_STRING) &&
isType (prop_set_name, TOKEN_STRING) &&
isType (prop_name, TOKEN_STRING))
{
addToScope(component, prop_set_name->string, SQLTAG_MLPROP /* FIXME */);
addToScope(component, prop_name->string, SQLTAG_MLPROP /* FIXME */);
makeSqlTag (component, SQLTAG_MLPROP);
}
}
if (! isType (token, TOKEN_CLOSE_PAREN))
findToken (token, TOKEN_CLOSE_PAREN);
}
}
findCmdTerm (token, TRUE);
deleteToken (component);
deleteToken (prop_set_name);
deleteToken (prop_name);
}
static void parseComment (tokenInfo *const token)
{
/*
* This deals with this statement:
* COMMENT TO PRESERVE FORMAT ON PROCEDURE "DBA"."test" IS
* {create PROCEDURE DBA."test"()
* BEGIN
* signal dave;
* END
* }
* ;
* The comment can contain anything between the CURLY
* braces
* COMMENT ON USER "admin" IS
* 'Administration Group'
* ;
* Or it could be a simple string with no curly braces
*/
while (! isKeyword (token, KEYWORD_is) &&
! isType (token, TOKEN_EOF))
{
readToken (token);
}
readToken (token);
if (isType(token, TOKEN_OPEN_CURLY))
{
findToken (token, TOKEN_CLOSE_CURLY);
}
findCmdTerm (token, TRUE);
}
static void parseKeywords (tokenInfo *const token)
{
switch (token->keyword)
{
case KEYWORD_begin: parseBlock (token, FALSE); break;
case KEYWORD_comment: parseComment (token); break;
case KEYWORD_cursor: parseSimple (token, SQLTAG_CURSOR); break;
case KEYWORD_datatype: parseDomain (token); break;
case KEYWORD_declare: parseBlock (token, FALSE); break;
case KEYWORD_domain: parseDomain (token); break;
case KEYWORD_drop: parseDrop (token); break;
case KEYWORD_event: parseEvent (token); break;
case KEYWORD_function: parseSubProgram (token); break;
case KEYWORD_if: parseStatements (token, FALSE); break;
case KEYWORD_index: parseIndex (token); break;
case KEYWORD_ml_table: parseMLTable (token); break;
case KEYWORD_ml_table_lang: parseMLTable (token); break;
case KEYWORD_ml_table_dnet: parseMLTable (token); break;
case KEYWORD_ml_table_java: parseMLTable (token); break;
case KEYWORD_ml_table_chk: parseMLTable (token); break;
case KEYWORD_ml_conn: parseMLConn (token); break;
case KEYWORD_ml_conn_lang: parseMLConn (token); break;
case KEYWORD_ml_conn_dnet: parseMLConn (token); break;
case KEYWORD_ml_conn_java: parseMLConn (token); break;
case KEYWORD_ml_conn_chk: parseMLConn (token); break;
case KEYWORD_ml_prop: parseMLProp (token); break;
case KEYWORD_package: parsePackage (token); break;
case KEYWORD_procedure: parseSubProgram (token); break;
case KEYWORD_publication: parsePublication (token); break;
case KEYWORD_service: parseService (token); break;
case KEYWORD_subtype: parseSimple (token, SQLTAG_SUBTYPE); break;
case KEYWORD_synonym: parseSynonym (token); break;
case KEYWORD_table: parseTable (token); break;
case KEYWORD_trigger: parseTrigger (token); break;
case KEYWORD_type: parseType (token); break;
case KEYWORD_variable: parseVariable (token); break;
case KEYWORD_view: parseView (token); break;
default: break;
}
}
static tokenType parseSqlFile (tokenInfo *const token)
{
do
{
readToken (token);
if (isType (token, TOKEN_BLOCK_LABEL_BEGIN))
parseLabel (token);
else
parseKeywords (token);
} while (! isKeyword (token, KEYWORD_end) &&
! isType (token, TOKEN_EOF));
return token->type;
}
static void initialize (const langType language)
{
Assert (sizeof (SqlKinds) / sizeof (SqlKinds [0]) == SQLTAG_COUNT);
Lang_sql = language;
buildSqlKeywordHash ();
}
static void findSqlTags (void)
{
tokenInfo *const token = newToken ();
while (parseSqlFile (token) != TOKEN_EOF);
deleteToken (token);
}
extern parserDefinition* SqlParser (void)
{
static const char *const extensions [] = { "sql", NULL };
parserDefinition* def = parserNew ("SQL");
def->kinds = SqlKinds;
def->kindCount = KIND_COUNT (SqlKinds);
def->fileKind = KIND_FILE_ALT;
def->extensions = extensions;
def->parser = findSqlTags;
def->initialize = initialize;
return def;
}
/* vi:set tabstop=4 shiftwidth=4 noexpandtab: */
| 1 | 12,112 | Isn't this the same check as two lines above? | universal-ctags-ctags | c |
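The review message in this row ("Isn't this the same check as two lines above?") refers to the row's patch, which is not reproduced in this excerpt, so the exact flagged lines are unknown. The pattern it describes, a condition repeated where the surrounding control flow already guarantees it, does occur in the parser above (see the note in skipToMatched). A minimal C sketch of that kind of simplification, under the assumption that the flagged check follows the same pattern:

/* Before: the inner guard repeats what the loop condition ensures. */
while (nest_level > 0 && !isType (token, TOKEN_EOF))
{
    readToken (token);
    if (isType (token, open_token))
        nest_level++;
    if (isType (token, close_token))
    {
        if (nest_level > 0)  /* redundant: the loop only runs while nest_level > 0 */
            nest_level--;
    }
}

/* After: decrement unconditionally; nest_level is known positive here. */
while (nest_level > 0 && !isType (token, TOKEN_EOF))
{
    readToken (token);
    if (isType (token, open_token))
        nest_level++;
    if (isType (token, close_token))
        nest_level--;
}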
@@ -18,8 +18,11 @@ from mitmproxy import io
from mitmproxy import log
from mitmproxy import version
from mitmproxy import optmanager
+from mitmproxy import options
import mitmproxy.tools.web.master # noqa
+CONFIG_PATH = os.path.join(options.CA_DIR, 'config.yaml')
+
def flow_to_json(flow: mitmproxy.flow.Flow) -> dict:
""" | 1 | import hashlib
import json
import logging
import os.path
import re
from io import BytesIO
import mitmproxy.addons.view
import mitmproxy.flow
import tornado.escape
import tornado.web
import tornado.websocket
from mitmproxy import contentviews
from mitmproxy import exceptions
from mitmproxy import flowfilter
from mitmproxy import http
from mitmproxy import io
from mitmproxy import log
from mitmproxy import version
from mitmproxy import optmanager
import mitmproxy.tools.web.master # noqa
def flow_to_json(flow: mitmproxy.flow.Flow) -> dict:
"""
Remove flow message content and cert to save transmission space.
Args:
flow: The original flow.
"""
f = {
"id": flow.id,
"intercepted": flow.intercepted,
"client_conn": flow.client_conn.get_state(),
"server_conn": flow.server_conn.get_state(),
"type": flow.type,
"modified": flow.modified(),
"marked": flow.marked,
}
# .alpn_proto_negotiated is bytes, we need to decode that.
for conn in "client_conn", "server_conn":
if f[conn]["alpn_proto_negotiated"] is None:
continue
f[conn]["alpn_proto_negotiated"] = \
f[conn]["alpn_proto_negotiated"].decode(errors="backslashreplace")
if flow.error:
f["error"] = flow.error.get_state()
if isinstance(flow, http.HTTPFlow):
if flow.request:
if flow.request.raw_content:
content_length = len(flow.request.raw_content)
content_hash = hashlib.sha256(flow.request.raw_content).hexdigest()
else:
content_length = None
content_hash = None
f["request"] = {
"method": flow.request.method,
"scheme": flow.request.scheme,
"host": flow.request.host,
"port": flow.request.port,
"path": flow.request.path,
"http_version": flow.request.http_version,
"headers": tuple(flow.request.headers.items(True)),
"contentLength": content_length,
"contentHash": content_hash,
"timestamp_start": flow.request.timestamp_start,
"timestamp_end": flow.request.timestamp_end,
"is_replay": flow.request.is_replay,
"pretty_host": flow.request.pretty_host,
}
if flow.response:
if flow.response.raw_content:
content_length = len(flow.response.raw_content)
content_hash = hashlib.sha256(flow.response.raw_content).hexdigest()
else:
content_length = None
content_hash = None
f["response"] = {
"http_version": flow.response.http_version,
"status_code": flow.response.status_code,
"reason": flow.response.reason,
"headers": tuple(flow.response.headers.items(True)),
"contentLength": content_length,
"contentHash": content_hash,
"timestamp_start": flow.response.timestamp_start,
"timestamp_end": flow.response.timestamp_end,
"is_replay": flow.response.is_replay,
}
f.get("server_conn", {}).pop("cert", None)
f.get("client_conn", {}).pop("mitmcert", None)
return f
def logentry_to_json(e: log.LogEntry) -> dict:
return {
"id": id(e), # we just need some kind of id.
"message": e.msg,
"level": e.level
}
class APIError(tornado.web.HTTPError):
pass
class RequestHandler(tornado.web.RequestHandler):
def write(self, chunk):
# Writing arrays on the top level is ok nowadays.
# http://flask.pocoo.org/docs/0.11/security/#json-security
if isinstance(chunk, list):
chunk = tornado.escape.json_encode(chunk)
self.set_header("Content-Type", "application/json; charset=UTF-8")
super(RequestHandler, self).write(chunk)
def set_default_headers(self):
super().set_default_headers()
self.set_header("Server", version.MITMPROXY)
self.set_header("X-Frame-Options", "DENY")
self.add_header("X-XSS-Protection", "1; mode=block")
self.add_header("X-Content-Type-Options", "nosniff")
self.add_header(
"Content-Security-Policy",
"default-src 'self'; "
"connect-src 'self' ws:; "
"style-src 'self' 'unsafe-inline'"
)
@property
def json(self):
if not self.request.headers.get("Content-Type", "").startswith("application/json"):
raise APIError(400, "Invalid Content-Type, expected application/json.")
try:
return json.loads(self.request.body.decode())
except Exception as e:
raise APIError(400, "Malformed JSON: {}".format(str(e)))
@property
def filecontents(self):
"""
Accept either a multipart/form file upload or just take the plain request body.
"""
if self.request.files:
return next(iter(self.request.files.values()))[0].body
else:
return self.request.body
@property
def view(self) -> mitmproxy.addons.view.View:
return self.application.master.view
@property
def master(self) -> "mitmproxy.tools.web.master.WebMaster":
return self.application.master
@property
def flow(self) -> mitmproxy.flow.Flow:
flow_id = str(self.path_kwargs["flow_id"])
# FIXME: Add a facility to addon.view to safely access the store
flow = self.view.get_by_id(flow_id)
if flow:
return flow
else:
raise APIError(404, "Flow not found.")
def write_error(self, status_code: int, **kwargs):
if "exc_info" in kwargs and isinstance(kwargs["exc_info"][1], APIError):
self.finish(kwargs["exc_info"][1].log_message)
else:
super().write_error(status_code, **kwargs)
class IndexHandler(RequestHandler):
def get(self):
token = self.xsrf_token # https://github.com/tornadoweb/tornado/issues/645
assert token
self.render("index.html")
class FilterHelp(RequestHandler):
def get(self):
self.write(dict(
commands=flowfilter.help
))
class WebSocketEventBroadcaster(tornado.websocket.WebSocketHandler):
# raise an error if a subclass doesn't specify its own instance.
connections = None # type: set
def open(self):
self.connections.add(self)
def on_close(self):
self.connections.remove(self)
@classmethod
def broadcast(cls, **kwargs):
message = json.dumps(kwargs, ensure_ascii=False).encode("utf8", "surrogateescape")
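# "surrogateescape" re-encodes lone surrogates (left over from decoding
# raw, non-UTF-8 bytes elsewhere) back into the original bytes.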
for conn in cls.connections:
try:
conn.write_message(message)
except Exception: # pragma: no cover
logging.error("Error sending message", exc_info=True)
class ClientConnection(WebSocketEventBroadcaster):
connections = set() # type: set
class Flows(RequestHandler):
def get(self):
self.write([flow_to_json(f) for f in self.view])
class DumpFlows(RequestHandler):
def get(self):
self.set_header("Content-Disposition", "attachment; filename=flows")
self.set_header("Content-Type", "application/octet-stream")
bio = BytesIO()
fw = io.FlowWriter(bio)
for f in self.view:
fw.add(f)
self.write(bio.getvalue())
bio.close()
def post(self):
self.view.clear()
bio = BytesIO(self.filecontents)
for i in io.FlowReader(bio).stream():
self.master.load_flow(i)
bio.close()
class ClearAll(RequestHandler):
def post(self):
self.view.clear()
self.master.events.clear()
class ResumeFlows(RequestHandler):
def post(self):
for f in self.view:
f.resume()
self.view.update([f])
class KillFlows(RequestHandler):
def post(self):
for f in self.view:
if f.killable:
f.kill()
self.view.update([f])
class ResumeFlow(RequestHandler):
def post(self, flow_id):
self.flow.resume()
self.view.update([self.flow])
class KillFlow(RequestHandler):
def post(self, flow_id):
if self.flow.killable:
self.flow.kill()
self.view.update([self.flow])
class FlowHandler(RequestHandler):
def delete(self, flow_id):
if self.flow.killable:
self.flow.kill()
self.view.remove([self.flow])
def put(self, flow_id):
flow = self.flow
flow.backup()
try:
for a, b in self.json.items():
if a == "request" and hasattr(flow, "request"):
request = flow.request
for k, v in b.items():
if k in ["method", "scheme", "host", "path", "http_version"]:
setattr(request, k, str(v))
elif k == "port":
request.port = int(v)
elif k == "headers":
request.headers.clear()
for header in v:
request.headers.add(*header)
elif k == "content":
request.text = v
else:
raise APIError(400, "Unknown update request.{}: {}".format(k, v))
elif a == "response" and hasattr(flow, "response"):
response = flow.response
for k, v in b.items():
if k in ["msg", "http_version"]:
setattr(response, k, str(v))
elif k == "code":
response.status_code = int(v)
elif k == "headers":
response.headers.clear()
for header in v:
response.headers.add(*header)
elif k == "content":
response.text = v
else:
raise APIError(400, "Unknown update response.{}: {}".format(k, v))
else:
raise APIError(400, "Unknown update {}: {}".format(a, b))
except APIError:
flow.revert()
raise
self.view.update([flow])
class DuplicateFlow(RequestHandler):
def post(self, flow_id):
f = self.flow.copy()
self.view.add([f])
self.write(f.id)
class RevertFlow(RequestHandler):
def post(self, flow_id):
if self.flow.modified():
self.flow.revert()
self.view.update([self.flow])
class ReplayFlow(RequestHandler):
def post(self, flow_id):
self.flow.backup()
self.flow.response = None
self.view.update([self.flow])
try:
self.master.replay_request(self.flow)
except exceptions.ReplayException as e:
raise APIError(400, str(e))
class FlowContent(RequestHandler):
def post(self, flow_id, message):
self.flow.backup()
message = getattr(self.flow, message)
message.content = self.filecontents
self.view.update([self.flow])
def get(self, flow_id, message):
message = getattr(self.flow, message)
if not message.raw_content:
raise APIError(400, "No content.")
content_encoding = message.headers.get("Content-Encoding", None)
if content_encoding:
content_encoding = re.sub(r"[^\w]", "", content_encoding)
self.set_header("Content-Encoding", content_encoding)
original_cd = message.headers.get("Content-Disposition", None)
filename = None
if original_cd:
filename = re.search('filename=([-\w" .()]+)', original_cd)
if filename:
filename = filename.group(1)
if not filename:
filename = self.flow.request.path.split("?")[0].split("/")[-1]
filename = re.sub(r'[^-\w" .()]', "", filename)
cd = "attachment; filename={}".format(filename)
self.set_header("Content-Disposition", cd)
self.set_header("Content-Type", "application/text")
self.set_header("X-Content-Type-Options", "nosniff")
self.set_header("X-Frame-Options", "DENY")
self.write(message.raw_content)
class FlowContentView(RequestHandler):
def get(self, flow_id, message, content_view):
message = getattr(self.flow, message)
description, lines, error = contentviews.get_message_content_view(
content_view.replace('_', ' '), message
)
# if error:
# add event log
self.write(dict(
lines=list(lines),
description=description
))
class Events(RequestHandler):
def get(self):
self.write([logentry_to_json(e) for e in self.master.events.data])
class Settings(RequestHandler):
def get(self):
self.write(dict(
version=version.VERSION,
mode=str(self.master.options.mode),
intercept=self.master.options.intercept,
showhost=self.master.options.showhost,
upstream_cert=self.master.options.upstream_cert,
rawtcp=self.master.options.rawtcp,
http2=self.master.options.http2,
websocket=self.master.options.websocket,
anticache=self.master.options.anticache,
anticomp=self.master.options.anticomp,
stickyauth=self.master.options.stickyauth,
stickycookie=self.master.options.stickycookie,
stream=self.master.options.stream_large_bodies,
contentViews=[v.name.replace(' ', '_') for v in contentviews.views],
listen_host=self.master.options.listen_host,
listen_port=self.master.options.listen_port,
server=self.master.options.server,
))
def put(self):
update = self.json
option_whitelist = {
"intercept", "showhost", "upstream_cert",
"rawtcp", "http2", "websocket", "anticache", "anticomp",
"stickycookie", "stickyauth", "stream_large_bodies"
}
for k in update:
if k not in option_whitelist:
raise APIError(400, "Unknown setting {}".format(k))
self.master.options.update(**update)
class Options(RequestHandler):
def get(self):
self.write(optmanager.dump_dicts(self.master.options))
def put(self):
update = self.json
try:
self.master.options.update(**update)
except Exception as err:
raise APIError(400, "{}".format(err))
class Application(tornado.web.Application):
def __init__(self, master, debug):
self.master = master
handlers = [
(r"/", IndexHandler),
(r"/filter-help", FilterHelp),
(r"/updates", ClientConnection),
(r"/events", Events),
(r"/flows", Flows),
(r"/flows/dump", DumpFlows),
(r"/flows/resume", ResumeFlows),
(r"/flows/kill", KillFlows),
(r"/flows/(?P<flow_id>[0-9a-f\-]+)", FlowHandler),
(r"/flows/(?P<flow_id>[0-9a-f\-]+)/resume", ResumeFlow),
(r"/flows/(?P<flow_id>[0-9a-f\-]+)/kill", KillFlow),
(r"/flows/(?P<flow_id>[0-9a-f\-]+)/duplicate", DuplicateFlow),
(r"/flows/(?P<flow_id>[0-9a-f\-]+)/replay", ReplayFlow),
(r"/flows/(?P<flow_id>[0-9a-f\-]+)/revert", RevertFlow),
(r"/flows/(?P<flow_id>[0-9a-f\-]+)/(?P<message>request|response)/content", FlowContent),
(
r"/flows/(?P<flow_id>[0-9a-f\-]+)/(?P<message>request|response)/content/(?P<content_view>[0-9a-zA-Z\-\_]+)",
FlowContentView),
(r"/settings", Settings),
(r"/clear", ClearAll),
(r"/options", Options)
]
settings = dict(
template_path=os.path.join(os.path.dirname(__file__), "templates"),
static_path=os.path.join(os.path.dirname(__file__), "static"),
xsrf_cookies=True,
cookie_secret=os.urandom(256),
debug=debug,
autoreload=False,
)
super().__init__(handlers, **settings)
| 1 | 13,424 | Don't redefine, just import the existing one in `cmdline.py`. :) | mitmproxy-mitmproxy | py |
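The reviewer's point for this row is that the patch should not rebuild the config path locally when cmdline.py already defines it. A minimal sketch of the suggested change; the exact module path and constant name exported by cmdline.py are assumptions here, not confirmed by the row:

# Hypothetical replacement for the patch's redefinition above; assumes
# mitmproxy.tools.cmdline already exposes CONFIG_PATH built the same
# way (os.path.join(options.CA_DIR, 'config.yaml')).
from mitmproxy.tools.cmdline import CONFIG_PATH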
@@ -92,7 +92,8 @@ module Mongoid
#
# @since 2.0.0.rc.7
def process_attribute(name, value)
- if store_as = aliased_fields.invert[name.to_s]
+ responds = respond_to?("#{name}=")
+ if !responds && store_as = aliased_fields.invert[name.to_s]
name = store_as
end
responds = respond_to?("#{name}=") | 1 | # encoding: utf-8
module Mongoid
module Attributes
# This module contains the behavior for processing attributes.
module Processing
# Process the provided attributes casting them to their proper values if a
# field exists for them on the document. This will be limited to only the
# attributes provided in the suppied +Hash+ so that no extra nil values get
# put into the document's attributes.
#
# @example Process the attributes.
# person.process_attributes(:title => "sir", :age => 40)
#
# @param [ Hash ] attrs The attributes to set.
#
# @since 2.0.0.rc.7
def process_attributes(attrs = nil)
attrs ||= {}
if !attrs.empty?
attrs = sanitize_for_mass_assignment(attrs)
attrs.each_pair do |key, value|
next if pending_attribute?(key, value)
process_attribute(key, value)
end
end
yield self if block_given?
process_pending
end
private
# If the key provided is the name of a relation or a nested attribute, we
# need to wait until all other attributes are set before processing
# these.
#
# @example Is the attribute pending?
# document.pending_attribute?(:name, "Durran")
#
# @param [ Symbol ] key The name of the attribute.
# @param [ Object ] value The value of the attribute.
#
# @return [ true, false ] True if pending, false if not.
#
# @since 2.0.0.rc.7
def pending_attribute?(key, value)
name = key.to_s
if relations.has_key?(name)
pending_relations[name] = value
return true
end
if nested_attributes.has_key?(name)
pending_nested[name] = value
return true
end
return false
end
# Get all the pending relations that need to be set.
#
# @example Get the pending relations.
# document.pending_relations
#
# @return [ Hash ] The pending relations in key/value pairs.
#
# @since 2.0.0.rc.7
def pending_relations
@pending_relations ||= {}
end
# Get all the pending nested attributes that need to be set.
#
# @example Get the pending nested attributes.
# document.pending_nested
#
# @return [ Hash ] The pending nested attributes in key/value pairs.
#
# @since 2.0.0.rc.7
def pending_nested
@pending_nested ||= {}
end
# If the attribute is dynamic, add a field for it with a type of object
# and then either way set the value.
#
# @example Process the attribute.
# document.process_attribute(name, value)
#
# @param [ Symbol ] name The name of the field.
# @param [ Object ] value The value of the field.
#
# @since 2.0.0.rc.7
def process_attribute(name, value)
if store_as = aliased_fields.invert[name.to_s]
name = store_as
end
responds = respond_to?("#{name}=")
raise Errors::UnknownAttribute.new(self.class, name) unless responds
send("#{name}=", value)
end
# Process all the pending nested attributes that needed to wait until
# ids were set to fire off.
#
# @example Process the nested attributes.
# document.process_nested
#
# @since 2.0.0.rc.7
def process_nested
pending_nested.each_pair do |name, value|
send("#{name}=", value)
end
end
# Process all the pending items, then clear them out.
#
# @example Process the pending items.
# document.process_pending
#
# @param [ Hash ] options The mass assignment options.
#
# @since 2.0.0.rc.7
def process_pending
process_nested and process_relations
pending_nested.clear and pending_relations.clear
end
# Process all the pending relations that needed to wait until ids were set
# to fire off.
#
# @example Process the relations.
# document.process_relations
#
# @param [ Hash ] options The mass assignment options.
#
# @since 2.0.0.rc.7
def process_relations
pending_relations.each_pair do |name, value|
metadata = relations[name]
if value.is_a?(Hash)
metadata.nested_builder(value, {}).build(self)
else
send("#{name}=", value)
end
end
end
end
end
end
| 1 | 11,069 | why not call `respond_to?("#{name}=")` from the `if` line? | mongodb-mongoid | rb |
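The reviewer is suggesting that the patch's first `responds` local is unnecessary and the call can sit directly in the `if` condition. A minimal sketch of that inlined form, behavior-equivalent to the patch because `responds` is recomputed after the alias lookup anyway:

def process_attribute(name, value)
  # Inline the responder check rather than caching it in a local first.
  if !respond_to?("#{name}=") && store_as = aliased_fields.invert[name.to_s]
    name = store_as
  end
  responds = respond_to?("#{name}=")
  raise Errors::UnknownAttribute.new(self.class, name) unless responds
  send("#{name}=", value)
end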
@@ -215,7 +215,8 @@ public final class BlazeCidrLauncher extends CidrLauncher {
workingDir = workspaceRootDirectory;
}
- GeneralCommandLine commandLine = new GeneralCommandLine(runner.executableToDebug.getPath());
+ GeneralCommandLine commandLine = new GeneralCommandLine(runner.executableToDebug.getPath())
+ .withWorkDirectory(workingDir);
commandLine.addParameters(handlerState.getExeFlagsState().getFlagsForExternalProcesses());
commandLine.addParameters(handlerState.getTestArgs()); | 1 | /*
* Copyright 2016 The Bazel Authors. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.idea.blaze.clwb.run;
import com.google.common.base.Preconditions;
import com.google.common.collect.ImmutableList;
import com.google.idea.blaze.base.async.process.LineProcessingOutputStream;
import com.google.idea.blaze.base.command.BlazeCommand;
import com.google.idea.blaze.base.command.BlazeCommandName;
import com.google.idea.blaze.base.command.BlazeFlags;
import com.google.idea.blaze.base.command.BlazeInvocationContext;
import com.google.idea.blaze.base.console.BlazeConsoleLineProcessorProvider;
import com.google.idea.blaze.base.issueparser.IssueOutputFilter;
import com.google.idea.blaze.base.logging.EventLoggingService;
import com.google.idea.blaze.base.model.primitives.TargetExpression;
import com.google.idea.blaze.base.model.primitives.WorkspaceRoot;
import com.google.idea.blaze.base.projectview.ProjectViewManager;
import com.google.idea.blaze.base.projectview.ProjectViewSet;
import com.google.idea.blaze.base.run.BlazeCommandRunConfiguration;
import com.google.idea.blaze.base.run.ExecutorType;
import com.google.idea.blaze.base.run.filter.BlazeTargetFilter;
import com.google.idea.blaze.base.run.processhandler.LineProcessingProcessAdapter;
import com.google.idea.blaze.base.run.processhandler.ScopedBlazeProcessHandler;
import com.google.idea.blaze.base.run.smrunner.BlazeTestUiSession;
import com.google.idea.blaze.base.run.smrunner.SmRunnerUtils;
import com.google.idea.blaze.base.run.smrunner.TestUiSessionProvider;
import com.google.idea.blaze.base.scope.BlazeContext;
import com.google.idea.blaze.base.scope.scopes.ProblemsViewScope;
import com.google.idea.blaze.base.settings.Blaze;
import com.google.idea.blaze.base.settings.BlazeUserSettings;
import com.google.idea.blaze.base.settings.BuildSystem;
import com.google.idea.blaze.clwb.CidrGoogleTestUtilAdapter;
import com.google.idea.blaze.clwb.ToolchainUtils;
import com.google.idea.blaze.cpp.CppBlazeRules;
import com.intellij.execution.ExecutionException;
import com.intellij.execution.configuration.EnvironmentVariablesData;
import com.intellij.execution.configurations.CommandLineState;
import com.intellij.execution.configurations.GeneralCommandLine;
import com.intellij.execution.configurations.GeneralCommandLine.ParentEnvironmentType;
import com.intellij.execution.filters.Filter;
import com.intellij.execution.filters.UrlFilter;
import com.intellij.execution.process.ProcessHandler;
import com.intellij.execution.process.ProcessListener;
import com.intellij.execution.runners.ExecutionEnvironment;
import com.intellij.execution.ui.ConsoleView;
import com.intellij.ide.util.PropertiesComponent;
import com.intellij.openapi.project.Project;
import com.intellij.openapi.ui.Messages;
import com.intellij.xdebugger.XDebugSession;
import com.jetbrains.cidr.cpp.toolchains.CPPDebugger.Kind;
import com.jetbrains.cidr.execution.CidrConsoleBuilder;
import com.jetbrains.cidr.execution.CidrLauncher;
import com.jetbrains.cidr.execution.TrivialInstaller;
import com.jetbrains.cidr.execution.TrivialRunParameters;
import com.jetbrains.cidr.execution.debugger.CidrDebugProcess;
import com.jetbrains.cidr.execution.debugger.CidrLocalDebugProcess;
import com.jetbrains.cidr.execution.debugger.backend.lldb.LLDBDriverConfiguration;
import com.jetbrains.cidr.execution.debugger.remote.CidrRemoteDebugParameters;
import com.jetbrains.cidr.execution.debugger.remote.CidrRemotePathMapping;
import com.jetbrains.cidr.execution.testing.google.CidrGoogleTestConsoleProperties;
import java.io.File;
import java.util.List;
import java.util.Optional;
import javax.annotation.Nullable;
/**
* Handles running/debugging cc_test and cc_binary targets in CLion. Sets up gdb when debugging, and
* uses the Google Test infrastructure for presenting test results.
*/
public final class BlazeCidrLauncher extends CidrLauncher {
private final Project project;
private final BlazeCommandRunConfiguration configuration;
private final BlazeCidrRunConfigState handlerState;
private final BlazeCidrRunConfigurationRunner runner;
private final ExecutionEnvironment env;
private static final String DISABLE_BAZEL_GOOGLETEST_FILTER_WARNING =
"bazel.test_filter.googletest_update";
BlazeCidrLauncher(
BlazeCommandRunConfiguration configuration,
BlazeCidrRunConfigurationRunner runner,
ExecutionEnvironment env) {
this.configuration = configuration;
this.handlerState = (BlazeCidrRunConfigState) configuration.getHandler().getState();
this.runner = runner;
this.env = env;
this.project = configuration.getProject();
}
@Override
public ProcessHandler createProcess(CommandLineState state) throws ExecutionException {
return createProcess(state, ImmutableList.of());
}
private ProcessHandler createProcess(CommandLineState state, List<String> extraBlazeFlags)
throws ExecutionException {
ImmutableList<String> testHandlerFlags = ImmutableList.of();
BlazeTestUiSession testUiSession =
useTestUi()
? TestUiSessionProvider.getInstance(project)
.getTestUiSession(configuration.getTargets())
: null;
if (testUiSession != null) {
testHandlerFlags = testUiSession.getBlazeFlags();
}
ProjectViewSet projectViewSet =
Preconditions.checkNotNull(ProjectViewManager.getInstance(project).getProjectViewSet());
if (shouldDisplayBazelTestFilterWarning()) {
String messageContents =
"<html>The Google Test framework did not apply test filtering correctly before "
+ "git commit <a href='https://github.com/google/googletest/commit/"
+ "ba96d0b1161f540656efdaed035b3c062b60e006"
+ "'>ba96d0b</a>.<br/>"
+ "Please ensure you are past this commit if you are using it.<br/><br/>"
+ "More information on the bazel <a href='https://github.com/bazelbuild/bazel/issues/"
+ "4411'>issue</a></html>";
int selectedOption =
Messages.showDialog(
getProject(),
messageContents,
"Please update 'Google Test' past ba96d0b...",
new String[] {"Close", "Don't show again"},
0, // Default to "Close"
Messages.getWarningIcon());
if (selectedOption == 1) {
PropertiesComponent.getInstance().setValue(DISABLE_BAZEL_GOOGLETEST_FILTER_WARNING, "true");
}
}
BlazeCommand.Builder commandBuilder =
BlazeCommand.builder(
Blaze.getBuildSystemProvider(project).getBinaryPath(project),
handlerState.getCommandState().getCommand())
.addTargets(configuration.getTargets())
.addBlazeFlags(extraBlazeFlags)
.addBlazeFlags(
BlazeFlags.blazeFlags(
project,
projectViewSet,
handlerState.getCommandState().getCommand(),
BlazeInvocationContext.runConfigContext(
ExecutorType.fromExecutor(env.getExecutor()),
configuration.getType(),
false)))
.addBlazeFlags(testHandlerFlags)
.addBlazeFlags(handlerState.getBlazeFlagsState().getFlagsForExternalProcesses())
.addExeFlags(handlerState.getExeFlagsState().getFlagsForExternalProcesses());
state.setConsoleBuilder(createConsoleBuilder(testUiSession));
state.addConsoleFilters(getConsoleFilters().toArray(new Filter[0]));
WorkspaceRoot workspaceRoot = WorkspaceRoot.fromProject(project);
final BlazeCommand command = commandBuilder.build();
return new ScopedBlazeProcessHandler(
project,
command,
workspaceRoot,
new ScopedBlazeProcessHandler.ScopedProcessHandlerDelegate() {
@Override
public void onBlazeContextStart(BlazeContext context) {
context.push(
new ProblemsViewScope(
project, BlazeUserSettings.getInstance().getShowProblemsViewOnRun()));
}
@Override
public ImmutableList<ProcessListener> createProcessListeners(BlazeContext context) {
LineProcessingOutputStream outputStream =
LineProcessingOutputStream.of(
BlazeConsoleLineProcessorProvider.getAllStderrLineProcessors(context));
return ImmutableList.of(new LineProcessingProcessAdapter(outputStream));
}
});
}
@Override
public CidrDebugProcess createDebugProcess(CommandLineState state, XDebugSession session)
throws ExecutionException {
TargetExpression target = configuration.getSingleTarget();
if (target == null) {
throw new ExecutionException("Cannot parse run configuration target.");
}
if (runner.executableToDebug == null) {
throw new ExecutionException("No debug binary found.");
}
EventLoggingService.getInstance().logEvent(getClass(), "debugging-cpp");
WorkspaceRoot workspaceRoot = WorkspaceRoot.fromProject(project);
File workspaceRootDirectory = workspaceRoot.directory();
if (!BlazeGDBServerProvider.shouldUseGdbserver()) {
File workingDir =
new File(runner.executableToDebug + ".runfiles", workspaceRootDirectory.getName());
if (!workingDir.exists()) {
workingDir = workspaceRootDirectory;
}
GeneralCommandLine commandLine = new GeneralCommandLine(runner.executableToDebug.getPath());
commandLine.addParameters(handlerState.getExeFlagsState().getFlagsForExternalProcesses());
commandLine.addParameters(handlerState.getTestArgs());
EnvironmentVariablesData envState = handlerState.getEnvVarsState().getData();
commandLine.withParentEnvironmentType(
envState.isPassParentEnvs() ? ParentEnvironmentType.SYSTEM : ParentEnvironmentType.NONE);
commandLine.getEnvironment().putAll(envState.getEnvs());
if (CppBlazeRules.RuleTypes.CC_TEST.getKind().equals(configuration.getTargetKind())) {
convertBlazeTestFilterToExecutableFlag().ifPresent(commandLine::addParameters);
}
TrivialInstaller installer = new TrivialInstaller(commandLine);
ImmutableList<String> startupCommands = getGdbStartupCommands(workspaceRootDirectory);
TrivialRunParameters parameters =
new TrivialRunParameters(
ToolchainUtils.getToolchain().getDebuggerKind() == Kind.BUNDLED_LLDB
? new LLDBDriverConfiguration()
: new BlazeGDBDriverConfiguration(project, startupCommands, workspaceRoot),
installer);
state.setConsoleBuilder(createConsoleBuilder(null));
state.addConsoleFilters(getConsoleFilters().toArray(new Filter[0]));
return new CidrLocalDebugProcess(parameters, session, state.getConsoleBuilder());
}
List<String> extraDebugFlags = BlazeGDBServerProvider.getFlagsForDebugging(handlerState);
ProcessHandler targetProcess = createProcess(state, extraDebugFlags);
configProcessHandler(targetProcess, false, true, getProject());
targetProcess.startNotify();
    // CidrRemoteDebugParameters can't be constructed with a null sysroot, so pass in the default
    // value "target:", which causes paths/files to be resolved in the context of the target.
CidrRemoteDebugParameters parameters =
new CidrRemoteDebugParameters(
"tcp:localhost:" + handlerState.getDebugPortState().port,
runner.executableToDebug.getPath(),
"target:",
ImmutableList.of(
new CidrRemotePathMapping("/proc/self/cwd", workspaceRootDirectory.getParent())));
BlazeCLionGDBDriverConfiguration debuggerDriverConfiguration =
new BlazeCLionGDBDriverConfiguration(project);
return new BlazeCidrRemoteDebugProcess(
targetProcess, debuggerDriverConfiguration, parameters, session, state.getConsoleBuilder());
}
  /** Get the correct test filter flag name for blaze/bazel */
private String getTestFilterArgument() {
if (Blaze.getBuildSystem(project).equals(BuildSystem.Blaze)) {
return "--gunit_filter";
}
return "--gtest_filter";
}
private boolean shouldDisplayBazelTestFilterWarning() {
return Blaze.getBuildSystem(getProject()).equals(BuildSystem.Bazel)
&& CppBlazeRules.RuleTypes.CC_TEST.getKind().equals(configuration.getTargetKind())
&& handlerState.getTestFilterFlag() != null
&& !PropertiesComponent.getInstance()
.getBoolean(DISABLE_BAZEL_GOOGLETEST_FILTER_WARNING, false)
&& CidrGoogleTestUtilAdapter.findGoogleTestSymbol(getProject()) != null;
}
/**
* Convert blaze/bazel test filter to the equivalent executable flag
*
* @return An (Optional) flag to append to the executable's flag list
*/
private Optional<String> convertBlazeTestFilterToExecutableFlag() {
String testArgument = getTestFilterArgument();
String testFilter = handlerState.getTestFilterFlag();
if (testFilter == null) {
return Optional.empty();
}
return Optional.of(testFilter.replaceFirst(BlazeFlags.TEST_FILTER, testArgument));
}
@Override
public Project getProject() {
return project;
}
private ImmutableList<Filter> getConsoleFilters() {
return ImmutableList.of(
new BlazeTargetFilter(true),
new UrlFilter(),
new IssueOutputFilter(
project,
WorkspaceRoot.fromProject(project),
BlazeInvocationContext.ContextType.RunConfiguration,
false));
}
private CidrConsoleBuilder createConsoleBuilder(@Nullable BlazeTestUiSession testUiSession) {
if (BlazeCommandName.TEST.equals(handlerState.getCommandState().getCommand())) {
// hook up the test tree UI
return new GoogleTestConsoleBuilder(configuration.getProject(), testUiSession);
}
return new CidrConsoleBuilder(configuration.getProject(), null, null);
}
private ImmutableList<String> getGdbStartupCommands(File workspaceRootDirectory) {
    // Forge creates debug symbol paths rooted at /proc/self/cwd.
// We need to tell gdb to translate this path prefix to the user's workspace
// root so the IDE can find the files.
String from = "/proc/self/cwd";
String to = workspaceRootDirectory.getPath();
String subPathCommand = String.format("set substitute-path %s %s", from, to);
return ImmutableList.of(subPathCommand);
}
private boolean useTestUi() {
return BlazeCommandName.TEST.equals(handlerState.getCommandState().getCommand());
}
private final class GoogleTestConsoleBuilder extends CidrConsoleBuilder {
@Nullable private final BlazeTestUiSession testUiSession;
private GoogleTestConsoleBuilder(Project project, @Nullable BlazeTestUiSession testUiSession) {
super(project, null, null);
this.testUiSession = testUiSession;
addFilter(new BlazeCidrTestOutputFilter(project));
}
@Override
protected ConsoleView createConsole() {
if (testUiSession != null) {
return SmRunnerUtils.getConsoleView(
configuration.getProject(), configuration, env.getExecutor(), testUiSession);
}
      // When launching GDB directly, the blaze test runners aren't involved
CidrGoogleTestConsoleProperties consoleProperties =
new CidrGoogleTestConsoleProperties(
configuration, env.getExecutor(), env.getExecutionTarget());
return createConsole(configuration.getType(), consoleProperties);
}
}
}
| 1 | 7,002 | This was properly set above as `<target>.runfiles/<workspace_name>` (with a fallback to workspace root dir) but never used past this line. | bazelbuild-intellij | java |
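A minimal sketch of the fix this comment implies, namely actually applying the computed directory to the launched process. It assumes IntelliJ's GeneralCommandLine#setWorkDirectory(File) setter; the diff itself never calls it:

  File workingDir =
      new File(runner.executableToDebug + ".runfiles", workspaceRootDirectory.getName());
  if (!workingDir.exists()) {
    workingDir = workspaceRootDirectory;
  }
  GeneralCommandLine commandLine = new GeneralCommandLine(runner.executableToDebug.getPath());
  // Use the computed directory instead of dropping it.
  commandLine.setWorkDirectory(workingDir);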
@@ -0,0 +1,13 @@
+using System;
+using System.Collections.Generic;
+using System.Linq;
+using System.Text;
+using System.Threading.Tasks;
+using MvvmCross.Core.Views;
+
+namespace MvvmCross.Uwp.Attributes
+{
+ public class MvxPagePresentationAttribute : MvxBasePresentationAttribute
+ {
+ }
+} | 1 | 1 | 13,341 | Are all these namespaces required for this attribute? | MvvmCross-MvvmCross | .cs |
|
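An illustration of the reviewer's point: the class body references nothing beyond its base type, so the template usings can likely be trimmed to the one the base attribute needs (assuming MvxBasePresentationAttribute resolves via MvvmCross.Core.Views, as the patch's usings suggest):

  using MvvmCross.Core.Views;

  namespace MvvmCross.Uwp.Attributes
  {
      public class MvxPagePresentationAttribute : MvxBasePresentationAttribute
      {
      }
  }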
@@ -53,9 +53,12 @@ module Beaker
result.stdout << std_out
result.stderr << std_err
result.exit_code = status.exitstatus
+ @logger.info(result.stdout)
+ @logger.info(result.stderr)
end
rescue => e
result.stderr << e.inspect
+ @logger.info(result.stderr)
result.exit_code = 1
end
| 1 | require 'open3'
module Beaker
class LocalConnection
attr_accessor :logger, :hostname, :ip
def initialize options = {}
@logger = options[:logger]
@ssh_env_file = File.expand_path(options[:ssh_env_file])
@hostname = 'localhost'
@ip = '127.0.0.1'
@options = options
end
def self.connect options = {}
connection = new options
connection.connect
connection
end
def connect options = {}
@logger.debug "Local connection, no connection to start"
end
def close
@logger.debug "Local connection, no connection to close"
end
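# Temporarily swaps in the given environment for the duration of the
# block, restoring the original ENV afterwards, even if the block raises.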
def with_env(env)
backup = ENV.to_hash
ENV.replace(env)
yield
ensure
ENV.replace(backup)
end
def execute command, options = {}, stdout_callback = nil, stderr_callback = stdout_callback
result = Result.new(@hostname, command)
envs = {}
if File.readable?(@ssh_env_file)
File.foreach(@ssh_env_file) do |line|
key, value = line.split('=')
envs[key] = value
end
end
begin
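# Strip Bundler/RubyGems variables so the spawned command does not
# inherit this Ruby process's own gem environment.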
clean_env = ENV.reject{ |k| k =~ /^BUNDLE|^RUBY|^GEM/ }
with_env(clean_env) do
std_out, std_err, status = Open3.capture3(envs, command)
result.stdout << std_out
result.stderr << std_err
result.exit_code = status.exitstatus
end
rescue => e
result.stderr << e.inspect
result.exit_code = 1
end
result.finalize!
@logger.last_result = result
result
end
def scp_to(source, target, _options = {})
result = Result.new(@hostname, [source, target])
begin
FileUtils.cp_r source, target
rescue Errno::ENOENT => e
@logger.warn "#{e.class} error in cp'ing. Forcing the connection to close, which should " \
"raise an error."
end
result.stdout << " CP'ed file #{source} to #{target}"
result.exit_code = 0
result
end
def scp_from(source, target, options = {})
scp_to(target, source, options)
end
end
end
| 1 | 16,544 | Given this may be used and printed in other ways, isn't `debug` more appropriate? | voxpupuli-beaker | rb |
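A minimal sketch of the change the reviewer is suggesting: log the captured output at debug level rather than info, matching the #debug calls already used elsewhere in this file:

  std_out, std_err, status = Open3.capture3(envs, command)
  result.stdout << std_out
  result.stderr << std_err
  result.exit_code = status.exitstatus
  @logger.debug(result.stdout)
  @logger.debug(result.stderr)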
@@ -540,3 +540,7 @@ func (s *blockDiskStore) remove(id kbfsblock.ID) error {
}
return err
}
+
+func (s blockDiskStore) clear() error {
+ return ioutil.RemoveAll(s.dir)
+} | 1 | // Copyright 2016 Keybase Inc. All rights reserved.
// Use of this source code is governed by a BSD
// license that can be found in the LICENSE file.
package libkbfs
import (
"path/filepath"
"strings"
"github.com/keybase/go-codec/codec"
"github.com/keybase/kbfs/ioutil"
"github.com/keybase/kbfs/kbfsblock"
"github.com/keybase/kbfs/kbfscodec"
"github.com/keybase/kbfs/kbfscrypto"
"github.com/pkg/errors"
)
// blockDiskStore stores block data in flat files on disk.
//
// The directory layout looks like:
//
// dir/0100/0...01/data
// dir/0100/0...01/id
// dir/0100/0...01/ksh
// dir/0100/0...01/refs
// ...
// dir/01cc/5...55/id
// dir/01cc/5...55/refs
// ...
// dir/01dd/6...66/data
// dir/01dd/6...66/id
// dir/01dd/6...66/ksh
// ...
// dir/01ff/f...ff/data
// dir/01ff/f...ff/id
// dir/01ff/f...ff/ksh
// dir/01ff/f...ff/refs
//
// Each block has its own subdirectory with its ID truncated to 17
// bytes (34 characters) as a name. The block subdirectories are
// splayed over (# of possible hash types) * 256 subdirectories -- one
// byte for the hash type (currently only one) plus the first byte of
// the hash data -- using the first four characters of the name to
// keep the number of directories in dir itself to a manageable
// number, similar to git.
//
// Each block directory has the following files:
//
// - id: The full block ID in binary format. Always present.
// - data: The raw block data that should hash to the block ID.
// May be missing.
// - ksh: The raw data for the associated key server half.
// May be missing, but should be present when data is.
// - refs: The list of references to the block, along with other
// block-specific info, encoded as a serialized
// blockJournalInfo. May be missing. TODO: rename this to
// something more generic if we ever upgrade the journal
// version.
//
// Future versions of the disk store might add more files to this
// directory; if any code is written to move blocks around, it should
// be careful to preserve any unknown files in a block directory.
//
// The maximum number of characters added to the root dir by a block
// disk store is 44:
//
// /01ff/f...(30 characters total)...ff/data
//
// blockDiskStore is not goroutine-safe, so any code that uses it must
// guarantee that only one goroutine at a time calls its functions.
type blockDiskStore struct {
codec kbfscodec.Codec
dir string
}
// filesPerBlockMax is an upper bound for the number of files
// (including directories) to store one block: 4 for the regular
// files, 2 for the (splayed) directories, and 1 for the journal
// entry.
const filesPerBlockMax = 7
// makeBlockDiskStore returns a new blockDiskStore for the given
// directory.
func makeBlockDiskStore(codec kbfscodec.Codec, dir string) *blockDiskStore {
return &blockDiskStore{
codec: codec,
dir: dir,
}
}
// The functions below are for building various paths.
func (s *blockDiskStore) blockPath(id kbfsblock.ID) string {
// Truncate to 34 characters, which corresponds to 16 random
// bytes (since the first byte is a hash type) or 128 random
// bits, which means that the expected number of blocks
// generated before getting a path collision is 2^64 (see
// https://en.wikipedia.org/wiki/Birthday_problem#Cast_as_a_collision_problem
// ).
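// For example, an ID whose string form begins "0100abcd..." ends up
// under dir/0100/abcd... (the next 30 characters).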
idStr := id.String()
return filepath.Join(s.dir, idStr[:4], idStr[4:34])
}
func (s *blockDiskStore) dataPath(id kbfsblock.ID) string {
return filepath.Join(s.blockPath(id), "data")
}
const idFilename = "id"
func (s *blockDiskStore) idPath(id kbfsblock.ID) string {
return filepath.Join(s.blockPath(id), idFilename)
}
func (s *blockDiskStore) keyServerHalfPath(id kbfsblock.ID) string {
return filepath.Join(s.blockPath(id), "ksh")
}
func (s *blockDiskStore) infoPath(id kbfsblock.ID) string {
// TODO: change the file name to "info" the next time we change the
// journal layout.
return filepath.Join(s.blockPath(id), "refs")
}
// makeDir makes the directory for the given block ID and writes the
// ID file, if necessary.
func (s *blockDiskStore) makeDir(id kbfsblock.ID) error {
err := ioutil.MkdirAll(s.blockPath(id), 0700)
if err != nil {
return err
}
// TODO: Only write if the file doesn't exist.
return ioutil.WriteFile(s.idPath(id), []byte(id.String()), 0600)
}
// blockJournalInfo contains info about a particular block in the
// journal, such as the set of references to it.
type blockJournalInfo struct {
Refs blockRefMap
Flushed bool `codec:"f,omitempty"`
codec.UnknownFieldSetHandler
}
// TODO: Add caching for refs
// getInfo returns the stored info (including references) for the given ID.
func (s *blockDiskStore) getInfo(id kbfsblock.ID) (blockJournalInfo, error) {
var info blockJournalInfo
err := kbfscodec.DeserializeFromFile(s.codec, s.infoPath(id), &info)
if !ioutil.IsNotExist(err) && err != nil {
return blockJournalInfo{}, err
}
if info.Refs == nil {
info.Refs = make(blockRefMap)
}
return info, nil
}
// putInfo stores the given info for the given ID.
func (s *blockDiskStore) putInfo(id kbfsblock.ID, info blockJournalInfo) error {
return kbfscodec.SerializeToFile(s.codec, info, s.infoPath(id))
}
// addRefs adds references for the given contexts to the given ID, all
// with the same status and tag.
func (s *blockDiskStore) addRefs(id kbfsblock.ID, contexts []kbfsblock.Context,
status blockRefStatus, tag string) error {
info, err := s.getInfo(id)
if err != nil {
return err
}
if len(info.Refs) > 0 {
// Check existing contexts, if any.
for _, context := range contexts {
_, err := info.Refs.checkExists(context)
if err != nil {
return err
}
}
}
for _, context := range contexts {
err = info.Refs.put(context, status, tag)
if err != nil {
return err
}
}
return s.putInfo(id, info)
}
// getData returns the data and server half for the given ID, if
// present.
func (s *blockDiskStore) getData(id kbfsblock.ID) (
[]byte, kbfscrypto.BlockCryptKeyServerHalf, error) {
data, err := ioutil.ReadFile(s.dataPath(id))
if ioutil.IsNotExist(err) {
return nil, kbfscrypto.BlockCryptKeyServerHalf{},
blockNonExistentError{id}
} else if err != nil {
return nil, kbfscrypto.BlockCryptKeyServerHalf{}, err
}
keyServerHalfPath := s.keyServerHalfPath(id)
buf, err := ioutil.ReadFile(keyServerHalfPath)
if ioutil.IsNotExist(err) {
return nil, kbfscrypto.BlockCryptKeyServerHalf{},
blockNonExistentError{id}
} else if err != nil {
return nil, kbfscrypto.BlockCryptKeyServerHalf{}, err
}
// Check integrity.
err = kbfsblock.VerifyID(data, id)
if err != nil {
return nil, kbfscrypto.BlockCryptKeyServerHalf{}, err
}
var serverHalf kbfscrypto.BlockCryptKeyServerHalf
err = serverHalf.UnmarshalBinary(buf)
if err != nil {
return nil, kbfscrypto.BlockCryptKeyServerHalf{}, err
}
return data, serverHalf, nil
}
// All functions below are public functions.
func (s *blockDiskStore) hasAnyRef(id kbfsblock.ID) (bool, error) {
info, err := s.getInfo(id)
if err != nil {
return false, err
}
return len(info.Refs) > 0, nil
}
func (s *blockDiskStore) hasNonArchivedRef(id kbfsblock.ID) (bool, error) {
info, err := s.getInfo(id)
if err != nil {
return false, err
}
return info.Refs.hasNonArchivedRef(), nil
}
func (s *blockDiskStore) hasContext(id kbfsblock.ID, context kbfsblock.Context) (
bool, error) {
info, err := s.getInfo(id)
if err != nil {
return false, err
}
return info.Refs.checkExists(context)
}
func (s *blockDiskStore) hasData(id kbfsblock.ID) (bool, error) {
_, err := ioutil.Stat(s.dataPath(id))
if ioutil.IsNotExist(err) {
return false, nil
} else if err != nil {
return false, err
}
return true, nil
}
func (s *blockDiskStore) isUnflushed(id kbfsblock.ID) (bool, error) {
ok, err := s.hasData(id)
if err != nil {
return false, err
}
if !ok {
return false, nil
}
// The data is there; has it been flushed?
info, err := s.getInfo(id)
if err != nil {
return false, err
}
return !info.Flushed, nil
}
func (s *blockDiskStore) markFlushed(id kbfsblock.ID) error {
info, err := s.getInfo(id)
if err != nil {
return err
}
info.Flushed = true
return s.putInfo(id, info)
}
func (s *blockDiskStore) getDataSize(id kbfsblock.ID) (int64, error) {
fi, err := ioutil.Stat(s.dataPath(id))
if ioutil.IsNotExist(err) {
return 0, nil
} else if err != nil {
return 0, err
}
return fi.Size(), nil
}
func (s *blockDiskStore) getDataWithContext(id kbfsblock.ID, context kbfsblock.Context) (
[]byte, kbfscrypto.BlockCryptKeyServerHalf, error) {
hasContext, err := s.hasContext(id, context)
if err != nil {
return nil, kbfscrypto.BlockCryptKeyServerHalf{}, err
}
if !hasContext {
return nil, kbfscrypto.BlockCryptKeyServerHalf{},
blockNonExistentError{id}
}
return s.getData(id)
}
func (s *blockDiskStore) getAllRefsForTest() (map[kbfsblock.ID]blockRefMap, error) {
res := make(map[kbfsblock.ID]blockRefMap)
fileInfos, err := ioutil.ReadDir(s.dir)
if ioutil.IsNotExist(err) {
return res, nil
} else if err != nil {
return nil, err
}
for _, fi := range fileInfos {
name := fi.Name()
if !fi.IsDir() {
return nil, errors.Errorf("Unexpected non-dir %q", name)
}
subFileInfos, err := ioutil.ReadDir(filepath.Join(s.dir, name))
if err != nil {
return nil, err
}
for _, sfi := range subFileInfos {
subName := sfi.Name()
if !sfi.IsDir() {
return nil, errors.Errorf("Unexpected non-dir %q",
subName)
}
idPath := filepath.Join(
s.dir, name, subName, idFilename)
idBytes, err := ioutil.ReadFile(idPath)
if err != nil {
return nil, err
}
id, err := kbfsblock.IDFromString(string(idBytes))
if err != nil {
return nil, errors.WithStack(err)
}
if !strings.HasPrefix(id.String(), name+subName) {
return nil, errors.Errorf(
"%q unexpectedly not a prefix of %q",
name+subName, id.String())
}
info, err := s.getInfo(id)
if err != nil {
return nil, err
}
if len(info.Refs) > 0 {
res[id] = info.Refs
}
}
}
return res, nil
}
// put puts the given data for the block, which may already exist, and
// adds a reference for the given context. If err is nil, putData
// indicates whether the data didn't already exist and was put; if
// false, it means that the data already exists, but this might have
// added a new ref.
func (s *blockDiskStore) put(id kbfsblock.ID, context kbfsblock.Context,
buf []byte, serverHalf kbfscrypto.BlockCryptKeyServerHalf,
tag string) (putData bool, err error) {
err = validateBlockPut(id, context, buf)
if err != nil {
return false, err
}
// Check the data and retrieve the server half, if they exist.
_, existingServerHalf, err := s.getDataWithContext(id, context)
var exists bool
switch err.(type) {
case blockNonExistentError:
exists = false
case nil:
exists = true
default:
return false, err
}
if exists {
// If the entry already exists, everything should be
// the same, except for possibly additional
// references.
// We checked that both buf and the existing data hash
// to id, so no need to check that they're both equal.
if existingServerHalf != serverHalf {
return false, errors.Errorf(
"key server half mismatch: expected %s, got %s",
existingServerHalf, serverHalf)
}
} else {
err = s.makeDir(id)
if err != nil {
return false, err
}
err = ioutil.WriteFile(s.dataPath(id), buf, 0600)
if err != nil {
return false, err
}
// TODO: Add integrity-checking for key server half?
data, err := serverHalf.MarshalBinary()
if err != nil {
return false, err
}
err = ioutil.WriteFile(s.keyServerHalfPath(id), data, 0600)
if err != nil {
return false, err
}
}
err = s.addRefs(id, []kbfsblock.Context{context}, liveBlockRef, tag)
if err != nil {
return false, err
}
return !exists, nil
}
func (s *blockDiskStore) addReference(
id kbfsblock.ID, context kbfsblock.Context, tag string) error {
err := s.makeDir(id)
if err != nil {
return err
}
return s.addRefs(id, []kbfsblock.Context{context}, liveBlockRef, tag)
}
func (s *blockDiskStore) archiveReferences(
contexts kbfsblock.ContextMap, tag string) error {
for id, idContexts := range contexts {
err := s.makeDir(id)
if err != nil {
return err
}
err = s.addRefs(id, idContexts, archivedBlockRef, tag)
if err != nil {
return err
}
}
return nil
}
// removeReferences removes references for the given contexts from
// their respective IDs. If tag is non-empty, then a reference will be
// removed only if its most recent tag (passed in to addRefs) matches
// the given one.
func (s *blockDiskStore) removeReferences(
id kbfsblock.ID, contexts []kbfsblock.Context, tag string) (
liveCount int, err error) {
info, err := s.getInfo(id)
if err != nil {
return 0, err
}
if len(info.Refs) == 0 {
return 0, nil
}
for _, context := range contexts {
err := info.Refs.remove(context, tag)
if err != nil {
return 0, err
}
if len(info.Refs) == 0 {
break
}
}
err = s.putInfo(id, info)
if err != nil {
return 0, err
}
return len(info.Refs), nil
}
// remove removes any existing data for the given ID, which must not
// have any references left.
func (s *blockDiskStore) remove(id kbfsblock.ID) error {
hasAnyRef, err := s.hasAnyRef(id)
if err != nil {
return err
}
if hasAnyRef {
return errors.Errorf(
"Trying to remove data for referenced block %s", id)
}
path := s.blockPath(id)
err = ioutil.RemoveAll(path)
if err != nil {
return err
}
// Remove the parent (splayed) directory if it exists and is
// empty.
err = ioutil.Remove(filepath.Dir(path))
if ioutil.IsNotExist(err) || ioutil.IsExist(err) {
err = nil
}
return err
}
| 1 | 16,071 | Looks like this is unused? Did you mean to call it when clearing the block journal? | keybase-kbfs | go |
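A hedged sketch of where clear() might get wired in, per the comment. The blockJournal type and its s field are assumptions here, not part of this diff:

  // Hypothetical caller in block_journal.go: drop all on-disk block
  // data once the journal's entries have been cleared.
  func (j *blockJournal) clearDiskStore() error {
  	return j.s.clear()
  }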
@@ -117,8 +117,10 @@ public class ExecutorManager extends EventHandler implements
public ExecutorManager(Props props, ExecutorLoader loader,
Map<String, Alerter> alters) throws ExecutorManagerException {
+ alerters = alters;
azkProps = props;
this.executorLoader = loader;
+
this.setupExecutors();
this.loadRunningFlows();
| 1 | /*
* Copyright 2014 LinkedIn Corp.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package azkaban.executor;
import java.io.File;
import java.io.IOException;
import java.lang.Thread.State;
import java.net.URI;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.TreeMap;
import java.util.concurrent.Callable;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import org.apache.commons.lang.StringUtils;
import org.apache.log4j.Logger;
import org.joda.time.DateTime;
import azkaban.alert.Alerter;
import azkaban.event.Event;
import azkaban.event.Event.Type;
import azkaban.event.EventHandler;
import azkaban.executor.selector.ExecutorComparator;
import azkaban.executor.selector.ExecutorFilter;
import azkaban.executor.selector.ExecutorSelector;
import azkaban.project.Project;
import azkaban.project.ProjectWhitelist;
import azkaban.scheduler.ScheduleStatisticManager;
import azkaban.utils.FileIOUtils.JobMetaData;
import azkaban.utils.FileIOUtils.LogData;
import azkaban.utils.JSONUtils;
import azkaban.utils.Pair;
import azkaban.utils.Props;
/**
 * Executor manager used to manage client-side job executions.
*
*/
public class ExecutorManager extends EventHandler implements
ExecutorManagerAdapter {
static final String AZKABAN_EXECUTOR_SELECTOR_FILTERS =
"azkaban.executorselector.filters";
static final String AZKABAN_EXECUTOR_SELECTOR_COMPARATOR_PREFIX =
"azkaban.executorselector.comparator.";
static final String AZKABAN_QUEUEPROCESSING_ENABLED =
"azkaban.queueprocessing.enabled";
static final String AZKABAN_USE_MULTIPLE_EXECUTORS =
"azkaban.use.multiple.executors";
private static final String AZKABAN_WEBSERVER_QUEUE_SIZE =
"azkaban.webserver.queue.size";
private static final String AZKABAN_ACTIVE_EXECUTOR_REFRESHINTERVAL_IN_MS =
"azkaban.activeexecutor.refresh.milisecinterval";
private static final String AZKABAN_ACTIVE_EXECUTOR_REFRESHINTERVAL_IN_NUM_FLOW =
"azkaban.activeexecutor.refresh.flowinterval";
private static final String AZKABAN_EXECUTORINFO_REFRESH_MAX_THREADS =
"azkaban.executorinfo.refresh.maxThreads";
private static Logger logger = Logger.getLogger(ExecutorManager.class);
private ExecutorLoader executorLoader;
private CleanerThread cleanerThread;
private ConcurrentHashMap<Integer, Pair<ExecutionReference, ExecutableFlow>> runningFlows =
new ConcurrentHashMap<Integer, Pair<ExecutionReference, ExecutableFlow>>();
private ConcurrentHashMap<Integer, ExecutableFlow> recentlyFinished =
new ConcurrentHashMap<Integer, ExecutableFlow>();
QueuedExecutions queuedFlows;
final private Set<Executor> activeExecutors = new HashSet<Executor>();
private QueueProcessorThread queueProcessor;
private ExecutingManagerUpdaterThread executingManager;
// 12 weeks
private static final long DEFAULT_EXECUTION_LOGS_RETENTION_MS = 3 * 4 * 7
* 24 * 60 * 60 * 1000l;
private long lastCleanerThreadCheckTime = -1;
private long lastThreadCheckTime = -1;
private String updaterStage = "not started";
private Map<String, Alerter> alerters;
File cacheDir;
private final Props azkProps;
private List<String> filterList;
private Map<String, Integer> comparatorWeightsMap;
private long lastSuccessfulExecutorInfoRefresh;
private ExecutorService executorInforRefresherService;
public ExecutorManager(Props props, ExecutorLoader loader,
Map<String, Alerter> alters) throws ExecutorManagerException {
azkProps = props;
this.executorLoader = loader;
this.setupExecutors();
this.loadRunningFlows();
queuedFlows =
new QueuedExecutions(props.getLong(AZKABAN_WEBSERVER_QUEUE_SIZE, 100000));
this.loadQueuedFlows();
alerters = alters;
cacheDir = new File(props.getString("cache.directory", "cache"));
executingManager = new ExecutingManagerUpdaterThread();
executingManager.start();
if(isMultiExecutorMode()) {
setupMultiExecutorMode();
}
long executionLogsRetentionMs =
props.getLong("execution.logs.retention.ms",
DEFAULT_EXECUTION_LOGS_RETENTION_MS);
cleanerThread = new CleanerThread(executionLogsRetentionMs);
cleanerThread.start();
}
private void setupMultiExecutorMode() {
    // initialize hard filters for executor selector from azkaban.properties
String filters = azkProps.getString(AZKABAN_EXECUTOR_SELECTOR_FILTERS, "");
if (filters != null) {
filterList = Arrays.asList(StringUtils.split(filters, ","));
}
    // initialize comparator feature weights for executor selector from
// azkaban.properties
Map<String, String> compListStrings =
azkProps.getMapByPrefix(AZKABAN_EXECUTOR_SELECTOR_COMPARATOR_PREFIX);
if (compListStrings != null) {
comparatorWeightsMap = new TreeMap<String, Integer>();
for (Map.Entry<String, String> entry : compListStrings.entrySet()) {
comparatorWeightsMap.put(entry.getKey(), Integer.valueOf(entry.getValue()));
}
}
executorInforRefresherService =
Executors.newFixedThreadPool(azkProps.getInt(
AZKABAN_EXECUTORINFO_REFRESH_MAX_THREADS, 5));
// configure queue processor
queueProcessor =
new QueueProcessorThread(azkProps.getBoolean(
AZKABAN_QUEUEPROCESSING_ENABLED, true), azkProps.getLong(
AZKABAN_ACTIVE_EXECUTOR_REFRESHINTERVAL_IN_MS, 1000), azkProps.getInt(
AZKABAN_ACTIVE_EXECUTOR_REFRESHINTERVAL_IN_NUM_FLOW, 1000));
queueProcessor.start();
}
/**
*
* {@inheritDoc}
* @see azkaban.executor.ExecutorManagerAdapter#setupExecutors()
*/
@Override
public void setupExecutors() throws ExecutorManagerException {
Set<Executor> newExecutors = new HashSet<Executor>();
if (isMultiExecutorMode()) {
logger.info("Initializing multi executors from database");
newExecutors.addAll(executorLoader.fetchActiveExecutors());
} else if (azkProps.containsKey("executor.port")) {
// Add local executor, if specified as per properties
String executorHost = azkProps.getString("executor.host", "localhost");
int executorPort = azkProps.getInt("executor.port");
logger.info(String.format("Initializing local executor %s:%d",
executorHost, executorPort));
Executor executor =
executorLoader.fetchExecutor(executorHost, executorPort);
if (executor == null) {
executor = executorLoader.addExecutor(executorHost, executorPort);
} else if (!executor.isActive()) {
executor.setActive(true);
executorLoader.updateExecutor(executor);
}
newExecutors.add(new Executor(executor.getId(), executorHost,
executorPort, true));
}
if (newExecutors.isEmpty()) {
logger.error("No active executor found");
throw new ExecutorManagerException("No active executor found");
} else if(newExecutors.size() > 1 && !isMultiExecutorMode()) {
logger.error("Multiple local executors specified");
throw new ExecutorManagerException("Multiple local executors specified");
} else {
// clear all active executors, only if we have at least one new active
// executors
activeExecutors.clear();
activeExecutors.addAll(newExecutors);
}
}
private boolean isMultiExecutorMode() {
return azkProps.getBoolean(AZKABAN_USE_MULTIPLE_EXECUTORS, false);
}
/**
   * Refresh Executor stats for all the active executors in this executorManager
*/
private void refreshExecutors() {
synchronized (activeExecutors) {
List<Pair<Executor, Future<String>>> futures =
new ArrayList<Pair<Executor, Future<String>>>();
for (final Executor executor : activeExecutors) {
        // submit one refresh task per executor to fetch its ExecutorInfo
Future<String> fetchExecutionInfo =
executorInforRefresherService.submit(new Callable<String>() {
@Override
public String call() throws Exception {
return callExecutorForJsonString(executor.getHost(),
executor.getPort(), "/serverstastics", null);
}
});
futures.add(new Pair<Executor, Future<String>>(executor,
fetchExecutionInfo));
}
boolean wasSuccess = true;
for (Pair<Executor, Future<String>> refreshPair : futures) {
Executor executor = refreshPair.getFirst();
try {
// max 5 secs
String jsonString = refreshPair.getSecond().get(5, TimeUnit.SECONDS);
executor.setExecutorInfo(ExecutorInfo.fromJSONString(jsonString));
logger.info("Successfully refreshed ExecutorInfo for executor: "
+ executor);
} catch (TimeoutException e) {
wasSuccess = false;
logger.error("Timed out while waiting for ExecutorInfo refresh"
+ executor, e);
} catch (Exception e) {
wasSuccess = false;
logger.error("Failed to update ExecutorInfo for executor : "
+ executor, e);
}
}
// update is successful for all executors
if (wasSuccess) {
lastSuccessfulExecutorInfoRefresh = System.currentTimeMillis();
}
}
}
/**
* Throws exception if running in local mode
* {@inheritDoc}
* @see azkaban.executor.ExecutorManagerAdapter#disableQueueProcessorThread()
*/
@Override
public void disableQueueProcessorThread() throws ExecutorManagerException {
if (isMultiExecutorMode()) {
queueProcessor.setActive(false);
} else {
throw new ExecutorManagerException(
"Cannot disable QueueProcessor in local mode");
}
}
/**
* Throws exception if running in local mode
* {@inheritDoc}
* @see azkaban.executor.ExecutorManagerAdapter#enableQueueProcessorThread()
*/
@Override
public void enableQueueProcessorThread() throws ExecutorManagerException {
if (isMultiExecutorMode()) {
queueProcessor.setActive(true);
} else {
throw new ExecutorManagerException(
"Cannot enable QueueProcessor in local mode");
}
}
public State getQueueProcessorThreadState() {
if (isMultiExecutorMode())
return queueProcessor.getState();
else
return State.NEW; // not started in local mode
}
/**
   * Returns state of QueueProcessor. False: no flow is being dispatched.
   * True: flows are being dispatched as expected.
*
* @return
*/
public boolean isQueueProcessorThreadActive() {
if (isMultiExecutorMode())
return queueProcessor.isActive();
else
return false;
}
/**
   * Return the time of the last successful ExecutorInfo refresh for all active executors
*
* @return
*/
public long getLastSuccessfulExecutorInfoRefresh() {
return this.lastSuccessfulExecutorInfoRefresh;
}
/**
* Get currently supported Comparators available to use via azkaban.properties
*
* @return
*/
public Set<String> getAvailableExecutorComparatorNames() {
return ExecutorComparator.getAvailableComparatorNames();
}
/**
* Get currently supported filters available to use via azkaban.properties
*
* @return
*/
public Set<String> getAvailableExecutorFilterNames() {
return ExecutorFilter.getAvailableFilterNames();
}
@Override
public State getExecutorManagerThreadState() {
return executingManager.getState();
}
public String getExecutorThreadStage() {
return updaterStage;
}
@Override
public boolean isExecutorManagerThreadActive() {
return executingManager.isAlive();
}
@Override
public long getLastExecutorManagerThreadCheckTime() {
return lastThreadCheckTime;
}
public long getLastCleanerThreadCheckTime() {
return this.lastCleanerThreadCheckTime;
}
@Override
public Collection<Executor> getAllActiveExecutors() {
return Collections.unmodifiableCollection(activeExecutors);
}
/**
*
* {@inheritDoc}
*
* @see azkaban.executor.ExecutorManagerAdapter#fetchExecutor(int)
*/
@Override
public Executor fetchExecutor(int executorId) throws ExecutorManagerException {
for (Executor executor : activeExecutors) {
if (executor.getId() == executorId) {
return executor;
}
}
return executorLoader.fetchExecutor(executorId);
}
@Override
public Set<String> getPrimaryServerHosts() {
// Only one for now. More probably later.
HashSet<String> ports = new HashSet<String>();
for (Executor executor : activeExecutors) {
ports.add(executor.getHost() + ":" + executor.getPort());
}
return ports;
}
@Override
public Set<String> getAllActiveExecutorServerHosts() {
// Includes non primary server/hosts
HashSet<String> ports = new HashSet<String>();
for (Executor executor : activeExecutors) {
ports.add(executor.getHost() + ":" + executor.getPort());
}
    // include executors which were initially active and still have flows running
for (Pair<ExecutionReference, ExecutableFlow> running : runningFlows
.values()) {
ExecutionReference ref = running.getFirst();
ports.add(ref.getHost() + ":" + ref.getPort());
}
return ports;
}
private void loadRunningFlows() throws ExecutorManagerException {
runningFlows.putAll(executorLoader.fetchActiveFlows());
// Finalize all flows which were running on an executor which is now
// inactive
for (Pair<ExecutionReference, ExecutableFlow> pair : runningFlows.values()) {
if (!activeExecutors.contains(pair.getFirst().getExecutor())) {
finalizeFlows(pair.getSecond());
}
}
}
/*
   * load queued flows, i.e. those with an active_execution_reference and not
   * assigned to any executor
*/
private void loadQueuedFlows() throws ExecutorManagerException {
List<Pair<ExecutionReference, ExecutableFlow>> retrievedExecutions =
executorLoader.fetchQueuedFlows();
if (retrievedExecutions != null) {
for (Pair<ExecutionReference, ExecutableFlow> pair : retrievedExecutions) {
queuedFlows.enqueue(pair.getSecond(), pair.getFirst());
}
}
}
/**
* Gets a list of all the active (running flows and non-dispatched flows)
* executions for a given project and flow {@inheritDoc}
*
* @see azkaban.executor.ExecutorManagerAdapter#getRunningFlows(int,
* java.lang.String)
*/
@Override
public List<Integer> getRunningFlows(int projectId, String flowId) {
List<Integer> executionIds = new ArrayList<Integer>();
executionIds.addAll(getRunningFlowsHelper(projectId, flowId,
queuedFlows.getAllEntries()));
executionIds.addAll(getRunningFlowsHelper(projectId, flowId,
runningFlows.values()));
return executionIds;
}
/* Helper method for getRunningFlows */
private List<Integer> getRunningFlowsHelper(int projectId, String flowId,
Collection<Pair<ExecutionReference, ExecutableFlow>> collection) {
List<Integer> executionIds = new ArrayList<Integer>();
for (Pair<ExecutionReference, ExecutableFlow> ref : collection) {
if (ref.getSecond().getFlowId().equals(flowId)
&& ref.getSecond().getProjectId() == projectId) {
executionIds.add(ref.getFirst().getExecId());
}
}
return executionIds;
}
/**
*
* {@inheritDoc}
*
* @see azkaban.executor.ExecutorManagerAdapter#getActiveFlowsWithExecutor()
*/
@Override
public List<Pair<ExecutableFlow, Executor>> getActiveFlowsWithExecutor()
throws IOException {
List<Pair<ExecutableFlow, Executor>> flows =
new ArrayList<Pair<ExecutableFlow, Executor>>();
getActiveFlowsWithExecutorHelper(flows, queuedFlows.getAllEntries());
getActiveFlowsWithExecutorHelper(flows, runningFlows.values());
return flows;
}
/* Helper method for getActiveFlowsWithExecutor */
private void getActiveFlowsWithExecutorHelper(
List<Pair<ExecutableFlow, Executor>> flows,
Collection<Pair<ExecutionReference, ExecutableFlow>> collection) {
for (Pair<ExecutionReference, ExecutableFlow> ref : collection) {
flows.add(new Pair<ExecutableFlow, Executor>(ref.getSecond(), ref
.getFirst().getExecutor()));
}
}
/**
* Checks whether the given flow has an active (running, non-dispatched)
* executions {@inheritDoc}
*
* @see azkaban.executor.ExecutorManagerAdapter#isFlowRunning(int,
* java.lang.String)
*/
@Override
public boolean isFlowRunning(int projectId, String flowId) {
boolean isRunning = false;
isRunning =
isRunning
|| isFlowRunningHelper(projectId, flowId, queuedFlows.getAllEntries());
isRunning =
isRunning
|| isFlowRunningHelper(projectId, flowId, runningFlows.values());
return isRunning;
}
/* Search a running flow in a collection */
private boolean isFlowRunningHelper(int projectId, String flowId,
Collection<Pair<ExecutionReference, ExecutableFlow>> collection) {
for (Pair<ExecutionReference, ExecutableFlow> ref : collection) {
if (ref.getSecond().getProjectId() == projectId
&& ref.getSecond().getFlowId().equals(flowId)) {
return true;
}
}
return false;
}
/**
* Fetch ExecutableFlow from an active (running, non-dispatched) or from
* database {@inheritDoc}
*
* @see azkaban.executor.ExecutorManagerAdapter#getExecutableFlow(int)
*/
@Override
public ExecutableFlow getExecutableFlow(int execId)
throws ExecutorManagerException {
if (runningFlows.containsKey(execId)) {
return runningFlows.get(execId).getSecond();
} else if (queuedFlows.hasExecution(execId)) {
return queuedFlows.getFlow(execId);
} else {
return executorLoader.fetchExecutableFlow(execId);
}
}
/**
* Get all active (running, non-dispatched) flows
*
* {@inheritDoc}
*
* @see azkaban.executor.ExecutorManagerAdapter#getRunningFlows()
*/
@Override
public List<ExecutableFlow> getRunningFlows() {
ArrayList<ExecutableFlow> flows = new ArrayList<ExecutableFlow>();
getActiveFlowHelper(flows, queuedFlows.getAllEntries());
getActiveFlowHelper(flows, runningFlows.values());
return flows;
}
/*
   * Helper method to get all running flows from a Pair<ExecutionReference,
   * ExecutableFlow> collection
*/
private void getActiveFlowHelper(ArrayList<ExecutableFlow> flows,
Collection<Pair<ExecutionReference, ExecutableFlow>> collection) {
for (Pair<ExecutionReference, ExecutableFlow> ref : collection) {
flows.add(ref.getSecond());
}
}
/**
* Get execution Ids of all active (running, non-dispatched) flows
*
* {@inheritDoc}
*
* @see azkaban.executor.ExecutorManagerAdapter#getRunningFlows()
*/
public String getRunningFlowIds() {
List<Integer> allIds = new ArrayList<Integer>();
getRunningFlowsIdsHelper(allIds, queuedFlows.getAllEntries());
getRunningFlowsIdsHelper(allIds, runningFlows.values());
Collections.sort(allIds);
return allIds.toString();
}
/**
* Get execution Ids of all non-dispatched flows
*
* {@inheritDoc}
*
* @see azkaban.executor.ExecutorManagerAdapter#getRunningFlows()
*/
public String getQueuedFlowIds() {
List<Integer> allIds = new ArrayList<Integer>();
getRunningFlowsIdsHelper(allIds, queuedFlows.getAllEntries());
Collections.sort(allIds);
return allIds.toString();
}
  /* Helper method to collect flow ids of all running flows */
private void getRunningFlowsIdsHelper(List<Integer> allIds,
Collection<Pair<ExecutionReference, ExecutableFlow>> collection) {
for (Pair<ExecutionReference, ExecutableFlow> ref : collection) {
allIds.add(ref.getSecond().getExecutionId());
}
}
public List<ExecutableFlow> getRecentlyFinishedFlows() {
return new ArrayList<ExecutableFlow>(recentlyFinished.values());
}
@Override
public List<ExecutableFlow> getExecutableFlows(Project project,
String flowId, int skip, int size) throws ExecutorManagerException {
List<ExecutableFlow> flows =
executorLoader.fetchFlowHistory(project.getId(), flowId, skip, size);
return flows;
}
@Override
public List<ExecutableFlow> getExecutableFlows(int skip, int size)
throws ExecutorManagerException {
List<ExecutableFlow> flows = executorLoader.fetchFlowHistory(skip, size);
return flows;
}
@Override
public List<ExecutableFlow> getExecutableFlows(String flowIdContains,
int skip, int size) throws ExecutorManagerException {
List<ExecutableFlow> flows =
executorLoader.fetchFlowHistory(null, '%' + flowIdContains + '%', null,
0, -1, -1, skip, size);
return flows;
}
@Override
public List<ExecutableFlow> getExecutableFlows(String projContain,
String flowContain, String userContain, int status, long begin, long end,
int skip, int size) throws ExecutorManagerException {
List<ExecutableFlow> flows =
executorLoader.fetchFlowHistory(projContain, flowContain, userContain,
status, begin, end, skip, size);
return flows;
}
@Override
public List<ExecutableJobInfo> getExecutableJobs(Project project,
String jobId, int skip, int size) throws ExecutorManagerException {
List<ExecutableJobInfo> nodes =
executorLoader.fetchJobHistory(project.getId(), jobId, skip, size);
return nodes;
}
@Override
public int getNumberOfJobExecutions(Project project, String jobId)
throws ExecutorManagerException {
return executorLoader.fetchNumExecutableNodes(project.getId(), jobId);
}
@Override
public int getNumberOfExecutions(Project project, String flowId)
throws ExecutorManagerException {
return executorLoader.fetchNumExecutableFlows(project.getId(), flowId);
}
@Override
public LogData getExecutableFlowLog(ExecutableFlow exFlow, int offset,
int length) throws ExecutorManagerException {
Pair<ExecutionReference, ExecutableFlow> pair =
runningFlows.get(exFlow.getExecutionId());
if (pair != null) {
Pair<String, String> typeParam = new Pair<String, String>("type", "flow");
Pair<String, String> offsetParam =
new Pair<String, String>("offset", String.valueOf(offset));
Pair<String, String> lengthParam =
new Pair<String, String>("length", String.valueOf(length));
@SuppressWarnings("unchecked")
Map<String, Object> result =
callExecutorServer(pair.getFirst(), ConnectorParams.LOG_ACTION,
typeParam, offsetParam, lengthParam);
return LogData.createLogDataFromObject(result);
} else {
LogData value =
executorLoader.fetchLogs(exFlow.getExecutionId(), "", 0, offset,
length);
return value;
}
}
@Override
public LogData getExecutionJobLog(ExecutableFlow exFlow, String jobId,
int offset, int length, int attempt) throws ExecutorManagerException {
Pair<ExecutionReference, ExecutableFlow> pair =
runningFlows.get(exFlow.getExecutionId());
if (pair != null) {
Pair<String, String> typeParam = new Pair<String, String>("type", "job");
Pair<String, String> jobIdParam =
new Pair<String, String>("jobId", jobId);
Pair<String, String> offsetParam =
new Pair<String, String>("offset", String.valueOf(offset));
Pair<String, String> lengthParam =
new Pair<String, String>("length", String.valueOf(length));
Pair<String, String> attemptParam =
new Pair<String, String>("attempt", String.valueOf(attempt));
@SuppressWarnings("unchecked")
Map<String, Object> result =
callExecutorServer(pair.getFirst(), ConnectorParams.LOG_ACTION,
typeParam, jobIdParam, offsetParam, lengthParam, attemptParam);
return LogData.createLogDataFromObject(result);
} else {
LogData value =
executorLoader.fetchLogs(exFlow.getExecutionId(), jobId, attempt,
offset, length);
return value;
}
}
@Override
public List<Object> getExecutionJobStats(ExecutableFlow exFlow, String jobId,
int attempt) throws ExecutorManagerException {
Pair<ExecutionReference, ExecutableFlow> pair =
runningFlows.get(exFlow.getExecutionId());
if (pair == null) {
return executorLoader.fetchAttachments(exFlow.getExecutionId(), jobId,
attempt);
}
Pair<String, String> jobIdParam = new Pair<String, String>("jobId", jobId);
Pair<String, String> attemptParam =
new Pair<String, String>("attempt", String.valueOf(attempt));
@SuppressWarnings("unchecked")
Map<String, Object> result =
callExecutorServer(pair.getFirst(), ConnectorParams.ATTACHMENTS_ACTION,
jobIdParam, attemptParam);
@SuppressWarnings("unchecked")
List<Object> jobStats = (List<Object>) result.get("attachments");
return jobStats;
}
@Override
public JobMetaData getExecutionJobMetaData(ExecutableFlow exFlow,
String jobId, int offset, int length, int attempt)
throws ExecutorManagerException {
Pair<ExecutionReference, ExecutableFlow> pair =
runningFlows.get(exFlow.getExecutionId());
if (pair != null) {
Pair<String, String> typeParam = new Pair<String, String>("type", "job");
Pair<String, String> jobIdParam =
new Pair<String, String>("jobId", jobId);
Pair<String, String> offsetParam =
new Pair<String, String>("offset", String.valueOf(offset));
Pair<String, String> lengthParam =
new Pair<String, String>("length", String.valueOf(length));
Pair<String, String> attemptParam =
new Pair<String, String>("attempt", String.valueOf(attempt));
@SuppressWarnings("unchecked")
Map<String, Object> result =
callExecutorServer(pair.getFirst(), ConnectorParams.METADATA_ACTION,
typeParam, jobIdParam, offsetParam, lengthParam, attemptParam);
return JobMetaData.createJobMetaDataFromObject(result);
} else {
return null;
}
}
/**
* if flows was dispatched to an executor, cancel by calling Executor else if
* flow is still in queue, remove from queue and finalize {@inheritDoc}
*
* @see azkaban.executor.ExecutorManagerAdapter#cancelFlow(azkaban.executor.ExecutableFlow,
* java.lang.String)
*/
@Override
public void cancelFlow(ExecutableFlow exFlow, String userId)
throws ExecutorManagerException {
synchronized (exFlow) {
if (runningFlows.containsKey(exFlow.getExecutionId())) {
Pair<ExecutionReference, ExecutableFlow> pair =
runningFlows.get(exFlow.getExecutionId());
callExecutorServer(pair.getFirst(), ConnectorParams.CANCEL_ACTION,
userId);
} else if (queuedFlows.hasExecution(exFlow.getExecutionId())) {
queuedFlows.dequeue(exFlow.getExecutionId());
finalizeFlows(exFlow);
} else {
throw new ExecutorManagerException("Execution "
+ exFlow.getExecutionId() + " of flow " + exFlow.getFlowId()
+ " isn't running.");
}
}
}
@Override
public void resumeFlow(ExecutableFlow exFlow, String userId)
throws ExecutorManagerException {
synchronized (exFlow) {
Pair<ExecutionReference, ExecutableFlow> pair =
runningFlows.get(exFlow.getExecutionId());
if (pair == null) {
throw new ExecutorManagerException("Execution "
+ exFlow.getExecutionId() + " of flow " + exFlow.getFlowId()
+ " isn't running.");
}
callExecutorServer(pair.getFirst(), ConnectorParams.RESUME_ACTION, userId);
}
}
@Override
public void pauseFlow(ExecutableFlow exFlow, String userId)
throws ExecutorManagerException {
synchronized (exFlow) {
Pair<ExecutionReference, ExecutableFlow> pair =
runningFlows.get(exFlow.getExecutionId());
if (pair == null) {
throw new ExecutorManagerException("Execution "
+ exFlow.getExecutionId() + " of flow " + exFlow.getFlowId()
+ " isn't running.");
}
callExecutorServer(pair.getFirst(), ConnectorParams.PAUSE_ACTION, userId);
}
}
@Override
public void pauseExecutingJobs(ExecutableFlow exFlow, String userId,
String... jobIds) throws ExecutorManagerException {
modifyExecutingJobs(exFlow, ConnectorParams.MODIFY_PAUSE_JOBS, userId,
jobIds);
}
@Override
public void resumeExecutingJobs(ExecutableFlow exFlow, String userId,
String... jobIds) throws ExecutorManagerException {
modifyExecutingJobs(exFlow, ConnectorParams.MODIFY_RESUME_JOBS, userId,
jobIds);
}
@Override
public void retryFailures(ExecutableFlow exFlow, String userId)
throws ExecutorManagerException {
modifyExecutingJobs(exFlow, ConnectorParams.MODIFY_RETRY_FAILURES, userId);
}
@Override
public void retryExecutingJobs(ExecutableFlow exFlow, String userId,
String... jobIds) throws ExecutorManagerException {
modifyExecutingJobs(exFlow, ConnectorParams.MODIFY_RETRY_JOBS, userId,
jobIds);
}
@Override
public void disableExecutingJobs(ExecutableFlow exFlow, String userId,
String... jobIds) throws ExecutorManagerException {
modifyExecutingJobs(exFlow, ConnectorParams.MODIFY_DISABLE_JOBS, userId,
jobIds);
}
@Override
public void enableExecutingJobs(ExecutableFlow exFlow, String userId,
String... jobIds) throws ExecutorManagerException {
modifyExecutingJobs(exFlow, ConnectorParams.MODIFY_ENABLE_JOBS, userId,
jobIds);
}
@Override
public void cancelExecutingJobs(ExecutableFlow exFlow, String userId,
String... jobIds) throws ExecutorManagerException {
modifyExecutingJobs(exFlow, ConnectorParams.MODIFY_CANCEL_JOBS, userId,
jobIds);
}
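  /*
   * Forwards a modify-execution command (pause/resume/retry/cancel and
   * friends) to the executor hosting the flow, validating any supplied
   * job ids against the flow before making the call.
   */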
@SuppressWarnings("unchecked")
private Map<String, Object> modifyExecutingJobs(ExecutableFlow exFlow,
String command, String userId, String... jobIds)
throws ExecutorManagerException {
synchronized (exFlow) {
Pair<ExecutionReference, ExecutableFlow> pair =
runningFlows.get(exFlow.getExecutionId());
if (pair == null) {
throw new ExecutorManagerException("Execution "
+ exFlow.getExecutionId() + " of flow " + exFlow.getFlowId()
+ " isn't running.");
}
Map<String, Object> response = null;
if (jobIds != null && jobIds.length > 0) {
for (String jobId : jobIds) {
if (!jobId.isEmpty()) {
ExecutableNode node = exFlow.getExecutableNode(jobId);
if (node == null) {
throw new ExecutorManagerException("Job " + jobId
+ " doesn't exist in execution " + exFlow.getExecutionId()
+ ".");
}
}
}
String ids = StringUtils.join(jobIds, ',');
response =
callExecutorServer(pair.getFirst(),
ConnectorParams.MODIFY_EXECUTION_ACTION, userId,
new Pair<String, String>(
ConnectorParams.MODIFY_EXECUTION_ACTION_TYPE, command),
new Pair<String, String>(ConnectorParams.MODIFY_JOBS_LIST, ids));
} else {
response =
callExecutorServer(pair.getFirst(),
ConnectorParams.MODIFY_EXECUTION_ACTION, userId,
new Pair<String, String>(
ConnectorParams.MODIFY_EXECUTION_ACTION_TYPE, command));
}
return response;
}
}
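  /*
   * Recursively marks the listed jobs DISABLED on the flow; nested entries
   * (maps with "id" and "children") descend into embedded sub-flows.
   */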
private void applyDisabledJobs(List<Object> disabledJobs,
ExecutableFlowBase exflow) {
for (Object disabled : disabledJobs) {
if (disabled instanceof String) {
String nodeName = (String) disabled;
ExecutableNode node = exflow.getExecutableNode(nodeName);
if (node != null) {
node.setStatus(Status.DISABLED);
}
} else if (disabled instanceof Map) {
@SuppressWarnings("unchecked")
Map<String, Object> nestedDisabled = (Map<String, Object>) disabled;
String nodeName = (String) nestedDisabled.get("id");
@SuppressWarnings("unchecked")
List<Object> subDisabledJobs =
(List<Object>) nestedDisabled.get("children");
if (nodeName == null || subDisabledJobs == null) {
return;
}
ExecutableNode node = exflow.getExecutableNode(nodeName);
if (node != null && node instanceof ExecutableFlowBase) {
applyDisabledJobs(subDisabledJobs, (ExecutableFlowBase) node);
}
}
}
}
@Override
public String submitExecutableFlow(ExecutableFlow exflow, String userId)
throws ExecutorManagerException {
synchronized (exflow) {
String flowId = exflow.getFlowId();
logger.info("Submitting execution flow " + flowId + " by " + userId);
String message = "";
if (queuedFlows.isFull()) {
message =
String
.format(
"Failed to submit %s for project %s. Azkaban has overrun its webserver queue capacity",
flowId, exflow.getProjectName());
logger.error(message);
} else {
int projectId = exflow.getProjectId();
exflow.setSubmitUser(userId);
exflow.setSubmitTime(System.currentTimeMillis());
List<Integer> running = getRunningFlows(projectId, flowId);
ExecutionOptions options = exflow.getExecutionOptions();
if (options == null) {
options = new ExecutionOptions();
}
if (options.getDisabledJobs() != null) {
applyDisabledJobs(options.getDisabledJobs(), exflow);
}
if (!running.isEmpty()) {
if (options.getConcurrentOption().equals(
ExecutionOptions.CONCURRENT_OPTION_PIPELINE)) {
Collections.sort(running);
Integer runningExecId = running.get(running.size() - 1);
options.setPipelineExecutionId(runningExecId);
message =
"Flow " + flowId + " is already running with exec id "
+ runningExecId + ". Pipelining level "
+ options.getPipelineLevel() + ". \n";
} else if (options.getConcurrentOption().equals(
ExecutionOptions.CONCURRENT_OPTION_SKIP)) {
throw new ExecutorManagerException("Flow " + flowId
+ " is already running. Skipping execution.",
ExecutorManagerException.Reason.SkippedExecution);
} else {
          // The setting is to run anyway.
message =
"Flow " + flowId + " is already running with exec id "
+ StringUtils.join(running, ",")
+ ". Will execute concurrently. \n";
}
}
boolean memoryCheck =
!ProjectWhitelist.isProjectWhitelisted(exflow.getProjectId(),
ProjectWhitelist.WhitelistType.MemoryCheck);
options.setMemoryCheck(memoryCheck);
// The exflow id is set by the loader. So it's unavailable until after
// this call.
executorLoader.uploadExecutableFlow(exflow);
// We create an active flow reference in the datastore. If the upload
// fails, we remove the reference.
ExecutionReference reference =
new ExecutionReference(exflow.getExecutionId());
if (isMultiExecutorMode()) {
          // Take the multi-executor route
executorLoader.addActiveExecutableReference(reference);
queuedFlows.enqueue(exflow, reference);
} else {
Executor executor = activeExecutors.iterator().next();
          // assign the only local executor we have
reference.setExecutor(executor);
executorLoader.addActiveExecutableReference(reference);
try {
callExecutorServer(exflow, executor, ConnectorParams.EXECUTE_ACTION);
runningFlows.put(exflow.getExecutionId(),
new Pair<ExecutionReference, ExecutableFlow>(reference, exflow));
} catch (ExecutorManagerException e) {
executorLoader.removeActiveExecutableReference(reference
.getExecId());
throw e;
}
}
message +=
"Execution submitted successfully with exec id "
+ exflow.getExecutionId();
}
return message;
}
}
private void cleanOldExecutionLogs(long millis) {
try {
int count = executorLoader.removeExecutionLogsByTime(millis);
logger.info("Cleaned up " + count + " log entries.");
} catch (ExecutorManagerException e) {
e.printStackTrace();
}
}
private Map<String, Object> callExecutorServer(ExecutableFlow exflow,
Executor executor, String action) throws ExecutorManagerException {
try {
return callExecutorServer(executor.getHost(), executor.getPort(), action,
exflow.getExecutionId(), null, (Pair<String, String>[]) null);
} catch (IOException e) {
throw new ExecutorManagerException(e);
}
}
private Map<String, Object> callExecutorServer(ExecutionReference ref,
String action, String user) throws ExecutorManagerException {
try {
return callExecutorServer(ref.getHost(), ref.getPort(), action,
ref.getExecId(), user, (Pair<String, String>[]) null);
} catch (IOException e) {
throw new ExecutorManagerException(e);
}
}
private Map<String, Object> callExecutorServer(ExecutionReference ref,
String action, Pair<String, String>... params)
throws ExecutorManagerException {
try {
return callExecutorServer(ref.getHost(), ref.getPort(), action,
ref.getExecId(), null, params);
} catch (IOException e) {
throw new ExecutorManagerException(e);
}
}
private Map<String, Object> callExecutorServer(ExecutionReference ref,
String action, String user, Pair<String, String>... params)
throws ExecutorManagerException {
try {
return callExecutorServer(ref.getHost(), ref.getPort(), action,
ref.getExecId(), user, params);
} catch (IOException e) {
throw new ExecutorManagerException(e);
}
}
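  /*
   * Low-level helper that builds the action/execid/user parameter list and
   * issues the REST call to the executor's /executor endpoint, returning the
   * parsed json object map.
   */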
private Map<String, Object> callExecutorServer(String host, int port,
String action, Integer executionId, String user,
Pair<String, String>... params) throws IOException {
List<Pair<String, String>> paramList = new ArrayList<Pair<String,String>>();
    // Copy over any caller-supplied params.
    if (params != null) {
paramList.addAll(Arrays.asList(params));
}
paramList
.add(new Pair<String, String>(ConnectorParams.ACTION_PARAM, action));
paramList.add(new Pair<String, String>(ConnectorParams.EXECID_PARAM, String
.valueOf(executionId)));
paramList.add(new Pair<String, String>(ConnectorParams.USER_PARAM, user));
Map<String, Object> jsonResponse =
callExecutorForJsonObject(host, port, "/executor", paramList);
return jsonResponse;
}
/*
* Helper method used by ExecutorManager to call executor and return json
* object map
*/
private Map<String, Object> callExecutorForJsonObject(String host, int port,
String path, List<Pair<String, String>> paramList) throws IOException {
String responseString =
callExecutorForJsonString(host, port, path, paramList);
@SuppressWarnings("unchecked")
Map<String, Object> jsonResponse =
(Map<String, Object>) JSONUtils.parseJSONFromString(responseString);
String error = (String) jsonResponse.get(ConnectorParams.RESPONSE_ERROR);
if (error != null) {
throw new IOException(error);
}
return jsonResponse;
}
/*
* Helper method used by ExecutorManager to call executor and return raw json
* string
*/
private String callExecutorForJsonString(String host, int port, String path,
List<Pair<String, String>> paramList) throws IOException {
if (paramList == null) {
paramList = new ArrayList<Pair<String, String>>();
}
ExecutorApiClient apiclient = ExecutorApiClient.getInstance();
@SuppressWarnings("unchecked")
URI uri =
ExecutorApiClient.buildUri(host, port, path, true,
paramList.toArray(new Pair[0]));
return apiclient.httpGet(uri, null);
}
/**
* Manage servlet call for stats servlet in Azkaban execution server
* {@inheritDoc}
*
* @throws ExecutorManagerException
*
* @see azkaban.executor.ExecutorManagerAdapter#callExecutorStats(java.lang.String,
* azkaban.utils.Pair[])
*/
@Override
public Map<String, Object> callExecutorStats(int executorId, String action,
Pair<String, String>... params) throws IOException, ExecutorManagerException {
Executor executor = fetchExecutor(executorId);
List<Pair<String, String>> paramList =
new ArrayList<Pair<String, String>>();
    // Copy over any caller-supplied params.
if (params != null) {
paramList.addAll(Arrays.asList(params));
}
paramList
.add(new Pair<String, String>(ConnectorParams.ACTION_PARAM, action));
return callExecutorForJsonObject(executor.getHost(), executor.getPort(),
"/stats", paramList);
}
@Override
public Map<String, Object> callExecutorJMX(String hostPort, String action,
String mBean) throws IOException {
List<Pair<String, String>> paramList =
new ArrayList<Pair<String, String>>();
paramList.add(new Pair<String, String>(action, ""));
if(mBean != null) {
paramList.add(new Pair<String, String>(ConnectorParams.JMX_MBEAN, mBean));
}
String[] hostPortSplit = hostPort.split(":");
return callExecutorForJsonObject(hostPortSplit[0],
Integer.valueOf(hostPortSplit[1]), "/jmx", paramList);
}
@Override
public void shutdown() {
if (isMultiExecutorMode()) {
queueProcessor.shutdown();
}
executingManager.shutdown();
}
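  /*
   * Background thread that periodically polls each executor for status
   * updates on its running flows, evicts flows whose executors have become
   * unresponsive, and finalizes and fires events for finished flows.
   */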
private class ExecutingManagerUpdaterThread extends Thread {
private boolean shutdown = false;
public ExecutingManagerUpdaterThread() {
this.setName("ExecutorManagerUpdaterThread");
}
// 10 mins recently finished threshold.
private long recentlyFinishedLifetimeMs = 600000;
private int waitTimeIdleMs = 2000;
private int waitTimeMs = 500;
    // When we have an HTTP error for a flow, we'll check every 10 secs, six
    // times (1 min total) before we evict it.
private int numErrors = 6;
private long errorThreshold = 10000;
private void shutdown() {
shutdown = true;
}
@SuppressWarnings("unchecked")
public void run() {
while (!shutdown) {
try {
lastThreadCheckTime = System.currentTimeMillis();
updaterStage = "Starting update all flows.";
Map<Executor, List<ExecutableFlow>> exFlowMap =
getFlowToExecutorMap();
ArrayList<ExecutableFlow> finishedFlows =
new ArrayList<ExecutableFlow>();
ArrayList<ExecutableFlow> finalizeFlows =
new ArrayList<ExecutableFlow>();
if (exFlowMap.size() > 0) {
for (Map.Entry<Executor, List<ExecutableFlow>> entry : exFlowMap
.entrySet()) {
List<Long> updateTimesList = new ArrayList<Long>();
List<Integer> executionIdsList = new ArrayList<Integer>();
Executor executor = entry.getKey();
updaterStage =
"Starting update flows on " + executor.getHost() + ":"
+ executor.getPort();
// We pack the parameters of the same host together before we
// query.
fillUpdateTimeAndExecId(entry.getValue(), executionIdsList,
updateTimesList);
Pair<String, String> updateTimes =
new Pair<String, String>(
ConnectorParams.UPDATE_TIME_LIST_PARAM,
JSONUtils.toJSON(updateTimesList));
Pair<String, String> executionIds =
new Pair<String, String>(ConnectorParams.EXEC_ID_LIST_PARAM,
JSONUtils.toJSON(executionIdsList));
Map<String, Object> results = null;
try {
results =
callExecutorServer(executor.getHost(),
executor.getPort(), ConnectorParams.UPDATE_ACTION,
null, null, executionIds, updateTimes);
} catch (IOException e) {
logger.error(e);
for (ExecutableFlow flow : entry.getValue()) {
Pair<ExecutionReference, ExecutableFlow> pair =
runningFlows.get(flow.getExecutionId());
                  // Null-check first: the flow may already have been removed
                  // from runningFlows, and dereferencing pair here would NPE.
                  if (pair != null) {
                    updaterStage =
                        "Failed to get update. Doing some clean up for flow "
                            + pair.getSecond().getExecutionId();
                    ExecutionReference ref = pair.getFirst();
int numErrors = ref.getNumErrors();
if (ref.getNumErrors() < this.numErrors) {
ref.setNextCheckTime(System.currentTimeMillis()
+ errorThreshold);
ref.setNumErrors(++numErrors);
} else {
logger.error("Evicting flow " + flow.getExecutionId()
+ ". The executor is unresponsive.");
// TODO should send out an unresponsive email here.
finalizeFlows.add(pair.getSecond());
}
}
}
}
            // Process the results if we got any.
if (results != null) {
List<Map<String, Object>> executionUpdates =
(List<Map<String, Object>>) results
.get(ConnectorParams.RESPONSE_UPDATED_FLOWS);
for (Map<String, Object> updateMap : executionUpdates) {
try {
ExecutableFlow flow = updateExecution(updateMap);
updaterStage = "Updated flow " + flow.getExecutionId();
if (isFinished(flow)) {
finishedFlows.add(flow);
finalizeFlows.add(flow);
}
} catch (ExecutorManagerException e) {
ExecutableFlow flow = e.getExecutableFlow();
logger.error(e);
if (flow != null) {
logger.error("Finalizing flow " + flow.getExecutionId());
finalizeFlows.add(flow);
}
}
}
}
}
updaterStage = "Evicting old recently finished flows.";
evictOldRecentlyFinished(recentlyFinishedLifetimeMs);
// Add new finished
for (ExecutableFlow flow : finishedFlows) {
if (flow.getScheduleId() >= 0
&& flow.getStatus() == Status.SUCCEEDED) {
ScheduleStatisticManager.invalidateCache(flow.getScheduleId(),
cacheDir);
}
fireEventListeners(Event.create(flow, Type.FLOW_FINISHED));
recentlyFinished.put(flow.getExecutionId(), flow);
}
updaterStage =
"Finalizing " + finalizeFlows.size() + " error flows.";
// Kill error flows
for (ExecutableFlow flow : finalizeFlows) {
finalizeFlows(flow);
}
}
updaterStage = "Updated all active flows. Waiting for next round.";
synchronized (this) {
try {
if (runningFlows.size() > 0) {
this.wait(waitTimeMs);
} else {
this.wait(waitTimeIdleMs);
}
} catch (InterruptedException e) {
}
}
} catch (Exception e) {
logger.error(e);
}
}
}
}
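  /*
   * Force-finalizes a flow: fails any unfinished jobs, persists the terminal
   * state, removes the active reference, and sends the configured alerts and
   * emails.
   */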
private void finalizeFlows(ExecutableFlow flow) {
int execId = flow.getExecutionId();
updaterStage = "finalizing flow " + execId;
// First we check if the execution in the datastore is complete
try {
ExecutableFlow dsFlow;
if (isFinished(flow)) {
dsFlow = flow;
} else {
updaterStage = "finalizing flow " + execId + " loading from db";
dsFlow = executorLoader.fetchExecutableFlow(execId);
// If it's marked finished, we're good. If not, we fail everything and
// then mark it finished.
if (!isFinished(dsFlow)) {
updaterStage = "finalizing flow " + execId + " failing the flow";
failEverything(dsFlow);
executorLoader.updateExecutableFlow(dsFlow);
}
}
updaterStage = "finalizing flow " + execId + " deleting active reference";
// Delete the executing reference.
if (flow.getEndTime() == -1) {
flow.setEndTime(System.currentTimeMillis());
executorLoader.updateExecutableFlow(dsFlow);
}
executorLoader.removeActiveExecutableReference(execId);
updaterStage = "finalizing flow " + execId + " cleaning from memory";
runningFlows.remove(execId);
fireEventListeners(Event.create(dsFlow, Type.FLOW_FINISHED));
recentlyFinished.put(execId, dsFlow);
} catch (ExecutorManagerException e) {
logger.error(e);
}
// TODO append to the flow log that we forced killed this flow because the
// target no longer had
// the reference.
updaterStage = "finalizing flow " + execId + " alerting and emailing";
ExecutionOptions options = flow.getExecutionOptions();
// But we can definitely email them.
Alerter mailAlerter = alerters.get("email");
if (flow.getStatus() == Status.FAILED || flow.getStatus() == Status.KILLED) {
if (options.getFailureEmails() != null
&& !options.getFailureEmails().isEmpty()) {
try {
mailAlerter
.alertOnError(
flow,
"Executor no longer seems to be running this execution. Most likely due to executor bounce.");
} catch (Exception e) {
logger.error(e);
}
}
if (options.getFlowParameters().containsKey("alert.type")) {
String alertType = options.getFlowParameters().get("alert.type");
Alerter alerter = alerters.get(alertType);
if (alerter != null) {
try {
alerter
.alertOnError(
flow,
"Executor no longer seems to be running this execution. Most likely due to executor bounce.");
} catch (Exception e) {
// TODO Auto-generated catch block
e.printStackTrace();
logger.error("Failed to alert by " + alertType);
}
} else {
logger.error("Alerter type " + alertType
+ " doesn't exist. Failed to alert.");
}
}
} else {
if (options.getSuccessEmails() != null
&& !options.getSuccessEmails().isEmpty()) {
try {
mailAlerter.alertOnSuccess(flow);
} catch (Exception e) {
logger.error(e);
}
}
if (options.getFlowParameters().containsKey("alert.type")) {
String alertType = options.getFlowParameters().get("alert.type");
Alerter alerter = alerters.get(alertType);
if (alerter != null) {
try {
alerter.alertOnSuccess(flow);
} catch (Exception e) {
// TODO Auto-generated catch block
e.printStackTrace();
logger.error("Failed to alert by " + alertType);
}
} else {
logger.error("Alerter type " + alertType
+ " doesn't exist. Failed to alert.");
}
}
}
}
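  /*
   * Marks every unfinished node as KILLED (READY nodes) or FAILED (all
   * others), stamps missing start/end times, and sets the flow status to
   * FAILED.
   */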
private void failEverything(ExecutableFlow exFlow) {
long time = System.currentTimeMillis();
for (ExecutableNode node : exFlow.getExecutableNodes()) {
switch (node.getStatus()) {
case SUCCEEDED:
case FAILED:
case KILLED:
case SKIPPED:
case DISABLED:
continue;
// case UNKNOWN:
case READY:
node.setStatus(Status.KILLED);
break;
default:
node.setStatus(Status.FAILED);
break;
}
if (node.getStartTime() == -1) {
node.setStartTime(time);
}
if (node.getEndTime() == -1) {
node.setEndTime(time);
}
}
if (exFlow.getEndTime() == -1) {
exFlow.setEndTime(time);
}
exFlow.setStatus(Status.FAILED);
}
private void evictOldRecentlyFinished(long ageMs) {
ArrayList<Integer> recentlyFinishedKeys =
new ArrayList<Integer>(recentlyFinished.keySet());
long oldAgeThreshold = System.currentTimeMillis() - ageMs;
for (Integer key : recentlyFinishedKeys) {
ExecutableFlow flow = recentlyFinished.get(key);
if (flow.getEndTime() < oldAgeThreshold) {
// Evict
recentlyFinished.remove(key);
}
}
}
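  /*
   * Applies a single status update map from an executor to the corresponding
   * in-memory flow, resetting its error counters and alerting on the first
   * failure when configured to do so.
   */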
private ExecutableFlow updateExecution(Map<String, Object> updateData)
throws ExecutorManagerException {
Integer execId =
(Integer) updateData.get(ConnectorParams.UPDATE_MAP_EXEC_ID);
if (execId == null) {
throw new ExecutorManagerException(
"Response is malformed. Need exec id to update.");
}
Pair<ExecutionReference, ExecutableFlow> refPair =
this.runningFlows.get(execId);
if (refPair == null) {
throw new ExecutorManagerException(
"No running flow found with the execution id. Removing " + execId);
}
ExecutionReference ref = refPair.getFirst();
ExecutableFlow flow = refPair.getSecond();
if (updateData.containsKey("error")) {
// The flow should be finished here.
throw new ExecutorManagerException((String) updateData.get("error"), flow);
}
// Reset errors.
ref.setNextCheckTime(0);
ref.setNumErrors(0);
Status oldStatus = flow.getStatus();
flow.applyUpdateObject(updateData);
Status newStatus = flow.getStatus();
ExecutionOptions options = flow.getExecutionOptions();
if (oldStatus != newStatus && newStatus.equals(Status.FAILED_FINISHING)) {
// We want to see if we should give an email status on first failure.
if (options.getNotifyOnFirstFailure()) {
Alerter mailAlerter = alerters.get("email");
try {
mailAlerter.alertOnFirstError(flow);
} catch (Exception e) {
e.printStackTrace();
logger.error("Failed to send first error email." + e.getMessage());
}
}
if (options.getFlowParameters().containsKey("alert.type")) {
String alertType = options.getFlowParameters().get("alert.type");
Alerter alerter = alerters.get(alertType);
if (alerter != null) {
try {
alerter.alertOnFirstError(flow);
} catch (Exception e) {
// TODO Auto-generated catch block
e.printStackTrace();
logger.error("Failed to alert by " + alertType);
}
} else {
logger.error("Alerter type " + alertType
+ " doesn't exist. Failed to alert.");
}
}
}
return flow;
}
public boolean isFinished(ExecutableFlow flow) {
switch (flow.getStatus()) {
case SUCCEEDED:
case FAILED:
case KILLED:
return true;
default:
return false;
}
}
private void fillUpdateTimeAndExecId(List<ExecutableFlow> flows,
List<Integer> executionIds, List<Long> updateTimes) {
for (ExecutableFlow flow : flows) {
executionIds.add(flow.getExecutionId());
updateTimes.add(flow.getUpdateTime());
}
}
/* Group Executable flow by Executors to reduce number of REST calls */
private Map<Executor, List<ExecutableFlow>> getFlowToExecutorMap() {
HashMap<Executor, List<ExecutableFlow>> exFlowMap =
new HashMap<Executor, List<ExecutableFlow>>();
for (Pair<ExecutionReference, ExecutableFlow> runningFlow : runningFlows
.values()) {
ExecutionReference ref = runningFlow.getFirst();
ExecutableFlow flow = runningFlow.getSecond();
Executor executor = ref.getExecutor();
// We can set the next check time to prevent the checking of certain
// flows.
if (ref.getNextCheckTime() >= System.currentTimeMillis()) {
continue;
}
List<ExecutableFlow> flows = exFlowMap.get(executor);
if (flows == null) {
flows = new ArrayList<ExecutableFlow>();
exFlowMap.put(executor, flows);
}
flows.add(flow);
}
return exFlowMap;
}
@Override
public int getExecutableFlows(int projectId, String flowId, int from,
int length, List<ExecutableFlow> outputList)
throws ExecutorManagerException {
List<ExecutableFlow> flows =
executorLoader.fetchFlowHistory(projectId, flowId, from, length);
outputList.addAll(flows);
return executorLoader.fetchNumExecutableFlows(projectId, flowId);
}
@Override
public List<ExecutableFlow> getExecutableFlows(int projectId, String flowId,
int from, int length, Status status) throws ExecutorManagerException {
return executorLoader.fetchFlowHistory(projectId, flowId, from, length,
status);
}
/*
   * Cleaner thread to clean up execution_logs, etc. in the DB. Runs every day.
*/
private class CleanerThread extends Thread {
// log file retention is 1 month.
// check every day
private static final long CLEANER_THREAD_WAIT_INTERVAL_MS =
24 * 60 * 60 * 1000;
private final long executionLogsRetentionMs;
private boolean shutdown = false;
private long lastLogCleanTime = -1;
public CleanerThread(long executionLogsRetentionMs) {
this.executionLogsRetentionMs = executionLogsRetentionMs;
this.setName("AzkabanWebServer-Cleaner-Thread");
}
@SuppressWarnings("unused")
public void shutdown() {
shutdown = true;
this.interrupt();
}
public void run() {
while (!shutdown) {
synchronized (this) {
try {
lastCleanerThreadCheckTime = System.currentTimeMillis();
// Cleanup old stuff.
long currentTime = System.currentTimeMillis();
if (currentTime - CLEANER_THREAD_WAIT_INTERVAL_MS > lastLogCleanTime) {
cleanExecutionLogs();
lastLogCleanTime = currentTime;
}
wait(CLEANER_THREAD_WAIT_INTERVAL_MS);
} catch (InterruptedException e) {
logger.info("Interrupted. Probably to shut down.");
}
}
}
}
private void cleanExecutionLogs() {
logger.info("Cleaning old logs from execution_logs");
long cutoff = DateTime.now().getMillis() - executionLogsRetentionMs;
logger.info("Cleaning old log files before "
+ new DateTime(cutoff).toString());
cleanOldExecutionLogs(DateTime.now().getMillis()
- executionLogsRetentionMs);
}
}
/*
* This thread is responsible for processing queued flows using dispatcher and
* making rest api calls to executor server
*/
private class QueueProcessorThread extends Thread {
private static final long QUEUE_PROCESSOR_WAIT_IN_MS = 1000;
private static final int MAX_DISPATCHING_ERRORS_PERMITTED = 5;
private final long activeExecutorRefreshWindowInMilisec;
private final int activeExecutorRefreshWindowInFlows;
private volatile boolean shutdown = false;
private volatile boolean isActive = true;
public QueueProcessorThread(boolean isActive,
long activeExecutorRefreshWindowInTime,
int activeExecutorRefreshWindowInFlows) {
setActive(isActive);
this.activeExecutorRefreshWindowInFlows =
activeExecutorRefreshWindowInFlows;
this.activeExecutorRefreshWindowInMilisec =
activeExecutorRefreshWindowInTime;
this.setName("AzkabanWebServer-QueueProcessor-Thread");
}
public void setActive(boolean isActive) {
this.isActive = isActive;
logger.info("QueueProcessorThread active turned " + this.isActive);
}
public boolean isActive() {
return isActive;
}
public void shutdown() {
shutdown = true;
this.interrupt();
}
public void run() {
      // Loops until QueueProcessorThread is shut down.
while (!shutdown) {
synchronized (this) {
try {
            // Start processing the queue if active, otherwise wait for some time.
if (isActive) {
processQueuedFlows(activeExecutorRefreshWindowInMilisec,
activeExecutorRefreshWindowInFlows);
}
wait(QUEUE_PROCESSOR_WAIT_IN_MS);
} catch (Exception e) {
logger.error(
"QueueProcessorThread Interrupted. Probably to shut down.", e);
}
}
}
}
/* Method responsible for processing the non-dispatched flows */
private void processQueuedFlows(long activeExecutorsRefreshWindow,
int maxContinuousFlowProcessed) throws InterruptedException,
ExecutorManagerException {
long lastExecutorRefreshTime = System.currentTimeMillis();
Pair<ExecutionReference, ExecutableFlow> runningCandidate;
int currentContinuousFlowProcessed = 0;
while (isActive() && (runningCandidate = queuedFlows.fetchHead()) != null) {
ExecutionReference reference = runningCandidate.getFirst();
ExecutableFlow exflow = runningCandidate.getSecond();
long currentTime = System.currentTimeMillis();
        // If we have dispatched more than maxContinuousFlowProcessed flows, or
        // it has been more than activeExecutorsRefreshWindow milliseconds since
        // the last refresh, refresh the executor info.
if (currentTime - lastExecutorRefreshTime > activeExecutorsRefreshWindow
|| currentContinuousFlowProcessed >= maxContinuousFlowProcessed) {
// Refresh executorInfo for all activeExecutors
refreshExecutors();
lastExecutorRefreshTime = currentTime;
currentContinuousFlowProcessed = 0;
}
exflow.setUpdateTime(currentTime);
// process flow with current snapshot of activeExecutors
processFlow(reference, exflow, new HashSet<Executor>(activeExecutors));
currentContinuousFlowProcessed++;
}
}
/* process flow with a snapshot of available Executors */
private void processFlow(ExecutionReference reference,
ExecutableFlow exflow, Set<Executor> availableExecutors)
throws ExecutorManagerException {
synchronized (exflow) {
Executor selectedExecutor = selectExecutor(exflow, availableExecutors);
if (selectedExecutor != null) {
try {
dispatch(reference, exflow, selectedExecutor);
} catch (ExecutorManagerException e) {
logger.warn(String.format(
"Executor %s responded with exception for exec: %d",
selectedExecutor, exflow.getExecutionId()), e);
handleDispatchExceptionCase(reference, exflow, selectedExecutor,
availableExecutors);
}
} else {
handleNoExecutorSelectedCase(reference, exflow);
}
}
}
    /* Helper method to fetch the overriding executor if a valid user has specified one; otherwise returns null */
private Executor getUserSpecifiedExecutor(ExecutionOptions options,
int executionId) {
Executor executor = null;
if (options != null
&& options.getFlowParameters() != null
&& options.getFlowParameters().containsKey(
ExecutionOptions.USE_EXECUTOR)) {
try {
int executorId =
Integer.valueOf(options.getFlowParameters().get(
ExecutionOptions.USE_EXECUTOR));
executor = fetchExecutor(executorId);
if (executor == null) {
logger
.warn(String
.format(
"User specified executor id: %d for execution id: %d is not active, Looking up db.",
executorId, executionId));
executor = executorLoader.fetchExecutor(executorId);
if (executor == null) {
logger
.warn(String
.format(
"User specified executor id: %d for execution id: %d is missing from db. Defaulting to availableExecutors",
executorId, executionId));
}
}
} catch (ExecutorManagerException ex) {
logger.error("Failed to fetch user specified executor for exec_id = "
+ executionId, ex);
}
}
return executor;
}
/* Choose Executor for exflow among the available executors */
private Executor selectExecutor(ExecutableFlow exflow,
Set<Executor> availableExecutors) {
Executor choosenExecutor =
getUserSpecifiedExecutor(exflow.getExecutionOptions(),
exflow.getExecutionId());
// If no executor was specified by admin
if (choosenExecutor == null) {
logger.info("Using dispatcher for execution id :"
+ exflow.getExecutionId());
ExecutorSelector selector = new ExecutorSelector(filterList, comparatorWeightsMap);
choosenExecutor = selector.getBest(activeExecutors, exflow);
}
return choosenExecutor;
}
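    /*
     * Retries dispatch on the remaining executors after a failure; gives up
     * and finalizes the flow once the error limit is reached or no
     * alternative executors remain.
     */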
private void handleDispatchExceptionCase(ExecutionReference reference,
ExecutableFlow exflow, Executor lastSelectedExecutor,
Set<Executor> remainingExecutors) throws ExecutorManagerException {
logger
.info(String
.format(
"Reached handleDispatchExceptionCase stage for exec %d with error count %d",
exflow.getExecutionId(), reference.getNumErrors()));
reference.setNumErrors(reference.getNumErrors() + 1);
if (reference.getNumErrors() >= MAX_DISPATCHING_ERRORS_PERMITTED
|| remainingExecutors.size() <= 1) {
logger.error("Failed to process queued flow");
finalizeFlows(exflow);
} else {
remainingExecutors.remove(lastSelectedExecutor);
        // Try the remaining executors, excluding the one that just failed.
processFlow(reference, exflow, remainingExecutors);
}
}
private void handleNoExecutorSelectedCase(ExecutionReference reference,
ExecutableFlow exflow) throws ExecutorManagerException {
logger
.info(String
.format(
"Reached handleNoExecutorSelectedCase stage for exec %d with error count %d",
exflow.getExecutionId(), reference.getNumErrors()));
// TODO: handle scenario where a high priority flow failing to get
// schedule can starve all others
queuedFlows.enqueue(exflow, reference);
}
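    /*
     * Sends the execute action to the chosen executor, records the
     * assignment, and moves the flow into the runningFlows map.
     */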
private void dispatch(ExecutionReference reference, ExecutableFlow exflow,
Executor choosenExecutor) throws ExecutorManagerException {
exflow.setUpdateTime(System.currentTimeMillis());
callExecutorServer(exflow, choosenExecutor,
ConnectorParams.EXECUTE_ACTION);
executorLoader.assignExecutor(choosenExecutor.getId(),
exflow.getExecutionId());
reference.setExecutor(choosenExecutor);
// move from flow to running flows
runningFlows.put(exflow.getExecutionId(),
new Pair<ExecutionReference, ExecutableFlow>(reference, exflow));
logger.info(String.format(
"Successfully dispatched exec %d with error count %d",
exflow.getExecutionId(), reference.getNumErrors()));
}
}
} | 1 | 10,776 | why named alters? how about this.alerts = alerts? | azkaban-azkaban | java |
@@ -1549,7 +1549,7 @@ func (js *jetStream) processStreamLeaderChange(mset *stream, isLeader bool) {
resp.Error = jsError(err)
s.sendAPIErrResponse(client, acc, subject, reply, _EMPTY_, s.jsonResponse(&resp))
} else {
- resp.StreamInfo = &StreamInfo{Created: mset.createdTime(), State: mset.state(), Config: mset.config(), Cluster: js.clusterInfo(mset.raftGroup())}
+ resp.StreamInfo = &StreamInfo{Created: mset.createdTime(), State: mset.state(), Config: mset.config(), Cluster: js.clusterInfo(mset.raftGroup()), Sources: mset.sourcesInfo(), Mirror: mset.mirrorInfo()}
s.sendAPIResponse(client, acc, subject, reply, _EMPTY_, s.jsonResponse(&resp))
if node := mset.raftNode(); node != nil {
mset.sendCreateAdvisory() | 1 | // Copyright 2020-2021 The NATS Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package server
import (
"bytes"
"encoding/binary"
"encoding/json"
"errors"
"fmt"
"math/rand"
"path"
"reflect"
"sort"
"strings"
"sync/atomic"
"time"
"github.com/klauspost/compress/s2"
"github.com/nats-io/nuid"
)
// jetStreamCluster holds information about the meta group and stream assignments.
type jetStreamCluster struct {
// The metacontroller raftNode.
meta RaftNode
// For stream and consumer assignments. All servers will have this be the same.
// ACC -> STREAM -> Stream Assignment -> Consumers
streams map[string]map[string]*streamAssignment
// Server.
s *Server
// Internal client.
c *client
// Processing assignment results.
streamResults *subscription
consumerResults *subscription
// For asking for leader to stepdown.
stepdown *subscription
}
// Used to guide placement of streams and meta controllers in clustered JetStream.
type Placement struct {
Cluster string `json:"cluster"`
Tags []string `json:"tags,omitempty"`
}
// Define types of the entry.
type entryOp uint8
const (
// Meta ops.
assignStreamOp entryOp = iota
assignConsumerOp
removeStreamOp
removeConsumerOp
// Stream ops.
streamMsgOp
purgeStreamOp
deleteMsgOp
// Consumer ops
updateDeliveredOp
updateAcksOp
// Compressed consumer assignments.
assignCompressedConsumerOp
// Filtered Consumer skip.
updateSkipOp
// Update Stream
updateStreamOp
)
// raftGroups are controlled by the metagroup controller.
// The raftGroups will house streams and consumers.
type raftGroup struct {
Name string `json:"name"`
Peers []string `json:"peers"`
Storage StorageType `json:"store"`
Preferred string `json:"preferred,omitempty"`
// Internal
node RaftNode
}
// streamAssignment is what the meta controller uses to assign streams to peers.
type streamAssignment struct {
Client *ClientInfo `json:"client,omitempty"`
Created time.Time `json:"created"`
Config *StreamConfig `json:"stream"`
Group *raftGroup `json:"group"`
Sync string `json:"sync"`
Subject string `json:"subject"`
Reply string `json:"reply"`
Restore *StreamState `json:"restore_state,omitempty"`
// Internal
consumers map[string]*consumerAssignment
responded bool
err error
}
// consumerAssignment is what the meta controller uses to assign consumers to streams.
type consumerAssignment struct {
Client *ClientInfo `json:"client,omitempty"`
Created time.Time `json:"created"`
Name string `json:"name"`
Stream string `json:"stream"`
Config *ConsumerConfig `json:"consumer"`
Group *raftGroup `json:"group"`
Subject string `json:"subject"`
Reply string `json:"reply"`
State *ConsumerState `json:"state,omitempty"`
// Internal
responded bool
deleted bool
err error
}
// streamPurge is what the stream leader will replicate when purging a stream.
type streamPurge struct {
Client *ClientInfo `json:"client,omitempty"`
Stream string `json:"stream"`
LastSeq uint64 `json:"last_seq"`
Subject string `json:"subject"`
Reply string `json:"reply"`
}
// streamMsgDelete is what the stream leader will replicate when deleting a message.
type streamMsgDelete struct {
Client *ClientInfo `json:"client,omitempty"`
Stream string `json:"stream"`
Seq uint64 `json:"seq"`
NoErase bool `json:"no_erase,omitempty"`
Subject string `json:"subject"`
Reply string `json:"reply"`
}
const (
defaultStoreDirName = "_js_"
defaultMetaGroupName = "_meta_"
defaultMetaFSBlkSize = 64 * 1024
)
// For validating clusters.
func validateJetStreamOptions(o *Options) error {
// If not clustered no checks.
if !o.JetStream || o.Cluster.Port == 0 {
return nil
}
if o.ServerName == _EMPTY_ {
return fmt.Errorf("jetstream cluster requires `server_name` to be set")
}
if o.Cluster.Name == _EMPTY_ {
return fmt.Errorf("jetstream cluster requires `cluster.name` to be set")
}
return nil
}
func (s *Server) getJetStreamCluster() (*jetStream, *jetStreamCluster) {
s.mu.Lock()
shutdown := s.shutdown
js := s.js
s.mu.Unlock()
if shutdown || js == nil {
return nil, nil
}
js.mu.RLock()
cc := js.cluster
js.mu.RUnlock()
if cc == nil {
return nil, nil
}
return js, cc
}
func (s *Server) JetStreamIsClustered() bool {
js := s.getJetStream()
if js == nil {
return false
}
js.mu.RLock()
isClustered := js.cluster != nil
js.mu.RUnlock()
return isClustered
}
func (s *Server) JetStreamIsLeader() bool {
js := s.getJetStream()
if js == nil {
return false
}
js.mu.RLock()
defer js.mu.RUnlock()
return js.cluster.isLeader()
}
func (s *Server) JetStreamIsCurrent() bool {
js := s.getJetStream()
if js == nil {
return false
}
js.mu.RLock()
defer js.mu.RUnlock()
return js.cluster.isCurrent()
}
func (s *Server) JetStreamSnapshotMeta() error {
js := s.getJetStream()
if js == nil {
return ErrJetStreamNotEnabled
}
js.mu.RLock()
defer js.mu.RUnlock()
cc := js.cluster
if !cc.isLeader() {
return errNotLeader
}
return cc.meta.InstallSnapshot(js.metaSnapshot())
}
func (s *Server) JetStreamStepdownStream(account, stream string) error {
js, cc := s.getJetStreamCluster()
if js == nil {
return ErrJetStreamNotEnabled
}
if cc == nil {
return ErrJetStreamNotClustered
}
// Grab account
acc, err := s.LookupAccount(account)
if err != nil {
return err
}
// Grab stream
mset, err := acc.lookupStream(stream)
if err != nil {
return err
}
if node := mset.raftNode(); node != nil && node.Leader() {
node.StepDown()
}
return nil
}
func (s *Server) JetStreamSnapshotStream(account, stream string) error {
js, cc := s.getJetStreamCluster()
if js == nil {
return ErrJetStreamNotEnabled
}
if cc == nil {
return ErrJetStreamNotClustered
}
// Grab account
acc, err := s.LookupAccount(account)
if err != nil {
return err
}
// Grab stream
mset, err := acc.lookupStream(stream)
if err != nil {
return err
}
mset.mu.RLock()
if !mset.node.Leader() {
mset.mu.RUnlock()
return ErrJetStreamNotLeader
}
n := mset.node
mset.mu.RUnlock()
return n.InstallSnapshot(mset.stateSnapshot())
}
func (s *Server) JetStreamClusterPeers() []string {
js := s.getJetStream()
if js == nil {
return nil
}
js.mu.RLock()
defer js.mu.RUnlock()
cc := js.cluster
if !cc.isLeader() {
return nil
}
peers := cc.meta.Peers()
var nodes []string
for _, p := range peers {
si, ok := s.nodeToInfo.Load(p.ID)
if !ok || si.(*nodeInfo).offline {
continue
}
nodes = append(nodes, si.(*nodeInfo).name)
}
return nodes
}
// Read lock should be held.
func (cc *jetStreamCluster) isLeader() bool {
if cc == nil {
// Non-clustered mode
return true
}
return cc.meta.Leader()
}
// isCurrent will determine if this node is a leader or an up to date follower.
// Read lock should be held.
func (cc *jetStreamCluster) isCurrent() bool {
if cc == nil {
// Non-clustered mode
return true
}
return cc.meta.Current()
}
// isStreamCurrent will determine if this node is a participant for the stream and if its up to date.
// Read lock should be held.
func (cc *jetStreamCluster) isStreamCurrent(account, stream string) bool {
if cc == nil {
// Non-clustered mode
return true
}
as := cc.streams[account]
if as == nil {
return false
}
sa := as[stream]
if sa == nil {
return false
}
rg := sa.Group
if rg == nil || rg.node == nil {
return false
}
isCurrent := rg.node.Current()
if isCurrent {
// Check if we are processing a snapshot and are catching up.
acc, err := cc.s.LookupAccount(account)
if err != nil {
return false
}
mset, err := acc.lookupStream(stream)
if err != nil {
return false
}
if mset.isCatchingUp() {
return false
}
}
return isCurrent
}
func (a *Account) getJetStreamFromAccount() (*Server, *jetStream, *jsAccount) {
a.mu.RLock()
jsa := a.js
a.mu.RUnlock()
if jsa == nil {
return nil, nil, nil
}
jsa.mu.RLock()
js := jsa.js
jsa.mu.RUnlock()
if js == nil {
return nil, nil, nil
}
js.mu.RLock()
s := js.srv
js.mu.RUnlock()
return s, js, jsa
}
func (s *Server) JetStreamIsStreamLeader(account, stream string) bool {
js, cc := s.getJetStreamCluster()
if js == nil || cc == nil {
return false
}
js.mu.RLock()
defer js.mu.RUnlock()
return cc.isStreamLeader(account, stream)
}
func (a *Account) JetStreamIsStreamLeader(stream string) bool {
s, js, jsa := a.getJetStreamFromAccount()
if s == nil || js == nil || jsa == nil {
return false
}
js.mu.RLock()
defer js.mu.RUnlock()
return js.cluster.isStreamLeader(a.Name, stream)
}
func (s *Server) JetStreamIsStreamCurrent(account, stream string) bool {
js, cc := s.getJetStreamCluster()
if js == nil {
return false
}
js.mu.RLock()
defer js.mu.RUnlock()
return cc.isStreamCurrent(account, stream)
}
func (a *Account) JetStreamIsConsumerLeader(stream, consumer string) bool {
s, js, jsa := a.getJetStreamFromAccount()
if s == nil || js == nil || jsa == nil {
return false
}
js.mu.RLock()
defer js.mu.RUnlock()
return js.cluster.isConsumerLeader(a.Name, stream, consumer)
}
func (s *Server) JetStreamIsConsumerLeader(account, stream, consumer string) bool {
js, cc := s.getJetStreamCluster()
if js == nil || cc == nil {
return false
}
js.mu.RLock()
defer js.mu.RUnlock()
return cc.isConsumerLeader(account, stream, consumer)
}
func (s *Server) enableJetStreamClustering() error {
if !s.isRunning() {
return nil
}
js := s.getJetStream()
if js == nil {
return ErrJetStreamNotEnabled
}
// Already set.
if js.cluster != nil {
return nil
}
s.Noticef("Starting JetStream cluster")
// We need to determine if we have a stable cluster name and expected number of servers.
s.Debugf("JetStream cluster checking for stable cluster name and peers")
if s.isClusterNameDynamic() || s.configuredRoutes() == 0 {
return errors.New("JetStream cluster requires cluster name and explicit routes")
}
return js.setupMetaGroup()
}
func (js *jetStream) setupMetaGroup() error {
s := js.srv
s.Noticef("Creating JetStream metadata controller")
// Setup our WAL for the metagroup.
sysAcc := s.SystemAccount()
storeDir := path.Join(js.config.StoreDir, sysAcc.Name, defaultStoreDirName, defaultMetaGroupName)
fs, err := newFileStore(
FileStoreConfig{StoreDir: storeDir, BlockSize: defaultMetaFSBlkSize},
StreamConfig{Name: defaultMetaGroupName, Storage: FileStorage},
)
if err != nil {
s.Errorf("Error creating filestore: %v", err)
return err
}
cfg := &RaftConfig{Name: defaultMetaGroupName, Store: storeDir, Log: fs}
if _, err := readPeerState(storeDir); err != nil {
s.Noticef("JetStream cluster bootstrapping")
peers := s.ActivePeers()
s.Debugf("JetStream cluster initial peers: %+v", peers)
if err := s.bootstrapRaftNode(cfg, peers, false); err != nil {
return err
}
} else {
s.Noticef("JetStream cluster recovering state")
}
// Start up our meta node.
n, err := s.startRaftNode(cfg)
if err != nil {
s.Warnf("Could not start metadata controller: %v", err)
return err
}
n.Campaign()
c := s.createInternalJetStreamClient()
sacc := s.SystemAccount()
js.mu.Lock()
defer js.mu.Unlock()
js.cluster = &jetStreamCluster{
meta: n,
streams: make(map[string]map[string]*streamAssignment),
s: s,
c: c,
}
c.registerWithAccount(sacc)
js.srv.startGoRoutine(js.monitorCluster)
return nil
}
func (js *jetStream) getMetaGroup() RaftNode {
js.mu.RLock()
defer js.mu.RUnlock()
if js.cluster == nil {
return nil
}
return js.cluster.meta
}
func (js *jetStream) server() *Server {
js.mu.RLock()
s := js.srv
js.mu.RUnlock()
return s
}
// Will respond if we do not think we have a metacontroller leader.
func (js *jetStream) isLeaderless() bool {
js.mu.RLock()
defer js.mu.RUnlock()
cc := js.cluster
if cc == nil {
return false
}
// If we don't have a leader.
if cc.meta.GroupLeader() == _EMPTY_ {
// Make sure we have been running for enough time.
if time.Since(cc.meta.Created()) > lostQuorumInterval {
return true
}
}
return false
}
// Will respond iff we are a member and we know we have no leader.
func (js *jetStream) isGroupLeaderless(rg *raftGroup) bool {
if rg == nil {
return false
}
js.mu.RLock()
defer js.mu.RUnlock()
cc := js.cluster
	// If we are not a member we cannot say.
if !rg.isMember(cc.meta.ID()) {
return false
}
// Single peer groups always have a leader if we are here.
if rg.node == nil {
return false
}
// If we don't have a leader.
if rg.node.GroupLeader() == _EMPTY_ {
// Make sure we have been running for enough time.
if time.Since(rg.node.Created()) > lostQuorumInterval {
return true
}
}
return false
}
func (s *Server) JetStreamIsStreamAssigned(account, stream string) bool {
js, cc := s.getJetStreamCluster()
if js == nil || cc == nil {
return false
}
acc, _ := s.LookupAccount(account)
if acc == nil {
return false
}
return cc.isStreamAssigned(acc, stream)
}
// streamAssigned informs us if this server has this stream assigned.
func (jsa *jsAccount) streamAssigned(stream string) bool {
jsa.mu.RLock()
js, acc := jsa.js, jsa.account
jsa.mu.RUnlock()
if js == nil {
return false
}
js.mu.RLock()
assigned := js.cluster.isStreamAssigned(acc, stream)
js.mu.RUnlock()
return assigned
}
// Read lock should be held.
func (cc *jetStreamCluster) isStreamAssigned(a *Account, stream string) bool {
// Non-clustered mode always return true.
if cc == nil {
return true
}
as := cc.streams[a.Name]
if as == nil {
return false
}
sa := as[stream]
if sa == nil {
return false
}
rg := sa.Group
if rg == nil {
return false
}
// Check if we are the leader of this raftGroup assigned to the stream.
ourID := cc.meta.ID()
for _, peer := range rg.Peers {
if peer == ourID {
return true
}
}
return false
}
// Read lock should be held.
func (cc *jetStreamCluster) isStreamLeader(account, stream string) bool {
// Non-clustered mode always return true.
if cc == nil {
return true
}
if cc.meta == nil {
return false
}
var sa *streamAssignment
if as := cc.streams[account]; as != nil {
sa = as[stream]
}
if sa == nil {
return false
}
rg := sa.Group
if rg == nil {
return false
}
// Check if we are the leader of this raftGroup assigned to the stream.
ourID := cc.meta.ID()
for _, peer := range rg.Peers {
if peer == ourID {
			if len(rg.Peers) == 1 || (rg.node != nil && rg.node.Leader()) {
return true
}
}
}
return false
}
// Read lock should be held.
func (cc *jetStreamCluster) isConsumerLeader(account, stream, consumer string) bool {
// Non-clustered mode always return true.
if cc == nil {
return true
}
if cc.meta == nil {
return false
}
var sa *streamAssignment
if as := cc.streams[account]; as != nil {
sa = as[stream]
}
if sa == nil {
return false
}
// Check if we are the leader of this raftGroup assigned to this consumer.
ca := sa.consumers[consumer]
if ca == nil {
return false
}
rg := ca.Group
ourID := cc.meta.ID()
for _, peer := range rg.Peers {
if peer == ourID {
if len(rg.Peers) == 1 || (rg.node != nil && rg.node.Leader()) {
return true
}
}
}
return false
}
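// monitorCluster runs in its own goroutine and drives the meta group:
// it applies committed entries, reacts to leadership changes, and installs
// periodic snapshots of the assignment state.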
func (js *jetStream) monitorCluster() {
s, n := js.server(), js.getMetaGroup()
qch, lch, ach := n.QuitC(), n.LeadChangeC(), n.ApplyC()
defer s.grWG.Done()
s.Debugf("Starting metadata monitor")
defer s.Debugf("Exiting metadata monitor")
const compactInterval = 2 * time.Minute
t := time.NewTicker(compactInterval)
defer t.Stop()
var isLeader bool
var lastSnap []byte
doSnapshot := func() {
if snap := js.metaSnapshot(); !bytes.Equal(lastSnap, snap) {
if err := n.InstallSnapshot(snap); err == nil {
lastSnap = snap
}
}
}
isRecovering := true
for {
select {
case <-s.quitCh:
return
case <-qch:
return
case ce := <-ach:
if ce == nil {
// Signals we have replayed all of our metadata.
isRecovering = false
s.Debugf("Recovered JetStream cluster metadata")
continue
}
// FIXME(dlc) - Deal with errors.
if _, didRemoval, err := js.applyMetaEntries(ce.Entries, isRecovering); err == nil {
n.Applied(ce.Index)
if didRemoval {
// Since we received one make sure we have our own since we do not store
// our meta state outside of raft.
doSnapshot()
} else if _, b := n.Size(); b > uint64(len(lastSnap)*4) {
doSnapshot()
}
}
case isLeader = <-lch:
js.processLeaderChange(isLeader)
case <-t.C:
doSnapshot()
}
}
}
// Represents our stable meta state that we can write out.
type writeableStreamAssignment struct {
Client *ClientInfo `json:"client,omitempty"`
Created time.Time `json:"created"`
Config *StreamConfig `json:"stream"`
Group *raftGroup `json:"group"`
Sync string `json:"sync"`
Consumers []*consumerAssignment
}
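// metaSnapshot serializes all stream and consumer assignments as JSON and
// compresses the result with s2 for use as a raft snapshot.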
func (js *jetStream) metaSnapshot() []byte {
var streams []writeableStreamAssignment
js.mu.RLock()
cc := js.cluster
for _, asa := range cc.streams {
for _, sa := range asa {
wsa := writeableStreamAssignment{
Client: sa.Client,
Created: sa.Created,
Config: sa.Config,
Group: sa.Group,
Sync: sa.Sync,
}
for _, ca := range sa.consumers {
wsa.Consumers = append(wsa.Consumers, ca)
}
streams = append(streams, wsa)
}
}
if len(streams) == 0 {
js.mu.RUnlock()
return nil
}
b, _ := json.Marshal(streams)
js.mu.RUnlock()
return s2.EncodeBetter(nil, b)
}
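// applyMetaSnapshot diffs an incoming snapshot against our current
// assignments and processes the stream and consumer adds/removes needed to
// converge on the snapshot state.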
func (js *jetStream) applyMetaSnapshot(buf []byte, isRecovering bool) error {
if len(buf) == 0 {
return nil
}
jse, err := s2.Decode(nil, buf)
if err != nil {
return err
}
var wsas []writeableStreamAssignment
if err = json.Unmarshal(jse, &wsas); err != nil {
return err
}
// Build our new version here outside of js.
streams := make(map[string]map[string]*streamAssignment)
for _, wsa := range wsas {
as := streams[wsa.Client.serviceAccount()]
if as == nil {
as = make(map[string]*streamAssignment)
streams[wsa.Client.serviceAccount()] = as
}
sa := &streamAssignment{Client: wsa.Client, Created: wsa.Created, Config: wsa.Config, Group: wsa.Group, Sync: wsa.Sync}
if len(wsa.Consumers) > 0 {
sa.consumers = make(map[string]*consumerAssignment)
for _, ca := range wsa.Consumers {
sa.consumers[ca.Name] = ca
}
}
as[wsa.Config.Name] = sa
}
js.mu.Lock()
cc := js.cluster
var saAdd, saDel, saChk []*streamAssignment
// Walk through the old list to generate the delete list.
for account, asa := range cc.streams {
nasa := streams[account]
for sn, sa := range asa {
if nsa := nasa[sn]; nsa == nil {
saDel = append(saDel, sa)
} else {
saChk = append(saChk, nsa)
}
}
}
// Walk through the new list to generate the add list.
for account, nasa := range streams {
asa := cc.streams[account]
for sn, sa := range nasa {
if asa[sn] == nil {
saAdd = append(saAdd, sa)
}
}
}
// Now walk the ones to check and process consumers.
var caAdd, caDel []*consumerAssignment
for _, sa := range saChk {
if osa := js.streamAssignment(sa.Client.serviceAccount(), sa.Config.Name); osa != nil {
for _, ca := range osa.consumers {
if sa.consumers[ca.Name] == nil {
caDel = append(caDel, ca)
} else {
caAdd = append(caAdd, ca)
}
}
}
}
js.mu.Unlock()
// Do removals first.
for _, sa := range saDel {
if isRecovering {
js.setStreamAssignmentResponded(sa)
}
js.processStreamRemoval(sa)
}
// Now do add for the streams. Also add in all consumers.
for _, sa := range saAdd {
if isRecovering {
js.setStreamAssignmentResponded(sa)
}
js.processStreamAssignment(sa)
// We can simply add the consumers.
for _, ca := range sa.consumers {
if isRecovering {
js.setConsumerAssignmentResponded(ca)
}
js.processConsumerAssignment(ca)
}
}
// Now do the deltas for existing stream's consumers.
for _, ca := range caDel {
if isRecovering {
js.setConsumerAssignmentResponded(ca)
}
js.processConsumerRemoval(ca)
}
for _, ca := range caAdd {
if isRecovering {
js.setConsumerAssignmentResponded(ca)
}
js.processConsumerAssignment(ca)
}
return nil
}
// Called on recovery to make sure we do not process this as if it were the original request.
func (js *jetStream) setStreamAssignmentResponded(sa *streamAssignment) {
js.mu.Lock()
defer js.mu.Unlock()
sa.responded = true
sa.Restore = nil
}
// Called on recovery to make sure we do not process this as if it were the original request.
func (js *jetStream) setConsumerAssignmentResponded(ca *consumerAssignment) {
js.mu.Lock()
defer js.mu.Unlock()
ca.responded = true
}
// Just copied over and changes out the group so it can be encoded.
// Lock should be held.
func (sa *streamAssignment) copyGroup() *streamAssignment {
csa, cg := *sa, *sa.Group
csa.Group = &cg
csa.Group.Peers = append(sa.Group.Peers[:0:0], sa.Group.Peers...)
return &csa
}
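// processRemovePeer remaps stream (and associated consumer) assignments away
// from a peer that has been removed from the meta group. Leader only.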
func (js *jetStream) processRemovePeer(peer string) {
js.mu.Lock()
defer js.mu.Unlock()
cc := js.cluster
// Only leader should process and re-assign mappings.
if !cc.isLeader() {
return
}
// Grab our nodes.
// FIXME(dlc) - Make sure these are live.
// Need to search for this peer in our stream assignments for potential remapping.
for _, as := range cc.streams {
for _, sa := range as {
if sa.Group.isMember(peer) {
js.removePeerFromStream(sa, peer)
}
}
}
}
// Assumes all checks have already been done.
// Lock should be held.
func (js *jetStream) removePeerFromStream(sa *streamAssignment, peer string) {
s, cc := js.srv, js.cluster
csa := sa.copyGroup()
if !cc.remapStreamAssignment(csa, peer) {
s.Warnf("JetStream cluster could not remap stream '%s > %s'", sa.Client.serviceAccount(), sa.Config.Name)
}
// Send our proposal for this csa. Also use same group definition for all the consumers as well.
cc.meta.Propose(encodeAddStreamAssignment(csa))
rg := csa.Group
for _, ca := range sa.consumers {
cca := *ca
cca.Group.Peers = rg.Peers
cc.meta.Propose(encodeAddConsumerAssignment(&cca))
}
}
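// applyMetaEntries processes committed meta group entries, dispatching on
// the entry op type. It reports whether a snapshot was applied and whether
// any removal was seen so the caller can decide when to compact.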
func (js *jetStream) applyMetaEntries(entries []*Entry, isRecovering bool) (bool, bool, error) {
var didSnap, didRemove bool
for _, e := range entries {
if e.Type == EntrySnapshot {
js.applyMetaSnapshot(e.Data, isRecovering)
didSnap = true
} else if e.Type == EntryRemovePeer {
js.processRemovePeer(string(e.Data))
} else {
buf := e.Data
switch entryOp(buf[0]) {
case assignStreamOp:
sa, err := decodeStreamAssignment(buf[1:])
if err != nil {
js.srv.Errorf("JetStream cluster failed to decode stream assignment: %q", buf[1:])
return didSnap, didRemove, err
}
if isRecovering {
js.setStreamAssignmentResponded(sa)
}
js.processStreamAssignment(sa)
case removeStreamOp:
sa, err := decodeStreamAssignment(buf[1:])
if err != nil {
js.srv.Errorf("JetStream cluster failed to decode stream assignment: %q", buf[1:])
return didSnap, didRemove, err
}
if isRecovering {
js.setStreamAssignmentResponded(sa)
}
js.processStreamRemoval(sa)
didRemove = true
case assignConsumerOp:
ca, err := decodeConsumerAssignment(buf[1:])
if err != nil {
js.srv.Errorf("JetStream cluster failed to decode consumer assigment: %q", buf[1:])
return didSnap, didRemove, err
}
if isRecovering {
js.setConsumerAssignmentResponded(ca)
}
js.processConsumerAssignment(ca)
case assignCompressedConsumerOp:
ca, err := decodeConsumerAssignmentCompressed(buf[1:])
if err != nil {
js.srv.Errorf("JetStream cluster failed to decode compressed consumer assigment: %q", buf[1:])
return didSnap, didRemove, err
}
if isRecovering {
js.setConsumerAssignmentResponded(ca)
}
js.processConsumerAssignment(ca)
case removeConsumerOp:
ca, err := decodeConsumerAssignment(buf[1:])
if err != nil {
js.srv.Errorf("JetStream cluster failed to decode consumer assigment: %q", buf[1:])
return didSnap, didRemove, err
}
if isRecovering {
js.setConsumerAssignmentResponded(ca)
}
js.processConsumerRemoval(ca)
didRemove = true
case updateStreamOp:
sa, err := decodeStreamAssignment(buf[1:])
if err != nil {
js.srv.Errorf("JetStream cluster failed to decode stream assignment: %q", buf[1:])
return didSnap, didRemove, err
}
if isRecovering {
js.setStreamAssignmentResponded(sa)
}
js.processUpdateStreamAssignment(sa)
default:
panic("JetStream Cluster Unknown meta entry op type")
}
}
}
return didSnap, didRemove, nil
}
func (rg *raftGroup) isMember(id string) bool {
if rg == nil {
return false
}
for _, peer := range rg.Peers {
if peer == id {
return true
}
}
return false
}
func (rg *raftGroup) setPreferred() {
if rg == nil || len(rg.Peers) == 0 {
return
}
if len(rg.Peers) == 1 {
rg.Preferred = rg.Peers[0]
} else {
// For now just randomly select a peer for the preferred.
pi := rand.Int31n(int32(len(rg.Peers)))
rg.Preferred = rg.Peers[pi]
}
}
// createRaftGroup is called to spin up this raft group if needed.
func (js *jetStream) createRaftGroup(rg *raftGroup) error {
js.mu.Lock()
defer js.mu.Unlock()
s, cc := js.srv, js.cluster
// If this is a single peer raft group or we are not a member return.
if len(rg.Peers) <= 1 || !rg.isMember(cc.meta.ID()) {
// Nothing to do here.
return nil
}
// We already have this assigned.
if node := s.lookupRaftNode(rg.Name); node != nil {
s.Debugf("JetStream cluster already has raft group %q assigned", rg.Name)
rg.node = node
return nil
}
s.Debugf("JetStream cluster creating raft group:%+v", rg)
sysAcc := s.SystemAccount()
if sysAcc == nil {
s.Debugf("JetStream cluster detected shutdown processing raft group: %+v", rg)
return errors.New("shutting down")
}
storeDir := path.Join(js.config.StoreDir, sysAcc.Name, defaultStoreDirName, rg.Name)
fs, err := newFileStore(
FileStoreConfig{StoreDir: storeDir, BlockSize: 8_000_000, AsyncFlush: true},
StreamConfig{Name: rg.Name, Storage: FileStorage},
)
if err != nil {
s.Errorf("Error creating filestore: %v", err)
return err
}
cfg := &RaftConfig{Name: rg.Name, Store: storeDir, Log: fs}
if _, err := readPeerState(storeDir); err != nil {
s.bootstrapRaftNode(cfg, rg.Peers, true)
}
n, err := s.startRaftNode(cfg)
if err != nil {
s.Debugf("Error creating raft group: %v", err)
return err
}
rg.node = n
// See if we are preferred and should start campaign immediately.
if n.ID() == rg.Preferred {
n.Campaign()
}
return nil
}
func (mset *stream) raftGroup() *raftGroup {
if mset == nil {
return nil
}
mset.mu.RLock()
defer mset.mu.RUnlock()
if mset.sa == nil {
return nil
}
return mset.sa.Group
}
func (mset *stream) raftNode() RaftNode {
if mset == nil {
return nil
}
mset.mu.RLock()
defer mset.mu.RUnlock()
return mset.node
}
// Monitor our stream node for this stream.
func (js *jetStream) monitorStream(mset *stream, sa *streamAssignment) {
s, cc, n := js.server(), js.cluster, sa.Group.node
defer s.grWG.Done()
if n == nil {
s.Warnf("No RAFT group for '%s > %s", sa.Client.serviceAccount(), sa.Config.Name)
return
}
qch, lch, ach := n.QuitC(), n.LeadChangeC(), n.ApplyC()
s.Debugf("Starting stream monitor for '%s > %s'", sa.Client.serviceAccount(), sa.Config.Name)
defer s.Debugf("Exiting stream monitor for '%s > %s'", sa.Client.serviceAccount(), sa.Config.Name)
const (
compactInterval = 2 * time.Minute
compactSizeMin = 64 * 1024 * 1024
compactNumMin = 8
)
t := time.NewTicker(compactInterval)
defer t.Stop()
js.mu.RLock()
isLeader := cc.isStreamLeader(sa.Client.serviceAccount(), sa.Config.Name)
isRestore := sa.Restore != nil
js.mu.RUnlock()
acc, err := s.LookupAccount(sa.Client.serviceAccount())
if err != nil {
s.Warnf("Could not retrieve account for stream '%s > %s", sa.Client.serviceAccount(), sa.Config.Name)
return
}
var lastSnap []byte
var lastApplied uint64
	// Should only be called from the leader.
doSnapshot := func() {
if mset == nil || isRestore {
return
}
if snap := mset.stateSnapshot(); !bytes.Equal(lastSnap, snap) {
if err := n.InstallSnapshot(snap); err == nil {
lastSnap = snap
_, _, lastApplied = n.Progress()
}
}
}
	// We will establish a restoreDoneCh no matter what. It will never be
	// triggered unless we replace it with the real restore channel.
restoreDoneCh := make(<-chan error)
isRecovering := true
for {
select {
case <-s.quitCh:
return
case <-qch:
return
case ce := <-ach:
// No special processing needed for when we are caught up on restart.
if ce == nil {
isRecovering = false
continue
}
// Apply our entries.
if err := js.applyStreamEntries(mset, ce, isRecovering); err == nil {
n.Applied(ce.Index)
ne := ce.Index - lastApplied
// If over our compact min and we have at least min entries to compact, go ahead and snapshot/compact.
if _, b := n.Size(); b > compactSizeMin && ne >= compactNumMin {
doSnapshot()
}
} else {
s.Warnf("Error applying entries to '%s > %s'", sa.Client.serviceAccount(), sa.Config.Name)
}
case isLeader = <-lch:
if isLeader && isRestore {
acc, _ := s.LookupAccount(sa.Client.serviceAccount())
restoreDoneCh = s.processStreamRestore(sa.Client, acc, sa.Config.Name, _EMPTY_, sa.Reply, _EMPTY_)
} else {
if !isLeader && n.GroupLeader() != noLeader {
js.setStreamAssignmentResponded(sa)
}
js.processStreamLeaderChange(mset, isLeader)
}
case <-t.C:
if isLeader {
doSnapshot()
}
case err := <-restoreDoneCh:
// We have completed a restore from snapshot on this server. The stream assignment has
// already been assigned but the replicas will need to catch up out of band. Consumers
// will need to be assigned by forwarding the proposal and stamping the initial state.
s.Debugf("Stream restore for '%s > %s' completed", sa.Client.serviceAccount(), sa.Config.Name)
if err != nil {
s.Debugf("Stream restore failed: %v", err)
}
isRestore = false
sa.Restore = nil
			// If we were successful, look up our stream now.
if err == nil {
mset, err = acc.lookupStream(sa.Config.Name)
if mset != nil {
mset.setStreamAssignment(sa)
}
}
if err != nil {
if mset != nil {
mset.delete()
}
js.mu.Lock()
sa.err = err
sa.responded = true
if n != nil {
n.Delete()
}
result := &streamAssignmentResult{
Account: sa.Client.serviceAccount(),
Stream: sa.Config.Name,
Restore: &JSApiStreamRestoreResponse{ApiResponse: ApiResponse{Type: JSApiStreamRestoreResponseType}},
}
result.Restore.Error = jsError(sa.err)
js.mu.Unlock()
// Send response to the metadata leader. They will forward to the user as needed.
b, _ := json.Marshal(result) // Avoids auto-processing and doing fancy json with newlines.
s.sendInternalMsgLocked(streamAssignmentSubj, _EMPTY_, nil, b)
return
}
if !isLeader {
panic("Finished restore but not leader")
}
// Trigger the stream followers to catchup.
if n := mset.raftNode(); n != nil {
n.SendSnapshot(mset.stateSnapshot())
}
js.processStreamLeaderChange(mset, isLeader)
// Check to see if we have restored consumers here.
// These are not currently assigned so we will need to do so here.
if consumers := mset.getConsumers(); len(consumers) > 0 {
				for _, o := range consumers {
rg := cc.createGroupForConsumer(sa)
// Pick a preferred leader.
rg.setPreferred()
name, cfg := o.String(), o.config()
// Place our initial state here as well for assignment distribution.
ca := &consumerAssignment{
Group: rg,
Stream: sa.Config.Name,
Name: name,
Config: &cfg,
Client: sa.Client,
Created: o.createdTime(),
State: o.readStoreState(),
}
// We make these compressed in case state is complex.
addEntry := encodeAddConsumerAssignmentCompressed(ca)
cc.meta.ForwardProposal(addEntry)
// Check to make sure we see the assignment.
go func() {
ticker := time.NewTicker(time.Second)
defer ticker.Stop()
for range ticker.C {
js.mu.RLock()
ca, meta := js.consumerAssignment(ca.Client.serviceAccount(), sa.Config.Name, name), cc.meta
js.mu.RUnlock()
if ca == nil {
s.Warnf("Consumer assignment has not been assigned, retrying")
if meta != nil {
meta.ForwardProposal(addEntry)
} else {
return
}
} else {
return
}
}
}()
}
}
}
}
}
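// applyStreamEntries applies committed entries for a stream's raft group:
// replicated messages, message deletes and purges, plus snapshots and peer
// removals.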
func (js *jetStream) applyStreamEntries(mset *stream, ce *CommittedEntry, isRecovering bool) error {
for _, e := range ce.Entries {
if e.Type == EntrySnapshot {
if !isRecovering && mset != nil {
var snap streamSnapshot
if err := json.Unmarshal(e.Data, &snap); err != nil {
return err
}
mset.processSnapshot(&snap)
}
} else if e.Type == EntryRemovePeer {
js.mu.RLock()
ourID := js.cluster.meta.ID()
js.mu.RUnlock()
if peer := string(e.Data); peer == ourID {
mset.stop(true, false)
}
return nil
} else {
buf := e.Data
switch entryOp(buf[0]) {
case streamMsgOp:
subject, reply, hdr, msg, lseq, ts, err := decodeStreamMsg(buf[1:])
if err != nil {
panic(err.Error())
}
				// Skip this one by hand since the first msg is a special case:
				// sequences are unsigned, so for an incoming lseq of 0 the
				// stream's last sequence would have to be -1.
if lseq == 0 && mset.lastSeq() != 0 {
continue
}
s := js.srv
if err := mset.processJetStreamMsg(subject, reply, hdr, msg, lseq, ts); err != nil {
if err != errLastSeqMismatch || !isRecovering {
s.Debugf("Got error processing JetStream msg: %v", err)
}
if strings.Contains(err.Error(), "no space left") {
s.Errorf("JetStream out of space, will be DISABLED")
s.DisableJetStream()
return err
}
}
case deleteMsgOp:
md, err := decodeMsgDelete(buf[1:])
if err != nil {
panic(err.Error())
}
s, cc := js.server(), js.cluster
var removed bool
if md.NoErase {
removed, err = mset.removeMsg(md.Seq)
} else {
removed, err = mset.eraseMsg(md.Seq)
}
if err != nil && !isRecovering {
s.Debugf("JetStream cluster failed to delete msg %d from stream %q for account %q: %v",
md.Seq, md.Stream, md.Client.serviceAccount(), err)
}
js.mu.RLock()
isLeader := cc.isStreamLeader(md.Client.serviceAccount(), md.Stream)
js.mu.RUnlock()
if isLeader && !isRecovering {
var resp = JSApiMsgDeleteResponse{ApiResponse: ApiResponse{Type: JSApiMsgDeleteResponseType}}
if err != nil {
resp.Error = jsError(err)
s.sendAPIErrResponse(md.Client, mset.account(), md.Subject, md.Reply, _EMPTY_, s.jsonResponse(resp))
} else if !removed {
resp.Error = &ApiError{Code: 400, Description: fmt.Sprintf("sequence [%d] not found", md.Seq)}
s.sendAPIErrResponse(md.Client, mset.account(), md.Subject, md.Reply, _EMPTY_, s.jsonResponse(resp))
} else {
resp.Success = true
s.sendAPIResponse(md.Client, mset.account(), md.Subject, md.Reply, _EMPTY_, s.jsonResponse(resp))
}
}
case purgeStreamOp:
sp, err := decodeStreamPurge(buf[1:])
if err != nil {
panic(err.Error())
}
// Ignore if we are recovering and we have already processed.
if isRecovering {
if mset.state().FirstSeq <= sp.LastSeq {
// Make sure all messages from the purge are gone.
mset.store.Compact(sp.LastSeq + 1)
}
continue
}
s := js.server()
purged, err := mset.purge()
if err != nil {
s.Warnf("JetStream cluster failed to purge stream %q for account %q: %v", sp.Stream, sp.Client.serviceAccount(), err)
}
js.mu.RLock()
isLeader := js.cluster.isStreamLeader(sp.Client.serviceAccount(), sp.Stream)
js.mu.RUnlock()
if isLeader && !isRecovering {
var resp = JSApiStreamPurgeResponse{ApiResponse: ApiResponse{Type: JSApiStreamPurgeResponseType}}
if err != nil {
resp.Error = jsError(err)
s.sendAPIErrResponse(sp.Client, mset.account(), sp.Subject, sp.Reply, _EMPTY_, s.jsonResponse(resp))
} else {
resp.Purged = purged
resp.Success = true
s.sendAPIResponse(sp.Client, mset.account(), sp.Subject, sp.Reply, _EMPTY_, s.jsonResponse(resp))
}
}
default:
panic("JetStream Cluster Unknown group entry op type!")
}
}
}
return nil
}
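// All committed entries applied above share one framing convention: a single
// leading op byte followed by an op-specific payload (JSON for the API-driven
// ops, a custom binary layout for streamMsgOp). A minimal sketch of that
// framing, for illustration only; encodeOpSketch is hypothetical and not part
// of the server.
func encodeOpSketch(op entryOp, payload []byte) []byte {
	var bb bytes.Buffer
	bb.WriteByte(byte(op))
	bb.Write(payload)
	return bb.Bytes()
}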
// Returns the PeerInfo for all replicas of a raft node. This is different than node.Peers()
// and is used for external facing advisories.
func (s *Server) replicas(node RaftNode) []*PeerInfo {
now := time.Now()
var replicas []*PeerInfo
for _, rp := range node.Peers() {
if sir, ok := s.nodeToInfo.Load(rp.ID); ok && sir != nil {
si := sir.(*nodeInfo)
pi := &PeerInfo{Name: si.name, Current: rp.Current, Active: now.Sub(rp.Last), Offline: si.offline, Lag: rp.Lag}
replicas = append(replicas, pi)
}
}
return replicas
}
// Will check our node peers and see if we should remove a peer.
func (js *jetStream) checkPeers(rg *raftGroup) {
js.mu.Lock()
defer js.mu.Unlock()
// FIXME(dlc) - Single replicas?
if rg == nil || rg.node == nil {
return
}
for _, peer := range rg.node.Peers() {
if !rg.isMember(peer.ID) {
rg.node.ProposeRemovePeer(peer.ID)
}
}
}
func (js *jetStream) processStreamLeaderChange(mset *stream, isLeader bool) {
if mset == nil {
return
}
sa := mset.streamAssignment()
if sa == nil {
return
}
js.mu.Lock()
s, account, err := js.srv, sa.Client.serviceAccount(), sa.err
client, subject, reply := sa.Client, sa.Subject, sa.Reply
hasResponded := sa.responded
sa.responded = true
js.mu.Unlock()
streamName := mset.name()
if isLeader {
s.Noticef("JetStream cluster new stream leader for '%s > %s'", sa.Client.serviceAccount(), streamName)
s.sendStreamLeaderElectAdvisory(mset)
// Check for peer removal and process here if needed.
js.checkPeers(sa.Group)
} else {
// We are stepping down.
// Make sure if we are doing so because we have lost quorum that we send the appropriate advisories.
if node := mset.raftNode(); node != nil && !node.Quorum() && time.Since(node.Created()) > 5*time.Second {
s.sendStreamLostQuorumAdvisory(mset)
}
}
// Tell stream to switch leader status.
mset.setLeader(isLeader)
if !isLeader || hasResponded {
return
}
acc, _ := s.LookupAccount(account)
if acc == nil {
return
}
// Send our response.
var resp = JSApiStreamCreateResponse{ApiResponse: ApiResponse{Type: JSApiStreamCreateResponseType}}
if err != nil {
resp.Error = jsError(err)
s.sendAPIErrResponse(client, acc, subject, reply, _EMPTY_, s.jsonResponse(&resp))
} else {
resp.StreamInfo = &StreamInfo{Created: mset.createdTime(), State: mset.state(), Config: mset.config(), Cluster: js.clusterInfo(mset.raftGroup())}
s.sendAPIResponse(client, acc, subject, reply, _EMPTY_, s.jsonResponse(&resp))
if node := mset.raftNode(); node != nil {
mset.sendCreateAdvisory()
}
}
}
// Fixed value ok for now.
const lostQuorumAdvInterval = 10 * time.Second
// Determines if we should send lost quorum advisory. We throttle these after first one.
func (mset *stream) shouldSendLostQuorum() bool {
mset.mu.Lock()
defer mset.mu.Unlock()
if time.Since(mset.lqsent) >= lostQuorumAdvInterval {
mset.lqsent = time.Now()
return true
}
return false
}
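// Net effect of the check above: at most one lost-quorum advisory per stream
// every lostQuorumAdvInterval, no matter how often quorum loss is observed.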
func (s *Server) sendStreamLostQuorumAdvisory(mset *stream) {
if mset == nil {
return
}
node, stream, acc := mset.raftNode(), mset.name(), mset.account()
if node == nil {
return
}
if !mset.shouldSendLostQuorum() {
return
}
s.Warnf("JetStream cluster stream '%s > %s' has NO quorum, stalled.", acc.GetName(), stream)
subj := JSAdvisoryStreamQuorumLostPre + "." + stream
adv := &JSStreamQuorumLostAdvisory{
TypedEvent: TypedEvent{
Type: JSStreamQuorumLostAdvisoryType,
ID: nuid.Next(),
Time: time.Now().UTC(),
},
Stream: stream,
Replicas: s.replicas(node),
}
// Send to the user's account if not the system account.
if acc != s.SystemAccount() {
s.publishAdvisory(acc, subj, adv)
}
// Now do system level one. Place account info in adv, and nil account means system.
adv.Account = acc.GetName()
s.publishAdvisory(nil, subj, adv)
}
func (s *Server) sendStreamLeaderElectAdvisory(mset *stream) {
if mset == nil {
return
}
node, stream, acc := mset.raftNode(), mset.name(), mset.account()
if node == nil {
return
}
subj := JSAdvisoryStreamLeaderElectedPre + "." + stream
adv := &JSStreamLeaderElectedAdvisory{
TypedEvent: TypedEvent{
Type: JSStreamLeaderElectedAdvisoryType,
ID: nuid.Next(),
Time: time.Now().UTC(),
},
Stream: stream,
Leader: s.serverNameForNode(node.GroupLeader()),
Replicas: s.replicas(node),
}
// Send to the user's account if not the system account.
if acc != s.SystemAccount() {
s.publishAdvisory(acc, subj, adv)
}
// Now do system level one. Place account info in adv, and nil account means system.
adv.Account = acc.GetName()
s.publishAdvisory(nil, subj, adv)
}
// Will lookup a stream assignment.
// Lock should be held.
func (js *jetStream) streamAssignment(account, stream string) (sa *streamAssignment) {
cc := js.cluster
if cc == nil {
return nil
}
if as := cc.streams[account]; as != nil {
sa = as[stream]
}
return sa
}
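// Illustrative call pattern only: streamAssignment assumes js.mu is held, so
// callers bracket the lookup with the JetStream lock, e.g.:
//
//	js.mu.RLock()
//	sa := js.streamAssignment(accName, streamName)
//	js.mu.RUnlock()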
// processStreamAssignment is called when followers have replicated an assignment.
func (js *jetStream) processStreamAssignment(sa *streamAssignment) {
js.mu.RLock()
s, cc := js.srv, js.cluster
js.mu.RUnlock()
if s == nil || cc == nil {
// TODO(dlc) - debug at least
return
}
acc, err := s.LookupAccount(sa.Client.serviceAccount())
if err != nil {
// TODO(dlc) - log error
return
}
stream := sa.Config.Name
js.mu.Lock()
if cc.meta == nil {
js.mu.Unlock()
return
}
ourID := cc.meta.ID()
var isMember bool
if sa.Group != nil {
isMember = sa.Group.isMember(ourID)
}
accStreams := cc.streams[acc.Name]
if accStreams == nil {
accStreams = make(map[string]*streamAssignment)
} else if osa := accStreams[stream]; osa != nil {
// Copy over private existing state from former SA.
sa.Group.node = osa.Group.node
sa.consumers = osa.consumers
sa.responded = osa.responded
sa.err = osa.err
}
// Update our state.
accStreams[stream] = sa
cc.streams[acc.Name] = accStreams
js.mu.Unlock()
// Check if this is for us.
if isMember {
js.processClusterCreateStream(acc, sa)
} else if mset, _ := acc.lookupStream(sa.Config.Name); mset != nil {
// We have one here even though we are not a member. This can happen on re-assignment.
s.Debugf("JetStream removing stream '%s > %s' from this server, re-assigned", sa.Client.serviceAccount(), sa.Config.Name)
if node := mset.raftNode(); node != nil {
node.ProposeRemovePeer(ourID)
}
mset.stop(true, false)
}
}
// processUpdateStreamAssignment is called when followers have replicated an updated assignment.
func (js *jetStream) processUpdateStreamAssignment(sa *streamAssignment) {
js.mu.RLock()
s, cc := js.srv, js.cluster
js.mu.RUnlock()
if s == nil || cc == nil {
// TODO(dlc) - debug at least
return
}
acc, err := s.LookupAccount(sa.Client.serviceAccount())
if err != nil {
// TODO(dlc) - log error
return
}
stream := sa.Config.Name
js.mu.Lock()
if cc.meta == nil {
js.mu.Unlock()
return
}
ourID := cc.meta.ID()
var isMember bool
if sa.Group != nil {
isMember = sa.Group.isMember(ourID)
}
accStreams := cc.streams[acc.Name]
if accStreams == nil {
js.mu.Unlock()
return
}
osa := accStreams[stream]
if osa == nil {
js.mu.Unlock()
return
}
// Copy over private existing state from former SA.
sa.Group.node = osa.Group.node
sa.consumers = osa.consumers
sa.err = osa.err
// Update our state.
accStreams[stream] = sa
cc.streams[acc.Name] = accStreams
js.mu.Unlock()
// Check if this is for us.
if isMember {
js.processClusterUpdateStream(acc, sa)
} else if mset, _ := acc.lookupStream(sa.Config.Name); mset != nil {
// We have one here even though we are not a member. This can happen on re-assignment.
s.Debugf("JetStream removing stream '%s > %s' from this server, re-assigned", sa.Client.serviceAccount(), sa.Config.Name)
if node := mset.raftNode(); node != nil {
node.ProposeRemovePeer(ourID)
}
mset.stop(true, false)
}
}
// processClusterUpdateStream is called when we have a stream assignment that
// has been updated for an existing assignment.
func (js *jetStream) processClusterUpdateStream(acc *Account, sa *streamAssignment) {
if sa == nil {
return
}
js.mu.RLock()
s, rg := js.srv, sa.Group
client, subject, reply := sa.Client, sa.Subject, sa.Reply
alreadyRunning := rg.node != nil
hasResponded := sa.responded
sa.responded = true
js.mu.RUnlock()
mset, err := acc.lookupStream(sa.Config.Name)
if err == nil && mset != nil {
if rg.node != nil && !alreadyRunning {
s.startGoRoutine(func() { js.monitorStream(mset, sa) })
}
mset.setStreamAssignment(sa)
if err = mset.update(sa.Config); err != nil {
s.Warnf("JetStream cluster error updating stream %q for account %q: %v", sa.Config.Name, acc.Name, err)
}
}
if err != nil {
js.mu.Lock()
sa.err = err
sa.responded = true
if rg.node != nil {
rg.node.Delete()
}
result := &streamAssignmentResult{
Account: sa.Client.serviceAccount(),
Stream: sa.Config.Name,
Response: &JSApiStreamCreateResponse{ApiResponse: ApiResponse{Type: JSApiStreamCreateResponseType}},
}
result.Response.Error = jsError(err)
js.mu.Unlock()
// Send response to the metadata leader. They will forward to the user as needed.
s.sendInternalMsgLocked(streamAssignmentSubj, _EMPTY_, nil, result)
return
}
mset.mu.RLock()
isLeader := mset.isLeader()
mset.mu.RUnlock()
if !isLeader || hasResponded {
return
}
// Send our response.
var resp = JSApiStreamUpdateResponse{ApiResponse: ApiResponse{Type: JSApiStreamUpdateResponseType}}
if err != nil {
resp.Error = jsError(err)
s.sendAPIErrResponse(client, acc, subject, reply, _EMPTY_, s.jsonResponse(&resp))
} else {
resp.StreamInfo = &StreamInfo{Created: mset.createdTime(), State: mset.state(), Config: mset.config(), Cluster: js.clusterInfo(mset.raftGroup())}
s.sendAPIResponse(client, acc, subject, reply, _EMPTY_, s.jsonResponse(&resp))
}
}
// processClusterCreateStream is called when we have a stream assignment that
// has been committed and this server is a member of the peer group.
func (js *jetStream) processClusterCreateStream(acc *Account, sa *streamAssignment) {
if sa == nil {
return
}
js.mu.RLock()
s, rg := js.srv, sa.Group
alreadyRunning := rg.node != nil
js.mu.RUnlock()
// Process the raft group and make sure it's running if needed.
err := js.createRaftGroup(rg)
// If we are restoring, create the stream only if we are R>1 and not the
// preferred peer, which handles receipt of the snapshot itself.
shouldCreate := true
if sa.Restore != nil {
if len(rg.Peers) == 1 || (rg.node != nil && rg.node.ID() == rg.Preferred) {
shouldCreate = false
} else {
sa.Restore = nil
}
}
// Our stream.
var mset *stream
// Process here if not restoring or not the leader.
if shouldCreate && err == nil {
// Go ahead and create or update the stream.
mset, err = acc.lookupStream(sa.Config.Name)
if err == nil && mset != nil {
mset.setStreamAssignment(sa)
if err := mset.update(sa.Config); err != nil {
s.Warnf("JetStream cluster error updating stream %q for account %q: %v", sa.Config.Name, acc.Name, err)
}
} else if err == ErrJetStreamStreamNotFound {
// Add in the stream here.
mset, err = acc.addStreamWithAssignment(sa.Config, nil, sa)
}
if mset != nil {
mset.setCreatedTime(sa.Created)
}
}
// This is an error condition.
if err != nil {
s.Debugf("Stream create failed for '%s > %s': %v", sa.Client.serviceAccount(), sa.Config.Name, err)
js.mu.Lock()
sa.err = err
sa.responded = true
if rg.node != nil {
rg.node.Delete()
}
result := &streamAssignmentResult{
Account: sa.Client.serviceAccount(),
Stream: sa.Config.Name,
Response: &JSApiStreamCreateResponse{ApiResponse: ApiResponse{Type: JSApiStreamCreateResponseType}},
}
result.Response.Error = jsError(err)
js.mu.Unlock()
// Send response to the metadata leader. They will forward to the user as needed.
s.sendInternalMsgLocked(streamAssignmentSubj, _EMPTY_, nil, result)
return
}
// Start our monitoring routine.
if rg.node != nil {
if !alreadyRunning {
s.startGoRoutine(func() { js.monitorStream(mset, sa) })
}
} else {
// Single replica stream, process manually here.
// If we are restoring, process that first.
if sa.Restore != nil {
// We are restoring a stream here.
restoreDoneCh := s.processStreamRestore(sa.Client, acc, sa.Config.Name, _EMPTY_, sa.Reply, _EMPTY_)
s.startGoRoutine(func() {
defer s.grWG.Done()
select {
case err := <-restoreDoneCh:
if err == nil {
mset, err = acc.lookupStream(sa.Config.Name)
if mset != nil {
mset.setStreamAssignment(sa)
mset.setCreatedTime(sa.Created)
}
}
if err != nil {
if mset != nil {
mset.delete()
}
js.mu.Lock()
sa.err = err
sa.responded = true
result := &streamAssignmentResult{
Account: sa.Client.serviceAccount(),
Stream: sa.Config.Name,
Restore: &JSApiStreamRestoreResponse{ApiResponse: ApiResponse{Type: JSApiStreamRestoreResponseType}},
}
result.Restore.Error = jsError(sa.err)
js.mu.Unlock()
// Send response to the metadata leader. They will forward to the user as needed.
b, _ := json.Marshal(result) // Avoids auto-processing and doing fancy json with newlines.
s.sendInternalMsgLocked(streamAssignmentSubj, _EMPTY_, nil, b)
return
}
js.processStreamLeaderChange(mset, true)
// Check to see if we have restored consumers here.
// These are not currently assigned so we will need to do so here.
if consumers := mset.getConsumers(); len(consumers) > 0 {
js.mu.RLock()
cc := js.cluster
js.mu.RUnlock()
for _, o := range consumers {
rg := cc.createGroupForConsumer(sa)
name, cfg := o.String(), o.config()
// Place our initial state here as well for assignment distribution.
ca := &consumerAssignment{
Group: rg,
Stream: sa.Config.Name,
Name: name,
Config: &cfg,
Client: sa.Client,
Created: o.createdTime(),
}
addEntry := encodeAddConsumerAssignment(ca)
cc.meta.ForwardProposal(addEntry)
// Check to make sure we see the assignment.
go func() {
ticker := time.NewTicker(time.Second)
defer ticker.Stop()
for range ticker.C {
js.mu.RLock()
ca, meta := js.consumerAssignment(ca.Client.serviceAccount(), sa.Config.Name, name), cc.meta
js.mu.RUnlock()
if ca == nil {
s.Warnf("Consumer assignment has not been assigned, retrying")
if meta != nil {
meta.ForwardProposal(addEntry)
} else {
return
}
} else {
return
}
}
}()
}
}
case <-s.quitCh:
return
}
})
} else {
js.processStreamLeaderChange(mset, true)
}
}
}
// processStreamRemoval is called when followers have replicated an assignment.
func (js *jetStream) processStreamRemoval(sa *streamAssignment) {
js.mu.Lock()
s, cc := js.srv, js.cluster
if s == nil || cc == nil || cc.meta == nil {
// TODO(dlc) - debug at least
js.mu.Unlock()
return
}
stream := sa.Config.Name
isMember := sa.Group.isMember(cc.meta.ID())
wasLeader := cc.isStreamLeader(sa.Client.serviceAccount(), stream)
// Check if we already have this assigned.
accStreams := cc.streams[sa.Client.serviceAccount()]
needDelete := accStreams != nil && accStreams[stream] != nil
if needDelete {
delete(accStreams, stream)
if len(accStreams) == 0 {
delete(cc.streams, sa.Client.serviceAccount())
}
}
js.mu.Unlock()
if needDelete {
js.processClusterDeleteStream(sa, isMember, wasLeader)
}
}
func (js *jetStream) processClusterDeleteStream(sa *streamAssignment, isMember, wasLeader bool) {
if sa == nil {
return
}
js.mu.RLock()
s := js.srv
hadLeader := sa.Group.node == nil || sa.Group.node.GroupLeader() != noLeader
js.mu.RUnlock()
acc, err := s.LookupAccount(sa.Client.serviceAccount())
if err != nil {
s.Debugf("JetStream cluster failed to lookup account %q: %v", sa.Client.serviceAccount(), err)
return
}
var resp = JSApiStreamDeleteResponse{ApiResponse: ApiResponse{Type: JSApiStreamDeleteResponseType}}
// Go ahead and delete the stream.
mset, err := acc.lookupStream(sa.Config.Name)
if err != nil {
resp.Error = jsNotFoundError(err)
} else if mset != nil {
err = mset.stop(true, wasLeader)
}
if sa.Group.node != nil {
sa.Group.node.Delete()
}
if !isMember || (!wasLeader && hadLeader) {
return
}
if err != nil {
if resp.Error == nil {
resp.Error = jsError(err)
}
s.sendAPIErrResponse(sa.Client, acc, sa.Subject, sa.Reply, _EMPTY_, s.jsonResponse(resp))
} else {
resp.Success = true
s.sendAPIResponse(sa.Client, acc, sa.Subject, sa.Reply, _EMPTY_, s.jsonResponse(resp))
}
}
// processConsumerAssignment is called when followers have replicated an assignment for a consumer.
func (js *jetStream) processConsumerAssignment(ca *consumerAssignment) {
js.mu.Lock()
s, cc := js.srv, js.cluster
if s == nil || cc == nil || cc.meta == nil {
// TODO(dlc) - debug at least
js.mu.Unlock()
return
}
acc, err := s.LookupAccount(ca.Client.serviceAccount())
	if err != nil {
		// TODO(dlc) - log error
		js.mu.Unlock()
		return
	}
sa := js.streamAssignment(ca.Client.serviceAccount(), ca.Stream)
if sa == nil {
s.Debugf("Consumer create failed, could not locate stream '%s > %s'", ca.Client.serviceAccount(), ca.Stream)
ca.err = ErrJetStreamStreamNotFound
result := &consumerAssignmentResult{
Account: ca.Client.serviceAccount(),
Stream: ca.Stream,
Consumer: ca.Name,
Response: &JSApiConsumerCreateResponse{ApiResponse: ApiResponse{Type: JSApiConsumerCreateResponseType}},
}
result.Response.Error = jsNotFoundError(ErrJetStreamStreamNotFound)
// Send response to the metadata leader. They will forward to the user as needed.
b, _ := json.Marshal(result) // Avoids auto-processing and doing fancy json with newlines.
s.sendInternalMsgLocked(consumerAssignmentSubj, _EMPTY_, nil, b)
js.mu.Unlock()
return
}
if sa.consumers == nil {
sa.consumers = make(map[string]*consumerAssignment)
} else if oca := sa.consumers[ca.Name]; oca != nil {
// Copy over private existing state from former CA.
ca.Group.node = oca.Group.node
ca.responded = oca.responded
ca.err = oca.err
}
// Place into our internal map under the stream assignment.
// Ok to replace an existing one, we check on process call below.
sa.consumers[ca.Name] = ca
// See if we are a member
ourID := cc.meta.ID()
isMember := ca.Group.isMember(ourID)
js.mu.Unlock()
// Check if this is for us.
if isMember {
js.processClusterCreateConsumer(ca)
} else {
// We are not a member, if we have this consumer on this
// server remove it.
if mset, _ := acc.lookupStream(ca.Stream); mset != nil {
if o := mset.lookupConsumer(ca.Name); o != nil {
s.Debugf("JetStream removing consumer '%s > %s > %s' from this server, re-assigned",
ca.Client.serviceAccount(), ca.Stream, ca.Name)
if node := o.raftNode(); node != nil {
node.ProposeRemovePeer(ourID)
}
o.stopWithFlags(true, false, false)
}
}
}
}
func (js *jetStream) processConsumerRemoval(ca *consumerAssignment) {
js.mu.Lock()
s, cc := js.srv, js.cluster
if s == nil || cc == nil || cc.meta == nil {
// TODO(dlc) - debug at least
js.mu.Unlock()
return
}
isMember := ca.Group.isMember(cc.meta.ID())
wasLeader := cc.isConsumerLeader(ca.Client.serviceAccount(), ca.Stream, ca.Name)
// Delete from our state.
var needDelete bool
if accStreams := cc.streams[ca.Client.serviceAccount()]; accStreams != nil {
if sa := accStreams[ca.Stream]; sa != nil && sa.consumers != nil && sa.consumers[ca.Name] != nil {
needDelete = true
delete(sa.consumers, ca.Name)
}
}
js.mu.Unlock()
if needDelete {
js.processClusterDeleteConsumer(ca, isMember, wasLeader)
}
}
type consumerAssignmentResult struct {
Account string `json:"account"`
Stream string `json:"stream"`
Consumer string `json:"consumer"`
Response *JSApiConsumerCreateResponse `json:"response,omitempty"`
}
// processClusterCreateConsumer is called when we are a member of the group and need to create the consumer.
func (js *jetStream) processClusterCreateConsumer(ca *consumerAssignment) {
if ca == nil {
return
}
js.mu.RLock()
s := js.srv
acc, err := s.LookupAccount(ca.Client.serviceAccount())
if err != nil {
s.Warnf("JetStream cluster failed to lookup account %q: %v", ca.Client.serviceAccount(), err)
js.mu.RUnlock()
return
}
rg := ca.Group
alreadyRunning := rg.node != nil
js.mu.RUnlock()
// Go ahead and create or update the consumer.
mset, err := acc.lookupStream(ca.Stream)
if err != nil {
js.mu.Lock()
s.Debugf("Consumer create failed, could not locate stream '%s > %s'", ca.Client.serviceAccount(), ca.Stream)
ca.err = ErrJetStreamStreamNotFound
result := &consumerAssignmentResult{
Account: ca.Client.serviceAccount(),
Stream: ca.Stream,
Consumer: ca.Name,
Response: &JSApiConsumerCreateResponse{ApiResponse: ApiResponse{Type: JSApiConsumerCreateResponseType}},
}
result.Response.Error = jsNotFoundError(ErrJetStreamStreamNotFound)
// Send response to the metadata leader. They will forward to the user as needed.
b, _ := json.Marshal(result) // Avoids auto-processing and doing fancy json with newlines.
s.sendInternalMsgLocked(consumerAssignmentSubj, _EMPTY_, nil, b)
js.mu.Unlock()
return
}
// Process the raft group and make sure it's running if needed.
js.createRaftGroup(rg)
// Check if we already have this consumer running.
o := mset.lookupConsumer(ca.Name)
if o != nil {
if o.isDurable() && o.isPushMode() {
ocfg := o.config()
if configsEqualSansDelivery(ocfg, *ca.Config) && o.hasNoLocalInterest() {
o.updateDeliverSubject(ca.Config.DeliverSubject)
}
}
o.setConsumerAssignment(ca)
s.Debugf("JetStream cluster, consumer was already running")
}
// Add in the consumer if needed.
if o == nil {
o, err = mset.addConsumerWithAssignment(ca.Config, ca.Name, ca)
}
// If we have an initial state set apply that now.
if ca.State != nil && o != nil {
err = o.setStoreState(ca.State)
}
if err != nil {
s.Debugf("Consumer create failed for '%s > %s > %s': %v", ca.Client.serviceAccount(), ca.Stream, ca.Name, err)
ca.err = err
if rg.node != nil {
rg.node.Delete()
}
result := &consumerAssignmentResult{
Account: ca.Client.serviceAccount(),
Stream: ca.Stream,
Consumer: ca.Name,
Response: &JSApiConsumerCreateResponse{ApiResponse: ApiResponse{Type: JSApiConsumerCreateResponseType}},
}
result.Response.Error = jsError(err)
// Send response to the metadata leader. They will forward to the user as needed.
b, _ := json.Marshal(result) // Avoids auto-processing and doing fancy json with newlines.
s.sendInternalMsgLocked(consumerAssignmentSubj, _EMPTY_, nil, b)
} else {
o.setCreatedTime(ca.Created)
// Start our monitoring routine.
if rg.node != nil {
if !alreadyRunning {
s.startGoRoutine(func() { js.monitorConsumer(o, ca) })
}
} else {
// Single replica consumer, process manually here.
js.processConsumerLeaderChange(o, true)
}
}
}
func (js *jetStream) processClusterDeleteConsumer(ca *consumerAssignment, isMember, wasLeader bool) {
if ca == nil {
return
}
js.mu.RLock()
s := js.srv
js.mu.RUnlock()
acc, err := s.LookupAccount(ca.Client.serviceAccount())
if err != nil {
s.Warnf("JetStream cluster failed to lookup account %q: %v", ca.Client.serviceAccount(), err)
return
}
var resp = JSApiConsumerDeleteResponse{ApiResponse: ApiResponse{Type: JSApiConsumerDeleteResponseType}}
// Go ahead and delete the consumer.
mset, err := acc.lookupStream(ca.Stream)
if err != nil {
resp.Error = jsNotFoundError(err)
} else if mset != nil {
if o := mset.lookupConsumer(ca.Name); o != nil {
err = o.stopWithFlags(true, true, wasLeader)
} else {
resp.Error = jsNoConsumerErr
}
}
if ca.Group.node != nil {
ca.Group.node.Delete()
}
if !wasLeader || ca.Reply == _EMPTY_ {
return
}
if err != nil {
if resp.Error == nil {
resp.Error = jsError(err)
}
s.sendAPIErrResponse(ca.Client, acc, ca.Subject, ca.Reply, _EMPTY_, s.jsonResponse(resp))
} else {
resp.Success = true
s.sendAPIResponse(ca.Client, acc, ca.Subject, ca.Reply, _EMPTY_, s.jsonResponse(resp))
}
}
// Returns the consumer assignment, or nil if not present.
// Lock should be held.
func (js *jetStream) consumerAssignment(account, stream, consumer string) *consumerAssignment {
if sa := js.streamAssignment(account, stream); sa != nil {
return sa.consumers[consumer]
}
return nil
}
// consumerAssigned informs us if this server has this consumer assigned.
func (jsa *jsAccount) consumerAssigned(stream, consumer string) bool {
jsa.mu.RLock()
defer jsa.mu.RUnlock()
js, acc := jsa.js, jsa.account
if js == nil {
return false
}
return js.cluster.isConsumerAssigned(acc, stream, consumer)
}
// Read lock should be held.
func (cc *jetStreamCluster) isConsumerAssigned(a *Account, stream, consumer string) bool {
// Non-clustered mode always returns true.
if cc == nil {
return true
}
var sa *streamAssignment
accStreams := cc.streams[a.Name]
if accStreams != nil {
sa = accStreams[stream]
}
if sa == nil {
// TODO(dlc) - This should not happen.
return false
}
ca := sa.consumers[consumer]
if ca == nil {
return false
}
	rg := ca.Group
	// Check if we are a member of the raft group assigned to this consumer.
	return rg.isMember(cc.meta.ID())
}
func (o *consumer) raftGroup() *raftGroup {
if o == nil {
return nil
}
o.mu.RLock()
defer o.mu.RUnlock()
if o.ca == nil {
return nil
}
return o.ca.Group
}
func (o *consumer) raftNode() RaftNode {
if o == nil {
return nil
}
o.mu.RLock()
defer o.mu.RUnlock()
return o.node
}
func (js *jetStream) monitorConsumer(o *consumer, ca *consumerAssignment) {
s, cc, n := js.server(), js.cluster, o.raftNode()
defer s.grWG.Done()
if n == nil {
s.Warnf("No RAFT group for consumer")
return
}
qch, lch, ach := n.QuitC(), n.LeadChangeC(), n.ApplyC()
s.Debugf("Starting consumer monitor for '%s > %s > %s'", o.acc.Name, ca.Stream, ca.Name)
defer s.Debugf("Exiting consumer monitor for '%s > %s > %s'", o.acc.Name, ca.Stream, ca.Name)
js.mu.RLock()
isLeader := cc.isConsumerLeader(ca.Client.serviceAccount(), ca.Stream, ca.Name)
js.mu.RUnlock()
const (
compactInterval = 2 * time.Minute
compactSizeMin = 8 * 1024 * 1024
compactNumMin = 64
)
t := time.NewTicker(compactInterval)
defer t.Stop()
var lastSnap []byte
var lastApplied uint64
// Should only be called from the leader.
doSnapshot := func() {
if state, err := o.store.State(); err == nil && state != nil {
if snap := encodeConsumerState(state); !bytes.Equal(lastSnap, snap) {
if err := n.InstallSnapshot(snap); err == nil {
lastSnap = snap
_, _, lastApplied = n.Progress()
}
}
}
}
for {
select {
case <-s.quitCh:
return
case <-qch:
return
case ce := <-ach:
// No special processing needed when we are caught up on restart.
if ce == nil {
continue
}
if err := js.applyConsumerEntries(o, ce); err == nil {
n.Applied(ce.Index)
ne := ce.Index - lastApplied
// If over our compact min and we have at least min entries to compact, go ahead and snapshot/compact.
if _, b := n.Size(); b > compactSizeMin && ne > compactNumMin {
doSnapshot()
}
} else {
s.Warnf("Error applying consumer entries to '%s > %s'", ca.Client.serviceAccount(), ca.Name)
}
case isLeader := <-lch:
if !isLeader && n.GroupLeader() != noLeader {
js.setConsumerAssignmentResponded(ca)
}
js.processConsumerLeaderChange(o, isLeader)
case <-t.C:
if isLeader {
doSnapshot()
}
}
}
}
func (js *jetStream) applyConsumerEntries(o *consumer, ce *CommittedEntry) error {
for _, e := range ce.Entries {
if e.Type == EntrySnapshot {
// No-op needed?
state, err := decodeConsumerState(e.Data)
if err != nil {
panic(err.Error())
}
o.store.Update(state)
} else if e.Type == EntryRemovePeer {
js.mu.RLock()
ourID := js.cluster.meta.ID()
js.mu.RUnlock()
if peer := string(e.Data); peer == ourID {
o.stopWithFlags(true, false, false)
}
return nil
} else {
buf := e.Data
switch entryOp(buf[0]) {
case updateDeliveredOp:
dseq, sseq, dc, ts, err := decodeDeliveredUpdate(buf[1:])
if err != nil {
panic(err.Error())
}
if err := o.store.UpdateDelivered(dseq, sseq, dc, ts); err != nil {
panic(err.Error())
}
case updateAcksOp:
dseq, sseq, err := decodeAckUpdate(buf[1:])
if err != nil {
panic(err.Error())
}
o.processReplicatedAck(dseq, sseq)
case updateSkipOp:
o.mu.Lock()
if !o.isLeader() {
var le = binary.LittleEndian
o.sseq = le.Uint64(buf[1:])
}
o.mu.Unlock()
default:
panic(fmt.Sprintf("JetStream Cluster Unknown group entry op type! %v", entryOp(buf[0])))
}
}
}
return nil
}
func (o *consumer) processReplicatedAck(dseq, sseq uint64) {
o.store.UpdateAcks(dseq, sseq)
o.mu.RLock()
mset := o.mset
if mset == nil || mset.cfg.Retention != InterestPolicy {
o.mu.RUnlock()
return
}
var sagap uint64
if o.cfg.AckPolicy == AckAll {
if o.isLeader() {
sagap = sseq - o.asflr
} else {
// We are a follower so only have the store state, so read that in.
state, err := o.store.State()
if err != nil {
o.mu.RUnlock()
return
}
sagap = sseq - state.AckFloor.Stream
}
}
o.mu.RUnlock()
if sagap > 1 {
// FIXME(dlc) - This is very inefficient, will need to fix.
for seq := sseq; seq > sseq-sagap; seq-- {
mset.ackMsg(o, seq)
}
} else {
mset.ackMsg(o, sseq)
}
}
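// Example of the AckAll gap handling above: if the ack floor is 90 and a
// replicated ack arrives for stream sequence 100, sagap is 10 and messages
// 91 through 100 are acked individually (hence the FIXME on efficiency).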
var errBadAckUpdate = errors.New("jetstream cluster bad replicated ack update")
var errBadDeliveredUpdate = errors.New("jetstream cluster bad replicated delivered update")
func decodeAckUpdate(buf []byte) (dseq, sseq uint64, err error) {
var bi, n int
if dseq, n = binary.Uvarint(buf); n < 0 {
return 0, 0, errBadAckUpdate
}
bi += n
if sseq, n = binary.Uvarint(buf[bi:]); n < 0 {
return 0, 0, errBadAckUpdate
}
return dseq, sseq, nil
}
func decodeDeliveredUpdate(buf []byte) (dseq, sseq, dc uint64, ts int64, err error) {
var bi, n int
if dseq, n = binary.Uvarint(buf); n < 0 {
return 0, 0, 0, 0, errBadDeliveredUpdate
}
bi += n
if sseq, n = binary.Uvarint(buf[bi:]); n < 0 {
return 0, 0, 0, 0, errBadDeliveredUpdate
}
bi += n
if dc, n = binary.Uvarint(buf[bi:]); n < 0 {
return 0, 0, 0, 0, errBadDeliveredUpdate
}
bi += n
if ts, n = binary.Varint(buf[bi:]); n < 0 {
return 0, 0, 0, 0, errBadDeliveredUpdate
}
return dseq, sseq, dc, ts, nil
}
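// A hedged sketch of the matching write side: the sequences and delivery
// count are unsigned varints and the timestamp is a signed varint, mirroring
// the Uvarint/Varint split in the decoders above. This helper is illustrative
// only, not the server's actual encoder.
func encodeDeliveredUpdateSketch(dseq, sseq, dc uint64, ts int64) []byte {
	var buf [4 * binary.MaxVarintLen64]byte
	n := binary.PutUvarint(buf[:], dseq)
	n += binary.PutUvarint(buf[n:], sseq)
	n += binary.PutUvarint(buf[n:], dc)
	n += binary.PutVarint(buf[n:], ts)
	return buf[:n]
}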
func (js *jetStream) processConsumerLeaderChange(o *consumer, isLeader bool) {
ca := o.consumerAssignment()
if ca == nil {
return
}
js.mu.Lock()
s, account, err := js.srv, ca.Client.serviceAccount(), ca.err
client, subject, reply := ca.Client, ca.Subject, ca.Reply
hasResponded := ca.responded
ca.responded = true
js.mu.Unlock()
streamName := o.streamName()
consumerName := o.String()
acc, _ := s.LookupAccount(account)
if acc == nil {
return
}
if isLeader {
s.Noticef("JetStream cluster new consumer leader for '%s > %s > %s'", ca.Client.serviceAccount(), streamName, consumerName)
s.sendConsumerLeaderElectAdvisory(o)
// Check for peer removal and process here if needed.
js.checkPeers(ca.Group)
} else {
// We are stepping down.
// Make sure if we are doing so because we have lost quorum that we send the appropriate advisories.
if node := o.raftNode(); node != nil && !node.Quorum() && time.Since(node.Created()) > 5*time.Second {
s.sendConsumerLostQuorumAdvisory(o)
}
}
// Tell consumer to switch leader status.
o.setLeader(isLeader)
if !isLeader || hasResponded {
return
}
var resp = JSApiConsumerCreateResponse{ApiResponse: ApiResponse{Type: JSApiConsumerCreateResponseType}}
if err != nil {
resp.Error = jsError(err)
s.sendAPIErrResponse(client, acc, subject, reply, _EMPTY_, s.jsonResponse(&resp))
} else {
resp.ConsumerInfo = o.info()
s.sendAPIResponse(client, acc, subject, reply, _EMPTY_, s.jsonResponse(&resp))
if node := o.raftNode(); node != nil {
o.sendCreateAdvisory()
}
}
}
// Determines if we should send lost quorum advisory. We throttle these after first one.
func (o *consumer) shouldSendLostQuorum() bool {
o.mu.Lock()
defer o.mu.Unlock()
if time.Since(o.lqsent) >= lostQuorumAdvInterval {
o.lqsent = time.Now()
return true
}
return false
}
func (s *Server) sendConsumerLostQuorumAdvisory(o *consumer) {
if o == nil {
return
}
node, stream, consumer, acc := o.raftNode(), o.streamName(), o.String(), o.account()
if node == nil {
return
}
if !o.shouldSendLostQuorum() {
return
}
s.Warnf("JetStream cluster consumer '%s > %s > %s' has NO quorum, stalled.", acc.GetName(), stream, consumer)
subj := JSAdvisoryConsumerQuorumLostPre + "." + stream + "." + consumer
adv := &JSConsumerQuorumLostAdvisory{
TypedEvent: TypedEvent{
Type: JSConsumerQuorumLostAdvisoryType,
ID: nuid.Next(),
Time: time.Now().UTC(),
},
Stream: stream,
Consumer: consumer,
Replicas: s.replicas(node),
}
// Send to the user's account if not the system account.
if acc != s.SystemAccount() {
s.publishAdvisory(acc, subj, adv)
}
// Now do system level one. Place account info in adv, and nil account means system.
adv.Account = acc.GetName()
s.publishAdvisory(nil, subj, adv)
}
func (s *Server) sendConsumerLeaderElectAdvisory(o *consumer) {
if o == nil {
return
}
node, stream, consumer, acc := o.raftNode(), o.streamName(), o.String(), o.account()
if node == nil {
return
}
subj := JSAdvisoryConsumerLeaderElectedPre + "." + stream + "." + consumer
adv := &JSConsumerLeaderElectedAdvisory{
TypedEvent: TypedEvent{
Type: JSConsumerLeaderElectedAdvisoryType,
ID: nuid.Next(),
Time: time.Now().UTC(),
},
Stream: stream,
Consumer: consumer,
Leader: s.serverNameForNode(node.GroupLeader()),
Replicas: s.replicas(node),
}
// Send to the user's account if not the system account.
if acc != s.SystemAccount() {
s.publishAdvisory(acc, subj, adv)
}
// Now do system level one. Place account info in adv, and nil account means system.
adv.Account = acc.GetName()
s.publishAdvisory(nil, subj, adv)
}
type streamAssignmentResult struct {
Account string `json:"account"`
Stream string `json:"stream"`
Response *JSApiStreamCreateResponse `json:"create_response,omitempty"`
Restore *JSApiStreamRestoreResponse `json:"restore_response,omitempty"`
}
// Process error results of stream and consumer assignments.
// Success will be handled by stream leader.
func (js *jetStream) processStreamAssignmentResults(sub *subscription, c *client, subject, reply string, msg []byte) {
var result streamAssignmentResult
if err := json.Unmarshal(msg, &result); err != nil {
// TODO(dlc) - log
return
}
acc, _ := js.srv.LookupAccount(result.Account)
if acc == nil {
// TODO(dlc) - log
return
}
js.mu.Lock()
defer js.mu.Unlock()
s, cc := js.srv, js.cluster
// FIXME(dlc) - suppress duplicates?
if sa := js.streamAssignment(result.Account, result.Stream); sa != nil {
var resp string
if result.Response != nil {
resp = s.jsonResponse(result.Response)
} else if result.Restore != nil {
resp = s.jsonResponse(result.Restore)
}
js.srv.sendAPIErrResponse(sa.Client, acc, sa.Subject, sa.Reply, _EMPTY_, resp)
sa.responded = true
// TODO(dlc) - Could have mixed results, should track per peer.
// Set sa.err while we are deleting so we will not respond to list/names requests.
sa.err = ErrJetStreamNotAssigned
cc.meta.Propose(encodeDeleteStreamAssignment(sa))
}
}
func (js *jetStream) processConsumerAssignmentResults(sub *subscription, c *client, subject, reply string, msg []byte) {
var result consumerAssignmentResult
if err := json.Unmarshal(msg, &result); err != nil {
// TODO(dlc) - log
return
}
acc, _ := js.srv.LookupAccount(result.Account)
if acc == nil {
// TODO(dlc) - log
return
}
js.mu.Lock()
defer js.mu.Unlock()
s, cc := js.srv, js.cluster
if sa := js.streamAssignment(result.Account, result.Stream); sa != nil && sa.consumers != nil {
if ca := sa.consumers[result.Consumer]; ca != nil && !ca.responded {
js.srv.sendAPIErrResponse(ca.Client, acc, ca.Subject, ca.Reply, _EMPTY_, s.jsonResponse(result.Response))
ca.responded = true
// Check if this failed.
// TODO(dlc) - Could have mixed results, should track per peer.
if result.Response.Error != nil {
// Set ca.err while we are deleting so we will not respond to list/names requests.
ca.err = ErrJetStreamNotAssigned
cc.meta.Propose(encodeDeleteConsumerAssignment(ca))
}
}
}
}
const (
streamAssignmentSubj = "$SYS.JSC.STREAM.ASSIGNMENT.RESULT"
consumerAssignmentSubj = "$SYS.JSC.CONSUMER.ASSIGNMENT.RESULT"
)
// Lock should be held.
func (js *jetStream) startUpdatesSub() {
cc, s, c := js.cluster, js.srv, js.cluster.c
if cc.streamResults == nil {
cc.streamResults, _ = s.systemSubscribe(streamAssignmentSubj, _EMPTY_, false, c, js.processStreamAssignmentResults)
}
if cc.consumerResults == nil {
cc.consumerResults, _ = s.systemSubscribe(consumerAssignmentSubj, _EMPTY_, false, c, js.processConsumerAssignmentResults)
}
if cc.stepdown == nil {
cc.stepdown, _ = s.systemSubscribe(JSApiLeaderStepDown, _EMPTY_, false, c, s.jsLeaderStepDownRequest)
}
}
// Lock should be held.
func (js *jetStream) stopUpdatesSub() {
cc := js.cluster
if cc.streamResults != nil {
cc.s.sysUnsubscribe(cc.streamResults)
cc.streamResults = nil
}
if cc.consumerResults != nil {
cc.s.sysUnsubscribe(cc.consumerResults)
cc.consumerResults = nil
}
if cc.stepdown != nil {
cc.s.sysUnsubscribe(cc.stepdown)
cc.stepdown = nil
}
}
func (js *jetStream) processLeaderChange(isLeader bool) {
if isLeader {
js.srv.Noticef("JetStream cluster new metadata leader")
}
js.mu.Lock()
defer js.mu.Unlock()
if isLeader {
js.startUpdatesSub()
} else {
js.stopUpdatesSub()
// TODO(dlc) - stepdown.
}
}
// Lock should be held.
func (cc *jetStreamCluster) remapStreamAssignment(sa *streamAssignment, removePeer string) bool {
// Need to select a replacement peer
s, now, cluster := cc.s, time.Now(), sa.Client.Cluster
if sa.Config.Placement != nil && sa.Config.Placement.Cluster != _EMPTY_ {
cluster = sa.Config.Placement.Cluster
}
for _, p := range cc.meta.Peers() {
// If it is not in our list it probably shut down, so don't consider it.
if si, ok := s.nodeToInfo.Load(p.ID); !ok || si.(*nodeInfo).offline {
continue
}
// Make sure they are active and current and not already part of our group.
current, lastSeen := p.Current, now.Sub(p.Last)
if !current || lastSeen > lostQuorumInterval || sa.Group.isMember(p.ID) {
continue
}
// Make sure the correct cluster.
if s.clusterNameForNode(p.ID) != cluster {
continue
}
// If we are here we have our candidate replacement, swap out the old one.
for i, peer := range sa.Group.Peers {
if peer == removePeer {
sa.Group.Peers[i] = p.ID
// Don't influence preferred leader.
sa.Group.Preferred = _EMPTY_
return true
}
}
}
return false
}
// selectPeerGroup will select a group of peers to start a raft group.
// TODO(dlc) - For now randomly select. Can be way smarter.
func (cc *jetStreamCluster) selectPeerGroup(r int, cluster string) []string {
var nodes []string
peers := cc.meta.Peers()
s := cc.s
for _, p := range peers {
// If we know it's offline or it is not in our list, it probably shut down, so don't consider it.
if si, ok := s.nodeToInfo.Load(p.ID); !ok || si.(*nodeInfo).offline {
continue
}
if cluster != _EMPTY_ {
if s.clusterNameForNode(p.ID) == cluster {
nodes = append(nodes, p.ID)
}
} else {
nodes = append(nodes, p.ID)
}
}
if len(nodes) < r {
return nil
}
// Don't depend on range to randomize.
rand.Shuffle(len(nodes), func(i, j int) { nodes[i], nodes[j] = nodes[j], nodes[i] })
return nodes[:r]
}
func groupNameForStream(peers []string, storage StorageType) string {
return groupName("S", peers, storage)
}
func groupNameForConsumer(peers []string, storage StorageType) string {
return groupName("C", peers, storage)
}
func groupName(prefix string, peers []string, storage StorageType) string {
var gns string
if len(peers) == 1 {
gns = peers[0]
} else {
gns = string(getHash(nuid.Next()))
}
return fmt.Sprintf("%s-R%d%s-%s", prefix, len(peers), storage.String()[:1], gns)
}
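// For example, a three-peer file-backed stream group gets a name like
// "S-R3F-<hash>", while a single-peer memory-backed consumer group reuses the
// lone peer ID, e.g. "C-R1M-<peerID>". Concrete values here are illustrative.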
// createGroupForStream will create a group for assignment for the stream.
// Lock should be held.
func (cc *jetStreamCluster) createGroupForStream(ci *ClientInfo, cfg *StreamConfig) *raftGroup {
replicas := cfg.Replicas
if replicas == 0 {
replicas = 1
}
cluster := ci.Cluster
if cfg.Placement != nil && cfg.Placement.Cluster != _EMPTY_ {
cluster = cfg.Placement.Cluster
}
// Need to create a group here.
// TODO(dlc) - Can be way smarter here.
peers := cc.selectPeerGroup(replicas, cluster)
if len(peers) == 0 {
return nil
}
return &raftGroup{Name: groupNameForStream(peers, cfg.Storage), Storage: cfg.Storage, Peers: peers}
}
func (s *Server) jsClusteredStreamRequest(ci *ClientInfo, acc *Account, subject, reply string, rmsg []byte, config *StreamConfig) {
js, cc := s.getJetStreamCluster()
if js == nil || cc == nil {
return
}
var resp = JSApiStreamCreateResponse{ApiResponse: ApiResponse{Type: JSApiStreamCreateResponseType}}
// Grab our jetstream account info.
acc.mu.RLock()
jsa := acc.js
acc.mu.RUnlock()
if jsa == nil {
resp.Error = jsNotEnabledErr
s.sendAPIErrResponse(ci, acc, subject, reply, string(rmsg), s.jsonResponse(&resp))
return
}
ccfg, err := checkStreamCfg(config)
if err != nil {
resp.Error = jsError(err)
s.sendAPIErrResponse(ci, acc, subject, reply, string(rmsg), s.jsonResponse(&resp))
return
}
cfg := &ccfg
js.mu.RLock()
numStreams := len(cc.streams[acc.Name])
js.mu.RUnlock()
// Check for stream limits here before proposing. These need to be tracked from meta layer, not jsa.
jsa.mu.RLock()
exceeded := jsa.limits.MaxStreams > 0 && numStreams >= jsa.limits.MaxStreams
jsa.mu.RUnlock()
if exceeded {
resp.Error = jsError(fmt.Errorf("maximum number of streams reached"))
s.sendAPIErrResponse(ci, acc, subject, reply, string(rmsg), s.jsonResponse(&resp))
return
}
// Check for stream limits here before proposing.
if err := jsa.checkLimits(cfg); err != nil {
resp.Error = jsError(err)
s.sendAPIErrResponse(ci, acc, subject, reply, string(rmsg), s.jsonResponse(&resp))
return
}
// Now process the request and proposal.
js.mu.Lock()
defer js.mu.Unlock()
if sa := js.streamAssignment(acc.Name, cfg.Name); sa != nil {
resp.Error = jsError(ErrJetStreamStreamAlreadyUsed)
s.sendAPIErrResponse(ci, acc, subject, reply, string(rmsg), s.jsonResponse(&resp))
return
}
// Raft group selection and placement.
rg := cc.createGroupForStream(ci, cfg)
if rg == nil {
resp.Error = jsInsufficientErr
s.sendAPIErrResponse(ci, acc, subject, reply, string(rmsg), s.jsonResponse(&resp))
return
}
// Pick a preferred leader.
rg.setPreferred()
// Sync subject for post snapshot sync.
sa := &streamAssignment{Group: rg, Sync: syncSubjForStream(), Config: cfg, Subject: subject, Reply: reply, Client: ci, Created: time.Now()}
cc.meta.Propose(encodeAddStreamAssignment(sa))
}
func (s *Server) jsClusteredStreamUpdateRequest(ci *ClientInfo, acc *Account, subject, reply string, rmsg []byte, cfg *StreamConfig) {
js, cc := s.getJetStreamCluster()
if js == nil || cc == nil {
return
}
// Now process the request and proposal.
js.mu.Lock()
defer js.mu.Unlock()
var resp = JSApiStreamUpdateResponse{ApiResponse: ApiResponse{Type: JSApiStreamUpdateResponseType}}
osa := js.streamAssignment(acc.Name, cfg.Name)
if osa == nil {
resp.Error = jsNotFoundError(ErrJetStreamStreamNotFound)
s.sendAPIErrResponse(ci, acc, subject, reply, string(rmsg), s.jsonResponse(&resp))
return
}
var newCfg *StreamConfig
if jsa := js.accounts[acc]; jsa != nil {
if ncfg, err := jsa.configUpdateCheck(osa.Config, cfg); err != nil {
resp.Error = jsError(err)
s.sendAPIErrResponse(ci, acc, subject, reply, string(rmsg), s.jsonResponse(&resp))
return
} else {
newCfg = ncfg
}
} else {
resp.Error = jsNotEnabledErr
s.sendAPIErrResponse(ci, acc, subject, reply, string(rmsg), s.jsonResponse(&resp))
return
}
sa := &streamAssignment{Group: osa.Group, Config: newCfg, Subject: subject, Reply: reply, Client: ci}
cc.meta.Propose(encodeUpdateStreamAssignment(sa))
}
func (s *Server) jsClusteredStreamDeleteRequest(ci *ClientInfo, acc *Account, stream, subject, reply string, rmsg []byte) {
js, cc := s.getJetStreamCluster()
if js == nil || cc == nil {
return
}
js.mu.Lock()
defer js.mu.Unlock()
osa := js.streamAssignment(acc.Name, stream)
if osa == nil {
var resp = JSApiStreamDeleteResponse{ApiResponse: ApiResponse{Type: JSApiStreamDeleteResponseType}}
resp.Error = jsNotFoundError(ErrJetStreamStreamNotFound)
s.sendAPIErrResponse(ci, acc, subject, reply, string(rmsg), s.jsonResponse(&resp))
return
}
// Remove any remaining consumers as well.
for _, ca := range osa.consumers {
ca.Reply, ca.State = _EMPTY_, nil
cc.meta.Propose(encodeDeleteConsumerAssignment(ca))
}
sa := &streamAssignment{Group: osa.Group, Config: osa.Config, Subject: subject, Reply: reply, Client: ci}
cc.meta.Propose(encodeDeleteStreamAssignment(sa))
}
func (s *Server) jsClusteredStreamPurgeRequest(ci *ClientInfo, acc *Account, mset *stream, stream, subject, reply string, rmsg []byte) {
js, cc := s.getJetStreamCluster()
if js == nil || cc == nil {
return
}
js.mu.Lock()
defer js.mu.Unlock()
sa := js.streamAssignment(acc.Name, stream)
if sa == nil {
resp := JSApiStreamPurgeResponse{ApiResponse: ApiResponse{Type: JSApiStreamPurgeResponseType}}
resp.Error = jsNotFoundError(ErrJetStreamStreamNotFound)
s.sendAPIErrResponse(ci, acc, subject, reply, string(rmsg), s.jsonResponse(&resp))
return
}
if n := sa.Group.node; n != nil {
sp := &streamPurge{Stream: stream, LastSeq: mset.state().LastSeq, Subject: subject, Reply: reply, Client: ci}
n.Propose(encodeStreamPurge(sp))
} else if mset != nil {
var resp = JSApiStreamPurgeResponse{ApiResponse: ApiResponse{Type: JSApiStreamPurgeResponseType}}
purged, err := mset.purge()
if err != nil {
resp.Error = jsError(err)
} else {
resp.Purged = purged
resp.Success = true
}
s.sendAPIResponse(ci, acc, subject, reply, string(rmsg), s.jsonResponse(resp))
}
}
func (s *Server) jsClusteredStreamRestoreRequest(ci *ClientInfo, acc *Account, req *JSApiStreamRestoreRequest, stream, subject, reply string, rmsg []byte) {
js, cc := s.getJetStreamCluster()
if js == nil || cc == nil {
return
}
js.mu.Lock()
defer js.mu.Unlock()
cfg := &req.Config
resp := JSApiStreamRestoreResponse{ApiResponse: ApiResponse{Type: JSApiStreamRestoreResponseType}}
if sa := js.streamAssignment(ci.serviceAccount(), cfg.Name); sa != nil {
resp.Error = jsError(ErrJetStreamStreamAlreadyUsed)
s.sendAPIErrResponse(ci, acc, subject, reply, string(rmsg), s.jsonResponse(&resp))
return
}
// Raft group selection and placement.
rg := cc.createGroupForStream(ci, cfg)
if rg == nil {
resp.Error = jsInsufficientErr
s.sendAPIErrResponse(ci, acc, subject, reply, string(rmsg), s.jsonResponse(&resp))
return
}
// Pick a preferred leader.
rg.setPreferred()
sa := &streamAssignment{Group: rg, Sync: syncSubjForStream(), Config: cfg, Subject: subject, Reply: reply, Client: ci, Created: time.Now()}
// Now add in our restore state and pre-select a peer to handle the actual receipt of the snapshot.
sa.Restore = &req.State
cc.meta.Propose(encodeAddStreamAssignment(sa))
}
// This will do a scatter and gather operation for all streams for this account. This is only called from metadata leader.
// This will be running in a separate Go routine.
func (s *Server) jsClusteredStreamListRequest(acc *Account, ci *ClientInfo, offset int, subject, reply string, rmsg []byte) {
defer s.grWG.Done()
js, cc := s.getJetStreamCluster()
if js == nil || cc == nil {
return
}
js.mu.Lock()
var streams []*streamAssignment
for _, sa := range cc.streams[acc.Name] {
streams = append(streams, sa)
}
// Needs to be sorted.
if len(streams) > 1 {
sort.Slice(streams, func(i, j int) bool {
return strings.Compare(streams[i].Config.Name, streams[j].Config.Name) < 0
})
}
scnt := len(streams)
if offset > scnt {
offset = scnt
}
if offset > 0 {
streams = streams[offset:]
}
if len(streams) > JSApiListLimit {
streams = streams[:JSApiListLimit]
}
var resp = JSApiStreamListResponse{
ApiResponse: ApiResponse{Type: JSApiStreamListResponseType},
Streams: make([]*StreamInfo, 0, len(streams)),
}
if len(streams) == 0 {
js.mu.Unlock()
resp.Limit = JSApiListLimit
resp.Offset = offset
s.sendAPIResponse(ci, acc, subject, reply, string(rmsg), s.jsonResponse(resp))
return
}
// Create an inbox for our responses and send out requests.
inbox := infoReplySubject()
rc := make(chan *StreamInfo, len(streams))
rsub, _ := s.systemSubscribe(inbox, _EMPTY_, false, cc.c, func(_ *subscription, _ *client, _, reply string, msg []byte) {
var si StreamInfo
if err := json.Unmarshal(msg, &si); err != nil {
s.Warnf("Error unmarshaling clustered stream info response: %v", err)
return
}
select {
case rc <- &si:
default:
s.Warnf("Failed placing remote stream info result on internal channel")
}
})
defer s.sysUnsubscribe(rsub)
// Send out our requests here.
for _, sa := range streams {
isubj := fmt.Sprintf(clusterStreamInfoT, sa.Client.serviceAccount(), sa.Config.Name)
s.sendInternalMsgLocked(isubj, inbox, nil, nil)
}
// Don't hold lock.
js.mu.Unlock()
const timeout = 5 * time.Second
notActive := time.NewTimer(timeout)
defer notActive.Stop()
LOOP:
for {
select {
case <-s.quitCh:
return
case <-notActive.C:
s.Warnf("Did not receive all stream info results for %q", acc)
resp.Error = jsClusterIncompleteErr
break LOOP
case si := <-rc:
resp.Streams = append(resp.Streams, si)
// Check to see if we are done.
if len(resp.Streams) == len(streams) {
break LOOP
}
}
}
// Needs to be sorted as well.
if len(resp.Streams) > 1 {
sort.Slice(resp.Streams, func(i, j int) bool {
return strings.Compare(resp.Streams[i].Config.Name, resp.Streams[j].Config.Name) < 0
})
}
resp.Total = len(resp.Streams)
resp.Limit = JSApiListLimit
resp.Offset = offset
s.sendAPIResponse(ci, acc, subject, reply, string(rmsg), s.jsonResponse(resp))
}
// This will do a scatter and gather operation for all consumers for this stream and account.
// This will be running in a separate Go routine.
func (s *Server) jsClusteredConsumerListRequest(acc *Account, ci *ClientInfo, offset int, stream, subject, reply string, rmsg []byte) {
defer s.grWG.Done()
js, cc := s.getJetStreamCluster()
if js == nil || cc == nil {
return
}
js.mu.Lock()
var consumers []*consumerAssignment
if sas := cc.streams[acc.Name]; sas != nil {
if sa := sas[stream]; sa != nil {
// Copy over since we need to sort etc.
for _, ca := range sa.consumers {
consumers = append(consumers, ca)
}
}
}
// Needs to be sorted.
if len(consumers) > 1 {
sort.Slice(consumers, func(i, j int) bool {
return strings.Compare(consumers[i].Name, consumers[j].Name) < 0
})
}
ocnt := len(consumers)
if offset > ocnt {
offset = ocnt
}
if offset > 0 {
consumers = consumers[offset:]
}
if len(consumers) > JSApiListLimit {
consumers = consumers[:JSApiListLimit]
}
// Send out our requests here.
var resp = JSApiConsumerListResponse{
ApiResponse: ApiResponse{Type: JSApiConsumerListResponseType},
Consumers: []*ConsumerInfo{},
}
if len(consumers) == 0 {
js.mu.Unlock()
resp.Limit = JSApiListLimit
resp.Offset = offset
s.sendAPIResponse(ci, acc, subject, reply, string(rmsg), s.jsonResponse(resp))
return
}
// Create an inbox for our responses and send out requests.
inbox := infoReplySubject()
rc := make(chan *ConsumerInfo, len(consumers))
rsub, _ := s.systemSubscribe(inbox, _EMPTY_, false, cc.c, func(_ *subscription, _ *client, _, reply string, msg []byte) {
var ci ConsumerInfo
if err := json.Unmarshal(msg, &ci); err != nil {
s.Warnf("Error unmarshaling clustered consumer info response: %v", err)
return
}
select {
case rc <- &ci:
default:
s.Warnf("Failed placing consumer info result on internal channel")
}
})
defer s.sysUnsubscribe(rsub)
for _, ca := range consumers {
isubj := fmt.Sprintf(clusterConsumerInfoT, ca.Client.serviceAccount(), stream, ca.Name)
s.sendInternalMsgLocked(isubj, inbox, nil, nil)
}
js.mu.Unlock()
const timeout = 2 * time.Second
notActive := time.NewTimer(timeout)
defer notActive.Stop()
LOOP:
for {
select {
case <-s.quitCh:
return
case <-notActive.C:
s.Warnf("Did not receive all consumer info results for %q", acc)
break LOOP
case ci := <-rc:
resp.Consumers = append(resp.Consumers, ci)
// Check to see if we are done.
if len(resp.Consumers) == len(consumers) {
break LOOP
}
}
}
// Needs to be sorted as well.
if len(resp.Consumers) > 1 {
sort.Slice(resp.Consumers, func(i, j int) bool {
return strings.Compare(resp.Consumers[i].Name, resp.Consumers[j].Name) < 0
})
}
resp.Total = len(resp.Consumers)
resp.Limit = JSApiListLimit
resp.Offset = offset
s.sendAPIResponse(ci, acc, subject, reply, string(rmsg), s.jsonResponse(resp))
}
func encodeStreamPurge(sp *streamPurge) []byte {
var bb bytes.Buffer
bb.WriteByte(byte(purgeStreamOp))
json.NewEncoder(&bb).Encode(sp)
return bb.Bytes()
}
func decodeStreamPurge(buf []byte) (*streamPurge, error) {
var sp streamPurge
err := json.Unmarshal(buf, &sp)
return &sp, err
}
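// The purge encode/decode pair is symmetric: one op byte in front, JSON after.
// A minimal round trip for illustration (the caller strips the op byte, as
// applyStreamEntries does above; the stream name here is made up):
//
//	buf := encodeStreamPurge(&streamPurge{Stream: "ORDERS", LastSeq: 100})
//	// buf[0] == byte(purgeStreamOp)
//	sp, err := decodeStreamPurge(buf[1:])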
func (s *Server) jsClusteredConsumerDeleteRequest(ci *ClientInfo, acc *Account, stream, consumer, subject, reply string, rmsg []byte) {
js, cc := s.getJetStreamCluster()
if js == nil || cc == nil {
return
}
js.mu.Lock()
defer js.mu.Unlock()
var resp = JSApiConsumerDeleteResponse{ApiResponse: ApiResponse{Type: JSApiConsumerDeleteResponseType}}
sa := js.streamAssignment(acc.Name, stream)
if sa == nil {
resp.Error = jsNotFoundError(ErrJetStreamStreamNotFound)
s.sendAPIErrResponse(ci, acc, subject, reply, string(rmsg), s.jsonResponse(&resp))
return
}
if sa.consumers == nil {
resp.Error = jsNoConsumerErr
s.sendAPIErrResponse(ci, acc, subject, reply, string(rmsg), s.jsonResponse(&resp))
return
}
oca := sa.consumers[consumer]
if oca == nil {
resp.Error = jsNoConsumerErr
s.sendAPIErrResponse(ci, acc, subject, reply, string(rmsg), s.jsonResponse(&resp))
return
}
oca.deleted = true
ca := &consumerAssignment{Group: oca.Group, Stream: stream, Name: consumer, Config: oca.Config, Subject: subject, Reply: reply, Client: ci}
cc.meta.Propose(encodeDeleteConsumerAssignment(ca))
}
func encodeMsgDelete(md *streamMsgDelete) []byte {
var bb bytes.Buffer
bb.WriteByte(byte(deleteMsgOp))
json.NewEncoder(&bb).Encode(md)
return bb.Bytes()
}
func decodeMsgDelete(buf []byte) (*streamMsgDelete, error) {
var md streamMsgDelete
err := json.Unmarshal(buf, &md)
return &md, err
}
func (s *Server) jsClusteredMsgDeleteRequest(ci *ClientInfo, acc *Account, mset *stream, stream, subject, reply string, req *JSApiMsgDeleteRequest, rmsg []byte) {
js, cc := s.getJetStreamCluster()
if js == nil || cc == nil {
return
}
js.mu.Lock()
defer js.mu.Unlock()
sa := js.streamAssignment(acc.Name, stream)
if sa == nil {
s.Debugf("Message delete failed, could not locate stream '%s > %s'", acc.Name, stream)
return
}
// Check for single replica items.
if n := sa.Group.node; n != nil {
md := &streamMsgDelete{Seq: req.Seq, NoErase: req.NoErase, Stream: stream, Subject: subject, Reply: reply, Client: ci}
n.Propose(encodeMsgDelete(md))
} else if mset != nil {
var err error
var removed bool
if req.NoErase {
removed, err = mset.removeMsg(req.Seq)
} else {
removed, err = mset.eraseMsg(req.Seq)
}
var resp = JSApiMsgDeleteResponse{ApiResponse: ApiResponse{Type: JSApiMsgDeleteResponseType}}
if err != nil {
resp.Error = jsError(err)
} else if !removed {
resp.Error = &ApiError{Code: 400, Description: fmt.Sprintf("sequence [%d] not found", req.Seq)}
} else {
resp.Success = true
}
s.sendAPIResponse(ci, acc, subject, reply, string(rmsg), s.jsonResponse(resp))
}
}
func encodeAddStreamAssignment(sa *streamAssignment) []byte {
var bb bytes.Buffer
bb.WriteByte(byte(assignStreamOp))
json.NewEncoder(&bb).Encode(sa)
return bb.Bytes()
}
func encodeUpdateStreamAssignment(sa *streamAssignment) []byte {
var bb bytes.Buffer
bb.WriteByte(byte(updateStreamOp))
json.NewEncoder(&bb).Encode(sa)
return bb.Bytes()
}
func encodeDeleteStreamAssignment(sa *streamAssignment) []byte {
var bb bytes.Buffer
bb.WriteByte(byte(removeStreamOp))
json.NewEncoder(&bb).Encode(sa)
return bb.Bytes()
}
func decodeStreamAssignment(buf []byte) (*streamAssignment, error) {
var sa streamAssignment
err := json.Unmarshal(buf, &sa)
return &sa, err
}
// createGroupForConsumer will create a new group with same peer set as the stream.
func (cc *jetStreamCluster) createGroupForConsumer(sa *streamAssignment) *raftGroup {
peers := sa.Group.Peers
if len(peers) == 0 {
return nil
}
return &raftGroup{Name: groupNameForConsumer(peers, sa.Config.Storage), Storage: sa.Config.Storage, Peers: peers}
}
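// Because the peer set is copied from the stream's group, a consumer's
// replicas are always co-located with the replicas of its parent stream.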
func (s *Server) jsClusteredConsumerRequest(ci *ClientInfo, acc *Account, subject, reply string, rmsg []byte, stream string, cfg *ConsumerConfig) {
js, cc := s.getJetStreamCluster()
if js == nil || cc == nil {
return
}
js.mu.Lock()
defer js.mu.Unlock()
var resp = JSApiConsumerCreateResponse{ApiResponse: ApiResponse{Type: JSApiConsumerCreateResponseType}}
// Lookup the stream assignment.
sa := js.streamAssignment(acc.Name, stream)
if sa == nil {
resp.Error = jsError(ErrJetStreamStreamNotFound)
s.sendAPIErrResponse(ci, acc, subject, reply, string(rmsg), s.jsonResponse(&resp))
return
}
rg := cc.createGroupForConsumer(sa)
if rg == nil {
resp.Error = jsInsufficientErr
s.sendAPIErrResponse(ci, acc, subject, reply, string(rmsg), s.jsonResponse(&resp))
return
}
// Pick a preferred leader.
rg.setPreferred()
// We need to set the ephemeral here before replicating.
var oname string
if !isDurableConsumer(cfg) {
for {
oname = createConsumerName()
if sa.consumers != nil {
if sa.consumers[oname] != nil {
continue
}
}
break
}
} else {
oname = cfg.Durable
if ca := sa.consumers[oname]; ca != nil && !ca.deleted {
// This can be ok if this is just a delivery subject update.
if !reflect.DeepEqual(cfg, ca.Config) && !configsEqualSansDelivery(*cfg, *ca.Config) {
resp.Error = jsError(ErrJetStreamConsumerAlreadyUsed)
s.sendAPIErrResponse(ci, acc, subject, reply, string(rmsg), s.jsonResponse(&resp))
return
}
}
}
ca := &consumerAssignment{Group: rg, Stream: stream, Name: oname, Config: cfg, Subject: subject, Reply: reply, Client: ci, Created: time.Now().UTC()}
cc.meta.Propose(encodeAddConsumerAssignment(ca))
}
func encodeAddConsumerAssignment(ca *consumerAssignment) []byte {
var bb bytes.Buffer
bb.WriteByte(byte(assignConsumerOp))
json.NewEncoder(&bb).Encode(ca)
return bb.Bytes()
}
func encodeDeleteConsumerAssignment(ca *consumerAssignment) []byte {
var bb bytes.Buffer
bb.WriteByte(byte(removeConsumerOp))
json.NewEncoder(&bb).Encode(ca)
return bb.Bytes()
}
func decodeConsumerAssignment(buf []byte) (*consumerAssignment, error) {
var ca consumerAssignment
err := json.Unmarshal(buf, &ca)
return &ca, err
}
func encodeAddConsumerAssignmentCompressed(ca *consumerAssignment) []byte {
b, err := json.Marshal(ca)
if err != nil {
return nil
}
// TODO(dlc) - Streaming better approach here probably.
var bb bytes.Buffer
bb.WriteByte(byte(assignCompressedConsumerOp))
bb.Write(s2.Encode(nil, b))
return bb.Bytes()
}
func decodeConsumerAssignmentCompressed(buf []byte) (*consumerAssignment, error) {
var ca consumerAssignment
js, err := s2.Decode(nil, buf)
if err != nil {
return nil, err
}
err = json.Unmarshal(js, &ca)
return &ca, err
}
var errBadStreamMsg = errors.New("jetstream cluster bad replicated stream msg")
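// Wire layout shared by decodeStreamMsg and encodeStreamMsg below, with all
// integers little-endian (the leading op byte written by encodeStreamMsg is
// stripped by callers before decoding):
//
//	lseq uint64 | ts uint64 | subject uint16 len + bytes |
//	reply uint16 len + bytes | hdr uint16 len + bytes | msg uint32 len + bytes
//
// The minimum length check of 26 bytes below is the sum of the fixed-width fields.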
func decodeStreamMsg(buf []byte) (subject, reply string, hdr, msg []byte, lseq uint64, ts int64, err error) {
var le = binary.LittleEndian
if len(buf) < 26 {
return _EMPTY_, _EMPTY_, nil, nil, 0, 0, errBadStreamMsg
}
lseq = le.Uint64(buf)
buf = buf[8:]
ts = int64(le.Uint64(buf))
buf = buf[8:]
sl := int(le.Uint16(buf))
buf = buf[2:]
if len(buf) < sl {
return _EMPTY_, _EMPTY_, nil, nil, 0, 0, errBadStreamMsg
}
subject = string(buf[:sl])
buf = buf[sl:]
if len(buf) < 2 {
return _EMPTY_, _EMPTY_, nil, nil, 0, 0, errBadStreamMsg
}
rl := int(le.Uint16(buf))
buf = buf[2:]
if len(buf) < rl {
return _EMPTY_, _EMPTY_, nil, nil, 0, 0, errBadStreamMsg
}
reply = string(buf[:rl])
buf = buf[rl:]
if len(buf) < 2 {
return _EMPTY_, _EMPTY_, nil, nil, 0, 0, errBadStreamMsg
}
hl := int(le.Uint16(buf))
buf = buf[2:]
if len(buf) < hl {
return _EMPTY_, _EMPTY_, nil, nil, 0, 0, errBadStreamMsg
}
hdr = buf[:hl]
buf = buf[hl:]
if len(buf) < 4 {
return _EMPTY_, _EMPTY_, nil, nil, 0, 0, errBadStreamMsg
}
ml := int(le.Uint32(buf))
buf = buf[4:]
if len(buf) < ml {
return _EMPTY_, _EMPTY_, nil, nil, 0, 0, errBadStreamMsg
}
msg = buf[:ml]
return subject, reply, hdr, msg, lseq, ts, nil
}
func encodeStreamMsg(subject, reply string, hdr, msg []byte, lseq uint64, ts int64) []byte {
elen := 1 + 8 + 8 + len(subject) + len(reply) + len(hdr) + len(msg)
elen += (2 + 2 + 2 + 4) // Encoded lengths, 4bytes
// TODO(dlc) - check sizes of subject, reply and hdr, make sure uint16 ok.
buf := make([]byte, elen)
buf[0] = byte(streamMsgOp)
var le = binary.LittleEndian
wi := 1
le.PutUint64(buf[wi:], lseq)
wi += 8
le.PutUint64(buf[wi:], uint64(ts))
wi += 8
le.PutUint16(buf[wi:], uint16(len(subject)))
wi += 2
copy(buf[wi:], subject)
wi += len(subject)
le.PutUint16(buf[wi:], uint16(len(reply)))
wi += 2
copy(buf[wi:], reply)
wi += len(reply)
le.PutUint16(buf[wi:], uint16(len(hdr)))
wi += 2
if len(hdr) > 0 {
copy(buf[wi:], hdr)
wi += len(hdr)
}
le.PutUint32(buf[wi:], uint32(len(msg)))
wi += 4
if len(msg) > 0 {
copy(buf[wi:], msg)
wi += len(msg)
}
return buf[:wi]
}
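// Illustrative round trip (not part of the original source): encodeStreamMsg
// prepends the streamMsgOp byte, so decoders are handed the buffer starting at
// index 1, exactly as processCatchupMsg does further below:
//
//	buf := encodeStreamMsg("foo", "bar", nil, []byte("hello"), 22, time.Now().UnixNano())
//	subj, reply, hdr, msg, lseq, ts, err := decodeStreamMsg(buf[1:])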
// StreamSnapshot is used for snapshotting and out of band catch up in clustered mode.
type streamSnapshot struct {
Msgs uint64 `json:"messages"`
Bytes uint64 `json:"bytes"`
FirstSeq uint64 `json:"first_seq"`
LastSeq uint64 `json:"last_seq"`
Deleted []uint64 `json:"deleted,omitempty"`
}
// Grab a snapshot of a stream for clustered mode.
func (mset *stream) stateSnapshot() []byte {
mset.mu.RLock()
defer mset.mu.RUnlock()
state := mset.store.State()
snap := &streamSnapshot{
Msgs: state.Msgs,
Bytes: state.Bytes,
FirstSeq: state.FirstSeq,
LastSeq: state.LastSeq,
Deleted: state.Deleted,
}
b, _ := json.Marshal(snap)
return b
}
// processClusteredInboundMsg will propose the inbound message to the underlying raft group.
func (mset *stream) processClusteredInboundMsg(subject, reply string, hdr, msg []byte) error {
// For possible error response.
var response []byte
mset.mu.RLock()
canRespond := !mset.cfg.NoAck && len(reply) > 0
s, jsa, st, rf, sendq := mset.srv, mset.jsa, mset.cfg.Storage, mset.cfg.Replicas, mset.sendq
maxMsgSize := int(mset.cfg.MaxMsgSize)
mset.mu.RUnlock()
// Check here pre-emptively if we have exceeded our account limits.
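// Note that the projected totals below multiply the per-message store size by
// the replication factor (rf), since each replica keeps its own copy.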
var exceeded bool
jsa.mu.RLock()
if st == MemoryStorage {
total := jsa.storeTotal + int64(memStoreMsgSize(subject, hdr, msg)*uint64(rf))
if jsa.limits.MaxMemory > 0 && total > jsa.limits.MaxMemory {
exceeded = true
}
} else {
total := jsa.storeTotal + int64(fileStoreMsgSize(subject, hdr, msg)*uint64(rf))
if jsa.limits.MaxStore > 0 && total > jsa.limits.MaxStore {
exceeded = true
}
}
jsa.mu.RUnlock()
// If we have exceeded our account limits go ahead and return.
if exceeded {
err := fmt.Errorf("JetStream resource limits exceeded for account: %q", jsa.acc().Name)
s.Warnf(err.Error())
if canRespond {
var resp = &JSPubAckResponse{PubAck: &PubAck{Stream: mset.name()}}
resp.Error = &ApiError{Code: 400, Description: "resource limits exceeded for account"}
response, _ = json.Marshal(resp)
sendq <- &jsPubMsg{reply, _EMPTY_, _EMPTY_, nil, response, nil, 0}
}
return err
}
// Check msgSize if we have a limit set there. This is also enforced when the message actually goes through, but better to be pre-emptive.
if maxMsgSize >= 0 && (len(hdr)+len(msg)) > maxMsgSize {
err := fmt.Errorf("JetStream message size exceeds limits for '%s > %s'", jsa.acc().Name, mset.cfg.Name)
s.Warnf(err.Error())
if canRespond {
var resp = &JSPubAckResponse{PubAck: &PubAck{Stream: mset.name()}}
resp.Error = &ApiError{Code: 400, Description: "message size exceeds maximum allowed"}
response, _ = json.Marshal(resp)
sendq <- &jsPubMsg{reply, _EMPTY_, _EMPTY_, nil, response, nil, 0}
}
return err
}
// Proceed with proposing this message.
mset.mu.Lock()
// We only use mset.clseq for clustering and in case we run ahead of actual commits.
// Check if we need to set the initial value here.
if mset.clseq < mset.lseq {
mset.clseq = mset.lseq
}
// Do proposal.
err := mset.node.Propose(encodeStreamMsg(subject, reply, hdr, msg, mset.clseq, time.Now().UnixNano()))
if err != nil {
if canRespond {
var resp = &JSPubAckResponse{PubAck: &PubAck{Stream: mset.cfg.Name}}
resp.Error = &ApiError{Code: 503, Description: err.Error()}
response, _ = json.Marshal(resp)
}
} else {
mset.clseq++
}
mset.mu.Unlock()
// If we errored out respond here.
if err != nil && canRespond {
sendq <- &jsPubMsg{reply, _EMPTY_, _EMPTY_, nil, response, nil, 0}
}
return err
}
// Used to request messages after a raft snapshot in order to catch up streams after a server restart.
// Any deleted msgs etc will be handled inline on catchup.
type streamSyncRequest struct {
FirstSeq uint64 `json:"first_seq"`
LastSeq uint64 `json:"last_seq"`
}
// Given a stream state that represents a snapshot, calculate the sync request based on our current state.
func (mset *stream) calculateSyncRequest(state *StreamState, snap *streamSnapshot) *streamSyncRequest {
// Quick check if we are already caught up.
if state.LastSeq >= snap.LastSeq {
return nil
}
return &streamSyncRequest{FirstSeq: state.LastSeq + 1, LastSeq: snap.LastSeq}
}
// processSnapshotDeletes will update our current store based on the snapshot
// but only processing deletes and new FirstSeq / purges.
func (mset *stream) processSnapshotDeletes(snap *streamSnapshot) {
state := mset.store.State()
// Adjust if FirstSeq has moved.
if snap.FirstSeq > state.FirstSeq {
mset.store.Compact(snap.FirstSeq)
state = mset.store.State()
}
// Range the deleted and delete if applicable.
for _, dseq := range snap.Deleted {
if dseq <= state.LastSeq {
mset.store.RemoveMsg(dseq)
}
}
}
func (mset *stream) setCatchingUp() {
mset.mu.Lock()
mset.catchup = true
mset.mu.Unlock()
}
func (mset *stream) clearCatchingUp() {
mset.mu.Lock()
mset.catchup = false
mset.mu.Unlock()
}
func (mset *stream) isCatchingUp() bool {
mset.mu.RLock()
defer mset.mu.RUnlock()
return mset.catchup
}
// Process a stream snapshot: apply any deletes, then catch up from the leader if we are behind.
func (mset *stream) processSnapshot(snap *streamSnapshot) {
// Update any deletes, etc.
mset.processSnapshotDeletes(snap)
mset.mu.Lock()
state := mset.store.State()
sreq := mset.calculateSyncRequest(&state, snap)
s, subject, n := mset.srv, mset.sa.Sync, mset.node
mset.mu.Unlock()
// Just return if up to date.
if sreq == nil {
return
}
// Pause the apply channel for our raft group while we catch up.
n.PauseApply()
defer n.ResumeApply()
// Set our catchup state.
mset.setCatchingUp()
defer mset.clearCatchingUp()
js := s.getJetStream()
var sub *subscription
var err error
const activityInterval = 5 * time.Second
notActive := time.NewTimer(activityInterval)
defer notActive.Stop()
defer func() {
if sub != nil {
s.sysUnsubscribe(sub)
}
// Make sure any consumers are updated for the pending amounts.
mset.mu.Lock()
for _, o := range mset.consumers {
o.mu.Lock()
if o.isLeader() {
o.setInitialPending()
}
o.mu.Unlock()
}
mset.mu.Unlock()
}()
RETRY:
// If we have a sub clear that here.
if sub != nil {
s.sysUnsubscribe(sub)
sub = nil
}
// Grab sync request again on failures.
if sreq == nil {
mset.mu.Lock()
state := mset.store.State()
sreq = mset.calculateSyncRequest(&state, snap)
mset.mu.Unlock()
if sreq == nil {
return
}
}
type mr struct {
msg []byte
reply string
}
msgsC := make(chan *mr, 32768)
// Send our catchup request here.
reply := syncReplySubject()
sub, err = s.sysSubscribe(reply, func(_ *subscription, _ *client, _, reply string, msg []byte) {
// Make copies of the message - https://github.com/go101/go101/wiki
// TODO(dlc) - Needed since we are handed a buffer owned by the inbound client/route that may be reused.
select {
case msgsC <- &mr{msg: append(msg[:0:0], msg...), reply: reply}:
default:
s.Warnf("Failed to place catchup message onto internal channel: %d pending", len(msgsC))
return
}
})
if err != nil {
s.Errorf("Could not subscribe to stream catchup: %v", err)
return
}
b, _ := json.Marshal(sreq)
s.sendInternalMsgLocked(subject, reply, nil, b)
// Clear our sync request and capture last.
last := sreq.LastSeq
sreq = nil
// Run our own select loop here.
for qch, lch := n.QuitC(), n.LeadChangeC(); ; {
select {
case mrec := <-msgsC:
notActive.Reset(activityInterval)
msg := mrec.msg
// Check for eof signaling.
if len(msg) == 0 {
return
}
if lseq, err := mset.processCatchupMsg(msg); err == nil {
if lseq >= last {
return
}
} else {
goto RETRY
}
if mrec.reply != _EMPTY_ {
s.sendInternalMsgLocked(mrec.reply, _EMPTY_, nil, nil)
}
case <-notActive.C:
s.Warnf("Catchup for stream '%s > %s' stalled", mset.account(), mset.name())
notActive.Reset(activityInterval)
goto RETRY
case <-s.quitCh:
return
case <-qch:
return
case isLeader := <-lch:
js.processStreamLeaderChange(mset, isLeader)
}
}
}
// processCatchupMsg will be called to process out of band catchup msgs from a sync request.
func (mset *stream) processCatchupMsg(msg []byte) (uint64, error) {
if len(msg) == 0 || entryOp(msg[0]) != streamMsgOp {
// TODO(dlc) - This is an error condition; log it.
return 0, errors.New("bad catchup msg")
}
subj, _, hdr, msg, seq, ts, err := decodeStreamMsg(msg[1:])
if err != nil {
return 0, errors.New("bad catchup msg")
}
// Put into our store
// Messages to be skipped have no subject or timestamp.
// TODO(dlc) - formalize with skipMsgOp
if subj == _EMPTY_ && ts == 0 {
lseq := mset.store.SkipMsg()
if lseq != seq {
return 0, errors.New("wrong sequence for skipped msg")
}
} else if err := mset.store.StoreRawMsg(subj, hdr, msg, seq, ts); err != nil {
return 0, err
}
// Update our lseq.
mset.setLastSeq(seq)
return seq, nil
}
func (mset *stream) handleClusterSyncRequest(sub *subscription, c *client, subject, reply string, msg []byte) {
var sreq streamSyncRequest
if err := json.Unmarshal(msg, &sreq); err != nil {
// Log error.
return
}
mset.srv.startGoRoutine(func() { mset.runCatchup(reply, &sreq) })
}
// clusterInfo will report on the status of the raft group.
func (js *jetStream) clusterInfo(rg *raftGroup) *ClusterInfo {
if js == nil {
return nil
}
js.mu.RLock()
defer js.mu.RUnlock()
s := js.srv
if rg == nil || rg.node == nil {
return &ClusterInfo{
Name: s.ClusterName(),
Leader: s.Name(),
}
}
n := rg.node
ci := &ClusterInfo{
Name: s.ClusterName(),
Leader: s.serverNameForNode(n.GroupLeader()),
}
now := time.Now()
id, peers := n.ID(), n.Peers()
for _, rp := range peers {
if rp.ID != id && rg.isMember(rp.ID) {
lastSeen := now.Sub(rp.Last)
current := rp.Current
if current && lastSeen > lostQuorumInterval {
current = false
}
if sir, ok := s.nodeToInfo.Load(rp.ID); ok && sir != nil {
si := sir.(*nodeInfo)
pi := &PeerInfo{Name: si.name, Current: current, Offline: si.offline, Active: lastSeen, Lag: rp.Lag}
ci.Replicas = append(ci.Replicas, pi)
}
}
}
return ci
}
func (mset *stream) handleClusterStreamInfoRequest(sub *subscription, c *client, subject, reply string, msg []byte) {
mset.mu.RLock()
if mset.client == nil {
mset.mu.RUnlock()
return
}
s, js, config := mset.srv, mset.srv.js, mset.cfg
mset.mu.RUnlock()
si := &StreamInfo{Created: mset.createdTime(), State: mset.state(), Config: config, Cluster: js.clusterInfo(mset.raftGroup())}
b, _ := json.Marshal(si)
s.sendInternalMsgLocked(reply, _EMPTY_, nil, b)
}
func (mset *stream) runCatchup(sendSubject string, sreq *streamSyncRequest) {
s := mset.srv
defer s.grWG.Done()
const maxOutBytes = int64(2 * 1024 * 1024) // 2MB for now.
const maxOutMsgs = int32(16384)
outb := int64(0)
outm := int32(0)
// Flow control processing.
ackReplySize := func(subj string) int64 {
if li := strings.LastIndexByte(subj, btsep); li > 0 && li < len(subj) {
return parseAckReplyNum(subj[li+1:])
}
return 0
}
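// Flow control: every outbound catchup message is stamped with a reply subject
// whose final token is the encoded message size (see ackReplyT below). When the
// receiver acks, ackReplySize recovers that size so outb/outm can be decremented
// and the next batch released through nextBatchC.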
nextBatchC := make(chan struct{}, 1)
nextBatchC <- struct{}{}
// Setup ackReply for flow control.
ackReply := syncAckSubject()
ackSub, _ := s.sysSubscribe(ackReply, func(sub *subscription, c *client, subject, reply string, msg []byte) {
sz := ackReplySize(subject)
atomic.AddInt64(&outb, -sz)
atomic.AddInt32(&outm, -1)
select {
case nextBatchC <- struct{}{}:
default:
}
})
defer s.sysUnsubscribe(ackSub)
ackReplyT := strings.ReplaceAll(ackReply, ".*", ".%d")
// EOF
defer s.sendInternalMsgLocked(sendSubject, _EMPTY_, nil, nil)
const activityInterval = 5 * time.Second
notActive := time.NewTimer(activityInterval)
defer notActive.Stop()
// Setup sequences to walk through.
seq, last := sreq.FirstSeq, sreq.LastSeq
sendNextBatch := func() {
for ; seq <= last && atomic.LoadInt64(&outb) <= maxOutBytes && atomic.LoadInt32(&outm) <= maxOutMsgs; seq++ {
subj, hdr, msg, ts, err := mset.store.LoadMsg(seq)
// On any error other than a missing or deleted msg, bail out.
if err != nil && err != ErrStoreMsgNotFound && err != errDeletedMsg {
// Mark ourselves done; something changed underneath us.
seq = last + 1
return
}
// S2?
em := encodeStreamMsg(subj, _EMPTY_, hdr, msg, seq, ts)
// Place size in reply subject for flow control.
reply := fmt.Sprintf(ackReplyT, len(em))
atomic.AddInt64(&outb, int64(len(em)))
atomic.AddInt32(&outm, 1)
s.sendInternalMsgLocked(sendSubject, reply, nil, em)
}
}
// Grab stream quit channel.
mset.mu.RLock()
qch := mset.qch
mset.mu.RUnlock()
if qch == nil {
return
}
// Run as long as we are still active and need catchup.
// FIXME(dlc) - Purge event? Stream delete?
for {
select {
case <-s.quitCh:
return
case <-qch:
return
case <-notActive.C:
s.Warnf("Catchup for stream '%s > %s' stalled", mset.account(), mset.name())
return
case <-nextBatchC:
// Update our activity timer.
notActive.Reset(activityInterval)
sendNextBatch()
// Check if we are finished.
if seq >= last {
s.Debugf("Done resync for stream '%s > %s'", mset.account(), mset.name())
return
}
}
}
}
func syncSubjForStream() string {
return syncSubject("$JSC.SYNC")
}
func syncReplySubject() string {
return syncSubject("$JSC.R")
}
func infoReplySubject() string {
return syncSubject("$JSC.R")
}
func syncAckSubject() string {
return syncSubject("$JSC.ACK") + ".*"
}
func syncSubject(pre string) string {
var sb strings.Builder
sb.WriteString(pre)
sb.WriteByte(btsep)
var b [replySuffixLen]byte
rn := rand.Int63()
for i, l := 0, rn; i < len(b); i++ {
b[i] = digits[l%base]
l /= base
}
sb.Write(b[:])
return sb.String()
}
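// For example, syncReplySubject() yields "$JSC.R.<suffix>", where <suffix> is
// replySuffixLen pseudo-random characters drawn from digits.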
const (
clusterStreamInfoT = "$JSC.SI.%s.%s"
clusterConsumerInfoT = "$JSC.CI.%s.%s.%s"
jsaUpdatesSubT = "$JSC.ARU.%s.*"
jsaUpdatesPubT = "$JSC.ARU.%s.%s"
)
| 1 | 12,645 | not sure if this has to be here or not tbh | nats-io-nats-server | go |
@@ -72,7 +72,12 @@ func newHarnessUsingAutodelete(ctx context.Context, t *testing.T) (drivertest.Ha
}
func (h *harness) CreateTopic(ctx context.Context, testName string) (dt driver.Topic, cleanup func(), err error) {
+ // Keep the topic entity name under 50 characters as per Azure limits.
+ // See https://docs.microsoft.com/en-us/azure/service-bus-messaging/service-bus-quotas
topicName := fmt.Sprintf("%s-topic-%d", sanitize(testName), atomic.AddUint32(&h.numTopics, 1))
+ if len(topicName) > 50 {
+ topicName = topicName[:50]
+ }
createTopic(ctx, topicName, h.ns, nil)
| 1 | // Copyright 2018 The Go Cloud Development Kit Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package azuresb
import (
"context"
"fmt"
"os"
"strings"
"sync/atomic"
"testing"
"gocloud.dev/internal/testing/setup"
"gocloud.dev/pubsub"
"gocloud.dev/pubsub/driver"
"gocloud.dev/pubsub/drivertest"
"github.com/Azure/azure-amqp-common-go"
"github.com/Azure/azure-service-bus-go"
)
var (
// See docs below on how to provision an Azure Service Bus Namespace and obtaining the connection string.
// https://docs.microsoft.com/en-us/azure/service-bus-messaging/service-bus-dotnet-get-started-with-queues
connString = os.Getenv("SERVICEBUS_CONNECTION_STRING")
)
const (
nonexistentTopicName = "nonexistent-topic"
)
type harness struct {
ns *servicebus.Namespace
numTopics uint32 // atomic
numSubs uint32 // atomic
closer func()
autodelete bool
}
func newHarness(ctx context.Context, t *testing.T) (drivertest.Harness, error) {
if connString == "" {
return nil, fmt.Errorf("azuresb: test harness requires environment variable SERVICEBUS_CONNECTION_STRING to run")
}
ns, err := NewNamespaceFromConnectionString(connString)
if err != nil {
return nil, err
}
noop := func() {}
return &harness{
ns: ns,
closer: noop,
}, nil
}
func newHarnessUsingAutodelete(ctx context.Context, t *testing.T) (drivertest.Harness, error) {
h, err := newHarness(ctx, t)
if err == nil {
h.(*harness).autodelete = true
}
return h, err
}
func (h *harness) CreateTopic(ctx context.Context, testName string) (dt driver.Topic, cleanup func(), err error) {
topicName := fmt.Sprintf("%s-topic-%d", sanitize(testName), atomic.AddUint32(&h.numTopics, 1))
createTopic(ctx, topicName, h.ns, nil)
sbTopic, err := NewTopic(h.ns, topicName, nil)
dt, err = openTopic(ctx, sbTopic, nil)
if err != nil {
return nil, nil, err
}
cleanup = func() {
sbTopic.Close(ctx)
deleteTopic(ctx, topicName, h.ns)
}
return dt, cleanup, nil
}
func (h *harness) MakeNonexistentTopic(ctx context.Context) (driver.Topic, error) {
sbTopic, err := NewTopic(h.ns, nonexistentTopicName, nil)
if err != nil {
return nil, err
}
return openTopic(ctx, sbTopic, nil)
}
func (h *harness) CreateSubscription(ctx context.Context, dt driver.Topic, testName string) (ds driver.Subscription, cleanup func(), err error) {
// Keep the subscription entity name under 50 characters as per Azure limits.
// See https://docs.microsoft.com/en-us/azure/service-bus-messaging/service-bus-quotas
subName := fmt.Sprintf("%s-sub-%d", sanitize(testName), atomic.AddUint32(&h.numSubs, 1))
if len(subName) > 50 {
subName = subName[:50]
}
t := dt.(*topic)
err = createSubscription(ctx, t.sbTopic.Name, subName, h.ns, nil)
if err != nil {
return nil, nil, err
}
var opts []servicebus.SubscriptionOption
if h.autodelete {
opts = append(opts, servicebus.SubscriptionWithReceiveAndDelete())
}
sbSub, err := NewSubscription(t.sbTopic, subName, opts)
if err != nil {
return nil, nil, err
}
sopts := SubscriptionOptions{}
if h.autodelete {
sopts.AckFuncForReceiveAndDelete = func() {}
}
ds, err = openSubscription(ctx, h.ns, t.sbTopic, sbSub, &sopts)
if err != nil {
return nil, nil, err
}
cleanup = func() {
sbSub.Close(ctx)
deleteSubscription(ctx, t.sbTopic.Name, subName, h.ns)
}
return ds, cleanup, nil
}
func (h *harness) MakeNonexistentSubscription(ctx context.Context) (driver.Subscription, error) {
sbTopic, _ := NewTopic(h.ns, nonexistentTopicName, nil)
sbSub, _ := NewSubscription(sbTopic, "nonexistent-subscription", nil)
return openSubscription(ctx, h.ns, sbTopic, sbSub, nil)
}
func (h *harness) Close() {
h.closer()
}
func (h *harness) MaxBatchSizes() (int, int) { return sendBatcherOpts.MaxBatchSize, 0 }
// Please run the TestConformance with an extended timeout since each test needs to perform CRUD for ServiceBus Topics and Subscriptions.
// Example: C:\Go\bin\go.exe test -timeout 60s gocloud.dev/pubsub/azuresb -run ^TestConformance$
func TestConformance(t *testing.T) {
if !*setup.Record {
t.Skip("replaying is not yet supported for Azure pubsub")
}
asTests := []drivertest.AsTest{sbAsTest{}}
drivertest.RunConformanceTests(t, newHarness, asTests)
}
/* Disabled for now, as the tests do not pass.
func TestConformanceWithAutodelete(t *testing.T) {
if !*setup.Record {
t.Skip("replaying is not yet supported for Azure pubsub")
}
asTests := []drivertest.AsTest{sbAsTest{}}
drivertest.RunConformanceTests(t, newHarnessUsingAutodelete, asTests)
}
*/
type sbAsTest struct{}
func (sbAsTest) Name() string {
return "azure"
}
func (sbAsTest) TopicCheck(top *pubsub.Topic) error {
var t2 servicebus.Topic
if top.As(&t2) {
return fmt.Errorf("cast succeeded for %T, want failure", &t2)
}
var t3 *servicebus.Topic
if !top.As(&t3) {
return fmt.Errorf("cast failed for %T", &t3)
}
return nil
}
func (sbAsTest) SubscriptionCheck(sub *pubsub.Subscription) error {
var s2 servicebus.Subscription
if sub.As(&s2) {
return fmt.Errorf("cast succeeded for %T, want failure", &s2)
}
var s3 *servicebus.Subscription
if !sub.As(&s3) {
return fmt.Errorf("cast failed for %T", &s3)
}
return nil
}
func (sbAsTest) TopicErrorCheck(t *pubsub.Topic, err error) error {
var sbError common.Retryable
if !t.ErrorAs(err, &sbError) {
return fmt.Errorf("failed to convert %v (%T) to a common.Retryable", err, err)
}
return nil
}
func (sbAsTest) SubscriptionErrorCheck(s *pubsub.Subscription, err error) error {
// We generate our own error for non-existent subscription, so there's no
// underlying Azure error type.
return nil
}
func (sbAsTest) MessageCheck(m *pubsub.Message) error {
var m2 servicebus.Message
if m.As(&m2) {
return fmt.Errorf("cast succeeded for %T, want failure", &m2)
}
var m3 *servicebus.Message
if !m.As(&m3) {
return fmt.Errorf("cast failed for %T", &m3)
}
return nil
}
func sanitize(testName string) string {
return strings.Replace(testName, "/", "_", -1)
}
// createTopic ensures the existence of a Service Bus Topic on a given Namespace.
func createTopic(ctx context.Context, topicName string, ns *servicebus.Namespace, opts []servicebus.TopicManagementOption) error {
tm := ns.NewTopicManager()
_, err := tm.Get(ctx, topicName)
if err == nil {
_ = tm.Delete(ctx, topicName)
}
_, err = tm.Put(ctx, topicName, opts...)
return err
}
// deleteTopic removes a Service Bus Topic on a given Namespace.
func deleteTopic(ctx context.Context, topicName string, ns *servicebus.Namespace) error {
tm := ns.NewTopicManager()
te, _ := tm.Get(ctx, topicName)
if te != nil {
return tm.Delete(ctx, topicName)
}
return nil
}
// createSubscription ensures the existence of a Service Bus Subscription on a given Namespace and Topic.
func createSubscription(ctx context.Context, topicName string, subscriptionName string, ns *servicebus.Namespace, opts []servicebus.SubscriptionManagementOption) error {
sm, err := ns.NewSubscriptionManager(topicName)
if err != nil {
return err
}
_, err = sm.Get(ctx, subscriptionName)
if err == nil {
_ = sm.Delete(ctx, subscriptionName)
}
_, err = sm.Put(ctx, subscriptionName, opts...)
return err
}
// deleteSubscription removes a Service Bus Subscription on a given Namespace and Topic.
func deleteSubscription(ctx context.Context, topicName string, subscriptionName string, ns *servicebus.Namespace) error {
sm, err := ns.NewSubscriptionManager(topicName)
if err != nil {
return nil
}
se, _ := sm.Get(ctx, subscriptionName)
if se != nil {
_ = sm.Delete(ctx, subscriptionName)
}
return nil
}
func BenchmarkAzureServiceBusPubSub(b *testing.B) {
const (
benchmarkTopicName = "benchmark-topic"
benchmarkSubscriptionName = "benchmark-subscription"
)
ctx := context.Background()
if connString == "" {
b.Fatal("azuresb: benchmark requires environment variable SERVICEBUS_CONNECTION_STRING to run")
}
ns, err := NewNamespaceFromConnectionString(connString)
if err != nil {
b.Fatal(err)
}
// Make topic.
if err := createTopic(ctx, benchmarkTopicName, ns, nil); err != nil {
b.Fatal(err)
}
defer deleteTopic(ctx, benchmarkTopicName, ns)
sbTopic, err := NewTopic(ns, benchmarkTopicName, nil)
if err != nil {
b.Fatal(err)
}
defer sbTopic.Close(ctx)
topic, err := OpenTopic(ctx, sbTopic, nil)
if err != nil {
b.Fatal(err)
}
// Make subscription.
if err := createSubscription(ctx, benchmarkTopicName, benchmarkSubscriptionName, ns, nil); err != nil {
b.Fatal(err)
}
sbSub, err := NewSubscription(sbTopic, benchmarkSubscriptionName, nil)
if err != nil {
b.Fatal(err)
}
sub, err := OpenSubscription(ctx, ns, sbTopic, sbSub, nil)
if err != nil {
b.Fatal(err)
}
drivertest.RunBenchmarks(b, topic, sub)
}
func fakeConnectionStringInEnv() func() {
oldEnvVal := os.Getenv("SERVICEBUS_CONNECTION_STRING")
os.Setenv("SERVICEBUS_CONNECTION_STRING", "Endpoint=sb://foo.servicebus.windows.net/;SharedAccessKeyName=RootManageSharedAccessKey;SharedAccessKey=mykey")
return func() {
os.Setenv("SERVICEBUS_CONNECTION_STRING", oldEnvVal)
}
}
func TestOpenTopicFromURL(t *testing.T) {
cleanup := fakeConnectionStringInEnv()
defer cleanup()
tests := []struct {
URL string
WantErr bool
}{
// OK.
{"azuresb://mytopic", false},
// Invalid parameter.
{"azuresb://mytopic?param=value", true},
}
ctx := context.Background()
for _, test := range tests {
_, err := pubsub.OpenTopic(ctx, test.URL)
if (err != nil) != test.WantErr {
t.Errorf("%s: got error %v, want error %v", test.URL, err, test.WantErr)
}
}
}
func TestOpenSubscriptionFromURL(t *testing.T) {
cleanup := fakeConnectionStringInEnv()
defer cleanup()
tests := []struct {
URL string
WantErr bool
}{
// OK.
{"azuresb://mytopic?subscription=mysub", false},
// Missing subscription.
{"azuresb://mytopic", true},
// Invalid parameter.
{"azuresb://mytopic?subscription=mysub¶m=value", true},
}
ctx := context.Background()
for _, test := range tests {
_, err := pubsub.OpenSubscription(ctx, test.URL)
if (err != nil) != test.WantErr {
t.Errorf("%s: got error %v, want error %v", test.URL, err, test.WantErr)
}
}
}
| 1 | 16,302 | There's a better fix for this in #1741, which should replace this. | google-go-cloud | go |
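For context, the truncation added in this patch mirrors the one already present in CreateSubscription in the same file; a shared helper could host both. A minimal sketch (the helper name is hypothetical):

// Illustrative helper, not in the original file:
func azureEntityName(base string, n uint32) string {
	name := fmt.Sprintf("%s-%d", base, n)
	// Azure Service Bus caps entity names at 50 characters.
	if len(name) > 50 {
		name = name[:50]
	}
	return name
}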
@@ -63,7 +63,7 @@ func (s *StreamMock) Close() error {
func TestHandshake(t *testing.T) {
logger := logging.New(ioutil.Discard, 0)
info := Info{
- Address: "node1",
+ Address: []byte("node1"),
NetworkID: 0,
Light: false,
} | 1 | // Copyright 2020 The Swarm Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package handshake
import (
"bytes"
"errors"
"fmt"
"io/ioutil"
"testing"
"github.com/ethersphere/bee/pkg/logging"
"github.com/ethersphere/bee/pkg/p2p/libp2p/internal/handshake/pb"
"github.com/ethersphere/bee/pkg/p2p/protobuf"
)
type StreamMock struct {
readBuffer *bytes.Buffer
writeBuffer *bytes.Buffer
writeCounter int
readCounter int
readError error
writeError error
readErrCheckmark int
writeErrCheckmark int
}
func (s *StreamMock) setReadErr(err error, checkmark int) {
s.readError = err
s.readErrCheckmark = checkmark
}
func (s *StreamMock) setWriteErr(err error, checkmark int) {
s.writeError = err
s.writeErrCheckmark = checkmark
}
func (s *StreamMock) Read(p []byte) (n int, err error) {
if s.readError != nil && s.readErrCheckmark <= s.readCounter {
return 0, s.readError
}
s.readCounter++
return s.readBuffer.Read(p)
}
func (s *StreamMock) Write(p []byte) (n int, err error) {
if s.writeError != nil && s.writeErrCheckmark <= s.writeCounter {
return 0, s.writeError
}
s.writeCounter++
return s.writeBuffer.Write(p)
}
func (s *StreamMock) Close() error {
return nil
}
func TestHandshake(t *testing.T) {
logger := logging.New(ioutil.Discard, 0)
info := Info{
Address: "node1",
NetworkID: 0,
Light: false,
}
handshakeService := New(info.Address, info.NetworkID, logger)
t.Run("OK", func(t *testing.T) {
expectedInfo := Info{
Address: "node2",
NetworkID: 1,
Light: false,
}
var buffer1 bytes.Buffer
var buffer2 bytes.Buffer
stream1 := &StreamMock{readBuffer: &buffer1, writeBuffer: &buffer2}
stream2 := &StreamMock{readBuffer: &buffer2, writeBuffer: &buffer1}
w, r := protobuf.NewWriterAndReader(stream2)
if err := w.WriteMsg(&pb.ShakeHandAck{
ShakeHand: &pb.ShakeHand{
Address: expectedInfo.Address,
NetworkID: expectedInfo.NetworkID,
Light: expectedInfo.Light,
},
Ack: &pb.Ack{Address: info.Address},
}); err != nil {
t.Fatal(err)
}
res, err := handshakeService.Handshake(stream1)
if err != nil {
t.Fatal(err)
}
if *res != expectedInfo {
t.Fatalf("got %+v, expected %+v", res, info)
}
if err := r.ReadMsg(&pb.Ack{}); err != nil {
t.Fatal(err)
}
})
t.Run("ERROR - shakehand write error ", func(t *testing.T) {
testErr := errors.New("test error")
expectedErr := fmt.Errorf("write message: %w", testErr)
stream := &StreamMock{}
stream.setWriteErr(testErr, 0)
res, err := handshakeService.Handshake(stream)
if err == nil || err.Error() != expectedErr.Error() {
t.Fatal("expected:", expectedErr, "got:", err)
}
if res != nil {
t.Fatal("handshake returned non-nil res")
}
})
t.Run("ERROR - shakehand read error ", func(t *testing.T) {
testErr := errors.New("test error")
expectedErr := fmt.Errorf("read message: %w", testErr)
stream := &StreamMock{writeBuffer: &bytes.Buffer{}}
stream.setReadErr(testErr, 0)
res, err := handshakeService.Handshake(stream)
if err == nil || err.Error() != expectedErr.Error() {
t.Fatal("expected:", expectedErr, "got:", err)
}
if res != nil {
t.Fatal("handshake returned non-nil res")
}
})
t.Run("ERROR - ack write error ", func(t *testing.T) {
testErr := errors.New("test error")
expectedErr := fmt.Errorf("ack: write message: %w", testErr)
expectedInfo := Info{
Address: "node2",
NetworkID: 1,
Light: false,
}
var buffer1 bytes.Buffer
var buffer2 bytes.Buffer
stream1 := &StreamMock{readBuffer: &buffer1, writeBuffer: &buffer2}
stream1.setWriteErr(testErr, 1)
stream2 := &StreamMock{readBuffer: &buffer2, writeBuffer: &buffer1}
w, _ := protobuf.NewWriterAndReader(stream2)
if err := w.WriteMsg(&pb.ShakeHandAck{
ShakeHand: &pb.ShakeHand{
Address: expectedInfo.Address,
NetworkID: expectedInfo.NetworkID,
Light: expectedInfo.Light,
},
Ack: &pb.Ack{Address: info.Address},
}); err != nil {
t.Fatal(err)
}
res, err := handshakeService.Handshake(stream1)
if err == nil || err.Error() != expectedErr.Error() {
t.Fatal("expected:", expectedErr, "got:", err)
}
if res != nil {
t.Fatal("handshake returned non-nil res")
}
})
}
func TestHandle(t *testing.T) {
nodeInfo := Info{
Address: "node1",
NetworkID: 0,
Light: false,
}
logger := logging.New(ioutil.Discard, 0)
handshakeService := New(nodeInfo.Address, nodeInfo.NetworkID, logger)
t.Run("OK", func(t *testing.T) {
node2Info := Info{
Address: "node2",
NetworkID: 1,
Light: false,
}
var buffer1 bytes.Buffer
var buffer2 bytes.Buffer
stream1 := &StreamMock{readBuffer: &buffer1, writeBuffer: &buffer2}
stream2 := &StreamMock{readBuffer: &buffer2, writeBuffer: &buffer1}
w, _ := protobuf.NewWriterAndReader(stream2)
if err := w.WriteMsg(&pb.ShakeHand{
Address: node2Info.Address,
NetworkID: node2Info.NetworkID,
Light: node2Info.Light,
}); err != nil {
t.Fatal(err)
}
if err := w.WriteMsg(&pb.Ack{Address: node2Info.Address}); err != nil {
t.Fatal(err)
}
res, err := handshakeService.Handle(stream1)
if err != nil {
t.Fatal(err)
}
if *res != node2Info {
t.Fatalf("got %+v, expected %+v", res, node2Info)
}
_, r := protobuf.NewWriterAndReader(stream2)
var got pb.ShakeHandAck
if err := r.ReadMsg(&got); err != nil {
t.Fatal(err)
}
if nodeInfo != Info(*got.ShakeHand) {
t.Fatalf("got %+v, expected %+v", got, node2Info)
}
})
t.Run("ERROR - read error ", func(t *testing.T) {
testErr := errors.New("test error")
expectedErr := fmt.Errorf("read message: %w", testErr)
stream := &StreamMock{}
stream.setReadErr(testErr, 0)
res, err := handshakeService.Handle(stream)
if err == nil || err.Error() != expectedErr.Error() {
t.Fatal("expected:", expectedErr, "got:", err)
}
if res != nil {
t.Fatal("handle returned non-nil res")
}
})
t.Run("ERROR - write error ", func(t *testing.T) {
testErr := errors.New("test error")
expectedErr := fmt.Errorf("write message: %w", testErr)
var buffer bytes.Buffer
stream := &StreamMock{readBuffer: &buffer, writeBuffer: &buffer}
stream.setWriteErr(testErr, 1)
w, _ := protobuf.NewWriterAndReader(stream)
if err := w.WriteMsg(&pb.ShakeHand{
Address: "node1",
NetworkID: 0,
Light: false,
}); err != nil {
t.Fatal(err)
}
res, err := handshakeService.Handle(stream)
if err == nil || err.Error() != expectedErr.Error() {
t.Fatal("expected:", expectedErr, "got:", err)
}
if res != nil {
t.Fatal("handshake returned non-nil res")
}
})
t.Run("ERROR - ack read error ", func(t *testing.T) {
testErr := errors.New("test error")
expectedErr := fmt.Errorf("ack: read message: %w", testErr)
node2Info := Info{
Address: "node2",
NetworkID: 1,
Light: false,
}
var buffer1 bytes.Buffer
var buffer2 bytes.Buffer
stream1 := &StreamMock{readBuffer: &buffer1, writeBuffer: &buffer2}
stream2 := &StreamMock{readBuffer: &buffer2, writeBuffer: &buffer1}
stream1.setReadErr(testErr, 1)
w, _ := protobuf.NewWriterAndReader(stream2)
if err := w.WriteMsg(&pb.ShakeHand{
Address: node2Info.Address,
NetworkID: node2Info.NetworkID,
Light: node2Info.Light,
}); err != nil {
t.Fatal(err)
}
res, err := handshakeService.Handle(stream1)
if err == nil || err.Error() != expectedErr.Error() {
t.Fatal("expected:", expectedErr, "got:", err)
}
if res != nil {
t.Fatal("handshake returned non-nil res")
}
})
}
| 1 | 8,713 | Use swarm.Address not []byte as type, and construct it from actual byteslice or use swarm.NewAddress if it is constructed from hex-encoded string. | ethersphere-bee | go |
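A minimal sketch of what the reviewer suggests, assuming the bee pkg/swarm constructors (swarm.NewAddress for a raw byte slice, swarm.ParseHexAddress for a hex-encoded string):

// Sketch only - assumes the pkg/swarm API described above.
info := Info{
	Address:   swarm.NewAddress([]byte("node1")), // from an actual byte slice
	NetworkID: 0,
	Light:     false,
}
// or, from a hex-encoded string ("6e6f646531" is hex for "node1"):
// addr, err := swarm.ParseHexAddress("6e6f646531")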
@@ -80,8 +80,9 @@ func TestFSRepoInit(t *testing.T) {
dir, err := ioutil.TempDir("", "")
assert.NoError(t, err)
-
- defer os.RemoveAll(dir)
+ defer func() {
+ require.NoError(t, os.RemoveAll(dir))
+ }()
t.Log("init FSRepo")
assert.NoError(t, InitFSRepo(dir, config.NewDefaultConfig())) | 1 | package repo
import (
"io/ioutil"
"os"
"path/filepath"
"strings"
"testing"
ds "github.com/ipfs/go-datastore"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/filecoin-project/go-filecoin/config"
tf "github.com/filecoin-project/go-filecoin/testhelpers/testflags"
)
const (
expectContent = `{
"api": {
"address": "/ip4/127.0.0.1/tcp/3453",
"accessControlAllowOrigin": [
"http://localhost:8080",
"https://localhost:8080",
"http://127.0.0.1:8080",
"https://127.0.0.1:8080"
],
"accessControlAllowCredentials": false,
"accessControlAllowMethods": [
"GET",
"POST",
"PUT"
]
},
"bootstrap": {
"addresses": [],
"minPeerThreshold": 0,
"period": "1m"
},
"datastore": {
"type": "badgerds",
"path": "badger"
},
"swarm": {
"address": "/ip4/0.0.0.0/tcp/6000"
},
"mining": {
"minerAddress": "empty",
"autoSealIntervalSeconds": 120,
"storagePrice": "0"
},
"wallet": {
"defaultAddress": "empty"
},
"heartbeat": {
"beatTarget": "",
"beatPeriod": "3s",
"reconnectPeriod": "10s",
"nickname": ""
},
"net": "",
"metrics": {
"prometheusEnabled": false,
"reportInterval": "5s",
"prometheusEndpoint": "/ip4/0.0.0.0/tcp/9400"
},
"mpool": {
"maxPoolSize": 10000,
"maxNonceGap": "100"
},
"sectorbase": {
"rootdir": ""
}
}`
)
func TestFSRepoInit(t *testing.T) {
tf.UnitTest(t)
dir, err := ioutil.TempDir("", "")
assert.NoError(t, err)
defer os.RemoveAll(dir)
t.Log("init FSRepo")
assert.NoError(t, InitFSRepo(dir, config.NewDefaultConfig()))
content, err := ioutil.ReadFile(filepath.Join(dir, configFilename))
assert.NoError(t, err)
t.Log("snapshot dir was created during FSRepo Init")
assert.True(t, fileExists(filepath.Join(dir, snapshotStorePrefix)))
// TODO: asserting the exact content here is gonna get old real quick
t.Log("config file matches expected value")
assert.Equal(t,
expectContent,
string(content),
)
version, err := ioutil.ReadFile(filepath.Join(dir, versionFilename))
assert.NoError(t, err)
assert.Equal(t, "1", string(version))
}
func getSnapshotFilenames(t *testing.T, dir string) []string {
files, err := ioutil.ReadDir(dir)
require.NoError(t, err)
var snpFiles []string
for _, f := range files {
if strings.Contains(f.Name(), "snapshot") {
snpFiles = append(snpFiles, f.Name())
}
}
return snpFiles
}
func TestFSRepoOpen(t *testing.T) {
tf.UnitTest(t)
t.Run("[fail] repo version newer than binary", func(t *testing.T) {
dir, err := ioutil.TempDir("", "")
assert.NoError(t, err)
defer os.RemoveAll(dir)
assert.NoError(t, InitFSRepo(dir, config.NewDefaultConfig()))
// set wrong version
assert.NoError(t, ioutil.WriteFile(filepath.Join(dir, versionFilename), []byte("2"), 0644))
_, err = OpenFSRepo(dir)
assert.EqualError(t, err, "binary needs update to handle repo version, got 2 expected 1. Update binary to latest release")
})
t.Run("[fail] binary version newer than repo", func(t *testing.T) {
dir, err := ioutil.TempDir("", "")
assert.NoError(t, err)
defer os.RemoveAll(dir)
assert.NoError(t, InitFSRepo(dir, config.NewDefaultConfig()))
// set wrong version
assert.NoError(t, ioutil.WriteFile(filepath.Join(dir, versionFilename), []byte("0"), 0644))
_, err = OpenFSRepo(dir)
assert.EqualError(t, err, "out of date repo version, got 0 expected 1. Migrate with tools/migration/go-filecoin-migrate")
})
t.Run("[fail] version corrupt", func(t *testing.T) {
dir, err := ioutil.TempDir("", "")
assert.NoError(t, err)
defer os.RemoveAll(dir)
assert.NoError(t, InitFSRepo(dir, config.NewDefaultConfig()))
// set wrong version
assert.NoError(t, ioutil.WriteFile(filepath.Join(dir, versionFilename), []byte("v.8"), 0644))
_, err = OpenFSRepo(dir)
assert.EqualError(t, err, "failed to load version: corrupt version file: version is not an integer")
})
}
func TestFSRepoRoundtrip(t *testing.T) {
tf.UnitTest(t)
dir, err := ioutil.TempDir("", "")
assert.NoError(t, err)
defer os.RemoveAll(dir)
cfg := config.NewDefaultConfig()
cfg.API.Address = "foo" // testing that what we get back isnt just the default
assert.NoError(t, err, InitFSRepo(dir, cfg))
r, err := OpenFSRepo(dir)
assert.NoError(t, err)
assert.Equal(t, cfg, r.Config())
assert.NoError(t, r.Datastore().Put(ds.NewKey("beep"), []byte("boop")))
assert.NoError(t, r.Close())
r2, err := OpenFSRepo(dir)
assert.NoError(t, err)
val, err := r2.Datastore().Get(ds.NewKey("beep"))
assert.NoError(t, err)
assert.Equal(t, []byte("boop"), val)
assert.NoError(t, r2.Close())
}
func TestFSRepoReplaceAndSnapshotConfig(t *testing.T) {
tf.UnitTest(t)
dir, err := ioutil.TempDir("", "")
require.NoError(t, err)
defer os.RemoveAll(dir)
cfg := config.NewDefaultConfig()
cfg.API.Address = "foo"
assert.NoError(t, err, InitFSRepo(dir, cfg))
expSnpsht, err := ioutil.ReadFile(filepath.Join(dir, configFilename))
require.NoError(t, err)
r1, err := OpenFSRepo(dir)
assert.NoError(t, err)
newCfg := config.NewDefaultConfig()
newCfg.API.Address = "bar"
assert.NoError(t, r1.ReplaceConfig(newCfg))
assert.Equal(t, "bar", r1.Config().API.Address)
assert.NoError(t, r1.Close())
r2, err := OpenFSRepo(dir)
assert.NoError(t, err)
assert.Equal(t, "bar", r2.Config().API.Address)
assert.NoError(t, r2.Close())
// assert that a single snapshot was created when replacing the config
// get the snapshot file name
snpFiles := getSnapshotFilenames(t, filepath.Join(dir, snapshotStorePrefix))
require.Equal(t, 1, len(snpFiles))
snpsht, err := ioutil.ReadFile(filepath.Join(dir, snapshotStorePrefix, snpFiles[0]))
require.NoError(t, err)
assert.Equal(t, string(expSnpsht), string(snpsht))
}
func TestRepoLock(t *testing.T) {
tf.UnitTest(t)
dir, err := ioutil.TempDir("", "")
assert.NoError(t, err)
defer os.RemoveAll(dir)
cfg := config.NewDefaultConfig()
assert.NoError(t, err, InitFSRepo(dir, cfg))
r, err := OpenFSRepo(dir)
assert.NoError(t, err)
assert.FileExists(t, filepath.Join(dir, lockFile))
_, err = OpenFSRepo(dir)
assert.Error(t, err)
assert.Contains(t, err.Error(), "failed to take repo lock")
assert.NoError(t, r.Close())
_, err = os.Lstat(filepath.Join(dir, lockFile))
assert.True(t, os.IsNotExist(err))
}
func TestRepoLockFail(t *testing.T) {
tf.UnitTest(t)
dir, err := ioutil.TempDir("", "")
assert.NoError(t, err)
defer os.RemoveAll(dir)
cfg := config.NewDefaultConfig()
assert.NoError(t, err, InitFSRepo(dir, cfg))
// set invalid version, to make opening the repo fail
assert.NoError(t,
ioutil.WriteFile(filepath.Join(dir, versionFilename), []byte("hello"), 0644),
)
_, err = OpenFSRepo(dir)
assert.Error(t, err)
_, err = os.Lstat(filepath.Join(dir, lockFile))
assert.True(t, os.IsNotExist(err))
}
func TestRepoAPIFile(t *testing.T) {
tf.UnitTest(t)
t.Run("APIAddr returns last value written to API file", func(t *testing.T) {
withFSRepo(t, func(r *FSRepo) {
mustSetAPIAddr(t, r, "/ip4/127.0.0.1/tcp/1234")
addr := mustGetAPIAddr(t, r)
assert.Equal(t, "/ip4/127.0.0.1/tcp/1234", addr)
mustSetAPIAddr(t, r, "/ip4/127.0.0.1/tcp/4567")
addr = mustGetAPIAddr(t, r)
assert.Equal(t, "/ip4/127.0.0.1/tcp/4567", addr)
})
})
t.Run("SetAPIAddr is idempotent", func(t *testing.T) {
withFSRepo(t, func(r *FSRepo) {
mustSetAPIAddr(t, r, "/ip4/127.0.0.1/tcp/1234")
addr := mustGetAPIAddr(t, r)
assert.Equal(t, "/ip4/127.0.0.1/tcp/1234", addr)
mustSetAPIAddr(t, r, "/ip4/127.0.0.1/tcp/1234")
mustSetAPIAddr(t, r, "/ip4/127.0.0.1/tcp/1234")
mustSetAPIAddr(t, r, "/ip4/127.0.0.1/tcp/1234")
mustSetAPIAddr(t, r, "/ip4/127.0.0.1/tcp/1234")
addr = mustGetAPIAddr(t, r)
assert.Equal(t, "/ip4/127.0.0.1/tcp/1234", addr)
})
})
t.Run("APIAddr fails if called before SetAPIAddr", func(t *testing.T) {
withFSRepo(t, func(r *FSRepo) {
addr, err := r.APIAddr()
assert.Error(t, err)
assert.Equal(t, "", addr)
})
})
t.Run("Close deletes API file", func(t *testing.T) {
withFSRepo(t, func(r *FSRepo) {
mustSetAPIAddr(t, r, "/ip4/127.0.0.1/tcp/1234")
info, err := os.Stat(filepath.Join(r.path, apiFile))
assert.NoError(t, err)
assert.Equal(t, apiFile, info.Name())
r.Close()
_, err = os.Stat(filepath.Join(r.path, apiFile))
assert.Error(t, err)
})
})
t.Run("Close will succeed in spite of missing API file", func(t *testing.T) {
withFSRepo(t, func(r *FSRepo) {
mustSetAPIAddr(t, r, "/ip4/127.0.0.1/tcp/1234")
err := os.Remove(filepath.Join(r.path, apiFile))
assert.NoError(t, err)
assert.NoError(t, r.Close())
})
})
t.Run("SetAPI fails if unable to create API file", func(t *testing.T) {
withFSRepo(t, func(r *FSRepo) {
// create a file with permission bits that prevent us from truncating
err := ioutil.WriteFile(filepath.Join(r.path, apiFile), []byte("/ip4/127.0.0.1/tcp/9999"), 0000)
assert.NoError(t, err)
// try to os.Create to same path - will see a failure
err = r.SetAPIAddr("/ip4/127.0.0.1/tcp/1234")
assert.Error(t, err)
})
})
}
func TestCreateRepo(t *testing.T) {
tf.UnitTest(t)
cfg := config.NewDefaultConfig()
t.Run("successfully creates when directory exists", func(t *testing.T) {
dir, err := ioutil.TempDir("", "init")
assert.NoError(t, err)
defer os.RemoveAll(dir)
_, err = CreateRepo(dir, cfg)
assert.NoError(t, err)
assert.True(t, ConfigExists(dir))
})
t.Run("successfully creates when directory does not exist", func(t *testing.T) {
dir, err := ioutil.TempDir("", "init")
assert.NoError(t, err)
defer os.RemoveAll(dir)
dir = filepath.Join(dir, "nested")
_, err = CreateRepo(dir, cfg)
assert.NoError(t, err)
assert.True(t, ConfigExists(dir))
})
t.Run("fails with error if directory is not writeable", func(t *testing.T) {
parentDir, err := ioutil.TempDir("", "init")
assert.NoError(t, err)
defer os.RemoveAll(parentDir)
// make read only dir
dir := filepath.Join(parentDir, "readonly")
err = os.Mkdir(dir, 0444)
assert.NoError(t, err)
assert.False(t, ConfigExists(dir))
_, err = CreateRepo(dir, cfg)
assert.Contains(t, err.Error(), "permission denied")
})
t.Run("fails with error if config file already exists", func(t *testing.T) {
dir, err := ioutil.TempDir("", "init")
assert.NoError(t, err)
defer os.RemoveAll(dir)
err = ioutil.WriteFile(filepath.Join(dir, "config.json"), []byte("hello"), 0644)
assert.NoError(t, err)
_, err = CreateRepo(dir, cfg)
assert.Contains(t, err.Error(), "repo already initialized")
assert.True(t, ConfigExists(dir))
})
}
func withFSRepo(t *testing.T, f func(*FSRepo)) {
dir, err := ioutil.TempDir("", "")
require.NoError(t, err)
defer os.RemoveAll(dir)
cfg := config.NewDefaultConfig()
require.NoError(t, err, InitFSRepo(dir, cfg))
r, err := OpenFSRepo(dir)
require.NoError(t, err)
f(r)
}
func mustGetAPIAddr(t *testing.T, r *FSRepo) string {
addr, err := r.APIAddr()
require.NoError(t, err)
return addr
}
func mustSetAPIAddr(t *testing.T, r *FSRepo, addr string) {
require.NoError(t, r.SetAPIAddr(addr))
}
func ConfigExists(dir string) bool {
_, err := os.Stat(filepath.Join(dir, "config.json"))
if os.IsNotExist(err) {
return false
}
return err == nil
}
| 1 | 18,756 | It would be worth factoring this out to a function, but you don't have to do that here. | filecoin-project-venus | go |
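A minimal sketch of the factoring the reviewer suggests (the helper name is hypothetical):

// Hypothetical helper the reviewer may have in mind:
func mustTempDir(t *testing.T) (string, func()) {
	dir, err := ioutil.TempDir("", "")
	require.NoError(t, err)
	return dir, func() { require.NoError(t, os.RemoveAll(dir)) }
}

// usage in each test:
//	dir, cleanup := mustTempDir(t)
//	defer cleanup()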
@@ -0,0 +1,15 @@
+package com.fsck.k9.message.html;
+
+/**
+ * General framework to handle uris when parsing. Allows different handling depending on the scheme identifier.
+ */
+public interface UriParser {
+ /**
+ * Parse and linkify scheme specific uri beginning from given position. The result will be written to given buffer.
+ * @param text String to parse uri from.
+ * @param startPos Position where uri starts (first letter of scheme).
+ * @param outputBuffer Buffer where linkified variant of uri is written to.
+ * @return Index where parsed uri ends (first non-uri letter). Should be startPos or smaller if no valid uri was found.
+ */
+ int linkifyUri(String text, int startPos, StringBuffer outputBuffer);
+} | 1 | 1 | 15,440 | There's no need for `final` in interfaces. | k9mail-k-9 | java |
|
@@ -101,6 +101,9 @@ namespace Datadog.Trace.OpenTracing
case DatadogTags.ServiceName:
Span.ServiceName = value;
return this;
+ case DatadogTags.ServiceVersion:
+ Span.SetTag(Tags.Version, value);
+ return this;
}
if (key == global::OpenTracing.Tag.Tags.Error.Key) | 1 | using System;
using System.Collections.Generic;
using System.Globalization;
using OpenTracing;
using OpenTracing.Tag;
namespace Datadog.Trace.OpenTracing
{
internal class OpenTracingSpan : ISpan
{
internal OpenTracingSpan(Span span)
{
Span = span;
Context = new OpenTracingSpanContext(span.Context);
}
public OpenTracingSpanContext Context { get; }
global::OpenTracing.ISpanContext ISpan.Context => Context;
internal Span Span { get; }
// TODO lucas: inline this in a separate commit, it will modify a lot of files
// This is only exposed for tests
internal Span DDSpan => Span;
internal string OperationName => Span.OperationName;
internal TimeSpan Duration => Span.Duration;
public string GetBaggageItem(string key) => null;
public ISpan Log(DateTimeOffset timestamp, IEnumerable<KeyValuePair<string, object>> fields) => this;
public ISpan Log(string eventName) => this;
public ISpan Log(DateTimeOffset timestamp, string eventName) => this;
public ISpan Log(IEnumerable<KeyValuePair<string, object>> fields) => this;
public ISpan SetBaggageItem(string key, string value) => this;
public ISpan SetOperationName(string operationName)
{
Span.OperationName = operationName;
return this;
}
public string GetTag(string key)
{
return Span.GetTag(key);
}
public ISpan SetTag(string key, bool value)
{
return SetTag(key, value.ToString());
}
public ISpan SetTag(string key, double value)
{
return SetTag(key, value.ToString(CultureInfo.InvariantCulture));
}
public ISpan SetTag(string key, int value)
{
return SetTag(key, value.ToString(CultureInfo.InvariantCulture));
}
public ISpan SetTag(BooleanTag tag, bool value)
{
return SetTag(tag.Key, value);
}
public ISpan SetTag(IntOrStringTag tag, string value)
{
return SetTag(tag.Key, value);
}
public ISpan SetTag(IntTag tag, int value)
{
return SetTag(tag.Key, value);
}
public ISpan SetTag(StringTag tag, string value)
{
return SetTag(tag.Key, value);
}
public ISpan SetTag(string key, string value)
{
// TODO:bertrand do we want this behavior on the Span object too ?
switch (key)
{
case DatadogTags.ResourceName:
Span.ResourceName = value;
return this;
case DatadogTags.SpanType:
Span.Type = value;
return this;
case DatadogTags.ServiceName:
Span.ServiceName = value;
return this;
}
if (key == global::OpenTracing.Tag.Tags.Error.Key)
{
Span.Error = value == bool.TrueString;
return this;
}
Span.SetTag(key, value);
return this;
}
public void Finish()
{
Span.Finish();
}
public void Finish(DateTimeOffset finishTimestamp)
{
Span.Finish(finishTimestamp);
}
}
}
| 1 | 16,889 | Isn't this case handled as a custom tag in `Span.SetTag()` below? This switch is only for special tags that actually set `Span` properties. | DataDog-dd-trace-dotnet | .cs |
@@ -681,7 +681,6 @@ class Quitter:
@cmdutils.argument('session', completion=miscmodels.session)
def quit(self, save=False, session=None):
"""Quit qutebrowser.
-
Args:
save: When given, save the open windows even if auto_save.session
is turned off. | 1 | # vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# Copyright 2014-2018 Florian Bruhin (The Compiler) <mail@qutebrowser.org>
#
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
"""Initialization of qutebrowser and application-wide things.
The run() function will get called once early initialization (in
qutebrowser.py/earlyinit.py) is done. See the qutebrowser.py docstring for
details about early initialization.
As we need to access the config before the QApplication is created, we
initialize everything the config needs before the QApplication is created, and
then leave it in a partially initialized state (no saving, no config errors
shown yet).
We then set up the QApplication object and initialize a few more low-level
things.
After that, init() and _init_modules() take over and initialize the rest.
After all initialization is done, the qt_mainloop() function is called, which
blocks and spins the Qt mainloop.
"""
import os
import sys
import subprocess
import functools
import json
import shutil
import tempfile
import atexit
import datetime
import tokenize
from PyQt5.QtWidgets import QApplication, QWidget
from PyQt5.QtGui import QDesktopServices, QPixmap, QIcon, QWindow
from PyQt5.QtCore import (pyqtSlot, qInstallMessageHandler, QTimer, QUrl,
QObject, QEvent, pyqtSignal, Qt)
try:
import hunter
except ImportError:
hunter = None
import qutebrowser
import qutebrowser.resources
from qutebrowser.completion import completiondelegate
from qutebrowser.completion.models import miscmodels
from qutebrowser.commands import cmdutils, runners, cmdexc
from qutebrowser.config import config, websettings, configfiles, configinit
from qutebrowser.browser import (urlmarks, adblock, history, browsertab,
qtnetworkdownloads, downloads, greasemonkey)
from qutebrowser.browser.network import proxy
from qutebrowser.browser.webkit import cookies, cache
from qutebrowser.browser.webkit.network import networkmanager
from qutebrowser.keyinput import macros
from qutebrowser.mainwindow import mainwindow, prompt
from qutebrowser.misc import (readline, ipc, savemanager, sessions,
crashsignal, earlyinit, sql, cmdhistory,
backendproblem)
from qutebrowser.utils import (log, version, message, utils, urlutils, objreg,
usertypes, standarddir, error)
# pylint: disable=unused-import
# We import those to run the cmdutils.register decorators.
from qutebrowser.mainwindow.statusbar import command
from qutebrowser.misc import utilcmds
# pylint: enable=unused-import
qApp = None
def run(args):
"""Initialize everything and run the application."""
if args.temp_basedir:
args.basedir = tempfile.mkdtemp(prefix='qutebrowser-basedir-')
quitter = Quitter(args)
objreg.register('quitter', quitter)
log.init.debug("Initializing directories...")
standarddir.init(args)
utils.preload_resources()
log.init.debug("Initializing config...")
configinit.early_init(args)
global qApp
qApp = Application(args)
qApp.setOrganizationName("qutebrowser")
qApp.setApplicationName("qutebrowser")
qApp.setApplicationVersion(qutebrowser.__version__)
qApp.lastWindowClosed.connect(quitter.on_last_window_closed)
if args.version:
print(version.version())
sys.exit(usertypes.Exit.ok)
crash_handler = crashsignal.CrashHandler(
app=qApp, quitter=quitter, args=args, parent=qApp)
crash_handler.activate()
objreg.register('crash-handler', crash_handler)
signal_handler = crashsignal.SignalHandler(app=qApp, quitter=quitter,
parent=qApp)
signal_handler.activate()
objreg.register('signal-handler', signal_handler)
try:
server = ipc.send_or_listen(args)
except ipc.Error:
# ipc.send_or_listen already displays the error message for us.
# We didn't really initialize much so far, so we just quit hard.
sys.exit(usertypes.Exit.err_ipc)
if server is None:
sys.exit(usertypes.Exit.ok)
else:
server.got_args.connect(lambda args, target_arg, cwd:
process_pos_args(args, cwd=cwd, via_ipc=True,
target_arg=target_arg))
init(args, crash_handler)
ret = qt_mainloop()
return ret
def qt_mainloop():
"""Simple wrapper to get a nicer stack trace for segfaults.
WARNING: misc/crashdialog.py checks the stacktrace for this function
name, so if this is changed, it should be changed there as well!
"""
return qApp.exec_()
def init(args, crash_handler):
"""Initialize everything.
Args:
args: The argparse namespace.
crash_handler: The CrashHandler instance.
"""
log.init.debug("Starting init...")
qApp.setQuitOnLastWindowClosed(False)
_init_icon()
try:
_init_modules(args, crash_handler)
except (OSError, UnicodeDecodeError, browsertab.WebTabError) as e:
error.handle_fatal_exc(e, args, "Error while initializing!",
pre_text="Error while initializing")
sys.exit(usertypes.Exit.err_init)
log.init.debug("Initializing eventfilter...")
event_filter = EventFilter(qApp)
qApp.installEventFilter(event_filter)
objreg.register('event-filter', event_filter)
log.init.debug("Connecting signals...")
qApp.focusChanged.connect(on_focus_changed)
_process_args(args)
QDesktopServices.setUrlHandler('http', open_desktopservices_url)
QDesktopServices.setUrlHandler('https', open_desktopservices_url)
QDesktopServices.setUrlHandler('qute', open_desktopservices_url)
objreg.get('web-history').import_txt()
log.init.debug("Init done!")
crash_handler.raise_crashdlg()
def _init_icon():
"""Initialize the icon of qutebrowser."""
icon = QIcon()
fallback_icon = QIcon()
for size in [16, 24, 32, 48, 64, 96, 128, 256, 512]:
filename = ':/icons/qutebrowser-{}x{}.png'.format(size, size)
pixmap = QPixmap(filename)
if pixmap.isNull():
log.init.warning("Failed to load {}".format(filename))
else:
fallback_icon.addPixmap(pixmap)
icon = QIcon.fromTheme('qutebrowser', fallback_icon)
if icon.isNull():
log.init.warning("Failed to load icon")
else:
qApp.setWindowIcon(icon)
def _process_args(args):
"""Open startpage etc. and process commandline args."""
if not args.override_restore:
_load_session(args.session)
session_manager = objreg.get('session-manager')
if not session_manager.did_load:
log.init.debug("Initializing main window...")
window = mainwindow.MainWindow(private=None)
if not args.nowindow:
window.show()
qApp.setActiveWindow(window)
process_pos_args(args.command)
_open_startpage()
_open_special_pages(args)
delta = datetime.datetime.now() - earlyinit.START_TIME
log.init.debug("Init finished after {}s".format(delta.total_seconds()))
def _load_session(name):
"""Load the default session.
Args:
name: The name of the session to load, or None to read state file.
"""
session_manager = objreg.get('session-manager')
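    # Preference order: explicit session name > '_autosave' session >
    # session name stored in the state file > no session at all.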
if name is None and session_manager.exists('_autosave'):
name = '_autosave'
elif name is None:
try:
name = configfiles.state['general']['session']
except KeyError:
# No session given as argument and none in the session file ->
# start without loading a session
return
try:
session_manager.load(name)
except sessions.SessionNotFoundError:
message.error("Session {} not found!".format(name))
except sessions.SessionError as e:
message.error("Failed to load session {}: {}".format(name, e))
try:
del configfiles.state['general']['session']
except KeyError:
pass
# If this was a _restart session, delete it.
if name == '_restart':
session_manager.delete('_restart')
def process_pos_args(args, via_ipc=False, cwd=None, target_arg=None):
"""Process positional commandline args.
URLs to open have no prefix, commands to execute begin with a colon.
Args:
args: A list of arguments to process.
via_ipc: Whether the arguments were transmitted over IPC.
cwd: The cwd to use for fuzzy_url.
target_arg: Command line argument received by a running instance via
ipc. If the --target argument was not specified, target_arg
will be an empty string.
"""
if via_ipc and not args:
win_id = mainwindow.get_window(via_ipc, force_window=True)
_open_startpage(win_id)
return
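    # Example: args == [':set tabs.show never', 'https://example.com'] runs
    # the command first, then opens the URL via urlutils.fuzzy_url().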
win_id = None
for cmd in args:
if cmd.startswith(':'):
if win_id is None:
win_id = mainwindow.get_window(via_ipc, force_tab=True)
log.init.debug("Startup cmd {!r}".format(cmd))
commandrunner = runners.CommandRunner(win_id)
commandrunner.run_safely(cmd[1:])
elif not cmd:
log.init.debug("Empty argument")
win_id = mainwindow.get_window(via_ipc, force_window=True)
else:
if via_ipc and target_arg and target_arg != 'auto':
open_target = target_arg
else:
open_target = None
if not cwd: # could also be an empty string due to the PyQt signal
cwd = None
try:
url = urlutils.fuzzy_url(cmd, cwd, relative=True)
except urlutils.InvalidUrlError as e:
message.error("Error in startup argument '{}': {}".format(
cmd, e))
else:
win_id = open_url(url, target=open_target, via_ipc=via_ipc)
def open_url(url, target=None, no_raise=False, via_ipc=True):
    """Open a URL in a new window/tab.
    Args:
        url: A URL to open.
target: same as new_instance_open_target (used as a default).
no_raise: suppress target window raising.
via_ipc: Whether the arguments were transmitted over IPC.
Return:
ID of a window that was used to open URL
"""
target = target or config.val.new_instance_open_target
background = target in {'tab-bg', 'tab-bg-silent'}
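    # e.g. a target of 'tab-bg' or 'tab-bg-silent' opens the URL in a
    # background tab; the window to use is picked by mainwindow.get_window().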
win_id = mainwindow.get_window(via_ipc, force_target=target,
no_raise=no_raise)
tabbed_browser = objreg.get('tabbed-browser', scope='window',
window=win_id)
log.init.debug("About to open URL: {}".format(url.toDisplayString()))
tabbed_browser.tabopen(url, background=background, related=False)
return win_id
def _open_startpage(win_id=None):
"""Open startpage.
The startpage is never opened if the given windows are not empty.
Args:
win_id: If None, open startpage in all empty windows.
If set, open the startpage in the given window.
"""
if win_id is not None:
window_ids = [win_id]
else:
window_ids = objreg.window_registry
for cur_win_id in list(window_ids): # Copying as the dict could change
tabbed_browser = objreg.get('tabbed-browser', scope='window',
window=cur_win_id)
if tabbed_browser.widget.count() == 0:
log.init.debug("Opening start pages")
for url in config.val.url.start_pages:
tabbed_browser.tabopen(url)
def _open_special_pages(args):
"""Open special notification pages which are only shown once.
Currently this is:
- Quickstart page if it's the first start.
- Legacy QtWebKit warning if needed.
Args:
args: The argparse namespace.
"""
if args.basedir is not None:
# With --basedir given, don't open anything.
return
general_sect = configfiles.state['general']
tabbed_browser = objreg.get('tabbed-browser', scope='window',
window='last-focused')
# Quickstart page
quickstart_done = general_sect.get('quickstart-done') == '1'
if not quickstart_done:
tabbed_browser.tabopen(
QUrl('https://www.qutebrowser.org/quickstart.html'))
general_sect['quickstart-done'] = '1'
# Setting migration page
needs_migration = os.path.exists(
os.path.join(standarddir.config(), 'qutebrowser.conf'))
migration_shown = general_sect.get('config-migration-shown') == '1'
if needs_migration and not migration_shown:
tabbed_browser.tabopen(QUrl('qute://help/configuring.html'),
background=False)
general_sect['config-migration-shown'] = '1'
def on_focus_changed(_old, new):
"""Register currently focused main window in the object registry."""
if new is None:
return
if not isinstance(new, QWidget):
log.misc.debug("on_focus_changed called with non-QWidget {!r}".format(
new))
return
window = new.window()
if isinstance(window, mainwindow.MainWindow):
objreg.register('last-focused-main-window', window, update=True)
# A focused window must also be visible, and in this case we should
# consider it as the most recently looked-at window
objreg.register('last-visible-main-window', window, update=True)
def open_desktopservices_url(url):
"""Handler to open a URL via QDesktopServices."""
win_id = mainwindow.get_window(via_ipc=True, force_window=False)
tabbed_browser = objreg.get('tabbed-browser', scope='window',
window=win_id)
tabbed_browser.tabopen(url)
def _init_modules(args, crash_handler):
"""Initialize all 'modules' which need to be initialized.
Args:
args: The argparse namespace.
crash_handler: The CrashHandler instance.
"""
log.init.debug("Initializing save manager...")
save_manager = savemanager.SaveManager(qApp)
objreg.register('save-manager', save_manager)
configinit.late_init(save_manager)
log.init.debug("Checking backend requirements...")
backendproblem.init()
log.init.debug("Initializing prompts...")
prompt.init()
log.init.debug("Initializing network...")
networkmanager.init()
log.init.debug("Initializing proxy...")
proxy.init()
log.init.debug("Initializing readline-bridge...")
readline_bridge = readline.ReadlineBridge()
objreg.register('readline-bridge', readline_bridge)
try:
log.init.debug("Initializing sql...")
sql.init(os.path.join(standarddir.data(), 'history.sqlite'))
log.init.debug("Initializing web history...")
history.init(qApp)
except sql.SqlError as e:
if e.environmental:
error.handle_fatal_exc(e, args, 'Error initializing SQL',
pre_text='Error initializing SQL')
sys.exit(usertypes.Exit.err_init)
else:
raise
log.init.debug("Initializing completion...")
completiondelegate.init()
log.init.debug("Initializing command history...")
cmdhistory.init()
log.init.debug("Initializing crashlog...")
if not args.no_err_windows:
crash_handler.handle_segfault()
log.init.debug("Initializing sessions...")
sessions.init(qApp)
log.init.debug("Initializing websettings...")
websettings.init(args)
log.init.debug("Initializing adblock...")
host_blocker = adblock.HostBlocker()
host_blocker.read_hosts()
objreg.register('host-blocker', host_blocker)
log.init.debug("Initializing quickmarks...")
quickmark_manager = urlmarks.QuickmarkManager(qApp)
objreg.register('quickmark-manager', quickmark_manager)
log.init.debug("Initializing bookmarks...")
bookmark_manager = urlmarks.BookmarkManager(qApp)
objreg.register('bookmark-manager', bookmark_manager)
log.init.debug("Initializing cookies...")
cookie_jar = cookies.CookieJar(qApp)
ram_cookie_jar = cookies.RAMCookieJar(qApp)
objreg.register('cookie-jar', cookie_jar)
objreg.register('ram-cookie-jar', ram_cookie_jar)
log.init.debug("Initializing cache...")
diskcache = cache.DiskCache(standarddir.cache(), parent=qApp)
objreg.register('cache', diskcache)
log.init.debug("Initializing downloads...")
download_manager = qtnetworkdownloads.DownloadManager(parent=qApp)
objreg.register('qtnetwork-download-manager', download_manager)
log.init.debug("Initializing Greasemonkey...")
greasemonkey.init()
log.init.debug("Misc initialization...")
macros.init()
# Init backend-specific stuff
browsertab.init()
class Quitter:
"""Utility class to quit/restart the QApplication.
Attributes:
quit_status: The current quitting status.
_shutting_down: Whether we're currently shutting down.
_args: The argparse namespace.
"""
def __init__(self, args):
self.quit_status = {
'crash': True,
'tabs': False,
'main': False,
}
self._shutting_down = False
self._args = args
def on_last_window_closed(self):
"""Slot which gets invoked when the last window was closed."""
self.shutdown(last_window=True)
def _compile_modules(self):
"""Compile all modules to catch SyntaxErrors."""
if os.path.basename(sys.argv[0]) == 'qutebrowser':
# Launched via launcher script
return
elif hasattr(sys, 'frozen'):
return
else:
path = os.path.abspath(os.path.dirname(qutebrowser.__file__))
if not os.path.isdir(path):
# Probably running from a python egg.
return
for dirpath, _dirnames, filenames in os.walk(path):
for fn in filenames:
                fullpath = os.path.join(dirpath, fn)
                if os.path.splitext(fn)[1] == '.py' and os.path.isfile(fullpath):
                    with tokenize.open(fullpath) as f:
                        compile(f.read(), fn, 'exec')
def _get_restart_args(self, pages=(), session=None, override_args=None):
"""Get the current working directory and args to relaunch qutebrowser.
Args:
pages: The pages to re-open.
session: The session to load, or None.
override_args: Argument overrides as a dict.
Return:
An (args, cwd) tuple.
args: The commandline as a list of strings.
cwd: The current working directory as a string.
"""
if os.path.basename(sys.argv[0]) == 'qutebrowser':
# Launched via launcher script
args = [sys.argv[0]]
cwd = None
elif hasattr(sys, 'frozen'):
args = [sys.executable]
cwd = os.path.abspath(os.path.dirname(sys.executable))
else:
args = [sys.executable, '-m', 'qutebrowser']
cwd = os.path.join(
os.path.abspath(os.path.dirname(qutebrowser.__file__)), '..')
if not os.path.isdir(cwd):
# Probably running from a python egg. Let's fallback to
# cwd=None and see if that works out.
# See https://github.com/qutebrowser/qutebrowser/issues/323
cwd = None
# Add all open pages so they get reopened.
page_args = []
for win in pages:
page_args.extend(win)
page_args.append('')
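        # The empty strings act as window separators: process_pos_args()
        # opens a new window for every empty argument it encounters.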
# Serialize the argparse namespace into json and pass that to the new
# process via --json-args.
# We do this as there's no way to "unparse" the namespace while
# ignoring some arguments.
argdict = vars(self._args)
argdict['session'] = None
argdict['url'] = []
argdict['command'] = page_args[:-1]
argdict['json_args'] = None
# Ensure the given session (or none at all) gets opened.
if session is None:
argdict['session'] = None
argdict['override_restore'] = True
else:
argdict['session'] = session
argdict['override_restore'] = False
# Ensure :restart works with --temp-basedir
if self._args.temp_basedir:
argdict['temp_basedir'] = False
argdict['temp_basedir_restarted'] = True
if override_args is not None:
argdict.update(override_args)
# Dump the data
data = json.dumps(argdict)
args += ['--json-args', data]
log.destroy.debug("args: {}".format(args))
log.destroy.debug("cwd: {}".format(cwd))
return args, cwd
@cmdutils.register(instance='quitter', name='restart')
def restart_cmd(self):
"""Restart qutebrowser while keeping existing tabs open."""
try:
ok = self.restart(session='_restart')
except sessions.SessionError as e:
log.destroy.exception("Failed to save session!")
raise cmdexc.CommandError("Failed to save session: {}!".format(e))
except SyntaxError as e:
log.destroy.exception("Got SyntaxError")
raise cmdexc.CommandError("SyntaxError in {}:{}: {}".format(
e.filename, e.lineno, e))
if ok:
self.shutdown(restart=True)
def restart(self, pages=(), session=None, override_args=None):
"""Inner logic to restart qutebrowser.
The "better" way to restart is to pass a session (_restart usually) as
that'll save the complete state.
However we don't do that (and pass a list of pages instead) when we
restart because of an exception, as that's a lot simpler and we don't
want to risk anything going wrong.
Args:
pages: A list of URLs to open.
session: The session to load, or None.
override_args: Argument overrides as a dict.
Return:
True if the restart succeeded, False otherwise.
"""
self._compile_modules()
log.destroy.debug("sys.executable: {}".format(sys.executable))
log.destroy.debug("sys.path: {}".format(sys.path))
log.destroy.debug("sys.argv: {}".format(sys.argv))
log.destroy.debug("frozen: {}".format(hasattr(sys, 'frozen')))
# Save the session if one is given.
if session is not None:
session_manager = objreg.get('session-manager')
session_manager.save(session, with_private=True)
# Make sure we're not accepting a connection from the new process
# before we fully exited.
ipc.server.shutdown()
# Open a new process and immediately shutdown the existing one
try:
args, cwd = self._get_restart_args(pages, session, override_args)
if cwd is None:
subprocess.Popen(args)
else:
subprocess.Popen(args, cwd=cwd)
except OSError:
log.destroy.exception("Failed to restart")
return False
else:
return True
@cmdutils.register(instance='quitter', name='quit')
@cmdutils.argument('session', completion=miscmodels.session)
def quit(self, save=False, session=None):
"""Quit qutebrowser.
Args:
save: When given, save the open windows even if auto_save.session
is turned off.
session: The name of the session to save.
"""
if session is not None and not save:
raise cmdexc.CommandError("Session name given without --save!")
if save:
if session is None:
session = sessions.default
self.shutdown(session=session)
else:
self.shutdown()
def shutdown(self, status=0, session=None, last_window=False,
restart=False):
"""Quit qutebrowser.
Args:
status: The status code to exit with.
session: A session name if saving should be forced.
last_window: If the shutdown was triggered due to the last window
closing.
restart: If we're planning to restart.
"""
if self._shutting_down:
return
self._shutting_down = True
log.destroy.debug("Shutting down with status {}, session {}...".format(
status, session))
session_manager = objreg.get('session-manager', None)
if session_manager is not None:
if session is not None:
session_manager.save(session, last_window=last_window,
load_next_time=True)
elif config.val.auto_save.session:
session_manager.save(sessions.default, last_window=last_window,
load_next_time=True)
if prompt.prompt_queue.shutdown():
            # If shutdown was called while we were asking a question, we're
            # still in a sub-eventloop (which gets quit now) and not in the
            # main one.
# This means we need to defer the real shutdown to when we're back
# in the real main event loop, or we'll get a segfault.
log.destroy.debug("Deferring real shutdown because question was "
"active.")
QTimer.singleShot(0, functools.partial(self._shutdown, status,
restart=restart))
else:
# If we have no questions to shut down, we are already in the real
# event loop, so we can shut down immediately.
self._shutdown(status, restart=restart)
def _shutdown(self, status, restart): # noqa
"""Second stage of shutdown."""
log.destroy.debug("Stage 2 of shutting down...")
if qApp is None:
# No QApplication exists yet, so quit hard.
sys.exit(status)
# Remove eventfilter
try:
log.destroy.debug("Removing eventfilter...")
event_filter = objreg.get('event-filter', None)
if event_filter is not None:
qApp.removeEventFilter(event_filter)
except AttributeError:
pass
# Close all windows
QApplication.closeAllWindows()
# Shut down IPC
try:
ipc.server.shutdown()
except KeyError:
pass
# Save everything
try:
save_manager = objreg.get('save-manager')
except KeyError:
log.destroy.debug("Save manager not initialized yet, so not "
"saving anything.")
else:
for key in save_manager.saveables:
try:
save_manager.save(key, is_exit=True)
except OSError as e:
error.handle_fatal_exc(
e, self._args, "Error while saving!",
pre_text="Error while saving {}".format(key))
# Disable storage so removing tempdir will work
websettings.shutdown()
# Disable application proxy factory to fix segfaults with Qt 5.10.1
proxy.shutdown()
# Re-enable faulthandler to stdout, then remove crash log
log.destroy.debug("Deactivating crash log...")
objreg.get('crash-handler').destroy_crashlogfile()
# Delete temp basedir
if ((self._args.temp_basedir or self._args.temp_basedir_restarted) and
not restart):
atexit.register(shutil.rmtree, self._args.basedir,
ignore_errors=True)
# Delete temp download dir
downloads.temp_download_manager.cleanup()
# If we don't kill our custom handler here we might get segfaults
log.destroy.debug("Deactivating message handler...")
qInstallMessageHandler(None)
# Now we can hopefully quit without segfaults
log.destroy.debug("Deferring QApplication::exit...")
objreg.get('signal-handler').deactivate()
session_manager = objreg.get('session-manager', None)
if session_manager is not None:
session_manager.delete_autosave()
# We use a singleshot timer to exit here to minimize the likelihood of
# segfaults.
QTimer.singleShot(0, functools.partial(qApp.exit, status))
class Application(QApplication):
"""Main application instance.
Attributes:
        _args: The argparse namespace.
_last_focus_object: The last focused object's repr.
"""
new_window = pyqtSignal(mainwindow.MainWindow)
def __init__(self, args):
"""Constructor.
Args:
            args: Argument namespace from argparse.
"""
self._last_focus_object = None
qt_args = configinit.qt_args(args)
log.init.debug("Qt arguments: {}, based on {}".format(qt_args, args))
super().__init__(qt_args)
log.init.debug("Initializing application...")
self._args = args
objreg.register('args', args)
objreg.register('app', self)
self.launch_time = datetime.datetime.now()
self.focusObjectChanged.connect(self.on_focus_object_changed)
self.setAttribute(Qt.AA_UseHighDpiPixmaps, True)
@pyqtSlot(QObject)
def on_focus_object_changed(self, obj):
"""Log when the focus object changed."""
output = repr(obj)
if self._last_focus_object != output:
log.misc.debug("Focus object changed: {}".format(output))
self._last_focus_object = output
def event(self, e):
"""Handle macOS FileOpen events."""
if e.type() == QEvent.FileOpen:
url = e.url()
if url.isValid():
open_url(url, no_raise=True)
else:
message.error("Invalid URL: {}".format(url.errorString()))
else:
return super().event(e)
return True
def __repr__(self):
return utils.get_repr(self)
def exit(self, status):
"""Extend QApplication::exit to log the event."""
log.destroy.debug("Now calling QApplication::exit.")
if 'debug-exit' in self._args.debug_flags:
if hunter is None:
print("Not logging late shutdown because hunter could not be "
"imported!", file=sys.stderr)
else:
print("Now logging late shutdown.", file=sys.stderr)
hunter.trace()
super().exit(status)
class EventFilter(QObject):
"""Global Qt event filter.
Attributes:
_activated: Whether the EventFilter is currently active.
        _handlers: A {QEvent.Type: callable} dict with the handlers for an
                   event.
"""
def __init__(self, parent=None):
super().__init__(parent)
self._activated = True
self._handlers = {
QEvent.KeyPress: self._handle_key_event,
QEvent.KeyRelease: self._handle_key_event,
QEvent.ShortcutOverride: self._handle_key_event,
}
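        # All three event types go through the same handler so the mode
        # manager sees a key before Qt treats it as a shortcut.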
def _handle_key_event(self, event):
"""Handle a key press/release event.
Args:
event: The QEvent which is about to be delivered.
Return:
True if the event should be filtered, False if it's passed through.
"""
if qApp.activeWindow() not in objreg.window_registry.values():
# Some other window (print dialog, etc.) is focused so we pass the
# event through.
return False
try:
man = objreg.get('mode-manager', scope='window', window='current')
return man.handle_event(event)
except objreg.RegistryUnavailableError:
# No window available yet, or not a MainWindow
return False
def eventFilter(self, obj, event):
"""Handle an event.
Args:
obj: The object which will get the event.
event: The QEvent which is about to be delivered.
Return:
True if the event should be filtered, False if it's passed through.
"""
try:
if not self._activated:
return False
if not isinstance(obj, QWindow):
# We already handled this same event at some point earlier, so
# we're not interested in it anymore.
return False
try:
handler = self._handlers[event.type()]
except KeyError:
return False
else:
return handler(event)
except:
# If there is an exception in here and we leave the eventfilter
# activated, we'll get an infinite loop and a stack overflow.
self._activated = False
raise
| 1 | 21,628 | This shouldn't be changed. | qutebrowser-qutebrowser | py |
@@ -1,5 +1,5 @@
## This file is part of Invenio.
-## Copyright (C) 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012 CERN.
+## Copyright (C) 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2014 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as | 1 | ## This file is part of Invenio.
## Copyright (C) 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Invenio BibIndex Administrator Interface."""
__revision__ = "$Id$"
import random
from six import iteritems
from invenio.config import \
CFG_SITE_LANG, \
CFG_SITE_URL, \
CFG_BINDIR
from invenio.legacy.bibrank.adminlib import write_outcome, \
modify_translations, \
get_def_name, \
get_name, \
get_languages, \
addadminbox, \
tupletotable, \
createhiddenform
from invenio.modules.access.engine import acc_authorize_action
from invenio.legacy.dbquery import run_sql, get_table_status_info, wash_table_column_name
from invenio.legacy.bibindex.engine_stemmer import get_stemming_language_map
import invenio.legacy.template
from invenio.legacy.bibindex.engine_config import CFG_BIBINDEX_SYNONYM_MATCH_TYPE, \
CFG_BIBINDEX_COLUMN_VALUE_SEPARATOR
from invenio.modules.knowledge.dblayer import get_all_kb_names
from invenio.legacy.bibindex.engine_utils import load_tokenizers, \
get_idx_indexer, \
get_all_indexes, \
get_all_virtual_indexes, \
get_virtual_index_building_blocks, \
get_index_name_from_index_id, \
get_all_index_names_and_column_values, \
is_index_virtual, \
get_index_virtual_indexes, \
get_index_fields
from invenio.utils.datastructures import LazyDict
_TOKENIZERS = LazyDict(load_tokenizers)
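# Tokenizers are loaded lazily, on first access, rather than at import time.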
websearch_templates = invenio.legacy.template.load('websearch')
def getnavtrail(previous = ''):
"""Get the navtrail"""
navtrail = """<a class="navtrail" href="%s/help/admin">Admin Area</a> """ % (CFG_SITE_URL,)
navtrail = navtrail + previous
return navtrail
def perform_index(ln=CFG_SITE_LANG, mtype='', content='', **params):
    """Start area for modifying indexes.
mtype - the method that called this method.
content - the output from that method."""
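    # Dispatch pattern: if 'content' was already produced by the method named
    # in 'mtype', reuse it; otherwise call the matching perform_* helper.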
fin_output = """
<table>
<tr>
<td>0. <small><a href="%s/admin/bibindex/bibindexadmin.py/index?ln=%s">Show all</a></small></td>
<td>1. <small><a href="%s/admin/bibindex/bibindexadmin.py/index?ln=%s&mtype=perform_showindexoverview#1">Overview of indexes</a></small></td>
<td>2. <small><a href="%s/admin/bibindex/bibindexadmin.py/index?ln=%s&mtype=perform_showvirtualindexoverview#2">Overview of virtual indexes</a></small></td>
    <td>3. <small><a href="%s/admin/bibindex/bibindexadmin.py/index?ln=%s&mtype=perform_editindexes#3">Edit index</a></small></td>
    <td>4. <small><a href="%s/admin/bibindex/bibindexadmin.py/index?ln=%s&mtype=perform_addindex#4">Add new index</a></small></td>
<td>5. <small><a href="%s/admin/bibindex/bibindexadmin.py/field?ln=%s">Manage logical fields</a></small></td>
<td>6. <small><a href="%s/help/admin/bibindex-admin-guide">Guide</a></small></td>
</tr>
</table>
""" % (CFG_SITE_URL, ln, CFG_SITE_URL, ln, CFG_SITE_URL, ln, CFG_SITE_URL, ln, CFG_SITE_URL, ln, CFG_SITE_URL, ln, CFG_SITE_URL)
if mtype == "perform_showindexoverview" and content:
fin_output += content
elif mtype == "perform_showindexoverview" or not mtype:
fin_output += perform_showindexoverview(ln, callback='', **params)
if mtype == "perform_showvirtualindexoverview" and content:
fin_output += content
elif mtype == "perform_showvirtualindexoverview" or not mtype:
fin_output += perform_showvirtualindexoverview(ln, callback='', **params)
if mtype == "perform_editindexes" and content:
fin_output += content
elif mtype == "perform_editindexes" or not mtype:
fin_output += perform_editindexes(ln, callback='', **params)
if mtype == "perform_addindex" and content:
fin_output += content
elif mtype == "perform_addindex" or not mtype:
fin_output += perform_addindex(ln, callback='', **params)
if mtype == "perform_editvirtualindexes" and content:
fin_output += content
elif mtype == "perform_editvirtualindexes":
        # not visible in 'show all' view of 'Manage Indexes'
fin_output += perform_editvirtualindexes(ln, callback='', **params)
if mtype == "perform_addvirtualindex" and content:
fin_output += content
elif mtype == "perform_addvirtualindex":
        # not visible in 'show all' view of 'Manage Indexes'
fin_output += perform_addvirtualindex(ln, callback='', **params)
if mtype == "perform_deletevirtualindex" and content:
fin_output += content
elif mtype == "perform_deletevirtualindex":
        # not visible in 'show all' view of 'Manage Indexes'
fin_output += perform_deletevirtualindex(ln, callback='', **params)
return addadminbox("<b>Menu</b>", [fin_output])
def perform_field(ln=CFG_SITE_LANG, mtype='', content=''):
"""Start area for modifying fields
mtype - the method that called this method.
content - the output from that method."""
fin_output = """
<table>
<tr>
<td>0. <small><a href="%s/admin/bibindex/bibindexadmin.py/field?ln=%s">Show all</a></small></td>
<td>1. <small><a href="%s/admin/bibindex/bibindexadmin.py/field?ln=%s&mtype=perform_showfieldoverview#1">Overview of logical fields</a></small></td>
<td>2. <small><a href="%s/admin/bibindex/bibindexadmin.py/field?ln=%s&mtype=perform_editfields#2">Edit logical field</a></small></td>
<td>3. <small><a href="%s/admin/bibindex/bibindexadmin.py/field?ln=%s&mtype=perform_addfield#3">Add new logical field</a></small></td>
<td>4. <small><a href="%s/admin/bibindex/bibindexadmin.py/index?ln=%s">Manage Indexes</a></small></td>
<td>5. <small><a href="%s/help/admin/bibindex-admin-guide">Guide</a></small></td>
</tr>
</table>
""" % (CFG_SITE_URL, ln, CFG_SITE_URL, ln, CFG_SITE_URL, ln, CFG_SITE_URL, ln, CFG_SITE_URL, ln, CFG_SITE_URL)
if mtype == "perform_showfieldoverview" and content:
fin_output += content
elif mtype == "perform_showfieldoverview" or not mtype:
fin_output += perform_showfieldoverview(ln, callback='')
if mtype == "perform_editfields" and content:
fin_output += content
elif mtype == "perform_editfields" or not mtype:
fin_output += perform_editfields(ln, callback='')
if mtype == "perform_addfield" and content:
fin_output += content
elif mtype == "perform_addfield" or not mtype:
fin_output += perform_addfield(ln, callback='')
return addadminbox("<b>Menu</b>", [fin_output])
def perform_editfield(fldID, ln=CFG_SITE_LANG, mtype='', content='', callback='yes', confirm=-1):
    """Form to modify a logical field. This method calls other methods, which
    in turn call it back with their output.
fldID - id of the field
mtype - the method that called this method.
content - the output from that method."""
fld_dict = dict(get_def_name('', "field"))
if fldID in [-1, "-1"]:
return addadminbox("Edit logical field", ["""<b><span class="info">Please go back and select a logical field</span></b>"""])
fin_output = """
<table>
<tr>
<td><b>Menu</b></td>
</tr>
<tr>
<td>0. <small><a href="%s/admin/bibindex/bibindexadmin.py/editfield?fldID=%s&ln=%s">Show all</a></small></td>
<td>1. <small><a href="%s/admin/bibindex/bibindexadmin.py/editfield?fldID=%s&ln=%s&mtype=perform_modifyfield">Modify field code</a></small></td>
<td>2. <small><a href="%s/admin/bibindex/bibindexadmin.py/editfield?fldID=%s&ln=%s&mtype=perform_modifyfieldtranslations">Modify translations</a></small></td>
<td>3. <small><a href="%s/admin/bibindex/bibindexadmin.py/editfield?fldID=%s&ln=%s&mtype=perform_modifyfieldtags">Modify tags</a></small></td>
<td>4. <small><a href="%s/admin/bibindex/bibindexadmin.py/editfield?fldID=%s&ln=%s&mtype=perform_deletefield">Delete field</a></small></td>
</tr><tr>
<td>5. <small><a href="%s/admin/bibindex/bibindexadmin.py/editfield?fldID=%s&ln=%s&mtype=perform_showdetailsfield">Show field usage</a></small></td>
</tr>
</table>
""" % (CFG_SITE_URL, fldID, ln, CFG_SITE_URL, fldID, ln, CFG_SITE_URL, fldID, ln, CFG_SITE_URL, fldID, ln, CFG_SITE_URL, fldID, ln, CFG_SITE_URL, fldID, ln)
if mtype == "perform_modifyfield" and content:
fin_output += content
elif mtype == "perform_modifyfield" or not mtype:
fin_output += perform_modifyfield(fldID, ln, callback='')
if mtype == "perform_modifyfieldtranslations" and content:
fin_output += content
elif mtype == "perform_modifyfieldtranslations" or not mtype:
fin_output += perform_modifyfieldtranslations(fldID, ln, callback='')
if mtype == "perform_modifyfieldtags" and content:
fin_output += content
elif mtype == "perform_modifyfieldtags" or not mtype:
fin_output += perform_modifyfieldtags(fldID, ln, callback='')
if mtype == "perform_deletefield" and content:
fin_output += content
elif mtype == "perform_deletefield" or not mtype:
fin_output += perform_deletefield(fldID, ln, callback='')
return addadminbox("Edit logical field '%s'" % fld_dict[int(fldID)], [fin_output])
def perform_editindex(idxID, ln=CFG_SITE_LANG, mtype='', content='', callback='yes', confirm=-1):
    """Form to modify an index. This method calls other methods, which in
    turn call it back with their output.
idxID - id of the index
mtype - the method that called this method.
content - the output from that method."""
if idxID in [-1, "-1"]:
        return addadminbox("Edit index", ["""<b><span class="info">Please go back and select an index</span></b>"""])
fin_output = """
<table>
<tr>
<td><b>Menu</b></td>
</tr>
<tr>
<td>0. <small><a href="%s/admin/bibindex/bibindexadmin.py/editindex?idxID=%s&ln=%s">Show all</a></small></td>
<td>1. <small><a href="%s/admin/bibindex/bibindexadmin.py/editindex?idxID=%s&ln=%s&mtype=perform_modifyindex">Modify index name / descriptor</a></small></td>
<td>2. <small><a href="%s/admin/bibindex/bibindexadmin.py/editindex?idxID=%s&ln=%s&mtype=perform_modifyindextranslations">Modify translations</a></small></td>
<td>3. <small><a href="%s/admin/bibindex/bibindexadmin.py/editindex?idxID=%s&ln=%s&mtype=perform_modifyindexfields">Modify index fields</a></small></td>
<td>4. <small><a href="%s/admin/bibindex/bibindexadmin.py/editindex?idxID=%s&ln=%s&mtype=perform_modifyindexstemming">Modify index stemming language</a></small></td>
<td>5. <small><a href="%s/admin/bibindex/bibindexadmin.py/editindex?idxID=%s&ln=%s&mtype=perform_modifysynonymkb">Modify synonym knowledge base</a></small></td>
<td>6. <small><a href="%s/admin/bibindex/bibindexadmin.py/editindex?idxID=%s&ln=%s&mtype=perform_modifystopwords">Modify remove stopwords</a></small></td>
<td>7. <small><a href="%s/admin/bibindex/bibindexadmin.py/editindex?idxID=%s&ln=%s&mtype=perform_modifyremovehtml">Modify remove HTML markup</a></small></td>
<td>8. <small><a href="%s/admin/bibindex/bibindexadmin.py/editindex?idxID=%s&ln=%s&mtype=perform_modifyremovelatex">Modify remove latex markup</a></small></td>
<td>9. <small><a href="%s/admin/bibindex/bibindexadmin.py/editindex?idxID=%s&ln=%s&mtype=perform_modifytokenizer">Modify tokenizer</a></small></td>
<td>10. <small><a href="%s/admin/bibindex/bibindexadmin.py/editindex?idxID=%s&ln=%s&mtype=perform_modifyindexer">Modify indexer</a></small></td>
<td>11. <small><a href="%s/admin/bibindex/bibindexadmin.py/editindex?idxID=%s&ln=%s&mtype=perform_deleteindex">Delete index</a></small></td>
</tr>
</table>
""" % (CFG_SITE_URL, idxID, ln, CFG_SITE_URL, idxID, ln, CFG_SITE_URL, idxID, ln, CFG_SITE_URL, idxID, ln, CFG_SITE_URL, idxID, ln, CFG_SITE_URL, idxID, ln, CFG_SITE_URL, idxID, ln, CFG_SITE_URL, idxID, ln, CFG_SITE_URL, idxID, ln, CFG_SITE_URL, idxID, ln, CFG_SITE_URL, idxID, ln, CFG_SITE_URL, idxID, ln)
if mtype == "perform_modifyindex" and content:
fin_output += content
elif mtype == "perform_modifyindex" or not mtype:
fin_output += perform_modifyindex(idxID, ln, callback='')
if mtype == "perform_modifyindextranslations" and content:
fin_output += content
elif mtype == "perform_modifyindextranslations" or not mtype:
fin_output += perform_modifyindextranslations(idxID, ln, callback='')
if mtype == "perform_modifyindexfields" and content:
fin_output += content
elif mtype == "perform_modifyindexfields" or not mtype:
fin_output += perform_modifyindexfields(idxID, ln, callback='')
if mtype == "perform_modifyindexstemming" and content:
fin_output += content
elif mtype == "perform_modifyindexstemming" or not mtype:
fin_output += perform_modifyindexstemming(idxID, ln, callback='')
if mtype == "perform_modifysynonymkb" and content:
fin_output += content
elif mtype == "perform_modifysynonymkb" or not mtype:
fin_output += perform_modifysynonymkb(idxID, ln, callback='')
if mtype == "perform_modifystopwords" and content:
fin_output += content
elif mtype == "perform_modifystopwords" or not mtype:
fin_output += perform_modifystopwords(idxID, ln, callback='')
if mtype == "perform_modifyremovehtml" and content:
fin_output += content
elif mtype == "perform_modifyremovehtml" or not mtype:
fin_output += perform_modifyremovehtml(idxID, ln, callback='')
if mtype == "perform_modifyremovelatex" and content:
fin_output += content
elif mtype == "perform_modifyremovelatex" or not mtype:
fin_output += perform_modifyremovelatex(idxID, ln, callback='')
if mtype == "perform_modifytokenizer" and content:
fin_output += content
elif mtype == "perform_modifytokenizer" or not mtype:
fin_output += perform_modifytokenizer(idxID, ln, callback='')
if mtype == "perform_modifyindexer" and content:
fin_output += content
elif mtype == "perform_modifyindexer" or not mtype:
fin_output += perform_modifyindexer(idxID, ln, callback='')
if mtype == "perform_deleteindex" and content:
fin_output += content
elif mtype == "perform_deleteindex" or not mtype:
fin_output += perform_deleteindex(idxID, ln, callback='')
return addadminbox("Edit index", [fin_output])
def perform_editvirtualindex(idxID, ln=CFG_SITE_LANG, mtype='', content='', callback='yes', confirm=-1):
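    """Form to modify a virtual index; follows the same dispatch pattern as
    perform_editindex.
    idxID - id of the virtual index
    mtype - the method that called this method.
    content - the output from that method."""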
if idxID in [-1, "-1"]:
return addadminbox("Edit virtual index", ["""<b><span class="info">Please go back and select an index</span></b>"""])
fin_output = """
<table>
<tr>
<td><b>Menu</b></td>
</tr>
<tr>
<td>0. <small><a href="%s/admin/bibindex/bibindexadmin.py/editvirtualindex?idxID=%s&ln=%s">Show all</a></small></td>
    <td>1. <small><a href="%s/admin/bibindex/bibindexadmin.py/editvirtualindex?idxID=%s&ln=%s&mtype=perform_modifydependentindexes">Modify dependent indexes</a></small></td>
<td>2. <small><a href="%s/admin/bibindex/bibindexadmin.py/index?ln=%s&mtype=perform_showvirtualindexoverview#2">Overview of virtual indexes</a></small></td>
</tr>
</table>
""" % (CFG_SITE_URL, idxID, ln, CFG_SITE_URL, idxID, ln, CFG_SITE_URL, ln)
if mtype == "perform_modifydependentindexes" and content:
fin_output += content
elif mtype == "perform_modifydependentindexes" or not mtype:
fin_output += perform_modifydependentindexes(idxID, ln, callback='')
index_name = "( %s )" % get_index_name_from_index_id(idxID)
return addadminbox("Edit virtual index %s" % index_name, [fin_output])
def perform_showindexoverview(ln=CFG_SITE_LANG, callback='', confirm=0):
subtitle = """<a name="1"></a>1. Overview of all indexes"""
output = """<table cellpadding="3" border="1">"""
    output += """<tr><td><strong>%s</strong></td><td><strong>%s</strong></td><td><strong>%s</strong></td><td><strong>%s</strong></td><td><strong>%s</strong></td><td><strong>%s</strong></td><td><strong>%s</strong></td><td><strong>%s</strong></td><td><strong>%s</strong></td><td><strong>%s</strong></td><td><strong>%s</strong></td><td><strong>%s</strong></td><td><strong>%s</strong></td><td><strong>%s</strong></td><td><strong>%s</strong></td><td><strong>%s</strong></td></tr>""" % ("ID", "Name", "Fwd.Idx Size", "Rev.Idx Size", "Fwd.Idx Words", "Rev.Idx Records", "Last updated", "Fields", "Translations", "Stemming Language", "Synonym knowledge base", "Remove stopwords", "Remove HTML markup", "Remove Latex markup", "Tokenizer", "Indexer type")
idx = get_idx()
idx_dict = dict(get_def_name('', "idxINDEX"))
stemming_language_map = get_stemming_language_map()
stemming_language_map_reversed = dict([(elem[1], elem[0]) for elem in iteritems(stemming_language_map)])
virtual_indexes = dict(get_all_virtual_indexes())
for idxID, idxNAME, idxDESC, idxUPD, idxSTEM, idxSYNKB, idxSTOPWORDS, idxHTML, idxLATEX, idxTOK in idx:
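        # Forward/reverse index tables follow the idxWORD<NN>F / idxWORD<NN>R
        # naming scheme, with the index id zero-padded to two digits.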
forward_table_status_info = get_table_status_info('idxWORD%sF' % (idxID < 10 and '0%s' % idxID or idxID))
reverse_table_status_info = get_table_status_info('idxWORD%sR' % (idxID < 10 and '0%s' % idxID or idxID))
if str(idxUPD)[-3:] == ".00":
idxUPD = str(idxUPD)[0:-3]
lang = get_lang_list("idxINDEXNAME", "id_idxINDEX", idxID)
idx_fld = get_idx_fld(idxID)
fld = ""
for row in idx_fld:
fld += row[3] + ", "
if fld.endswith(", "):
fld = fld[:-2]
if len(fld) == 0:
fld = """<strong><span class="info">None</span></strong>"""
date = (idxUPD and idxUPD or """<strong><span class="info">Not updated</span></strong>""")
stemming_lang = stemming_language_map_reversed.get(idxSTEM, None)
if not stemming_lang:
stemming_lang = """<strong><span class="info">None</span></strong>"""
synonym_kb = get_idx_synonym_kb(idxID)
if not synonym_kb:
synonym_kb = """<strong><span class="info">None</span></strong>"""
remove_stopwords = get_idx_remove_stopwords(idxID)
if not remove_stopwords:
remove_stopwords = """<strong><span class="info">None</span></strong>"""
remove_html_markup = get_idx_remove_html_markup(idxID)
if not remove_html_markup:
remove_html_markup = """<strong><span class="info">None</span></strong>"""
remove_latex_markup = get_idx_remove_latex_markup(idxID)
if not remove_latex_markup:
remove_latex_markup = """<strong><span class="info">None</span></strong>"""
tokenizer = get_idx_tokenizer(idxID)
        if not tokenizer:
tokenizer = """<strong><span class="info">None</span></strong>"""
type_of_indexer = virtual_indexes.get(idxID) and "virtual" or get_idx_indexer(idxNAME)
if forward_table_status_info and reverse_table_status_info:
output += """<tr><td>%s</td><td>%s</td><td>%s</td><td>%s</td><td>%s</td><td>%s</td><td>%s</td><td>%s</td><td>%s</td><td>%s</td><td>%s</td><td>%s</td><td>%s</td><td>%s</td><td>%s</td><td>%s</td></tr>""" % \
(idxID,
"""<a href="%s/admin/bibindex/bibindexadmin.py/editindex?idxID=%s&ln=%s" title="%s">%s</a>""" % (CFG_SITE_URL, idxID, ln, idxDESC, idx_dict.get(idxID, idxNAME)),
"%s MB" % websearch_templates.tmpl_nice_number(forward_table_status_info['Data_length'] / 1048576.0, max_ndigits_after_dot=3),
"%s MB" % websearch_templates.tmpl_nice_number(reverse_table_status_info['Data_length'] / 1048576.0, max_ndigits_after_dot=3),
websearch_templates.tmpl_nice_number(forward_table_status_info['Rows']),
websearch_templates.tmpl_nice_number(reverse_table_status_info['Rows'], max_ndigits_after_dot=3),
date,
fld,
lang,
stemming_lang,
synonym_kb,
remove_stopwords,
remove_html_markup,
remove_latex_markup,
tokenizer,
type_of_indexer)
elif not forward_table_status_info:
            output += """<tr><td>%s</td><td>%s</td><td>%s</td><td>%s</td><td>%s</td><td>%s</td><td>%s</td><td>%s</td><td>%s</td><td>%s</td><td>%s</td><td>%s</td><td>%s</td><td>%s</td><td>%s</td><td>%s</td></tr>""" % \
                      (idxID,
                       """<a href="%s/admin/bibindex/bibindexadmin.py/editindex?idxID=%s&ln=%s">%s</a>""" % (CFG_SITE_URL, idxID, ln, idx_dict.get(idxID, idxNAME)),
                       "Error",
                       "%s MB" % websearch_templates.tmpl_nice_number(reverse_table_status_info['Data_length'] / 1048576.0, max_ndigits_after_dot=3),
                       "Error",
                       websearch_templates.tmpl_nice_number(reverse_table_status_info['Rows'], max_ndigits_after_dot=3),
                       date,
                       "",
                       lang,
                       stemming_lang,
                       synonym_kb,
                       remove_stopwords,
                       remove_html_markup,
                       remove_latex_markup,
                       tokenizer,
                       type_of_indexer)
elif not reverse_table_status_info:
            output += """<tr><td>%s</td><td>%s</td><td>%s</td><td>%s</td><td>%s</td><td>%s</td><td>%s</td><td>%s</td><td>%s</td><td>%s</td><td>%s</td><td>%s</td><td>%s</td><td>%s</td><td>%s</td><td>%s</td></tr>""" % \
                      (idxID,
                       """<a href="%s/admin/bibindex/bibindexadmin.py/editindex?idxID=%s&ln=%s">%s</a>""" % (CFG_SITE_URL, idxID, ln, idx_dict.get(idxID, idxNAME)),
                       "%s MB" % websearch_templates.tmpl_nice_number(forward_table_status_info['Data_length'] / 1048576.0, max_ndigits_after_dot=3),
                       "Error",
                       websearch_templates.tmpl_nice_number(forward_table_status_info['Rows'], max_ndigits_after_dot=3),
                       "Error",
                       date,
                       "",
                       lang,
                       stemming_lang,
                       synonym_kb,
                       remove_stopwords,
                       remove_html_markup,
                       remove_latex_markup,
                       tokenizer,
                       type_of_indexer)
output += "</table>"
body = [output]
if callback:
return perform_index(ln, "perform_showindexoverview", addadminbox(subtitle, body))
else:
return addadminbox(subtitle, body)
def perform_showvirtualindexoverview(ln=CFG_SITE_LANG, callback='', confirm=0):
    subtitle = """<a name="2"></a>2. Overview of virtual indexes"""
output = """
<table>
<tr>
<td>1. <small><a href="%s/admin/bibindex/bibindexadmin.py/index?ln=%s&mtype=perform_editvirtualindexes#1">Edit virtual index</a></small></td>
<td>2. <small><a href="%s/admin/bibindex/bibindexadmin.py/index?ln=%s&mtype=perform_addvirtualindex#2">Add new virtual index</a></small></td>
<td>3. <small><a href="%s/admin/bibindex/bibindexadmin.py/index?ln=%s&mtype=perform_deletevirtualindex#3">Delete virtual index</a></small></td>
</tr>
</table>
""" % (CFG_SITE_URL, ln, CFG_SITE_URL, ln, CFG_SITE_URL, ln)
output += """<table cellpadding="3" border="1">"""
output += """<tr><td><strong>%s</strong></td><td><strong>%s</strong></td><td><strong>%s</strong></td></tr>""" % ("ID", "Virtual index", "Dependent indexes")
idx = get_all_virtual_indexes()
for idxID, idxNAME in idx:
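        # get_virtual_index_building_blocks() yields (id, name) pairs of the
        # physical indexes; only the names are displayed.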
normal_indexes = zip(*get_virtual_index_building_blocks(idxID))[1]
output += """<tr><td>%s</td><td>%s</td><td>%s</td></tr>""" % \
(idxID,
"""<a href="%s/admin/bibindex/bibindexadmin.py/editvirtualindex?idxID=%s&ln=%s">%s</a>""" % (CFG_SITE_URL, idxID, ln, idxNAME),
", ".join(normal_indexes))
output += "</table>"
body = [output]
if callback:
return perform_index(ln, "perform_showvirtualindexoverview", addadminbox(subtitle, body))
else:
return addadminbox(subtitle, body)
def perform_editindexes(ln=CFG_SITE_LANG, callback='yes', content='', confirm=-1):
"""show a list of indexes that can be edited."""
subtitle = """<a name="3"></a>3. Edit index <small>[<a title="See guide" href="%s/help/admin/bibindex-admin-guide">?</a>]</small>""" % (CFG_SITE_URL)
fin_output = ''
idx = get_idx()
output = ""
if len(idx) > 0:
text = """
<span class="adminlabel">Index name</span>
<select name="idxID" class="admin_w200">
        <option value="-1">- Select an index -</option>
"""
for (idxID, idxNAME, idxDESC, idxUPD, idxSTEM, idxSYNKB, idxSTOPWORDS, idxHTML, idxLATEX, idxTOK) in idx:
text += """<option value="%s">%s</option>""" % (idxID, idxNAME)
text += """</select>"""
output += createhiddenform(action="%s/admin/bibindex/bibindexadmin.py/editindex" % CFG_SITE_URL,
text=text,
button="Edit",
ln=ln,
confirm=1)
else:
        output += """No indexes exist"""
body = [output]
if callback:
return perform_index(ln, "perform_editindexes", addadminbox(subtitle, body))
else:
return addadminbox(subtitle, body)
def perform_editvirtualindexes(ln=CFG_SITE_LANG, callback='yes', content='', confirm=-1):
"""show a list of indexes that can be edited."""
    subtitle = """<a name="1"></a>1. Edit virtual index <small>[<a title="See guide" href="%s/help/admin/bibindex-admin-guide">?</a>]</small>""" % (CFG_SITE_URL)
idx = get_all_virtual_indexes()
output = ""
if len(idx) > 0:
text = """
<span class="adminlabel">Virtual index name</span>
<select name="idxID" class="admin_w200">
        <option value="-1">- Select an index -</option>
"""
for (idxID, idxNAME) in idx:
text += """<option value="%s">%s</option>""" % (idxID, idxNAME)
text += """</select>"""
output += createhiddenform(action="%s/admin/bibindex/bibindexadmin.py/editvirtualindex" % CFG_SITE_URL,
text=text,
button="Edit",
ln=ln,
confirm=1)
else:
output += """No indexes exist"""
body = [output]
if callback:
return perform_index(ln, "perform_editvirtualindexes", addadminbox(subtitle, body))
else:
return addadminbox(subtitle, body)
def perform_editfields(ln=CFG_SITE_LANG, callback='yes', content='', confirm=-1):
"""show a list of all logical fields that can be edited."""
    subtitle = """<a name="2"></a>2. Edit logical field <small>[<a title="See guide" href="%s/help/admin/bibindex-admin-guide">?</a>]</small>""" % (CFG_SITE_URL)
fin_output = ''
res = get_fld()
output = ""
if len(res) > 0:
text = """
<span class="adminlabel">Field name</span>
<select name="fldID" class="admin_w200">
<option value="-1">- Select a field -</option>
"""
for (fldID, name, code) in res:
text += """<option value="%s">%s</option>""" % (fldID, name)
text += """</select>"""
output += createhiddenform(action="%s/admin/bibindex/bibindexadmin.py/editfield" % CFG_SITE_URL,
text=text,
button="Edit",
ln=ln,
confirm=1)
else:
        output += """No logical fields exist"""
body = [output]
if callback:
return perform_field(ln, "perform_editfields", addadminbox(subtitle, body))
else:
return addadminbox(subtitle, body)
def perform_addindex(ln=CFG_SITE_LANG, idxNAME='', callback="yes", confirm=-1):
"""form to add a new index.
idxNAME - the name of the new index"""
output = ""
    subtitle = """<a name="4"></a>4. Add new index"""
text = """
<span class="adminlabel">Index name</span>
<input class="admin_w200" type="text" name="idxNAME" value="%s" /><br />
""" % idxNAME
output = createhiddenform(action="%s/admin/bibindex/bibindexadmin.py/addindex" % CFG_SITE_URL,
text=text,
ln=ln,
button="Add index",
confirm=1)
if idxNAME and confirm in ["1", 1]:
res = add_idx(idxNAME)
output += write_outcome(res) + """<br /><a href="%s/admin/bibindex/bibindexadmin.py/editindex?idxID=%s&ln=%s">Configure this index</a>.""" % (CFG_SITE_URL, res[1], ln)
elif confirm not in ["-1", -1]:
output += """<b><span class="info">Please give the index a name.</span></b>
"""
body = [output]
if callback:
return perform_index(ln, "perform_addindex", addadminbox(subtitle, body))
else:
return addadminbox(subtitle, body)
def perform_addvirtualindex(ln=CFG_SITE_LANG, idxNEWVID='', idxNEWPID='', callback="yes", confirm=-1):
    """Form to add a physical index to a virtual index.
    idxNEWVID - the id of the virtual index
    idxNEWPID - the id of the physical index to add to it"""
idx = get_all_indexes(virtual=False, with_ids=True)
output = ""
    subtitle = """<a name="2"></a>2. Add new virtual index"""
if len(idx) > 0:
text = """
<span class="adminlabel">Choose new virtual index</span>
<select name="idxNEWVID" class="admin_w200">
<option value="-1">- Select an index -</option>
"""
for (idxID, idxNAME) in idx:
checked = str(idxNEWVID) == str(idxID) and 'selected="selected"' or ''
text += """<option value="%s" %s>%s</option>
""" % (idxID, checked, idxNAME)
text += """</select>"""
text += """
<span class="adminlabel">Add physical index</span>
<select name="idxNEWPID" class="admin_w200">
<option value="-1">- Select an index -</option>
"""
for (idxID, idxNAME) in idx:
text += """<option value="%s">%s</option>""" % (idxID, idxNAME)
text += """</select>"""
output += createhiddenform(action="%s/admin/bibindex/bibindexadmin.py/addvirtualindex" % CFG_SITE_URL,
text=text,
button="Add index",
ln=ln,
confirm=1)
else:
output += """No index exists"""
if idxNEWVID not in ['', "-1", -1] and idxNEWPID not in ['', "-1", -1] and confirm in ["1", 1]:
res = add_virtual_idx(idxNEWVID, idxNEWPID)
output += write_outcome(res)
        output += """<br /><span class="info">Please note that you must run the following as soon as possible:
<pre>$> %s/bibindex --reindex -w %s</pre></span>""" % (CFG_BINDIR, dict(idx)[int(idxNEWPID)])
elif confirm not in ["-1", -1] or idxNEWVID in ["-1", -1] or idxNEWPID in ["-1", -1]:
output += """<b><span class="info">Please specify the index.</span></b>"""
body = [output]
if callback:
return perform_index(ln, "perform_addvirtualindex", addadminbox(subtitle, body))
else:
return addadminbox(subtitle, body)
def perform_modifyindextranslations(idxID, ln=CFG_SITE_LANG, sel_type='', trans=[], confirm=-1, callback='yes'):
"""Modify the translations of a index
sel_type - the nametype to modify
trans - the translations in the same order as the languages from get_languages()"""
output = ''
subtitle = ''
langs = get_languages()
if confirm in ["2", 2] and idxID:
finresult = modify_translations(idxID, langs, sel_type, trans, "idxINDEX")
idx_dict = dict(get_def_name('', "idxINDEX"))
if idxID and int(idxID) in idx_dict:
idxID = int(idxID)
subtitle = """<a name="2"></a>2. Modify translations for index. <small>[<a title="See guide" href="%s/help/admin/bibindex-admin-guide">?</a>]</small>""" % CFG_SITE_URL
if type(trans) is str:
trans = [trans]
if sel_type == '':
sel_type = get_idx_nametypes()[0][0]
header = ['Language', 'Translation']
actions = []
types = get_idx_nametypes()
if len(types) > 1:
text = """
<span class="adminlabel">Name type</span>
<select name="sel_type" class="admin_w200">
"""
for (key, value) in types:
text += """<option value="%s" %s>%s""" % (key, key == sel_type and 'selected="selected"' or '', value)
            trans_names = get_name(idxID, ln, key, "idxINDEX")
if trans_names and trans_names[0][0]:
text += ": %s" % trans_names[0][0]
text += "</option>"
text += """</select>"""
output += createhiddenform(action="modifyindextranslations#2",
text=text,
button="Select",
idxID=idxID,
ln=ln,
confirm=0)
if confirm in [-1, "-1", 0, "0"]:
trans = []
for (key, value) in langs:
try:
trans_names = get_name(idxID, key, sel_type, "idxINDEX")
trans.append(trans_names[0][0])
except StandardError as e:
trans.append('')
for nr in range(0,len(langs)):
actions.append(["%s" % (langs[nr][1],)])
actions[-1].append('<input type="text" name="trans" size="30" value="%s"/>' % trans[nr])
text = tupletotable(header=header, tuple=actions)
output += createhiddenform(action="modifyindextranslations#2",
text=text,
button="Modify",
idxID=idxID,
sel_type=sel_type,
ln=ln,
confirm=2)
if sel_type and len(trans):
if confirm in ["2", 2]:
output += write_outcome(finresult)
body = [output]
if callback:
return perform_editindex(idxID, ln, "perform_modifyindextranslations", addadminbox(subtitle, body))
else:
return addadminbox(subtitle, body)
def perform_modifyfieldtranslations(fldID, ln=CFG_SITE_LANG, sel_type='', trans=[], confirm=-1, callback='yes'):
"""Modify the translations of a field
sel_type - the nametype to modify
trans - the translations in the same order as the languages from get_languages()"""
output = ''
subtitle = ''
langs = get_languages()
if confirm in ["2", 2] and fldID:
finresult = modify_translations(fldID, langs, sel_type, trans, "field")
fld_dict = dict(get_def_name('', "field"))
if fldID and int(fldID) in fld_dict:
fldID = int(fldID)
subtitle = """<a name="3"></a>3. Modify translations for logical field '%s' <small>[<a title="See guide" href="%s/help/admin/bibindex-admin-guide">?</a>]</small>""" % (fld_dict[fldID], CFG_SITE_URL)
if type(trans) is str:
trans = [trans]
if sel_type == '':
sel_type = get_fld_nametypes()[0][0]
header = ['Language', 'Translation']
actions = []
types = get_fld_nametypes()
if len(types) > 1:
text = """
<span class="adminlabel">Name type</span>
<select name="sel_type" class="admin_w200">
"""
for (key, value) in types:
text += """<option value="%s" %s>%s""" % (key, key == sel_type and 'selected="selected"' or '', value)
trans_names = get_name(fldID, ln, key, "field")
if trans_names and trans_names[0][0]:
text += ": %s" % trans_names[0][0]
text += "</option>"
text += """</select>"""
output += createhiddenform(action="modifyfieldtranslations#3",
text=text,
button="Select",
fldID=fldID,
ln=ln,
confirm=0)
if confirm in [-1, "-1", 0, "0"]:
trans = []
for (key, value) in langs:
try:
trans_names = get_name(fldID, key, sel_type, "field")
trans.append(trans_names[0][0])
except StandardError as e:
trans.append('')
for nr in range(0,len(langs)):
actions.append(["%s" % (langs[nr][1],)])
actions[-1].append('<input type="text" name="trans" size="30" value="%s"/>' % trans[nr])
text = tupletotable(header=header, tuple=actions)
output += createhiddenform(action="modifyfieldtranslations#3",
text=text,
button="Modify",
fldID=fldID,
sel_type=sel_type,
ln=ln,
confirm=2)
if sel_type and len(trans):
if confirm in ["2", 2]:
output += write_outcome(finresult)
body = [output]
if callback:
        return perform_editfield(fldID, ln, "perform_modifyfieldtranslations", addadminbox(subtitle, body))
else:
return addadminbox(subtitle, body)
def perform_showdetailsfieldtag(fldID, tagID, ln=CFG_SITE_LANG, callback="yes", confirm=-1):
    """Show which logical fields use a given MARC tag, directly or indirectly.
    fldID - the id of the logical field
    tagID - the id of the MARC tag"""
fld_dict = dict(get_def_name('', "field"))
fldID = int(fldID)
tagname = run_sql("SELECT name from tag where id=%s", (tagID, ))[0][0]
output = ""
subtitle = """<a name="4.1"></a>Showing details for MARC tag '%s'""" % tagname
output += "<br /><b>This MARC tag is used directly in these logical fields:</b> "
fld_tag = get_fld_tags('', tagID)
exist = {}
for (id_field,id_tag, tname, tvalue, score) in fld_tag:
output += "%s, " % fld_dict[int(id_field)]
exist[id_field] = 1
output += "<br /><b>This MARC tag is used indirectly in these logical fields:</b> "
    tag = run_sql("SELECT value from tag where id=%s", (tagID, ))
tag = tag[0][0]
for i in range(0, len(tag) - 1):
res = run_sql("SELECT id_field,id_tag FROM field_tag,tag WHERE tag.id=field_tag.id_tag AND tag.value=%s", ('%' + tag[0:i] + '%',))
for (id_field, id_tag) in res:
output += "%s, " % fld_dict[int(id_field)]
exist[id_field] = 1
res = run_sql("SELECT id_field,id_tag FROM field_tag,tag WHERE tag.id=field_tag.id_tag AND tag.value like %s", (tag, ))
for (id_field, id_tag) in res:
if id_field not in exist:
output += "%s, " % fld_dict[int(id_field)]
body = [output]
if callback:
return perform_modifyfieldtags(fldID, ln, "perform_showdetailsfieldtag", addadminbox(subtitle, body))
else:
return addadminbox(subtitle, body)
def perform_showdetailsfield(fldID, ln=CFG_SITE_LANG, callback="yes", confirm=-1):
    """Show which collections use a logical field.
    fldID - the id of the logical field"""
fld_dict = dict(get_def_name('', "field"))
col_dict = dict(get_def_name('', "collection"))
fldID = int(fldID)
col_fld = get_col_fld('', '', fldID)
sort_types = dict(get_sort_nametypes())
fin_output = ""
    subtitle = """<a name="5"></a>5. Show usage for logical field '%s'""" % fld_dict[fldID]
output = "This logical field is used in these collections:<br />"
ltype = ''
exist = {}
for (id_collection, id_field, id_fieldvalue, ftype, score, score_fieldvalue) in col_fld:
if ltype != ftype:
output += "<br /><b>%s: </b>" % sort_types[ftype]
ltype = ftype
exist = {}
if id_collection not in exist:
output += "%s, " % col_dict[int(id_collection)]
exist[id_collection] = 1
if not col_fld:
output = "This field is not used by any collections."
fin_output = addadminbox('Collections', [output])
body = [fin_output]
if callback:
        return perform_editfield(fldID, ln, "perform_showdetailsfield", addadminbox(subtitle, body))
else:
return addadminbox(subtitle, body)
def perform_addfield(ln=CFG_SITE_LANG, fldNAME='', code='', callback="yes", confirm=-1):
"""form to add a new field.
fldNAME - the name of the new field
code - the field code"""
output = ""
subtitle = """<a name="3"></a>3. Add new logical field"""
code = code.replace(' ', '')
text = """
<span class="adminlabel">Field name</span>
<input class="admin_w200" type="text" name="fldNAME" value="%s" /><br />
<span class="adminlabel">Field code</span>
<input class="admin_w200" type="text" name="code" value="%s" /><br />
""" % (fldNAME, code)
output = createhiddenform(action="%s/admin/bibindex/bibindexadmin.py/addfield" % CFG_SITE_URL,
text=text,
ln=ln,
button="Add field",
confirm=1)
if fldNAME and code and confirm in ["1", 1]:
res = add_fld(fldNAME, code)
output += write_outcome(res)
elif confirm not in ["-1", -1]:
output += """<b><span class="info">Please give the logical field a name and code.</span></b>
"""
body = [output]
if callback:
return perform_field(ln, "perform_addfield", addadminbox(subtitle, body))
else:
return addadminbox(subtitle, body)
def perform_deletefield(fldID, ln=CFG_SITE_LANG, callback='yes', confirm=0):
"""form to remove a field.
fldID - the field id from table field.
"""
fld_dict = dict(get_def_name('', "field"))
if int(fldID) not in fld_dict:
return """<b><span class="info">Field does not exist</span></b>"""
subtitle = """<a name="4"></a>4. Delete the logical field '%s' <small>[<a title="See guide" href="%s/help/admin/bibindex-admin-guide">?</a>]</small>""" % (fld_dict[int(fldID)], CFG_SITE_URL)
output = ""
if fldID:
fldID = int(fldID)
if confirm in ["0", 0]:
check = run_sql("SELECT id_field from idxINDEX_field where id_field=%s", (fldID, ))
text = ""
if check:
text += """<b><span class="info">This field is used in an index, deletion may cause problems.</span></b><br />"""
text += """Do you want to delete the logical field '%s' and all its relations and definitions.""" % (fld_dict[fldID])
output += createhiddenform(action="deletefield#4",
text=text,
button="Confirm",
fldID=fldID,
confirm=1)
elif confirm in ["1", 1]:
res = delete_fld(fldID)
if res[0] == 1:
return """<br /><b><span class="info">Field deleted.</span></b>""" + write_outcome(res)
else:
output += write_outcome(res)
body = [output]
if callback:
return perform_editfield(fldID, ln, "perform_deletefield", addadminbox(subtitle, body))
else:
return addadminbox(subtitle, body)
def perform_deleteindex(idxID, ln=CFG_SITE_LANG, callback='yes', confirm=0):
"""form to delete an index.
idxID - the index id from table idxINDEX.
"""
if idxID:
subtitle = """<a name="5"></a>11. Delete the index. <small>[<a title="See guide" href="%s/help/admin/bibindex-admin-guide">?</a>]</small>""" % CFG_SITE_URL
output = ""
if confirm in ["0", 0]:
idx = get_idx(idxID)
if idx:
text = ""
text += """<b><span class="info">By deleting an index, you may also loose any indexed data in the forward and reverse table for this index.</span></b><br />"""
text += """Do you want to delete the index '%s' and all its relations and definitions.""" % (idx[0][1])
output += createhiddenform(action="deleteindex#5",
text=text,
button="Confirm",
idxID=idxID,
confirm=1)
else:
return """<br /><b><span class="info">Index specified does not exist.</span></b>"""
elif confirm in ["1", 1]:
res = delete_idx(idxID)
if res[0] == 1:
return """<br /><b><span class="info">Index deleted.</span></b>""" + write_outcome(res)
else:
output += write_outcome(res)
body = [output]
if callback:
return perform_editindex(idxID, ln, "perform_deleteindex", addadminbox(subtitle, body))
else:
return addadminbox(subtitle, body)
def perform_deletevirtualindex(ln=CFG_SITE_LANG, idxID='', callback='yes', confirm=-1):
"""form to delete a virtual index.
idxID - the index id from table idxINDEX.
"""
output = ""
subtitle = """<a name="3"></a>3. Delete virtual index"""
idx = get_all_virtual_indexes()
if len(idx) > 0:
text = """<span class="adminlabel">Choose a virtual index</span>
<select name="idxID" class="admin_w200">
<option value="-1">- Select an index -</option>
"""
for idx_id, idx_name in idx:
selected = str(idxID) == str(idx_id) and 'selected="selected"' or ''
text += """<option value="%s" %s>%s</option>""" % (idx_id, selected, idx_name)
text += """</select>"""
output += createhiddenform(action="deletevirtualindex#3",
text=text,
button="Confirm",
confirm=1)
else:
output = "No index specified"
if confirm in ["1", 1] and idxID not in ['', "-1", -1]:
res = delete_virtual_idx(int(idxID))
if res[0] == 1:
output += """<br /><b><span class="info">Virtual index deleted.</span></b><br />"""
output += write_outcome(res)
else:
output += write_outcome(res)
elif idxID in ["-1", -1]:
output += """<b><span class="info">Please specify the index.</span></b>"""
body = [output]
if callback:
return perform_index(ln, "perform_deletevirtualindex", addadminbox(subtitle, body))
else:
return addadminbox(subtitle, body)
def perform_modifydependentindexes(idxID, ln=CFG_SITE_LANG, newIDs=[], callback='yes', confirm=-1):
"""page on which dependent indexes for specific virtual index
can be chosen"""
subtitle = ""
output = ""
non_virtual_indexes = dict(get_all_indexes(virtual=False, with_ids=True)) #[(id1, name1), (id2, name2)..]
already_dependent = dict(get_virtual_index_building_blocks(idxID))
if not already_dependent:
idxID = -1
if idxID not in [-1, "-1"]:
subtitle = """<a name="1"></a>1. Modify dependent indexes.
<small>
[<a title="See guide" href="%s/help/admin/bibindex-admin-guide">?</a>]
</small>""" % CFG_SITE_URL
if confirm in [-1, "-1"]:
newIDs = []
if not newIDs:
newIDs = []
tick_list = ""
checked_values = already_dependent.values()
if confirm > -1:
checked_values = newIDs
for index_name in non_virtual_indexes.values():
checked = index_name in checked_values and 'checked="checked"' or ''
tick_list += """<input type="checkbox" name='newIDs' value="%s" %s >%s </br>""" % \
(index_name, checked, index_name)
output += createhiddenform(action="modifydependentindexes#1",
text=tick_list,
button="Modify",
idxID=idxID,
ln=ln,
confirm=0)
if confirm in [0, "0"] and newIDs == []:
output += "</br>"
text = """
<span class="important">Removing all dependent indexes
means removing virtual index.</span>
<br /> <strong>Are you sure you want to do this?</strong>"""
output += createhiddenform(action="modifydependentindexes#1",
text=text,
button="Confirm",
idxID=idxID,
newIDs=newIDs,
ln=ln,
confirm=1)
elif confirm in [0, "0"]:
output += "</br>"
text = """
<span class="important">You are about to change dependent indexes</span>.<br /> <strong>Are you sure you want to do this?</strong>"""
output += createhiddenform(action="modifydependentindexes#1",
text=text,
button="Confirm",
idxID=idxID,
newIDs=newIDs,
ln=ln,
confirm=1)
elif idxID > -1 and confirm in [1, "1"]:
output += "</br>"
to_add, to_remove = find_dependent_indexes_to_change(idxID, newIDs)
# NOTE: we don't need to take care of the indexes to remove, because
# -w <<virtual_index>> --remove-dependent-index will take care of everything,
# so it's enough to just post a message
res = modify_dependent_indexes(idxID, to_add)
output += write_outcome(res)
if len(to_remove) + len(to_add) > 0:
output += """<br /><span class="info">Please note you should run as soon as possible:"""
if len(to_add) > 0:
output += """<pre>$> %s/bibindex --reindex -w %s</pre>
""" % (CFG_BINDIR, get_index_name_from_index_id(idxID))
for index in to_remove:
output += """<pre>$> %s/bibindex -w %s --remove-dependent-index %s</pre>
""" % (CFG_BINDIR, get_index_name_from_index_id(idxID), index)
if len(to_remove) + len(to_add) > 0:
output += "</span>"
elif confirm in [1, "1"]:
output += """<br /><b><span class="info">Please give a name for the index.</span></b>"""
else:
output = """It seems that this index is not virtual."""
body = [output]
if callback:
return perform_editvirtualindex(idxID, ln, "perform_modifydependentindexes", addadminbox(subtitle, body))
else:
return addadminbox(subtitle, body)
def find_dependent_indexes_to_change(idxID, new_indexes):
"""From new set of dependent indexes finds out
which indexes should be added and which should be removed
from database (idxINDEX_idxINDEX table)
@param idxID: id of the virtual index
@param new_indexes: future set of dependent indexes
"""
if not isinstance(new_indexes, list):
new_indexes = [new_indexes]
dependent_indexes = dict(get_virtual_index_building_blocks(idxID)).values()
to_add = set(new_indexes) - set(dependent_indexes)
to_remove = set(dependent_indexes) - set(new_indexes)
return list(to_add), list(to_remove)
def perform_showfieldoverview(ln=CFG_SITE_LANG, callback='', confirm=0):
subtitle = """<a name="1"></a>1. Logical fields overview"""
output = """<table cellpadding="3" border="1">"""
output += """<tr><td><strong>%s</strong></td>
<td><strong>%s</strong></td>
<td><strong>%s</strong></td>
<td><strong>%s</strong></td></tr>""" % ("Field", "MARC Tags", "RecJson Fields", "Translations")
query = "SELECT id,name FROM field"
res = run_sql(query)
col_dict = dict(get_def_name('', "collection"))
fld_dict = dict(get_def_name('', "field"))
for field_id,field_name in res:
query = """SELECT tag.value, tag.recjson_value FROM tag,
field_tag
WHERE tag.id=field_tag.id_tag AND
field_tag.id_field=%s
ORDER BY field_tag.score DESC,tag.value ASC"""
tag_values = run_sql(query, (field_id, ) )
marc_tags = recjson_fields = """<b><span class="info">None</span></b>"""
if tag_values:
try:
marc_tags_l = [tag for tag in zip(*tag_values)[0] if tag]
marc_tags = marc_tags_l and ", ".join(marc_tags_l) or marc_tags
recjson = [x for f in zip(*tag_values)[1] if f for x in f.split(",")]
recjson_fields = recjson and ", ".join(recjson) or recjson_fields
except IndexError:
pass
lang = get_lang_list("fieldname", "id_field", field_id)
output += """<tr><td>%s</td>
<td>%s</td>
<td>%s</td>
<td>%s</td></tr>""" % ("""<a href="%s/admin/bibindex/bibindexadmin.py/editfield?fldID=%s&ln=%s">%s</a>
""" % (CFG_SITE_URL, field_id, ln, fld_dict[field_id]),
marc_tags,
recjson_fields,
lang)
output += "</table>"
body = [output]
if callback:
return perform_field(ln, "perform_showfieldoverview", addadminbox(subtitle, body))
else:
return addadminbox(subtitle, body)
def perform_modifyindex(idxID, ln=CFG_SITE_LANG, idxNAME='', idxDESC='', callback='yes', confirm=-1):
"""form to modify an index name.
idxID - the index name to change.
idxNAME - new name of index
idxDESC - description of index content"""
subtitle = ""
output = ""
idx = get_idx(idxID)
if not idx:
idxID = -1
if idxID not in [-1, "-1"]:
subtitle = """<a name="2"></a>1. Modify index name. <small>[<a title="See guide" href="%s/help/admin/bibindex-admin-guide">?</a>]</small>""" % CFG_SITE_URL
if confirm in [-1, "-1"]:
idxNAME = idx[0][1]
idxDESC = idx[0][2]
text = """
<span class="adminlabel">Index name</span>
<input class="admin_w200" type="text" name="idxNAME" value="%s" /><br />
<span class="adminlabel">Index description</span>
<textarea class="admin_w200" name="idxDESC">%s</textarea><br />
""" % (idxNAME, idxDESC)
output += createhiddenform(action="modifyindex#1",
text=text,
button="Modify",
idxID=idxID,
ln=ln,
confirm=1)
if idxID > -1 and idxNAME and confirm in [1, "1"]:
res = modify_idx(idxID, idxNAME, idxDESC)
output += write_outcome(res)
elif confirm in [1, "1"]:
output += """<br /><b><span class="info">Please give a name for the index.</span></b>"""
else:
output = """No index to modify."""
body = [output]
if callback:
return perform_editindex(idxID, ln, "perform_modifyindex", addadminbox(subtitle, body))
else:
return addadminbox(subtitle, body)
def perform_modifyindexstemming(idxID, ln=CFG_SITE_LANG, idxSTEM='', callback='yes', confirm=-1):
"""form to modify an index name.
idxID - the index name to change.
idxSTEM - new stemming language code"""
subtitle = ""
output = ""
stemming_language_map = get_stemming_language_map()
stemming_language_map['None'] = ''
idx = get_idx(idxID)
if not idx:
idxID = -1
if idxID not in [-1, "-1"]:
subtitle = """<a name="4"></a>4. Modify index stemming language. <small>[<a title="See guide" href="%s/help/admin/bibindex-admin-guide">?</a>]</small>""" % CFG_SITE_URL
if confirm in [-1, "-1"]:
idxSTEM = idx[0][4]
if not idxSTEM:
idxSTEM = ''
language_html_element = """<select name="idxSTEM" class="admin_w200">"""
languages = stemming_language_map.keys()
languages.sort()
for language in languages:
if stemming_language_map[language] == idxSTEM:
selected = 'selected="selected"'
else:
selected = ""
language_html_element += """<option value="%s" %s>%s</option>""" % (stemming_language_map[language], selected, language)
language_html_element += """</select>"""
text = """
<span class="adminlabel">Index stemming language</span>
""" + language_html_element
output += createhiddenform(action="modifyindexstemming#4",
text=text,
button="Modify",
idxID=idxID,
ln=ln,
confirm=0)
if confirm in [0, "0"] and get_idx(idxID)[0][4] == idxSTEM:
output += """<span class="info">Stemming language has not been changed</span>"""
elif confirm in [0, "0"]:
text = """
<span class="important">You are about to either disable or change the stemming language setting for this index. Please note that it is not recommended to enable stemming for structured-data indexes like "report number", "year", "author" or "collection". On the contrary, it is advisable to enable stemming for indexes like "fulltext", "abstract", "title", etc. since this would overall improve the retrieval quality. <br /> Beware, however, that after disabling or changing the stemming language setting of an index you will have to reindex it. It is a good idea to change the stemming language and to reindex during low usage hours of your service, since searching results will be potentially affected by the discrepancy between search terms now being (not) stemmed and indexes still using the previous settings until the reindexing is completed</span>.<br /> <strong>Are you sure you want to disable/change the stemming language setting of this index?</strong>"""
output += createhiddenform(action="modifyindexstemming#4",
text=text,
button="Modify",
idxID=idxID,
idxSTEM=idxSTEM,
ln=ln,
confirm=1)
elif idxID > -1 and confirm in [1, "1"]:
res = modify_idx_stemming(idxID, idxSTEM)
output += write_outcome(res)
output += """<br /><span class="info">Please note you must run as soon as possible:
<pre>$> %s/bibindex --reindex -w %s</pre></span>
""" % (CFG_BINDIR, get_idx(idxID)[0][1])
elif confirm in [1, "1"]:
output += """<br /><b><span class="info">Please give a name for the index.</span></b>"""
else:
output = """No index to modify."""
body = [output]
if callback:
return perform_editindex(idxID, ln, "perform_modifyindexstemming", addadminbox(subtitle, body))
else:
return addadminbox(subtitle, body)
def perform_modifyindexer(idxID, ln=CFG_SITE_LANG, indexer='', callback='yes', confirm=-1):
"""form to modify an indexer.
idxID - the index name to change.
indexer - indexer type: native/SOLR/XAPIAN/virtual"""
subtitle = ""
output = ""
idx = get_idx(idxID)
if idx:
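# `cond and A or B` is the pre-Python-2.5 conditional idiom, equivalent to
# `A if cond else B` whenever A is truthy (the literal "virtual" always is).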
current_indexer = is_index_virtual(idx[0][0]) and "virtual" or get_idx_indexer(idx[0][1])
subtitle = """<a name="4"></a>5. Modify indexer.
<small>
[<a title="See guide" href="%s/help/admin/bibindex-admin-guide">?</a>]
</small>""" % CFG_SITE_URL
if confirm in [-1, "-1"]:
indexer = current_indexer or ''
items = ["native"]
if idx[0][1] == "fulltext":
items.extend(["SOLR", "XAPIAN"])
else:
items.extend(["virtual"])
html_element = """<select name="indexer" class="admin_w200">"""
for item in items:
selected = indexer==item and 'selected="selected"' or ''
html_element += """<option value="%s" %s>%s</option>""" % (item, selected, item)
html_element += """</select>"""
text = """<span class="adminlabel">Indexer type</span>""" + html_element
output += createhiddenform(action="modifyindexer#5",
text=text,
button="Modify",
idxID=idxID,
ln=ln,
confirm=1)
if confirm in [1, "1"] and idx[0][1]=="fulltext":
res = modify_idx_indexer(idxID, indexer)
output += write_outcome(res)
output += """<br /><span class="info">Please note you should run:
<pre>$> %s/bibindex --reindex -w fulltext</pre></span>""" % CFG_BINDIR
elif confirm in [1, "1"]:
if indexer=="virtual" and current_indexer == "native":
params = {'idxNEWVID': idxID}
return perform_index(ln, "perform_addvirtualindex", "", **params)
elif indexer=="native" and current_indexer == "virtual":
params = {'idxID':idxID}
return perform_index(ln, "perform_deletevirtualindex", "", **params)
else:
output = """No index to modify."""
body = [output]
if callback:
return perform_editindex(idxID, ln, "perform_modifyindexer", addadminbox(subtitle, body))
else:
return addadminbox(subtitle, body)
def perform_modifysynonymkb(idxID, ln=CFG_SITE_LANG, idxKB='', idxMATCH='', callback='yes', confirm=-1):
"""form to modify the knowledge base for the synonym lookup.
idxID - the index name to change.
idxKB - new knowledge base name
idxMATCH - new match type
"""
subtitle = ""
output = ""
idx = get_idx(idxID)
if not idx:
idxID = -1
if idxID not in [-1, "-1"]:
subtitle = """<a name="4"></a>5. Modify knowledge base for synonym lookup. <small>[<a title="See guide" href="%s/help/admin/bibindex-admin-guide">?</a>]</small>""" % CFG_SITE_URL
if confirm in [-1, "-1"]:
field_value = get_idx_synonym_kb(idxID)
if CFG_BIBINDEX_COLUMN_VALUE_SEPARATOR in field_value:
idxKB, idxMATCH = field_value.split(CFG_BIBINDEX_COLUMN_VALUE_SEPARATOR)
if not idxKB:
idxKB = ''
idxMATCH = ''
kb_html_element = """<select name="idxKB" class="admin_w200">"""
knowledge_base_names = get_all_kb_names()
knowledge_base_names.append(CFG_BIBINDEX_SYNONYM_MATCH_TYPE["None"])
knowledge_base_names.sort()
for knowledge_base_name in knowledge_base_names:
if knowledge_base_name == idxKB:
selected = 'selected="selected"'
else:
selected = ""
kb_html_element += """<option value="%s" %s>%s</option>""" % (knowledge_base_name, selected, knowledge_base_name)
kb_html_element += """</select>"""
match_html_element = """<select name="idxMATCH" class="admin_w200">"""
match_names = CFG_BIBINDEX_SYNONYM_MATCH_TYPE.values()
match_names.sort()
for match_name in match_names:
if match_name == idxMATCH:
selected = 'selected="selected"'
else:
selected = ""
match_html_element += """<option value="%s" %s>%s</option>""" % (match_name, selected, match_name)
match_html_element += """</select>"""
text = """<span class="adminlabel">Knowledge base name and match type</span>""" + kb_html_element + match_html_element
output += createhiddenform(action="modifysynonymkb#4",
text=text,
button="Modify",
idxID=idxID,
ln=ln,
confirm=0)
if confirm in [0, "0"] and get_idx(idxID)[0][5] == idxKB + CFG_BIBINDEX_COLUMN_VALUE_SEPARATOR + idxMATCH:
output += """<span class="info">Knowledge base has not been changed</span>"""
elif confirm in [0, "0"]:
text = """
<span class="important">You are going to change the knowledge base for this index.<br /> <strong>Are you sure you want
to change the knowledge base of this index?</strong>"""
output += createhiddenform(action="modifysynonymkb#4",
text=text,
button="Modify",
idxID=idxID,
idxKB=idxKB,
idxMATCH=idxMATCH,
ln=ln,
confirm=1)
elif idxID > -1 and confirm in [1, "1"]:
res = modify_idx_synonym_kb(idxID, idxKB, idxMATCH)
output += write_outcome(res)
output += """<br /><span class="info">Please note that you must run as soon as possible:
<pre>$> %s/bibindex --reindex -w %s</pre></span>""" % (CFG_BINDIR, get_idx(idxID)[0][1])
elif confirm in [1, "1"]:
output += """<br /><b><span class="info">Please give a name for the index.</span></b>"""
else:
output = """No index to modify."""
body = [output]
if callback:
return perform_editindex(idxID, ln, "perform_modifysynonymkb", addadminbox(subtitle, body))
else:
return addadminbox(subtitle, body)
def perform_modifystopwords(idxID, ln=CFG_SITE_LANG, idxSTOPWORDS='', callback='yes', confirm=-1):
"""Form to modify the stopwords configuration
@param idxID: id of the index on which modification will be performed.
@param idxSTOPWORDS: remove stopwords or not ('Yes' or 'No')
"""
subtitle = ""
output = ""
idx = get_idx(idxID)
if not idx:
idxID = -1
if idxID not in [-1, "-1"]:
subtitle = """<a name="4"></a>6. Modify remove stopwords. <small>[<a title="See guide" href="%s/help/admin/bibindex-admin-guide">?</a>]</small>""" % CFG_SITE_URL
if confirm in [-1, "-1"]:
idxSTOPWORDS = get_idx_remove_stopwords(idxID)
if not idxSTOPWORDS:
idxSTOPWORDS = ''
if isinstance(idxSTOPWORDS, tuple):
idxSTOPWORDS = ''
stopwords_html_element = """<input class="admin_w200" type="text" name="idxSTOPWORDS" value="%s" /><br />""" % idxSTOPWORDS
text = """<span class="adminlabel">Remove stopwords</span><br />""" + stopwords_html_element
output += createhiddenform(action="modifystopwords#4",
text=text,
button="Modify",
idxID=idxID,
ln=ln,
confirm=0)
if confirm in [0, "0"] and get_idx(idxID)[0][6] == idxSTOPWORDS:
output += """<span class="info">Stopwords have not been changed</span>"""
elif confirm in [0, "0"] and idxSTOPWORDS == '':
output += """<span class="info">You need to provide a name of the file with stopwords</span>"""
elif confirm in [0, "0"]:
text = """<span class="important">You are going to change the stopwords configuration for this index.<br />
<strong>Are you sure you want to do this?</strong>"""
output += createhiddenform(action="modifystopwords#4",
text=text,
button="Modify",
idxID=idxID,
idxSTOPWORDS=idxSTOPWORDS,
ln=ln,
confirm=1)
elif idxID > -1 and confirm in [1, "1"]:
res = modify_idx_stopwords(idxID, idxSTOPWORDS)
output += write_outcome(res)
output += """<br /><span class="info">Please note you must run as soon as possible:
<pre>$> %s/bibindex --reindex -w %s</pre></span>""" % (CFG_BINDIR, get_idx(idxID)[0][1])
elif confirm in [1, "1"]:
output += """<br /><b><span class="info">Please give a name for the index.</span></b>"""
else:
output = """No index to modify."""
body = [output]
if callback:
return perform_editindex(idxID, ln, "perform_modifystopwords", addadminbox(subtitle, body))
else:
return addadminbox(subtitle, body)
def perform_modifyremovehtml(idxID, ln=CFG_SITE_LANG, idxHTML='', callback='yes', confirm=-1):
"""Form to modify the 'remove html' configuration.
@param idxID: id of the index on which modification will be performed.
@param idxHTML: remove html markup or not ('Yes' or 'No')"""
subtitle = ""
output = ""
idx = get_idx(idxID)
if not idx:
idxID = -1
if idxID not in [-1, "-1"]:
subtitle = """<a name="4"></a>7. Modify remove HTML markup. <small>[<a title="See guide" href="%s/help/admin/bibindex-admin-guide">?</a>]</small>""" % CFG_SITE_URL
if confirm in [-1, "-1"]:
idxHTML = get_idx_remove_html_markup(idxID)
if not idxHTML:
idxHTML = ''
remove_html_element = """<select name="idxHTML" class="admin_w200">"""
if idxHTML == 'Yes':
remove_html_element += """<option value="Yes" selected ="selected">Yes</option>"""
remove_html_element += """<option value="No">No</option>"""
elif idxHTML == 'No':
remove_html_element += """<option value="Yes">Yes</option>"""
remove_html_element += """<option value="No" selected ="selected">No</option>"""
else:
remove_html_element += """<option value="Yes">Yes</option>"""
remove_html_element += """<option value="No">No</option>"""
remove_html_element += """</select>"""
text = """<span class="adminlabel">Remove HTML markup</span>""" + remove_html_element
output += createhiddenform(action="modifyremovehtml#4",
text=text,
button="Modify",
idxID=idxID,
ln=ln,
confirm=0)
if confirm in [0, "0"] and get_idx_remove_html_markup(idxID) == idxHTML:
output += """<span class="info">Remove HTML markup parameter has not been changed</span>"""
elif confirm in [0, "0"]:
text = """<span class="important">You are going to change the remove HTML markup for this index.<br />
<strong>Are you sure you want to change the remove HTML markup of this index?</strong>"""
output += createhiddenform(action="modifyremovehtml#4",
text=text,
button="Modify",
idxID=idxID,
idxHTML=idxHTML,
ln=ln,
confirm=1)
elif idxID > -1 and confirm in [1, "1"]:
res = modify_idx_html_markup(idxID, idxHTML)
output += write_outcome(res)
output += """<br /><span class="info">Please note you must run as soon as possible:
<pre>$> %s/bibindex --reindex -w %s</pre></span>""" % (CFG_BINDIR, get_idx(idxID)[0][1])
elif confirm in [1, "1"]:
output += """<br /><b><span class="info">Please give a name for the index.</span></b>"""
else:
output = """No index to modify."""
body = [output]
if callback:
return perform_editindex(idxID, ln, "perform_modifyremovehtml", addadminbox(subtitle, body))
else:
return addadminbox(subtitle, body)
def perform_modifyremovelatex(idxID, ln=CFG_SITE_LANG, idxLATEX='', callback='yes', confirm=-1):
"""Form to modify the 'remove latex' configuration.
@param idxID: id of the index on which modification will be performed.
@param idxLATEX: remove latex markup or not ('Yes' or 'No')"""
subtitle = ""
output = ""
idx = get_idx(idxID)
if not idx:
idxID = -1
if idxID not in [-1, "-1"]:
subtitle = """<a name="4"></a>8. Modify remove latex markup. <small>[<a title="See guide" href="%s/help/admin/bibindex-admin-guide">?</a>]</small>""" % CFG_SITE_URL
if confirm in [-1, "-1"]:
idxLATEX = get_idx_remove_latex_markup(idxID)
if not idxLATEX:
idxLATEX = ''
remove_latex_element = """<select name="idxLATEX" class="admin_w200">"""
if idxLATEX == 'Yes':
remove_latex_element += """<option value="Yes" selected ="selected">Yes</option>"""
remove_latex_element += """<option value="No">No</option>"""
elif idxLATEX == 'No':
remove_latex_element += """<option value="Yes">Yes</option>"""
remove_latex_element += """<option value="No" selected ="selected">No</option>"""
else:
remove_latex_element += """<option value="Yes">Yes</option>"""
remove_latex_element += """<option value="No">No</option>"""
remove_latex_element += """</select>"""
text = """<span class="adminlabel">Remove latex markup</span>""" + remove_latex_element
output += createhiddenform(action="modifyremovelatex#4",
text=text,
button="Modify",
idxID=idxID,
ln=ln,
confirm=0)
if confirm in [0, "0"] and get_idx_remove_latex_markup(idxID) == idxLATEX:
output += """<span class="info">Remove latex markup parameter has not been changed</span>"""
elif confirm in [0, "0"]:
text = """<span class="important">You are going to change the remove latex markup for this index.<br />
<strong>Are you sure you want to change the remove latex markup of this index?</strong>"""
output += createhiddenform(action="modifyremovelatex#4",
text=text,
button="Modify",
idxID=idxID,
idxLATEX=idxLATEX,
ln=ln,
confirm=1)
elif idxID > -1 and confirm in [1, "1"]:
res = modify_idx_latex_markup(idxID, idxLATEX)
output += write_outcome(res)
output += """<br /><span class="info">Please note you must run as soon as possible:
<pre>$> %s/bibindex --reindex -w %s</pre></span>""" % (CFG_BINDIR, get_idx(idxID)[0][1])
elif confirm in [1, "1"]:
output += """<br /><b><span class="info">Please give a name for the index.</span></b>"""
else:
output = """No index to modify."""
body = [output]
if callback:
return perform_editindex(idxID, ln, "perform_modifyremovelatex", addadminbox(subtitle, body))
else:
return addadminbox(subtitle, body)
def perform_modifytokenizer(idxID, ln=CFG_SITE_LANG, idxTOK='', callback='yes', confirm=-1):
"""Form to modify the 'tokenizer' configuration.
@param idxID: id of the index on which modification will be performed.
@param idxTOK: tokenizer name"""
subtitle = ""
output = ""
idx = get_idx(idxID)
if not idx:
idxID = -1
if idxID not in [-1, "-1"]:
subtitle = """<a name="4"></a>9. Modify tokenizer. <small>[<a title="See guide" href="%s/help/admin/bibindex-admin-guide">?</a>]</small>""" % CFG_SITE_URL
if confirm in [-1, "-1"]:
idxTOK = get_idx_tokenizer(idxID)
if not idxTOK:
idxTOK = ''
tokenizer_element = """<select name="idxTOK" class="admin_w200">"""
tokenizers = [tokenizer for tokenizer in _TOKENIZERS if _TOKENIZERS[tokenizer]().implemented]
for key in tokenizers:
if key == idxTOK:
tokenizer_element += """<option value="%s" selected ="selected">%s</option>""" % (key, key)
else:
tokenizer_element += """<option value="%s">%s</option>""" % (key, key)
tokenizer_element += """</select>"""
text = """<span class="adminlabel">Tokenizer</span>""" + tokenizer_element
output += createhiddenform(action="modifytokenizer#4",
text=text,
button="Modify",
idxID=idxID,
ln=ln,
confirm=0)
if confirm in [0, "0"] and get_idx_tokenizer(idxID) == idxTOK:
output += """<span class="info">Tokenizer has not been changed</span>"""
elif confirm in [0, "0"]:
text = """<span class="important">You are going to change a tokenizer for this index.<br />
<strong>Are you sure you want to do this?</strong>"""
output += createhiddenform(action="modifytokenizer#4",
text=text,
button="Modify",
idxID=idxID,
idxTOK=idxTOK,
ln=ln,
confirm=1)
elif idxID > -1 and confirm in [1, "1"]:
res = modify_idx_tokenizer(idxID, idxTOK)
output += write_outcome(res)
output += """<br /><span class="info">Please note you must run as soon as possible:
<pre>$> %s/bibindex --reindex -w %s</pre></span>""" % (CFG_BINDIR, get_idx(idxID)[0][1])
elif confirm in [1, "1"]:
output += """<br /><b><span class="info">Please give a name for the index.</span></b>"""
else:
output = """No index to modify."""
body = [output]
if callback:
return perform_editindex(idxID, ln, "perform_modifytokenizer", addadminbox(subtitle, body))
else:
return addadminbox(subtitle, body)
def perform_modifyfield(fldID, ln=CFG_SITE_LANG, code='', callback='yes', confirm=-1):
"""form to modify a field.
fldID - the field to change."""
subtitle = ""
output = ""
fld_dict = dict(get_def_name('', "field"))
if fldID not in [-1, "-1"]:
if confirm in [-1, "-1"]:
res = get_fld(fldID)
code = res[0][2]
else:
code = str.replace("%s" % code, " ", "")
fldID = int(fldID)
subtitle = """<a name="2"></a>1. Modify field code for logical field '%s' <small>[<a title="See guide" href="%s/help/admin/bibindex-admin-guide">?</a>]</small>""" % (fld_dict[int(fldID)], CFG_SITE_URL)
text = """
<span class="adminlabel">Field code</span>
<input class="admin_w200" type="text" name="code" value="%s" /><br />
""" % code
output += createhiddenform(action="modifyfield#2",
text=text,
button="Modify",
fldID=fldID,
ln=ln,
confirm=1)
if fldID > -1 and confirm in [1, "1"]:
fldID = int(fldID)
res = modify_fld(fldID, code)
output += write_outcome(res)
else:
output = """No field to modify.
"""
body = [output]
if callback:
return perform_editfield(fldID, ln, "perform_modifyfield", addadminbox(subtitle, body))
else:
return addadminbox(subtitle, body)
def perform_modifyindexfields(idxID, ln=CFG_SITE_LANG, callback='yes', content='', confirm=-1):
"""Modify which logical fields to use in this index.."""
fields = get_index_fields(idxID)
output = ''
subtitle = """<a name="3"></a>3. Modify index fields. <small>[<a title="See guide" href="%s/help/admin/bibindex-admin-guide">?</a>]</small>""" % CFG_SITE_URL
output += """<table cellpadding="3" border="1">"""
output += """<tr><td><strong>%s</strong></td>
<td><strong>%s</strong></td>
<td><strong>%s</strong></td>
""" % ("Field", "MARC Tags", "RecJson Fields")
for field_id, field_name in fields:
query = """SELECT tag.value, tag.recjson_value FROM tag,
field_tag
WHERE tag.id=field_tag.id_tag AND
field_tag.id_field=%s
ORDER BY field_tag.score DESC,tag.value ASC"""
tag_values = run_sql(query, (field_id, ) )
marc_tags = tag_values and ", ".join(zip(*tag_values)[0]) or """<b><span class="info">None</span></b>"""
recjson_fields = """<b><span class="info">None</span></b>"""
if tag_values:
recjson = [x for f in zip(*tag_values)[1] if f for x in f.split(",")]
recjson_fields = recjson and ", ".join(recjson) or recjson_fields
output += """<tr><td>%s</td>
<td>%s</td>
<td>%s</td></tr>
""" % ("""<a href="%s/admin/bibindex/bibindexadmin.py/editfield?fldID=%s&ln=%s">%s</a>
""" % (CFG_SITE_URL, field_id, ln, field_name),
marc_tags,
recjson_fields)
output += "</table>"
output += """<dl>
<dt>Menu</dt>
<dd><a href="%s/admin/bibindex/bibindexadmin.py/addindexfield?idxID=%s&ln=%s#3.1">Add field to index</a></dd>
<dd><a href="%s/admin/bibindex/bibindexadmin.py/field?ln=%s">Manage fields</a></dd>
</dl>
""" % (CFG_SITE_URL, idxID, ln, CFG_SITE_URL, ln)
header = ['Field', '']
actions = []
idx_fld = get_idx_fld(idxID)
if len(idx_fld) > 0:
for (idxID, idxNAME,fldID, fldNAME, regexp_punct, regexp_alpha_sep) in idx_fld:
actions.append([fldNAME])
for col in [(('Remove','removeindexfield'),)]:
actions[-1].append('<a href="%s/admin/bibindex/bibindexadmin.py/%s?idxID=%s&fldID=%s&ln=%s#3.1">%s</a>' % (CFG_SITE_URL, col[0][1], idxID, fldID, ln, col[0][0]))
for (_str, function) in col[1:]:
actions[-1][-1] += ' / <a href="%s/admin/bibindex/bibindexadmin.py/%s?fldID=%s&flID=%s&ln=%s#4.1">%s</a>' % (CFG_SITE_URL, function, idxID, fldID, ln, _str)
output += tupletotable(header=header, tuple=actions)
else:
output += """No index fields exists"""
output += content
body = [output]
if callback:
return perform_editindex(idxID, ln, "perform_modifyindexfields", addadminbox(subtitle, body))
else:
return addadminbox(subtitle, body)
def perform_modifyfieldtags(fldID, ln=CFG_SITE_LANG, callback='yes', content='', confirm=-1):
"""show the sort fields of this collection.."""
output = ''
fld_dict = dict(get_def_name('', "field"))
fld_type = get_fld_nametypes()
fldID = int(fldID)
subtitle = """<a name="4"></a>3. Modify tags for the logical field '%s' <small>[<a title="See guide" href="%s/help/admin/bibindex-admin-guide">?</a>]</small>""" % (fld_dict[int(fldID)], CFG_SITE_URL)
output = """<dl>
<dt>Menu</dt>
<dd><a href="%s/admin/bibindex/bibindexadmin.py/addtag?fldID=%s&ln=%s#4.1">Add a new tag</a></dd>
<dd><a href="%s/admin/bibindex/bibindexadmin.py/deletetag?fldID=%s&ln=%s#4.1">Delete unused tags</a></dd>
</dl>
""" % (CFG_SITE_URL, fldID, ln, CFG_SITE_URL, fldID, ln)
header = ['', 'Value', 'Comment', 'Actions']
actions = []
res = get_fld_tags(fldID)
if len(res) > 0:
i = 0
for (fldID, tagID, tname, tvalue, score) in res:
move = ""
if i != 0:
move += """<a href="%s/admin/bibindex/bibindexadmin.py/switchtagscore?fldID=%s&id_1=%s&id_2=%s&ln=%s&=rand=%s#4"><img border="0" src="%s/img/smallup.gif" title="Move tag up"></a>""" % (CFG_SITE_URL, fldID, tagID, res[i - 1][1], ln, random.randint(0, 1000), CFG_SITE_URL)
else:
move += " "
i += 1
if i != len(res):
move += '<a href="%s/admin/bibindex/bibindexadmin.py/switchtagscore?fldID=%s&id_1=%s&id_2=%s&ln=%s&rand=%s#4"><img border="0" src="%s/img/smalldown.gif" title="Move tag down"></a>' % (CFG_SITE_URL, fldID, tagID, res[i][1], ln, random.randint(0, 1000), CFG_SITE_URL)
actions.append([move, tvalue, tname])
for col in [(('Details','showdetailsfieldtag'), ('Modify','modifytag'),('Remove','removefieldtag'),)]:
actions[-1].append('<a href="%s/admin/bibindex/bibindexadmin.py/%s?fldID=%s&tagID=%s&ln=%s#4.1">%s</a>' % (CFG_SITE_URL, col[0][1], fldID, tagID, ln, col[0][0]))
for (_str, function) in col[1:]:
actions[-1][-1] += ' / <a href="%s/admin/bibindex/bibindexadmin.py/%s?fldID=%s&tagID=%s&ln=%s#4.1">%s</a>' % (CFG_SITE_URL, function, fldID, tagID, ln, _str)
output += tupletotable(header=header, tuple=actions)
else:
output += """No fields exists"""
output += content
body = [output]
if callback:
return perform_editfield(fldID, ln, "perform_modifyfieldtags", addadminbox(subtitle, body))
else:
return addadminbox(subtitle, body)
def perform_addtag(fldID, ln=CFG_SITE_LANG, name='', value='', recjson_value='', existing_tag=-1, callback="yes", confirm=-1):
"""Form to add a new tag to the field specified by fldID.
@param fldID: the name of the field which we want to extend with a new tag
@param existing_tag: id of the existing tag we want to add to given field or -1
if we want to add completely new tag
@param value: MARC value for new tag, can be empty string
@param recjson_value: non-MARC value for new tag, can be empty string
@param name: name of the new tag to add to field and to the list of tags
@param confirm: state of the confirmation: -1 not started, 0 waiting for confirmation, 1 confirmed
"""
output = ""
subtitle = """<a name="4.1"></a>Add a tag to logical field"""
text = """Add new tag:<br />
<span class="adminlabel">MARC value</span>
<input class="admin_w200" maxlength="6" type="text" name="value" value="%s" /><br />
<span class="adminlabel">RecJson value</span>
<input class="admin_w200" type="text" name="recjson_value" value="%s" /><br />
<span class="adminlabel">Name</span>
<input class="admin_w200" type="text" name="name" value="%s" /><br />
""" % (value, recjson_value, name)
text += """Or existing tag:<br />
<span class="adminlabel">Tag</span>
<select name="existing_tag" class="admin_w200">
<option value="-1">- Select a tag -</option>
"""
fld_tags = get_fld_tags(fldID)
tags = get_tags()
fld_tags = dict(map(lambda x: (x[1], x[0]), fld_tags))
for (_id_tag, _name, _value, _recjson_value) in tags:
if not fld_tags.has_key(_id_tag):
text += """<option value="%s" %s>%s</option>""" % (_id_tag,
(_id_tag==existing_tag and 'selected="selected"' or ''),
"%s - %s" % (_name, _value))
text += """</select>"""
output = createhiddenform(action="%s/admin/bibindex/bibindexadmin.py/addtag" % CFG_SITE_URL,
text=text,
fldID=fldID,
ln=ln,
button="Add tag",
confirm=1)
if confirm in ["1", 1]:
if ((value or recjson_value) and existing_tag in [-1, "-1"]) or \
(not value and not recjson_value and existing_tag not in [-1, "-1"]):
res = add_fld_tag(fldID, name, value, recjson_value, existing_tag)
output += write_outcome(res)
elif not value and not recjson_value and existing_tag in [-1, "-1"]:
output += """<b><span class="info">Please choose to add either a new or an existing MARC tag.</span></b>
"""
else:
output += """<b><span class="info">Please choose to add either a new or an existing MARC tag, but not both.</span></b>
"""
body = [output]
if callback:
return perform_modifyfieldtags(fldID, ln, "perform_addtag", addadminbox(subtitle, body))
else:
return addadminbox(subtitle, body)
def perform_modifytag(fldID, tagID, ln=CFG_SITE_LANG, name='', value='', recjson_value='', callback='yes', confirm=-1):
"""form to modify a field.
fldID - the field to change."""
subtitle = """<a name="3.1"></a>Modify a tag"""
output = ""
fldID = int(fldID)
tagID = int(tagID)
tag = get_tags(tagID)
if confirm in [-1, "-1"] and not value and not name:
name = tag[0][1]
value = tag[0][2]
recjson_value = tag[0][3]
text = """
Any modifications will apply to all logical fields using this tag.<br />
<span class="adminlabel">Name</span>
<input class="admin_w200" type="text" name="name" value="%s" /><br />
<span class="adminlabel">MARC value</span>
<input class="admin_w200" type="text" name="value" value="%s" /><br />
<span class="adminlabel">RecJson value</span>
<input class="admin_w200" type="text" name="recjson_value" value="%s" /><br />
""" % (name, value, recjson_value)
output += createhiddenform(action="modifytag#4.1",
text=text,
button="Modify",
fldID=fldID,
tagID=tagID,
ln=ln,
confirm=1)
if name and (value or recjson_value) and confirm in [1, "1"]:
res = modify_tag(tagID, name, value, recjson_value)
output += write_outcome(res)
body = [output]
if callback:
return perform_modifyfieldtags(fldID, ln, "perform_modifytag", addadminbox(subtitle, body))
else:
return addadminbox(subtitle, body)
def perform_removefieldtag(fldID, tagID, ln=CFG_SITE_LANG, callback='yes', confirm=0):
"""form to remove a tag from a field.
fldID - the current field, remove the tag from this field.
tagID - remove the tag with this id"""
subtitle = """<a name="4.1"></a>Remove MARC tag from logical field"""
output = ""
fld_dict = dict(get_def_name('', "field"))
if fldID and tagID:
fldID = int(fldID)
tagID = int(tagID)
tag = get_fld_tags(fldID, tagID)
if confirm not in ["1", 1]:
text = """Do you want to remove the tag '%s - %s ' from the field '%s'.""" % (tag[0][3], tag[0][2], fld_dict[fldID])
output += createhiddenform(action="removefieldtag#4.1",
text=text,
button="Confirm",
fldID=fldID,
tagID=tagID,
confirm=1)
elif confirm in ["1", 1]:
res = remove_fldtag(fldID, tagID)
output += write_outcome(res)
body = [output]
if callback:
return perform_modifyfieldtags(fldID, ln, "perform_removefieldtag", addadminbox(subtitle, body))
else:
return addadminbox(subtitle, body)
def perform_addindexfield(idxID, ln=CFG_SITE_LANG, fldID='', callback="yes", confirm=-1):
"""form to add a new field.
fldNAME - the name of the new field
code - the field code"""
output = ""
subtitle = """<a name="4.1"></a>Add logical field to index"""
text = """
<span class="adminlabel">Field name</span>
<select name="fldID" class="admin_w200">
<option value="-1">- Select a field -</option>
"""
fld = get_fld()
for (fldID2, fldNAME, fldCODE) in fld:
text += """<option value="%s" %s>%s</option>""" % (fldID2, (fldID==fldID2 and 'selected="selected"' or ''), fldNAME)
text += """</select>"""
output = createhiddenform(action="%s/admin/bibindex/bibindexadmin.py/addindexfield" % CFG_SITE_URL,
text=text,
idxID=idxID,
ln=ln,
button="Add field",
confirm=1)
if fldID and not fldID in [-1, "-1"] and confirm in ["1", 1]:
res = add_idx_fld(idxID, fldID)
output += write_outcome(res)
elif confirm in ["1", 1]:
output += """<b><span class="info">Please select a field to add.</span></b>"""
body = [output]
if callback:
return perform_modifyindexfields(idxID, ln, "perform_addindexfield", addadminbox(subtitle, body))
else:
return addadminbox(subtitle, body)
def perform_removeindexfield(idxID, fldID, ln=CFG_SITE_LANG, callback='yes', confirm=0):
"""form to remove a field from an index.
idxID - the current index, remove the field from this index.
fldID - remove the field with this id"""
subtitle = """<a name="3.1"></a>Remove field from index"""
output = ""
if fldID and idxID:
fldID = int(fldID)
idxID = int(idxID)
fld = get_fld(fldID)
idx = get_idx(idxID)
if fld and idx and confirm not in ["1", 1]:
text = """Do you want to remove the field '%s' from the index '%s'.""" % (fld[0][1], idx[0][1])
output += createhiddenform(action="removeindexfield#3.1",
text=text,
button="Confirm",
idxID=idxID,
fldID=fldID,
confirm=1)
elif confirm in ["1", 1]:
res = remove_idxfld(idxID, fldID)
output += write_outcome(res)
body = [output]
if callback:
return perform_modifyindexfields(idxID, ln, "perform_removeindexfield", addadminbox(subtitle, body))
else:
return addadminbox(subtitle, body)
def perform_switchtagscore(fldID, id_1, id_2, ln=CFG_SITE_LANG):
"""Switch the score of id_1 and id_2 in the table type.
colID - the current collection
id_1/id_2 - the id's to change the score for.
type - like "format" """
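# Example (hypothetical ids): swapping the scores of tags 12 and 13 of
# field 5 reverses their order in the tag listing of perform_modifyfieldtags:
#   perform_switchtagscore(5, 12, 13)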
output = ""
name_1 = run_sql("select name from tag where id=%s", (id_1, ))[0][0]
name_2 = run_sql("select name from tag where id=%s", (id_2, ))[0][0]
res = switch_score(fldID, id_1, id_2)
output += write_outcome(res)
return perform_modifyfieldtags(fldID, ln, content=output)
def perform_deletetag(fldID, ln=CFG_SITE_LANG, tagID=-1, callback='yes', confirm=-1):
"""form to delete an MARC tag not in use.
fldID - the collection id of the current collection.
fmtID - the format id to delete."""
subtitle = """<a name="10.3"></a>Delete an unused tag"""
output = """
<dl>
<dd>Deleting a tag will also delete its associated translations.</dd>
</dl>
"""
fldID = int(fldID)
if tagID not in [-1," -1"] and confirm in [1, "1"]:
ares = delete_tag(tagID)
fld_tag = get_fld_tags()
fld_tag = dict(map(lambda x: (x[1], x[0]), fld_tag))
tags = get_tags()
text = """
<span class="adminlabel">Tag</span>
<select name="tagID" class="admin_w200">
"""
text += """<option value="-1">- Select a tag -"""
i = 0
for (id, name, value, value_recjson) in tags:
if not fld_tag.has_key(id):
text += """<option value="%s" %s>%s</option>""" % (id,
id == int(tagID) and 'selected="selected"' or '',
"%s - %s" % (name, value))
i += 1
text += """</select><br />"""
if i == 0:
output += """<b><span class="info">No unused tags</span></b><br />"""
else:
output += createhiddenform(action="deletetag#4.1",
text=text,
button="Delete",
fldID=fldID,
ln=ln,
confirm=0)
if tagID not in [-1,"-1"]:
tagID = int(tagID)
tags = get_tags(tagID)
if confirm in [0, "0"]:
text = """<b>Do you want to delete the tag '%s'.</b>""" % tags[0][2]
output += createhiddenform(action="deletetag#4.1",
text=text,
button="Confirm",
fldID=fldID,
tagID=tagID,
ln=ln,
confirm=1)
elif confirm in [1, "1"]:
output += write_outcome(ares)
elif confirm not in [-1, "-1"]:
output += """<b><span class="info">Choose a tag to delete.</span></b>"""
body = [output]
output = "<br />" + addadminbox(subtitle, body)
return perform_modifyfieldtags(fldID, ln, content=output)
def compare_on_val(first, second):
"""Compare the two values"""
return cmp(first[1], second[1])
def get_col_fld(colID=-1, type = '', id_field=''):
"""Returns either all portalboxes associated with a collection, or based on either colID or language or both.
colID - collection id
ln - language id"""
sql = "SELECT id_collection,id_field,id_fieldvalue,type,score,score_fieldvalue FROM collection_field_fieldvalue, field WHERE id_field=field.id"
params = []
try:
if id_field:
sql += " AND id_field=%s"
params.append(id_field)
sql += " ORDER BY type, score desc, score_fieldvalue desc"
res = run_sql(sql, tuple(params))
return res
except StandardError as e:
return ""
def get_idx(idxID=''):
sql = "SELECT id,name,description,last_updated,stemming_language, synonym_kbrs,remove_stopwords,remove_html_markup,remove_latex_markup,tokenizer FROM idxINDEX"
params = []
try:
if idxID:
sql += " WHERE id=%s"
params.append(idxID)
sql += " ORDER BY id asc"
res = run_sql(sql, tuple(params))
return res
except StandardError as e:
return ""
def get_idx_synonym_kb(idxID):
"""Returns a synonym knowledge base field value"""
try:
return run_sql("SELECT synonym_kbrs FROM idxINDEX WHERE ID=%s", (idxID, ))[0][0]
except StandardError as e:
return e.__str__()
def get_idx_remove_stopwords(idxID):
"""Returns a stopwords field value"""
try:
return run_sql("SELECT remove_stopwords FROM idxINDEX WHERE ID=%s", (idxID, ))[0][0]
except StandardError as e:
return (0, e)
def get_idx_remove_html_markup(idxID):
"""Returns a remove html field value"""
try:
return run_sql("SELECT remove_html_markup FROM idxINDEX WHERE ID=%s", (idxID, ))[0][0]
except StandardError as e:
return (0, e)
def get_idx_remove_latex_markup(idxID):
"""Returns a remove latex field value"""
try:
return run_sql("SELECT remove_latex_markup FROM idxINDEX WHERE ID=%s", (idxID, ))[0][0]
except StandardError as e:
return (0, e)
def get_idx_tokenizer(idxID):
"""Returns a tokenizer field value"""
try:
return run_sql("SELECT tokenizer FROM idxINDEX WHERE ID=%s", (idxID, ))[0][0]
except StandardError as e:
return (0, e)
def get_fld_tags(fldID='', tagID=''):
"""Returns tags associated with a field.
fldID - field id
tagID - tag id"""
sql = "SELECT id_field, id_tag, tag.name, tag.value, score FROM field_tag,tag WHERE tag.id=field_tag.id_tag"
params = []
try:
if fldID:
sql += " AND id_field=%s"
params.append(fldID)
if tagID:
sql += " AND id_tag=%s"
params.append(tagID)
sql += " ORDER BY score desc, tag.value, tag.name"
res = run_sql(sql, tuple(params))
return res
except StandardError as e:
return ""
def get_tags(tagID=''):
"""Returns all or a given tag.
tagID - tag id
ln - language id"""
sql = "SELECT id, name, value, recjson_value FROM tag"
params = []
try:
if tagID:
sql += " WHERE id=%s"
params.append(tagID)
sql += " ORDER BY name, value"
res = run_sql(sql, tuple(params))
return res
except StandardError as e:
return ""
def get_fld(fldID=''):
"""Returns all fields or only the given field"""
try:
if not fldID:
res = run_sql("SELECT id, name, code FROM field ORDER by name, code")
else:
res = run_sql("SELECT id, name, code FROM field WHERE id=%s ORDER by name, code", (fldID, ))
return res
except StandardError as e:
return ""
def get_fld_id(fld_name=''):
"""Returns field id for a field name"""
try:
res = run_sql('SELECT id FROM field WHERE name=%s', (fld_name,))
return res[0][0]
except StandardError as e:
return ''
def get_fld_value(fldvID = ''):
"""Returns fieldvalue"""
try:
sql = "SELECT id, name, value FROM fieldvalue"
params = []
if fldvID:
sql += " WHERE id=%s"
params.append(fldvID)
res = run_sql(sql, tuple(params))
return res
except StandardError as e:
return ""
def get_idx_fld(idxID=''):
"""Return a list of fields associated with one or all indexes"""
try:
sql = "SELECT id_idxINDEX, idxINDEX.name, id_field, field.name, regexp_punctuation, regexp_alphanumeric_separators FROM idxINDEX, field, idxINDEX_field WHERE idxINDEX.id = idxINDEX_field.id_idxINDEX AND field.id = idxINDEX_field.id_field"
params = []
if idxID:
sql += " AND id_idxINDEX=%s"
params.append(idxID)
sql += " ORDER BY id_idxINDEX asc"
res = run_sql(sql, tuple(params))
return res
except StandardError as e:
return ""
def get_col_nametypes():
"""Return a list of the various translationnames for the fields"""
type = []
type.append(('ln', 'Long name'))
return type
def get_fld_nametypes():
"""Return a list of the various translationnames for the fields"""
type = []
type.append(('ln', 'Long name'))
return type
def get_idx_nametypes():
"""Return a list of the various translationnames for the index"""
type = []
type.append(('ln', 'Long name'))
return type
def get_sort_nametypes():
"""Return a list of the various translationnames for the fields"""
type = {}
type['soo'] = 'Sort options'
type['seo'] = 'Search options'
type['sew'] = 'Search within'
return type
def remove_fld(colID,fldID, fldvID=''):
"""Removes a field from the collection given.
colID - the collection the format is connected to
fldID - the field which should be removed from the collection."""
try:
sql = "DELETE FROM collection_field_fieldvalue WHERE id_collection=%s AND id_field=%s"
params = [colID, fldID]
if fldvID:
sql += " AND id_fieldvalue=%s"
params.append(fldvID)
res = run_sql(sql, tuple(params))
return (1, "")
except StandardError as e:
return (0, e)
def remove_idxfld(idxID, fldID):
"""Remove a field from a index in table idxINDEX_field
idxID - index id from idxINDEX
fldID - field id from field table"""
try:
sql = "DELETE FROM idxINDEX_field WHERE id_field=%s and id_idxINDEX=%s"
res = run_sql(sql, (fldID, idxID))
return (1, "")
except StandardError as e:
return (0, e)
def remove_fldtag(fldID,tagID):
"""Removes a tag from the field given.
fldID - the field the tag is connected to
tagID - the tag which should be removed from the field."""
try:
sql = "DELETE FROM field_tag WHERE id_field=%s AND id_tag=%s"
res = run_sql(sql, (fldID, tagID))
return (1, "")
except StandardError as e:
return (0, e)
def delete_tag(tagID):
"""Deletes all data for the given field
fldID - delete all data in the tables associated with field and this id """
try:
res = run_sql("DELETE FROM tag where id=%s", (tagID, ))
return (1, "")
except StandardError as e:
return (0, e)
def delete_idx(idxID):
"""Deletes all data for the given index together with the idxWORDXXR and idxWORDXXF tables"""
try:
idxID = int(idxID)
res = run_sql("DELETE FROM idxINDEX WHERE id=%s", (idxID, ))
res = run_sql("DELETE FROM idxINDEXNAME WHERE id_idxINDEX=%s", (idxID, ))
res = run_sql("DELETE FROM idxINDEX_field WHERE id_idxINDEX=%s", (idxID, ))
res = run_sql("DROP TABLE idxWORD%02dF" % idxID) # kwalitee: disable=sql
res = run_sql("DROP TABLE idxWORD%02dR" % idxID) # kwalitee: disable=sql
res = run_sql("DROP TABLE idxPAIR%02dF" % idxID) # kwalitee: disable=sql
res = run_sql("DROP TABLE idxPAIR%02dR" % idxID) # kwalitee: disable=sql
res = run_sql("DROP TABLE idxPHRASE%02dF" % idxID) # kwalitee: disable=sql
res = run_sql("DROP TABLE idxPHRASE%02dR" % idxID) # kwalitee: disable=sql
return (1, "")
except StandardError as e:
return (0, e)
def delete_virtual_idx(idxID):
"""Deletes this virtual index - it means that function
changes type of the index from 'virtual' to 'normal'
@param idxID -id of the virtual index to delete/change into normal idx
"""
try:
run_sql("""UPDATE idxINDEX SET indexer='native'
WHERE id=%s""", (idxID, ))
run_sql("""DELETE FROM idxINDEX_idxINDEX
WHERE id_virtual=%s""", (idxID, ))
drop_queue_tables(idxID)
return (1, "")
except StandardError as e:
return (0, e)
def delete_fld(fldID):
"""Deletes all data for the given field
fldID - delete all data in the tables associated with field and this id """
try:
res = run_sql("DELETE FROM collection_field_fieldvalue WHERE id_field=%s", (fldID, ))
res = run_sql("DELETE FROM field_tag WHERE id_field=%s", (fldID, ))
res = run_sql("DELETE FROM idxINDEX_field WHERE id_field=%s", (fldID, ))
res = run_sql("DELETE FROM field WHERE id=%s", (fldID, ))
return (1, "")
except StandardError as e:
return (0, e)
def add_idx(idxNAME):
"""Add a new index. returns the id of the new index.
idxID - the id for the index, number
idxNAME - the default name for the default language of the format."""
try:
idxID = 0
res = run_sql("SELECT id from idxINDEX WHERE name=%s", (idxNAME,))
if res:
return (0, (0, "A index with the given name already exists."))
for i in xrange(1, 100):
res = run_sql("SELECT id from idxINDEX WHERE id=%s", (i, ))
res2 = get_table_status_info("idxWORD%02d%%" % i)
if not res and not res2:
idxID = i
break
if idxID == 0:
return (0, (0, "Not possible to create new indexes, delete an index and try again."))
res = run_sql("INSERT INTO idxINDEX (id, name) VALUES (%s,%s)", (idxID, idxNAME))
type = get_idx_nametypes()[0][0]
res = run_sql("INSERT INTO idxINDEXNAME (id_idxINDEX, ln, type, value) VALUES (%s,%s,%s,%s)",
(idxID, CFG_SITE_LANG, type, idxNAME))
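# Each index gets three table pairs: a forward table (...F) mapping a term
# to the hitlist of records containing it, and a reverse table (...R)
# mapping a record to its termlist -- for single words (idxWORD), word
# pairs (idxPAIR) and phrases (idxPHRASE).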
res = run_sql("""CREATE TABLE IF NOT EXISTS idxWORD%02dF (
id mediumint(9) unsigned NOT NULL auto_increment,
term varchar(50) default NULL,
hitlist longblob,
PRIMARY KEY (id),
UNIQUE KEY term (term)
) ENGINE=MyISAM""" % idxID)
res = run_sql("""CREATE TABLE IF NOT EXISTS idxWORD%02dR (
id_bibrec mediumint(9) unsigned NOT NULL,
termlist longblob,
type enum('CURRENT','FUTURE','TEMPORARY') NOT NULL default 'CURRENT',
PRIMARY KEY (id_bibrec,type),
KEY type (type)
) ENGINE=MyISAM""" % idxID)
res = run_sql("""CREATE TABLE IF NOT EXISTS idxPAIR%02dF (
id mediumint(9) unsigned NOT NULL auto_increment,
term varchar(100) default NULL,
hitlist longblob,
PRIMARY KEY (id),
UNIQUE KEY term (term)
) ENGINE=MyISAM""" % idxID)
res = run_sql("""CREATE TABLE IF NOT EXISTS idxPAIR%02dR (
id_bibrec mediumint(9) unsigned NOT NULL,
termlist longblob,
type enum('CURRENT','FUTURE','TEMPORARY') NOT NULL default 'CURRENT',
PRIMARY KEY (id_bibrec,type),
KEY type (type)
) ENGINE=MyISAM""" % idxID)
res = run_sql("""CREATE TABLE IF NOT EXISTS idxPHRASE%02dF (
id mediumint(9) unsigned NOT NULL auto_increment,
term text default NULL,
hitlist longblob,
PRIMARY KEY (id),
KEY term (term(50))
) ENGINE=MyISAM""" % idxID)
res = run_sql("""CREATE TABLE IF NOT EXISTS idxPHRASE%02dR (
id_bibrec mediumint(9) unsigned NOT NULL default '0',
termlist longblob,
type enum('CURRENT','FUTURE','TEMPORARY') NOT NULL default 'CURRENT',
PRIMARY KEY (id_bibrec,type),
KEY type (type)
) ENGINE=MyISAM""" % idxID)
res = run_sql("SELECT id from idxINDEX WHERE id=%s", (idxID, ))
res2 = get_table_status_info("idxWORD%02dF" % idxID)
res3 = get_table_status_info("idxWORD%02dR" % idxID)
if res and res2 and res3:
return (1, res[0][0])
elif not res:
return (0, (0, "Could not add the new index to idxINDEX"))
elif not res2:
return (0, (0, "Forward table not created for unknown reason."))
elif not res3:
return (0, (0, "Reverse table not created for unknown reason."))
except StandardError as e:
return (0, e)
def create_queue_tables(index_id):
"""Creates queue tables for virtual index.
Queue tables store orders for virtual index
from its dependent indexes.
@param index_id: id of the index we want to create queue tables for
"""
query = """
CREATE TABLE IF NOT EXISTS idx%s%02dQ (
id mediumint(10) unsigned NOT NULL auto_increment,
runtime datetime NOT NULL default '0000-00-00 00:00:00',
id_bibrec_low mediumint(9) unsigned NOT NULL,
id_bibrec_high mediumint(9) unsigned NOT NULL,
index_name varchar(50) NOT NULL default '',
mode varchar(50) NOT NULL default 'update',
PRIMARY KEY (id),
INDEX (index_name),
INDEX (runtime)
) ENGINE=MyISAM;"""
run_sql(query % ("WORD", int(index_id)))
run_sql(query % ("PAIR", int(index_id)))
run_sql(query % ("PHRASE", int(index_id)))
def drop_queue_tables(index_id):
"""
Drops queue tables.
@param index_id: id of the index we want to drop tables for
"""
query = """DROP TABLE IF EXISTS idx%s%02dQ"""
run_sql(query % ("WORD", int(index_id)))
run_sql(query % ("PAIR", int(index_id)))
run_sql(query % ("PHRASE", int(index_id)))
def add_virtual_idx(id_virtual, id_normal):
"""Adds new virtual index and its first dependent index.
Doesn't change index's settings, but they're not
used anymore.
Uses function add_dependent_index, because
query in both cases is the same.
"""
try:
run_sql("""UPDATE idxINDEX SET indexer='virtual'
WHERE id=%s""", (id_virtual, ))
create_queue_tables(id_virtual)
return add_dependent_index(id_virtual, id_normal)
except StandardError as e:
return (0, e)
def modify_dependent_indexes(idxID, indexes_to_add=[]):
"""
Adds indexes to a list of dependent indexes of
a specific virtual index.
@param idxID: id of the virtual index
@param indexes_to_add: list of names of indexes which
should be added as new dependent
indexes for a virtual index
"""
all_indexes = dict(get_all_index_names_and_column_values("id"))
for index_name in indexes_to_add:
res = add_dependent_index(idxID, all_indexes[index_name])
if res[0] == 0:
return res
return (1, "")
def add_dependent_index(id_virtual, id_normal):
"""Adds dependent index to specific virtual index"""
try:
query = """INSERT INTO idxINDEX_idxINDEX (id_virtual, id_normal)
VALUES (%s, %s)""" % (id_virtual, id_normal)
res = run_sql(query)
return (1, "")
except StandardError as e:
return (0, e)
def add_fld(name, code):
"""Add a new logical field. Returns the id of the field.
code - the code for the field,
name - the default name for the default language of the field."""
try:
type = get_fld_nametypes()[0][0]
res = run_sql("INSERT INTO field (name, code) VALUES (%s,%s)", (name, code))
fldID = run_sql("SELECT id FROM field WHERE code=%s", (code,))
res = run_sql("INSERT INTO fieldname (id_field, type, ln, value) VALUES (%s,%s,%s,%s)", (fldID[0][0], type, CFG_SITE_LANG, name))
if fldID:
return (1, fldID[0][0])
else:
raise StandardError
except StandardError as e:
return (0, e)
def add_fld_tag(fldID, name='', value='', recjson_value='', existing_tag=-1, score=0):
"""Add a completly new tag (with MARC value, RecJson value or both) or existing one
to specific field.
@param fldID: the id of the field
@param name: name of the new tag
@param value: MARC value of the new tag
@param recjson_value: RecJson value of the new tag
@param existing_tag: id of the existing tag to add or -1 if we want to add a new tag
@param score: score assigned to tag
"""
try:
existing_tag = int(existing_tag)
if not score:
res = run_sql("SELECT score FROM field_tag WHERE id_field=%s ORDER BY score desc", (fldID, ))
if res:
score = int(res[0][0]) + 1
if existing_tag > -1:
res = run_sql("INSERT INTO field_tag(id_field, id_tag, score) values(%s, %s, %s)", (fldID, existing_tag, score))
return (1, "")
elif name != '' and (value != '' or recjson_value != ''):
res = run_sql("INSERT INTO tag (name, value, recjson_value) VALUES (%s,%s,%s)", (name, value, recjson_value))
res = run_sql("SELECT id FROM tag WHERE name=%s AND value=%s AND recjson_value=%s", (name, value, recjson_value))
res = run_sql("INSERT INTO field_tag(id_field, id_tag, score) values(%s, %s, %s)", (fldID, res[0][0], score))
return (1, "")
else:
return (0, "Not all necessary values specified")
except StandardError as e:
return (0, e)
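# Illustrative usage (ids and names are made up): attach a brand new MARC
# tag to field 7 with an auto-computed score,
#   add_fld_tag(7, name='first author name', value='100__a')
# or attach the already-defined tag 42 with an explicit score:
#   add_fld_tag(7, existing_tag=42, score=20)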
def add_idx_fld(idxID, fldID):
"""Add a field to an index"""
try:
sql = "SELECT id_idxINDEX FROM idxINDEX_field WHERE id_idxINDEX=%s and id_field=%s"
res = run_sql(sql, (idxID, fldID))
if res:
return (0, (0, "The field selected already exists for this index"))
sql = "INSERT INTO idxINDEX_field(id_idxINDEX, id_field) values (%s, %s)"
res = run_sql(sql, (idxID, fldID))
return (1, "")
except StandardError as e:
return (0, e)
def update_all_queue_tables_with_new_name(idxID, idxNAME_new, idxNAME_old):
"""
Updates queue tables for all virtual indexes connected to this index
with new name of this index.
@param idxID: id of the index
@param idxNAME_new: new name for specified index
@param idxNAME_old: old name of specified index
"""
virtual_indexes = get_index_virtual_indexes(idxID)
for index in virtual_indexes:
id_virtual, name = index
query = """UPDATE idxWORD%02dQ SET index_name=%%s WHERE index_name=%%s""" % id_virtual
run_sql(query, (idxNAME_new, idxNAME_old))
query = """UPDATE idxPAIR%02dQ SET index_name=%%s WHERE index_name=%%s""" % id_virtual
run_sql(query, (idxNAME_new, idxNAME_old))
query = """UPDATE idxPHRASE%02dQ SET index_name=%%s WHERE index_name=%%s""" % id_virtual
run_sql(query, (idxNAME_new, idxNAME_old))
def modify_idx(idxID, idxNAME, idxDESC):
"""Modify index name or index description in idxINDEX table"""
query = """SELECT proc,status FROM schTASK WHERE proc='bibindex' AND status='RUNNING'"""
res = run_sql(query)
if len(res) == 0:
idxNAME_old = get_index_name_from_index_id(idxID)
try:
update_all_queue_tables_with_new_name(idxID, idxNAME, idxNAME_old)
res = run_sql("UPDATE idxINDEX SET name=%s WHERE id=%s", (idxNAME, idxID))
res = run_sql("UPDATE idxINDEX SET description=%s WHERE ID=%s", (idxDESC, idxID))
return (1, "")
except StandardError as e:
return (0, e)
else:
return (0, "Try again later. Cannot change details of an index when bibindex is running.")
def modify_idx_stemming(idxID, idxSTEM):
"""Modify the index stemming language in idxINDEX table"""
try:
run_sql("UPDATE idxINDEX SET stemming_language=%s WHERE ID=%s", (idxSTEM, idxID))
return (1, "")
except StandardError as e:
return (0, e)
def modify_idx_indexer(idxID, indexer):
"""Modify an indexer type in idxINDEX table"""
try:
res = run_sql("UPDATE idxINDEX SET indexer=%s WHERE ID=%s", (indexer, idxID))
return (1, "")
except StandardError as e:
return (0, e)
def modify_idx_synonym_kb(idxID, idxKB, idxMATCH):
"""Modify the knowledge base for the synonym lookup in idxINDEX table
@param idxID: id of the index in idxINDEX table
@param idxKB: name of the knowledge base (for example: INDEX-SYNONYM-TITLE)
        @param idxMATCH: type of match in the knowledge base: exact, leading-to-comma, leading-to-number
"""
try:
field_value = ""
if idxKB != CFG_BIBINDEX_SYNONYM_MATCH_TYPE["None"] and idxMATCH != CFG_BIBINDEX_SYNONYM_MATCH_TYPE["None"]:
field_value = idxKB + CFG_BIBINDEX_COLUMN_VALUE_SEPARATOR + idxMATCH
run_sql("UPDATE idxINDEX SET synonym_kbrs=%s WHERE ID=%s", (field_value, idxID))
return (1, "")
except StandardError as e:
return (0, e)
def modify_idx_stopwords(idxID, idxSTOPWORDS):
"""Modify the stopwords in idxINDEX table
@param idxID: id of the index which we modify
@param idxSTOPWORDS: tells if stopwords should be removed ('Yes' or 'No')
"""
try:
run_sql("UPDATE idxINDEX SET remove_stopwords=%s WHERE ID=%s", (idxSTOPWORDS, idxID))
return (1, "")
except StandardError as e:
return (0, e)
def modify_idx_html_markup(idxID, idxHTML):
"""Modify the index remove html markup in idxINDEX table"""
try:
run_sql("UPDATE idxINDEX SET remove_html_markup=%s WHERE ID=%s", (idxHTML, idxID))
return (1, "")
except StandardError as e:
return (0, e)
def modify_idx_latex_markup(idxID, idxLATEX):
"""Modify the index remove latex markup in idxINDEX table"""
try:
run_sql("UPDATE idxINDEX SET remove_latex_markup=%s WHERE ID=%s", (idxLATEX, idxID))
return (1, "")
except StandardError as e:
return (0, e)
def modify_idx_tokenizer(idxID, idxTOK):
"""Modify a tokenizer in idxINDEX table for given index"""
try:
run_sql("UPDATE idxINDEX SET tokenizer=%s WHERE ID=%s", (idxTOK, idxID))
return (1, "")
except StandardError as e:
return (0, e)
def modify_fld(fldID, code):
"""Modify the code of field
fldID - the id of the field to modify
code - the new code"""
try:
sql = "UPDATE field SET code=%s"
sql += " WHERE id=%s"
res = run_sql(sql, (code, fldID))
return (1, "")
except StandardError as e:
return (0, e)
def modify_tag(tagID, name, value, recjson_value):
"""Modify the name and value of a tag.
@param tagID: the id of the tag to modify
@param name: the new name of the tag
@param value: the new MARC value of the tag
@param recjson_value: the new RecJson value of the tag
"""
try:
sql = "UPDATE tag SET name=%s, value=%s, recjson_value=%s WHERE id=%s"
res = run_sql(sql, (name, value, recjson_value, tagID))
return (1, "")
except StandardError as e:
return (0, e)
def switch_score(fldID, id_1, id_2):
"""Switch the scores of id_1 and id_2 in the table given by the argument.
colID - collection the id_1 or id_2 is connected to
id_1/id_2 - id field from tables like format..portalbox...
table - name of the table"""
try:
res1 = run_sql("SELECT score FROM field_tag WHERE id_field=%s and id_tag=%s", (fldID, id_1))
res2 = run_sql("SELECT score FROM field_tag WHERE id_field=%s and id_tag=%s", (fldID, id_2))
res = run_sql("UPDATE field_tag SET score=%s WHERE id_field=%s and id_tag=%s", (res2[0][0], fldID, id_1))
res = run_sql("UPDATE field_tag SET score=%s WHERE id_field=%s and id_tag=%s", (res1[0][0], fldID, id_2))
return (1, "")
except StandardError as e:
return (0, e)
def get_lang_list(table, field, id):
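    """Return a comma-separated string of the distinct languages present in
    table.field for the given id, or a styled 'None' when there are none."""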
langs = run_sql("SELECT ln FROM %s WHERE %s=%%s" % (wash_table_column_name(table), wash_table_column_name(field)), (id, )) # kwalitee: disable=sql
exists = {}
lang = ''
for lng in langs:
if lng[0] not in exists:
lang += lng[0] + ", "
exists[lng[0]] = 1
if lang.endswith(", "):
        lang = lang[:-2]
if len(exists) == 0:
lang = """<b><span class="info">None</span></b>"""
return lang
def check_user(req, role, adminarea=2, authorized=0):
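    """Check that the user may act in the given role; returns a tuple
    (error, message) where error is the empty string when access is
    granted and "false" otherwise."""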
# This function is similar to the one found in
# oairepository/lib/oai_repository_admin.py, bibrank/lib/bibrankadminlib.py and
# websubmit/lib/websubmitadmin_engine.py.
auth_code, auth_message = acc_authorize_action(req, role)
if not authorized and auth_code != 0:
return ("false", auth_message)
return ("", auth_message)
| 1 | 12,294 | This one is important `2: I102 copyright year is outdated, expected 2014 but got 2012` | inveniosoftware-invenio | py |
@@ -32,6 +32,8 @@ import java.util.Map;
import java.util.Optional;
import java.util.Set;
import java.util.concurrent.TimeoutException;
+import java.util.function.BiConsumer;
+import java.util.function.Consumer;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps; | 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.solr.client.solrj.impl;
import java.io.IOException;
import java.lang.invoke.MethodHandles;
import java.net.URL;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Locale;
import java.util.Map;
import java.util.Optional;
import java.util.Set;
import java.util.concurrent.TimeoutException;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.google.common.collect.Sets;
import org.apache.http.impl.client.CloseableHttpClient;
import org.apache.lucene.util.LuceneTestCase.Slow;
import org.apache.lucene.util.TestUtil;
import org.apache.solr.client.solrj.SolrClient;
import org.apache.solr.client.solrj.SolrQuery;
import org.apache.solr.client.solrj.SolrServerException;
import org.apache.solr.client.solrj.embedded.JettySolrRunner;
import org.apache.solr.client.solrj.request.AbstractUpdateRequest;
import org.apache.solr.client.solrj.request.CollectionAdminRequest;
import org.apache.solr.client.solrj.request.QueryRequest;
import org.apache.solr.client.solrj.request.UpdateRequest;
import org.apache.solr.client.solrj.request.V2Request;
import org.apache.solr.client.solrj.response.QueryResponse;
import org.apache.solr.client.solrj.response.RequestStatusState;
import org.apache.solr.client.solrj.response.SolrPingResponse;
import org.apache.solr.client.solrj.response.UpdateResponse;
import org.apache.solr.cloud.AbstractDistribZkTestBase;
import org.apache.solr.cloud.SolrCloudTestCase;
import org.apache.solr.common.SolrDocument;
import org.apache.solr.common.SolrDocumentList;
import org.apache.solr.common.SolrException;
import org.apache.solr.common.SolrInputDocument;
import org.apache.solr.common.cloud.ClusterState;
import org.apache.solr.common.cloud.DocCollection;
import org.apache.solr.common.cloud.DocRouter;
import org.apache.solr.common.cloud.PerReplicaStates;
import org.apache.solr.common.cloud.Replica;
import org.apache.solr.common.cloud.Slice;
import org.apache.solr.common.cloud.ZkStateReader;
import org.apache.solr.common.params.CommonParams;
import org.apache.solr.common.params.ModifiableSolrParams;
import org.apache.solr.common.params.ShardParams;
import org.apache.solr.common.params.UpdateParams;
import org.apache.solr.common.util.NamedList;
import org.apache.solr.common.util.SimpleOrderedMap;
import org.apache.solr.handler.admin.CollectionsHandler;
import org.apache.solr.handler.admin.ConfigSetsHandler;
import org.apache.solr.handler.admin.CoreAdminHandler;
import org.apache.solr.util.LogLevel;
import org.junit.After;
import org.junit.AfterClass;
import org.junit.Before;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.ExpectedException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import static org.apache.solr.client.solrj.SolrRequest.METHOD.POST;
/**
* This test would be faster if we simulated the zk state instead.
*/
@Slow
@LogLevel("org.apache.solr.cloud.Overseer=INFO;org.apache.solr.common.cloud=INFO;org.apache.solr.cloud.api.collections=INFO;org.apache.solr.cloud.overseer=INFO")
public class CloudSolrClientTest extends SolrCloudTestCase {
private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
private static final String COLLECTION = "collection1";
private static final String COLLECTION2 = "2nd_collection";
private static final String id = "id";
private static final int TIMEOUT = 30;
private static final int NODE_COUNT = 3;
private static CloudSolrClient httpBasedCloudSolrClient = null;
@Before
public void setupCluster() throws Exception {
System.setProperty("metricsEnabled", "true");
configureCluster(NODE_COUNT)
.addConfig("conf", getFile("solrj").toPath().resolve("solr").resolve("configsets").resolve("streaming").resolve("conf"))
.configure();
final List<String> solrUrls = new ArrayList<>();
solrUrls.add(cluster.getJettySolrRunner(0).getBaseUrl().toString());
httpBasedCloudSolrClient = new CloudSolrClient.Builder(solrUrls).build();
}
@After
public void tearDown() throws Exception {
if (httpBasedCloudSolrClient != null) {
try {
httpBasedCloudSolrClient.close();
} catch (IOException e) {
throw new RuntimeException(e);
}
}
shutdownCluster();
super.tearDown();
}
@AfterClass
public static void cleanUpAfterClass() throws Exception {
httpBasedCloudSolrClient = null;
}
/**
* Randomly return the cluster's ZK based CSC, or HttpClusterProvider based CSC.
*/
private CloudSolrClient getRandomClient() {
return random().nextBoolean() ? cluster.getSolrClient() : httpBasedCloudSolrClient;
}
@Test
public void testParallelUpdateQTime() throws Exception {
CollectionAdminRequest.createCollection(COLLECTION, "conf", 2, 1)
.setPerReplicaState(USE_PER_REPLICA_STATE)
.process(cluster.getSolrClient());
cluster.waitForActiveCollection(COLLECTION, 2, 2);
UpdateRequest req = new UpdateRequest();
for (int i = 0; i < 10; i++) {
SolrInputDocument doc = new SolrInputDocument();
doc.addField("id", String.valueOf(TestUtil.nextInt(random(), 1000, 1100)));
req.add(doc);
}
UpdateResponse response = req.process(getRandomClient(), COLLECTION);
// See SOLR-6547, we just need to ensure that no exception is thrown here
assertTrue(response.getQTime() >= 0);
}
@Test
public void testOverwriteOption() throws Exception {
CollectionAdminRequest.createCollection("overwrite", "conf", 1, 1)
.setPerReplicaState(USE_PER_REPLICA_STATE)
.processAndWait(cluster.getSolrClient(), TIMEOUT);
cluster.waitForActiveCollection("overwrite", 1, 1);
new UpdateRequest()
.add("id", "0", "a_t", "hello1")
.add("id", "0", "a_t", "hello2")
.commit(cluster.getSolrClient(), "overwrite");
QueryResponse resp = cluster.getSolrClient().query("overwrite", new SolrQuery("*:*"));
assertEquals("There should be one document because overwrite=true", 1, resp.getResults().getNumFound());
new UpdateRequest()
.add(new SolrInputDocument(id, "1", "a_t", "hello1"), /* overwrite = */ false)
.add(new SolrInputDocument(id, "1", "a_t", "hello2"), false)
.commit(cluster.getSolrClient(), "overwrite");
resp = getRandomClient().query("overwrite", new SolrQuery("*:*"));
assertEquals("There should be 3 documents because there should be two id=1 docs due to overwrite=false", 3, resp.getResults().getNumFound());
}
@Test
public void testAliasHandling() throws Exception {
CollectionAdminRequest.createCollection(COLLECTION, "conf", 2, 1)
.setPerReplicaState(USE_PER_REPLICA_STATE)
.process(cluster.getSolrClient());
cluster.waitForActiveCollection(COLLECTION, 2, 2);
CollectionAdminRequest.createCollection(COLLECTION2, "conf", 2, 1)
.setPerReplicaState(USE_PER_REPLICA_STATE)
.process(cluster.getSolrClient());
cluster.waitForActiveCollection(COLLECTION2, 2, 2);
CloudSolrClient client = getRandomClient();
SolrInputDocument doc = new SolrInputDocument("id", "1", "title_s", "my doc");
client.add(COLLECTION, doc);
client.commit(COLLECTION);
CollectionAdminRequest.createAlias("testalias", COLLECTION).process(cluster.getSolrClient());
SolrInputDocument doc2 = new SolrInputDocument("id", "2", "title_s", "my doc too");
client.add(COLLECTION2, doc2);
client.commit(COLLECTION2);
CollectionAdminRequest.createAlias("testalias2", COLLECTION2).process(cluster.getSolrClient());
CollectionAdminRequest.createAlias("testaliascombined", COLLECTION + "," + COLLECTION2).process(cluster.getSolrClient());
// ensure that the aliases have been registered
Map<String, String> aliases = new CollectionAdminRequest.ListAliases().process(cluster.getSolrClient()).getAliases();
assertEquals(COLLECTION, aliases.get("testalias"));
assertEquals(COLLECTION2, aliases.get("testalias2"));
assertEquals(COLLECTION + "," + COLLECTION2, aliases.get("testaliascombined"));
assertEquals(1, client.query(COLLECTION, params("q", "*:*")).getResults().getNumFound());
assertEquals(1, client.query("testalias", params("q", "*:*")).getResults().getNumFound());
assertEquals(1, client.query(COLLECTION2, params("q", "*:*")).getResults().getNumFound());
assertEquals(1, client.query("testalias2", params("q", "*:*")).getResults().getNumFound());
assertEquals(2, client.query("testaliascombined", params("q", "*:*")).getResults().getNumFound());
ModifiableSolrParams paramsWithBothCollections = params("q", "*:*", "collection", COLLECTION + "," + COLLECTION2);
assertEquals(2, client.query(null, paramsWithBothCollections).getResults().getNumFound());
ModifiableSolrParams paramsWithBothAliases = params("q", "*:*", "collection", "testalias,testalias2");
assertEquals(2, client.query(null, paramsWithBothAliases).getResults().getNumFound());
ModifiableSolrParams paramsWithCombinedAlias = params("q", "*:*", "collection", "testaliascombined");
assertEquals(2, client.query(null, paramsWithCombinedAlias).getResults().getNumFound());
ModifiableSolrParams paramsWithMixedCollectionAndAlias = params("q", "*:*", "collection", "testalias," + COLLECTION2);
assertEquals(2, client.query(null, paramsWithMixedCollectionAndAlias).getResults().getNumFound());
}
@Test
public void testRouting() throws Exception {
CollectionAdminRequest.createCollection("routing_collection", "conf", 2, 1)
.setPerReplicaState(USE_PER_REPLICA_STATE)
.process(cluster.getSolrClient());
cluster.waitForActiveCollection("routing_collection", 2, 2);
AbstractUpdateRequest request = new UpdateRequest()
.add(id, "0", "a_t", "hello1")
.add(id, "2", "a_t", "hello2")
.setAction(AbstractUpdateRequest.ACTION.COMMIT, true, true);
// Test single threaded routed updates for UpdateRequest
NamedList<Object> response = getRandomClient().request(request, "routing_collection");
if (getRandomClient().isDirectUpdatesToLeadersOnly()) {
checkSingleServer(response);
}
CloudSolrClient.RouteResponse rr = (CloudSolrClient.RouteResponse) response;
Map<String, LBHttpSolrClient.Req> routes = rr.getRoutes();
Iterator<Map.Entry<String, LBHttpSolrClient.Req>> it = routes.entrySet()
.iterator();
while (it.hasNext()) {
Map.Entry<String, LBHttpSolrClient.Req> entry = it.next();
String url = entry.getKey();
UpdateRequest updateRequest = (UpdateRequest) entry.getValue()
.getRequest();
SolrInputDocument doc = updateRequest.getDocuments().get(0);
String id = doc.getField("id").getValue().toString();
ModifiableSolrParams params = new ModifiableSolrParams();
params.add("q", "id:" + id);
params.add("distrib", "false");
QueryRequest queryRequest = new QueryRequest(params);
try (HttpSolrClient solrClient = getHttpSolrClient(url)) {
QueryResponse queryResponse = queryRequest.process(solrClient);
SolrDocumentList docList = queryResponse.getResults();
        assertEquals(1, docList.getNumFound());
}
}
// Test the deleteById routing for UpdateRequest
final UpdateResponse uResponse = new UpdateRequest()
.deleteById("0")
.deleteById("2")
.commit(cluster.getSolrClient(), "routing_collection");
if (getRandomClient().isDirectUpdatesToLeadersOnly()) {
checkSingleServer(uResponse.getResponse());
}
QueryResponse qResponse = getRandomClient().query("routing_collection", new SolrQuery("*:*"));
SolrDocumentList docs = qResponse.getResults();
assertEquals(0, docs.getNumFound());
// Test Multi-Threaded routed updates for UpdateRequest
try (CloudSolrClient threadedClient = new CloudSolrClientBuilder
(Collections.singletonList(cluster.getZkServer().getZkAddress()), Optional.empty())
.withParallelUpdates(true)
.build()) {
threadedClient.setDefaultCollection("routing_collection");
response = threadedClient.request(request);
if (threadedClient.isDirectUpdatesToLeadersOnly()) {
checkSingleServer(response);
}
rr = (CloudSolrClient.RouteResponse) response;
routes = rr.getRoutes();
it = routes.entrySet()
.iterator();
while (it.hasNext()) {
Map.Entry<String, LBHttpSolrClient.Req> entry = it.next();
String url = entry.getKey();
UpdateRequest updateRequest = (UpdateRequest) entry.getValue()
.getRequest();
SolrInputDocument doc = updateRequest.getDocuments().get(0);
String id = doc.getField("id").getValue().toString();
ModifiableSolrParams params = new ModifiableSolrParams();
params.add("q", "id:" + id);
params.add("distrib", "false");
QueryRequest queryRequest = new QueryRequest(params);
try (HttpSolrClient solrClient = getHttpSolrClient(url)) {
QueryResponse queryResponse = queryRequest.process(solrClient);
SolrDocumentList docList = queryResponse.getResults();
        assertEquals(1, docList.getNumFound());
}
}
}
// Test that queries with _route_ params are routed by the client
// Track request counts on each node before query calls
ClusterState clusterState = cluster.getSolrClient().getZkStateReader().getClusterState();
DocCollection col = clusterState.getCollection("routing_collection");
Map<String, Long> requestCountsMap = Maps.newHashMap();
for (Slice slice : col.getSlices()) {
for (Replica replica : slice.getReplicas()) {
String baseURL = replica.getBaseUrl();
requestCountsMap.put(baseURL, getNumRequests(baseURL, "routing_collection"));
}
}
// Collect the base URLs of the replicas of shard that's expected to be hit
DocRouter router = col.getRouter();
Collection<Slice> expectedSlices = router.getSearchSlicesSingle("0", null, col);
Set<String> expectedBaseURLs = Sets.newHashSet();
for (Slice expectedSlice : expectedSlices) {
for (Replica replica : expectedSlice.getReplicas()) {
expectedBaseURLs.add(replica.getBaseUrl());
}
}
assertTrue("expected urls is not fewer than all urls! expected=" + expectedBaseURLs
+ "; all=" + requestCountsMap.keySet(),
expectedBaseURLs.size() < requestCountsMap.size());
// Calculate a number of shard keys that route to the same shard.
int n;
if (TEST_NIGHTLY) {
n = random().nextInt(999) + 2;
} else {
n = random().nextInt(9) + 2;
}
List<String> sameShardRoutes = Lists.newArrayList();
sameShardRoutes.add("0");
for (int i = 1; i < n; i++) {
String shardKey = Integer.toString(i);
Collection<Slice> slices = router.getSearchSlicesSingle(shardKey, null, col);
log.info("Expected Slices {}", slices);
if (expectedSlices.equals(slices)) {
sameShardRoutes.add(shardKey);
}
}
assertTrue(sameShardRoutes.size() > 1);
// Do N queries with _route_ parameter to the same shard
for (int i = 0; i < n; i++) {
ModifiableSolrParams solrParams = new ModifiableSolrParams();
solrParams.set(CommonParams.Q, "*:*");
solrParams.set(ShardParams._ROUTE_, sameShardRoutes.get(random().nextInt(sameShardRoutes.size())));
if (log.isInfoEnabled()) {
log.info("output: {}", getRandomClient().query("routing_collection", solrParams));
}
}
    // Request count increases from the expected nodes should aggregate to n (the
    // number of routed queries above), while unexpected nodes should see no increase.
int increaseFromExpectedUrls = 0;
int increaseFromUnexpectedUrls = 0;
Map<String, Long> numRequestsToUnexpectedUrls = Maps.newHashMap();
for (Slice slice : col.getSlices()) {
for (Replica replica : slice.getReplicas()) {
String baseURL = replica.getBaseUrl();
Long prevNumRequests = requestCountsMap.get(baseURL);
Long curNumRequests = getNumRequests(baseURL, "routing_collection");
long delta = curNumRequests - prevNumRequests;
if (expectedBaseURLs.contains(baseURL)) {
increaseFromExpectedUrls += delta;
} else {
increaseFromUnexpectedUrls += delta;
numRequestsToUnexpectedUrls.put(baseURL, delta);
}
}
}
assertEquals("Unexpected number of requests to expected URLs", n, increaseFromExpectedUrls);
assertEquals("Unexpected number of requests to unexpected URLs: " + numRequestsToUnexpectedUrls,
0, increaseFromUnexpectedUrls);
}
/**
* Tests if the specification of 'preferLocalShards' in the query-params
* limits the distributed query to locally hosted shards only
*/
@Test
// commented 4-Sep-2018 @LuceneTestCase.BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") // 2-Aug-2018
public void preferLocalShardsTest() throws Exception {
String collectionName = "localShardsTestColl";
int liveNodes = cluster.getJettySolrRunners().size();
// For preferLocalShards to succeed in a test, every shard should have
// all its cores on the same node.
// Hence the below configuration for our collection
CollectionAdminRequest.createCollection(collectionName, "conf", liveNodes, liveNodes)
.setPerReplicaState(USE_PER_REPLICA_STATE)
.setMaxShardsPerNode(liveNodes * liveNodes)
.processAndWait(cluster.getSolrClient(), TIMEOUT);
cluster.waitForActiveCollection(collectionName, liveNodes, liveNodes * liveNodes);
// Add some new documents
new UpdateRequest()
.add(id, "0", "a_t", "hello1")
.add(id, "2", "a_t", "hello2")
.add(id, "3", "a_t", "hello2")
.commit(getRandomClient(), collectionName);
// Run the actual test for 'preferLocalShards'
queryWithShardsPreferenceRules(getRandomClient(), false, collectionName);
queryWithShardsPreferenceRules(getRandomClient(), true, collectionName);
}
@SuppressWarnings("deprecation")
private void queryWithShardsPreferenceRules(CloudSolrClient cloudClient,
boolean useShardsPreference,
String collectionName)
throws Exception {
SolrQuery qRequest = new SolrQuery("*:*");
ModifiableSolrParams qParams = new ModifiableSolrParams();
if (useShardsPreference) {
qParams.add(ShardParams.SHARDS_PREFERENCE, ShardParams.SHARDS_PREFERENCE_REPLICA_LOCATION + ":" + ShardParams.REPLICA_LOCAL);
} else {
qParams.add(CommonParams.PREFER_LOCAL_SHARDS, "true");
}
qParams.add(ShardParams.SHARDS_INFO, "true");
qRequest.add(qParams);
// CloudSolrClient sends the request to some node.
// And since all the nodes are hosting cores from all shards, the
// distributed query formed by this node will select cores from the
// local shards only
QueryResponse qResponse = cloudClient.query(collectionName, qRequest);
Object shardsInfo = qResponse.getResponse().get(ShardParams.SHARDS_INFO);
assertNotNull("Unable to obtain " + ShardParams.SHARDS_INFO, shardsInfo);
// Iterate over shards-info and check what cores responded
SimpleOrderedMap<?> shardsInfoMap = (SimpleOrderedMap<?>) shardsInfo;
@SuppressWarnings({"unchecked"})
Iterator<Map.Entry<String, ?>> itr = shardsInfoMap.asMap(100).entrySet().iterator();
List<String> shardAddresses = new ArrayList<String>();
while (itr.hasNext()) {
Map.Entry<String, ?> e = itr.next();
assertTrue("Did not find map-type value in " + ShardParams.SHARDS_INFO, e.getValue() instanceof Map);
String shardAddress = (String) ((Map) e.getValue()).get("shardAddress");
assertNotNull(ShardParams.SHARDS_INFO + " did not return 'shardAddress' parameter", shardAddress);
shardAddresses.add(shardAddress);
}
if (log.isInfoEnabled()) {
log.info("Shards giving the response: {}", Arrays.toString(shardAddresses.toArray()));
}
// Make sure the distributed queries were directed to a single node only
Set<Integer> ports = new HashSet<Integer>();
for (String shardAddr : shardAddresses) {
URL url = new URL(shardAddr);
ports.add(url.getPort());
}
// This assertion would hold true as long as every shard has a core on each node
assertTrue("Response was not received from shards on a single node",
shardAddresses.size() > 1 && ports.size() == 1);
}
/**
* Tests if the 'shards.preference' parameter works with single-sharded collections.
*/
@Test
public void singleShardedPreferenceRules() throws Exception {
String collectionName = "singleShardPreferenceTestColl";
int liveNodes = cluster.getJettySolrRunners().size();
// For testing replica.type, we want to have all replica types available for the collection
CollectionAdminRequest.createCollection(collectionName, "conf", 1, liveNodes / 3, liveNodes / 3, liveNodes / 3)
.setPerReplicaState(USE_PER_REPLICA_STATE)
.setMaxShardsPerNode(liveNodes)
.processAndWait(cluster.getSolrClient(), TIMEOUT);
cluster.waitForActiveCollection(collectionName, 1, liveNodes);
// Add some new documents
new UpdateRequest()
.add(id, "0", "a_t", "hello1")
.add(id, "2", "a_t", "hello2")
.add(id, "3", "a_t", "hello2")
.commit(getRandomClient(), collectionName);
// Run the actual test for 'queryReplicaType'
queryReplicaType(getRandomClient(), Replica.Type.PULL, collectionName);
queryReplicaType(getRandomClient(), Replica.Type.TLOG, collectionName);
queryReplicaType(getRandomClient(), Replica.Type.NRT, collectionName);
}
private void queryReplicaType(CloudSolrClient cloudClient,
Replica.Type typeToQuery,
String collectionName)
throws Exception {
SolrQuery qRequest = new SolrQuery("*:*");
ModifiableSolrParams qParams = new ModifiableSolrParams();
qParams.add(ShardParams.SHARDS_PREFERENCE, ShardParams.SHARDS_PREFERENCE_REPLICA_TYPE + ":" + typeToQuery.toString());
qParams.add(ShardParams.SHARDS_INFO, "true");
qRequest.add(qParams);
Map<String, String> replicaTypeToReplicas = mapReplicasToReplicaType(getCollectionState(collectionName));
QueryResponse qResponse = cloudClient.query(collectionName, qRequest);
Object shardsInfo = qResponse.getResponse().get(ShardParams.SHARDS_INFO);
assertNotNull("Unable to obtain " + ShardParams.SHARDS_INFO, shardsInfo);
// Iterate over shards-info and check what cores responded
SimpleOrderedMap<?> shardsInfoMap = (SimpleOrderedMap<?>) shardsInfo;
@SuppressWarnings({"unchecked"})
Iterator<Map.Entry<String, ?>> itr = shardsInfoMap.asMap(100).entrySet().iterator();
List<String> shardAddresses = new ArrayList<String>();
while (itr.hasNext()) {
Map.Entry<String, ?> e = itr.next();
assertTrue("Did not find map-type value in " + ShardParams.SHARDS_INFO, e.getValue() instanceof Map);
String shardAddress = (String) ((Map) e.getValue()).get("shardAddress");
if (shardAddress.endsWith("/")) {
shardAddress = shardAddress.substring(0, shardAddress.length() - 1);
}
assertNotNull(ShardParams.SHARDS_INFO + " did not return 'shardAddress' parameter", shardAddress);
shardAddresses.add(shardAddress);
}
assertEquals("Shard addresses must be of size 1, since there is only 1 shard in the collection", 1, shardAddresses.size());
assertEquals("Make sure that the replica queried was the replicaType desired", typeToQuery.toString().toUpperCase(Locale.ROOT), replicaTypeToReplicas.get(shardAddresses.get(0)).toUpperCase(Locale.ROOT));
}
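  /**
   * Reads the request counter of the /select handler from a node's
   * /admin/mbeans stats; see the overload below for other categories.
   */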
private Long getNumRequests(String baseUrl, String collectionName) throws
SolrServerException, IOException {
return getNumRequests(baseUrl, collectionName, "QUERY", "/select", null, false);
}
private Long getNumRequests(String baseUrl, String collectionName, String category, String key, String scope, boolean returnNumErrors) throws
SolrServerException, IOException {
NamedList<Object> resp;
try (HttpSolrClient client = getHttpSolrClient(baseUrl + "/" + collectionName, 15000, 60000)) {
ModifiableSolrParams params = new ModifiableSolrParams();
params.set("qt", "/admin/mbeans");
params.set("stats", "true");
params.set("key", key);
params.set("cat", category);
// use generic request to avoid extra processing of queries
QueryRequest req = new QueryRequest(params);
resp = client.request(req);
}
String name;
if (returnNumErrors) {
name = category + "." + (scope != null ? scope : key) + ".errors";
} else {
name = category + "." + (scope != null ? scope : key) + ".requests";
}
@SuppressWarnings({"unchecked"})
Map<String, Object> map = (Map<String, Object>) resp.findRecursive("solr-mbeans", category, key, "stats");
if (map == null) {
return null;
}
if (scope != null) { // admin handler uses a meter instead of counter here
return (Long) map.get(name + ".count");
} else {
return (Long) map.get(name);
}
}
@Test
public void testNonRetryableRequests() throws Exception {
try (CloudSolrClient client = getCloudSolrClient(cluster.getZkServer().getZkAddress())) {
// important to have one replica on each node
RequestStatusState state = CollectionAdminRequest.createCollection("foo", "conf", 1, NODE_COUNT).processAndWait(client, 60);
if (state == RequestStatusState.COMPLETED) {
cluster.waitForActiveCollection("foo", 1, NODE_COUNT);
client.setDefaultCollection("foo");
Map<String, String> adminPathToMbean = new HashMap<>(CommonParams.ADMIN_PATHS.size());
adminPathToMbean.put(CommonParams.COLLECTIONS_HANDLER_PATH, CollectionsHandler.class.getName());
adminPathToMbean.put(CommonParams.CORES_HANDLER_PATH, CoreAdminHandler.class.getName());
adminPathToMbean.put(CommonParams.CONFIGSETS_HANDLER_PATH, ConfigSetsHandler.class.getName());
// we do not add the authc/authz handlers because they do not currently expose any mbeans
for (String adminPath : adminPathToMbean.keySet()) {
long errorsBefore = 0;
for (JettySolrRunner runner : cluster.getJettySolrRunners()) {
Long numRequests = getNumRequests(runner.getBaseUrl().toString(), "foo", "ADMIN", adminPathToMbean.get(adminPath), adminPath, true);
errorsBefore += numRequests;
if (log.isInfoEnabled()) {
log.info("Found {} requests to {} on {}", numRequests, adminPath, runner.getBaseUrl());
}
}
ModifiableSolrParams params = new ModifiableSolrParams();
params.set("qt", adminPath);
params.set("action", "foobar"); // this should cause an error
QueryRequest req = new QueryRequest(params);
try {
NamedList<Object> resp = client.request(req);
fail("call to foo for admin path " + adminPath + " should have failed");
} catch (Exception e) {
// expected
}
long errorsAfter = 0;
for (JettySolrRunner runner : cluster.getJettySolrRunners()) {
Long numRequests = getNumRequests(runner.getBaseUrl().toString(), "foo", "ADMIN", adminPathToMbean.get(adminPath), adminPath, true);
errorsAfter += numRequests;
if (log.isInfoEnabled()) {
log.info("Found {} requests to {} on {}", numRequests, adminPath, runner.getBaseUrl());
}
}
assertEquals(errorsBefore + 1, errorsAfter);
}
} else {
fail("Collection could not be created within 60 seconds");
}
}
}
@Test
public void checkCollectionParameters() throws Exception {
try (CloudSolrClient client = getCloudSolrClient(cluster.getZkServer().getZkAddress())) {
String async1 = CollectionAdminRequest.createCollection("multicollection1", "conf", 2, 1)
.setPerReplicaState(USE_PER_REPLICA_STATE)
.processAsync(client);
String async2 = CollectionAdminRequest.createCollection("multicollection2", "conf", 2, 1)
.setPerReplicaState(USE_PER_REPLICA_STATE)
.processAsync(client);
CollectionAdminRequest.waitForAsyncRequest(async1, client, TIMEOUT);
CollectionAdminRequest.waitForAsyncRequest(async2, client, TIMEOUT);
cluster.waitForActiveCollection("multicollection1", 2, 2);
cluster.waitForActiveCollection("multicollection2", 2, 2);
client.setDefaultCollection("multicollection1");
List<SolrInputDocument> docs = new ArrayList<>(3);
for (int i = 0; i < 3; i++) {
SolrInputDocument doc = new SolrInputDocument();
doc.addField(id, Integer.toString(i));
doc.addField("a_t", "hello");
docs.add(doc);
}
client.add(docs); // default - will add them to multicollection1
client.commit();
ModifiableSolrParams queryParams = new ModifiableSolrParams();
queryParams.add("q", "*:*");
assertEquals(3, client.query(queryParams).getResults().size());
assertEquals(0, client.query("multicollection2", queryParams).getResults().size());
SolrQuery query = new SolrQuery("*:*");
query.set("collection", "multicollection2");
assertEquals(0, client.query(query).getResults().size());
client.add("multicollection2", docs);
client.commit("multicollection2");
assertEquals(3, client.query("multicollection2", queryParams).getResults().size());
}
}
@Test
public void stateVersionParamTest() throws Exception {
CollectionAdminRequest.createCollection(COLLECTION, "conf", 2, 1).process(cluster.getSolrClient());
cluster.waitForActiveCollection(COLLECTION, 2, 2);
DocCollection coll = cluster.getSolrClient().getZkStateReader().getClusterState().getCollection(COLLECTION);
Replica r = coll.getSlices().iterator().next().getReplicas().iterator().next();
SolrQuery q = new SolrQuery().setQuery("*:*");
HttpSolrClient.RemoteSolrException sse = null;
final String url = r.getBaseUrl() + "/" + COLLECTION;
try (HttpSolrClient solrClient = getHttpSolrClient(url)) {
if (log.isInfoEnabled()) {
log.info("should work query, result {}", solrClient.query(q));
}
//no problem
q.setParam(CloudSolrClient.STATE_VERSION, COLLECTION + ":" + coll.getZNodeVersion());
if (log.isInfoEnabled()) {
log.info("2nd query , result {}", solrClient.query(q));
}
//no error yet good
q.setParam(CloudSolrClient.STATE_VERSION, COLLECTION + ":" + (coll.getZNodeVersion() - 1)); //an older version expect error
QueryResponse rsp = solrClient.query(q);
@SuppressWarnings({"rawtypes"})
Map m = (Map) rsp.getResponse().get(CloudSolrClient.STATE_VERSION, rsp.getResponse().size() - 1);
assertNotNull("Expected an extra information from server with the list of invalid collection states", m);
assertNotNull(m.get(COLLECTION));
}
//now send the request to another node that does not serve the collection
Set<String> allNodesOfColl = new HashSet<>();
for (Slice slice : coll.getSlices()) {
for (Replica replica : slice.getReplicas()) {
allNodesOfColl.add(replica.getBaseUrl());
}
}
String theNode = null;
Set<String> liveNodes = cluster.getSolrClient().getZkStateReader().getClusterState().getLiveNodes();
for (String s : liveNodes) {
String n = cluster.getSolrClient().getZkStateReader().getBaseUrlForNodeName(s);
if (!allNodesOfColl.contains(n)) {
theNode = n;
break;
}
}
log.info("the node which does not serve this collection{} ", theNode);
assertNotNull(theNode);
final String solrClientUrl = theNode + "/" + COLLECTION;
try (SolrClient solrClient = getHttpSolrClient(solrClientUrl)) {
q.setParam(CloudSolrClient.STATE_VERSION, COLLECTION + ":" + (coll.getZNodeVersion() - 1));
try {
QueryResponse rsp = solrClient.query(q);
log.info("error was expected");
} catch (HttpSolrClient.RemoteSolrException e) {
sse = e;
}
assertNotNull(sse);
assertEquals(" Error code should be 510", SolrException.ErrorCode.INVALID_STATE.code, sse.code());
}
}
@Test
public void testShutdown() throws IOException {
try (CloudSolrClient client = getCloudSolrClient(DEAD_HOST_1)) {
client.setZkConnectTimeout(100);
SolrException ex = expectThrows(SolrException.class, client::connect);
assertTrue(ex.getCause() instanceof TimeoutException);
}
}
@Rule
public ExpectedException exception = ExpectedException.none();
@Test
public void testWrongZkChrootTest() throws IOException {
try (CloudSolrClient client = getCloudSolrClient(cluster.getZkServer().getZkAddress() + "/xyz/foo")) {
client.setZkClientTimeout(1000 * 60);
SolrException ex = expectThrows(SolrException.class, client::connect);
assertTrue(ex.getMessage().contains("cluster not found/not ready"));
}
}
@Test
public void customHttpClientTest() throws IOException {
CloseableHttpClient client = HttpClientUtil.createClient(null);
try (CloudSolrClient solrClient = getCloudSolrClient(cluster.getZkServer().getZkAddress(), client)) {
      assertSame(client, solrClient.getLbClient().getHttpClient());
} finally {
HttpClientUtil.close(client);
}
}
@Test
public void testVersionsAreReturned() throws Exception {
CollectionAdminRequest.createCollection("versions_collection", "conf", 2, 1)
.setPerReplicaState(USE_PER_REPLICA_STATE)
.process(cluster.getSolrClient());
cluster.waitForActiveCollection("versions_collection", 2, 2);
// assert that "adds" are returned
UpdateRequest updateRequest = new UpdateRequest()
.add("id", "1", "a_t", "hello1")
.add("id", "2", "a_t", "hello2");
updateRequest.setParam(UpdateParams.VERSIONS, Boolean.TRUE.toString());
NamedList<Object> response = updateRequest.commit(getRandomClient(), "versions_collection").getResponse();
Object addsObject = response.get("adds");
assertNotNull("There must be a adds parameter", addsObject);
assertTrue(addsObject instanceof NamedList<?>);
NamedList<?> adds = (NamedList<?>) addsObject;
assertEquals("There must be 2 versions (one per doc)", 2, adds.size());
Map<String, Long> versions = new HashMap<>();
Object object = adds.get("1");
assertNotNull("There must be a version for id 1", object);
assertTrue("Version for id 1 must be a long", object instanceof Long);
versions.put("1", (Long) object);
object = adds.get("2");
assertNotNull("There must be a version for id 2", object);
assertTrue("Version for id 2 must be a long", object instanceof Long);
versions.put("2", (Long) object);
QueryResponse resp = getRandomClient().query("versions_collection", new SolrQuery("*:*"));
assertEquals("There should be one document because overwrite=true", 2, resp.getResults().getNumFound());
for (SolrDocument doc : resp.getResults()) {
Long version = versions.get(doc.getFieldValue("id"));
assertEquals("Version on add must match _version_ field", version, doc.getFieldValue("_version_"));
}
// assert that "deletes" are returned
UpdateRequest deleteRequest = new UpdateRequest().deleteById("1");
deleteRequest.setParam(UpdateParams.VERSIONS, Boolean.TRUE.toString());
response = deleteRequest.commit(getRandomClient(), "versions_collection").getResponse();
Object deletesObject = response.get("deletes");
assertNotNull("There must be a deletes parameter", deletesObject);
@SuppressWarnings({"rawtypes"})
NamedList deletes = (NamedList) deletesObject;
assertEquals("There must be 1 version", 1, deletes.size());
}
@Test
public void testInitializationWithSolrUrls() throws Exception {
CollectionAdminRequest.createCollection(COLLECTION, "conf", 2, 1)
.setPerReplicaState(USE_PER_REPLICA_STATE)
.process(cluster.getSolrClient());
cluster.waitForActiveCollection(COLLECTION, 2, 2);
CloudSolrClient client = httpBasedCloudSolrClient;
SolrInputDocument doc = new SolrInputDocument("id", "1", "title_s", "my doc");
client.add(COLLECTION, doc);
client.commit(COLLECTION);
assertEquals(1, client.query(COLLECTION, params("q", "*:*")).getResults().getNumFound());
}
@Test
public void testCollectionDoesntExist() throws Exception {
CloudSolrClient client = getRandomClient();
SolrInputDocument doc = new SolrInputDocument("id", "1", "title_s", "my doc");
SolrException ex = expectThrows(SolrException.class, () -> client.add("boguscollectionname", doc));
assertEquals("Collection not found: boguscollectionname", ex.getMessage());
}
public void testRetryUpdatesWhenClusterStateIsStale() throws Exception {
final String COL = "stale_state_test_col";
assert cluster.getJettySolrRunners().size() >= 2;
final JettySolrRunner old_leader_node = cluster.getJettySolrRunners().get(0);
final JettySolrRunner new_leader_node = cluster.getJettySolrRunners().get(1);
// start with exactly 1 shard/replica...
assertEquals("Couldn't create collection", 0,
CollectionAdminRequest.createCollection(COL, "conf", 1, 1)
.setPerReplicaState(USE_PER_REPLICA_STATE)
.setCreateNodeSet(old_leader_node.getNodeName())
.process(cluster.getSolrClient()).getStatus());
cluster.waitForActiveCollection(COL, 1, 1);
// determine the coreNodeName of only current replica
Collection<Slice> slices = cluster.getSolrClient().getZkStateReader().getClusterState().getCollection(COL).getSlices();
assertEquals(1, slices.size()); // sanity check
Slice slice = slices.iterator().next();
assertEquals(1, slice.getReplicas().size()); // sanity check
final String old_leader_core_node_name = slice.getLeader().getName();
// NOTE: creating our own CloudSolrClient whose settings we can muck with...
try (CloudSolrClient stale_client = new CloudSolrClientBuilder
(Collections.singletonList(cluster.getZkServer().getZkAddress()), Optional.empty())
.sendDirectUpdatesToAnyShardReplica()
.withParallelUpdates(true)
.build()) {
// don't let collection cache entries get expired, even on a slow machine...
stale_client.setCollectionCacheTTl(Integer.MAX_VALUE);
stale_client.setDefaultCollection(COL);
// do a query to populate stale_client's cache...
assertEquals(0, stale_client.query(new SolrQuery("*:*")).getResults().getNumFound());
// add 1 replica on a diff node...
assertEquals("Couldn't create collection", 0,
CollectionAdminRequest.addReplicaToShard(COL, "shard1")
.setNode(new_leader_node.getNodeName())
// NOTE: don't use our stale_client for this -- don't tip it off of a collection change
.process(cluster.getSolrClient()).getStatus());
AbstractDistribZkTestBase.waitForRecoveriesToFinish
(COL, cluster.getSolrClient().getZkStateReader(), true, true, 330);
// ...and delete our original leader.
assertEquals("Couldn't create collection", 0,
CollectionAdminRequest.deleteReplica(COL, "shard1", old_leader_core_node_name)
// NOTE: don't use our stale_client for this -- don't tip it off of a collection change
.process(cluster.getSolrClient()).getStatus());
AbstractDistribZkTestBase.waitForRecoveriesToFinish
(COL, cluster.getSolrClient().getZkStateReader(), true, true, 330);
// stale_client's collection state cache should now only point at a leader that no longer exists.
// attempt a (direct) update that should succeed in spite of cached cluster state
// pointing solely to a node that's no longer part of our collection...
assertEquals(0, (new UpdateRequest().add("id", "1").commit(stale_client, COL)).getStatus());
assertEquals(1, stale_client.query(new SolrQuery("*:*")).getResults().getNumFound());
}
}
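  /**
   * Asserts that each routed sub-request in the RouteResponse targeted
   * exactly one server, as expected when updates go directly to leaders only.
   */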
private static void checkSingleServer(NamedList<Object> response) {
final CloudSolrClient.RouteResponse rr = (CloudSolrClient.RouteResponse) response;
final Map<String, LBHttpSolrClient.Req> routes = rr.getRoutes();
final Iterator<Map.Entry<String, LBHttpSolrClient.Req>> it =
routes.entrySet().iterator();
while (it.hasNext()) {
Map.Entry<String, LBHttpSolrClient.Req> entry = it.next();
assertEquals("wrong number of servers: " + entry.getValue().getServers(),
1, entry.getValue().getServers().size());
}
}
/**
   * Tests if the specification of 'shards.preference=replica.type:...' in the query-params
   * limits the distributed query to replicas of the preferred types only
*/
@Test
// commented 15-Sep-2018 @LuceneTestCase.BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") // 2-Aug-2018
public void preferReplicaTypesTest() throws Exception {
String collectionName = "replicaTypesTestColl";
int liveNodes = cluster.getJettySolrRunners().size();
// For these tests we need to have multiple replica types.
// Hence the below configuration for our collection
int pullReplicas = Math.max(1, liveNodes - 2);
CollectionAdminRequest.createCollection(collectionName, "conf", liveNodes, 1, 1, pullReplicas)
.setPerReplicaState(USE_PER_REPLICA_STATE)
.setMaxShardsPerNode(liveNodes)
.processAndWait(cluster.getSolrClient(), TIMEOUT);
cluster.waitForActiveCollection(collectionName, liveNodes, liveNodes * (2 + pullReplicas));
// Add some new documents
new UpdateRequest()
.add(id, "0", "a_t", "hello1")
.add(id, "2", "a_t", "hello2")
.add(id, "3", "a_t", "hello2")
.commit(getRandomClient(), collectionName);
// Run the actual tests for 'shards.preference=replica.type:*'
queryWithPreferReplicaTypes(getRandomClient(), "PULL", false, collectionName);
queryWithPreferReplicaTypes(getRandomClient(), "PULL|TLOG", false, collectionName);
queryWithPreferReplicaTypes(getRandomClient(), "TLOG", false, collectionName);
queryWithPreferReplicaTypes(getRandomClient(), "TLOG|PULL", false, collectionName);
queryWithPreferReplicaTypes(getRandomClient(), "NRT", false, collectionName);
queryWithPreferReplicaTypes(getRandomClient(), "NRT|PULL", false, collectionName);
// Test to verify that preferLocalShards=true doesn't break this
queryWithPreferReplicaTypes(getRandomClient(), "PULL", true, collectionName);
queryWithPreferReplicaTypes(getRandomClient(), "PULL|TLOG", true, collectionName);
queryWithPreferReplicaTypes(getRandomClient(), "TLOG", true, collectionName);
queryWithPreferReplicaTypes(getRandomClient(), "TLOG|PULL", true, collectionName);
queryWithPreferReplicaTypes(getRandomClient(), "NRT", false, collectionName);
queryWithPreferReplicaTypes(getRandomClient(), "NRT|PULL", true, collectionName);
CollectionAdminRequest.deleteCollection(collectionName)
.processAndWait(cluster.getSolrClient(), TIMEOUT);
}
private void queryWithPreferReplicaTypes(CloudSolrClient cloudClient,
String preferReplicaTypes,
boolean preferLocalShards,
String collectionName)
throws Exception {
SolrQuery qRequest = new SolrQuery("*:*");
ModifiableSolrParams qParams = new ModifiableSolrParams();
final List<String> preferredTypes = Arrays.asList(preferReplicaTypes.split("\\|"));
StringBuilder rule = new StringBuilder();
preferredTypes.forEach(type -> {
if (rule.length() != 0) {
rule.append(',');
}
rule.append(ShardParams.SHARDS_PREFERENCE_REPLICA_TYPE);
rule.append(':');
rule.append(type);
});
if (preferLocalShards) {
if (rule.length() != 0) {
rule.append(',');
}
rule.append(ShardParams.SHARDS_PREFERENCE_REPLICA_LOCATION);
rule.append(":local");
}
qParams.add(ShardParams.SHARDS_PREFERENCE, rule.toString());
qParams.add(ShardParams.SHARDS_INFO, "true");
qRequest.add(qParams);
// CloudSolrClient sends the request to some node.
// And since all the nodes are hosting cores from all shards, the
// distributed query formed by this node will select cores from the
// local shards only
QueryResponse qResponse = cloudClient.query(collectionName, qRequest);
Object shardsInfo = qResponse.getResponse().get(ShardParams.SHARDS_INFO);
assertNotNull("Unable to obtain " + ShardParams.SHARDS_INFO, shardsInfo);
Map<String, String> replicaTypeMap = new HashMap<String, String>();
DocCollection collection = getCollectionState(collectionName);
for (Slice slice : collection.getSlices()) {
for (Replica replica : slice.getReplicas()) {
String coreUrl = replica.getCoreUrl();
// It seems replica reports its core URL with a trailing slash while shard
// info returned from the query doesn't. Oh well.
if (coreUrl.endsWith("/")) {
coreUrl = coreUrl.substring(0, coreUrl.length() - 1);
}
replicaTypeMap.put(coreUrl, replica.getType().toString());
}
}
// Iterate over shards-info and check that replicas of correct type responded
SimpleOrderedMap<?> shardsInfoMap = (SimpleOrderedMap<?>) shardsInfo;
@SuppressWarnings({"unchecked"})
Iterator<Map.Entry<String, ?>> itr = shardsInfoMap.asMap(100).entrySet().iterator();
List<String> shardAddresses = new ArrayList<String>();
while (itr.hasNext()) {
Map.Entry<String, ?> e = itr.next();
assertTrue("Did not find map-type value in " + ShardParams.SHARDS_INFO, e.getValue() instanceof Map);
String shardAddress = (String) ((Map) e.getValue()).get("shardAddress");
assertNotNull(ShardParams.SHARDS_INFO + " did not return 'shardAddress' parameter", shardAddress);
assertTrue(replicaTypeMap.containsKey(shardAddress));
      assertEquals(0, preferredTypes.indexOf(replicaTypeMap.get(shardAddress)));
shardAddresses.add(shardAddress);
}
assertTrue("No responses", shardAddresses.size() > 0);
if (log.isInfoEnabled()) {
log.info("Shards giving the response: {}", Arrays.toString(shardAddresses.toArray()));
}
}
@Test
public void testPing() throws Exception {
final String testCollection = "ping_test";
CollectionAdminRequest.createCollection(testCollection, "conf", 2, 1)
.setPerReplicaState(USE_PER_REPLICA_STATE)
.process(cluster.getSolrClient());
cluster.waitForActiveCollection(testCollection, 2, 2);
final SolrClient clientUnderTest = getRandomClient();
final SolrPingResponse response = clientUnderTest.ping(testCollection);
assertEquals("This should be OK", 0, response.getStatus());
}
public void testPerReplicaStateCollection() throws Exception {
CollectionAdminRequest.createCollection("versions_collection", "conf", 2, 1)
.process(cluster.getSolrClient());
String testCollection = "perReplicaState_test";
int liveNodes = cluster.getJettySolrRunners().size();
CollectionAdminRequest.createCollection(testCollection, "conf", 2, 2)
.setMaxShardsPerNode(liveNodes)
.setPerReplicaState(Boolean.TRUE)
.process(cluster.getSolrClient());
cluster.waitForActiveCollection(testCollection, 2, 4);
final SolrClient clientUnderTest = getRandomClient();
final SolrPingResponse response = clientUnderTest.ping(testCollection);
assertEquals("This should be OK", 0, response.getStatus());
DocCollection c = cluster.getSolrClient().getZkStateReader().getCollection(testCollection);
c.forEachReplica((s, replica) -> assertNotNull(replica.getReplicaState()));
PerReplicaStates prs = PerReplicaStates.fetch(ZkStateReader.getCollectionPath(testCollection), cluster.getZkClient(), null);
assertEquals(4, prs.states.size());
// Now let's do an add replica
CollectionAdminRequest
.addReplicaToShard(testCollection, "shard1")
.process(cluster.getSolrClient());
prs = PerReplicaStates.fetch(ZkStateReader.getCollectionPath(testCollection), cluster.getZkClient(), null);
assertEquals(5, prs.states.size());
testCollection = "perReplicaState_testv2";
new V2Request.Builder("/collections")
.withMethod(POST)
.withPayload("{create: {name: perReplicaState_testv2, config : conf, numShards : 2, nrtReplicas : 2, perReplicaState : true, maxShardsPerNode : 5}}")
.build()
.process(cluster.getSolrClient());
cluster.waitForActiveCollection(testCollection, 2, 4);
c = cluster.getSolrClient().getZkStateReader().getCollection(testCollection);
c.forEachReplica((s, replica) -> assertNotNull(replica.getReplicaState()));
prs = PerReplicaStates.fetch(ZkStateReader.getCollectionPath(testCollection), cluster.getZkClient(), null);
assertEquals(4, prs.states.size());
}
}
| 1 | 40,876 | Are these imports needed (BiConsumer and Consumer)? If they are unused imports, then precommit will fail. | apache-lucene-solr | java |
@@ -0,0 +1,10 @@
+class TraceDestroyerJob < ApplicationJob
+ queue_as :default
+
+ def perform(trace)
+ trace.destroy
+ rescue StandardError => ex
+ logger.info ex.to_s
+ ex.backtrace.each { |l| logger.info l }
+ end
+end | 1 | 1 | 11,753 | Why are we catching and logging exceptions? By doing that we make it look like the job has succeeded and it will be removed from the queue - if we didn't do that then it would stay queued... | openstreetmap-openstreetmap-website | rb |
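A minimal sketch of the alternative the reviewer hints at, assuming Rails >= 5.1 so ActiveJob's retry_on is available (the wait/attempts values are illustrative, not taken from the project):

class TraceDestroyerJob < ApplicationJob
  queue_as :default

  # Retry a few times, then let the exception surface so the backend can
  # mark the job as failed (or keep it queued) instead of silently dropping it.
  retry_on StandardError, wait: 5.minutes, attempts: 3

  def perform(trace)
    trace.destroy
  end
end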
|
@@ -90,14 +90,11 @@ class CartController extends FrontBaseController
*/
public function indexAction(Request $request)
{
- $cart = $this->cartFacade->getCartOfCurrentCustomer();
-
- if ($cart->isEmpty()) {
- $this->cartFacade->cleanAdditionalData();
- }
+ $cart = $this->cartFacade->findCartOfCurrentCustomer();
+ $cartItems = $cart === null ? [] : $cart->getItems();
$cartFormData = ['quantities' => []];
- foreach ($cart->getItems() as $cartItem) {
+ foreach ($cartItems as $cartItem) {
$cartFormData['quantities'][$cartItem->getId()] = $cartItem->getQuantity();
}
| 1 | <?php
namespace Shopsys\ShopBundle\Controller\Front;
use Shopsys\FrameworkBundle\Component\Domain\Domain;
use Shopsys\FrameworkBundle\Component\FlashMessage\ErrorExtractor;
use Shopsys\FrameworkBundle\Model\Cart\AddProductResult;
use Shopsys\FrameworkBundle\Model\Cart\CartFacade;
use Shopsys\FrameworkBundle\Model\Customer\CurrentCustomer;
use Shopsys\FrameworkBundle\Model\Module\ModuleList;
use Shopsys\FrameworkBundle\Model\Order\Preview\OrderPreviewFactory;
use Shopsys\FrameworkBundle\Model\Product\Accessory\ProductAccessoryFacade;
use Shopsys\FrameworkBundle\Model\Product\Product;
use Shopsys\FrameworkBundle\Model\TransportAndPayment\FreeTransportAndPaymentFacade;
use Shopsys\ShopBundle\Form\Front\Cart\AddProductFormType;
use Shopsys\ShopBundle\Form\Front\Cart\CartFormType;
use Symfony\Component\HttpFoundation\Request;
use Symfony\Component\Security\Csrf\CsrfToken;
class CartController extends FrontBaseController
{
const AFTER_ADD_WINDOW_ACCESSORIES_LIMIT = 3;
const RECALCULATE_ONLY_PARAMETER_NAME = 'recalculateOnly';
/**
* @var \Shopsys\FrameworkBundle\Model\Cart\CartFacade
*/
private $cartFacade;
/**
* @var \Shopsys\FrameworkBundle\Model\Customer\CurrentCustomer
*/
private $currentCustomer;
/**
* @var \Shopsys\FrameworkBundle\Component\Domain\Domain
*/
private $domain;
/**
* @var \Shopsys\FrameworkBundle\Model\Product\Accessory\ProductAccessoryFacade
*/
private $productAccessoryFacade;
/**
* @var \Shopsys\FrameworkBundle\Model\TransportAndPayment\FreeTransportAndPaymentFacade
*/
private $freeTransportAndPaymentFacade;
/**
* @var \Shopsys\FrameworkBundle\Model\Order\Preview\OrderPreviewFactory
*/
private $orderPreviewFactory;
/**
* @var \Shopsys\FrameworkBundle\Component\FlashMessage\ErrorExtractor
*/
private $errorExtractor;
/**
* @param \Shopsys\FrameworkBundle\Model\Product\Accessory\ProductAccessoryFacade $productAccessoryFacade
* @param \Shopsys\FrameworkBundle\Model\Cart\CartFacade $cartFacade
* @param \Shopsys\FrameworkBundle\Model\Customer\CurrentCustomer $currentCustomer
* @param \Shopsys\FrameworkBundle\Component\Domain\Domain $domain
* @param \Shopsys\FrameworkBundle\Model\TransportAndPayment\FreeTransportAndPaymentFacade $freeTransportAndPaymentFacade
* @param \Shopsys\FrameworkBundle\Model\Order\Preview\OrderPreviewFactory $orderPreviewFactory
* @param \Shopsys\FrameworkBundle\Component\FlashMessage\ErrorExtractor $errorExtractor
*/
public function __construct(
ProductAccessoryFacade $productAccessoryFacade,
CartFacade $cartFacade,
CurrentCustomer $currentCustomer,
Domain $domain,
FreeTransportAndPaymentFacade $freeTransportAndPaymentFacade,
OrderPreviewFactory $orderPreviewFactory,
ErrorExtractor $errorExtractor
) {
$this->productAccessoryFacade = $productAccessoryFacade;
$this->cartFacade = $cartFacade;
$this->currentCustomer = $currentCustomer;
$this->domain = $domain;
$this->freeTransportAndPaymentFacade = $freeTransportAndPaymentFacade;
$this->orderPreviewFactory = $orderPreviewFactory;
$this->errorExtractor = $errorExtractor;
}
/**
* @param \Symfony\Component\HttpFoundation\Request $request
*/
public function indexAction(Request $request)
{
$cart = $this->cartFacade->getCartOfCurrentCustomer();
if ($cart->isEmpty()) {
$this->cartFacade->cleanAdditionalData();
}
$cartFormData = ['quantities' => []];
foreach ($cart->getItems() as $cartItem) {
$cartFormData['quantities'][$cartItem->getId()] = $cartItem->getQuantity();
}
$form = $this->createForm(CartFormType::class, $cartFormData);
$form->handleRequest($request);
$invalidCart = false;
if ($form->isSubmitted() && $form->isValid()) {
try {
$this->cartFacade->changeQuantities($form->getData()['quantities']);
if (!$request->get(self::RECALCULATE_ONLY_PARAMETER_NAME, false)) {
return $this->redirectToRoute('front_order_index');
}
} catch (\Shopsys\FrameworkBundle\Model\Cart\Exception\InvalidQuantityException $ex) {
$invalidCart = true;
}
} elseif ($form->isSubmitted()) {
$invalidCart = true;
}
if ($invalidCart) {
$this->getFlashMessageSender()->addErrorFlash(
t('Please make sure that you entered right quantity of all items in cart.')
);
}
$cartItems = $cart->getItems();
$domainId = $this->domain->getId();
$orderPreview = $this->orderPreviewFactory->createForCurrentUser();
$productsPrice = $orderPreview->getProductsPrice();
$remainingPriceWithVat = $this->freeTransportAndPaymentFacade->getRemainingPriceWithVat(
$productsPrice->getPriceWithVat(),
$domainId
);
return $this->render('@ShopsysShop/Front/Content/Cart/index.html.twig', [
'cart' => $cart,
'cartItems' => $cartItems,
'cartItemPrices' => $orderPreview->getQuantifiedItemsPrices(),
'form' => $form->createView(),
'isFreeTransportAndPaymentActive' => $this->freeTransportAndPaymentFacade->isActive($domainId),
'isPaymentAndTransportFree' => $this->freeTransportAndPaymentFacade->isFree($productsPrice->getPriceWithVat(), $domainId),
'remainingPriceWithVat' => $remainingPriceWithVat,
'cartItemDiscounts' => $orderPreview->getQuantifiedItemsDiscounts(),
'productsPrice' => $productsPrice,
]);
}
public function boxAction()
{
$orderPreview = $this->orderPreviewFactory->createForCurrentUser();
return $this->render('@ShopsysShop/Front/Inline/Cart/cartBox.html.twig', [
'cart' => $this->cartFacade->getCartOfCurrentCustomer(),
'productsPrice' => $orderPreview->getProductsPrice(),
]);
}
/**
* @param \Shopsys\FrameworkBundle\Model\Product\Product $product
* @param string $type
*/
public function addProductFormAction(Product $product, $type = 'normal')
{
$form = $this->createForm(AddProductFormType::class, ['productId' => $product->getId()], [
'action' => $this->generateUrl('front_cart_add_product'),
]);
return $this->render('@ShopsysShop/Front/Inline/Cart/addProduct.html.twig', [
'form' => $form->createView(),
'product' => $product,
'type' => $type,
]);
}
/**
* @param \Symfony\Component\HttpFoundation\Request $request
*/
public function addProductAction(Request $request)
{
$form = $this->createForm(AddProductFormType::class);
$form->handleRequest($request);
if ($form->isSubmitted() && $form->isValid()) {
try {
$formData = $form->getData();
$addProductResult = $this->cartFacade->addProductToCart($formData['productId'], (int)$formData['quantity']);
$this->sendAddProductResultFlashMessage($addProductResult);
} catch (\Shopsys\FrameworkBundle\Model\Product\Exception\ProductNotFoundException $ex) {
$this->getFlashMessageSender()->addErrorFlash(t('Selected product no longer available or doesn\'t exist.'));
} catch (\Shopsys\FrameworkBundle\Model\Cart\Exception\InvalidQuantityException $ex) {
$this->getFlashMessageSender()->addErrorFlash(t('Please enter valid quantity you want to add to cart.'));
} catch (\Shopsys\FrameworkBundle\Model\Cart\Exception\CartException $ex) {
$this->getFlashMessageSender()->addErrorFlash(t('Unable to add product to cart'));
}
} else {
            // Form errors list in flash message is a temporary solution.
            // We need to determine the cause of the error when adding a product to the cart.
$flashMessageBag = $this->get('shopsys.shop.component.flash_message.bag.front');
$formErrors = $this->errorExtractor->getAllErrorsAsArray($form, $flashMessageBag);
$this->getFlashMessageSender()->addErrorFlashTwig(
t('Unable to add product to cart:<br/><ul><li>{{ errors|raw }}</li></ul>'),
[
'errors' => implode('</li><li>', $formErrors),
]
);
}
if ($request->headers->get('referer')) {
$redirectTo = $request->headers->get('referer');
} else {
$redirectTo = $this->generateUrl('front_homepage');
}
return $this->redirect($redirectTo);
}
/**
* @param \Symfony\Component\HttpFoundation\Request $request
*/
public function addProductAjaxAction(Request $request)
{
$form = $this->createForm(AddProductFormType::class);
$form->handleRequest($request);
if ($form->isSubmitted() && $form->isValid()) {
try {
$formData = $form->getData();
$addProductResult = $this->cartFacade->addProductToCart($formData['productId'], (int)$formData['quantity']);
$this->sendAddProductResultFlashMessage($addProductResult);
$accessories = $this->productAccessoryFacade->getTopOfferedAccessories(
$addProductResult->getCartItem()->getProduct(),
$this->domain->getId(),
$this->currentCustomer->getPricingGroup(),
self::AFTER_ADD_WINDOW_ACCESSORIES_LIMIT
);
return $this->render('@ShopsysShop/Front/Inline/Cart/afterAddWindow.html.twig', [
'accessories' => $accessories,
'ACCESSORIES_ON_BUY' => ModuleList::ACCESSORIES_ON_BUY,
]);
} catch (\Shopsys\FrameworkBundle\Model\Product\Exception\ProductNotFoundException $ex) {
$this->getFlashMessageSender()->addErrorFlash(t('Selected product no longer available or doesn\'t exist.'));
} catch (\Shopsys\FrameworkBundle\Model\Cart\Exception\InvalidQuantityException $ex) {
$this->getFlashMessageSender()->addErrorFlash(t('Please enter valid quantity you want to add to cart.'));
} catch (\Shopsys\FrameworkBundle\Model\Cart\Exception\CartException $ex) {
$this->getFlashMessageSender()->addErrorFlash(t('Unable to add product to cart'));
}
} else {
            // Form errors list in flash message is a temporary solution.
            // We need to determine the cause of the error when adding a product to the cart.
$flashMessageBag = $this->get('shopsys.shop.component.flash_message.bag.front');
$formErrors = $this->errorExtractor->getAllErrorsAsArray($form, $flashMessageBag);
$this->getFlashMessageSender()->addErrorFlashTwig(
t('Unable to add product to cart:<br/><ul><li>{{ errors|raw }}</li></ul>'),
[
'errors' => implode('</li><li>', $formErrors),
]
);
}
return $this->forward('ShopsysShopBundle:Front/FlashMessage:index');
}
/**
* @param \Shopsys\FrameworkBundle\Model\Cart\AddProductResult $addProductResult
*/
private function sendAddProductResultFlashMessage(
AddProductResult $addProductResult
) {
if ($addProductResult->getIsNew()) {
$this->getFlashMessageSender()->addSuccessFlashTwig(
t('Product <strong>{{ name }}</strong> ({{ quantity|formatNumber }} {{ unitName }}) added to the cart'),
[
'name' => $addProductResult->getCartItem()->getName(),
'quantity' => $addProductResult->getAddedQuantity(),
'unitName' => $addProductResult->getCartItem()->getProduct()->getUnit()->getName(),
]
);
} else {
$this->getFlashMessageSender()->addSuccessFlashTwig(
t('Product <strong>{{ name }}</strong> added to the cart (total amount {{ quantity|formatNumber }} {{ unitName }})'),
[
'name' => $addProductResult->getCartItem()->getName(),
'quantity' => $addProductResult->getCartItem()->getQuantity(),
'unitName' => $addProductResult->getCartItem()->getProduct()->getUnit()->getName(),
]
);
}
}
/**
* @param \Symfony\Component\HttpFoundation\Request $request
* @param int $cartItemId
*/
public function deleteAction(Request $request, $cartItemId)
{
$cartItemId = (int)$cartItemId;
$token = new CsrfToken('front_cart_delete_' . $cartItemId, $request->query->get('_token'));
if ($this->get('security.csrf.token_manager')->isTokenValid($token)) {
try {
$productName = $this->cartFacade->getProductByCartItemId($cartItemId)->getName();
$this->cartFacade->deleteCartItem($cartItemId);
$this->getFlashMessageSender()->addSuccessFlashTwig(
t('Product {{ name }} removed from cart'),
['name' => $productName]
);
} catch (\Shopsys\FrameworkBundle\Model\Cart\Exception\InvalidCartItemException $ex) {
$this->getFlashMessageSender()->addErrorFlash(t('Unable to remove item from cart. The item is probably already removed.'));
}
} else {
$this->getFlashMessageSender()->addErrorFlash(
t('Unable to remove item from cart. The link for removing it probably expired, try it again.')
);
}
return $this->redirectToRoute('front_cart');
}
}
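// A hedged sketch of pairing `@param` with `@return` in the docblocks above;
// the Response return type is inferred from the render()/redirect() calls
// rather than verified:
//
// /**
//  * @param \Symfony\Component\HttpFoundation\Request $request
//  * @return \Symfony\Component\HttpFoundation\Response
//  */
// public function indexAction(Request $request)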
| 1 | 13,978 | If there are `@param` tags in a docblock, there should be a `@return` tag as well. (applies to the whole file) | shopsys-shopsys | php |
@@ -34,7 +34,8 @@ public enum BesuMetricCategory implements MetricCategory {
PRUNER("pruner"),
RPC("rpc"),
SYNCHRONIZER("synchronizer"),
- TRANSACTION_POOL("transaction_pool");
+ TRANSACTION_POOL("transaction_pool"),
+ VALIDATORS("validators");
private static final Optional<String> BESU_PREFIX = Optional.of("besu_");
public static final Set<MetricCategory> DEFAULT_METRIC_CATEGORIES; | 1 | /*
* Copyright ConsenSys AG.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*
* SPDX-License-Identifier: Apache-2.0
*/
package org.hyperledger.besu.metrics;
import org.hyperledger.besu.plugin.services.metrics.MetricCategory;
import java.util.EnumSet;
import java.util.Optional;
import java.util.Set;
import com.google.common.collect.ImmutableSet;
public enum BesuMetricCategory implements MetricCategory {
BLOCKCHAIN("blockchain"),
ETHEREUM("ethereum", false),
EXECUTORS("executors"),
NETWORK("network"),
PEERS("peers"),
PERMISSIONING("permissioning"),
KVSTORE_ROCKSDB("rocksdb"),
KVSTORE_ROCKSDB_STATS("rocksdb", false),
PRUNER("pruner"),
RPC("rpc"),
SYNCHRONIZER("synchronizer"),
TRANSACTION_POOL("transaction_pool");
private static final Optional<String> BESU_PREFIX = Optional.of("besu_");
public static final Set<MetricCategory> DEFAULT_METRIC_CATEGORIES;
static {
// Why not ROCKSDB and KVSTORE_ROCKSDB_STATS? They hurt performance under load.
final EnumSet<BesuMetricCategory> besuCategories =
EnumSet.complementOf(EnumSet.of(KVSTORE_ROCKSDB, KVSTORE_ROCKSDB_STATS));
DEFAULT_METRIC_CATEGORIES =
ImmutableSet.<MetricCategory>builder()
.addAll(besuCategories)
.addAll(EnumSet.allOf(StandardMetricCategory.class))
.build();
}
private final String name;
private final boolean besuSpecific;
BesuMetricCategory(final String name) {
this(name, true);
}
BesuMetricCategory(final String name, final boolean besuSpecific) {
this.name = name;
this.besuSpecific = besuSpecific;
}
@Override
public String getName() {
return name;
}
@Override
public Optional<String> getApplicationPrefix() {
return besuSpecific ? BESU_PREFIX : Optional.empty();
}
}
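// A speculative naming sketch, in case the VALIDATORS category added in the
// diff above is really IBFT2-specific (the constant and metric name below are
// hypothetical):
//
//     IBFT2_VALIDATORS("ibft2_validators"),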
| 1 | 19,732 | Is the concept of validators exclusive to IBFT2? I wonder if this category should be more explicitly linked to IBFT2. | hyperledger-besu | java |
@@ -14,9 +14,7 @@
*/
package org.hyperledger.besu.tests.acceptance.dsl.account;
-import org.hyperledger.besu.crypto.SECP256K1.KeyPair;
-import org.hyperledger.besu.crypto.SECP256K1.PrivateKey;
-import org.hyperledger.besu.crypto.SECP256K1.PublicKey;
+import org.hyperledger.besu.crypto.*;
import org.hyperledger.besu.ethereum.core.Address;
import org.hyperledger.besu.ethereum.core.Hash;
import org.hyperledger.besu.tests.acceptance.dsl.blockchain.Amount; | 1 | /*
* Copyright ConsenSys AG.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*
* SPDX-License-Identifier: Apache-2.0
*/
package org.hyperledger.besu.tests.acceptance.dsl.account;
import org.hyperledger.besu.crypto.SECP256K1.KeyPair;
import org.hyperledger.besu.crypto.SECP256K1.PrivateKey;
import org.hyperledger.besu.crypto.SECP256K1.PublicKey;
import org.hyperledger.besu.ethereum.core.Address;
import org.hyperledger.besu.ethereum.core.Hash;
import org.hyperledger.besu.tests.acceptance.dsl.blockchain.Amount;
import org.hyperledger.besu.tests.acceptance.dsl.condition.Condition;
import org.hyperledger.besu.tests.acceptance.dsl.condition.account.ExpectAccountBalance;
import org.hyperledger.besu.tests.acceptance.dsl.condition.account.ExpectAccountBalanceAtBlock;
import org.hyperledger.besu.tests.acceptance.dsl.condition.account.ExpectAccountBalanceNotChanging;
import org.hyperledger.besu.tests.acceptance.dsl.transaction.eth.EthTransactions;
import java.math.BigDecimal;
import java.math.BigInteger;
import java.util.Optional;
import org.apache.tuweni.bytes.Bytes32;
import org.web3j.crypto.Credentials;
import org.web3j.utils.Convert.Unit;
public class Account {
private final EthTransactions eth;
private final String name;
private final Optional<PrivateKey> privateKey;
private final Optional<PublicKey> publicKey;
private final Address address;
private long nonce = 0;
private Account(
final EthTransactions eth,
final String name,
final Address address,
final Optional<KeyPair> keyPair) {
this.name = name;
this.privateKey = keyPair.map(KeyPair::getPrivateKey);
this.publicKey = keyPair.map(KeyPair::getPublicKey);
this.address = address;
this.eth = eth;
}
private Account(final EthTransactions eth, final String name, final KeyPair keyPair) {
this(
eth,
name,
Address.extract(Hash.hash(keyPair.getPublicKey().getEncodedBytes())),
Optional.of(keyPair));
}
public static Account create(final EthTransactions eth, final Address address) {
return new Account(eth, address.toString(), address, Optional.empty());
}
public static Account create(final EthTransactions eth, final String name) {
return new Account(eth, name, KeyPair.generate());
}
static Account fromPrivateKey(
final EthTransactions eth, final String name, final String privateKey) {
return new Account(
eth, name, KeyPair.create(PrivateKey.create(Bytes32.fromHexString(privateKey))));
}
public Optional<Credentials> web3jCredentials() {
if (!publicKey.isPresent() || !privateKey.isPresent()) {
return Optional.empty();
}
return Optional.of(Credentials.create(privateKey.get().toString(), publicKey.get().toString()));
}
public Credentials web3jCredentialsOrThrow() {
return web3jCredentials()
.orElseThrow(() -> new IllegalStateException("Account is missing required signing key."));
}
public BigInteger getNextNonce() {
return BigInteger.valueOf(nonce++);
}
public String getAddress() {
return address.toString();
}
public Condition balanceEquals(final int expectedBalance) {
return new ExpectAccountBalance(eth, this, BigDecimal.valueOf(expectedBalance), Unit.ETHER);
}
public Condition balanceEquals(final Amount expectedBalance) {
return new ExpectAccountBalance(
eth, this, expectedBalance.getValue(), expectedBalance.getUnit());
}
public Condition balanceAtBlockEquals(final Amount expectedBalance, final BigInteger block) {
return new ExpectAccountBalanceAtBlock(
eth, this, block, expectedBalance.getValue(), expectedBalance.getUnit());
}
public Condition balanceDoesNotChange(final int startingBalance) {
return new ExpectAccountBalanceNotChanging(
eth, this, BigDecimal.valueOf(startingBalance), Unit.ETHER);
}
@Override
public String toString() {
return "Account{"
+ "eth="
+ eth
+ ", name='"
+ name
+ '\''
+ ", address="
+ address
+ ", nonce="
+ nonce
+ '}';
}
}
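// A sketch of the explicit-import form: these are the three nested SECP256K1
// types the wildcard import in the diff above replaced, and the ones this
// class actually references:
//
//     import org.hyperledger.besu.crypto.SECP256K1.KeyPair;
//     import org.hyperledger.besu.crypto.SECP256K1.PrivateKey;
//     import org.hyperledger.besu.crypto.SECP256K1.PublicKey;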
| 1 | 24,585 | Spotless is configured to reject star imports. Please replace with explicit imports. | hyperledger-besu | java |
@@ -115,6 +115,8 @@ void SYCLInternal::initialize(const sycl::device& d) {
m_maxThreadsPerSM =
d.template get_info<sycl::info::device::max_work_group_size>();
+ m_maxShmemPerBlock =
+ d.template get_info<sycl::info::device::local_mem_size>();
m_indirectKernelMem.reset(*m_queue);
m_indirectReducerMem.reset(*m_queue);
} else { | 1 | /*
//@HEADER
// ************************************************************************
//
// Kokkos v. 3.0
// Copyright (2020) National Technology & Engineering
// Solutions of Sandia, LLC (NTESS).
//
// Under the terms of Contract DE-NA0003525 with NTESS,
// the U.S. Government retains certain rights in this software.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// 1. Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// 2. Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
//
// 3. Neither the name of the Corporation nor the names of the
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
//
// ************************************************************************
//@HEADER
*/
#include <Kokkos_Concepts.hpp>
#include <SYCL/Kokkos_SYCL_Instance.hpp>
#include <Kokkos_SYCL.hpp>
#include <Kokkos_HostSpace.hpp>
#include <Kokkos_Serial.hpp>
#include <impl/Kokkos_Error.hpp>
namespace Kokkos {
namespace Experimental {
namespace Impl {
int SYCLInternal::was_finalized = 0;
SYCLInternal::~SYCLInternal() {
if (m_scratchSpace || m_scratchFlags) {
std::cerr << "Kokkos::Experimental::SYCL ERROR: Failed to call "
"Kokkos::Experimental::SYCL::finalize()"
<< std::endl;
std::cerr.flush();
}
m_scratchSpace = nullptr;
m_scratchFlags = nullptr;
}
int SYCLInternal::verify_is_initialized(const char* const label) const {
if (!is_initialized()) {
std::cerr << "Kokkos::Experimental::SYCL::" << label
<< " : ERROR device not initialized" << std::endl;
}
return is_initialized();
}
SYCLInternal& SYCLInternal::singleton() {
static SYCLInternal self;
return self;
}
// FIXME_SYCL
void SYCLInternal::initialize(const sycl::device& d) {
if (was_finalized)
Kokkos::abort("Calling SYCL::initialize after SYCL::finalize is illegal\n");
if (is_initialized()) return;
if (!HostSpace::execution_space::impl_is_initialized()) {
const std::string msg(
"SYCL::initialize ERROR : HostSpace::execution_space is not "
"initialized");
Kokkos::Impl::throw_runtime_exception(msg);
}
const bool ok_init = nullptr == m_scratchSpace || nullptr == m_scratchFlags;
const bool ok_dev = true;
if (ok_init && ok_dev) {
auto exception_handler = [](sycl::exception_list exceptions) {
bool asynchronous_error = false;
for (std::exception_ptr const& e : exceptions) {
try {
std::rethrow_exception(e);
} catch (sycl::exception const& e) {
std::cerr << e.what() << '\n';
asynchronous_error = true;
}
}
if (asynchronous_error)
Kokkos::Impl::throw_runtime_exception(
"There was an asynchronous SYCL error!\n");
};
m_queue.emplace(d, exception_handler);
std::cout << SYCL::SYCLDevice(d) << '\n';
m_maxThreadsPerSM =
d.template get_info<sycl::info::device::max_work_group_size>();
m_indirectKernelMem.reset(*m_queue);
m_indirectReducerMem.reset(*m_queue);
} else {
std::ostringstream msg;
msg << "Kokkos::Experimental::SYCL::initialize(...) FAILED";
if (!ok_init) {
msg << " : Already initialized";
}
Kokkos::Impl::throw_runtime_exception(msg.str());
}
}
void SYCLInternal::finalize() {
SYCL().fence();
was_finalized = 1;
if (nullptr != m_scratchSpace || nullptr != m_scratchFlags) {
// FIXME_SYCL
std::abort();
}
m_indirectKernelMem.reset();
m_indirectReducerMem.reset();
m_queue.reset();
}
} // namespace Impl
} // namespace Experimental
} // namespace Kokkos
| 1 | 27,390 | Remind me why you are using CUDA vocabulary when naming these variables. | kokkos-kokkos | cpp |
@@ -898,6 +898,16 @@ public final class DBReader {
}
final LongIntMap feedCounters = adapter.getFeedCounters(feedIds);
+ int feedFilter = UserPreferences.getFeedFilter();
+ if (feedFilter == UserPreferences.FEED_FILTER_COUNTER_ZERO) {
+ for (int i = 0; i < feeds.size(); i++) {
+ if (feedCounters.get(feeds.get(i).getId()) <= 0) {
+ feedCounters.delete(feeds.get(i).getId());
+ feeds.remove(i);
+ }
+ }
+ }
+
Comparator<Feed> comparator;
int feedOrder = UserPreferences.getFeedOrder();
if (feedOrder == UserPreferences.FEED_ORDER_COUNTER) { | 1 | package de.danoeh.antennapod.core.storage;
import android.database.Cursor;
import androidx.annotation.NonNull;
import androidx.annotation.Nullable;
import androidx.collection.ArrayMap;
import android.text.TextUtils;
import android.util.Log;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Comparator;
import java.util.Date;
import java.util.List;
import java.util.Map;
import de.danoeh.antennapod.core.feed.Chapter;
import de.danoeh.antennapod.core.feed.Feed;
import de.danoeh.antennapod.core.feed.FeedItem;
import de.danoeh.antennapod.core.feed.FeedMedia;
import de.danoeh.antennapod.core.feed.FeedPreferences;
import de.danoeh.antennapod.core.preferences.UserPreferences;
import de.danoeh.antennapod.core.service.download.DownloadStatus;
import de.danoeh.antennapod.core.util.LongIntMap;
import de.danoeh.antennapod.core.util.LongList;
import de.danoeh.antennapod.core.util.comparator.DownloadStatusComparator;
import de.danoeh.antennapod.core.util.comparator.FeedItemPubdateComparator;
import de.danoeh.antennapod.core.util.comparator.PlaybackCompletionDateComparator;
/**
* Provides methods for reading data from the AntennaPod database.
* In general, all database calls in DBReader-methods are executed on the caller's thread.
* This means that the caller should make sure that DBReader-methods are not executed on the GUI-thread.
*/
public final class DBReader {
private static final String TAG = "DBReader";
/**
* Maximum size of the list returned by {@link #getPlaybackHistory()}.
*/
public static final int PLAYBACK_HISTORY_SIZE = 50;
/**
* Maximum size of the list returned by {@link #getDownloadLog()}.
*/
private static final int DOWNLOAD_LOG_SIZE = 200;
private DBReader() {
}
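    // A minimal usage sketch of the threading rule in the class comment above;
    // how the result is handed back to the UI is the caller's concern, and only
    // DBReader.getFeedList() is real here:
    //
    //     new Thread(() -> {
    //         List<Feed> feeds = DBReader.getFeedList(); // off the GUI thread
    //         // post `feeds` back to the main thread via the caller's handler
    //     }).start();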
/**
* Returns a list of Feeds, sorted alphabetically by their title.
*
* @return A list of Feeds, sorted alphabetically by their title. A Feed-object
* of the returned list does NOT have its list of FeedItems yet. The FeedItem-list
* can be loaded separately with {@link #getFeedItemList(Feed)}.
*/
@NonNull
public static List<Feed> getFeedList() {
Log.d(TAG, "Extracting Feedlist");
PodDBAdapter adapter = PodDBAdapter.getInstance();
adapter.open();
try {
return getFeedList(adapter);
} finally {
adapter.close();
}
}
@NonNull
private static List<Feed> getFeedList(PodDBAdapter adapter) {
Cursor cursor = null;
try {
cursor = adapter.getAllFeedsCursor();
List<Feed> feeds = new ArrayList<>(cursor.getCount());
while (cursor.moveToNext()) {
Feed feed = extractFeedFromCursorRow(cursor);
feeds.add(feed);
}
return feeds;
} finally {
if (cursor != null) {
cursor.close();
}
}
}
/**
* Returns a list with the download URLs of all feeds.
*
* @return A list of Strings with the download URLs of all feeds.
*/
public static List<String> getFeedListDownloadUrls() {
PodDBAdapter adapter = PodDBAdapter.getInstance();
adapter.open();
Cursor cursor = null;
try {
cursor = adapter.getFeedCursorDownloadUrls();
List<String> result = new ArrayList<>(cursor.getCount());
while (cursor.moveToNext()) {
result.add(cursor.getString(1));
}
return result;
} finally {
if (cursor != null) {
cursor.close();
}
adapter.close();
}
}
/**
     * Loads additional data into the feed items from other database queries
     *
     * @param items the FeedItems that should have other data loaded
*/
public static void loadAdditionalFeedItemListData(List<FeedItem> items) {
loadTagsOfFeedItemList(items);
loadFeedDataOfFeedItemList(items);
}
private static void loadTagsOfFeedItemList(List<FeedItem> items) {
LongList favoriteIds = getFavoriteIDList();
LongList queueIds = getQueueIDList();
for (FeedItem item : items) {
if (favoriteIds.contains(item.getId())) {
item.addTag(FeedItem.TAG_FAVORITE);
}
if (queueIds.contains(item.getId())) {
item.addTag(FeedItem.TAG_QUEUE);
}
}
}
/**
* Takes a list of FeedItems and loads their corresponding Feed-objects from the database.
* The feedID-attribute of a FeedItem must be set to the ID of its feed or the method will
* not find the correct feed of an item.
*
* @param items The FeedItems whose Feed-objects should be loaded.
*/
private static void loadFeedDataOfFeedItemList(List<FeedItem> items) {
List<Feed> feeds = getFeedList();
Map<Long, Feed> feedIndex = new ArrayMap<>(feeds.size());
for (Feed feed : feeds) {
feedIndex.put(feed.getId(), feed);
}
for (FeedItem item : items) {
Feed feed = feedIndex.get(item.getFeedId());
if (feed == null) {
Log.w(TAG, "No match found for item with ID " + item.getId() + ". Feed ID was " + item.getFeedId());
}
item.setFeed(feed);
}
}
/**
* Loads the list of FeedItems for a certain Feed-object.
* This method should NOT be used if the FeedItems are not used.
*
* @param feed The Feed whose items should be loaded
* @return A list with the FeedItems of the Feed. The Feed-attribute of the FeedItems will already be set correctly.
* The method does NOT change the items-attribute of the feed.
*/
public static List<FeedItem> getFeedItemList(final Feed feed) {
Log.d(TAG, "getFeedItemList() called with: " + "feed = [" + feed + "]");
PodDBAdapter adapter = PodDBAdapter.getInstance();
adapter.open();
Cursor cursor = null;
try {
cursor = adapter.getAllItemsOfFeedCursor(feed);
List<FeedItem> items = extractItemlistFromCursor(adapter, cursor);
Collections.sort(items, new FeedItemPubdateComparator());
for (FeedItem item : items) {
item.setFeed(feed);
}
return items;
} finally {
if (cursor != null) {
cursor.close();
}
adapter.close();
}
}
public static List<FeedItem> extractItemlistFromCursor(Cursor itemlistCursor) {
Log.d(TAG, "extractItemlistFromCursor() called with: " + "itemlistCursor = [" + itemlistCursor + "]");
PodDBAdapter adapter = PodDBAdapter.getInstance();
adapter.open();
try {
return extractItemlistFromCursor(adapter, itemlistCursor);
} finally {
adapter.close();
}
}
@NonNull
private static List<FeedItem> extractItemlistFromCursor(PodDBAdapter adapter, Cursor cursor) {
List<FeedItem> result = new ArrayList<>(cursor.getCount());
if (cursor.moveToFirst()) {
int indexMediaId = cursor.getColumnIndexOrThrow(PodDBAdapter.SELECT_KEY_MEDIA_ID);
do {
FeedItem item = FeedItem.fromCursor(cursor);
result.add(item);
if (!cursor.isNull(indexMediaId)) {
item.setMedia(FeedMedia.fromCursor(cursor));
}
} while (cursor.moveToNext());
}
return result;
}
private static Feed extractFeedFromCursorRow(Cursor cursor) {
Feed feed = Feed.fromCursor(cursor);
FeedPreferences preferences = FeedPreferences.fromCursor(cursor);
feed.setPreferences(preferences);
return feed;
}
@NonNull
static List<FeedItem> getQueue(PodDBAdapter adapter) {
Log.d(TAG, "getQueue()");
Cursor cursor = null;
try {
cursor = adapter.getQueueCursor();
List<FeedItem> items = extractItemlistFromCursor(adapter, cursor);
loadAdditionalFeedItemListData(items);
return items;
} finally {
if (cursor != null) {
cursor.close();
}
}
}
/**
* Loads the IDs of the FeedItems in the queue. This method should be preferred over
* {@link #getQueue()} if the FeedItems of the queue are not needed.
*
* @return A list of IDs sorted by the same order as the queue.
*/
public static LongList getQueueIDList() {
Log.d(TAG, "getQueueIDList() called");
PodDBAdapter adapter = PodDBAdapter.getInstance();
adapter.open();
try {
return getQueueIDList(adapter);
} finally {
adapter.close();
}
}
private static LongList getQueueIDList(PodDBAdapter adapter) {
Cursor cursor = null;
try {
cursor = adapter.getQueueIDCursor();
LongList queueIds = new LongList(cursor.getCount());
while (cursor.moveToNext()) {
queueIds.add(cursor.getLong(0));
}
return queueIds;
} finally {
if (cursor != null) {
cursor.close();
}
}
}
/**
* Loads a list of the FeedItems in the queue. If the FeedItems of the queue are not used directly, consider using
* {@link #getQueueIDList()} instead.
*
* @return A list of FeedItems sorted by the same order as the queue.
*/
@NonNull
public static List<FeedItem> getQueue() {
Log.d(TAG, "getQueue() called");
PodDBAdapter adapter = PodDBAdapter.getInstance();
adapter.open();
try {
return getQueue(adapter);
} finally {
adapter.close();
}
}
/**
* Loads a list of FeedItems whose episode has been downloaded.
*
     * @return A list of FeedItems whose episode has been downloaded.
*/
@NonNull
public static List<FeedItem> getDownloadedItems() {
Log.d(TAG, "getDownloadedItems() called");
PodDBAdapter adapter = PodDBAdapter.getInstance();
adapter.open();
Cursor cursor = null;
try {
cursor = adapter.getDownloadedItemsCursor();
List<FeedItem> items = extractItemlistFromCursor(adapter, cursor);
loadAdditionalFeedItemListData(items);
Collections.sort(items, new FeedItemPubdateComparator());
return items;
} finally {
if (cursor != null) {
cursor.close();
}
adapter.close();
}
}
/**
* Loads a list of FeedItems whose episode has been played.
*
     * @return A list of FeedItems whose episode has been played.
*/
@NonNull
public static List<FeedItem> getPlayedItems() {
Log.d(TAG, "getPlayedItems() called");
PodDBAdapter adapter = PodDBAdapter.getInstance();
adapter.open();
Cursor cursor = null;
try {
cursor = adapter.getPlayedItemsCursor();
List<FeedItem> items = extractItemlistFromCursor(adapter, cursor);
loadAdditionalFeedItemListData(items);
return items;
} finally {
if (cursor != null) {
cursor.close();
}
adapter.close();
}
}
/**
* Loads a list of FeedItems that are considered new.
* Excludes items from feeds that do not have keep updated enabled.
*
* @param offset The first episode that should be loaded.
* @param limit The maximum number of episodes that should be loaded.
* @return A list of FeedItems that are considered new.
*/
public static List<FeedItem> getNewItemsList(int offset, int limit) {
Log.d(TAG, "getNewItemsList() called");
PodDBAdapter adapter = PodDBAdapter.getInstance();
adapter.open();
Cursor cursor = null;
try {
cursor = adapter.getNewItemsCursor(offset, limit);
List<FeedItem> items = extractItemlistFromCursor(adapter, cursor);
loadAdditionalFeedItemListData(items);
return items;
} finally {
if (cursor != null) {
cursor.close();
}
adapter.close();
}
}
/**
* Loads a list of favorite items.
*
* @param offset The first episode that should be loaded.
* @param limit The maximum number of episodes that should be loaded.
* @return A list of FeedItems that are marked as favorite.
*/
public static List<FeedItem> getFavoriteItemsList(int offset, int limit) {
Log.d(TAG, "getFavoriteItemsList() called");
PodDBAdapter adapter = PodDBAdapter.getInstance();
adapter.open();
Cursor cursor = null;
try {
cursor = adapter.getFavoritesCursor(offset, limit);
List<FeedItem> items = extractItemlistFromCursor(adapter, cursor);
loadAdditionalFeedItemListData(items);
return items;
} finally {
if (cursor != null) {
cursor.close();
}
adapter.close();
}
}
private static LongList getFavoriteIDList() {
Log.d(TAG, "getFavoriteIDList() called");
PodDBAdapter adapter = PodDBAdapter.getInstance();
adapter.open();
Cursor cursor = null;
try {
cursor = adapter.getFavoritesCursor(0, Integer.MAX_VALUE);
LongList favoriteIDs = new LongList(cursor.getCount());
while (cursor.moveToNext()) {
favoriteIDs.add(cursor.getLong(0));
}
return favoriteIDs;
} finally {
if (cursor != null) {
cursor.close();
}
adapter.close();
}
}
/**
* Loads a list of FeedItems sorted by pubDate in descending order.
*
* @param offset The first episode that should be loaded.
* @param limit The maximum number of episodes that should be loaded.
*/
@NonNull
public static List<FeedItem> getRecentlyPublishedEpisodes(int offset, int limit) {
Log.d(TAG, "getRecentlyPublishedEpisodes() called with: " + "offset = [" + offset + "]" + " limit = [" + limit + "]" );
PodDBAdapter adapter = PodDBAdapter.getInstance();
adapter.open();
Cursor cursor = null;
try {
cursor = adapter.getRecentlyPublishedItemsCursor(offset, limit);
List<FeedItem> items = extractItemlistFromCursor(adapter, cursor);
loadAdditionalFeedItemListData(items);
return items;
} finally {
if (cursor != null) {
cursor.close();
}
adapter.close();
}
}
/**
     * Loads the playback history from the database. A FeedItem is in the playback history if playback of the corresponding episode
* has been completed at least once.
*
* @return The playback history. The FeedItems are sorted by their media's playbackCompletionDate in descending order.
* The size of the returned list is limited by {@link #PLAYBACK_HISTORY_SIZE}.
*/
@NonNull
public static List<FeedItem> getPlaybackHistory() {
Log.d(TAG, "getPlaybackHistory() called");
PodDBAdapter adapter = PodDBAdapter.getInstance();
adapter.open();
Cursor mediaCursor = null;
Cursor itemCursor = null;
try {
mediaCursor = adapter.getCompletedMediaCursor(PLAYBACK_HISTORY_SIZE);
String[] itemIds = new String[mediaCursor.getCount()];
for (int i = 0; i < itemIds.length && mediaCursor.moveToPosition(i); i++) {
int index = mediaCursor.getColumnIndex(PodDBAdapter.KEY_FEEDITEM);
itemIds[i] = Long.toString(mediaCursor.getLong(index));
}
itemCursor = adapter.getFeedItemCursor(itemIds);
List<FeedItem> items = extractItemlistFromCursor(adapter, itemCursor);
loadAdditionalFeedItemListData(items);
Collections.sort(items, new PlaybackCompletionDateComparator());
return items;
} finally {
if (mediaCursor != null) {
mediaCursor.close();
}
if (itemCursor != null) {
itemCursor.close();
}
adapter.close();
}
}
/**
* Loads the download log from the database.
*
* @return A list with DownloadStatus objects that represent the download log.
* The size of the returned list is limited by {@link #DOWNLOAD_LOG_SIZE}.
*/
public static List<DownloadStatus> getDownloadLog() {
Log.d(TAG, "getDownloadLog() called");
PodDBAdapter adapter = PodDBAdapter.getInstance();
adapter.open();
Cursor cursor = null;
try {
cursor = adapter.getDownloadLogCursor(DOWNLOAD_LOG_SIZE);
List<DownloadStatus> downloadLog = new ArrayList<>(cursor.getCount());
while (cursor.moveToNext()) {
downloadLog.add(DownloadStatus.fromCursor(cursor));
}
Collections.sort(downloadLog, new DownloadStatusComparator());
return downloadLog;
} finally {
if (cursor != null) {
cursor.close();
}
adapter.close();
}
}
/**
* Loads the download log for a particular feed from the database.
*
* @param feedId Feed id for which the download log is loaded
* @return A list with DownloadStatus objects that represent the feed's download log,
* newest events first.
*/
public static List<DownloadStatus> getFeedDownloadLog(long feedId) {
Log.d(TAG, "getFeedDownloadLog() called with: " + "feed = [" + feedId + "]");
PodDBAdapter adapter = PodDBAdapter.getInstance();
adapter.open();
Cursor cursor = null;
try {
cursor = adapter.getDownloadLog(Feed.FEEDFILETYPE_FEED, feedId);
List<DownloadStatus> downloadLog = new ArrayList<>(cursor.getCount());
while (cursor.moveToNext()) {
downloadLog.add(DownloadStatus.fromCursor(cursor));
}
Collections.sort(downloadLog, new DownloadStatusComparator());
return downloadLog;
} finally {
if (cursor != null) {
cursor.close();
}
adapter.close();
}
}
/**
* Loads a specific Feed from the database.
*
* @param feedId The ID of the Feed
* @return The Feed or null if the Feed could not be found. The Feeds FeedItems will also be loaded from the
* database and the items-attribute will be set correctly.
*/
public static Feed getFeed(final long feedId) {
Log.d(TAG, "getFeed() called with: " + "feedId = [" + feedId + "]");
PodDBAdapter adapter = PodDBAdapter.getInstance();
adapter.open();
try {
return getFeed(feedId, adapter);
} finally {
adapter.close();
}
}
@Nullable
static Feed getFeed(final long feedId, PodDBAdapter adapter) {
Feed feed = null;
Cursor cursor = null;
try {
cursor = adapter.getFeedCursor(feedId);
if (cursor.moveToNext()) {
feed = extractFeedFromCursorRow(cursor);
feed.setItems(getFeedItemList(feed));
} else {
Log.e(TAG, "getFeed could not find feed with id " + feedId);
}
return feed;
} finally {
if (cursor != null) {
cursor.close();
}
}
}
@Nullable
private static FeedItem getFeedItem(final long itemId, PodDBAdapter adapter) {
Log.d(TAG, "Loading feeditem with id " + itemId);
FeedItem item = null;
Cursor cursor = null;
try {
cursor = adapter.getFeedItemCursor(Long.toString(itemId));
if (cursor.moveToNext()) {
List<FeedItem> list = extractItemlistFromCursor(adapter, cursor);
if (!list.isEmpty()) {
item = list.get(0);
loadAdditionalFeedItemListData(list);
}
}
return item;
} finally {
if (cursor != null) {
cursor.close();
}
}
}
/**
* Loads a specific FeedItem from the database. This method should not be used for loading more
* than one FeedItem because this method might query the database several times for each item.
*
* @param itemId The ID of the FeedItem
* @return The FeedItem or null if the FeedItem could not be found.
*/
@Nullable
public static FeedItem getFeedItem(final long itemId) {
Log.d(TAG, "getFeedItem() called with: " + "itemId = [" + itemId + "]");
PodDBAdapter adapter = PodDBAdapter.getInstance();
adapter.open();
try {
return getFeedItem(itemId, adapter);
} finally {
adapter.close();
}
}
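    // A sketch of the batch pattern the warning above points to, mirroring
    // getPlaybackHistory(): fetch many items through a single cursor instead of
    // calling getFeedItem(id) in a loop (the ids below are made up):
    //
    //     Cursor cursor = adapter.getFeedItemCursor(new String[] {"1", "2", "3"});
    //     List<FeedItem> items = extractItemlistFromCursor(adapter, cursor);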
/**
* Loads a specific FeedItem from the database.
*
* @param podcastUrl the corresponding feed's url
* @param episodeUrl the feed item's url
* @return The FeedItem or null if the FeedItem could not be found.
* Does NOT load additional attributes like feed or queue state.
*/
@Nullable
private static FeedItem getFeedItemByUrl(final String podcastUrl, final String episodeUrl, PodDBAdapter adapter) {
Log.d(TAG, "Loading feeditem with podcast url " + podcastUrl + " and episode url " + episodeUrl);
Cursor cursor = null;
try {
cursor = adapter.getFeedItemCursor(podcastUrl, episodeUrl);
if (!cursor.moveToNext()) {
return null;
}
List<FeedItem> list = extractItemlistFromCursor(adapter, cursor);
if (!list.isEmpty()) {
return list.get(0);
}
return null;
} finally {
if (cursor != null) {
cursor.close();
}
}
}
/**
* Returns credentials based on image URL
*
* @param imageUrl The URL of the image
* @return Credentials in format "Username:Password", empty String if no authorization given
*/
public static String getImageAuthentication(final String imageUrl) {
Log.d(TAG, "getImageAuthentication() called with: " + "imageUrl = [" + imageUrl + "]");
PodDBAdapter adapter = PodDBAdapter.getInstance();
adapter.open();
try {
return getImageAuthentication(imageUrl, adapter);
} finally {
adapter.close();
}
}
private static String getImageAuthentication(final String imageUrl, PodDBAdapter adapter) {
String credentials = null;
Cursor cursor = null;
try {
cursor = adapter.getImageAuthenticationCursor(imageUrl);
if (cursor.moveToFirst()) {
String username = cursor.getString(0);
String password = cursor.getString(1);
if (!TextUtils.isEmpty(username) && password != null) {
credentials = username + ":" + password;
} else {
credentials = "";
}
} else {
credentials = "";
}
} finally {
if (cursor != null) {
cursor.close();
}
}
return credentials;
}
/**
* Loads a specific FeedItem from the database.
*
* @param podcastUrl the corresponding feed's url
* @param episodeUrl the feed item's url
* @return The FeedItem or null if the FeedItem could not be found.
* Does NOT load additional attributes like feed or queue state.
*/
public static FeedItem getFeedItemByUrl(final String podcastUrl, final String episodeUrl) {
Log.d(TAG, "getFeedItem() called with: " + "podcastUrl = [" + podcastUrl + "], episodeUrl = [" + episodeUrl + "]");
PodDBAdapter adapter = PodDBAdapter.getInstance();
adapter.open();
try {
return getFeedItemByUrl(podcastUrl, episodeUrl, adapter);
} finally {
adapter.close();
}
}
/**
* Loads shownotes information about a FeedItem.
*
* @param item The FeedItem
*/
public static void loadDescriptionOfFeedItem(final FeedItem item) {
Log.d(TAG, "loadDescriptionOfFeedItem() called with: " + "item = [" + item + "]");
PodDBAdapter adapter = PodDBAdapter.getInstance();
adapter.open();
Cursor cursor = null;
try {
cursor = adapter.getDescriptionOfItem(item);
if (cursor.moveToFirst()) {
int indexDescription = cursor.getColumnIndex(PodDBAdapter.KEY_DESCRIPTION);
String description = cursor.getString(indexDescription);
int indexContentEncoded = cursor.getColumnIndex(PodDBAdapter.KEY_CONTENT_ENCODED);
String contentEncoded = cursor.getString(indexContentEncoded);
item.setDescription(description);
item.setContentEncoded(contentEncoded);
}
} finally {
if (cursor != null) {
cursor.close();
}
adapter.close();
}
}
/**
* Loads the list of chapters that belongs to this FeedItem if available. This method overwrites
* any chapters that this FeedItem has. If no chapters were found in the database, the chapters
* reference of the FeedItem will be set to null.
*
* @param item The FeedItem
*/
public static void loadChaptersOfFeedItem(final FeedItem item) {
Log.d(TAG, "loadChaptersOfFeedItem() called with: " + "item = [" + item + "]");
PodDBAdapter adapter = PodDBAdapter.getInstance();
adapter.open();
try {
loadChaptersOfFeedItem(adapter, item);
} finally {
adapter.close();
}
}
private static void loadChaptersOfFeedItem(PodDBAdapter adapter, FeedItem item) {
try (Cursor cursor = adapter.getSimpleChaptersOfFeedItemCursor(item)) {
int chaptersCount = cursor.getCount();
if (chaptersCount == 0) {
item.setChapters(null);
return;
}
item.setChapters(new ArrayList<>(chaptersCount));
while (cursor.moveToNext()) {
item.getChapters().add(Chapter.fromCursor(cursor));
}
}
}
/**
* Returns the number of downloaded episodes.
*
* @return The number of downloaded episodes.
*/
public static int getNumberOfDownloadedEpisodes() {
Log.d(TAG, "getNumberOfDownloadedEpisodes() called with: " + "");
PodDBAdapter adapter = PodDBAdapter.getInstance();
adapter.open();
try {
return adapter.getNumberOfDownloadedEpisodes();
} finally {
adapter.close();
}
}
/**
* Searches the DB for a FeedMedia of the given id.
*
* @param mediaId The id of the object
* @return The found object, or null if it does not exist
*/
@Nullable
public static FeedMedia getFeedMedia(final long mediaId) {
PodDBAdapter adapter = PodDBAdapter.getInstance();
adapter.open();
try (Cursor mediaCursor = adapter.getSingleFeedMediaCursor(mediaId)) {
if (!mediaCursor.moveToFirst()) {
return null;
}
int indexFeedItem = mediaCursor.getColumnIndex(PodDBAdapter.KEY_FEEDITEM);
long itemId = mediaCursor.getLong(indexFeedItem);
FeedMedia media = FeedMedia.fromCursor(mediaCursor);
FeedItem item = getFeedItem(itemId);
if (item != null) {
media.setItem(item);
item.setMedia(media);
}
return media;
} finally {
adapter.close();
}
}
/**
* Searches the DB for statistics.
*
* @return The list of statistics objects
*/
@NonNull
public static List<StatisticsItem> getStatistics() {
PodDBAdapter adapter = PodDBAdapter.getInstance();
adapter.open();
List<StatisticsItem> feedTime = new ArrayList<>();
List<Feed> feeds = getFeedList();
for (Feed feed : feeds) {
long feedPlayedTimeCountAll = 0;
long feedPlayedTime = 0;
long feedTotalTime = 0;
long episodes = 0;
long episodesStarted = 0;
long episodesStartedIncludingMarked = 0;
long totalDownloadSize = 0;
List<FeedItem> items = getFeed(feed.getId()).getItems();
for (FeedItem item : items) {
FeedMedia media = item.getMedia();
if (media == null) {
continue;
}
feedPlayedTime += media.getPlayedDuration() / 1000;
if (item.isPlayed()) {
feedPlayedTimeCountAll += media.getDuration() / 1000;
} else {
feedPlayedTimeCountAll += media.getPosition() / 1000;
}
if (media.getPlaybackCompletionDate() != null || media.getPlayedDuration() > 0) {
episodesStarted++;
}
if (item.isPlayed() || media.getPosition() != 0) {
episodesStartedIncludingMarked++;
}
feedTotalTime += media.getDuration() / 1000;
if (media.isDownloaded()) {
totalDownloadSize = totalDownloadSize + media.getSize();
}
episodes++;
}
feedTime.add(new StatisticsItem(
feed, feedTotalTime, feedPlayedTime, feedPlayedTimeCountAll, episodes,
episodesStarted, episodesStartedIncludingMarked, totalDownloadSize));
}
adapter.close();
return feedTime;
}
/**
* Returns data necessary for displaying the navigation drawer. This includes
* the list of subscriptions, the number of items in the queue and the number of unread
* items.
*/
@NonNull
public static NavDrawerData getNavDrawerData() {
Log.d(TAG, "getNavDrawerData() called with: " + "");
PodDBAdapter adapter = PodDBAdapter.getInstance();
adapter.open();
List<Feed> feeds = getFeedList(adapter);
long[] feedIds = new long[feeds.size()];
for (int i = 0; i < feeds.size(); i++) {
feedIds[i] = feeds.get(i).getId();
}
final LongIntMap feedCounters = adapter.getFeedCounters(feedIds);
Comparator<Feed> comparator;
int feedOrder = UserPreferences.getFeedOrder();
if (feedOrder == UserPreferences.FEED_ORDER_COUNTER) {
comparator = (lhs, rhs) -> {
long counterLhs = feedCounters.get(lhs.getId());
long counterRhs = feedCounters.get(rhs.getId());
if (counterLhs > counterRhs) {
// reverse natural order: podcast with most unplayed episodes first
return -1;
} else if (counterLhs == counterRhs) {
return lhs.getTitle().compareToIgnoreCase(rhs.getTitle());
} else {
return 1;
}
};
} else if (feedOrder == UserPreferences.FEED_ORDER_ALPHABETICAL) {
comparator = (lhs, rhs) -> {
String t1 = lhs.getTitle();
String t2 = rhs.getTitle();
if (t1 == null) {
return 1;
} else if (t2 == null) {
return -1;
} else {
return t1.compareToIgnoreCase(t2);
}
};
} else if (feedOrder == UserPreferences.FEED_ORDER_MOST_PLAYED) {
final LongIntMap playedCounters = adapter.getPlayedEpisodesCounters(feedIds);
comparator = (lhs, rhs) -> {
long counterLhs = playedCounters.get(lhs.getId());
long counterRhs = playedCounters.get(rhs.getId());
if (counterLhs > counterRhs) {
// podcast with most played episodes first
return -1;
} else if (counterLhs == counterRhs) {
return lhs.getTitle().compareToIgnoreCase(rhs.getTitle());
} else {
return 1;
}
};
} else {
comparator = (lhs, rhs) -> {
if (lhs.getItems() == null || lhs.getItems().size() == 0) {
List<FeedItem> items = DBReader.getFeedItemList(lhs);
lhs.setItems(items);
}
if (rhs.getItems() == null || rhs.getItems().size() == 0) {
List<FeedItem> items = DBReader.getFeedItemList(rhs);
rhs.setItems(items);
}
if (lhs.getMostRecentItem() == null) {
return 1;
} else if (rhs.getMostRecentItem() == null) {
return -1;
} else {
Date d1 = lhs.getMostRecentItem().getPubDate();
Date d2 = rhs.getMostRecentItem().getPubDate();
return d2.compareTo(d1);
}
};
}
Collections.sort(feeds, comparator);
int queueSize = adapter.getQueueSize();
int numNewItems = adapter.getNumberOfNewItems();
int numDownloadedItems = adapter.getNumberOfDownloadedEpisodes();
NavDrawerData result = new NavDrawerData(feeds, queueSize, numNewItems, numDownloadedItems,
feedCounters, UserPreferences.getEpisodeCleanupAlgorithm().getReclaimableItems());
adapter.close();
return result;
}
public static class NavDrawerData {
public final List<Feed> feeds;
public final int queueSize;
public final int numNewItems;
public final int numDownloadedItems;
public final LongIntMap feedCounters;
public final int reclaimableSpace;
public NavDrawerData(List<Feed> feeds,
int queueSize,
int numNewItems,
int numDownloadedItems,
LongIntMap feedIndicatorValues,
int reclaimableSpace) {
this.feeds = feeds;
this.queueSize = queueSize;
this.numNewItems = numNewItems;
this.numDownloadedItems = numDownloadedItems;
this.feedCounters = feedIndicatorValues;
this.reclaimableSpace = reclaimableSpace;
}
}
}
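// A sketch of the filtering loop from the patch at the top of this record,
// rewritten to iterate backwards: with forward iteration, feeds.remove(i)
// shifts the next element into slot i and the loop then skips it.
//
//     for (int i = feeds.size() - 1; i >= 0; i--) {
//         if (feedCounters.get(feeds.get(i).getId()) <= 0) {
//             feedCounters.delete(feeds.get(i).getId());
//             feeds.remove(i);
//         }
//     }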
| 1 | 16,688 | Please turn the iteration order around (iterate from `size...0` instead of `0...size`). The reason is that this sometimes skips indices when removing an item. You can therefore end up with feeds that have counter 0 and are still displayed. | AntennaPod-AntennaPod | java |
@@ -620,7 +620,7 @@ def execute_reentrant_pipeline(pipeline, typed_environment, throw_on_error, reen
def get_subset_pipeline(pipeline, solid_subset):
check.inst_param(pipeline, 'pipeline', PipelineDefinition)
check.opt_list_param(solid_subset, 'solid_subset', of_type=str)
- return pipeline if solid_subset is None else build_sub_pipeline(pipeline, solid_subset)
+ return pipeline if not solid_subset else build_sub_pipeline(pipeline, solid_subset)
def create_typed_environment(pipeline, environment=None): | 1 | '''
Naming conventions:
For public functions:
execute_*
These represent functions which do purely in-memory compute. They will evaluate expectations,
the core transform, and exercise all logging and metrics tracking (outside of outputs), but they
will not invoke *any* outputs (and their APIs don't allow the user to).
'''
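# A minimal usage sketch of the execute_* contract described above; pipeline
# construction is elided and the exact call shape is assumed from this
# module's helpers:
#
#     result = execute_pipeline(my_pipeline, environment={'context': {...}})
#     assert result.success  # in-memory compute ran; no outputs were invoked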
# too many lines
# pylint: disable=C0302
from collections import defaultdict, namedtuple
from contextlib import contextmanager
import itertools
import inspect
import uuid
from contextlib2 import ExitStack
import six
from dagster import check
from .definitions import (
DEFAULT_OUTPUT,
ContextCreationExecutionInfo,
DependencyDefinition,
PipelineDefinition,
Solid,
SolidInstance,
solids_in_topological_order,
)
from .execution_context import ExecutionContext, ReentrantInfo, RuntimeExecutionContext
from .errors import DagsterInvariantViolationError, DagsterUserCodeExecutionError
from .types.evaluator import EvaluationError, evaluate_config_value, friendly_string_for_error
from .events import construct_event_logger
from .execution_plan.create import create_execution_plan_core, create_subplan
from .execution_plan.objects import (
ExecutionPlan,
ExecutionPlanInfo,
ExecutionPlanSubsetInfo,
StepResult,
StepTag,
)
from .execution_plan.simple_engine import execute_plan_core
from .system_config.objects import EnvironmentConfig
from .system_config.types import construct_environment_config
class PipelineExecutionResult(object):
'''Result of execution of the whole pipeline. Returned eg by :py:func:`execute_pipeline`.
Attributes:
pipeline (PipelineDefinition): Pipeline that was executed
context (ExecutionContext): ExecutionContext of that particular Pipeline run.
result_list (list[SolidExecutionResult]): List of results for each pipeline solid.
'''
def __init__(self, pipeline, context, result_list):
self.pipeline = check.inst_param(pipeline, 'pipeline', PipelineDefinition)
self.context = check.inst_param(context, 'context', RuntimeExecutionContext)
self.result_list = check.list_param(
result_list, 'result_list', of_type=SolidExecutionResult
)
self.run_id = context.run_id
@property
def success(self):
'''Whether the pipeline execution was successful at all steps'''
return all([result.success for result in self.result_list])
def result_for_solid(self, name):
'''Get a :py:class:`SolidExecutionResult` for a given solid name.
Returns:
SolidExecutionResult
'''
check.str_param(name, 'name')
if not self.pipeline.has_solid(name):
raise DagsterInvariantViolationError(
                'Tried to get result for solid {name} in {pipeline}. No such solid.'.format(
name=name, pipeline=self.pipeline.display_name
)
)
for result in self.result_list:
if result.solid.name == name:
return result
raise DagsterInvariantViolationError(
'Did not find result for solid {name} in pipeline execution result'.format(name=name)
)
class SolidExecutionResult(object):
'''Execution result for one solid of the pipeline.
Attributes:
context (ExecutionContext): ExecutionContext of that particular Pipeline run.
solid (SolidDefinition): Solid for which this result is
'''
def __init__(self, context, solid, step_results_by_tag):
self.context = check.inst_param(context, 'context', RuntimeExecutionContext)
self.solid = check.inst_param(solid, 'solid', Solid)
self.step_results_by_tag = check.dict_param(
step_results_by_tag, 'step_results_by_tag', key_type=StepTag, value_type=list
)
@property
def transforms(self):
return self.step_results_by_tag.get(StepTag.TRANSFORM, [])
@property
def input_expectations(self):
return self.step_results_by_tag.get(StepTag.INPUT_EXPECTATION, [])
@property
def output_expectations(self):
return self.step_results_by_tag.get(StepTag.OUTPUT_EXPECTATION, [])
@staticmethod
def from_results(context, results):
check.inst_param(context, 'context', RuntimeExecutionContext)
results = check.list_param(results, 'results', StepResult)
if results:
step_results_by_tag = defaultdict(list)
solid = None
for result in results:
if solid is None:
solid = result.step.solid
check.invariant(result.step.solid is solid, 'Must all be from same solid')
for result in results:
step_results_by_tag[result.tag].append(result)
return SolidExecutionResult(
context=context,
solid=results[0].step.solid,
step_results_by_tag=dict(step_results_by_tag),
)
else:
check.failed("Cannot create SolidExecutionResult from empty list")
@property
def success(self):
'''Whether the solid execution was successful'''
return all(
[
result.success
for result in itertools.chain(
self.input_expectations, self.output_expectations, self.transforms
)
]
)
@property
def transformed_values(self):
'''Return dictionary of transformed results, with keys being output names.
Returns None if execution result isn't a success.'''
if self.success and self.transforms:
return {
result.success_data.output_name: result.success_data.value
for result in self.transforms
}
else:
return None
def transformed_value(self, output_name=DEFAULT_OUTPUT):
'''Returns transformed value either for DEFAULT_OUTPUT or for the output
given as output_name. Returns None if execution result isn't a success'''
check.str_param(output_name, 'output_name')
if not self.solid.definition.has_output(output_name):
raise DagsterInvariantViolationError(
'{output_name} not defined in solid {solid}'.format(
output_name=output_name, solid=self.solid.name
)
)
if self.success:
for result in self.transforms:
if result.success_data.output_name == output_name:
return result.success_data.value
raise DagsterInvariantViolationError(
(
'Did not find result {output_name} in solid {self.solid.name} '
'execution result'
).format(output_name=output_name, self=self)
)
else:
return None
def reraise_user_error(self):
if not self.success:
for result in itertools.chain(
self.input_expectations, self.output_expectations, self.transforms
):
if not result.success:
if isinstance(result.failure_data.dagster_error, DagsterUserCodeExecutionError):
six.reraise(*result.failure_data.dagster_error.original_exc_info)
else:
raise result.failure_data.dagster_error
@property
def dagster_error(self):
        '''Returns the exception that occurred during this solid's execution, if any'''
for result in itertools.chain(
self.input_expectations, self.output_expectations, self.transforms
):
if not result.success:
return result.failure_data.dagster_error
def create_execution_plan(pipeline, env_config=None):
check.inst_param(pipeline, 'pipeline', PipelineDefinition)
check.opt_dict_param(env_config, 'env_config', key_type=str)
typed_environment = create_typed_environment(pipeline, env_config)
return create_execution_plan_with_typed_environment(pipeline, typed_environment)
def create_execution_plan_with_typed_environment(pipeline, typed_environment):
check.inst_param(pipeline, 'pipeline', PipelineDefinition)
check.inst_param(typed_environment, 'environment', EnvironmentConfig)
with yield_context(pipeline, typed_environment) as context:
return create_execution_plan_core(ExecutionPlanInfo(context, pipeline, typed_environment))
def get_run_id(reentrant_info):
check.opt_inst_param(reentrant_info, 'reentrant_info', ReentrantInfo)
if reentrant_info and reentrant_info.run_id:
return reentrant_info.run_id
else:
return str(uuid.uuid4())
def merge_two_dicts(left, right):
result = left.copy()
result.update(right)
return result
def get_context_stack(user_context_params, reentrant_info):
check.inst(user_context_params, ExecutionContext)
check.opt_inst_param(reentrant_info, 'reentrant_info', ReentrantInfo)
if reentrant_info and reentrant_info.context_stack:
user_keys = set(user_context_params.context_stack.keys())
reentrant_keys = set(reentrant_info.context_stack.keys())
if not user_keys.isdisjoint(reentrant_keys):
raise DagsterInvariantViolationError(
(
'You have specified re-entrant keys and user-defined keys '
'that overlap. User keys: {user_keys}. Reentrant keys: '
'{reentrant_keys}.'
).format(user_keys=user_keys, reentrant_keys=reentrant_keys)
)
return merge_two_dicts(user_context_params.context_stack, reentrant_info.context_stack)
else:
return user_context_params.context_stack
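# Example of the disjointness rule above (a sketch; key names are illustrative):
# if the user-supplied context_stack contains {'user': 'alice'} and the
# reentrant context_stack also defines 'user', the run fails with
# DagsterInvariantViolationError; otherwise the two dicts are merged.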
ResourceCreationInfo = namedtuple('ResourceCreationInfo', 'config run_id')
def _ensure_gen(thing_or_gen):
if not inspect.isgenerator(thing_or_gen):
def _gen_thing():
yield thing_or_gen
return _gen_thing()
return thing_or_gen
@contextmanager
def with_maybe_gen(thing_or_gen):
gen = _ensure_gen(thing_or_gen)
try:
thing = next(gen)
except StopIteration:
check.failed('Must yield one item. You did not yield anything.')
yield thing
stopped = False
try:
next(gen)
except StopIteration:
stopped = True
check.invariant(stopped, 'Must yield one item. Yielded more than one item')
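# with_maybe_gen accepts either a plain value or a generator that yields exactly
# one value; the generator form gets to run teardown code after the with-block.
# A hedged sketch of both forms (names are illustrative only):
#
#     def make_context(info):
#         return ExecutionContext(...)   # plain-value form
#
#     def make_context_with_cleanup(info):
#         handle = acquire()
#         yield ExecutionContext(...)    # generator form
#         handle.release()               # runs after the with-block exits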
@contextmanager
def yield_context(pipeline, environment, reentrant_info=None):
check.inst_param(pipeline, 'pipeline', PipelineDefinition)
check.inst_param(environment, 'environment', EnvironmentConfig)
check.opt_inst_param(reentrant_info, 'reentrant_info', ReentrantInfo)
context_definition = pipeline.context_definitions[environment.context.name]
run_id = get_run_id(reentrant_info)
ec_or_gen = context_definition.context_fn(
ContextCreationExecutionInfo(
config=environment.context.config, pipeline_def=pipeline, run_id=run_id
)
)
with with_maybe_gen(ec_or_gen) as execution_context:
check.inst(execution_context, ExecutionContext)
with _create_resources(
pipeline, context_definition, environment, execution_context, run_id
) as resources:
loggers = _create_loggers(reentrant_info, execution_context)
yield RuntimeExecutionContext(
run_id=get_run_id(reentrant_info),
loggers=loggers,
resources=resources,
context_stack=get_context_stack(execution_context, reentrant_info),
)
def _create_loggers(reentrant_info, execution_context):
if reentrant_info and reentrant_info.event_callback:
return execution_context.loggers + [construct_event_logger(reentrant_info.event_callback)]
else:
return execution_context.loggers
@contextmanager
def _create_resources(pipeline_def, context_def, environment, execution_context, run_id):
if not context_def.resources:
yield execution_context.resources
return
resources = {}
check.invariant(
not execution_context.resources,
(
'If resources explicitly specified on context definition, the context '
'creation function should not return resources as a property of the '
'ExecutionContext.'
),
)
    # See https://bit.ly/2zIXyqw
    # The "ExitStack" allows one to stack up N context managers and then yield
    # something. We do this so that resources can clean up after themselves. We
    # can potentially have many resources, so we need this abstraction.
with ExitStack() as stack:
for resource_name in context_def.resources.keys():
resource_obj_or_gen = get_resource_or_gen(
context_def, resource_name, environment, run_id
)
resource_obj = stack.enter_context(with_maybe_gen(resource_obj_or_gen))
resources[resource_name] = resource_obj
context_name = environment.context.name
resources_type = pipeline_def.context_definitions[context_name].resources_type
yield resources_type(**resources)
def get_resource_or_gen(context_definition, resource_name, environment, run_id):
resource_def = context_definition.resources[resource_name]
    # TODO: apply default values from the resource config definition
resource_config = environment.context.resources.get(resource_name, {}).get('config')
return resource_def.resource_fn(ResourceCreationInfo(resource_config, run_id))
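# Because the value above is routed through with_maybe_gen, a resource_fn may
# likewise either return a resource or yield one for cleanup. Hedged sketch
# (the connect/close API is hypothetical):
#
#     def db_resource(info):  # info is a ResourceCreationInfo(config, run_id)
#         conn = connect(info.config['url'])
#         yield conn
#         conn.close()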
def _do_iterate_pipeline(pipeline, context, typed_environment, throw_on_error=True):
check.inst(context, RuntimeExecutionContext)
pipeline_success = True
with context.value('pipeline', pipeline.display_name):
context.events.pipeline_start()
execution_plan = create_execution_plan_core(
ExecutionPlanInfo(context, pipeline, typed_environment)
)
steps = list(execution_plan.topological_steps())
if not steps:
context.debug(
'Pipeline {pipeline} has no nodes and no execution will happen'.format(
pipeline=pipeline.display_name
)
)
context.events.pipeline_success()
return
context.debug(
'About to execute the compute node graph in the following order {order}'.format(
order=[step.key for step in steps]
)
)
check.invariant(len(steps[0].step_inputs) == 0)
for solid_result in _process_step_results(
context, pipeline, execute_plan_core(context, execution_plan)
):
if throw_on_error and not solid_result.success:
solid_result.reraise_user_error()
if not solid_result.success:
pipeline_success = False
yield solid_result
if pipeline_success:
context.events.pipeline_success()
else:
context.events.pipeline_failure()
def execute_pipeline_iterator(
pipeline, environment=None, throw_on_error=True, reentrant_info=None, solid_subset=None
):
'''Returns iterator that yields :py:class:`SolidExecutionResult` for each
solid executed in the pipeline.
This is intended to allow the caller to do things between each executed
node. For the 'synchronous' API, see :py:func:`execute_pipeline`.
    Parameters:
      pipeline (PipelineDefinition): pipeline to run
      environment (dict): the environment that parameterizes this run
      throw_on_error (bool): if True, re-raise on solid failure instead of yielding a failed result
      reentrant_info (ReentrantInfo): optional metadata for re-entrant runs
      solid_subset (list[str]): optionally restrict execution to the named solids
    '''
check.inst_param(pipeline, 'pipeline', PipelineDefinition)
check.opt_dict_param(environment, 'environment')
check.bool_param(throw_on_error, 'throw_on_error')
check.opt_inst_param(reentrant_info, 'reentrant_info', ReentrantInfo)
check.opt_list_param(solid_subset, 'solid_subset', of_type=str)
pipeline_to_execute = get_subset_pipeline(pipeline, solid_subset)
typed_environment = create_typed_environment(pipeline_to_execute, environment)
with yield_context(pipeline_to_execute, typed_environment, reentrant_info) as context:
for solid_result in _do_iterate_pipeline(
pipeline_to_execute, context, typed_environment, throw_on_error
):
yield solid_result
def _process_step_results(context, pipeline, step_results):
check.inst_param(context, 'context', RuntimeExecutionContext)
check.inst_param(pipeline, 'pipeline', PipelineDefinition)
step_results_by_solid_name = defaultdict(list)
for step_result in step_results:
step_results_by_solid_name[step_result.step.solid.name].append(step_result)
for topo_solid in solids_in_topological_order(pipeline):
if topo_solid.name in step_results_by_solid_name:
yield SolidExecutionResult.from_results(
context, step_results_by_solid_name[topo_solid.name]
)
class PipelineConfigEvaluationError(Exception):
def __init__(self, pipeline, errors, config_value, *args, **kwargs):
self.pipeline = check.inst_param(pipeline, 'pipeline', PipelineDefinition)
self.errors = check.list_param(errors, 'errors', of_type=EvaluationError)
self.config_value = config_value
error_msg = 'Pipeline "{pipeline}" config errors:'.format(pipeline=pipeline.name)
error_messages = []
for i_error, error in enumerate(self.errors):
error_message = friendly_string_for_error(error)
error_messages.append(error_message)
error_msg += '\n Error {i_error}: {error_message}'.format(
i_error=i_error + 1, error_message=error_message
)
self.message = error_msg
self.error_messages = error_messages
super(PipelineConfigEvaluationError, self).__init__(error_msg, *args, **kwargs)
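    # The assembled message looks roughly like (a sketch; error text illustrative):
    #
    #     Pipeline "my_pipeline" config errors:
    #         Error 1: Missing required field "solids" ...
    #         Error 2: ...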
def execute_plan(pipeline, execution_plan, environment=None, subset_info=None, reentrant_info=None):
check.inst_param(pipeline, 'pipeline', PipelineDefinition)
check.inst_param(execution_plan, 'execution_plan', ExecutionPlan)
check.opt_dict_param(environment, 'environment')
check.opt_inst_param(subset_info, 'subset_info', ExecutionPlanSubsetInfo)
check.opt_inst_param(reentrant_info, 'reentrant_info', ReentrantInfo)
typed_environment = create_typed_environment(pipeline, environment)
with yield_context(pipeline, typed_environment, reentrant_info) as context:
plan_to_execute = (
create_subplan(
ExecutionPlanInfo(
context=context, pipeline=pipeline, environment=typed_environment
),
execution_plan,
subset_info,
)
if subset_info
else execution_plan
)
return list(execute_plan_core(context, plan_to_execute))
def execute_pipeline(
pipeline, environment=None, throw_on_error=True, reentrant_info=None, solid_subset=None
):
'''
"Synchronous" version of :py:func:`execute_pipeline_iterator`.
    Note: throw_on_error is very useful in testing contexts when you are not
    explicitly testing for error conditions.

    Parameters:
      pipeline (PipelineDefinition): Pipeline to run
      environment (dict): The environment that parameterizes this run
      throw_on_error (bool):
        throw_on_error makes the function throw when an error is encountered rather
        than returning the :py:class:`SolidExecutionResult` in an error-state.
Returns:
PipelineExecutionResult
'''
check.inst_param(pipeline, 'pipeline', PipelineDefinition)
check.opt_dict_param(environment, 'environment')
check.bool_param(throw_on_error, 'throw_on_error')
check.opt_inst_param(reentrant_info, 'reentrant_info', ReentrantInfo)
check.opt_list_param(solid_subset, 'solid_subset', of_type=str)
pipeline_to_execute = get_subset_pipeline(pipeline, solid_subset)
typed_environment = create_typed_environment(pipeline_to_execute, environment)
return execute_reentrant_pipeline(
pipeline_to_execute, typed_environment, throw_on_error, reentrant_info
)
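# Minimal usage sketch for execute_pipeline (pipeline factory and config shape
# are hypothetical; a real environment dict must satisfy the pipeline's
# environment_type or PipelineConfigEvaluationError is raised):
#
#     result = execute_pipeline(define_pipeline(), environment=None)
#     assert result.success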
def _dep_key_of(solid):
return SolidInstance(solid.definition.name, solid.name)
def build_sub_pipeline(pipeline_def, solid_names):
'''
Build a pipeline which is a subset of another pipeline.
Only includes the solids which are in solid_names.
'''
check.inst_param(pipeline_def, 'pipeline_def', PipelineDefinition)
check.list_param(solid_names, 'solid_names', of_type=str)
solid_name_set = set(solid_names)
solids = list(map(pipeline_def.solid_named, solid_names))
deps = {_dep_key_of(solid): {} for solid in solids}
def _out_handle_of_inp(input_handle):
if pipeline_def.dependency_structure.has_dep(input_handle):
output_handle = pipeline_def.dependency_structure.get_dep(input_handle)
if output_handle.solid.name in solid_name_set:
return output_handle
return None
for solid in solids:
for input_handle in solid.input_handles():
output_handle = _out_handle_of_inp(input_handle)
if output_handle:
deps[_dep_key_of(solid)][input_handle.input_def.name] = DependencyDefinition(
solid=output_handle.solid.name, output=output_handle.output_def.name
)
return PipelineDefinition(
name=pipeline_def.name,
solids=list(set([solid.definition for solid in solids])),
context_definitions=pipeline_def.context_definitions,
dependencies=deps,
)
def execute_reentrant_pipeline(pipeline, typed_environment, throw_on_error, reentrant_info):
check.inst_param(pipeline, 'pipeline', PipelineDefinition)
check.inst_param(typed_environment, 'typed_environment', EnvironmentConfig)
check.opt_inst_param(reentrant_info, 'reentrant_info', ReentrantInfo)
with yield_context(pipeline, typed_environment, reentrant_info) as context:
return PipelineExecutionResult(
pipeline,
context,
list(_do_iterate_pipeline(pipeline, context, typed_environment, throw_on_error)),
)
def get_subset_pipeline(pipeline, solid_subset):
check.inst_param(pipeline, 'pipeline', PipelineDefinition)
check.opt_list_param(solid_subset, 'solid_subset', of_type=str)
return pipeline if solid_subset is None else build_sub_pipeline(pipeline, solid_subset)
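# Note the asymmetry above: solid_subset=None returns the full pipeline
# unchanged, while any explicit list (including []) builds a sub-pipeline with
# only the named solids, so solid_subset=[] yields an empty pipeline.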
def create_typed_environment(pipeline, environment=None):
check.inst_param(pipeline, 'pipeline', PipelineDefinition)
check.opt_dict_param(environment, 'environment')
result = evaluate_config_value(pipeline.environment_type, environment)
if not result.success:
raise PipelineConfigEvaluationError(pipeline, result.errors, environment)
return construct_environment_config(result.value)
| 1 | 12,192 | this is a behavior change. solid_subset=[] represents an empty pipeline where as solid_subset=None is the full pipeline | dagster-io-dagster | py |
@@ -318,8 +318,7 @@ Licensed under the MIT License. See License.txt in the project root for license
{
foreach (var unmatchedSetting in CustomSettings.Keys)
{
- Logger.LogError(new ArgumentException(unmatchedSetting),
- Resources.ParameterIsNotValid, unmatchedSetting);
+ Logger.LogWarning(Resources.ParameterIsNotValid, unmatchedSetting);
}
}
ErrorManager.ThrowErrors(); | 1 | // Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See License.txt in the project root for license information.
using System;
using System.Collections.Generic;
using System.IO;
using System.Linq;
using System.Reflection;
using Microsoft.Rest.Generator.Logging;
using Microsoft.Rest.Generator.Properties;
using Microsoft.Rest.Generator.Utilities;
using System.Globalization;
namespace Microsoft.Rest.Generator
{
public class Settings
{
public const string DefaultCodeGenerationHeader = @"Code generated by Microsoft (R) AutoRest Code Generator {0}
Changes may cause incorrect behavior and will be lost if the code is regenerated.";
public const string MicrosoftApacheLicenseHeader = @"Copyright (c) Microsoft and contributors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the ""License"");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an ""AS IS"" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
";
public const string MicrosoftMitLicenseHeader = @"Copyright (c) Microsoft Corporation. All rights reserved.
Licensed under the MIT License. See License.txt in the project root for license information.
";
private string _header;
public Settings()
{
FileSystem = new FileSystem();
OutputDirectory = Path.Combine(Environment.CurrentDirectory, "Generated");
CustomSettings = new Dictionary<string, string>(StringComparer.OrdinalIgnoreCase);
Header = string.Format(CultureInfo.InvariantCulture, DefaultCodeGenerationHeader, AutoRest.Version);
CodeGenerator = "CSharp";
Modeler = "Swagger";
}
/// <summary>
/// Gets or sets the IFileSystem used by code generation.
/// </summary>
public IFileSystem FileSystem { get; set; }
/// <summary>
        /// Custom provider-specific settings.
/// </summary>
public IDictionary<string, string> CustomSettings { get; private set; }
// The CommandLineInfo attribute is reflected to display help.
// Prefer to show required properties before optional.
// Although not guaranteed by the Framework, the iteration order matches the
// order of definition.
#region ordered_properties
/// <summary>
/// Gets or sets the path to the input specification file.
/// </summary>
[SettingsInfo("The location of the input specification.", true)]
[SettingsAlias("i")]
[SettingsAlias("input")]
public string Input { get; set; }
/// <summary>
/// Gets or sets a base namespace for generated code.
/// </summary>
[SettingsInfo("The namespace to use for generated code.")]
[SettingsAlias("n")]
public string Namespace { get; set; }
/// <summary>
/// Gets or sets the output directory for generated files. If not specified, uses 'Generated' as the default.
/// </summary>
[SettingsInfo("The location for generated files. If not specified, uses \"Generated\" as the default.")]
[SettingsAlias("o")]
[SettingsAlias("output")]
public string OutputDirectory { get; set; }
/// <summary>
/// Gets or sets the code generation language.
/// </summary>
[SettingsInfo("The code generator language. If not specified, defaults to CSharp.")]
[SettingsAlias("g")]
public string CodeGenerator { get; set; }
/// <summary>
/// Gets or sets the modeler to use for processing the input specification.
/// </summary>
[SettingsInfo("The Modeler to use on the input. If not specified, defaults to Swagger.")]
[SettingsAlias("m")]
public string Modeler { get; set; }
#endregion
/// <summary>
        /// Gets or sets the name of the generated client type. If not specified, will use
/// a value from the specification. For Swagger specifications,
/// the value of the 'Title' field is used.
/// </summary>
[SettingsInfo("Name to use for the generated client type. By default, uses " +
"the value of the 'Title' field from the Swagger input.")]
[SettingsAlias("name")]
public string ClientName { get; set; }
/// <summary>
/// Gets or sets the maximum number of properties in the request body.
/// If the number of properties in the request body is less than or
/// equal to this value, then these properties will be represented as method arguments.
/// </summary>
[SettingsInfo("The maximum number of properties in the request body. " +
"If the number of properties in the request body is less " +
"than or equal to this value, these properties will " +
"be represented as method arguments.")]
[SettingsAlias("ft")]
public int PayloadFlatteningThreshold { get; set; }
/// <summary>
/// Gets or sets a comment header to include in each generated file.
/// </summary>
[SettingsInfo("Text to include as a header comment in generated files. " +
"Use NONE to suppress the default header.")]
[SettingsAlias("header")]
public string Header
{
get { return _header; }
set
{
if (value == "MICROSOFT_MIT")
{
_header = MicrosoftMitLicenseHeader + Environment.NewLine + string.Format(CultureInfo.InvariantCulture, DefaultCodeGenerationHeader, AutoRest.Version);
}
else if (value == "MICROSOFT_APACHE")
{
_header = MicrosoftApacheLicenseHeader + Environment.NewLine + string.Format(CultureInfo.InvariantCulture, DefaultCodeGenerationHeader, AutoRest.Version);
}
else if (value == "NONE")
{
_header = String.Empty;
}
else
{
_header = value;
}
}
}
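        // Sentinel values recognized by the setter above: "MICROSOFT_MIT" and
        // "MICROSOFT_APACHE" expand to the matching license header plus the default
        // generation header, "NONE" suppresses the header entirely, and any other
        // string is used verbatim.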
/// <summary>
/// If set to true, generate client with a ServiceClientCredentials property and optional constructor parameter.
/// </summary>
[SettingsInfo(
"If true, the generated client includes a ServiceClientCredentials property and constructor parameter. " +
"Authentication behaviors are implemented by extending the ServiceClientCredentials type.")]
public bool AddCredentials { get; set; }
/// <summary>
/// If set, will cause generated code to be output to a single file. Not supported by all code generators.
/// </summary>
[SettingsInfo(
"If set, will cause generated code to be output to a single file. Not supported by all code generators.")]
public string OutputFileName { get; set; }
/// <summary>
/// If set to true, print out help message.
/// </summary>
[SettingsAlias("?")]
[SettingsAlias("h")]
public bool ShowHelp { get; set; }
/// <summary>
        /// Factory method to generate Settings from command line arguments.
/// Matches dictionary keys to the settings properties.
/// </summary>
/// <param name="arguments">Command line arguments</param>
        /// <returns>Settings</returns>
public static Settings Create(string[] arguments)
{
var argsDictionary = new Dictionary<string, string>(StringComparer.OrdinalIgnoreCase);
if (arguments != null && arguments.Length > 0)
{
string key = null;
string value = null;
for (int i = 0; i < arguments.Length; i++)
{
string argument = arguments[i] ?? String.Empty;
argument = argument.Trim();
if (argument.StartsWith("-", StringComparison.OrdinalIgnoreCase))
{
if (key != null)
{
AddArgumentToDictionary(key, value, argsDictionary);
value = null;
}
key = argument.TrimStart('-');
}
else
{
value = argument;
}
}
AddArgumentToDictionary(key, value, argsDictionary);
}
else
{
argsDictionary["?"] = String.Empty;
}
return Create(argsDictionary);
}
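        // Illustrative parse (a sketch; the flag aliases are defined on the
        // properties above, the command name is hypothetical):
        //   autorest.exe -i swagger.json -g CSharp -n MyNamespace
        // yields { "i": "swagger.json", "g": "CSharp", "n": "MyNamespace" }.
        // A flag with no trailing value is stored as String.Empty, which
        // PopulateSettings later interprets as true for bool properties.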
private static void AddArgumentToDictionary(string key, string value, Dictionary<string, string> argsDictionary)
{
key = key ?? "Default";
value = value ?? String.Empty;
argsDictionary[key] = value;
}
/// <summary>
/// Factory method to generate Settings from a dictionary. Matches dictionary
/// keys to the settings properties.
/// </summary>
/// <param name="settings">Dictionary of settings</param>
/// <returns>Settings</returns>
public static Settings Create(IDictionary<string, string> settings)
{
var autoRestSettings = new Settings();
var unmatchedSettings = PopulateSettings(autoRestSettings, settings);
if (settings == null || settings.Count > 0)
{
autoRestSettings.ShowHelp = true;
}
autoRestSettings.CustomSettings = unmatchedSettings;
return autoRestSettings;
}
/// <summary>
/// Sets object properties from the dictionary matching keys to property names or aliases.
/// </summary>
/// <param name="entityToPopulate">Object to populate from dictionary.</param>
/// <param name="settings">Dictionary of settings.</param>
/// <returns>Dictionary of settings that were not matched.</returns>
public static IDictionary<string, string> PopulateSettings(object entityToPopulate, IDictionary<string, string> settings)
{
Dictionary<string, string> unmatchedSettings = new Dictionary<string, string>();
if (entityToPopulate == null)
{
throw new ArgumentNullException("entityToPopulate");
}
if (settings != null && settings.Count > 0)
{
// Setting property value from dictionary
foreach (var setting in settings)
{
PropertyInfo property = entityToPopulate.GetType().GetProperties()
.FirstOrDefault(p => setting.Key.Equals(p.Name, StringComparison.OrdinalIgnoreCase) ||
p.GetCustomAttributes<SettingsAliasAttribute>()
.Any(a => setting.Key.Equals(a.Alias, StringComparison.OrdinalIgnoreCase)));
if (property != null)
{
try
{
if (setting.Value.IsNullOrEmpty() && property.PropertyType == typeof(bool))
{
property.SetValue(entityToPopulate, true);
}
else
{
property.SetValue(entityToPopulate,
Convert.ChangeType(setting.Value, property.PropertyType, CultureInfo.InvariantCulture), null);
}
}
catch (Exception exception)
{
throw new ArgumentException(String.Format(CultureInfo.InvariantCulture, Resources.ParameterValueIsNotValid,
setting.Key, property.GetType().Name), exception);
}
}
else
{
unmatchedSettings[setting.Key] = setting.Value;
}
}
}
return unmatchedSettings;
}
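        // Matching example for the lookup above (a sketch): the keys "o",
        // "output", and "OutputDirectory" all populate OutputDirectory, since
        // comparison is case-insensitive over property names and SettingsAlias
        // attributes; anything matching neither is handed back in the unmatched
        // dictionary for Validate() to report.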
public void Validate()
{
foreach (PropertyInfo property in (typeof (Settings)).GetProperties())
{
// If property value is not set - throw exception.
var doc = property.GetCustomAttributes<SettingsInfoAttribute>().FirstOrDefault();
if (doc != null && doc.IsRequired && property.GetValue(this) == null)
{
Logger.LogError(new ArgumentException(property.Name),
Resources.ParameterValueIsMissing, property.Name);
}
}
if (CustomSettings != null)
{
foreach (var unmatchedSetting in CustomSettings.Keys)
{
Logger.LogError(new ArgumentException(unmatchedSetting),
Resources.ParameterIsNotValid, unmatchedSetting);
}
}
ErrorManager.ThrowErrors();
}
}
}
| 1 | 21,487 | this is a breaking change, any specific reason you want to do this? | Azure-autorest | java |
@@ -91,6 +91,8 @@ type nodeChainReader interface {
GetTipSetStateRoot(tsKey types.SortedCidSet) (cid.Cid, error)
HeadEvents() *ps.PubSub
Load(context.Context) error
+ PutTipSetAndState(context.Context, *chain.TipSetAndState) error
+ SetHead(context.Context, types.TipSet) error
Stop()
}
| 1 | package node
import (
"context"
"encoding/json"
"fmt"
"os"
"sync"
"time"
ps "github.com/cskr/pubsub"
"github.com/ipfs/go-bitswap"
bsnet "github.com/ipfs/go-bitswap/network"
bserv "github.com/ipfs/go-blockservice"
"github.com/ipfs/go-cid"
"github.com/ipfs/go-datastore"
"github.com/ipfs/go-hamt-ipld"
bstore "github.com/ipfs/go-ipfs-blockstore"
"github.com/ipfs/go-ipfs-exchange-interface"
"github.com/ipfs/go-ipfs-exchange-offline"
offroute "github.com/ipfs/go-ipfs-routing/offline"
logging "github.com/ipfs/go-log"
"github.com/ipfs/go-merkledag"
"github.com/libp2p/go-libp2p"
autonatsvc "github.com/libp2p/go-libp2p-autonat-svc"
circuit "github.com/libp2p/go-libp2p-circuit"
"github.com/libp2p/go-libp2p-host"
"github.com/libp2p/go-libp2p-kad-dht"
"github.com/libp2p/go-libp2p-kad-dht/opts"
p2pmetrics "github.com/libp2p/go-libp2p-metrics"
libp2ppeer "github.com/libp2p/go-libp2p-peer"
dhtprotocol "github.com/libp2p/go-libp2p-protocol"
libp2pps "github.com/libp2p/go-libp2p-pubsub"
"github.com/libp2p/go-libp2p-routing"
rhost "github.com/libp2p/go-libp2p/p2p/host/routed"
"github.com/libp2p/go-libp2p/p2p/protocol/ping"
ma "github.com/multiformats/go-multiaddr"
"github.com/pkg/errors"
"github.com/filecoin-project/go-filecoin/actor/builtin"
"github.com/filecoin-project/go-filecoin/address"
"github.com/filecoin-project/go-filecoin/chain"
"github.com/filecoin-project/go-filecoin/config"
"github.com/filecoin-project/go-filecoin/consensus"
"github.com/filecoin-project/go-filecoin/core"
"github.com/filecoin-project/go-filecoin/flags"
"github.com/filecoin-project/go-filecoin/metrics"
"github.com/filecoin-project/go-filecoin/mining"
"github.com/filecoin-project/go-filecoin/net"
"github.com/filecoin-project/go-filecoin/net/pubsub"
"github.com/filecoin-project/go-filecoin/paths"
"github.com/filecoin-project/go-filecoin/plumbing"
"github.com/filecoin-project/go-filecoin/plumbing/cfg"
"github.com/filecoin-project/go-filecoin/plumbing/cst"
"github.com/filecoin-project/go-filecoin/plumbing/dag"
"github.com/filecoin-project/go-filecoin/plumbing/msg"
"github.com/filecoin-project/go-filecoin/plumbing/strgdls"
"github.com/filecoin-project/go-filecoin/porcelain"
"github.com/filecoin-project/go-filecoin/proofs"
"github.com/filecoin-project/go-filecoin/proofs/sectorbuilder"
"github.com/filecoin-project/go-filecoin/protocol/block"
"github.com/filecoin-project/go-filecoin/protocol/hello"
"github.com/filecoin-project/go-filecoin/protocol/retrieval"
"github.com/filecoin-project/go-filecoin/protocol/storage"
"github.com/filecoin-project/go-filecoin/repo"
"github.com/filecoin-project/go-filecoin/sampling"
"github.com/filecoin-project/go-filecoin/state"
"github.com/filecoin-project/go-filecoin/types"
vmerr "github.com/filecoin-project/go-filecoin/vm/errors"
"github.com/filecoin-project/go-filecoin/wallet"
)
const (
filecoinDHTProtocol dhtprotocol.ID = "/fil/kad/1.0.0"
)
var log = logging.Logger("node") // nolint: deadcode
var (
// ErrNoMinerAddress is returned when the node is not configured to have any miner addresses.
ErrNoMinerAddress = errors.New("no miner addresses configured")
)
type pubSubProcessorFunc func(ctx context.Context, msg pubsub.Message) error
type nodeChainReader interface {
GenesisCid() cid.Cid
GetBlock(context.Context, cid.Cid) (*types.Block, error)
GetHead() types.SortedCidSet
GetTipSet(types.SortedCidSet) (types.TipSet, error)
GetTipSetStateRoot(tsKey types.SortedCidSet) (cid.Cid, error)
HeadEvents() *ps.PubSub
Load(context.Context) error
Stop()
}
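// Note: nodeChainReader is intentionally a read-only view of the chain store;
// in production only the syncer writes to the store, so node-level code should
// not acquire mutation methods through this interface.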
// Node represents a full Filecoin node.
type Node struct {
host host.Host
PeerHost host.Host
Consensus consensus.Protocol
ChainReader nodeChainReader
Syncer chain.Syncer
PowerTable consensus.PowerTableView
BlockMiningAPI *block.MiningAPI
PorcelainAPI *porcelain.API
RetrievalAPI *retrieval.API
StorageAPI *storage.API
	// HeaviestTipSetCh is a subscription to the heaviest tipset topic on the chain.
HeaviestTipSetCh chan interface{}
	// HeaviestTipSetHandled is a hook for tests because pubsub notifications
// arrive async. It's called after handling a new heaviest tipset.
// Remove this after replacing the tipset "pubsub" with a synchronous event bus:
// https://github.com/filecoin-project/go-filecoin/issues/2309
HeaviestTipSetHandled func()
// Incoming messages for block mining.
Inbox *core.Inbox
// Messages sent and not yet mined.
Outbox *core.Outbox
Wallet *wallet.Wallet
// Mining stuff.
AddNewlyMinedBlock newBlockFunc
blockTime time.Duration
cancelMining context.CancelFunc
MiningWorker mining.Worker
MiningScheduler mining.Scheduler
mining struct {
sync.Mutex
isMining bool
}
miningCtx context.Context
miningDoneWg *sync.WaitGroup
// Storage Market Interfaces
StorageMiner *storage.Miner
// Retrieval Interfaces
RetrievalMiner *retrieval.Miner
// Network Fields
BlockSub pubsub.Subscription
MessageSub pubsub.Subscription
HelloSvc *hello.Handler
Bootstrapper *net.Bootstrapper
// Data Storage Fields
// Repo is the repo this node was created with
// it contains all persistent artifacts of the filecoin node
Repo repo.Repo
// SectorBuilder is used by the miner to fill and seal sectors.
sectorBuilder sectorbuilder.SectorBuilder
// Fetcher is the interface for fetching data from nodes.
Fetcher *net.Fetcher
// Exchange is the interface for fetching data from other nodes.
Exchange exchange.Interface
// Blockstore is the un-networked blocks interface
Blockstore bstore.Blockstore
// Blockservice is a higher level interface for fetching data
blockservice bserv.BlockService
// CborStore is a temporary interface for interacting with IPLD objects.
cborStore *hamt.CborIpldStore
// cancelSubscriptionsCtx is a handle to cancel the block and message subscriptions.
cancelSubscriptionsCtx context.CancelFunc
// OfflineMode, when true, disables libp2p
OfflineMode bool
// Router is a router from IPFS
Router routing.IpfsRouting
}
// Config is a helper to aid in the construction of a filecoin node.
type Config struct {
BlockTime time.Duration
Libp2pOpts []libp2p.Option
OfflineMode bool
Verifier proofs.Verifier
Rewarder consensus.BlockRewarder
Repo repo.Repo
IsRelay bool
}
// ConfigOpt is a configuration option for a filecoin node.
type ConfigOpt func(*Config) error
// OfflineMode enables or disables offline mode.
func OfflineMode(offlineMode bool) ConfigOpt {
return func(c *Config) error {
c.OfflineMode = offlineMode
return nil
}
}
// IsRelay configures node to act as a libp2p relay.
func IsRelay() ConfigOpt {
return func(c *Config) error {
c.IsRelay = true
return nil
}
}
// BlockTime sets the blockTime.
func BlockTime(blockTime time.Duration) ConfigOpt {
return func(c *Config) error {
c.BlockTime = blockTime
return nil
}
}
// Libp2pOptions returns a node config option that sets up the libp2p node
func Libp2pOptions(opts ...libp2p.Option) ConfigOpt {
return func(nc *Config) error {
// Quietly having your options overridden leads to hair loss
if len(nc.Libp2pOpts) > 0 {
panic("Libp2pOptions can only be called once")
}
nc.Libp2pOpts = opts
return nil
}
}
// VerifierConfigOption returns a function that sets the verifier to use in the node consensus
func VerifierConfigOption(verifier proofs.Verifier) ConfigOpt {
return func(c *Config) error {
c.Verifier = verifier
return nil
}
}
// RewarderConfigOption returns a function that sets the rewarder to use in the node consensus
func RewarderConfigOption(rewarder consensus.BlockRewarder) ConfigOpt {
return func(c *Config) error {
c.Rewarder = rewarder
return nil
}
}
// New creates a new node.
func New(ctx context.Context, opts ...ConfigOpt) (*Node, error) {
n := &Config{}
for _, o := range opts {
if err := o(n); err != nil {
return nil, err
}
}
return n.Build(ctx)
}
type blankValidator struct{}
func (blankValidator) Validate(_ string, _ []byte) error { return nil }
func (blankValidator) Select(_ string, _ [][]byte) (int, error) { return 0, nil }
// readGenesisCid is a helper function that queries the provided datastore for
// an entry with the genesisKey cid, returning it if found.
func readGenesisCid(ds datastore.Datastore) (cid.Cid, error) {
bb, err := ds.Get(chain.GenesisKey)
if err != nil {
return cid.Undef, errors.Wrap(err, "failed to read genesisKey")
}
var c cid.Cid
err = json.Unmarshal(bb, &c)
if err != nil {
return cid.Undef, errors.Wrap(err, "failed to cast genesisCid")
}
return c, nil
}
// buildHost determines if we are publically dialable. If so use public
// Address, if not configure node to announce relay address.
func (nc *Config) buildHost(ctx context.Context, makeDHT func(host host.Host) (routing.IpfsRouting, error)) (host.Host, error) {
	// When IsRelay is set, the node builds a host acting as a libp2p relay and
	// additionally runs the autoNAT service, which allows other nodes to check
	// their own dialability by having this node attempt to dial them.
makeDHTRightType := func(h host.Host) (routing.PeerRouting, error) {
return makeDHT(h)
}
if nc.IsRelay {
cfg := nc.Repo.Config()
publicAddr, err := ma.NewMultiaddr(cfg.Swarm.PublicRelayAddress)
if err != nil {
return nil, err
}
publicAddrFactory := func(lc *libp2p.Config) error {
lc.AddrsFactory = func(addrs []ma.Multiaddr) []ma.Multiaddr {
if cfg.Swarm.PublicRelayAddress == "" {
return addrs
}
return append(addrs, publicAddr)
}
return nil
}
relayHost, err := libp2p.New(
ctx,
libp2p.EnableRelay(circuit.OptHop),
libp2p.EnableAutoRelay(),
libp2p.Routing(makeDHTRightType),
publicAddrFactory,
libp2p.ChainOptions(nc.Libp2pOpts...),
)
if err != nil {
return nil, err
}
// Set up autoNATService as a streamhandler on the host.
_, err = autonatsvc.NewAutoNATService(ctx, relayHost)
if err != nil {
return nil, err
}
return relayHost, nil
}
return libp2p.New(
ctx,
libp2p.EnableAutoRelay(),
libp2p.Routing(makeDHTRightType),
libp2p.ChainOptions(nc.Libp2pOpts...),
)
}
// Build instantiates a filecoin Node from the settings specified in the config.
func (nc *Config) Build(ctx context.Context) (*Node, error) {
if nc.Repo == nil {
nc.Repo = repo.NewInMemoryRepo()
}
bs := bstore.NewBlockstore(nc.Repo.Datastore())
validator := blankValidator{}
var peerHost host.Host
var router routing.IpfsRouting
bandwidthTracker := p2pmetrics.NewBandwidthCounter()
nc.Libp2pOpts = append(nc.Libp2pOpts, libp2p.BandwidthReporter(bandwidthTracker))
if !nc.OfflineMode {
makeDHT := func(h host.Host) (routing.IpfsRouting, error) {
r, err := dht.New(
ctx,
h,
dhtopts.Datastore(nc.Repo.Datastore()),
dhtopts.NamespacedValidator("v", validator),
dhtopts.Protocols(filecoinDHTProtocol),
)
if err != nil {
return nil, errors.Wrap(err, "failed to setup routing")
}
router = r
return r, err
}
var err error
peerHost, err = nc.buildHost(ctx, makeDHT)
if err != nil {
return nil, err
}
} else {
router = offroute.NewOfflineRouter(nc.Repo.Datastore(), validator)
peerHost = rhost.Wrap(noopLibP2PHost{}, router)
}
// set up pinger
pingService := ping.NewPingService(peerHost)
// set up bitswap
nwork := bsnet.NewFromIpfsHost(peerHost, router)
//nwork := bsnet.NewFromIpfsHost(innerHost, router)
bswap := bitswap.New(ctx, nwork, bs)
bservice := bserv.New(bs, bswap)
fetcher := net.NewFetcher(ctx, bservice)
cstOffline := hamt.CborIpldStore{Blocks: bserv.New(bs, offline.Exchange(bs))}
genCid, err := readGenesisCid(nc.Repo.Datastore())
if err != nil {
return nil, err
}
// set up chainstore
chainStore := chain.NewDefaultStore(nc.Repo.ChainDatastore(), genCid)
chainState := cst.NewChainStateProvider(chainStore, &cstOffline)
powerTable := &consensus.MarketView{}
// set up processor
var processor consensus.Processor
if nc.Rewarder == nil {
processor = consensus.NewDefaultProcessor()
} else {
processor = consensus.NewConfiguredProcessor(consensus.NewDefaultMessageValidator(), nc.Rewarder)
}
// set up consensus
var nodeConsensus consensus.Protocol
if nc.Verifier == nil {
nodeConsensus = consensus.NewExpected(&cstOffline, bs, processor, powerTable, genCid, &proofs.RustVerifier{})
} else {
nodeConsensus = consensus.NewExpected(&cstOffline, bs, processor, powerTable, genCid, nc.Verifier)
}
// Set up libp2p network
fsub, err := libp2pps.NewFloodSub(ctx, peerHost)
if err != nil {
return nil, errors.Wrap(err, "failed to set up network")
}
backend, err := wallet.NewDSBackend(nc.Repo.WalletDatastore())
if err != nil {
return nil, errors.Wrap(err, "failed to set up wallet backend")
}
fcWallet := wallet.New(backend)
// only the syncer gets the storage which is online connected
chainSyncer := chain.NewDefaultSyncer(&cstOffline, nodeConsensus, chainStore, fetcher, chain.Syncing)
msgPool := core.NewMessagePool(nc.Repo.Config().Mpool, consensus.NewIngestionValidator(chainState, nc.Repo.Config().Mpool))
inbox := core.NewInbox(msgPool, core.InboxMaxAgeTipsets, chainStore)
msgQueue := core.NewMessageQueue()
outboxPolicy := core.NewMessageQueuePolicy(chainStore, core.OutboxMaxAgeRounds)
msgPublisher := newDefaultMessagePublisher(pubsub.NewPublisher(fsub), core.Topic, msgPool)
outbox := core.NewOutbox(fcWallet, consensus.NewOutboundMessageValidator(), msgQueue, msgPublisher, outboxPolicy, chainStore, chainState)
PorcelainAPI := porcelain.New(plumbing.New(&plumbing.APIDeps{
Bitswap: bswap,
Chain: chainState,
Config: cfg.NewConfig(nc.Repo),
DAG: dag.NewDAG(merkledag.NewDAGService(bservice)),
Deals: strgdls.New(nc.Repo.DealsDatastore()),
MsgPool: msgPool,
MsgPreviewer: msg.NewPreviewer(fcWallet, chainStore, &cstOffline, bs),
MsgQueryer: msg.NewQueryer(nc.Repo, fcWallet, chainStore, &cstOffline, bs),
MsgWaiter: msg.NewWaiter(chainStore, bs, &cstOffline),
Network: net.New(peerHost, pubsub.NewPublisher(fsub), pubsub.NewSubscriber(fsub), net.NewRouter(router), bandwidthTracker, net.NewPinger(peerHost, pingService)),
Outbox: outbox,
Wallet: fcWallet,
}))
nd := &Node{
blockservice: bservice,
Blockstore: bs,
cborStore: &cstOffline,
Consensus: nodeConsensus,
ChainReader: chainStore,
Syncer: chainSyncer,
PowerTable: powerTable,
PorcelainAPI: PorcelainAPI,
Fetcher: fetcher,
Exchange: bswap,
host: peerHost,
Inbox: inbox,
OfflineMode: nc.OfflineMode,
Outbox: outbox,
PeerHost: peerHost,
Repo: nc.Repo,
Wallet: fcWallet,
blockTime: nc.BlockTime,
Router: router,
}
// Bootstrapping network peers.
periodStr := nd.Repo.Config().Bootstrap.Period
period, err := time.ParseDuration(periodStr)
if err != nil {
return nil, errors.Wrapf(err, "couldn't parse bootstrap period %s", periodStr)
}
// Bootstrapper maintains connections to some subset of addresses
ba := nd.Repo.Config().Bootstrap.Addresses
bpi, err := net.PeerAddrsToPeerInfos(ba)
if err != nil {
return nil, errors.Wrapf(err, "couldn't parse bootstrap addresses [%s]", ba)
}
minPeerThreshold := nd.Repo.Config().Bootstrap.MinPeerThreshold
nd.Bootstrapper = net.NewBootstrapper(bpi, nd.Host(), nd.Host().Network(), nd.Router, minPeerThreshold, period)
return nd, nil
}
// Start boots up the node.
func (node *Node) Start(ctx context.Context) error {
if err := metrics.RegisterPrometheusEndpoint(node.Repo.Config().Observability.Metrics); err != nil {
return errors.Wrap(err, "failed to setup metrics")
}
if err := metrics.RegisterJaeger(node.host.ID().Pretty(), node.Repo.Config().Observability.Tracing); err != nil {
return errors.Wrap(err, "failed to setup tracing")
}
var err error
if err = node.ChainReader.Load(ctx); err != nil {
return err
}
// Only set these up if there is a miner configured.
if _, err := node.miningAddress(); err == nil {
if err := node.setupMining(ctx); err != nil {
log.Errorf("setup mining failed: %v", err)
return err
}
}
// Start up 'hello' handshake service
syncCallBack := func(pid libp2ppeer.ID, cids []cid.Cid, height uint64) {
cidSet := types.NewSortedCidSet(cids...)
err := node.Syncer.HandleNewTipset(context.Background(), cidSet)
if err != nil {
log.Infof("error handling blocks: %s", cidSet.String())
}
}
node.HelloSvc = hello.New(node.Host(), node.ChainReader.GenesisCid(), syncCallBack, node.PorcelainAPI.ChainHead, node.Repo.Config().Net, flags.Commit)
err = node.setupProtocols()
if err != nil {
return errors.Wrap(err, "failed to set up protocols:")
}
node.RetrievalMiner = retrieval.NewMiner(node)
// subscribe to block notifications
blkSub, err := node.PorcelainAPI.PubSubSubscribe(BlockTopic)
if err != nil {
return errors.Wrap(err, "failed to subscribe to blocks topic")
}
node.BlockSub = blkSub
// subscribe to message notifications
msgSub, err := node.PorcelainAPI.PubSubSubscribe(core.Topic)
if err != nil {
return errors.Wrap(err, "failed to subscribe to message topic")
}
node.MessageSub = msgSub
cctx, cancel := context.WithCancel(context.Background())
node.cancelSubscriptionsCtx = cancel
go node.handleSubscription(cctx, node.processBlock, "processBlock", node.BlockSub, "BlockSub")
go node.handleSubscription(cctx, node.processMessage, "processMessage", node.MessageSub, "MessageSub")
node.HeaviestTipSetHandled = func() {}
node.HeaviestTipSetCh = node.ChainReader.HeadEvents().Sub(chain.NewHeadTopic)
head, err := node.PorcelainAPI.ChainHead()
if err != nil {
return errors.Wrap(err, "failed to get chain head")
}
go node.handleNewHeaviestTipSet(cctx, head)
if !node.OfflineMode {
node.Bootstrapper.Start(context.Background())
}
if err := node.setupHeartbeatServices(ctx); err != nil {
return errors.Wrap(err, "failed to start heartbeat services")
}
return nil
}
func (node *Node) setupHeartbeatServices(ctx context.Context) error {
mag := func() address.Address {
addr, err := node.miningAddress()
// the only error miningAddress() returns is ErrNoMinerAddress.
// if there is no configured miner address, simply send a zero
// address across the wire.
if err != nil {
return address.Undef
}
return addr
}
// start the primary heartbeat service
if len(node.Repo.Config().Heartbeat.BeatTarget) > 0 {
hbs := metrics.NewHeartbeatService(node.Host(), node.Repo.Config().Heartbeat, node.PorcelainAPI.ChainHead, metrics.WithMinerAddressGetter(mag))
go hbs.Start(ctx)
}
	// check if we want to connect to an alert service. An alerting service is a heartbeat
	// service that can trigger alerts based on the contents of heartbeats.
if alertTarget := os.Getenv("FIL_HEARTBEAT_ALERTS"); len(alertTarget) > 0 {
ahbs := metrics.NewHeartbeatService(node.Host(), &config.HeartbeatConfig{
BeatTarget: alertTarget,
BeatPeriod: "10s",
ReconnectPeriod: "10s",
Nickname: node.Repo.Config().Heartbeat.Nickname,
}, node.PorcelainAPI.ChainHead, metrics.WithMinerAddressGetter(mag))
go ahbs.Start(ctx)
}
return nil
}
func (node *Node) setupMining(ctx context.Context) error {
// initialize a sector builder
sectorBuilder, err := initSectorBuilderForNode(ctx, node)
if err != nil {
return errors.Wrap(err, "failed to initialize sector builder")
}
node.sectorBuilder = sectorBuilder
return nil
}
func (node *Node) setIsMining(isMining bool) {
node.mining.Lock()
defer node.mining.Unlock()
node.mining.isMining = isMining
}
func (node *Node) handleNewMiningOutput(miningOutCh <-chan mining.Output) {
defer func() {
node.miningDoneWg.Done()
}()
for {
select {
case <-node.miningCtx.Done():
return
case output, ok := <-miningOutCh:
if !ok {
return
}
if output.Err != nil {
log.Errorf("stopping mining. error: %s", output.Err.Error())
node.StopMining(context.Background())
} else {
node.miningDoneWg.Add(1)
go func() {
if node.IsMining() {
node.AddNewlyMinedBlock(node.miningCtx, output.NewBlock)
}
node.miningDoneWg.Done()
}()
}
}
}
}
func (node *Node) handleNewHeaviestTipSet(ctx context.Context, head types.TipSet) {
for {
select {
case ts, ok := <-node.HeaviestTipSetCh:
if !ok {
return
}
newHead, ok := ts.(types.TipSet)
if !ok {
log.Error("non-tipset published on heaviest tipset channel")
continue
}
if len(newHead) == 0 {
log.Error("tipset of size 0 published on heaviest tipset channel. ignoring and waiting for a new heaviest tipset.")
continue
}
if err := node.Outbox.HandleNewHead(ctx, head, newHead); err != nil {
log.Error("updating outbound message queue for new tipset", err)
}
if err := node.Inbox.HandleNewHead(ctx, head, newHead); err != nil {
log.Error("updating message pool for new tipset", err)
}
head = newHead
if node.StorageMiner != nil {
node.StorageMiner.OnNewHeaviestTipSet(newHead)
}
node.HeaviestTipSetHandled()
case <-ctx.Done():
return
}
}
}
func (node *Node) cancelSubscriptions() {
if node.BlockSub != nil || node.MessageSub != nil {
node.cancelSubscriptionsCtx()
}
if node.BlockSub != nil {
node.BlockSub.Cancel()
node.BlockSub = nil
}
if node.MessageSub != nil {
node.MessageSub.Cancel()
node.MessageSub = nil
}
}
// Stop initiates the shutdown of the node.
func (node *Node) Stop(ctx context.Context) {
node.ChainReader.HeadEvents().Unsub(node.HeaviestTipSetCh)
node.StopMining(ctx)
node.cancelSubscriptions()
node.ChainReader.Stop()
if node.SectorBuilder() != nil {
if err := node.SectorBuilder().Close(); err != nil {
fmt.Printf("error closing sector builder: %s\n", err)
}
node.sectorBuilder = nil
}
if err := node.Host().Close(); err != nil {
fmt.Printf("error closing host: %s\n", err)
}
if err := node.Repo.Close(); err != nil {
fmt.Printf("error closing repo: %s\n", err)
}
node.Bootstrapper.Stop()
fmt.Println("stopping filecoin :(")
}
type newBlockFunc func(context.Context, *types.Block)
func (node *Node) addNewlyMinedBlock(ctx context.Context, b *types.Block) {
log.Debugf("Got a newly mined block from the mining worker: %s", b)
if err := node.AddNewBlock(ctx, b); err != nil {
log.Warningf("error adding new mined block: %s. err: %s", b.Cid().String(), err.Error())
}
}
// miningAddress returns the address of the mining actor mining on behalf of
// the node.
func (node *Node) miningAddress() (address.Address, error) {
addr := node.Repo.Config().Mining.MinerAddress
if addr.Empty() {
return address.Undef, ErrNoMinerAddress
}
return addr, nil
}
// MiningTimes returns the configured time it takes to mine a block, and also
// the mining delay duration, which is currently a fixed fraction of block time.
// Note this is mocked behavior; in production this time is determined by how
// long it takes to generate PoSTs.
func (node *Node) MiningTimes() (time.Duration, time.Duration) {
mineDelay := node.GetBlockTime() / mining.MineDelayConversionFactor
return node.GetBlockTime(), mineDelay
}
// GetBlockTime returns the current block time.
// TODO this should be surfaced somewhere in the plumbing API.
func (node *Node) GetBlockTime() time.Duration {
return node.blockTime
}
// SetBlockTime sets the block time.
func (node *Node) SetBlockTime(blockTime time.Duration) {
node.blockTime = blockTime
}
// StartMining causes the node to start feeding blocks to the mining worker and initializes
// the SectorBuilder for the mining address.
func (node *Node) StartMining(ctx context.Context) error {
if node.IsMining() {
return errors.New("Node is already mining")
}
minerAddr, err := node.miningAddress()
if err != nil {
return errors.Wrap(err, "failed to get mining address")
}
// ensure we have a sector builder
if node.SectorBuilder() == nil {
if err := node.setupMining(ctx); err != nil {
return err
}
}
minerOwnerAddr, err := node.miningOwnerAddress(ctx, minerAddr)
if err != nil {
return errors.Wrapf(err, "failed to get mining owner address for miner %s", minerAddr)
}
_, mineDelay := node.MiningTimes()
if node.MiningWorker == nil {
if node.MiningWorker, err = node.CreateMiningWorker(ctx); err != nil {
return err
}
}
if node.MiningScheduler == nil {
node.MiningScheduler = mining.NewScheduler(node.MiningWorker, mineDelay, node.PorcelainAPI.ChainHead)
}
// paranoid check
if !node.MiningScheduler.IsStarted() {
node.miningCtx, node.cancelMining = context.WithCancel(context.Background())
outCh, doneWg := node.MiningScheduler.Start(node.miningCtx)
node.miningDoneWg = doneWg
node.AddNewlyMinedBlock = node.addNewlyMinedBlock
node.miningDoneWg.Add(1)
go node.handleNewMiningOutput(outCh)
}
// initialize a storage miner
storageMiner, err := initStorageMinerForNode(ctx, node)
if err != nil {
return errors.Wrap(err, "failed to initialize storage miner")
}
node.StorageMiner = storageMiner
// loop, turning sealing-results into commitSector messages to be included
// in the chain
go func() {
for {
select {
case result := <-node.SectorBuilder().SectorSealResults():
if result.SealingErr != nil {
log.Errorf("failed to seal sector with id %d: %s", result.SectorID, result.SealingErr.Error())
} else if result.SealingResult != nil {
// TODO: determine these algorithmically by simulating call and querying historical prices
gasPrice := types.NewGasPrice(1)
gasUnits := types.NewGasUnits(300)
val := result.SealingResult
				// This call can fail due to, e.g., nonce collisions. Our miner's existence depends on this.
// We should deal with this, but MessageSendWithRetry is problematic.
msgCid, err := node.PorcelainAPI.MessageSend(
node.miningCtx,
minerOwnerAddr,
minerAddr,
nil,
gasPrice,
gasUnits,
"commitSector",
val.SectorID,
val.CommD[:],
val.CommR[:],
val.CommRStar[:],
val.Proof[:],
)
if err != nil {
log.Errorf("failed to send commitSector message from %s to %s for sector with id %d: %s", minerOwnerAddr, minerAddr, val.SectorID, err)
continue
}
node.StorageMiner.OnCommitmentSent(val, msgCid, nil)
}
case <-node.miningCtx.Done():
return
}
}
}()
// schedules sealing of staged piece-data
if node.Repo.Config().Mining.AutoSealIntervalSeconds > 0 {
go func() {
for {
select {
case <-node.miningCtx.Done():
return
case <-time.After(time.Duration(node.Repo.Config().Mining.AutoSealIntervalSeconds) * time.Second):
log.Info("auto-seal has been triggered")
if err := node.SectorBuilder().SealAllStagedSectors(node.miningCtx); err != nil {
log.Errorf("scheduler received error from node.SectorBuilder.SealAllStagedSectors (%s) - exiting", err.Error())
return
}
}
}
}()
} else {
log.Debug("auto-seal is disabled")
}
node.setIsMining(true)
return nil
}
func initSectorBuilderForNode(ctx context.Context, node *Node) (sectorbuilder.SectorBuilder, error) {
minerAddr, err := node.miningAddress()
if err != nil {
return nil, errors.Wrap(err, "failed to get node's mining address")
}
sectorSize, err := node.PorcelainAPI.MinerGetSectorSize(ctx, minerAddr)
if err != nil {
return nil, errors.Wrapf(err, "failed to get sector size for miner w/address %s", minerAddr.String())
}
lastUsedSectorID, err := node.PorcelainAPI.MinerGetLastCommittedSectorID(ctx, minerAddr)
if err != nil {
return nil, errors.Wrapf(err, "failed to get last used sector id for miner w/address %s", minerAddr.String())
}
	// TODO: Currently, we configure the RustSectorBuilder to store its
	// metadata in the staging directory; it should live in its own directory.
//
// Tracked here: https://github.com/filecoin-project/rust-fil-proofs/issues/402
repoPath, err := node.Repo.Path()
if err != nil {
return nil, err
}
sectorDir, err := paths.GetSectorPath(node.Repo.Config().SectorBase.RootDir, repoPath)
if err != nil {
return nil, err
}
stagingDir, err := paths.StagingDir(sectorDir)
if err != nil {
return nil, err
}
sealedDir, err := paths.SealedDir(sectorDir)
if err != nil {
return nil, err
}
cfg := sectorbuilder.RustSectorBuilderConfig{
BlockService: node.blockservice,
LastUsedSectorID: lastUsedSectorID,
MetadataDir: stagingDir,
MinerAddr: minerAddr,
SealedSectorDir: sealedDir,
StagedSectorDir: stagingDir,
SectorClass: types.NewSectorClass(sectorSize),
}
sb, err := sectorbuilder.NewRustSectorBuilder(cfg)
if err != nil {
return nil, errors.Wrap(err, fmt.Sprintf("failed to initialize sector builder for miner %s", minerAddr.String()))
}
return sb, nil
}
func initStorageMinerForNode(ctx context.Context, node *Node) (*storage.Miner, error) {
minerAddr, err := node.miningAddress()
if err != nil {
return nil, errors.Wrap(err, "failed to get node's mining address")
}
miningOwnerAddr, err := node.miningOwnerAddress(ctx, minerAddr)
if err != nil {
return nil, errors.Wrap(err, "no mining owner available, skipping storage miner setup")
}
miner, err := storage.NewMiner(minerAddr, miningOwnerAddr, node, node.Repo.DealsDatastore(), node.PorcelainAPI)
if err != nil {
return nil, errors.Wrap(err, "failed to instantiate storage miner")
}
return miner, nil
}
// StopMining stops mining on new blocks.
func (node *Node) StopMining(ctx context.Context) {
node.setIsMining(false)
if node.cancelMining != nil {
node.cancelMining()
}
if node.miningDoneWg != nil {
node.miningDoneWg.Wait()
}
// TODO: stop node.StorageMiner
}
// NewAddress creates a new account address on the default wallet backend.
func (node *Node) NewAddress() (address.Address, error) {
return wallet.NewAddress(node.Wallet)
}
// miningOwnerAddress returns the owner of miningAddr.
// TODO: find a better home for this method
func (node *Node) miningOwnerAddress(ctx context.Context, miningAddr address.Address) (address.Address, error) {
ownerAddr, err := node.PorcelainAPI.MinerGetOwnerAddress(ctx, miningAddr)
if err != nil {
return address.Undef, errors.Wrap(err, "failed to get miner owner address")
}
return ownerAddr, nil
}
func (node *Node) handleSubscription(ctx context.Context, f pubSubProcessorFunc, fname string, s pubsub.Subscription, sname string) {
for {
pubSubMsg, err := s.Next(ctx)
if err != nil {
log.Errorf("%s.Next(): %s", sname, err)
return
}
if err := f(ctx, pubSubMsg); err != nil {
if vmerr.ShouldRevert(err) {
log.Infof("%s(): %s", fname, err)
} else if err != context.Canceled {
log.Errorf("%s(): %s", fname, err)
}
}
}
}
// setupProtocols creates protocol clients and miners, then sets the node's APIs
// for each
func (node *Node) setupProtocols() error {
_, mineDelay := node.MiningTimes()
blockMiningAPI := block.New(
node.AddNewBlock,
node.ChainReader,
mineDelay,
node.StartMining,
node.StopMining,
node.CreateMiningWorker)
node.BlockMiningAPI = &blockMiningAPI
// set up retrieval client and api
retapi := retrieval.NewAPI(retrieval.NewClient(node.host, node.blockTime, node.PorcelainAPI))
node.RetrievalAPI = &retapi
// set up storage client and api
smc := storage.NewClient(node.blockTime, node.host, node.PorcelainAPI)
smcAPI := storage.NewAPI(smc)
node.StorageAPI = &smcAPI
return nil
}
// CreateMiningWorker creates a mining.Worker for the node using the configured
// getStateTree, getWeight, and getAncestors functions for the node
func (node *Node) CreateMiningWorker(ctx context.Context) (mining.Worker, error) {
processor := consensus.NewDefaultProcessor()
minerAddr, err := node.miningAddress()
if err != nil {
return nil, errors.Wrap(err, "failed to get mining address")
}
minerPubKey, err := node.PorcelainAPI.MinerGetKey(ctx, minerAddr)
if err != nil {
return nil, errors.Wrap(err, "could not get key from miner actor")
}
minerOwnerAddr, err := node.miningOwnerAddress(ctx, minerAddr)
if err != nil {
log.Errorf("could not get owner address of miner actor")
return nil, err
}
return mining.NewDefaultWorker(
node.Inbox.Pool(), node.getStateTree, node.getWeight, node.getAncestors, processor, node.PowerTable,
node.Blockstore, node.CborStore(), minerAddr, minerOwnerAddr, minerPubKey,
node.Wallet, node.blockTime), nil
}
// getStateFromKey returns the state tree based on tipset fetched with provided key tsKey
func (node *Node) getStateFromKey(ctx context.Context, tsKey types.SortedCidSet) (state.Tree, error) {
stateCid, err := node.ChainReader.GetTipSetStateRoot(tsKey)
if err != nil {
return nil, err
}
return state.LoadStateTree(ctx, node.CborStore(), stateCid, builtin.Actors)
}
// getStateTree is the default GetStateTree function for the mining worker.
func (node *Node) getStateTree(ctx context.Context, ts types.TipSet) (state.Tree, error) {
return node.getStateFromKey(ctx, ts.ToSortedCidSet())
}
// getWeight is the default GetWeight function for the mining worker.
func (node *Node) getWeight(ctx context.Context, ts types.TipSet) (uint64, error) {
parent, err := ts.Parents()
if err != nil {
return uint64(0), err
}
// TODO handle genesis cid more gracefully
if parent.Len() == 0 {
return node.Consensus.Weight(ctx, ts, nil)
}
pSt, err := node.getStateFromKey(ctx, parent)
if err != nil {
return uint64(0), err
}
return node.Consensus.Weight(ctx, ts, pSt)
}
// getAncestors is the default GetAncestors function for the mining worker.
func (node *Node) getAncestors(ctx context.Context, ts types.TipSet, newBlockHeight *types.BlockHeight) ([]types.TipSet, error) {
ancestorHeight := types.NewBlockHeight(consensus.AncestorRoundsNeeded)
return chain.GetRecentAncestors(ctx, ts, node.ChainReader, newBlockHeight, ancestorHeight, sampling.LookbackParameter)
}
// -- Accessors
// Host returns the nodes host.
func (node *Node) Host() host.Host {
return node.host
}
// SectorBuilder returns the nodes sectorBuilder.
func (node *Node) SectorBuilder() sectorbuilder.SectorBuilder {
return node.sectorBuilder
}
// BlockService returns the nodes blockservice.
func (node *Node) BlockService() bserv.BlockService {
return node.blockservice
}
// CborStore returns the nodes cborStore.
func (node *Node) CborStore() *hamt.CborIpldStore {
return node.cborStore
}
// IsMining returns a boolean indicating whether the node is mining blocks.
func (node *Node) IsMining() bool {
node.mining.Lock()
defer node.mining.Unlock()
return node.mining.isMining
}
| 1 | 19,354 | Blocking: we still want read write separation. Node functions should absolutely not write to the chain store and the interface should reflect that. Only the syncer should have this capability in production code. It should be no problem to keep casting chainForTest to a read-write interface, or doing other function decomposition and automatic casting tricks. | filecoin-project-venus | go |
@@ -22,7 +22,8 @@ const (
minimalPrefetchWorkerQueueSize int = 1
testBlockRetrievalWorkerQueueSize int = 5
testPrefetchWorkerQueueSize int = 1
- defaultOnDemandRequestPriority int = 100
+ defaultOnDemandRequestPriority int = 1<<30 - 1
+ lowestTriggerPrefetchPriority int = 1
// Channel buffer size can be big because we use the empty struct.
workerQueueSize int = 1<<31 - 1
) | 1 | // Copyright 2016 Keybase Inc. All rights reserved.
// Use of this source code is governed by a BSD
// license that can be found in the LICENSE file.
package libkbfs
import (
"container/heap"
"io"
"reflect"
"sync"
"github.com/keybase/client/go/logger"
"github.com/pkg/errors"
"golang.org/x/net/context"
)
const (
defaultBlockRetrievalWorkerQueueSize int = 100
defaultPrefetchWorkerQueueSize int = 2
minimalBlockRetrievalWorkerQueueSize int = 2
minimalPrefetchWorkerQueueSize int = 1
testBlockRetrievalWorkerQueueSize int = 5
testPrefetchWorkerQueueSize int = 1
defaultOnDemandRequestPriority int = 100
// Channel buffer size can be big because we use the empty struct.
workerQueueSize int = 1<<31 - 1
)
type blockRetrievalPartialConfig interface {
dataVersioner
logMaker
blockCacher
diskBlockCacheGetter
}
type blockRetrievalConfig interface {
blockRetrievalPartialConfig
blockGetter() blockGetter
}
type realBlockRetrievalConfig struct {
blockRetrievalPartialConfig
bg blockGetter
}
func (c *realBlockRetrievalConfig) blockGetter() blockGetter {
return c.bg
}
// blockRetrievalRequest represents one consumer's request for a block.
type blockRetrievalRequest struct {
block Block
doneCh chan error
}
// blockRetrieval contains the metadata for a given block retrieval. May
// represent many requests, all of which will be handled at once.
type blockRetrieval struct {
//// Retrieval Metadata
// the block pointer to retrieve
blockPtr BlockPointer
// the key metadata for the request
kmd KeyMetadata
// the context encapsulating all request contexts
ctx *CoalescingContext
// cancel function for the context
cancelFunc context.CancelFunc
	// protects requests and cacheLifetime
reqMtx sync.RWMutex
// the individual requests for this block pointer: they must be notified
// once the block is returned
requests []*blockRetrievalRequest
// the cache lifetime for the retrieval
cacheLifetime BlockCacheLifetime
//// Queueing Metadata
// the index of the retrieval in the heap
index int
// the priority of the retrieval: larger priorities are processed first
priority int
// state of global request counter when this retrieval was created;
// maintains FIFO
insertionOrder uint64
}
// blockPtrLookup is used to uniquely identify block retrieval requests. The
// reflect.Type is needed because sometimes a request is placed concurrently
// for a specific block type and a generic block type. The requests will both
// cause a retrieval, but branching on type allows us to avoid special casing
// the code.
type blockPtrLookup struct {
bp BlockPointer
t reflect.Type
}
// blockRetrievalQueue manages block retrieval requests. Higher priority
// requests are executed first. Requests are executed in FIFO order within a
// given priority level.
type blockRetrievalQueue struct {
config blockRetrievalConfig
log logger.Logger
// protects ptrs, insertionCount, and the heap
mtx sync.RWMutex
// queued or in progress retrievals
ptrs map[blockPtrLookup]*blockRetrieval
// global counter of insertions to queue
// capacity: ~584 years at 1 billion requests/sec
insertionCount uint64
heap *blockRetrievalHeap
// These are notification channels to maximize the time that each request
// is in the heap, allowing preemption as long as possible. This way, a
// request only exits the heap once a worker is ready.
workerCh chan<- struct{}
prefetchWorkerCh chan<- struct{}
// slices to store the workers so we can terminate them when we're done
workers []*blockRetrievalWorker
// channel to be closed when we're done accepting requests
doneCh chan struct{}
// protects prefetcher
prefetchMtx sync.RWMutex
// prefetcher for handling prefetching scenarios
prefetcher Prefetcher
}
var _ BlockRetriever = (*blockRetrievalQueue)(nil)
// newBlockRetrievalQueue creates a new block retrieval queue. The numWorkers
// parameter determines how many workers can concurrently call Work (more than
// numWorkers will block).
func newBlockRetrievalQueue(numWorkers int, numPrefetchWorkers int,
config blockRetrievalConfig) *blockRetrievalQueue {
workerCh := make(chan struct{}, workerQueueSize)
prefetchWorkerCh := make(chan struct{}, workerQueueSize)
q := &blockRetrievalQueue{
config: config,
log: config.MakeLogger(""),
ptrs: make(map[blockPtrLookup]*blockRetrieval),
heap: &blockRetrievalHeap{},
workerCh: workerCh,
prefetchWorkerCh: prefetchWorkerCh,
doneCh: make(chan struct{}),
workers: make([]*blockRetrievalWorker, 0,
numWorkers+numPrefetchWorkers),
}
q.prefetcher = newBlockPrefetcher(q, config)
for i := 0; i < numWorkers; i++ {
q.workers = append(q.workers, newBlockRetrievalWorker(
config.blockGetter(), q, workerCh))
}
for i := 0; i < numPrefetchWorkers; i++ {
q.workers = append(q.workers, newBlockRetrievalWorker(
config.blockGetter(), q, prefetchWorkerCh))
}
return q
}
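// popIfNotEmpty pops and returns the highest-priority retrieval from the heap,
// or nil if the heap is empty.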
func (brq *blockRetrievalQueue) popIfNotEmpty() *blockRetrieval {
brq.mtx.Lock()
defer brq.mtx.Unlock()
if brq.heap.Len() > 0 {
return heap.Pop(brq.heap).(*blockRetrieval)
}
return nil
}
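// shutdownRetrieval pops a single queued retrieval, if any, and finalizes it
// with io.EOF so its pending requesters learn about the shutdown.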
func (brq *blockRetrievalQueue) shutdownRetrieval() {
retrieval := brq.popIfNotEmpty()
if retrieval != nil {
brq.FinalizeRequest(retrieval, nil, io.EOF)
}
}
// notifyWorker notifies workers that there is a new request for processing.
func (brq *blockRetrievalQueue) notifyWorker(priority int) {
// On-demand workers and prefetch workers share the priority queue. This
// allows maximum time for requests to jump the queue, at least until the
// worker actually begins working on it.
//
// Note that the worker being notified won't necessarily work on the exact
// request that caused the notification. It's just a counter. That means
// that sometimes on-demand workers will work on prefetch requests, and
// vice versa. But the numbers should match.
//
// However, there are some pathological scenarios where if all the workers
	// of one type are making progress but the other type is not (which is
// highly improbable), requests of one type could starve the other. By
// design, on-demand requests _should_ starve prefetch requests, so this is
// a problem only if prefetch requests can starve on-demand workers. But
// because there are far more on-demand workers than prefetch workers, this
// should never actually happen.
workerCh := brq.workerCh
if priority < defaultOnDemandRequestPriority {
workerCh = brq.prefetchWorkerCh
}
select {
case <-brq.doneCh:
brq.shutdownRetrieval()
// Notify the next queued worker.
case workerCh <- struct{}{}:
default:
panic("notifyWorker() would have blocked, which means we somehow " +
"have around MaxInt32 requests already waiting.")
}
}
// CacheAndPrefetch implements the BlockRetrieval interface for
// blockRetrievalQueue. It also updates the LRU time for the block in the disk
// cache.
func (brq *blockRetrievalQueue) CacheAndPrefetch(ctx context.Context,
ptr BlockPointer, block Block, kmd KeyMetadata, priority int,
lifetime BlockCacheLifetime, hasPrefetched bool) (err error) {
defer func() {
if err != nil {
brq.log.CWarningf(ctx, "Error Putting into the block cache: %+v",
err)
}
dbc := brq.config.DiskBlockCache()
if dbc != nil {
go func() {
err := dbc.UpdateMetadata(ctx, ptr.ID, hasPrefetched)
switch err.(type) {
case nil:
case NoSuchBlockError:
default:
brq.log.CWarningf(ctx, "Error updating metadata: %+v", err)
}
}()
}
}()
if hasPrefetched {
return brq.config.BlockCache().PutWithPrefetch(ptr, kmd.TlfID(), block,
lifetime, hasPrefetched)
}
if priority < defaultOnDemandRequestPriority {
// Only on-demand or higher priority requests can trigger prefetches.
hasPrefetched = false
return brq.config.BlockCache().PutWithPrefetch(ptr, kmd.TlfID(), block,
lifetime, hasPrefetched)
}
// We must let the cache know at this point that we've prefetched.
// 1) To prevent any other Gets from prefetching.
	// 2) To prevent prefetching if a cache Put fails, since prefetching is
	// only useful when combined with the cache.
hasPrefetched = true
err = brq.config.BlockCache().PutWithPrefetch(ptr, kmd.TlfID(), block,
lifetime, hasPrefetched)
switch err.(type) {
case nil:
case cachePutCacheFullError:
brq.log.CDebugf(ctx, "Skipping prefetch because the cache "+
"is full")
return err
default:
// We should return the error here because otherwise we could thrash
// the prefetcher.
return err
}
// This must be called in a goroutine to prevent deadlock in case this
// CacheAndPrefetch call was triggered by the prefetcher itself.
go brq.Prefetcher().PrefetchAfterBlockRetrieved(block, ptr, kmd)
return nil
}
func (brq *blockRetrievalQueue) checkCaches(ctx context.Context,
priority int, kmd KeyMetadata, ptr BlockPointer, block Block,
lifetime BlockCacheLifetime) error {
	// Attempt to retrieve the block from the cache. The request might be for a
	// specific block type while the cached block is a CommonBlock, but Set works
	// correctly in that direction. The cache will never have CommonBlocks. TODO: verify
// that the returned lifetime here matches `lifetime` (which should always
// be TransientEntry, since a PermanentEntry would have been served
// directly from the cache elsewhere)?
cachedBlock, hasPrefetched, _, err :=
brq.config.BlockCache().GetWithPrefetch(ptr)
if err == nil && cachedBlock != nil {
block.Set(cachedBlock)
return brq.CacheAndPrefetch(ctx, ptr, cachedBlock, kmd, priority,
lifetime, hasPrefetched)
}
// Check the disk cache.
dbc := brq.config.DiskBlockCache()
if dbc == nil {
return NoSuchBlockError{ptr.ID}
}
blockBuf, serverHalf, hasPrefetched, err := dbc.Get(ctx, kmd.TlfID(),
ptr.ID)
if err != nil {
return err
}
if len(blockBuf) == 0 {
return NoSuchBlockError{ptr.ID}
}
// Assemble the block from the encrypted block buffer.
err = brq.config.blockGetter().assembleBlock(ctx, kmd, ptr, block,
blockBuf, serverHalf)
if err != nil {
return err
}
// TODO: once the DiskBlockCache knows about hasPrefetched, pipe that
// through here.
return brq.CacheAndPrefetch(ctx, ptr, block, kmd, priority, lifetime,
hasPrefetched)
}
// Request submits a block request to the queue.
func (brq *blockRetrievalQueue) Request(ctx context.Context,
priority int, kmd KeyMetadata, ptr BlockPointer, block Block,
lifetime BlockCacheLifetime) <-chan error {
// Only continue if we haven't been shut down
ch := make(chan error, 1)
select {
case <-brq.doneCh:
ch <- io.EOF
return ch
default:
}
if block == nil {
ch <- errors.New("nil block passed to blockRetrievalQueue.Request")
return ch
}
// Check caches before locking the mutex.
err := brq.checkCaches(ctx, priority, kmd, ptr, block, lifetime)
if err == nil {
ch <- nil
return ch
}
bpLookup := blockPtrLookup{ptr, reflect.TypeOf(block)}
brq.mtx.Lock()
defer brq.mtx.Unlock()
// We might have to retry if the context has been canceled. This loop will
// iterate a maximum of 2 times. It either hits the `return` statement at
// the bottom on the first iteration, or the `continue` statement first
// which causes it to `return` on the next iteration.
for {
br, exists := brq.ptrs[bpLookup]
if !exists {
// Add to the heap
br = &blockRetrieval{
blockPtr: ptr,
kmd: kmd,
index: -1,
priority: priority,
insertionOrder: brq.insertionCount,
cacheLifetime: lifetime,
}
br.ctx, br.cancelFunc = NewCoalescingContext(ctx)
brq.insertionCount++
brq.ptrs[bpLookup] = br
heap.Push(brq.heap, br)
brq.notifyWorker(priority)
} else {
err := br.ctx.AddContext(ctx)
if err == context.Canceled {
// We need to delete the request pointer, but we'll still let
// the existing request be processed by a worker.
delete(brq.ptrs, bpLookup)
continue
}
}
br.reqMtx.Lock()
br.requests = append(br.requests, &blockRetrievalRequest{
block: block,
doneCh: ch,
})
if lifetime > br.cacheLifetime {
br.cacheLifetime = lifetime
}
br.reqMtx.Unlock()
// If the new request priority is higher, elevate the retrieval in the
// queue. Skip this if the request is no longer in the queue (which
// means it's actively being processed).
oldPriority := br.priority
if br.index != -1 && priority > oldPriority {
br.priority = priority
heap.Fix(brq.heap, br.index)
if oldPriority < defaultOnDemandRequestPriority &&
priority >= defaultOnDemandRequestPriority {
// We've crossed the priority threshold for prefetch workers,
// so we now need an on-demand worker to pick up the request.
// This means that we might have up to two workers "activated"
// per request. However, they won't leak because if a worker
// sees an empty queue, it continues merrily along.
brq.notifyWorker(priority)
}
}
return ch
}
}
// FinalizeRequest is the last step of a retrieval request once a block has
// been obtained. It removes the request from the blockRetrievalQueue,
// preventing more requests from mutating the retrieval, then notifies all
// subscribed requests.
func (brq *blockRetrievalQueue) FinalizeRequest(
retrieval *blockRetrieval, block Block, err error) {
brq.mtx.Lock()
// This might have already been removed if the context has been canceled.
// That's okay, because this will then be a no-op.
bpLookup := blockPtrLookup{retrieval.blockPtr, reflect.TypeOf(block)}
delete(brq.ptrs, bpLookup)
brq.mtx.Unlock()
defer retrieval.cancelFunc()
// Cache the block and trigger prefetches if there is no error.
if err == nil {
// We treat this request as not having been prefetched, because the
// only way to get here is if the request wasn't already cached.
// Need to call with context.Background() because the retrieval's
// context will be canceled as soon as this method returns.
brq.CacheAndPrefetch(context.Background(), retrieval.blockPtr, block,
retrieval.kmd, retrieval.priority, retrieval.cacheLifetime, false)
}
	// This lock is symbolic: no other goroutine should be accessing requests at
	// this point. But requests saw contended access earlier, so we lock here as
	// well to preserve the locking discipline.
retrieval.reqMtx.Lock()
defer retrieval.reqMtx.Unlock()
for _, r := range retrieval.requests {
req := r
if block != nil {
// Copy the decrypted block to the caller
req.block.Set(block)
}
// Since we created this channel with a buffer size of 1, this won't
// block.
req.doneCh <- err
}
}
// Shutdown is called when we are no longer accepting requests.
func (brq *blockRetrievalQueue) Shutdown() {
select {
case <-brq.doneCh:
default:
// We close `doneCh` first so that new requests coming in get finalized
// immediately rather than racing with dying workers.
close(brq.doneCh)
for _, w := range brq.workers {
w.Shutdown()
}
brq.prefetchMtx.Lock()
defer brq.prefetchMtx.Unlock()
brq.prefetcher.Shutdown()
}
}
// TogglePrefetcher allows upstream components to turn the prefetcher on or
// off. If an error is returned due to a context cancelation, the prefetcher is
// never re-enabled.
func (brq *blockRetrievalQueue) TogglePrefetcher(ctx context.Context,
enable bool) (err error) {
// We must hold this lock for the whole function so that multiple calls to
	// this function don't leak prefetchers.
brq.prefetchMtx.Lock()
defer brq.prefetchMtx.Unlock()
	// Don't wait for the existing prefetcher to shut down so we don't deadlock
// any callers.
_ = brq.prefetcher.Shutdown()
if enable {
brq.prefetcher = newBlockPrefetcher(brq, brq.config)
}
return nil
}
// Prefetcher allows us to retrieve the prefetcher.
func (brq *blockRetrievalQueue) Prefetcher() Prefetcher {
brq.prefetchMtx.RLock()
defer brq.prefetchMtx.RUnlock()
return brq.prefetcher
}
| 1 | 17,371 | The PR description says the lowest on-demand request priority is `2^30`. Why the `-1` here? | keybase-kbfs | go |
@@ -609,6 +609,14 @@ bool Game::removeCreature(Creature* creature, bool isLogout/* = true*/)
return true;
}
+void Game::executeDeath(uint32_t creatureId)
+{
+ Creature* creature = getCreatureByID(creatureId);
+ if (creature && !creature->isRemoved() && creature->getHealth() < 1) {
+ creature->onDeath();
+ }
+}
+
void Game::playerMoveThing(uint32_t playerId, const Position& fromPos,
uint16_t spriteId, uint8_t fromStackPos, const Position& toPos, uint8_t count)
{ | 1 | /**
* The Forgotten Server - a free and open-source MMORPG server emulator
* Copyright (C) 2019 Mark Samman <mark.samman@gmail.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
#include "otpch.h"
#include "pugicast.h"
#include "actions.h"
#include "bed.h"
#include "configmanager.h"
#include "creature.h"
#include "creatureevent.h"
#include "databasetasks.h"
#include "events.h"
#include "game.h"
#include "globalevent.h"
#include "iologindata.h"
#include "iomarket.h"
#include "items.h"
#include "monster.h"
#include "movement.h"
#include "scheduler.h"
#include "server.h"
#include "spells.h"
#include "talkaction.h"
#include "weapons.h"
#include "script.h"
extern ConfigManager g_config;
extern Actions* g_actions;
extern Chat* g_chat;
extern TalkActions* g_talkActions;
extern Spells* g_spells;
extern Vocations g_vocations;
extern GlobalEvents* g_globalEvents;
extern CreatureEvents* g_creatureEvents;
extern Events* g_events;
extern Monsters g_monsters;
extern MoveEvents* g_moveEvents;
extern Weapons* g_weapons;
extern Scripts* g_scripts;
Game::Game()
{
offlineTrainingWindow.choices.emplace_back("Sword Fighting and Shielding", SKILL_SWORD);
offlineTrainingWindow.choices.emplace_back("Axe Fighting and Shielding", SKILL_AXE);
offlineTrainingWindow.choices.emplace_back("Club Fighting and Shielding", SKILL_CLUB);
offlineTrainingWindow.choices.emplace_back("Distance Fighting and Shielding", SKILL_DISTANCE);
offlineTrainingWindow.choices.emplace_back("Magic Level and Shielding", SKILL_MAGLEVEL);
offlineTrainingWindow.buttons.emplace_back("Okay", 1);
offlineTrainingWindow.buttons.emplace_back("Cancel", 0);
offlineTrainingWindow.defaultEnterButton = 1;
offlineTrainingWindow.defaultEscapeButton = 0;
offlineTrainingWindow.priority = true;
}
Game::~Game()
{
for (const auto& it : guilds) {
delete it.second;
}
}
void Game::start(ServiceManager* manager)
{
serviceManager = manager;
g_scheduler.addEvent(createSchedulerTask(EVENT_LIGHTINTERVAL, std::bind(&Game::checkLight, this)));
g_scheduler.addEvent(createSchedulerTask(EVENT_CREATURE_THINK_INTERVAL, std::bind(&Game::checkCreatures, this, 0)));
g_scheduler.addEvent(createSchedulerTask(EVENT_DECAYINTERVAL, std::bind(&Game::checkDecay, this)));
}
GameState_t Game::getGameState() const
{
return gameState;
}
void Game::setWorldType(WorldType_t type)
{
worldType = type;
}
void Game::setGameState(GameState_t newState)
{
if (gameState == GAME_STATE_SHUTDOWN) {
return; //this cannot be stopped
}
if (gameState == newState) {
return;
}
gameState = newState;
switch (newState) {
case GAME_STATE_INIT: {
loadExperienceStages();
groups.load();
g_chat->load();
map.spawns.startup();
raids.loadFromXml();
raids.startup();
quests.loadFromXml();
mounts.loadFromXml();
loadMotdNum();
loadPlayersRecord();
g_globalEvents->startup();
break;
}
case GAME_STATE_SHUTDOWN: {
g_globalEvents->execute(GLOBALEVENT_SHUTDOWN);
//kick all players that are still online
auto it = players.begin();
while (it != players.end()) {
it->second->kickPlayer(true);
it = players.begin();
}
saveMotdNum();
saveGameState();
g_dispatcher.addTask(
createTask(std::bind(&Game::shutdown, this)));
g_scheduler.stop();
g_databaseTasks.stop();
g_dispatcher.stop();
break;
}
case GAME_STATE_CLOSED: {
/* kick all players without the CanAlwaysLogin flag */
auto it = players.begin();
while (it != players.end()) {
if (!it->second->hasFlag(PlayerFlag_CanAlwaysLogin)) {
it->second->kickPlayer(true);
it = players.begin();
} else {
++it;
}
}
saveGameState();
break;
}
default:
break;
}
}
void Game::saveGameState()
{
if (gameState == GAME_STATE_NORMAL) {
setGameState(GAME_STATE_MAINTAIN);
}
std::cout << "Saving server..." << std::endl;
for (const auto& it : players) {
it.second->loginPosition = it.second->getPosition();
IOLoginData::savePlayer(it.second);
}
Map::save();
g_databaseTasks.flush();
if (gameState == GAME_STATE_MAINTAIN) {
setGameState(GAME_STATE_NORMAL);
}
}
bool Game::loadMainMap(const std::string& filename)
{
Monster::despawnRange = g_config.getNumber(ConfigManager::DEFAULT_DESPAWNRANGE);
Monster::despawnRadius = g_config.getNumber(ConfigManager::DEFAULT_DESPAWNRADIUS);
return map.loadMap("data/world/" + filename + ".otbm", true);
}
void Game::loadMap(const std::string& path)
{
map.loadMap(path, false);
}
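// internalGetCylinder resolves a client-side position to its cylinder: a map
// tile, one of the player's open containers, or the player's own inventory.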
Cylinder* Game::internalGetCylinder(Player* player, const Position& pos) const
{
if (pos.x != 0xFFFF) {
return map.getTile(pos);
}
//container
if (pos.y & 0x40) {
uint8_t from_cid = pos.y & 0x0F;
return player->getContainerByID(from_cid);
}
//inventory
return player;
}
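// internalGetThing resolves a client-side position (plus stackpos and spriteId
// hints) to the concrete Thing it references, using the given stackpos lookup type.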
Thing* Game::internalGetThing(Player* player, const Position& pos, int32_t index, uint32_t spriteId, stackPosType_t type) const
{
if (pos.x != 0xFFFF) {
Tile* tile = map.getTile(pos);
if (!tile) {
return nullptr;
}
Thing* thing;
switch (type) {
case STACKPOS_LOOK: {
return tile->getTopVisibleThing(player);
}
case STACKPOS_MOVE: {
Item* item = tile->getTopDownItem();
if (item && item->isMoveable()) {
thing = item;
} else {
thing = tile->getTopVisibleCreature(player);
}
break;
}
case STACKPOS_USEITEM: {
thing = tile->getUseItem(index);
break;
}
case STACKPOS_TOPDOWN_ITEM: {
thing = tile->getTopDownItem();
break;
}
case STACKPOS_USETARGET: {
thing = tile->getTopVisibleCreature(player);
if (!thing) {
thing = tile->getUseItem(index);
}
break;
}
default: {
thing = nullptr;
break;
}
}
if (player && tile->hasFlag(TILESTATE_SUPPORTS_HANGABLE)) {
			//do extra checks here if the thing is accessible
if (thing && thing->getItem()) {
if (tile->hasProperty(CONST_PROP_ISVERTICAL)) {
if (player->getPosition().x + 1 == tile->getPosition().x) {
thing = nullptr;
}
} else { // horizontal
if (player->getPosition().y + 1 == tile->getPosition().y) {
thing = nullptr;
}
}
}
}
return thing;
}
//container
if (pos.y & 0x40) {
uint8_t fromCid = pos.y & 0x0F;
Container* parentContainer = player->getContainerByID(fromCid);
if (!parentContainer) {
return nullptr;
}
if (parentContainer->getID() == ITEM_BROWSEFIELD) {
Tile* tile = parentContainer->getTile();
if (tile && tile->hasFlag(TILESTATE_SUPPORTS_HANGABLE)) {
if (tile->hasProperty(CONST_PROP_ISVERTICAL)) {
if (player->getPosition().x + 1 == tile->getPosition().x) {
return nullptr;
}
} else { // horizontal
if (player->getPosition().y + 1 == tile->getPosition().y) {
return nullptr;
}
}
}
}
uint8_t slot = pos.z;
return parentContainer->getItemByIndex(player->getContainerIndex(fromCid) + slot);
} else if (pos.y == 0 && pos.z == 0) {
const ItemType& it = Item::items.getItemIdByClientId(spriteId);
if (it.id == 0) {
return nullptr;
}
int32_t subType;
if (it.isFluidContainer() && index < static_cast<int32_t>(sizeof(reverseFluidMap) / sizeof(uint8_t))) {
subType = reverseFluidMap[index];
} else {
subType = -1;
}
return findItemOfType(player, it.id, true, subType);
}
//inventory
slots_t slot = static_cast<slots_t>(pos.y);
return player->getInventoryItem(slot);
}
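// internalGetPosition computes the client-side position and stackpos of an item,
// encoding container and inventory locations with the 0xFFFF x-coordinate convention.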
void Game::internalGetPosition(Item* item, Position& pos, uint8_t& stackpos)
{
pos.x = 0;
pos.y = 0;
pos.z = 0;
stackpos = 0;
Cylinder* topParent = item->getTopParent();
if (topParent) {
if (Player* player = dynamic_cast<Player*>(topParent)) {
pos.x = 0xFFFF;
Container* container = dynamic_cast<Container*>(item->getParent());
if (container) {
pos.y = static_cast<uint16_t>(0x40) | static_cast<uint16_t>(player->getContainerID(container));
pos.z = container->getThingIndex(item);
stackpos = pos.z;
} else {
pos.y = player->getThingIndex(item);
stackpos = pos.y;
}
} else if (Tile* tile = topParent->getTile()) {
pos = tile->getPosition();
stackpos = tile->getThingIndex(item);
}
}
}
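// getCreatureByID dispatches on the disjoint auto-ID ranges assigned to
// players, monsters and NPCs to find the matching creature.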
Creature* Game::getCreatureByID(uint32_t id)
{
if (id <= Player::playerAutoID) {
return getPlayerByID(id);
} else if (id <= Monster::monsterAutoID) {
return getMonsterByID(id);
} else if (id <= Npc::npcAutoID) {
return getNpcByID(id);
}
return nullptr;
}
Monster* Game::getMonsterByID(uint32_t id)
{
if (id == 0) {
return nullptr;
}
auto it = monsters.find(id);
if (it == monsters.end()) {
return nullptr;
}
return it->second;
}
Npc* Game::getNpcByID(uint32_t id)
{
if (id == 0) {
return nullptr;
}
auto it = npcs.find(id);
if (it == npcs.end()) {
return nullptr;
}
return it->second;
}
Player* Game::getPlayerByID(uint32_t id)
{
if (id == 0) {
return nullptr;
}
auto it = players.find(id);
if (it == players.end()) {
return nullptr;
}
return it->second;
}
Creature* Game::getCreatureByName(const std::string& s)
{
if (s.empty()) {
return nullptr;
}
const std::string& lowerCaseName = asLowerCaseString(s);
auto m_it = mappedPlayerNames.find(lowerCaseName);
if (m_it != mappedPlayerNames.end()) {
return m_it->second;
}
for (const auto& it : npcs) {
if (lowerCaseName == asLowerCaseString(it.second->getName())) {
return it.second;
}
}
for (const auto& it : monsters) {
if (lowerCaseName == asLowerCaseString(it.second->getName())) {
return it.second;
}
}
return nullptr;
}
Npc* Game::getNpcByName(const std::string& s)
{
if (s.empty()) {
return nullptr;
}
const char* npcName = s.c_str();
for (const auto& it : npcs) {
if (strcasecmp(npcName, it.second->getName().c_str()) == 0) {
return it.second;
}
}
return nullptr;
}
Player* Game::getPlayerByName(const std::string& s)
{
if (s.empty()) {
return nullptr;
}
auto it = mappedPlayerNames.find(asLowerCaseString(s));
if (it == mappedPlayerNames.end()) {
return nullptr;
}
return it->second;
}
Player* Game::getPlayerByGUID(const uint32_t& guid)
{
if (guid == 0) {
return nullptr;
}
auto it = mappedPlayerGuids.find(guid);
if (it == mappedPlayerGuids.end()) {
return nullptr;
}
return it->second;
}
ReturnValue Game::getPlayerByNameWildcard(const std::string& s, Player*& player)
{
size_t strlen = s.length();
if (strlen == 0 || strlen > 20) {
return RETURNVALUE_PLAYERWITHTHISNAMEISNOTONLINE;
}
if (s.back() == '~') {
const std::string& query = asLowerCaseString(s.substr(0, strlen - 1));
std::string result;
ReturnValue ret = wildcardTree.findOne(query, result);
if (ret != RETURNVALUE_NOERROR) {
return ret;
}
player = getPlayerByName(result);
} else {
player = getPlayerByName(s);
}
if (!player) {
return RETURNVALUE_PLAYERWITHTHISNAMEISNOTONLINE;
}
return RETURNVALUE_NOERROR;
}
Player* Game::getPlayerByAccount(uint32_t acc)
{
for (const auto& it : players) {
if (it.second->getAccount() == acc) {
return it.second;
}
}
return nullptr;
}
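// internalPlaceCreature registers the creature on the map and in the global
// lists without notifying spectators; placeCreature wraps it and sends the
// appear events.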
bool Game::internalPlaceCreature(Creature* creature, const Position& pos, bool extendedPos /*=false*/, bool forced /*= false*/)
{
if (creature->getParent() != nullptr) {
return false;
}
if (!map.placeCreature(pos, creature, extendedPos, forced)) {
return false;
}
creature->incrementReferenceCounter();
creature->setID();
creature->addList();
return true;
}
bool Game::placeCreature(Creature* creature, const Position& pos, bool extendedPos /*=false*/, bool forced /*= false*/)
{
if (!internalPlaceCreature(creature, pos, extendedPos, forced)) {
return false;
}
SpectatorVec spectators;
map.getSpectators(spectators, creature->getPosition(), true);
for (Creature* spectator : spectators) {
if (Player* tmpPlayer = spectator->getPlayer()) {
tmpPlayer->sendCreatureAppear(creature, creature->getPosition(), true);
}
}
for (Creature* spectator : spectators) {
spectator->onCreatureAppear(creature, true);
}
creature->getParent()->postAddNotification(creature, nullptr, 0);
addCreatureCheck(creature);
creature->onPlacedCreature();
return true;
}
bool Game::removeCreature(Creature* creature, bool isLogout/* = true*/)
{
if (creature->isRemoved()) {
return false;
}
Tile* tile = creature->getTile();
std::vector<int32_t> oldStackPosVector;
SpectatorVec spectators;
map.getSpectators(spectators, tile->getPosition(), true);
for (Creature* spectator : spectators) {
if (Player* player = spectator->getPlayer()) {
oldStackPosVector.push_back(player->canSeeCreature(creature) ? tile->getStackposOfCreature(player, creature) : -1);
}
}
tile->removeCreature(creature);
const Position& tilePosition = tile->getPosition();
//send to client
size_t i = 0;
for (Creature* spectator : spectators) {
if (Player* player = spectator->getPlayer()) {
player->sendRemoveTileThing(tilePosition, oldStackPosVector[i++]);
}
}
//event method
for (Creature* spectator : spectators) {
spectator->onRemoveCreature(creature, isLogout);
}
creature->getParent()->postRemoveNotification(creature, nullptr, 0);
creature->removeList();
creature->setRemoved();
ReleaseCreature(creature);
removeCreatureCheck(creature);
for (Creature* summon : creature->summons) {
summon->setSkillLoss(false);
removeCreature(summon);
}
return true;
}
void Game::playerMoveThing(uint32_t playerId, const Position& fromPos,
uint16_t spriteId, uint8_t fromStackPos, const Position& toPos, uint8_t count)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
uint8_t fromIndex = 0;
if (fromPos.x == 0xFFFF) {
if (fromPos.y & 0x40) {
fromIndex = fromPos.z;
} else {
fromIndex = static_cast<uint8_t>(fromPos.y);
}
} else {
fromIndex = fromStackPos;
}
Thing* thing = internalGetThing(player, fromPos, fromIndex, 0, STACKPOS_MOVE);
if (!thing) {
player->sendCancelMessage(RETURNVALUE_NOTPOSSIBLE);
return;
}
if (Creature* movingCreature = thing->getCreature()) {
Tile* tile = map.getTile(toPos);
if (!tile) {
player->sendCancelMessage(RETURNVALUE_NOTPOSSIBLE);
return;
}
if (Position::areInRange<1, 1, 0>(movingCreature->getPosition(), player->getPosition())) {
SchedulerTask* task = createSchedulerTask(1000,
std::bind(&Game::playerMoveCreatureByID, this, player->getID(),
movingCreature->getID(), movingCreature->getPosition(), tile->getPosition()));
player->setNextActionTask(task);
} else {
playerMoveCreature(player, movingCreature, movingCreature->getPosition(), tile);
}
} else if (thing->getItem()) {
Cylinder* toCylinder = internalGetCylinder(player, toPos);
if (!toCylinder) {
player->sendCancelMessage(RETURNVALUE_NOTPOSSIBLE);
return;
}
playerMoveItem(player, fromPos, spriteId, fromStackPos, toPos, count, thing->getItem(), toCylinder);
}
}
void Game::playerMoveCreatureByID(uint32_t playerId, uint32_t movingCreatureId, const Position& movingCreatureOrigPos, const Position& toPos)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
Creature* movingCreature = getCreatureByID(movingCreatureId);
if (!movingCreature) {
return;
}
Tile* toTile = map.getTile(toPos);
if (!toTile) {
player->sendCancelMessage(RETURNVALUE_NOTPOSSIBLE);
return;
}
playerMoveCreature(player, movingCreature, movingCreatureOrigPos, toTile);
}
void Game::playerMoveCreature(Player* player, Creature* movingCreature, const Position& movingCreatureOrigPos, Tile* toTile)
{
if (!player->canDoAction()) {
uint32_t delay = player->getNextActionTime();
SchedulerTask* task = createSchedulerTask(delay, std::bind(&Game::playerMoveCreatureByID,
this, player->getID(), movingCreature->getID(), movingCreatureOrigPos, toTile->getPosition()));
player->setNextActionTask(task);
return;
}
player->setNextActionTask(nullptr);
if (!Position::areInRange<1, 1, 0>(movingCreatureOrigPos, player->getPosition())) {
//need to walk to the creature first before moving it
std::forward_list<Direction> listDir;
if (player->getPathTo(movingCreatureOrigPos, listDir, 0, 1, true, true)) {
g_dispatcher.addTask(createTask(std::bind(&Game::playerAutoWalk,
this, player->getID(), listDir)));
SchedulerTask* task = createSchedulerTask(1500, std::bind(&Game::playerMoveCreatureByID, this,
player->getID(), movingCreature->getID(), movingCreatureOrigPos, toTile->getPosition()));
player->setNextWalkActionTask(task);
} else {
player->sendCancelMessage(RETURNVALUE_THEREISNOWAY);
}
return;
}
if ((!movingCreature->isPushable() && !player->hasFlag(PlayerFlag_CanPushAllCreatures)) ||
(movingCreature->isInGhostMode() && !player->isAccessPlayer())) {
player->sendCancelMessage(RETURNVALUE_NOTMOVEABLE);
return;
}
//check throw distance
const Position& movingCreaturePos = movingCreature->getPosition();
const Position& toPos = toTile->getPosition();
if ((Position::getDistanceX(movingCreaturePos, toPos) > movingCreature->getThrowRange()) || (Position::getDistanceY(movingCreaturePos, toPos) > movingCreature->getThrowRange()) || (Position::getDistanceZ(movingCreaturePos, toPos) * 4 > movingCreature->getThrowRange())) {
player->sendCancelMessage(RETURNVALUE_DESTINATIONOUTOFREACH);
return;
}
if (player != movingCreature) {
if (toTile->hasFlag(TILESTATE_BLOCKPATH)) {
player->sendCancelMessage(RETURNVALUE_NOTENOUGHROOM);
return;
} else if ((movingCreature->getZone() == ZONE_PROTECTION && !toTile->hasFlag(TILESTATE_PROTECTIONZONE)) || (movingCreature->getZone() == ZONE_NOPVP && !toTile->hasFlag(TILESTATE_NOPVPZONE))) {
player->sendCancelMessage(RETURNVALUE_NOTPOSSIBLE);
return;
} else {
if (CreatureVector* tileCreatures = toTile->getCreatures()) {
for (Creature* tileCreature : *tileCreatures) {
if (!tileCreature->isInGhostMode()) {
player->sendCancelMessage(RETURNVALUE_NOTENOUGHROOM);
return;
}
}
}
Npc* movingNpc = movingCreature->getNpc();
if (movingNpc && !Spawns::isInZone(movingNpc->getMasterPos(), movingNpc->getMasterRadius(), toPos)) {
player->sendCancelMessage(RETURNVALUE_NOTENOUGHROOM);
return;
}
}
}
if (!g_events->eventPlayerOnMoveCreature(player, movingCreature, movingCreaturePos, toPos)) {
return;
}
ReturnValue ret = internalMoveCreature(*movingCreature, *toTile);
if (ret != RETURNVALUE_NOERROR) {
player->sendCancelMessage(ret);
}
}
ReturnValue Game::internalMoveCreature(Creature* creature, Direction direction, uint32_t flags /*= 0*/)
{
creature->setLastPosition(creature->getPosition());
const Position& currentPos = creature->getPosition();
Position destPos = getNextPosition(direction, currentPos);
Player* player = creature->getPlayer();
bool diagonalMovement = (direction & DIRECTION_DIAGONAL_MASK) != 0;
if (player && !diagonalMovement) {
//try go up
if (currentPos.z != 8 && creature->getTile()->hasHeight(3)) {
Tile* tmpTile = map.getTile(currentPos.x, currentPos.y, currentPos.getZ() - 1);
if (tmpTile == nullptr || (tmpTile->getGround() == nullptr && !tmpTile->hasFlag(TILESTATE_BLOCKSOLID))) {
tmpTile = map.getTile(destPos.x, destPos.y, destPos.getZ() - 1);
if (tmpTile && tmpTile->getGround() && !tmpTile->hasFlag(TILESTATE_BLOCKSOLID)) {
flags |= FLAG_IGNOREBLOCKITEM | FLAG_IGNOREBLOCKCREATURE;
if (!tmpTile->hasFlag(TILESTATE_FLOORCHANGE)) {
player->setDirection(direction);
destPos.z--;
}
}
}
}
//try go down
if (currentPos.z != 7 && currentPos.z == destPos.z) {
Tile* tmpTile = map.getTile(destPos.x, destPos.y, destPos.z);
if (tmpTile == nullptr || (tmpTile->getGround() == nullptr && !tmpTile->hasFlag(TILESTATE_BLOCKSOLID))) {
tmpTile = map.getTile(destPos.x, destPos.y, destPos.z + 1);
if (tmpTile && tmpTile->hasHeight(3)) {
flags |= FLAG_IGNOREBLOCKITEM | FLAG_IGNOREBLOCKCREATURE;
player->setDirection(direction);
destPos.z++;
}
}
}
}
Tile* toTile = map.getTile(destPos);
if (!toTile) {
return RETURNVALUE_NOTPOSSIBLE;
}
return internalMoveCreature(*creature, *toTile, flags);
}
ReturnValue Game::internalMoveCreature(Creature& creature, Tile& toTile, uint32_t flags /*= 0*/)
{
//check if we can move the creature to the destination
ReturnValue ret = toTile.queryAdd(0, creature, 1, flags);
if (ret != RETURNVALUE_NOERROR) {
return ret;
}
map.moveCreature(creature, toTile);
if (creature.getParent() != &toTile) {
return RETURNVALUE_NOERROR;
}
int32_t index = 0;
Item* toItem = nullptr;
Tile* subCylinder = nullptr;
Tile* toCylinder = &toTile;
Tile* fromCylinder = nullptr;
uint32_t n = 0;
while ((subCylinder = toCylinder->queryDestination(index, creature, &toItem, flags)) != toCylinder) {
map.moveCreature(creature, *subCylinder);
if (creature.getParent() != subCylinder) {
			//could happen if a script moves the creature
fromCylinder = nullptr;
break;
}
fromCylinder = toCylinder;
toCylinder = subCylinder;
flags = 0;
//to prevent infinite loop
if (++n >= MAP_MAX_LAYERS) {
break;
}
}
if (fromCylinder) {
const Position& fromPosition = fromCylinder->getPosition();
const Position& toPosition = toCylinder->getPosition();
if (fromPosition.z != toPosition.z && (fromPosition.x != toPosition.x || fromPosition.y != toPosition.y)) {
Direction dir = getDirectionTo(fromPosition, toPosition);
if ((dir & DIRECTION_DIAGONAL_MASK) == 0) {
internalCreatureTurn(&creature, dir);
}
}
}
return RETURNVALUE_NOERROR;
}
void Game::playerMoveItemByPlayerID(uint32_t playerId, const Position& fromPos, uint16_t spriteId, uint8_t fromStackPos, const Position& toPos, uint8_t count)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
playerMoveItem(player, fromPos, spriteId, fromStackPos, toPos, count, nullptr, nullptr);
}
void Game::playerMoveItem(Player* player, const Position& fromPos,
uint16_t spriteId, uint8_t fromStackPos, const Position& toPos, uint8_t count, Item* item, Cylinder* toCylinder)
{
if (!player->canDoAction()) {
uint32_t delay = player->getNextActionTime();
SchedulerTask* task = createSchedulerTask(delay, std::bind(&Game::playerMoveItemByPlayerID, this,
player->getID(), fromPos, spriteId, fromStackPos, toPos, count));
player->setNextActionTask(task);
return;
}
player->setNextActionTask(nullptr);
if (item == nullptr) {
uint8_t fromIndex = 0;
if (fromPos.x == 0xFFFF) {
if (fromPos.y & 0x40) {
fromIndex = fromPos.z;
} else {
fromIndex = static_cast<uint8_t>(fromPos.y);
}
} else {
fromIndex = fromStackPos;
}
Thing* thing = internalGetThing(player, fromPos, fromIndex, 0, STACKPOS_MOVE);
if (!thing || !thing->getItem()) {
player->sendCancelMessage(RETURNVALUE_NOTPOSSIBLE);
return;
}
item = thing->getItem();
}
if (item->getClientID() != spriteId) {
player->sendCancelMessage(RETURNVALUE_NOTPOSSIBLE);
return;
}
Cylinder* fromCylinder = internalGetCylinder(player, fromPos);
if (fromCylinder == nullptr) {
player->sendCancelMessage(RETURNVALUE_NOTPOSSIBLE);
return;
}
if (toCylinder == nullptr) {
toCylinder = internalGetCylinder(player, toPos);
if (toCylinder == nullptr) {
player->sendCancelMessage(RETURNVALUE_NOTPOSSIBLE);
return;
}
}
if (!item->isPushable() || item->hasAttribute(ITEM_ATTRIBUTE_UNIQUEID)) {
player->sendCancelMessage(RETURNVALUE_NOTMOVEABLE);
return;
}
const Position& playerPos = player->getPosition();
const Position& mapFromPos = fromCylinder->getTile()->getPosition();
if (playerPos.z != mapFromPos.z) {
player->sendCancelMessage(playerPos.z > mapFromPos.z ? RETURNVALUE_FIRSTGOUPSTAIRS : RETURNVALUE_FIRSTGODOWNSTAIRS);
return;
}
if (!Position::areInRange<1, 1>(playerPos, mapFromPos)) {
//need to walk to the item first before using it
std::forward_list<Direction> listDir;
if (player->getPathTo(item->getPosition(), listDir, 0, 1, true, true)) {
g_dispatcher.addTask(createTask(std::bind(&Game::playerAutoWalk,
this, player->getID(), listDir)));
SchedulerTask* task = createSchedulerTask(400, std::bind(&Game::playerMoveItemByPlayerID, this,
player->getID(), fromPos, spriteId, fromStackPos, toPos, count));
player->setNextWalkActionTask(task);
} else {
player->sendCancelMessage(RETURNVALUE_THEREISNOWAY);
}
return;
}
const Tile* toCylinderTile = toCylinder->getTile();
const Position& mapToPos = toCylinderTile->getPosition();
//hangable item specific code
if (item->isHangable() && toCylinderTile->hasFlag(TILESTATE_SUPPORTS_HANGABLE)) {
//destination supports hangable objects so need to move there first
bool vertical = toCylinderTile->hasProperty(CONST_PROP_ISVERTICAL);
if (vertical) {
if (playerPos.x + 1 == mapToPos.x) {
player->sendCancelMessage(RETURNVALUE_NOTPOSSIBLE);
return;
}
} else { // horizontal
if (playerPos.y + 1 == mapToPos.y) {
player->sendCancelMessage(RETURNVALUE_NOTPOSSIBLE);
return;
}
}
if (!Position::areInRange<1, 1, 0>(playerPos, mapToPos)) {
Position walkPos = mapToPos;
if (vertical) {
walkPos.x++;
} else {
walkPos.y++;
}
Position itemPos = fromPos;
uint8_t itemStackPos = fromStackPos;
if (fromPos.x != 0xFFFF && Position::areInRange<1, 1>(mapFromPos, playerPos)
&& !Position::areInRange<1, 1, 0>(mapFromPos, walkPos)) {
				//need to pick up the item first
Item* moveItem = nullptr;
ReturnValue ret = internalMoveItem(fromCylinder, player, INDEX_WHEREEVER, item, count, &moveItem, 0, player, nullptr, &fromPos, &toPos);
if (ret != RETURNVALUE_NOERROR) {
player->sendCancelMessage(ret);
return;
}
				//changing the position since it's now in the player's inventory
internalGetPosition(moveItem, itemPos, itemStackPos);
}
std::forward_list<Direction> listDir;
if (player->getPathTo(walkPos, listDir, 0, 0, true, true)) {
g_dispatcher.addTask(createTask(std::bind(&Game::playerAutoWalk,
this, player->getID(), listDir)));
SchedulerTask* task = createSchedulerTask(400, std::bind(&Game::playerMoveItemByPlayerID, this,
player->getID(), itemPos, spriteId, itemStackPos, toPos, count));
player->setNextWalkActionTask(task);
} else {
player->sendCancelMessage(RETURNVALUE_THEREISNOWAY);
}
return;
}
}
if ((Position::getDistanceX(playerPos, mapToPos) > item->getThrowRange()) ||
(Position::getDistanceY(playerPos, mapToPos) > item->getThrowRange()) ||
(Position::getDistanceZ(mapFromPos, mapToPos) * 4 > item->getThrowRange())) {
player->sendCancelMessage(RETURNVALUE_DESTINATIONOUTOFREACH);
return;
}
if (!canThrowObjectTo(mapFromPos, mapToPos)) {
player->sendCancelMessage(RETURNVALUE_CANNOTTHROW);
return;
}
uint8_t toIndex = 0;
if (toPos.x == 0xFFFF) {
if (toPos.y & 0x40) {
toIndex = toPos.z;
} else {
toIndex = static_cast<uint8_t>(toPos.y);
}
}
ReturnValue ret = internalMoveItem(fromCylinder, toCylinder, toIndex, item, count, nullptr, 0, player, nullptr, &fromPos, &toPos);
if (ret != RETURNVALUE_NOERROR) {
player->sendCancelMessage(ret);
}
}
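// internalMoveItem moves (part of) an item between cylinders, handling stack
// merging, exchange moves, trade-item protection and decay bookkeeping; the
// moved item or remainder is returned through _moveItem when requested.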
ReturnValue Game::internalMoveItem(Cylinder* fromCylinder, Cylinder* toCylinder, int32_t index,
Item* item, uint32_t count, Item** _moveItem, uint32_t flags /*= 0*/, Creature* actor/* = nullptr*/, Item* tradeItem/* = nullptr*/, const Position* fromPos /*= nullptr*/, const Position* toPos/*= nullptr*/)
{
Player* actorPlayer = actor ? actor->getPlayer() : nullptr;
if (actorPlayer && fromPos && toPos) {
if (!g_events->eventPlayerOnMoveItem(actorPlayer, item, count, *fromPos, *toPos, fromCylinder, toCylinder)) {
return RETURNVALUE_NOTPOSSIBLE;
}
}
Tile* fromTile = fromCylinder->getTile();
if (fromTile) {
auto it = browseFields.find(fromTile);
if (it != browseFields.end() && it->second == fromCylinder) {
fromCylinder = fromTile;
}
}
Item* toItem = nullptr;
Cylinder* subCylinder;
int floorN = 0;
while ((subCylinder = toCylinder->queryDestination(index, *item, &toItem, flags)) != toCylinder) {
toCylinder = subCylinder;
flags = 0;
//to prevent infinite loop
if (++floorN >= MAP_MAX_LAYERS) {
break;
}
}
//destination is the same as the source?
if (item == toItem) {
return RETURNVALUE_NOERROR; //silently ignore move
}
//check if we can add this item
ReturnValue ret = toCylinder->queryAdd(index, *item, count, flags, actor);
if (ret == RETURNVALUE_NEEDEXCHANGE) {
//check if we can add it to source cylinder
ret = fromCylinder->queryAdd(fromCylinder->getThingIndex(item), *toItem, toItem->getItemCount(), 0);
if (ret == RETURNVALUE_NOERROR) {
//check how much we can move
uint32_t maxExchangeQueryCount = 0;
ReturnValue retExchangeMaxCount = fromCylinder->queryMaxCount(INDEX_WHEREEVER, *toItem, toItem->getItemCount(), maxExchangeQueryCount, 0);
if (retExchangeMaxCount != RETURNVALUE_NOERROR && maxExchangeQueryCount == 0) {
return retExchangeMaxCount;
}
if (toCylinder->queryRemove(*toItem, toItem->getItemCount(), flags) == RETURNVALUE_NOERROR) {
int32_t oldToItemIndex = toCylinder->getThingIndex(toItem);
toCylinder->removeThing(toItem, toItem->getItemCount());
fromCylinder->addThing(toItem);
if (oldToItemIndex != -1) {
toCylinder->postRemoveNotification(toItem, fromCylinder, oldToItemIndex);
}
int32_t newToItemIndex = fromCylinder->getThingIndex(toItem);
if (newToItemIndex != -1) {
fromCylinder->postAddNotification(toItem, toCylinder, newToItemIndex);
}
ret = toCylinder->queryAdd(index, *item, count, flags);
toItem = nullptr;
}
}
}
if (ret != RETURNVALUE_NOERROR) {
return ret;
}
//check how much we can move
uint32_t maxQueryCount = 0;
ReturnValue retMaxCount = toCylinder->queryMaxCount(index, *item, count, maxQueryCount, flags);
if (retMaxCount != RETURNVALUE_NOERROR && maxQueryCount == 0) {
return retMaxCount;
}
uint32_t m;
if (item->isStackable()) {
m = std::min<uint32_t>(count, maxQueryCount);
} else {
m = maxQueryCount;
}
Item* moveItem = item;
//check if we can remove this item
ret = fromCylinder->queryRemove(*item, m, flags);
if (ret != RETURNVALUE_NOERROR) {
return ret;
}
if (tradeItem) {
if (toCylinder->getItem() == tradeItem) {
return RETURNVALUE_NOTENOUGHROOM;
}
Cylinder* tmpCylinder = toCylinder->getParent();
while (tmpCylinder) {
if (tmpCylinder->getItem() == tradeItem) {
return RETURNVALUE_NOTENOUGHROOM;
}
tmpCylinder = tmpCylinder->getParent();
}
}
//remove the item
int32_t itemIndex = fromCylinder->getThingIndex(item);
Item* updateItem = nullptr;
fromCylinder->removeThing(item, m);
//update item(s)
if (item->isStackable()) {
uint32_t n;
if (item->equals(toItem)) {
n = std::min<uint32_t>(100 - toItem->getItemCount(), m);
toCylinder->updateThing(toItem, toItem->getID(), toItem->getItemCount() + n);
updateItem = toItem;
} else {
n = 0;
}
int32_t newCount = m - n;
if (newCount > 0) {
moveItem = item->clone();
moveItem->setItemCount(newCount);
} else {
moveItem = nullptr;
}
if (item->isRemoved()) {
ReleaseItem(item);
}
}
//add item
if (moveItem /*m - n > 0*/) {
toCylinder->addThing(index, moveItem);
}
if (itemIndex != -1) {
fromCylinder->postRemoveNotification(item, toCylinder, itemIndex);
}
if (moveItem) {
int32_t moveItemIndex = toCylinder->getThingIndex(moveItem);
if (moveItemIndex != -1) {
toCylinder->postAddNotification(moveItem, fromCylinder, moveItemIndex);
}
}
if (updateItem) {
int32_t updateItemIndex = toCylinder->getThingIndex(updateItem);
if (updateItemIndex != -1) {
toCylinder->postAddNotification(updateItem, fromCylinder, updateItemIndex);
}
}
if (_moveItem) {
if (moveItem) {
*_moveItem = moveItem;
} else {
*_moveItem = item;
}
}
//we could not move all, inform the player
if (item->isStackable() && maxQueryCount < count) {
return retMaxCount;
}
if (moveItem && moveItem->getDuration() > 0) {
if (moveItem->getDecaying() != DECAYING_TRUE) {
moveItem->incrementReferenceCounter();
moveItem->setDecaying(DECAYING_TRUE);
toDecayItems.push_front(moveItem);
}
}
if (actorPlayer && fromPos && toPos) {
g_events->eventPlayerOnItemMoved(actorPlayer, item, count, *fromPos, *toPos, fromCylinder, toCylinder);
}
return ret;
}
ReturnValue Game::internalAddItem(Cylinder* toCylinder, Item* item, int32_t index /*= INDEX_WHEREEVER*/,
uint32_t flags/* = 0*/, bool test/* = false*/)
{
uint32_t remainderCount = 0;
return internalAddItem(toCylinder, item, index, flags, test, remainderCount);
}
ReturnValue Game::internalAddItem(Cylinder* toCylinder, Item* item, int32_t index,
uint32_t flags, bool test, uint32_t& remainderCount)
{
if (toCylinder == nullptr || item == nullptr) {
return RETURNVALUE_NOTPOSSIBLE;
}
Cylinder* destCylinder = toCylinder;
Item* toItem = nullptr;
toCylinder = toCylinder->queryDestination(index, *item, &toItem, flags);
//check if we can add this item
ReturnValue ret = toCylinder->queryAdd(index, *item, item->getItemCount(), flags);
if (ret != RETURNVALUE_NOERROR) {
return ret;
}
/*
	Check if we can add the whole amount; we do this by checking against the original cylinder,
	since queryDestination can return a cylinder that might only hold part of the full amount.
*/
uint32_t maxQueryCount = 0;
ret = destCylinder->queryMaxCount(INDEX_WHEREEVER, *item, item->getItemCount(), maxQueryCount, flags);
if (ret != RETURNVALUE_NOERROR) {
return ret;
}
if (test) {
return RETURNVALUE_NOERROR;
}
if (item->isStackable() && item->equals(toItem)) {
uint32_t m = std::min<uint32_t>(item->getItemCount(), maxQueryCount);
uint32_t n = std::min<uint32_t>(100 - toItem->getItemCount(), m);
toCylinder->updateThing(toItem, toItem->getID(), toItem->getItemCount() + n);
int32_t count = m - n;
if (count > 0) {
if (item->getItemCount() != count) {
Item* remainderItem = item->clone();
remainderItem->setItemCount(count);
if (internalAddItem(destCylinder, remainderItem, INDEX_WHEREEVER, flags, false) != RETURNVALUE_NOERROR) {
ReleaseItem(remainderItem);
remainderCount = count;
}
} else {
toCylinder->addThing(index, item);
int32_t itemIndex = toCylinder->getThingIndex(item);
if (itemIndex != -1) {
toCylinder->postAddNotification(item, nullptr, itemIndex);
}
}
} else {
//fully merged with toItem, item will be destroyed
item->onRemoved();
ReleaseItem(item);
int32_t itemIndex = toCylinder->getThingIndex(toItem);
if (itemIndex != -1) {
toCylinder->postAddNotification(toItem, nullptr, itemIndex);
}
}
} else {
toCylinder->addThing(index, item);
int32_t itemIndex = toCylinder->getThingIndex(item);
if (itemIndex != -1) {
toCylinder->postAddNotification(item, nullptr, itemIndex);
}
}
if (item->getDuration() > 0) {
item->incrementReferenceCounter();
item->setDecaying(DECAYING_TRUE);
toDecayItems.push_front(item);
}
return RETURNVALUE_NOERROR;
}
ReturnValue Game::internalRemoveItem(Item* item, int32_t count /*= -1*/, bool test /*= false*/, uint32_t flags /*= 0*/)
{
Cylinder* cylinder = item->getParent();
if (cylinder == nullptr) {
return RETURNVALUE_NOTPOSSIBLE;
}
Tile* fromTile = cylinder->getTile();
if (fromTile) {
auto it = browseFields.find(fromTile);
if (it != browseFields.end() && it->second == cylinder) {
cylinder = fromTile;
}
}
if (count == -1) {
count = item->getItemCount();
}
//check if we can remove this item
ReturnValue ret = cylinder->queryRemove(*item, count, flags | FLAG_IGNORENOTMOVEABLE);
if (ret != RETURNVALUE_NOERROR) {
return ret;
}
if (!item->canRemove()) {
return RETURNVALUE_NOTPOSSIBLE;
}
if (!test) {
int32_t index = cylinder->getThingIndex(item);
//remove the item
cylinder->removeThing(item, count);
if (item->isRemoved()) {
item->onRemoved();
if (item->canDecay()) {
decayItems->remove(item);
}
ReleaseItem(item);
}
cylinder->postRemoveNotification(item, nullptr, index);
}
return RETURNVALUE_NOERROR;
}
ReturnValue Game::internalPlayerAddItem(Player* player, Item* item, bool dropOnMap /*= true*/, slots_t slot /*= CONST_SLOT_WHEREEVER*/)
{
uint32_t remainderCount = 0;
ReturnValue ret = internalAddItem(player, item, static_cast<int32_t>(slot), 0, false, remainderCount);
if (remainderCount != 0) {
Item* remainderItem = Item::CreateItem(item->getID(), remainderCount);
ReturnValue remaindRet = internalAddItem(player->getTile(), remainderItem, INDEX_WHEREEVER, FLAG_NOLIMIT);
if (remaindRet != RETURNVALUE_NOERROR) {
ReleaseItem(remainderItem);
}
}
if (ret != RETURNVALUE_NOERROR && dropOnMap) {
ret = internalAddItem(player->getTile(), item, INDEX_WHEREEVER, FLAG_NOLIMIT);
}
return ret;
}
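// findItemOfType performs a breadth-first search through the cylinder and,
// when depthSearch is set, its nested containers for the first item matching
// itemId and subType.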
Item* Game::findItemOfType(Cylinder* cylinder, uint16_t itemId,
bool depthSearch /*= true*/, int32_t subType /*= -1*/) const
{
if (cylinder == nullptr) {
return nullptr;
}
std::vector<Container*> containers;
for (size_t i = cylinder->getFirstIndex(), j = cylinder->getLastIndex(); i < j; ++i) {
Thing* thing = cylinder->getThing(i);
if (!thing) {
continue;
}
Item* item = thing->getItem();
if (!item) {
continue;
}
if (item->getID() == itemId && (subType == -1 || subType == item->getSubType())) {
return item;
}
if (depthSearch) {
Container* container = item->getContainer();
if (container) {
containers.push_back(container);
}
}
}
size_t i = 0;
while (i < containers.size()) {
Container* container = containers[i++];
for (Item* item : container->getItemList()) {
if (item->getID() == itemId && (subType == -1 || subType == item->getSubType())) {
return item;
}
Container* subContainer = item->getContainer();
if (subContainer) {
containers.push_back(subContainer);
}
}
}
return nullptr;
}
bool Game::removeMoney(Cylinder* cylinder, uint64_t money, uint32_t flags /*= 0*/)
{
if (cylinder == nullptr) {
return false;
}
if (money == 0) {
return true;
}
std::vector<Container*> containers;
std::multimap<uint32_t, Item*> moneyMap;
uint64_t moneyCount = 0;
for (size_t i = cylinder->getFirstIndex(), j = cylinder->getLastIndex(); i < j; ++i) {
Thing* thing = cylinder->getThing(i);
if (!thing) {
continue;
}
Item* item = thing->getItem();
if (!item) {
continue;
}
Container* container = item->getContainer();
if (container) {
containers.push_back(container);
} else {
const uint32_t worth = item->getWorth();
if (worth != 0) {
moneyCount += worth;
moneyMap.emplace(worth, item);
}
}
}
size_t i = 0;
while (i < containers.size()) {
Container* container = containers[i++];
for (Item* item : container->getItemList()) {
Container* tmpContainer = item->getContainer();
if (tmpContainer) {
containers.push_back(tmpContainer);
} else {
const uint32_t worth = item->getWorth();
if (worth != 0) {
moneyCount += worth;
moneyMap.emplace(worth, item);
}
}
}
}
if (moneyCount < money) {
return false;
}
for (const auto& moneyEntry : moneyMap) {
Item* item = moneyEntry.second;
if (moneyEntry.first < money) {
internalRemoveItem(item);
money -= moneyEntry.first;
} else if (moneyEntry.first > money) {
const uint32_t worth = moneyEntry.first / item->getItemCount();
const uint32_t removeCount = std::ceil(money / static_cast<double>(worth));
addMoney(cylinder, (worth * removeCount) - money, flags);
internalRemoveItem(item, removeCount);
break;
} else {
internalRemoveItem(item);
break;
}
}
return true;
}
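// addMoney adds the amount using the fewest coins: crystal coins (10000 gp,
// in stacks of up to 100), then platinum coins (100 gp), then gold coins.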
void Game::addMoney(Cylinder* cylinder, uint64_t money, uint32_t flags /*= 0*/)
{
if (money == 0) {
return;
}
uint32_t crystalCoins = money / 10000;
money -= crystalCoins * 10000;
while (crystalCoins > 0) {
const uint16_t count = std::min<uint32_t>(100, crystalCoins);
Item* remaindItem = Item::CreateItem(ITEM_CRYSTAL_COIN, count);
ReturnValue ret = internalAddItem(cylinder, remaindItem, INDEX_WHEREEVER, flags);
if (ret != RETURNVALUE_NOERROR) {
internalAddItem(cylinder->getTile(), remaindItem, INDEX_WHEREEVER, FLAG_NOLIMIT);
}
crystalCoins -= count;
}
uint16_t platinumCoins = money / 100;
if (platinumCoins != 0) {
Item* remaindItem = Item::CreateItem(ITEM_PLATINUM_COIN, platinumCoins);
ReturnValue ret = internalAddItem(cylinder, remaindItem, INDEX_WHEREEVER, flags);
if (ret != RETURNVALUE_NOERROR) {
internalAddItem(cylinder->getTile(), remaindItem, INDEX_WHEREEVER, FLAG_NOLIMIT);
}
money -= platinumCoins * 100;
}
if (money != 0) {
Item* remaindItem = Item::CreateItem(ITEM_GOLD_COIN, money);
ReturnValue ret = internalAddItem(cylinder, remaindItem, INDEX_WHEREEVER, flags);
if (ret != RETURNVALUE_NOERROR) {
internalAddItem(cylinder->getTile(), remaindItem, INDEX_WHEREEVER, FLAG_NOLIMIT);
}
}
}
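// transformItem changes an item into newId/newCount in place when the item
// types are compatible; otherwise the old item is removed and a replacement
// is created at the same position.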
Item* Game::transformItem(Item* item, uint16_t newId, int32_t newCount /*= -1*/)
{
if (item->getID() == newId && (newCount == -1 || (newCount == item->getSubType() && newCount != 0))) { //chargeless item placed on map = infinite
return item;
}
Cylinder* cylinder = item->getParent();
if (cylinder == nullptr) {
return nullptr;
}
Tile* fromTile = cylinder->getTile();
if (fromTile) {
auto it = browseFields.find(fromTile);
if (it != browseFields.end() && it->second == cylinder) {
cylinder = fromTile;
}
}
int32_t itemIndex = cylinder->getThingIndex(item);
if (itemIndex == -1) {
return item;
}
if (!item->canTransform()) {
return item;
}
const ItemType& newType = Item::items[newId];
if (newType.id == 0) {
return item;
}
const ItemType& curType = Item::items[item->getID()];
if (curType.alwaysOnTop != newType.alwaysOnTop) {
//This only occurs when you transform items on tiles from a downItem to a topItem (or vice versa)
//Remove the old, and add the new
cylinder->removeThing(item, item->getItemCount());
cylinder->postRemoveNotification(item, cylinder, itemIndex);
item->setID(newId);
if (newCount != -1) {
item->setSubType(newCount);
}
cylinder->addThing(item);
Cylinder* newParent = item->getParent();
if (newParent == nullptr) {
ReleaseItem(item);
return nullptr;
}
newParent->postAddNotification(item, cylinder, newParent->getThingIndex(item));
return item;
}
if (curType.type == newType.type) {
//Both items have the same type, so we can safely change the id/subtype
if (newCount == 0 && (item->isStackable() || item->hasAttribute(ITEM_ATTRIBUTE_CHARGES))) {
if (item->isStackable()) {
internalRemoveItem(item);
return nullptr;
} else {
int32_t newItemId = newId;
if (curType.id == newType.id) {
newItemId = item->getDecayTo();
}
if (newItemId < 0) {
internalRemoveItem(item);
return nullptr;
} else if (newItemId != newId) {
//Replace the old item with the new one while maintaining the old position
Item* newItem = Item::CreateItem(newItemId, 1);
if (newItem == nullptr) {
return nullptr;
}
cylinder->replaceThing(itemIndex, newItem);
cylinder->postAddNotification(newItem, cylinder, itemIndex);
item->setParent(nullptr);
cylinder->postRemoveNotification(item, cylinder, itemIndex);
ReleaseItem(item);
return newItem;
} else {
return transformItem(item, newItemId);
}
}
} else {
cylinder->postRemoveNotification(item, cylinder, itemIndex);
uint16_t itemId = item->getID();
int32_t count = item->getSubType();
if (curType.id != newType.id) {
if (newType.group != curType.group) {
item->setDefaultSubtype();
}
itemId = newId;
}
if (newCount != -1 && newType.hasSubType()) {
count = newCount;
}
cylinder->updateThing(item, itemId, count);
cylinder->postAddNotification(item, cylinder, itemIndex);
return item;
}
}
//Replace the old item with the new one while maintaining the old position
Item* newItem;
if (newCount == -1) {
newItem = Item::CreateItem(newId);
} else {
newItem = Item::CreateItem(newId, newCount);
}
if (newItem == nullptr) {
return nullptr;
}
cylinder->replaceThing(itemIndex, newItem);
cylinder->postAddNotification(newItem, cylinder, itemIndex);
item->setParent(nullptr);
cylinder->postRemoveNotification(item, cylinder, itemIndex);
ReleaseItem(item);
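//schedule decay for the replacement item if it has a duration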
if (newItem->getDuration() > 0) {
if (newItem->getDecaying() != DECAYING_TRUE) {
newItem->incrementReferenceCounter();
newItem->setDecaying(DECAYING_TRUE);
toDecayItems.push_front(newItem);
}
}
return newItem;
}
ReturnValue Game::internalTeleport(Thing* thing, const Position& newPos, bool pushMove/* = true*/, uint32_t flags /*= 0*/)
{
if (newPos == thing->getPosition()) {
return RETURNVALUE_NOERROR;
} else if (thing->isRemoved()) {
return RETURNVALUE_NOTPOSSIBLE;
}
Tile* toTile = map.getTile(newPos);
if (!toTile) {
return RETURNVALUE_NOTPOSSIBLE;
}
if (Creature* creature = thing->getCreature()) {
ReturnValue ret = toTile->queryAdd(0, *creature, 1, FLAG_NOLIMIT);
if (ret != RETURNVALUE_NOERROR) {
return ret;
}
map.moveCreature(*creature, *toTile, !pushMove);
return RETURNVALUE_NOERROR;
} else if (Item* item = thing->getItem()) {
return internalMoveItem(item->getParent(), toTile, INDEX_WHEREEVER, item, item->getItemCount(), nullptr, flags);
}
return RETURNVALUE_NOTPOSSIBLE;
}
Item* searchForItem(Container* container, uint16_t itemId)
{
for (ContainerIterator it = container->iterator(); it.hasNext(); it.advance()) {
if ((*it)->getID() == itemId) {
return *it;
}
}
return nullptr;
}
slots_t getSlotType(const ItemType& it)
{
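//shields always go to the right hand; everything else is resolved from the slot position flags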
slots_t slot = CONST_SLOT_RIGHT;
if (it.weaponType != WeaponType_t::WEAPON_SHIELD) {
int32_t slotPosition = it.slotPosition;
if (slotPosition & SLOTP_HEAD) {
slot = CONST_SLOT_HEAD;
} else if (slotPosition & SLOTP_NECKLACE) {
slot = CONST_SLOT_NECKLACE;
} else if (slotPosition & SLOTP_ARMOR) {
slot = CONST_SLOT_ARMOR;
} else if (slotPosition & SLOTP_LEGS) {
slot = CONST_SLOT_LEGS;
} else if (slotPosition & SLOTP_FEET) {
slot = CONST_SLOT_FEET;
} else if (slotPosition & SLOTP_RING) {
slot = CONST_SLOT_RING;
} else if (slotPosition & SLOTP_AMMO) {
slot = CONST_SLOT_AMMO;
} else if (slotPosition & SLOTP_TWO_HAND || slotPosition & SLOTP_LEFT) {
slot = CONST_SLOT_LEFT;
}
}
return slot;
}
//Implementation of player invoked events
void Game::playerEquipItem(uint32_t playerId, uint16_t spriteId)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
Item* item = player->getInventoryItem(CONST_SLOT_BACKPACK);
if (!item) {
return;
}
Container* backpack = item->getContainer();
if (!backpack) {
return;
}
const ItemType& it = Item::items.getItemIdByClientId(spriteId);
slots_t slot = getSlotType(it);
Item* slotItem = player->getInventoryItem(slot);
Item* equipItem = searchForItem(backpack, it.id);
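//if the slot already holds the same item and it cannot be stacked any
//further, unequip it; otherwise equip the matching item from the backpack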
if (slotItem && slotItem->getID() == it.id && (!it.stackable || slotItem->getItemCount() == 100 || !equipItem)) {
internalMoveItem(slotItem->getParent(), player, CONST_SLOT_WHEREEVER, slotItem, slotItem->getItemCount(), nullptr);
} else if (equipItem) {
internalMoveItem(equipItem->getParent(), player, slot, equipItem, equipItem->getItemCount(), nullptr);
}
}
void Game::playerMove(uint32_t playerId, Direction direction)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
player->resetIdleTime();
player->setNextWalkActionTask(nullptr);
player->startAutoWalk(std::forward_list<Direction> { direction });
}
bool Game::playerBroadcastMessage(Player* player, const std::string& text) const
{
if (!player->hasFlag(PlayerFlag_CanBroadcast)) {
return false;
}
std::cout << "> " << player->getName() << " broadcasted: \"" << text << "\"." << std::endl;
for (const auto& it : players) {
it.second->sendPrivateMessage(player, TALKTYPE_BROADCAST, text);
}
return true;
}
void Game::playerCreatePrivateChannel(uint32_t playerId)
{
Player* player = getPlayerByID(playerId);
if (!player || !player->isPremium()) {
return;
}
ChatChannel* channel = g_chat->createChannel(*player, CHANNEL_PRIVATE);
if (!channel || !channel->addUser(*player)) {
return;
}
player->sendCreatePrivateChannel(channel->getId(), channel->getName());
}
void Game::playerChannelInvite(uint32_t playerId, const std::string& name)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
PrivateChatChannel* channel = g_chat->getPrivateChannel(*player);
if (!channel) {
return;
}
Player* invitePlayer = getPlayerByName(name);
if (!invitePlayer) {
return;
}
if (player == invitePlayer) {
return;
}
channel->invitePlayer(*player, *invitePlayer);
}
void Game::playerChannelExclude(uint32_t playerId, const std::string& name)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
PrivateChatChannel* channel = g_chat->getPrivateChannel(*player);
if (!channel) {
return;
}
Player* excludePlayer = getPlayerByName(name);
if (!excludePlayer) {
return;
}
if (player == excludePlayer) {
return;
}
channel->excludePlayer(*player, *excludePlayer);
}
void Game::playerRequestChannels(uint32_t playerId)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
player->sendChannelsDialog();
}
void Game::playerOpenChannel(uint32_t playerId, uint16_t channelId)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
ChatChannel* channel = g_chat->addUserToChannel(*player, channelId);
if (!channel) {
return;
}
const InvitedMap* invitedUsers = channel->getInvitedUsers();
const UsersMap* users;
if (!channel->isPublicChannel()) {
users = &channel->getUsers();
} else {
users = nullptr;
}
player->sendChannel(channel->getId(), channel->getName(), users, invitedUsers);
}
void Game::playerCloseChannel(uint32_t playerId, uint16_t channelId)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
g_chat->removeUserFromChannel(*player, channelId);
}
void Game::playerOpenPrivateChannel(uint32_t playerId, std::string& receiver)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
if (!IOLoginData::formatPlayerName(receiver)) {
player->sendCancelMessage("A player with this name does not exist.");
return;
}
if (player->getName() == receiver) {
player->sendCancelMessage("You cannot set up a private message channel with yourself.");
return;
}
player->sendOpenPrivateChannel(receiver);
}
void Game::playerCloseNpcChannel(uint32_t playerId)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
SpectatorVec spectators;
map.getSpectators(spectators, player->getPosition());
for (Creature* spectator : spectators) {
if (Npc* npc = spectator->getNpc()) {
npc->onPlayerCloseChannel(player);
}
}
}
void Game::playerReceivePing(uint32_t playerId)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
player->receivePing();
}
void Game::playerReceivePingBack(uint32_t playerId)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
player->sendPingBack();
}
void Game::playerAutoWalk(uint32_t playerId, const std::forward_list<Direction>& listDir)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
player->resetIdleTime();
player->setNextWalkTask(nullptr);
player->startAutoWalk(listDir);
}
void Game::playerStopAutoWalk(uint32_t playerId)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
player->stopWalk();
}
void Game::playerUseItemEx(uint32_t playerId, const Position& fromPos, uint8_t fromStackPos, uint16_t fromSpriteId,
const Position& toPos, uint8_t toStackPos, uint16_t toSpriteId)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
bool isHotkey = (fromPos.x == 0xFFFF && fromPos.y == 0 && fromPos.z == 0);
if (isHotkey && !g_config.getBoolean(ConfigManager::AIMBOT_HOTKEY_ENABLED)) {
return;
}
Thing* thing = internalGetThing(player, fromPos, fromStackPos, fromSpriteId, STACKPOS_USEITEM);
if (!thing) {
player->sendCancelMessage(RETURNVALUE_NOTPOSSIBLE);
return;
}
Item* item = thing->getItem();
if (!item || !item->isUseable() || item->getClientID() != fromSpriteId) {
player->sendCancelMessage(RETURNVALUE_CANNOTUSETHISOBJECT);
return;
}
Position walkToPos = fromPos;
ReturnValue ret = g_actions->canUse(player, fromPos);
if (ret == RETURNVALUE_NOERROR) {
ret = g_actions->canUse(player, toPos, item);
if (ret == RETURNVALUE_TOOFARAWAY) {
walkToPos = toPos;
}
}
if (ret != RETURNVALUE_NOERROR) {
if (ret == RETURNVALUE_TOOFARAWAY) {
Position itemPos = fromPos;
uint8_t itemStackPos = fromStackPos;
if (fromPos.x != 0xFFFF && toPos.x != 0xFFFF && Position::areInRange<1, 1, 0>(fromPos, player->getPosition()) &&
!Position::areInRange<1, 1, 0>(fromPos, toPos)) {
Item* moveItem = nullptr;
ret = internalMoveItem(item->getParent(), player, INDEX_WHEREEVER, item, item->getItemCount(), &moveItem, 0, player, nullptr, &fromPos, &toPos);
if (ret != RETURNVALUE_NOERROR) {
player->sendCancelMessage(ret);
return;
}
//update the position since the item is now in the player's inventory
internalGetPosition(moveItem, itemPos, itemStackPos);
}
std::forward_list<Direction> listDir;
if (player->getPathTo(walkToPos, listDir, 0, 1, true, true)) {
g_dispatcher.addTask(createTask(std::bind(&Game::playerAutoWalk, this, player->getID(), listDir)));
SchedulerTask* task = createSchedulerTask(400, std::bind(&Game::playerUseItemEx, this,
playerId, itemPos, itemStackPos, fromSpriteId, toPos, toStackPos, toSpriteId));
player->setNextWalkActionTask(task);
} else {
player->sendCancelMessage(RETURNVALUE_THEREISNOWAY);
}
return;
}
player->sendCancelMessage(ret);
return;
}
if (!player->canDoAction()) {
uint32_t delay = player->getNextActionTime();
SchedulerTask* task = createSchedulerTask(delay, std::bind(&Game::playerUseItemEx, this,
playerId, fromPos, fromStackPos, fromSpriteId, toPos, toStackPos, toSpriteId));
player->setNextActionTask(task);
return;
}
player->resetIdleTime();
player->setNextActionTask(nullptr);
g_actions->useItemEx(player, fromPos, toPos, toStackPos, item, isHotkey);
}
void Game::playerUseItem(uint32_t playerId, const Position& pos, uint8_t stackPos,
uint8_t index, uint16_t spriteId)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
bool isHotkey = (pos.x == 0xFFFF && pos.y == 0 && pos.z == 0);
if (isHotkey && !g_config.getBoolean(ConfigManager::AIMBOT_HOTKEY_ENABLED)) {
return;
}
Thing* thing = internalGetThing(player, pos, stackPos, spriteId, STACKPOS_USEITEM);
if (!thing) {
player->sendCancelMessage(RETURNVALUE_NOTPOSSIBLE);
return;
}
Item* item = thing->getItem();
if (!item || item->isUseable() || item->getClientID() != spriteId) {
player->sendCancelMessage(RETURNVALUE_CANNOTUSETHISOBJECT);
return;
}
ReturnValue ret = g_actions->canUse(player, pos);
if (ret != RETURNVALUE_NOERROR) {
if (ret == RETURNVALUE_TOOFARAWAY) {
std::forward_list<Direction> listDir;
if (player->getPathTo(pos, listDir, 0, 1, true, true)) {
g_dispatcher.addTask(createTask(std::bind(&Game::playerAutoWalk,
this, player->getID(), listDir)));
SchedulerTask* task = createSchedulerTask(400, std::bind(&Game::playerUseItem, this,
playerId, pos, stackPos, index, spriteId));
player->setNextWalkActionTask(task);
return;
}
ret = RETURNVALUE_THEREISNOWAY;
}
player->sendCancelMessage(ret);
return;
}
if (!player->canDoAction()) {
uint32_t delay = player->getNextActionTime();
SchedulerTask* task = createSchedulerTask(delay, std::bind(&Game::playerUseItem, this,
playerId, pos, stackPos, index, spriteId));
player->setNextActionTask(task);
return;
}
player->resetIdleTime();
player->setNextActionTask(nullptr);
g_actions->useItem(player, pos, index, item, isHotkey);
}
void Game::playerUseWithCreature(uint32_t playerId, const Position& fromPos, uint8_t fromStackPos, uint32_t creatureId, uint16_t spriteId)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
Creature* creature = getCreatureByID(creatureId);
if (!creature) {
return;
}
if (!Position::areInRange<7, 5, 0>(creature->getPosition(), player->getPosition())) {
return;
}
bool isHotkey = (fromPos.x == 0xFFFF && fromPos.y == 0 && fromPos.z == 0);
if (!g_config.getBoolean(ConfigManager::AIMBOT_HOTKEY_ENABLED)) {
if (creature->getPlayer() || isHotkey) {
player->sendCancelMessage(RETURNVALUE_DIRECTPLAYERSHOOT);
return;
}
}
Thing* thing = internalGetThing(player, fromPos, fromStackPos, spriteId, STACKPOS_USEITEM);
if (!thing) {
player->sendCancelMessage(RETURNVALUE_NOTPOSSIBLE);
return;
}
Item* item = thing->getItem();
if (!item || !item->isUseable() || item->getClientID() != spriteId) {
player->sendCancelMessage(RETURNVALUE_CANNOTUSETHISOBJECT);
return;
}
Position toPos = creature->getPosition();
Position walkToPos = fromPos;
ReturnValue ret = g_actions->canUse(player, fromPos);
if (ret == RETURNVALUE_NOERROR) {
ret = g_actions->canUse(player, toPos, item);
if (ret == RETURNVALUE_TOOFARAWAY) {
walkToPos = toPos;
}
}
if (ret != RETURNVALUE_NOERROR) {
if (ret == RETURNVALUE_TOOFARAWAY) {
Position itemPos = fromPos;
uint8_t itemStackPos = fromStackPos;
if (fromPos.x != 0xFFFF && Position::areInRange<1, 1, 0>(fromPos, player->getPosition()) && !Position::areInRange<1, 1, 0>(fromPos, toPos)) {
Item* moveItem = nullptr;
ret = internalMoveItem(item->getParent(), player, INDEX_WHEREEVER, item, item->getItemCount(), &moveItem, 0, player, nullptr, &fromPos, &toPos);
if (ret != RETURNVALUE_NOERROR) {
player->sendCancelMessage(ret);
return;
}
//update the position since the item is now in the player's inventory
internalGetPosition(moveItem, itemPos, itemStackPos);
}
std::forward_list<Direction> listDir;
if (player->getPathTo(walkToPos, listDir, 0, 1, true, true)) {
g_dispatcher.addTask(createTask(std::bind(&Game::playerAutoWalk,
this, player->getID(), listDir)));
SchedulerTask* task = createSchedulerTask(400, std::bind(&Game::playerUseWithCreature, this,
playerId, itemPos, itemStackPos, creatureId, spriteId));
player->setNextWalkActionTask(task);
} else {
player->sendCancelMessage(RETURNVALUE_THEREISNOWAY);
}
return;
}
player->sendCancelMessage(ret);
return;
}
if (!player->canDoAction()) {
uint32_t delay = player->getNextActionTime();
SchedulerTask* task = createSchedulerTask(delay, std::bind(&Game::playerUseWithCreature, this,
playerId, fromPos, fromStackPos, creatureId, spriteId));
player->setNextActionTask(task);
return;
}
player->resetIdleTime();
player->setNextActionTask(nullptr);
g_actions->useItemEx(player, fromPos, creature->getPosition(), creature->getParent()->getThingIndex(creature), item, isHotkey, creature);
}
void Game::playerCloseContainer(uint32_t playerId, uint8_t cid)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
player->closeContainer(cid);
player->sendCloseContainer(cid);
}
void Game::playerMoveUpContainer(uint32_t playerId, uint8_t cid)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
Container* container = player->getContainerByID(cid);
if (!container) {
return;
}
Container* parentContainer = dynamic_cast<Container*>(container->getRealParent());
if (!parentContainer) {
Tile* tile = container->getTile();
if (!tile) {
return;
}
auto it = browseFields.find(tile);
if (it == browseFields.end()) {
parentContainer = new Container(tile);
parentContainer->incrementReferenceCounter();
browseFields[tile] = parentContainer;
g_scheduler.addEvent(createSchedulerTask(30000, std::bind(&Game::decreaseBrowseFieldRef, this, tile->getPosition())));
} else {
parentContainer = it->second;
}
}
player->addContainer(cid, parentContainer);
player->sendContainer(cid, parentContainer, parentContainer->hasParent(), player->getContainerIndex(cid));
}
void Game::playerUpdateContainer(uint32_t playerId, uint8_t cid)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
Container* container = player->getContainerByID(cid);
if (!container) {
return;
}
player->sendContainer(cid, container, container->hasParent(), player->getContainerIndex(cid));
}
void Game::playerRotateItem(uint32_t playerId, const Position& pos, uint8_t stackPos, const uint16_t spriteId)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
Thing* thing = internalGetThing(player, pos, stackPos, 0, STACKPOS_TOPDOWN_ITEM);
if (!thing) {
return;
}
Item* item = thing->getItem();
if (!item || item->getClientID() != spriteId || !item->isRotatable() || item->hasAttribute(ITEM_ATTRIBUTE_UNIQUEID)) {
player->sendCancelMessage(RETURNVALUE_NOTPOSSIBLE);
return;
}
if (pos.x != 0xFFFF && !Position::areInRange<1, 1, 0>(pos, player->getPosition())) {
std::forward_list<Direction> listDir;
if (player->getPathTo(pos, listDir, 0, 1, true, true)) {
g_dispatcher.addTask(createTask(std::bind(&Game::playerAutoWalk,
this, player->getID(), listDir)));
SchedulerTask* task = createSchedulerTask(400, std::bind(&Game::playerRotateItem, this,
playerId, pos, stackPos, spriteId));
player->setNextWalkActionTask(task);
} else {
player->sendCancelMessage(RETURNVALUE_THEREISNOWAY);
}
return;
}
uint16_t newId = Item::items[item->getID()].rotateTo;
if (newId != 0) {
transformItem(item, newId);
}
}
void Game::playerWriteItem(uint32_t playerId, uint32_t windowTextId, const std::string& text)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
uint16_t maxTextLength = 0;
uint32_t internalWindowTextId = 0;
Item* writeItem = player->getWriteItem(internalWindowTextId, maxTextLength);
if (text.length() > maxTextLength || windowTextId != internalWindowTextId) {
return;
}
if (!writeItem || writeItem->isRemoved()) {
player->sendCancelMessage(RETURNVALUE_NOTPOSSIBLE);
return;
}
Cylinder* topParent = writeItem->getTopParent();
Player* owner = dynamic_cast<Player*>(topParent);
if (owner && owner != player) {
player->sendCancelMessage(RETURNVALUE_NOTPOSSIBLE);
return;
}
if (!Position::areInRange<1, 1, 0>(writeItem->getPosition(), player->getPosition())) {
player->sendCancelMessage(RETURNVALUE_NOTPOSSIBLE);
return;
}
for (auto creatureEvent : player->getCreatureEvents(CREATURE_EVENT_TEXTEDIT)) {
if (!creatureEvent->executeTextEdit(player, writeItem, text)) {
player->setWriteItem(nullptr);
return;
}
}
if (!text.empty()) {
if (writeItem->getText() != text) {
writeItem->setText(text);
writeItem->setWriter(player->getName());
writeItem->setDate(time(nullptr));
}
} else {
writeItem->resetText();
writeItem->resetWriter();
writeItem->resetDate();
}
uint16_t newId = Item::items[writeItem->getID()].writeOnceItemId;
if (newId != 0) {
transformItem(writeItem, newId);
}
player->setWriteItem(nullptr);
}
void Game::playerBrowseField(uint32_t playerId, const Position& pos)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
const Position& playerPos = player->getPosition();
if (playerPos.z != pos.z) {
player->sendCancelMessage(playerPos.z > pos.z ? RETURNVALUE_FIRSTGOUPSTAIRS : RETURNVALUE_FIRSTGODOWNSTAIRS);
return;
}
if (!Position::areInRange<1, 1>(playerPos, pos)) {
std::forward_list<Direction> listDir;
if (player->getPathTo(pos, listDir, 0, 1, true, true)) {
g_dispatcher.addTask(createTask(std::bind(&Game::playerAutoWalk,
this, player->getID(), listDir)));
SchedulerTask* task = createSchedulerTask(400, std::bind(
&Game::playerBrowseField, this, playerId, pos
));
player->setNextWalkActionTask(task);
} else {
player->sendCancelMessage(RETURNVALUE_THEREISNOWAY);
}
return;
}
Tile* tile = map.getTile(pos);
if (!tile) {
return;
}
if (!g_events->eventPlayerOnBrowseField(player, pos)) {
return;
}
Container* container;
auto it = browseFields.find(tile);
if (it == browseFields.end()) {
container = new Container(tile);
container->incrementReferenceCounter();
browseFields[tile] = container;
g_scheduler.addEvent(createSchedulerTask(30000, std::bind(&Game::decreaseBrowseFieldRef, this, tile->getPosition())));
} else {
container = it->second;
}
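//map the tile position to one of the reserved browse field container ids
//(0x7..0xF), e.g. x % 3 == 0 and y % 3 == 2 yields 0xF - 2 = 0xD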
uint8_t dummyContainerId = 0xF - ((pos.x % 3) * 3 + (pos.y % 3));
Container* openContainer = player->getContainerByID(dummyContainerId);
if (openContainer) {
player->onCloseContainer(openContainer);
player->closeContainer(dummyContainerId);
} else {
player->addContainer(dummyContainerId, container);
player->sendContainer(dummyContainerId, container, false, 0);
}
}
void Game::playerSeekInContainer(uint32_t playerId, uint8_t containerId, uint16_t index)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
Container* container = player->getContainerByID(containerId);
if (!container || !container->hasPagination()) {
return;
}
if ((index % container->capacity()) != 0 || index >= container->size()) {
return;
}
player->setContainerIndex(containerId, index);
player->sendContainer(containerId, container, container->hasParent(), index);
}
void Game::playerUpdateHouseWindow(uint32_t playerId, uint8_t listId, uint32_t windowTextId, const std::string& text)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
uint32_t internalWindowTextId;
uint32_t internalListId;
House* house = player->getEditHouse(internalWindowTextId, internalListId);
if (house && house->canEditAccessList(internalListId, player) && internalWindowTextId == windowTextId && listId == 0) {
house->setAccessList(internalListId, text);
}
player->setEditHouse(nullptr);
}
void Game::playerWrapItem(uint32_t playerId, const Position& position, uint8_t stackPos, const uint16_t spriteId)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
Thing* thing = internalGetThing(player, position, stackPos, 0, STACKPOS_TOPDOWN_ITEM);
if (!thing) {
return;
}
Item* item = thing->getItem();
if (!item || item->getClientID() != spriteId || !item->hasAttribute(ITEM_ATTRIBUTE_WRAPID) || item->hasAttribute(ITEM_ATTRIBUTE_UNIQUEID)) {
player->sendCancelMessage(RETURNVALUE_NOTPOSSIBLE);
return;
}
if (position.x != 0xFFFF && !Position::areInRange<1, 1, 0>(position, player->getPosition())) {
std::forward_list<Direction> listDir;
if (player->getPathTo(position, listDir, 0, 1, true, true)) {
g_dispatcher.addTask(createTask(std::bind(&Game::playerAutoWalk,
this, player->getID(), listDir)));
SchedulerTask* task = createSchedulerTask(400, std::bind(&Game::playerWrapItem, this,
playerId, position, stackPos, spriteId));
player->setNextWalkActionTask(task);
} else {
player->sendCancelMessage(RETURNVALUE_THEREISNOWAY);
}
return;
}
g_events->eventPlayerOnWrapItem(player, item);
}
void Game::playerRequestTrade(uint32_t playerId, const Position& pos, uint8_t stackPos,
uint32_t tradePlayerId, uint16_t spriteId)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
Player* tradePartner = getPlayerByID(tradePlayerId);
if (!tradePartner || tradePartner == player) {
player->sendTextMessage(MESSAGE_INFO_DESCR, "Sorry, not possible.");
return;
}
if (!Position::areInRange<2, 2, 0>(tradePartner->getPosition(), player->getPosition())) {
std::ostringstream ss;
ss << tradePartner->getName() << " tells you to move closer.";
player->sendTextMessage(MESSAGE_INFO_DESCR, ss.str());
return;
}
if (!canThrowObjectTo(tradePartner->getPosition(), player->getPosition())) {
player->sendCancelMessage(RETURNVALUE_CREATUREISNOTREACHABLE);
return;
}
Thing* tradeThing = internalGetThing(player, pos, stackPos, 0, STACKPOS_TOPDOWN_ITEM);
if (!tradeThing) {
player->sendCancelMessage(RETURNVALUE_NOTPOSSIBLE);
return;
}
Item* tradeItem = tradeThing->getItem();
if (!tradeItem || tradeItem->getClientID() != spriteId || !tradeItem->isPickupable() || tradeItem->hasAttribute(ITEM_ATTRIBUTE_UNIQUEID)) {
player->sendCancelMessage(RETURNVALUE_NOTPOSSIBLE);
return;
}
const Position& playerPosition = player->getPosition();
const Position& tradeItemPosition = tradeItem->getPosition();
if (playerPosition.z != tradeItemPosition.z) {
player->sendCancelMessage(playerPosition.z > tradeItemPosition.z ? RETURNVALUE_FIRSTGOUPSTAIRS : RETURNVALUE_FIRSTGODOWNSTAIRS);
return;
}
if (!Position::areInRange<1, 1>(tradeItemPosition, playerPosition)) {
std::forward_list<Direction> listDir;
if (player->getPathTo(pos, listDir, 0, 1, true, true)) {
g_dispatcher.addTask(createTask(std::bind(&Game::playerAutoWalk,
this, player->getID(), listDir)));
SchedulerTask* task = createSchedulerTask(400, std::bind(&Game::playerRequestTrade, this,
playerId, pos, stackPos, tradePlayerId, spriteId));
player->setNextWalkActionTask(task);
} else {
player->sendCancelMessage(RETURNVALUE_THEREISNOWAY);
}
return;
}
Container* tradeItemContainer = tradeItem->getContainer();
if (tradeItemContainer) {
for (const auto& it : tradeItems) {
Item* item = it.first;
if (tradeItem == item) {
player->sendTextMessage(MESSAGE_INFO_DESCR, "This item is already being traded.");
return;
}
if (tradeItemContainer->isHoldingItem(item)) {
player->sendTextMessage(MESSAGE_INFO_DESCR, "This item is already being traded.");
return;
}
Container* container = item->getContainer();
if (container && container->isHoldingItem(tradeItem)) {
player->sendTextMessage(MESSAGE_INFO_DESCR, "This item is already being traded.");
return;
}
}
} else {
for (const auto& it : tradeItems) {
Item* item = it.first;
if (tradeItem == item) {
player->sendTextMessage(MESSAGE_INFO_DESCR, "This item is already being traded.");
return;
}
Container* container = item->getContainer();
if (container && container->isHoldingItem(tradeItem)) {
player->sendTextMessage(MESSAGE_INFO_DESCR, "This item is already being traded.");
return;
}
}
}
Container* tradeContainer = tradeItem->getContainer();
if (tradeContainer && tradeContainer->getItemHoldingCount() + 1 > 100) {
player->sendTextMessage(MESSAGE_INFO_DESCR, "You can not trade more than 100 items.");
return;
}
if (!g_events->eventPlayerOnTradeRequest(player, tradePartner, tradeItem)) {
return;
}
internalStartTrade(player, tradePartner, tradeItem);
}
bool Game::internalStartTrade(Player* player, Player* tradePartner, Item* tradeItem)
{
if (player->tradeState != TRADE_NONE && !(player->tradeState == TRADE_ACKNOWLEDGE && player->tradePartner == tradePartner)) {
player->sendCancelMessage(RETURNVALUE_YOUAREALREADYTRADING);
return false;
} else if (tradePartner->tradeState != TRADE_NONE && tradePartner->tradePartner != player) {
player->sendCancelMessage(RETURNVALUE_THISPLAYERISALREADYTRADING);
return false;
}
player->tradePartner = tradePartner;
player->tradeItem = tradeItem;
player->tradeState = TRADE_INITIATED;
tradeItem->incrementReferenceCounter();
tradeItems[tradeItem] = player->getID();
player->sendTradeItemRequest(player->getName(), tradeItem, true);
if (tradePartner->tradeState == TRADE_NONE) {
std::ostringstream ss;
ss << player->getName() << " wants to trade with you.";
tradePartner->sendTextMessage(MESSAGE_EVENT_ADVANCE, ss.str());
tradePartner->tradeState = TRADE_ACKNOWLEDGE;
tradePartner->tradePartner = player;
} else {
Item* counterOfferItem = tradePartner->tradeItem;
player->sendTradeItemRequest(tradePartner->getName(), counterOfferItem, false);
tradePartner->sendTradeItemRequest(player->getName(), tradeItem, false);
}
return true;
}
void Game::playerAcceptTrade(uint32_t playerId)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
if (!(player->getTradeState() == TRADE_ACKNOWLEDGE || player->getTradeState() == TRADE_INITIATED)) {
return;
}
Player* tradePartner = player->tradePartner;
if (!tradePartner) {
return;
}
if (!canThrowObjectTo(tradePartner->getPosition(), player->getPosition())) {
player->sendCancelMessage(RETURNVALUE_CREATUREISNOTREACHABLE);
return;
}
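//moneyMap is ordered by worth; consume whole stacks while they are worth
//less than the remainder, then split the last stack and return the change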
player->setTradeState(TRADE_ACCEPT);
if (tradePartner->getTradeState() == TRADE_ACCEPT) {
Item* playerTradeItem = player->tradeItem;
Item* partnerTradeItem = tradePartner->tradeItem;
if (!g_events->eventPlayerOnTradeAccept(player, tradePartner, playerTradeItem, partnerTradeItem)) {
internalCloseTrade(player);
return;
}
player->setTradeState(TRADE_TRANSFER);
tradePartner->setTradeState(TRADE_TRANSFER);
auto it = tradeItems.find(playerTradeItem);
if (it != tradeItems.end()) {
ReleaseItem(it->first);
tradeItems.erase(it);
}
it = tradeItems.find(partnerTradeItem);
if (it != tradeItems.end()) {
ReleaseItem(it->first);
tradeItems.erase(it);
}
bool isSuccess = false;
ReturnValue tradePartnerRet = RETURNVALUE_NOERROR;
ReturnValue playerRet = RETURNVALUE_NOERROR;
// if a player is trying to trade their own backpack
if (tradePartner->getInventoryItem(CONST_SLOT_BACKPACK) == partnerTradeItem) {
tradePartnerRet = (tradePartner->getInventoryItem(getSlotType(Item::items[playerTradeItem->getID()])) ? RETURNVALUE_NOTENOUGHROOM : RETURNVALUE_NOERROR);
}
if (player->getInventoryItem(CONST_SLOT_BACKPACK) == playerTradeItem) {
playerRet = (player->getInventoryItem(getSlotType(Item::items[partnerTradeItem->getID()])) ? RETURNVALUE_NOTENOUGHROOM : RETURNVALUE_NOERROR);
}
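//dry-run both exchanges first: the trailing boolean makes these test-only
//calls that validate capacity and room without moving anything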
if (tradePartnerRet == RETURNVALUE_NOERROR && playerRet == RETURNVALUE_NOERROR) {
tradePartnerRet = internalAddItem(tradePartner, playerTradeItem, INDEX_WHEREEVER, 0, true);
playerRet = internalAddItem(player, partnerTradeItem, INDEX_WHEREEVER, 0, true);
if (tradePartnerRet == RETURNVALUE_NOERROR && playerRet == RETURNVALUE_NOERROR) {
playerRet = internalRemoveItem(playerTradeItem, playerTradeItem->getItemCount(), true);
tradePartnerRet = internalRemoveItem(partnerTradeItem, partnerTradeItem->getItemCount(), true);
if (tradePartnerRet == RETURNVALUE_NOERROR && playerRet == RETURNVALUE_NOERROR) {
tradePartnerRet = internalMoveItem(playerTradeItem->getParent(), tradePartner, INDEX_WHEREEVER, playerTradeItem, playerTradeItem->getItemCount(), nullptr, FLAG_IGNOREAUTOSTACK, nullptr, partnerTradeItem);
if (tradePartnerRet == RETURNVALUE_NOERROR) {
internalMoveItem(partnerTradeItem->getParent(), player, INDEX_WHEREEVER, partnerTradeItem, partnerTradeItem->getItemCount(), nullptr, FLAG_IGNOREAUTOSTACK);
playerTradeItem->onTradeEvent(ON_TRADE_TRANSFER, tradePartner);
partnerTradeItem->onTradeEvent(ON_TRADE_TRANSFER, player);
isSuccess = true;
}
}
}
}
if (!isSuccess) {
std::string errorDescription;
if (tradePartner->tradeItem) {
errorDescription = getTradeErrorDescription(tradePartnerRet, playerTradeItem);
tradePartner->sendTextMessage(MESSAGE_EVENT_ADVANCE, errorDescription);
tradePartner->tradeItem->onTradeEvent(ON_TRADE_CANCEL, tradePartner);
}
if (player->tradeItem) {
errorDescription = getTradeErrorDescription(playerRet, partnerTradeItem);
player->sendTextMessage(MESSAGE_EVENT_ADVANCE, errorDescription);
player->tradeItem->onTradeEvent(ON_TRADE_CANCEL, player);
}
}
g_events->eventPlayerOnTradeCompleted(player, tradePartner, playerTradeItem, partnerTradeItem, isSuccess);
player->setTradeState(TRADE_NONE);
player->tradeItem = nullptr;
player->tradePartner = nullptr;
player->sendTradeClose();
tradePartner->setTradeState(TRADE_NONE);
tradePartner->tradeItem = nullptr;
tradePartner->tradePartner = nullptr;
tradePartner->sendTradeClose();
}
}
std::string Game::getTradeErrorDescription(ReturnValue ret, Item* item)
{
if (item) {
if (ret == RETURNVALUE_NOTENOUGHCAPACITY) {
std::ostringstream ss;
ss << "You do not have enough capacity to carry";
if (item->isStackable() && item->getItemCount() > 1) {
ss << " these objects.";
} else {
ss << " this object.";
}
ss << "\n " << item->getWeightDescription();
return ss.str();
} else if (ret == RETURNVALUE_NOTENOUGHROOM || ret == RETURNVALUE_CONTAINERNOTENOUGHROOM) {
std::ostringstream ss;
ss << "You do not have enough room to carry";
if (item->isStackable() && item->getItemCount() > 1) {
ss << " these objects.";
} else {
ss << " this object.";
}
return ss.str();
}
}
return "Trade could not be completed.";
}
void Game::playerLookInTrade(uint32_t playerId, bool lookAtCounterOffer, uint8_t index)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
Player* tradePartner = player->tradePartner;
if (!tradePartner) {
return;
}
Item* tradeItem;
if (lookAtCounterOffer) {
tradeItem = tradePartner->getTradeItem();
} else {
tradeItem = player->getTradeItem();
}
if (!tradeItem) {
return;
}
const Position& playerPosition = player->getPosition();
const Position& tradeItemPosition = tradeItem->getPosition();
int32_t lookDistance = std::max<int32_t>(Position::getDistanceX(playerPosition, tradeItemPosition),
Position::getDistanceY(playerPosition, tradeItemPosition));
if (index == 0) {
g_events->eventPlayerOnLookInTrade(player, tradePartner, tradeItem, lookDistance);
return;
}
Container* tradeContainer = tradeItem->getContainer();
if (!tradeContainer) {
return;
}
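//walk the container tree breadth-first; index counts items in the same
//order the client displays them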
std::vector<const Container*> containers {tradeContainer};
size_t i = 0;
while (i < containers.size()) {
const Container* container = containers[i++];
for (Item* item : container->getItemList()) {
Container* tmpContainer = item->getContainer();
if (tmpContainer) {
containers.push_back(tmpContainer);
}
if (--index == 0) {
g_events->eventPlayerOnLookInTrade(player, tradePartner, item, lookDistance);
return;
}
}
}
}
void Game::playerCloseTrade(uint32_t playerId)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
internalCloseTrade(player);
}
void Game::internalCloseTrade(Player* player)
{
Player* tradePartner = player->tradePartner;
if ((tradePartner && tradePartner->getTradeState() == TRADE_TRANSFER) || player->getTradeState() == TRADE_TRANSFER) {
return;
}
if (player->getTradeItem()) {
auto it = tradeItems.find(player->getTradeItem());
if (it != tradeItems.end()) {
ReleaseItem(it->first);
tradeItems.erase(it);
}
player->tradeItem->onTradeEvent(ON_TRADE_CANCEL, player);
player->tradeItem = nullptr;
}
player->setTradeState(TRADE_NONE);
player->tradePartner = nullptr;
player->sendTextMessage(MESSAGE_STATUS_SMALL, "Trade cancelled.");
player->sendTradeClose();
if (tradePartner) {
if (tradePartner->getTradeItem()) {
auto it = tradeItems.find(tradePartner->getTradeItem());
if (it != tradeItems.end()) {
ReleaseItem(it->first);
tradeItems.erase(it);
}
tradePartner->tradeItem->onTradeEvent(ON_TRADE_CANCEL, tradePartner);
tradePartner->tradeItem = nullptr;
}
tradePartner->setTradeState(TRADE_NONE);
tradePartner->tradePartner = nullptr;
tradePartner->sendTextMessage(MESSAGE_STATUS_SMALL, "Trade cancelled.");
tradePartner->sendTradeClose();
}
}
void Game::playerPurchaseItem(uint32_t playerId, uint16_t spriteId, uint8_t count, uint8_t amount,
bool ignoreCap/* = false*/, bool inBackpacks/* = false*/)
{
if (amount == 0 || amount > 100) {
return;
}
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
int32_t onBuy, onSell;
Npc* merchant = player->getShopOwner(onBuy, onSell);
if (!merchant) {
return;
}
const ItemType& it = Item::items.getItemIdByClientId(spriteId);
if (it.id == 0) {
return;
}
uint8_t subType;
if (it.isSplash() || it.isFluidContainer()) {
subType = clientFluidToServer(count);
} else {
subType = count;
}
if (!player->hasShopItemForSale(it.id, subType)) {
return;
}
merchant->onPlayerTrade(player, onBuy, it.id, subType, amount, ignoreCap, inBackpacks);
}
void Game::playerSellItem(uint32_t playerId, uint16_t spriteId, uint8_t count, uint8_t amount, bool ignoreEquipped)
{
if (amount == 0 || amount > 100) {
return;
}
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
int32_t onBuy, onSell;
Npc* merchant = player->getShopOwner(onBuy, onSell);
if (!merchant) {
return;
}
const ItemType& it = Item::items.getItemIdByClientId(spriteId);
if (it.id == 0) {
return;
}
uint8_t subType;
if (it.isSplash() || it.isFluidContainer()) {
subType = clientFluidToServer(count);
} else {
subType = count;
}
merchant->onPlayerTrade(player, onSell, it.id, subType, amount, ignoreEquipped);
}
void Game::playerCloseShop(uint32_t playerId)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
player->closeShopWindow();
}
void Game::playerLookInShop(uint32_t playerId, uint16_t spriteId, uint8_t count)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
int32_t onBuy, onSell;
Npc* merchant = player->getShopOwner(onBuy, onSell);
if (!merchant) {
return;
}
const ItemType& it = Item::items.getItemIdByClientId(spriteId);
if (it.id == 0) {
return;
}
int32_t subType;
if (it.isFluidContainer() || it.isSplash()) {
subType = clientFluidToServer(count);
} else {
subType = count;
}
if (!player->hasShopItemForSale(it.id, subType)) {
return;
}
if (!g_events->eventPlayerOnLookInShop(player, &it, subType)) {
return;
}
std::ostringstream ss;
ss << "You see " << Item::getDescription(it, 1, nullptr, subType);
player->sendTextMessage(MESSAGE_INFO_DESCR, ss.str());
}
void Game::playerLookAt(uint32_t playerId, const Position& pos, uint8_t stackPos)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
Thing* thing = internalGetThing(player, pos, stackPos, 0, STACKPOS_LOOK);
if (!thing) {
player->sendCancelMessage(RETURNVALUE_NOTPOSSIBLE);
return;
}
Position thingPos = thing->getPosition();
if (!player->canSee(thingPos)) {
player->sendCancelMessage(RETURNVALUE_NOTPOSSIBLE);
return;
}
Position playerPos = player->getPosition();
int32_t lookDistance;
if (thing != player) {
lookDistance = std::max<int32_t>(Position::getDistanceX(playerPos, thingPos), Position::getDistanceY(playerPos, thingPos));
if (playerPos.z != thingPos.z) {
lookDistance += 15;
}
} else {
lookDistance = -1;
}
g_events->eventPlayerOnLook(player, pos, thing, stackPos, lookDistance);
}
void Game::playerLookInBattleList(uint32_t playerId, uint32_t creatureId)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
Creature* creature = getCreatureByID(creatureId);
if (!creature) {
return;
}
if (!player->canSeeCreature(creature)) {
return;
}
const Position& creaturePos = creature->getPosition();
if (!player->canSee(creaturePos)) {
return;
}
int32_t lookDistance;
if (creature != player) {
const Position& playerPos = player->getPosition();
lookDistance = std::max<int32_t>(Position::getDistanceX(playerPos, creaturePos), Position::getDistanceY(playerPos, creaturePos));
if (playerPos.z != creaturePos.z) {
lookDistance += 15;
}
} else {
lookDistance = -1;
}
g_events->eventPlayerOnLookInBattleList(player, creature, lookDistance);
}
void Game::playerCancelAttackAndFollow(uint32_t playerId)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
playerSetAttackedCreature(playerId, 0);
playerFollowCreature(playerId, 0);
player->stopWalk();
}
void Game::playerSetAttackedCreature(uint32_t playerId, uint32_t creatureId)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
if (player->getAttackedCreature() && creatureId == 0) {
player->setAttackedCreature(nullptr);
player->sendCancelTarget();
return;
}
Creature* attackCreature = getCreatureByID(creatureId);
if (!attackCreature) {
player->setAttackedCreature(nullptr);
player->sendCancelTarget();
return;
}
ReturnValue ret = Combat::canTargetCreature(player, attackCreature);
if (ret != RETURNVALUE_NOERROR) {
player->sendCancelMessage(ret);
player->sendCancelTarget();
player->setAttackedCreature(nullptr);
return;
}
player->setAttackedCreature(attackCreature);
g_dispatcher.addTask(createTask(std::bind(&Game::updateCreatureWalk, this, player->getID())));
}
void Game::playerFollowCreature(uint32_t playerId, uint32_t creatureId)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
player->setAttackedCreature(nullptr);
g_dispatcher.addTask(createTask(std::bind(&Game::updateCreatureWalk, this, player->getID())));
player->setFollowCreature(getCreatureByID(creatureId));
}
void Game::playerSetFightModes(uint32_t playerId, fightMode_t fightMode, bool chaseMode, bool secureMode)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
player->setFightMode(fightMode);
player->setChaseMode(chaseMode);
player->setSecureMode(secureMode);
}
void Game::playerRequestAddVip(uint32_t playerId, const std::string& name)
{
if (name.length() > 20) {
return;
}
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
Player* vipPlayer = getPlayerByName(name);
if (!vipPlayer) {
uint32_t guid;
bool specialVip;
std::string formattedName = name;
if (!IOLoginData::getGuidByNameEx(guid, specialVip, formattedName)) {
player->sendTextMessage(MESSAGE_STATUS_SMALL, "A player with this name does not exist.");
return;
}
if (specialVip && !player->hasFlag(PlayerFlag_SpecialVIP)) {
player->sendTextMessage(MESSAGE_STATUS_SMALL, "You can not add this player.");
return;
}
player->addVIP(guid, formattedName, VIPSTATUS_OFFLINE);
} else {
if (vipPlayer->hasFlag(PlayerFlag_SpecialVIP) && !player->hasFlag(PlayerFlag_SpecialVIP)) {
player->sendTextMessage(MESSAGE_STATUS_SMALL, "You can not add this player.");
return;
}
if (!vipPlayer->isInGhostMode() || player->isAccessPlayer()) {
player->addVIP(vipPlayer->getGUID(), vipPlayer->getName(), VIPSTATUS_ONLINE);
} else {
player->addVIP(vipPlayer->getGUID(), vipPlayer->getName(), VIPSTATUS_OFFLINE);
}
}
}
void Game::playerRequestRemoveVip(uint32_t playerId, uint32_t guid)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
player->removeVIP(guid);
}
void Game::playerRequestEditVip(uint32_t playerId, uint32_t guid, const std::string& description, uint32_t icon, bool notify)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
player->editVIP(guid, description, icon, notify);
}
void Game::playerTurn(uint32_t playerId, Direction dir)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
if (!g_events->eventPlayerOnTurn(player, dir)) {
return;
}
player->resetIdleTime();
internalCreatureTurn(player, dir);
}
void Game::playerRequestOutfit(uint32_t playerId)
{
if (!g_config.getBoolean(ConfigManager::ALLOW_CHANGEOUTFIT)) {
return;
}
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
player->sendOutfitWindow();
}
void Game::playerToggleMount(uint32_t playerId, bool mount)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
player->toggleMount(mount);
}
void Game::playerChangeOutfit(uint32_t playerId, Outfit_t outfit)
{
if (!g_config.getBoolean(ConfigManager::ALLOW_CHANGEOUTFIT)) {
return;
}
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
const Outfit* playerOutfit = Outfits::getInstance().getOutfitByLookType(player->getSex(), outfit.lookType);
if (!playerOutfit) {
outfit.lookMount = 0;
}
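//validate the requested mount; when already mounted, swap mounts and apply
//the speed difference, otherwise remember the mount without showing it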
if (outfit.lookMount != 0) {
Mount* mount = mounts.getMountByClientID(outfit.lookMount);
if (!mount) {
return;
}
if (!player->hasMount(mount)) {
return;
}
if (player->isMounted()) {
Mount* prevMount = mounts.getMountByID(player->getCurrentMount());
if (prevMount) {
changeSpeed(player, mount->speed - prevMount->speed);
}
player->setCurrentMount(mount->id);
} else {
player->setCurrentMount(mount->id);
outfit.lookMount = 0;
}
} else if (player->isMounted()) {
player->dismount();
}
if (player->canWear(outfit.lookType, outfit.lookAddons)) {
player->defaultOutfit = outfit;
if (player->hasCondition(CONDITION_OUTFIT)) {
return;
}
internalCreatureChangeOutfit(player, outfit);
}
}
void Game::playerShowQuestLog(uint32_t playerId)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
player->sendQuestLog();
}
void Game::playerShowQuestLine(uint32_t playerId, uint16_t questId)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
Quest* quest = quests.getQuestByID(questId);
if (!quest) {
return;
}
player->sendQuestLine(quest);
}
void Game::playerSay(uint32_t playerId, uint16_t channelId, SpeakClasses type,
const std::string& receiver, const std::string& text)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
player->resetIdleTime();
if (playerSaySpell(player, type, text)) {
return;
}
uint32_t muteTime = player->isMuted();
if (muteTime > 0) {
std::ostringstream ss;
ss << "You are still muted for " << muteTime << " seconds.";
player->sendTextMessage(MESSAGE_STATUS_SMALL, ss.str());
return;
}
if (!text.empty() && text.front() == '/' && player->isAccessPlayer()) {
return;
}
if (type != TALKTYPE_PRIVATE_PN) {
player->removeMessageBuffer();
}
switch (type) {
case TALKTYPE_SAY:
internalCreatureSay(player, TALKTYPE_SAY, text, false);
break;
case TALKTYPE_WHISPER:
playerWhisper(player, text);
break;
case TALKTYPE_YELL:
playerYell(player, text);
break;
case TALKTYPE_PRIVATE_TO:
case TALKTYPE_PRIVATE_RED_TO:
playerSpeakTo(player, type, receiver, text);
break;
case TALKTYPE_CHANNEL_O:
case TALKTYPE_CHANNEL_Y:
case TALKTYPE_CHANNEL_R1:
g_chat->talkToChannel(*player, type, text, channelId);
break;
case TALKTYPE_PRIVATE_PN:
playerSpeakToNpc(player, text);
break;
case TALKTYPE_BROADCAST:
playerBroadcastMessage(player, text);
break;
default:
break;
}
}
bool Game::playerSaySpell(Player* player, SpeakClasses type, const std::string& text)
{
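//talkactions get the first chance at the text; only afterwards is it checked as a spell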
std::string words = text;
TalkActionResult_t result = g_talkActions->playerSaySpell(player, type, words);
if (result == TALKACTION_BREAK) {
return true;
}
result = g_spells->playerSaySpell(player, words);
if (result == TALKACTION_BREAK) {
if (!g_config.getBoolean(ConfigManager::EMOTE_SPELLS)) {
return internalCreatureSay(player, TALKTYPE_SAY, words, false);
} else {
return internalCreatureSay(player, TALKTYPE_MONSTER_SAY, words, false);
}
} else if (result == TALKACTION_FAILED) {
return true;
}
return false;
}
void Game::playerWhisper(Player* player, const std::string& text)
{
SpectatorVec spectators;
map.getSpectators(spectators, player->getPosition(), false, false,
Map::maxClientViewportX, Map::maxClientViewportX,
Map::maxClientViewportY, Map::maxClientViewportY);
//send to client
for (Creature* spectator : spectators) {
if (Player* spectatorPlayer = spectator->getPlayer()) {
if (!Position::areInRange<1, 1>(player->getPosition(), spectatorPlayer->getPosition())) {
spectatorPlayer->sendCreatureSay(player, TALKTYPE_WHISPER, "pspsps");
} else {
spectatorPlayer->sendCreatureSay(player, TALKTYPE_WHISPER, text);
}
}
}
//event method
for (Creature* spectator : spectators) {
spectator->onCreatureSay(player, TALKTYPE_WHISPER, text);
}
}
bool Game::playerYell(Player* player, const std::string& text)
{
if (player->hasCondition(CONDITION_YELLTICKS)) {
player->sendCancelMessage(RETURNVALUE_YOUAREEXHAUSTED);
return false;
}
uint32_t minimumLevel = g_config.getNumber(ConfigManager::YELL_MINIMUM_LEVEL);
if (player->getLevel() < minimumLevel) {
std::ostringstream ss;
ss << "You may not yell unless you have reached level " << minimumLevel;
if (g_config.getBoolean(ConfigManager::YELL_ALLOW_PREMIUM)) {
if (player->isPremium()) {
internalCreatureSay(player, TALKTYPE_YELL, asUpperCaseString(text), false);
return true;
} else {
ss << " or have a premium account";
}
}
ss << ".";
player->sendTextMessage(MESSAGE_STATUS_SMALL, ss.str());
return false;
}
if (player->getAccountType() < ACCOUNT_TYPE_GAMEMASTER) {
Condition* condition = Condition::createCondition(CONDITIONID_DEFAULT, CONDITION_YELLTICKS, 30000, 0);
player->addCondition(condition);
}
internalCreatureSay(player, TALKTYPE_YELL, asUpperCaseString(text), false);
return true;
}
bool Game::playerSpeakTo(Player* player, SpeakClasses type, const std::string& receiver,
const std::string& text)
{
Player* toPlayer = getPlayerByName(receiver);
if (!toPlayer) {
player->sendTextMessage(MESSAGE_STATUS_SMALL, "A player with this name is not online.");
return false;
}
if (type == TALKTYPE_PRIVATE_RED_TO && (player->hasFlag(PlayerFlag_CanTalkRedPrivate) || player->getAccountType() >= ACCOUNT_TYPE_GAMEMASTER)) {
type = TALKTYPE_PRIVATE_RED_FROM;
} else {
type = TALKTYPE_PRIVATE_FROM;
}
toPlayer->sendPrivateMessage(player, type, text);
toPlayer->onCreatureSay(player, type, text);
if (toPlayer->isInGhostMode() && !player->isAccessPlayer()) {
player->sendTextMessage(MESSAGE_STATUS_SMALL, "A player with this name is not online.");
} else {
std::ostringstream ss;
ss << "Message sent to " << toPlayer->getName() << '.';
player->sendTextMessage(MESSAGE_STATUS_SMALL, ss.str());
}
return true;
}
void Game::playerSpeakToNpc(Player* player, const std::string& text)
{
SpectatorVec spectators;
map.getSpectators(spectators, player->getPosition());
for (Creature* spectator : spectators) {
if (spectator->getNpc()) {
spectator->onCreatureSay(player, TALKTYPE_PRIVATE_PN, text);
}
}
}
//--
bool Game::canThrowObjectTo(const Position& fromPos, const Position& toPos, bool checkLineOfSight /*= true*/,
int32_t rangex /*= Map::maxClientViewportX*/, int32_t rangey /*= Map::maxClientViewportY*/) const
{
return map.canThrowObjectTo(fromPos, toPos, checkLineOfSight, rangex, rangey);
}
bool Game::isSightClear(const Position& fromPos, const Position& toPos, bool floorCheck) const
{
return map.isSightClear(fromPos, toPos, floorCheck);
}
bool Game::internalCreatureTurn(Creature* creature, Direction dir)
{
if (creature->getDirection() == dir) {
return false;
}
creature->setDirection(dir);
//send to client
SpectatorVec spectators;
map.getSpectators(spectators, creature->getPosition(), true, true);
for (Creature* spectator : spectators) {
spectator->getPlayer()->sendCreatureTurn(creature);
}
return true;
}
bool Game::internalCreatureSay(Creature* creature, SpeakClasses type, const std::string& text,
bool ghostMode, SpectatorVec* spectatorsPtr/* = nullptr*/, const Position* pos/* = nullptr*/)
{
if (text.empty()) {
return false;
}
if (!pos) {
pos = &creature->getPosition();
}
SpectatorVec spectators;
if (!spectatorsPtr || spectatorsPtr->empty()) {
// Use the cached SpectatorVec if one was supplied and already populated;
// otherwise fill a local vector (the compiler can usually optimize away
// the construction of the unused temporary).
if (type != TALKTYPE_YELL && type != TALKTYPE_MONSTER_YELL) {
map.getSpectators(spectators, *pos, false, false,
Map::maxClientViewportX, Map::maxClientViewportX,
Map::maxClientViewportY, Map::maxClientViewportY);
} else {
map.getSpectators(spectators, *pos, true, false, 18, 18, 14, 14);
}
} else {
spectators = (*spectatorsPtr);
}
//send to client
for (Creature* spectator : spectators) {
if (Player* tmpPlayer = spectator->getPlayer()) {
if (!ghostMode || tmpPlayer->canSeeCreature(creature)) {
tmpPlayer->sendCreatureSay(creature, type, text, pos);
}
}
}
//event method
for (Creature* spectator : spectators) {
spectator->onCreatureSay(creature, type, text);
if (creature != spectator) {
g_events->eventCreatureOnHear(spectator, creature, text, type);
}
}
return true;
}
void Game::checkCreatureWalk(uint32_t creatureId)
{
Creature* creature = getCreatureByID(creatureId);
if (creature && creature->getHealth() > 0) {
creature->onWalk();
cleanup();
}
}
void Game::updateCreatureWalk(uint32_t creatureId)
{
Creature* creature = getCreatureByID(creatureId);
if (creature && creature->getHealth() > 0) {
creature->goToFollowCreature();
}
}
void Game::checkCreatureAttack(uint32_t creatureId)
{
Creature* creature = getCreatureByID(creatureId);
if (creature && creature->getHealth() > 0) {
creature->onAttacking(0);
}
}
void Game::addCreatureCheck(Creature* creature)
{
creature->creatureCheck = true;
if (creature->inCheckCreaturesVector) {
// already in a vector
return;
}
creature->inCheckCreaturesVector = true;
checkCreatureLists[uniform_random(0, EVENT_CREATURECOUNT - 1)].push_back(creature);
creature->incrementReferenceCounter();
}
void Game::removeCreatureCheck(Creature* creature)
{
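//only clear the flag here; the creature is erased and released lazily in checkCreatures()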
if (creature->inCheckCreaturesVector) {
creature->creatureCheck = false;
}
}
void Game::checkCreatures(size_t index)
{
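//creatures are spread across EVENT_CREATURECOUNT buckets and one bucket is
//processed per scheduler tick, so each creature thinks once per full cycle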
g_scheduler.addEvent(createSchedulerTask(EVENT_CHECK_CREATURE_INTERVAL, std::bind(&Game::checkCreatures, this, (index + 1) % EVENT_CREATURECOUNT)));
auto& checkCreatureList = checkCreatureLists[index];
auto it = checkCreatureList.begin(), end = checkCreatureList.end();
while (it != end) {
Creature* creature = *it;
if (creature->creatureCheck) {
if (creature->getHealth() > 0) {
creature->onThink(EVENT_CREATURE_THINK_INTERVAL);
creature->onAttacking(EVENT_CREATURE_THINK_INTERVAL);
creature->executeConditions(EVENT_CREATURE_THINK_INTERVAL);
} else {
creature->onDeath();
}
++it;
} else {
creature->inCheckCreaturesVector = false;
it = checkCreatureList.erase(it);
ReleaseCreature(creature);
}
}
cleanup();
}
void Game::changeSpeed(Creature* creature, int32_t varSpeedDelta)
{
int32_t varSpeed = creature->getSpeed() - creature->getBaseSpeed();
varSpeed += varSpeedDelta;
creature->setSpeed(varSpeed);
//send to clients
SpectatorVec spectators;
map.getSpectators(spectators, creature->getPosition(), false, true);
for (Creature* spectator : spectators) {
spectator->getPlayer()->sendChangeSpeed(creature, creature->getStepSpeed());
}
}
void Game::internalCreatureChangeOutfit(Creature* creature, const Outfit_t& outfit)
{
if (!g_events->eventCreatureOnChangeOutfit(creature, outfit)) {
return;
}
creature->setCurrentOutfit(outfit);
if (creature->isInvisible()) {
return;
}
//send to clients
SpectatorVec spectators;
map.getSpectators(spectators, creature->getPosition(), true, true);
for (Creature* spectator : spectators) {
spectator->getPlayer()->sendCreatureChangeOutfit(creature, outfit);
}
}
void Game::internalCreatureChangeVisible(Creature* creature, bool visible)
{
//send to clients
SpectatorVec spectators;
map.getSpectators(spectators, creature->getPosition(), true, true);
for (Creature* spectator : spectators) {
spectator->getPlayer()->sendCreatureChangeVisible(creature, visible);
}
}
void Game::changeLight(const Creature* creature)
{
//send to clients
SpectatorVec spectators;
map.getSpectators(spectators, creature->getPosition(), true, true);
for (Creature* spectator : spectators) {
spectator->getPlayer()->sendCreatureLight(creature);
}
}
bool Game::combatBlockHit(CombatDamage& damage, Creature* attacker, Creature* target, bool checkDefense, bool checkArmor, bool field)
{
if (damage.primary.type == COMBAT_NONE && damage.secondary.type == COMBAT_NONE) {
return true;
}
if (target->getPlayer() && target->isInGhostMode()) {
return true;
}
if (damage.primary.value > 0) {
return false;
}
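//note: the lambda is static, so 'this' is captured only on the first call
//(harmless as long as there is a single Game instance)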
static const auto sendBlockEffect = [this](BlockType_t blockType, CombatType_t combatType, const Position& targetPos) {
if (blockType == BLOCK_DEFENSE) {
addMagicEffect(targetPos, CONST_ME_POFF);
} else if (blockType == BLOCK_ARMOR) {
addMagicEffect(targetPos, CONST_ME_BLOCKHIT);
} else if (blockType == BLOCK_IMMUNITY) {
uint8_t hitEffect = 0;
switch (combatType) {
case COMBAT_UNDEFINEDDAMAGE: {
return;
}
case COMBAT_ENERGYDAMAGE:
case COMBAT_FIREDAMAGE:
case COMBAT_PHYSICALDAMAGE:
case COMBAT_ICEDAMAGE:
case COMBAT_DEATHDAMAGE: {
hitEffect = CONST_ME_BLOCKHIT;
break;
}
case COMBAT_EARTHDAMAGE: {
hitEffect = CONST_ME_GREEN_RINGS;
break;
}
case COMBAT_HOLYDAMAGE: {
hitEffect = CONST_ME_HOLYDAMAGE;
break;
}
default: {
hitEffect = CONST_ME_POFF;
break;
}
}
addMagicEffect(targetPos, hitEffect);
}
};
BlockType_t primaryBlockType, secondaryBlockType;
if (damage.primary.type != COMBAT_NONE) {
damage.primary.value = -damage.primary.value;
primaryBlockType = target->blockHit(attacker, damage.primary.type, damage.primary.value, checkDefense, checkArmor, field);
damage.primary.value = -damage.primary.value;
sendBlockEffect(primaryBlockType, damage.primary.type, target->getPosition());
} else {
primaryBlockType = BLOCK_NONE;
}
if (damage.secondary.type != COMBAT_NONE) {
damage.secondary.value = -damage.secondary.value;
secondaryBlockType = target->blockHit(attacker, damage.secondary.type, damage.secondary.value, false, false, field);
damage.secondary.value = -damage.secondary.value;
sendBlockEffect(secondaryBlockType, damage.secondary.type, target->getPosition());
} else {
secondaryBlockType = BLOCK_NONE;
}
return (primaryBlockType != BLOCK_NONE) && (secondaryBlockType != BLOCK_NONE);
}
void Game::combatGetTypeInfo(CombatType_t combatType, Creature* target, TextColor_t& color, uint8_t& effect)
{
switch (combatType) {
case COMBAT_PHYSICALDAMAGE: {
Item* splash = nullptr;
switch (target->getRace()) {
case RACE_VENOM:
color = TEXTCOLOR_LIGHTGREEN;
effect = CONST_ME_HITBYPOISON;
splash = Item::CreateItem(ITEM_SMALLSPLASH, FLUID_SLIME);
break;
case RACE_BLOOD:
color = TEXTCOLOR_RED;
effect = CONST_ME_DRAWBLOOD;
if (const Tile* tile = target->getTile()) {
if (!tile->hasFlag(TILESTATE_PROTECTIONZONE)) {
splash = Item::CreateItem(ITEM_SMALLSPLASH, FLUID_BLOOD);
}
}
break;
case RACE_UNDEAD:
color = TEXTCOLOR_LIGHTGREY;
effect = CONST_ME_HITAREA;
break;
case RACE_FIRE:
color = TEXTCOLOR_ORANGE;
effect = CONST_ME_DRAWBLOOD;
break;
case RACE_ENERGY:
color = TEXTCOLOR_ELECTRICPURPLE;
effect = CONST_ME_ENERGYHIT;
break;
default:
color = TEXTCOLOR_NONE;
effect = CONST_ME_NONE;
break;
}
if (splash) {
internalAddItem(target->getTile(), splash, INDEX_WHEREEVER, FLAG_NOLIMIT);
startDecay(splash);
}
break;
}
case COMBAT_ENERGYDAMAGE: {
color = TEXTCOLOR_ELECTRICPURPLE;
effect = CONST_ME_ENERGYHIT;
break;
}
case COMBAT_EARTHDAMAGE: {
color = TEXTCOLOR_LIGHTGREEN;
effect = CONST_ME_GREEN_RINGS;
break;
}
case COMBAT_DROWNDAMAGE: {
color = TEXTCOLOR_LIGHTBLUE;
effect = CONST_ME_LOSEENERGY;
break;
}
case COMBAT_FIREDAMAGE: {
color = TEXTCOLOR_ORANGE;
effect = CONST_ME_HITBYFIRE;
break;
}
case COMBAT_ICEDAMAGE: {
color = TEXTCOLOR_SKYBLUE;
effect = CONST_ME_ICEATTACK;
break;
}
case COMBAT_HOLYDAMAGE: {
color = TEXTCOLOR_YELLOW;
effect = CONST_ME_HOLYDAMAGE;
break;
}
case COMBAT_DEATHDAMAGE: {
color = TEXTCOLOR_DARKRED;
effect = CONST_ME_SMALLCLOUDS;
break;
}
case COMBAT_LIFEDRAIN: {
color = TEXTCOLOR_RED;
effect = CONST_ME_MAGIC_RED;
break;
}
default: {
color = TEXTCOLOR_NONE;
effect = CONST_ME_NONE;
break;
}
}
}
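// Applies a health change to the target: a positive primary value heals,
// otherwise the hit is routed through the mana shield first and the remainder
// is taken from health. HEALTHCHANGE creature events run once per change;
// damage.origin is reset to ORIGIN_NONE before the recursive call so they are
// not fired again.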
bool Game::combatChangeHealth(Creature* attacker, Creature* target, CombatDamage& damage)
{
const Position& targetPos = target->getPosition();
if (damage.primary.value > 0) {
if (target->getHealth() <= 0) {
return false;
}
Player* attackerPlayer;
if (attacker) {
attackerPlayer = attacker->getPlayer();
} else {
attackerPlayer = nullptr;
}
Player* targetPlayer = target->getPlayer();
if (attackerPlayer && targetPlayer && attackerPlayer->getSkull() == SKULL_BLACK && attackerPlayer->getSkullClient(targetPlayer) == SKULL_NONE) {
return false;
}
if (damage.origin != ORIGIN_NONE) {
const auto& events = target->getCreatureEvents(CREATURE_EVENT_HEALTHCHANGE);
if (!events.empty()) {
for (CreatureEvent* creatureEvent : events) {
creatureEvent->executeHealthChange(target, attacker, damage);
}
damage.origin = ORIGIN_NONE;
return combatChangeHealth(attacker, target, damage);
}
}
int32_t realHealthChange = target->getHealth();
target->gainHealth(attacker, damage.primary.value);
realHealthChange = target->getHealth() - realHealthChange;
if (realHealthChange > 0 && !target->isInGhostMode()) {
std::stringstream ss;
ss << realHealthChange << (realHealthChange != 1 ? " hitpoints." : " hitpoint.");
std::string damageString = ss.str();
std::string spectatorMessage;
TextMessage message;
message.position = targetPos;
message.primary.value = realHealthChange;
message.primary.color = TEXTCOLOR_PASTELRED;
SpectatorVec spectators;
map.getSpectators(spectators, targetPos, false, true);
for (Creature* spectator : spectators) {
Player* tmpPlayer = spectator->getPlayer();
if (tmpPlayer == attackerPlayer && attackerPlayer != targetPlayer) {
ss.str({});
ss << "You heal " << target->getNameDescription() << " for " << damageString;
message.type = MESSAGE_HEALED;
message.text = ss.str();
} else if (tmpPlayer == targetPlayer) {
ss.str({});
if (!attacker) {
ss << "You were healed";
} else if (targetPlayer == attackerPlayer) {
ss << "You healed yourself";
} else {
ss << "You were healed by " << attacker->getNameDescription();
}
ss << " for " << damageString;
message.type = MESSAGE_HEALED;
message.text = ss.str();
} else {
if (spectatorMessage.empty()) {
ss.str({});
if (!attacker) {
ss << ucfirst(target->getNameDescription()) << " was healed";
} else {
ss << ucfirst(attacker->getNameDescription()) << " healed ";
if (attacker == target) {
ss << (targetPlayer ? (targetPlayer->getSex() == PLAYERSEX_FEMALE ? "herself" : "himself") : "itself");
} else {
ss << target->getNameDescription();
}
}
ss << " for " << damageString;
spectatorMessage = ss.str();
}
message.type = MESSAGE_HEALED_OTHERS;
message.text = spectatorMessage;
}
tmpPlayer->sendTextMessage(message);
}
}
} else {
if (!target->isAttackable()) {
if (!target->isInGhostMode()) {
addMagicEffect(targetPos, CONST_ME_POFF);
}
return true;
}
Player* attackerPlayer;
if (attacker) {
attackerPlayer = attacker->getPlayer();
} else {
attackerPlayer = nullptr;
}
Player* targetPlayer = target->getPlayer();
if (attackerPlayer && targetPlayer && attackerPlayer->getSkull() == SKULL_BLACK && attackerPlayer->getSkullClient(targetPlayer) == SKULL_NONE) {
return false;
}
damage.primary.value = std::abs(damage.primary.value);
damage.secondary.value = std::abs(damage.secondary.value);
int32_t healthChange = damage.primary.value + damage.secondary.value;
if (healthChange == 0) {
return true;
}
TextMessage message;
message.position = targetPos;
SpectatorVec spectators;
if (targetPlayer && target->hasCondition(CONDITION_MANASHIELD) && damage.primary.type != COMBAT_UNDEFINEDDAMAGE) {
int32_t manaDamage = std::min<int32_t>(targetPlayer->getMana(), healthChange);
if (manaDamage != 0) {
if (damage.origin != ORIGIN_NONE) {
const auto& events = target->getCreatureEvents(CREATURE_EVENT_MANACHANGE);
if (!events.empty()) {
for (CreatureEvent* creatureEvent : events) {
creatureEvent->executeManaChange(target, attacker, damage);
}
healthChange = damage.primary.value + damage.secondary.value;
if (healthChange == 0) {
return true;
}
manaDamage = std::min<int32_t>(targetPlayer->getMana(), healthChange);
}
}
targetPlayer->drainMana(attacker, manaDamage);
map.getSpectators(spectators, targetPos, true, true);
addMagicEffect(spectators, targetPos, CONST_ME_LOSEENERGY);
std::stringstream ss;
std::string damageString = std::to_string(manaDamage);
std::string spectatorMessage;
message.primary.value = manaDamage;
message.primary.color = TEXTCOLOR_BLUE;
for (Creature* spectator : spectators) {
Player* tmpPlayer = spectator->getPlayer();
if (tmpPlayer->getPosition().z != targetPos.z) {
continue;
}
if (tmpPlayer == attackerPlayer && attackerPlayer != targetPlayer) {
ss.str({});
ss << ucfirst(target->getNameDescription()) << " loses " << damageString << " mana due to your attack.";
message.type = MESSAGE_DAMAGE_DEALT;
message.text = ss.str();
} else if (tmpPlayer == targetPlayer) {
ss.str({});
ss << "You lose " << damageString << " mana";
if (!attacker) {
ss << '.';
} else if (targetPlayer == attackerPlayer) {
ss << " due to your own attack.";
} else {
ss << " due to an attack by " << attacker->getNameDescription() << '.';
}
message.type = MESSAGE_DAMAGE_RECEIVED;
message.text = ss.str();
} else {
if (spectatorMessage.empty()) {
ss.str({});
ss << ucfirst(target->getNameDescription()) << " loses " << damageString << " mana";
if (attacker) {
ss << " due to ";
if (attacker == target) {
ss << (targetPlayer->getSex() == PLAYERSEX_FEMALE ? "her own attack" : "his own attack");
} else {
ss << "an attack by " << attacker->getNameDescription();
}
}
ss << '.';
spectatorMessage = ss.str();
}
message.type = MESSAGE_DAMAGE_OTHERS;
message.text = spectatorMessage;
}
tmpPlayer->sendTextMessage(message);
}
damage.primary.value -= manaDamage;
if (damage.primary.value < 0) {
damage.secondary.value = std::max<int32_t>(0, damage.secondary.value + damage.primary.value);
damage.primary.value = 0;
}
}
}
int32_t realDamage = damage.primary.value + damage.secondary.value;
if (realDamage == 0) {
return true;
}
if (damage.origin != ORIGIN_NONE) {
const auto& events = target->getCreatureEvents(CREATURE_EVENT_HEALTHCHANGE);
if (!events.empty()) {
for (CreatureEvent* creatureEvent : events) {
creatureEvent->executeHealthChange(target, attacker, damage);
}
damage.origin = ORIGIN_NONE;
return combatChangeHealth(attacker, target, damage);
}
}
int32_t targetHealth = target->getHealth();
if (damage.primary.value >= targetHealth) {
damage.primary.value = targetHealth;
damage.secondary.value = 0;
} else if (damage.secondary.value) {
damage.secondary.value = std::min<int32_t>(damage.secondary.value, targetHealth - damage.primary.value);
}
realDamage = damage.primary.value + damage.secondary.value;
if (realDamage == 0) {
return true;
}
if (spectators.empty()) {
map.getSpectators(spectators, targetPos, true, true);
}
message.primary.value = damage.primary.value;
message.secondary.value = damage.secondary.value;
uint8_t hitEffect;
if (message.primary.value) {
combatGetTypeInfo(damage.primary.type, target, message.primary.color, hitEffect);
if (hitEffect != CONST_ME_NONE) {
addMagicEffect(spectators, targetPos, hitEffect);
}
}
if (message.secondary.value) {
combatGetTypeInfo(damage.secondary.type, target, message.secondary.color, hitEffect);
if (hitEffect != CONST_ME_NONE) {
addMagicEffect(spectators, targetPos, hitEffect);
}
}
if (message.primary.color != TEXTCOLOR_NONE || message.secondary.color != TEXTCOLOR_NONE) {
std::stringstream ss;
ss << realDamage << (realDamage != 1 ? " hitpoints" : " hitpoint");
std::string damageString = ss.str();
std::string spectatorMessage;
for (Creature* spectator : spectators) {
Player* tmpPlayer = spectator->getPlayer();
if (tmpPlayer->getPosition().z != targetPos.z) {
continue;
}
if (tmpPlayer == attackerPlayer && attackerPlayer != targetPlayer) {
ss.str({});
ss << ucfirst(target->getNameDescription()) << " loses " << damageString << " due to your attack.";
message.type = MESSAGE_DAMAGE_DEALT;
message.text = ss.str();
} else if (tmpPlayer == targetPlayer) {
ss.str({});
ss << "You lose " << damageString;
if (!attacker) {
ss << '.';
} else if (targetPlayer == attackerPlayer) {
ss << " due to your own attack.";
} else {
ss << " due to an attack by " << attacker->getNameDescription() << '.';
}
message.type = MESSAGE_DAMAGE_RECEIVED;
message.text = ss.str();
} else {
message.type = MESSAGE_DAMAGE_OTHERS;
if (spectatorMessage.empty()) {
ss.str({});
ss << ucfirst(target->getNameDescription()) << " loses " << damageString;
if (attacker) {
ss << " due to ";
if (attacker == target) {
if (targetPlayer) {
ss << (targetPlayer->getSex() == PLAYERSEX_FEMALE ? "her own attack" : "his own attack");
} else {
ss << "its own attack";
}
} else {
ss << "an attack by " << attacker->getNameDescription();
}
}
ss << '.';
spectatorMessage = ss.str();
}
message.text = spectatorMessage;
}
tmpPlayer->sendTextMessage(message);
}
}
if (realDamage >= targetHealth) {
for (CreatureEvent* creatureEvent : target->getCreatureEvents(CREATURE_EVENT_PREPAREDEATH)) {
if (!creatureEvent->executeOnPrepareDeath(target, attacker)) {
return false;
}
}
}
target->drainHealth(attacker, realDamage);
addCreatureHealth(spectators, target);
}
return true;
}
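// Applies a mana change to the target (players only): positive values restore
// mana, negative values drain it, subject to black-skull rules, blocking and
// MANACHANGE creature events.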
bool Game::combatChangeMana(Creature* attacker, Creature* target, CombatDamage& damage)
{
Player* targetPlayer = target->getPlayer();
if (!targetPlayer) {
return true;
}
int32_t manaChange = damage.primary.value + damage.secondary.value;
if (manaChange > 0) {
if (attacker) {
const Player* attackerPlayer = attacker->getPlayer();
if (attackerPlayer && attackerPlayer->getSkull() == SKULL_BLACK && attackerPlayer->getSkullClient(target) == SKULL_NONE) {
return false;
}
}
if (damage.origin != ORIGIN_NONE) {
const auto& events = target->getCreatureEvents(CREATURE_EVENT_MANACHANGE);
if (!events.empty()) {
for (CreatureEvent* creatureEvent : events) {
creatureEvent->executeManaChange(target, attacker, damage);
}
damage.origin = ORIGIN_NONE;
return combatChangeMana(attacker, target, damage);
}
}
int32_t realManaChange = targetPlayer->getMana();
targetPlayer->changeMana(manaChange);
realManaChange = targetPlayer->getMana() - realManaChange;
if (realManaChange > 0 && !targetPlayer->isInGhostMode()) {
TextMessage message(MESSAGE_HEALED, "You gained " + std::to_string(realManaChange) + " mana.");
message.position = target->getPosition();
message.primary.value = realManaChange;
message.primary.color = TEXTCOLOR_MAYABLUE;
targetPlayer->sendTextMessage(message);
}
} else {
const Position& targetPos = target->getPosition();
if (!target->isAttackable()) {
if (!target->isInGhostMode()) {
addMagicEffect(targetPos, CONST_ME_POFF);
}
return false;
}
Player* attackerPlayer;
if (attacker) {
attackerPlayer = attacker->getPlayer();
} else {
attackerPlayer = nullptr;
}
if (attackerPlayer && attackerPlayer->getSkull() == SKULL_BLACK && attackerPlayer->getSkullClient(targetPlayer) == SKULL_NONE) {
return false;
}
int32_t manaLoss = std::min<int32_t>(targetPlayer->getMana(), -manaChange);
BlockType_t blockType = target->blockHit(attacker, COMBAT_MANADRAIN, manaLoss);
if (blockType != BLOCK_NONE) {
addMagicEffect(targetPos, CONST_ME_POFF);
return false;
}
if (manaLoss <= 0) {
return true;
}
if (damage.origin != ORIGIN_NONE) {
const auto& events = target->getCreatureEvents(CREATURE_EVENT_MANACHANGE);
if (!events.empty()) {
for (CreatureEvent* creatureEvent : events) {
creatureEvent->executeManaChange(target, attacker, damage);
}
damage.origin = ORIGIN_NONE;
return combatChangeMana(attacker, target, damage);
}
}
targetPlayer->drainMana(attacker, manaLoss);
std::stringstream ss;
std::string damageString = std::to_string(manaLoss);
std::string spectatorMessage;
TextMessage message;
message.position = targetPos;
message.primary.value = manaLoss;
message.primary.color = TEXTCOLOR_BLUE;
SpectatorVec spectators;
map.getSpectators(spectators, targetPos, false, true);
for (Creature* spectator : spectators) {
Player* tmpPlayer = spectator->getPlayer();
if (tmpPlayer == attackerPlayer && attackerPlayer != targetPlayer) {
ss.str({});
ss << ucfirst(target->getNameDescription()) << " loses " << damageString << " mana due to your attack.";
message.type = MESSAGE_DAMAGE_DEALT;
message.text = ss.str();
} else if (tmpPlayer == targetPlayer) {
ss.str({});
ss << "You lose " << damageString << " mana";
if (!attacker) {
ss << '.';
} else if (targetPlayer == attackerPlayer) {
ss << " due to your own attack.";
} else {
ss << " mana due to an attack by " << attacker->getNameDescription() << '.';
}
message.type = MESSAGE_DAMAGE_RECEIVED;
message.text = ss.str();
} else {
if (spectatorMessage.empty()) {
ss.str({});
ss << ucfirst(target->getNameDescription()) << " loses " << damageString << " mana";
if (attacker) {
ss << " due to ";
if (attacker == target) {
ss << (targetPlayer->getSex() == PLAYERSEX_FEMALE ? "her own attack" : "his own attack");
} else {
ss << "an attack by " << attacker->getNameDescription();
}
}
ss << '.';
spectatorMessage = ss.str();
}
message.type = MESSAGE_DAMAGE_OTHERS;
message.text = spectatorMessage;
}
tmpPlayer->sendTextMessage(message);
}
}
return true;
}
void Game::addCreatureHealth(const Creature* target)
{
SpectatorVec spectators;
map.getSpectators(spectators, target->getPosition(), true, true);
addCreatureHealth(spectators, target);
}
void Game::addCreatureHealth(const SpectatorVec& spectators, const Creature* target)
{
for (Creature* spectator : spectators) {
if (Player* tmpPlayer = spectator->getPlayer()) {
tmpPlayer->sendCreatureHealth(target);
}
}
}
void Game::addMagicEffect(const Position& pos, uint8_t effect)
{
SpectatorVec spectators;
map.getSpectators(spectators, pos, true, true);
addMagicEffect(spectators, pos, effect);
}
void Game::addMagicEffect(const SpectatorVec& spectators, const Position& pos, uint8_t effect)
{
for (Creature* spectator : spectators) {
if (Player* tmpPlayer = spectator->getPlayer()) {
tmpPlayer->sendMagicEffect(pos, effect);
}
}
}
void Game::addDistanceEffect(const Position& fromPos, const Position& toPos, uint8_t effect)
{
SpectatorVec spectators, toPosSpectators;
map.getSpectators(spectators, fromPos, false, true);
map.getSpectators(toPosSpectators, toPos, false, true);
spectators.addSpectators(toPosSpectators);
addDistanceEffect(spectators, fromPos, toPos, effect);
}
void Game::addDistanceEffect(const SpectatorVec& spectators, const Position& fromPos, const Position& toPos, uint8_t effect)
{
for (Creature* spectator : spectators) {
if (Player* tmpPlayer = spectator->getPlayer()) {
tmpPlayer->sendDistanceShoot(fromPos, toPos, effect);
}
}
}
void Game::startDecay(Item* item)
{
if (!item || !item->canDecay()) {
return;
}
ItemDecayState_t decayState = item->getDecaying();
if (decayState == DECAYING_TRUE) {
return;
}
if (item->getDuration() > 0) {
item->incrementReferenceCounter();
item->setDecaying(DECAYING_TRUE);
toDecayItems.push_front(item);
} else {
internalDecayItem(item);
}
}
void Game::internalDecayItem(Item* item)
{
const ItemType& it = Item::items[item->getID()];
if (it.decayTo != 0) {
Item* newItem = transformItem(item, item->getDecayTo());
startDecay(newItem);
} else {
ReturnValue ret = internalRemoveItem(item);
if (ret != RETURNVALUE_NOERROR) {
std::cout << "[Debug - Game::internalDecayItem] internalDecayItem failed, error code: " << static_cast<uint32_t>(ret) << ", item id: " << item->getID() << std::endl;
}
}
}
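// Decaying items are spread across EVENT_DECAY_BUCKETS buckets and one bucket
// is processed per EVENT_DECAYINTERVAL tick, so each item is revisited about
// once every EVENT_DECAYINTERVAL * EVENT_DECAY_BUCKETS milliseconds.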
void Game::checkDecay()
{
g_scheduler.addEvent(createSchedulerTask(EVENT_DECAYINTERVAL, std::bind(&Game::checkDecay, this)));
size_t bucket = (lastBucket + 1) % EVENT_DECAY_BUCKETS;
auto it = decayItems[bucket].begin(), end = decayItems[bucket].end();
while (it != end) {
Item* item = *it;
if (!item->canDecay()) {
item->setDecaying(DECAYING_FALSE);
ReleaseItem(item);
it = decayItems[bucket].erase(it);
continue;
}
int32_t duration = item->getDuration();
int32_t decreaseTime = std::min<int32_t>(EVENT_DECAYINTERVAL * EVENT_DECAY_BUCKETS, duration);
duration -= decreaseTime;
item->decreaseDuration(decreaseTime);
if (duration <= 0) {
it = decayItems[bucket].erase(it);
internalDecayItem(item);
ReleaseItem(item);
} else if (duration < EVENT_DECAYINTERVAL * EVENT_DECAY_BUCKETS) {
it = decayItems[bucket].erase(it);
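// The item now expires before this bucket comes around again; move it to
// the bucket closest to its remaining duration.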
size_t newBucket = (bucket + ((duration + EVENT_DECAYINTERVAL / 2) / 1000)) % EVENT_DECAY_BUCKETS;
if (newBucket == bucket) {
internalDecayItem(item);
ReleaseItem(item);
} else {
decayItems[newBucket].push_back(item);
}
} else {
++it;
}
}
lastBucket = bucket;
cleanup();
}
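// Advances the 1440-minute world clock by lightHourDelta per tick and, around
// sunrise and sunset, fades the ambient light between LIGHT_LEVEL_NIGHT and
// LIGHT_LEVEL_DAY in roughly 30 steps.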
void Game::checkLight()
{
g_scheduler.addEvent(createSchedulerTask(EVENT_LIGHTINTERVAL, std::bind(&Game::checkLight, this)));
lightHour += lightHourDelta;
if (lightHour > 1440) {
lightHour -= 1440;
}
if (std::abs(lightHour - SUNRISE) < 2 * lightHourDelta) {
lightState = LIGHT_STATE_SUNRISE;
} else if (std::abs(lightHour - SUNSET) < 2 * lightHourDelta) {
lightState = LIGHT_STATE_SUNSET;
}
int32_t newLightLevel = lightLevel;
bool lightChange = false;
switch (lightState) {
case LIGHT_STATE_SUNRISE: {
newLightLevel += (LIGHT_LEVEL_DAY - LIGHT_LEVEL_NIGHT) / 30;
lightChange = true;
break;
}
case LIGHT_STATE_SUNSET: {
newLightLevel -= (LIGHT_LEVEL_DAY - LIGHT_LEVEL_NIGHT) / 30;
lightChange = true;
break;
}
default:
break;
}
if (newLightLevel <= LIGHT_LEVEL_NIGHT) {
lightLevel = LIGHT_LEVEL_NIGHT;
lightState = LIGHT_STATE_NIGHT;
} else if (newLightLevel >= LIGHT_LEVEL_DAY) {
lightLevel = LIGHT_LEVEL_DAY;
lightState = LIGHT_STATE_DAY;
} else {
lightLevel = newLightLevel;
}
if (lightChange) {
LightInfo lightInfo = getWorldLightInfo();
for (const auto& it : players) {
it.second->sendWorldLight(lightInfo);
}
}
}
LightInfo Game::getWorldLightInfo() const
{
return {lightLevel, 0xD7};
}
void Game::shutdown()
{
std::cout << "Shutting down..." << std::flush;
g_scheduler.shutdown();
g_databaseTasks.shutdown();
g_dispatcher.shutdown();
map.spawns.clear();
raids.clear();
cleanup();
if (serviceManager) {
serviceManager->stop();
}
ConnectionManager::getInstance().closeAll();
std::cout << " done!" << std::endl;
}
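// Releases creatures and items queued for deletion and files newly decaying
// items into the appropriate decay bucket.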
void Game::cleanup()
{
//free memory
for (auto creature : ToReleaseCreatures) {
creature->decrementReferenceCounter();
}
ToReleaseCreatures.clear();
for (auto item : ToReleaseItems) {
item->decrementReferenceCounter();
}
ToReleaseItems.clear();
for (Item* item : toDecayItems) {
const uint32_t dur = item->getDuration();
if (dur >= EVENT_DECAYINTERVAL * EVENT_DECAY_BUCKETS) {
decayItems[lastBucket].push_back(item);
} else {
decayItems[(lastBucket + 1 + dur / 1000) % EVENT_DECAY_BUCKETS].push_back(item);
}
}
toDecayItems.clear();
}
void Game::ReleaseCreature(Creature* creature)
{
ToReleaseCreatures.push_back(creature);
}
void Game::ReleaseItem(Item* item)
{
ToReleaseItems.push_back(item);
}
void Game::broadcastMessage(const std::string& text, MessageClasses type) const
{
std::cout << "> Broadcasted message: \"" << text << "\"." << std::endl;
for (const auto& it : players) {
it.second->sendTextMessage(type, text);
}
}
void Game::updateCreatureWalkthrough(const Creature* creature)
{
//send to clients
SpectatorVec spectators;
map.getSpectators(spectators, creature->getPosition(), true, true);
for (Creature* spectator : spectators) {
Player* tmpPlayer = spectator->getPlayer();
tmpPlayer->sendCreatureWalkthrough(creature, tmpPlayer->canWalkthroughEx(creature));
}
}
void Game::updateCreatureSkull(const Creature* creature)
{
if (getWorldType() != WORLD_TYPE_PVP) {
return;
}
SpectatorVec spectators;
map.getSpectators(spectators, creature->getPosition(), true, true);
for (Creature* spectator : spectators) {
spectator->getPlayer()->sendCreatureSkull(creature);
}
}
void Game::updatePlayerShield(Player* player)
{
SpectatorVec spectators;
map.getSpectators(spectators, player->getPosition(), true, true);
for (Creature* spectator : spectators) {
spectator->getPlayer()->sendCreatureShield(player);
}
}
void Game::updatePlayerHelpers(const Player& player)
{
uint32_t creatureId = player.getID();
uint16_t helpers = player.getHelpers();
SpectatorVec spectators;
map.getSpectators(spectators, player.getPosition(), true, true);
for (Creature* spectator : spectators) {
spectator->getPlayer()->sendCreatureHelpers(creatureId, helpers);
}
}
void Game::updateCreatureType(Creature* creature)
{
const Player* masterPlayer = nullptr;
uint32_t creatureId = creature->getID();
CreatureType_t creatureType = creature->getType();
if (creatureType == CREATURETYPE_MONSTER) {
const Creature* master = creature->getMaster();
if (master) {
masterPlayer = master->getPlayer();
if (masterPlayer) {
creatureType = CREATURETYPE_SUMMON_OTHERS;
}
}
}
//send to clients
SpectatorVec spectators;
map.getSpectators(spectators, creature->getPosition(), true, true);
if (creatureType == CREATURETYPE_SUMMON_OTHERS) {
for (Creature* spectator : spectators) {
Player* player = spectator->getPlayer();
if (masterPlayer == player) {
player->sendCreatureType(creatureId, CREATURETYPE_SUMMON_OWN);
} else {
player->sendCreatureType(creatureId, creatureType);
}
}
} else {
for (Creature* spectator : spectators) {
spectator->getPlayer()->sendCreatureType(creatureId, creatureType);
}
}
}
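// Deducts whole elapsed days from the account's remaining premium time,
// keeping the sub-day remainder in lastDay so partial days are not lost.
// A premiumDays value of uint16 max denotes non-expiring premium and is
// left untouched.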
void Game::updatePremium(Account& account)
{
bool save = false;
time_t timeNow = time(nullptr);
if (account.premiumDays != 0 && account.premiumDays != std::numeric_limits<uint16_t>::max()) {
if (account.lastDay == 0) {
account.lastDay = timeNow;
save = true;
} else {
uint32_t days = (timeNow - account.lastDay) / 86400;
if (days > 0) {
if (days >= account.premiumDays) {
account.premiumDays = 0;
account.lastDay = 0;
} else {
account.premiumDays -= days;
time_t remainder = (timeNow - account.lastDay) % 86400;
account.lastDay = timeNow - remainder;
}
save = true;
}
}
} else if (account.lastDay != 0) {
account.lastDay = 0;
save = true;
}
if (save && !IOLoginData::saveAccount(account)) {
std::cout << "> ERROR: Failed to save account: " << account.name << "!" << std::endl;
}
}
void Game::loadMotdNum()
{
Database& db = Database::getInstance();
DBResult_ptr result = db.storeQuery("SELECT `value` FROM `server_config` WHERE `config` = 'motd_num'");
if (result) {
motdNum = result->getNumber<uint32_t>("value");
} else {
db.executeQuery("INSERT INTO `server_config` (`config`, `value`) VALUES ('motd_num', '0')");
}
result = db.storeQuery("SELECT `value` FROM `server_config` WHERE `config` = 'motd_hash'");
if (result) {
motdHash = result->getString("value");
if (motdHash != transformToSHA1(g_config.getString(ConfigManager::MOTD))) {
++motdNum;
}
} else {
db.executeQuery("INSERT INTO `server_config` (`config`, `value`) VALUES ('motd_hash', '')");
}
}
void Game::saveMotdNum() const
{
Database& db = Database::getInstance();
std::ostringstream query;
query << "UPDATE `server_config` SET `value` = '" << motdNum << "' WHERE `config` = 'motd_num'";
db.executeQuery(query.str());
query.str(std::string());
query << "UPDATE `server_config` SET `value` = '" << transformToSHA1(g_config.getString(ConfigManager::MOTD)) << "' WHERE `config` = 'motd_hash'";
db.executeQuery(query.str());
}
void Game::checkPlayersRecord()
{
const size_t playersOnline = getPlayersOnline();
if (playersOnline > playersRecord) {
uint32_t previousRecord = playersRecord;
playersRecord = playersOnline;
for (auto& it : g_globalEvents->getEventMap(GLOBALEVENT_RECORD)) {
it.second.executeRecord(playersRecord, previousRecord);
}
updatePlayersRecord();
}
}
void Game::updatePlayersRecord() const
{
Database& db = Database::getInstance();
std::ostringstream query;
query << "UPDATE `server_config` SET `value` = '" << playersRecord << "' WHERE `config` = 'players_record'";
db.executeQuery(query.str());
}
void Game::loadPlayersRecord()
{
Database& db = Database::getInstance();
DBResult_ptr result = db.storeQuery("SELECT `value` FROM `server_config` WHERE `config` = 'players_record'");
if (result) {
playersRecord = result->getNumber<uint32_t>("value");
} else {
db.executeQuery("INSERT INTO `server_config` (`config`, `value`) VALUES ('players_record', '0')");
}
}
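// Returns the experience multiplier for the given level: the global
// RATE_EXPERIENCE when stages are disabled, otherwise the stage entry,
// with levels past the final open-ended stage clamped to it.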
uint64_t Game::getExperienceStage(uint32_t level)
{
if (!stagesEnabled) {
return g_config.getNumber(ConfigManager::RATE_EXPERIENCE);
}
if (useLastStageLevel && level >= lastStageLevel) {
return stages[lastStageLevel];
}
return stages[level];
}
bool Game::loadExperienceStages()
{
pugi::xml_document doc;
pugi::xml_parse_result result = doc.load_file("data/XML/stages.xml");
if (!result) {
printXMLError("Error - Game::loadExperienceStages", "data/XML/stages.xml", result);
return false;
}
for (auto stageNode : doc.child("stages").children()) {
if (strcasecmp(stageNode.name(), "config") == 0) {
stagesEnabled = stageNode.attribute("enabled").as_bool();
} else {
uint32_t minLevel, maxLevel, multiplier;
pugi::xml_attribute minLevelAttribute = stageNode.attribute("minlevel");
if (minLevelAttribute) {
minLevel = pugi::cast<uint32_t>(minLevelAttribute.value());
} else {
minLevel = 1;
}
pugi::xml_attribute maxLevelAttribute = stageNode.attribute("maxlevel");
if (maxLevelAttribute) {
maxLevel = pugi::cast<uint32_t>(maxLevelAttribute.value());
} else {
maxLevel = 0;
lastStageLevel = minLevel;
useLastStageLevel = true;
}
pugi::xml_attribute multiplierAttribute = stageNode.attribute("multiplier");
if (multiplierAttribute) {
multiplier = pugi::cast<uint32_t>(multiplierAttribute.value());
} else {
multiplier = 1;
}
if (useLastStageLevel) {
stages[lastStageLevel] = multiplier;
} else {
for (uint32_t i = minLevel; i <= maxLevel; ++i) {
stages[i] = multiplier;
}
}
}
}
return true;
}
void Game::playerInviteToParty(uint32_t playerId, uint32_t invitedId)
{
if (playerId == invitedId) {
return;
}
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
Player* invitedPlayer = getPlayerByID(invitedId);
if (!invitedPlayer || player->isInviting(invitedPlayer)) {
return;
}
if (invitedPlayer->getParty()) {
std::ostringstream ss;
ss << invitedPlayer->getName() << " is already in a party.";
player->sendTextMessage(MESSAGE_INFO_DESCR, ss.str());
return;
}
Party* party = player->getParty();
if (!party) {
party = new Party(player);
} else if (party->getLeader() != player) {
return;
}
party->invitePlayer(*invitedPlayer);
}
void Game::playerJoinParty(uint32_t playerId, uint32_t leaderId)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
Player* leader = getPlayerByID(leaderId);
if (!leader || !leader->isInviting(player)) {
return;
}
Party* party = leader->getParty();
if (!party || party->getLeader() != leader) {
return;
}
if (player->getParty()) {
player->sendTextMessage(MESSAGE_INFO_DESCR, "You are already in a party.");
return;
}
party->joinParty(*player);
}
void Game::playerRevokePartyInvitation(uint32_t playerId, uint32_t invitedId)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
Party* party = player->getParty();
if (!party || party->getLeader() != player) {
return;
}
Player* invitedPlayer = getPlayerByID(invitedId);
if (!invitedPlayer || !player->isInviting(invitedPlayer)) {
return;
}
party->revokeInvitation(*invitedPlayer);
}
void Game::playerPassPartyLeadership(uint32_t playerId, uint32_t newLeaderId)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
Party* party = player->getParty();
if (!party || party->getLeader() != player) {
return;
}
Player* newLeader = getPlayerByID(newLeaderId);
if (!newLeader || !player->isPartner(newLeader)) {
return;
}
party->passPartyLeadership(newLeader);
}
void Game::playerLeaveParty(uint32_t playerId)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
Party* party = player->getParty();
if (!party || player->hasCondition(CONDITION_INFIGHT)) {
return;
}
party->leaveParty(player);
}
void Game::playerEnableSharedPartyExperience(uint32_t playerId, bool sharedExpActive)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
Party* party = player->getParty();
if (!party || (player->hasCondition(CONDITION_INFIGHT) && player->getZone() != ZONE_PROTECTION)) {
return;
}
party->setSharedExperience(player, sharedExpActive);
}
void Game::sendGuildMotd(uint32_t playerId)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
Guild* guild = player->getGuild();
if (guild) {
player->sendChannelMessage("Message of the Day", guild->getMotd(), TALKTYPE_CHANNEL_R1, CHANNEL_GUILD);
}
}
void Game::kickPlayer(uint32_t playerId, bool displayEffect)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
player->kickPlayer(displayEffect);
}
void Game::playerReportRuleViolation(uint32_t playerId, const std::string& targetName, uint8_t reportType, uint8_t reportReason, const std::string& comment, const std::string& translation)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
g_events->eventPlayerOnReportRuleViolation(player, targetName, reportType, reportReason, comment, translation);
}
void Game::playerReportBug(uint32_t playerId, const std::string& message, const Position& position, uint8_t category)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
g_events->eventPlayerOnReportBug(player, message, position, category);
}
void Game::playerDebugAssert(uint32_t playerId, const std::string& assertLine, const std::string& date, const std::string& description, const std::string& comment)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
// TODO: move debug assertions to database
FILE* file = fopen("client_assertions.txt", "a");
if (file) {
fprintf(file, "----- %s - %s (%s) -----\n", formatDate(time(nullptr)).c_str(), player->getName().c_str(), convertIPToString(player->getIP()).c_str());
fprintf(file, "%s\n%s\n%s\n%s\n", assertLine.c_str(), date.c_str(), description.c_str(), comment.c_str());
fclose(file);
}
}
void Game::playerLeaveMarket(uint32_t playerId)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
player->setInMarket(false);
}
void Game::playerBrowseMarket(uint32_t playerId, uint16_t spriteId)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
if (!player->isInMarket()) {
return;
}
const ItemType& it = Item::items.getItemIdByClientId(spriteId);
if (it.id == 0) {
return;
}
if (it.wareId == 0) {
return;
}
const MarketOfferList& buyOffers = IOMarket::getActiveOffers(MARKETACTION_BUY, it.id);
const MarketOfferList& sellOffers = IOMarket::getActiveOffers(MARKETACTION_SELL, it.id);
player->sendMarketBrowseItem(it.id, buyOffers, sellOffers);
player->sendMarketDetail(it.id);
}
void Game::playerBrowseMarketOwnOffers(uint32_t playerId)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
if (!player->isInMarket()) {
return;
}
const MarketOfferList& buyOffers = IOMarket::getOwnOffers(MARKETACTION_BUY, player->getGUID());
const MarketOfferList& sellOffers = IOMarket::getOwnOffers(MARKETACTION_SELL, player->getGUID());
player->sendMarketBrowseOwnOffers(buyOffers, sellOffers);
}
void Game::playerBrowseMarketOwnHistory(uint32_t playerId)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
if (!player->isInMarket()) {
return;
}
const HistoryMarketOfferList& buyOffers = IOMarket::getOwnHistory(MARKETACTION_BUY, player->getGUID());
const HistoryMarketOfferList& sellOffers = IOMarket::getOwnHistory(MARKETACTION_SELL, player->getGUID());
player->sendMarketBrowseOwnHistory(buyOffers, sellOffers);
}
void Game::playerCreateMarketOffer(uint32_t playerId, uint8_t type, uint16_t spriteId, uint16_t amount, uint32_t price, bool anonymous)
{
if (amount == 0 || amount > 64000) {
return;
}
if (price == 0 || price > 999999999) {
return;
}
if (type != MARKETACTION_BUY && type != MARKETACTION_SELL) {
return;
}
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
if (!player->isInMarket()) {
return;
}
if (g_config.getBoolean(ConfigManager::MARKET_PREMIUM) && !player->isPremium()) {
player->sendMarketLeave();
return;
}
const ItemType& itt = Item::items.getItemIdByClientId(spriteId);
if (itt.id == 0 || itt.wareId == 0) {
return;
}
const ItemType& it = Item::items.getItemIdByClientId(itt.wareId);
if (it.id == 0 || it.wareId == 0) {
return;
}
if (!it.stackable && amount > 2000) {
return;
}
const uint32_t maxOfferCount = g_config.getNumber(ConfigManager::MAX_MARKET_OFFERS_AT_A_TIME_PER_PLAYER);
if (maxOfferCount != 0 && IOMarket::getPlayerOfferCount(player->getGUID()) >= maxOfferCount) {
return;
}
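// The market fee is 1% of the total price, clamped to the range [20, 1000] gold.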
uint64_t fee = (price / 100.) * amount;
if (fee < 20) {
fee = 20;
} else if (fee > 1000) {
fee = 1000;
}
if (type == MARKETACTION_SELL) {
if (fee > player->bankBalance) {
return;
}
DepotChest* depotChest = player->getDepotChest(player->getLastDepotId(), false);
if (!depotChest) {
return;
}
std::forward_list<Item*> itemList = getMarketItemList(it.wareId, amount, depotChest, player->getInbox());
if (itemList.empty()) {
return;
}
if (it.stackable) {
uint16_t tmpAmount = amount;
for (Item* item : itemList) {
uint16_t removeCount = std::min<uint16_t>(tmpAmount, item->getItemCount());
tmpAmount -= removeCount;
internalRemoveItem(item, removeCount);
if (tmpAmount == 0) {
break;
}
}
} else {
for (Item* item : itemList) {
internalRemoveItem(item);
}
}
player->bankBalance -= fee;
} else {
uint64_t totalPrice = static_cast<uint64_t>(price) * amount;
totalPrice += fee;
if (totalPrice > player->bankBalance) {
return;
}
player->bankBalance -= totalPrice;
}
IOMarket::createOffer(player->getGUID(), static_cast<MarketAction_t>(type), it.id, amount, price, anonymous);
player->sendMarketEnter(player->getLastDepotId());
const MarketOfferList& buyOffers = IOMarket::getActiveOffers(MARKETACTION_BUY, it.id);
const MarketOfferList& sellOffers = IOMarket::getActiveOffers(MARKETACTION_SELL, it.id);
player->sendMarketBrowseItem(it.id, buyOffers, sellOffers);
}
void Game::playerCancelMarketOffer(uint32_t playerId, uint32_t timestamp, uint16_t counter)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
if (!player->isInMarket()) {
return;
}
MarketOfferEx offer = IOMarket::getOfferByCounter(timestamp, counter);
if (offer.id == 0 || offer.playerId != player->getGUID()) {
return;
}
if (offer.type == MARKETACTION_BUY) {
player->bankBalance += static_cast<uint64_t>(offer.price) * offer.amount;
player->sendMarketEnter(player->getLastDepotId());
} else {
const ItemType& it = Item::items[offer.itemId];
if (it.id == 0) {
return;
}
if (it.stackable) {
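// Return the items to the inbox in stacks of at most 100, the maximum stack size.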
uint16_t tmpAmount = offer.amount;
while (tmpAmount > 0) {
int32_t stackCount = std::min<int32_t>(100, tmpAmount);
Item* item = Item::CreateItem(it.id, stackCount);
if (internalAddItem(player->getInbox(), item, INDEX_WHEREEVER, FLAG_NOLIMIT) != RETURNVALUE_NOERROR) {
delete item;
break;
}
tmpAmount -= stackCount;
}
} else {
int32_t subType;
if (it.charges != 0) {
subType = it.charges;
} else {
subType = -1;
}
for (uint16_t i = 0; i < offer.amount; ++i) {
Item* item = Item::CreateItem(it.id, subType);
if (internalAddItem(player->getInbox(), item, INDEX_WHEREEVER, FLAG_NOLIMIT) != RETURNVALUE_NOERROR) {
delete item;
break;
}
}
}
}
IOMarket::moveOfferToHistory(offer.id, OFFERSTATE_CANCELLED);
offer.amount = 0;
offer.timestamp += g_config.getNumber(ConfigManager::MARKET_OFFER_DURATION);
player->sendMarketCancelOffer(offer);
player->sendMarketEnter(player->getLastDepotId());
}
void Game::playerAcceptMarketOffer(uint32_t playerId, uint32_t timestamp, uint16_t counter, uint16_t amount)
{
if (amount == 0 || amount > 64000) {
return;
}
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
if (!player->isInMarket()) {
return;
}
MarketOfferEx offer = IOMarket::getOfferByCounter(timestamp, counter);
if (offer.id == 0) {
return;
}
if (amount > offer.amount) {
return;
}
const ItemType& it = Item::items[offer.itemId];
if (it.id == 0) {
return;
}
uint64_t totalPrice = static_cast<uint64_t>(offer.price) * amount;
if (offer.type == MARKETACTION_BUY) {
DepotChest* depotChest = player->getDepotChest(player->getLastDepotId(), false);
if (!depotChest) {
return;
}
std::forward_list<Item*> itemList = getMarketItemList(it.wareId, amount, depotChest, player->getInbox());
if (itemList.empty()) {
return;
}
Player* buyerPlayer = getPlayerByGUID(offer.playerId);
if (!buyerPlayer) {
buyerPlayer = new Player(nullptr);
if (!IOLoginData::loadPlayerById(buyerPlayer, offer.playerId)) {
delete buyerPlayer;
return;
}
}
if (it.stackable) {
uint16_t tmpAmount = amount;
for (Item* item : itemList) {
uint16_t removeCount = std::min<uint16_t>(tmpAmount, item->getItemCount());
tmpAmount -= removeCount;
internalRemoveItem(item, removeCount);
if (tmpAmount == 0) {
break;
}
}
} else {
for (Item* item : itemList) {
internalRemoveItem(item);
}
}
player->bankBalance += totalPrice;
if (it.stackable) {
uint16_t tmpAmount = amount;
while (tmpAmount > 0) {
uint16_t stackCount = std::min<uint16_t>(100, tmpAmount);
Item* item = Item::CreateItem(it.id, stackCount);
if (internalAddItem(buyerPlayer->getInbox(), item, INDEX_WHEREEVER, FLAG_NOLIMIT) != RETURNVALUE_NOERROR) {
delete item;
break;
}
tmpAmount -= stackCount;
}
} else {
int32_t subType;
if (it.charges != 0) {
subType = it.charges;
} else {
subType = -1;
}
for (uint16_t i = 0; i < amount; ++i) {
Item* item = Item::CreateItem(it.id, subType);
if (internalAddItem(buyerPlayer->getInbox(), item, INDEX_WHEREEVER, FLAG_NOLIMIT) != RETURNVALUE_NOERROR) {
delete item;
break;
}
}
}
if (buyerPlayer->isOffline()) {
IOLoginData::savePlayer(buyerPlayer);
delete buyerPlayer;
} else {
buyerPlayer->onReceiveMail();
}
} else {
if (totalPrice > player->bankBalance) {
return;
}
player->bankBalance -= totalPrice;
if (it.stackable) {
uint16_t tmpAmount = amount;
while (tmpAmount > 0) {
uint16_t stackCount = std::min<uint16_t>(100, tmpAmount);
Item* item = Item::CreateItem(it.id, stackCount);
if (internalAddItem(player->getInbox(), item, INDEX_WHEREEVER, FLAG_NOLIMIT) != RETURNVALUE_NOERROR) {
delete item;
break;
}
tmpAmount -= stackCount;
}
} else {
int32_t subType;
if (it.charges != 0) {
subType = it.charges;
} else {
subType = -1;
}
for (uint16_t i = 0; i < amount; ++i) {
Item* item = Item::CreateItem(it.id, subType);
if (internalAddItem(player->getInbox(), item, INDEX_WHEREEVER, FLAG_NOLIMIT) != RETURNVALUE_NOERROR) {
delete item;
break;
}
}
}
Player* sellerPlayer = getPlayerByGUID(offer.playerId);
if (sellerPlayer) {
sellerPlayer->bankBalance += totalPrice;
} else {
IOLoginData::increaseBankBalance(offer.playerId, totalPrice);
}
player->onReceiveMail();
}
const int32_t marketOfferDuration = g_config.getNumber(ConfigManager::MARKET_OFFER_DURATION);
IOMarket::appendHistory(player->getGUID(), (offer.type == MARKETACTION_BUY ? MARKETACTION_SELL : MARKETACTION_BUY), offer.itemId, amount, offer.price, offer.timestamp + marketOfferDuration, OFFERSTATE_ACCEPTEDEX);
IOMarket::appendHistory(offer.playerId, offer.type, offer.itemId, amount, offer.price, offer.timestamp + marketOfferDuration, OFFERSTATE_ACCEPTED);
offer.amount -= amount;
if (offer.amount == 0) {
IOMarket::deleteOffer(offer.id);
} else {
IOMarket::acceptOffer(offer.id, amount);
}
player->sendMarketEnter(player->getLastDepotId());
offer.timestamp += marketOfferDuration;
player->sendMarketAcceptOffer(offer);
}
void Game::parsePlayerExtendedOpcode(uint32_t playerId, uint8_t opcode, const std::string& buffer)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
for (CreatureEvent* creatureEvent : player->getCreatureEvents(CREATURE_EVENT_EXTENDED_OPCODE)) {
creatureEvent->executeExtendedOpcode(player, opcode, buffer);
}
}
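// Walks the player's depot chest and inbox (descending into nested
// containers), collecting items with the requested ware id until
// sufficientCount pieces are found; returns an empty list if the depot
// does not hold enough.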
std::forward_list<Item*> Game::getMarketItemList(uint16_t wareId, uint16_t sufficientCount, DepotChest* depotChest, Inbox* inbox)
{
std::forward_list<Item*> itemList;
uint16_t count = 0;
std::list<Container*> containers { depotChest, inbox };
do {
Container* container = containers.front();
containers.pop_front();
for (Item* item : container->getItemList()) {
Container* c = item->getContainer();
if (c && !c->empty()) {
containers.push_back(c);
continue;
}
const ItemType& itemType = Item::items[item->getID()];
if (itemType.wareId != wareId) {
continue;
}
if (c && (!itemType.isContainer() || c->capacity() != itemType.maxItems)) {
continue;
}
if (!item->hasMarketAttributes()) {
continue;
}
itemList.push_front(item);
count += Item::countByType(item, -1);
if (count >= sufficientCount) {
return itemList;
}
}
} while (!containers.empty());
return std::forward_list<Item*>();
}
void Game::forceAddCondition(uint32_t creatureId, Condition* condition)
{
Creature* creature = getCreatureByID(creatureId);
if (!creature) {
delete condition;
return;
}
creature->addCondition(condition, true);
}
void Game::forceRemoveCondition(uint32_t creatureId, ConditionType_t type)
{
Creature* creature = getCreatureByID(creatureId);
if (!creature) {
return;
}
creature->removeCondition(type, true);
}
void Game::sendOfflineTrainingDialog(Player* player)
{
if (!player) {
return;
}
if (!player->hasModalWindowOpen(offlineTrainingWindow.id)) {
player->sendModalWindow(offlineTrainingWindow);
}
}
void Game::playerAnswerModalWindow(uint32_t playerId, uint32_t modalWindowId, uint8_t button, uint8_t choice)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
if (!player->hasModalWindowOpen(modalWindowId)) {
return;
}
player->onModalWindowHandled(modalWindowId);
// offline training, hardcoded
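// The offline-training window uses the reserved id
// std::numeric_limits<uint32_t>::max(); button 1 confirms the chosen skill
// and puts the player to sleep in their bed.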
if (modalWindowId == std::numeric_limits<uint32_t>::max()) {
if (button == 1) {
if (choice == SKILL_SWORD || choice == SKILL_AXE || choice == SKILL_CLUB || choice == SKILL_DISTANCE || choice == SKILL_MAGLEVEL) {
BedItem* bedItem = player->getBedItem();
if (bedItem && bedItem->sleep(player)) {
player->setOfflineTrainingSkill(choice);
return;
}
}
} else {
player->sendTextMessage(MESSAGE_EVENT_ADVANCE, "Offline training aborted.");
}
player->setBedItem(nullptr);
} else {
for (auto creatureEvent : player->getCreatureEvents(CREATURE_EVENT_MODALWINDOW)) {
creatureEvent->executeModalWindow(player, modalWindowId, button, choice);
}
}
}
void Game::addPlayer(Player* player)
{
const std::string& lowercase_name = asLowerCaseString(player->getName());
mappedPlayerNames[lowercase_name] = player;
mappedPlayerGuids[player->getGUID()] = player;
wildcardTree.insert(lowercase_name);
players[player->getID()] = player;
}
void Game::removePlayer(Player* player)
{
const std::string& lowercase_name = asLowerCaseString(player->getName());
mappedPlayerNames.erase(lowercase_name);
mappedPlayerGuids.erase(player->getGUID());
wildcardTree.remove(lowercase_name);
players.erase(player->getID());
}
void Game::addNpc(Npc* npc)
{
npcs[npc->getID()] = npc;
}
void Game::removeNpc(Npc* npc)
{
npcs.erase(npc->getID());
}
void Game::addMonster(Monster* monster)
{
monsters[monster->getID()] = monster;
}
void Game::removeMonster(Monster* monster)
{
monsters.erase(monster->getID());
}
Guild* Game::getGuild(uint32_t id) const
{
auto it = guilds.find(id);
if (it == guilds.end()) {
return nullptr;
}
return it->second;
}
void Game::addGuild(Guild* guild)
{
guilds[guild->getId()] = guild;
}
void Game::removeGuild(uint32_t guildId)
{
guilds.erase(guildId);
}
void Game::decreaseBrowseFieldRef(const Position& pos)
{
Tile* tile = map.getTile(pos.x, pos.y, pos.z);
if (!tile) {
return;
}
auto it = browseFields.find(tile);
if (it != browseFields.end()) {
it->second->decrementReferenceCounter();
}
}
void Game::internalRemoveItems(std::vector<Item*> itemList, uint32_t amount, bool stackable)
{
if (stackable) {
for (Item* item : itemList) {
if (item->getItemCount() > amount) {
internalRemoveItem(item, amount);
break;
} else {
amount -= item->getItemCount();
internalRemoveItem(item);
}
}
} else {
for (Item* item : itemList) {
internalRemoveItem(item);
}
}
}
BedItem* Game::getBedBySleeper(uint32_t guid) const
{
auto it = bedSleepersMap.find(guid);
if (it == bedSleepersMap.end()) {
return nullptr;
}
return it->second;
}
void Game::setBedSleeper(BedItem* bed, uint32_t guid)
{
bedSleepersMap[guid] = bed;
}
void Game::removeBedSleeper(uint32_t guid)
{
auto it = bedSleepersMap.find(guid);
if (it != bedSleepersMap.end()) {
bedSleepersMap.erase(it);
}
}
Item* Game::getUniqueItem(uint16_t uniqueId)
{
auto it = uniqueItems.find(uniqueId);
if (it == uniqueItems.end()) {
return nullptr;
}
return it->second;
}
bool Game::addUniqueItem(uint16_t uniqueId, Item* item)
{
auto result = uniqueItems.emplace(uniqueId, item);
if (!result.second) {
std::cout << "Duplicate unique id: " << uniqueId << std::endl;
}
return result.second;
}
void Game::removeUniqueItem(uint16_t uniqueId)
{
auto it = uniqueItems.find(uniqueId);
if (it != uniqueItems.end()) {
uniqueItems.erase(it);
}
}
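// Reloads the requested subsystem. RELOAD_TYPE_SCRIPTS clears the registries
// owned by revscriptsys and re-runs the scripts directory; the default case
// reloads everything.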
bool Game::reload(ReloadTypes_t reloadType)
{
switch (reloadType) {
case RELOAD_TYPE_ACTIONS: return g_actions->reload();
case RELOAD_TYPE_CHAT: return g_chat->load();
case RELOAD_TYPE_CONFIG: return g_config.reload();
case RELOAD_TYPE_CREATURESCRIPTS: {
g_creatureEvents->reload();
g_creatureEvents->removeInvalidEvents();
return true;
}
case RELOAD_TYPE_EVENTS: return g_events->load();
case RELOAD_TYPE_GLOBALEVENTS: return g_globalEvents->reload();
case RELOAD_TYPE_ITEMS: return Item::items.reload();
case RELOAD_TYPE_MONSTERS: return g_monsters.reload();
case RELOAD_TYPE_MOUNTS: return mounts.reload();
case RELOAD_TYPE_MOVEMENTS: return g_moveEvents->reload();
case RELOAD_TYPE_NPCS: {
Npcs::reload();
return true;
}
case RELOAD_TYPE_QUESTS: return quests.reload();
case RELOAD_TYPE_RAIDS: return raids.reload() && raids.startup();
case RELOAD_TYPE_SPELLS: {
if (!g_spells->reload()) {
std::cout << "[Error - Game::reload] Failed to reload spells." << std::endl;
std::terminate();
} else if (!g_monsters.reload()) {
std::cout << "[Error - Game::reload] Failed to reload monsters." << std::endl;
std::terminate();
}
return true;
}
case RELOAD_TYPE_TALKACTIONS: return g_talkActions->reload();
case RELOAD_TYPE_WEAPONS: {
bool results = g_weapons->reload();
g_weapons->loadDefaults();
return results;
}
case RELOAD_TYPE_SCRIPTS: {
// The commented-out reloads below are TODO until revscriptsys covers them.
g_actions->clear(true);
g_creatureEvents->clear(true);
g_moveEvents->clear(true);
g_talkActions->clear(true);
g_globalEvents->clear(true);
g_weapons->clear(true);
g_weapons->loadDefaults();
g_spells->clear(true);
g_scripts->loadScripts("scripts", false, true);
g_creatureEvents->removeInvalidEvents();
/*
Npcs::reload();
raids.reload() && raids.startup();
Item::items.reload();
quests.reload();
mounts.reload();
g_config.reload();
g_events->load();
g_chat->load();
*/
return true;
}
default: {
if (!g_spells->reload()) {
std::cout << "[Error - Game::reload] Failed to reload spells." << std::endl;
std::terminate();
} else if (!g_monsters.reload()) {
std::cout << "[Error - Game::reload] Failed to reload monsters." << std::endl;
std::terminate();
}
g_actions->reload();
g_config.reload();
g_creatureEvents->reload();
g_monsters.reload();
g_moveEvents->reload();
Npcs::reload();
raids.reload() && raids.startup();
g_talkActions->reload();
Item::items.reload();
g_weapons->reload();
g_weapons->clear(true);
g_weapons->loadDefaults();
quests.reload();
mounts.reload();
g_globalEvents->reload();
g_events->load();
g_chat->load();
g_actions->clear(true);
g_creatureEvents->clear(true);
g_moveEvents->clear(true);
g_talkActions->clear(true);
g_globalEvents->clear(true);
g_spells->clear(true);
g_scripts->loadScripts("scripts", false, true);
g_creatureEvents->removeInvalidEvents();
return true;
}
}
return true;
}
| 1 | 17,232 | checking health again? I think it is not necessary. | otland-forgottenserver | cpp |
@@ -442,7 +442,18 @@ configRetry:
log.Infof("Starting the Typha connection")
err := typhaConnection.Start(context.Background())
if err != nil {
- log.WithError(err).Fatal("Failed to connect to Typha")
+ log.WithError(err).Error("Failed to connect to Typha. Retrying...")
+ startTime := time.Now()
+ for err != nil && time.Since(startTime) < 30*time.Second {
+ // Set Ready to false and Live to true when unable to connect to typha
+ healthAggregator.Report(healthName, &health.HealthReport{Live: true, Ready: false})
+ err = typhaConnection.Start(context.Background())
+ log.WithError(err).Debug("Retrying to start Typha")
+ time.Sleep(1 * time.Second)
+ }
+ if err != nil {
+ log.WithError(err).Fatal("Failed to connect to Typha")
+ }
}
go func() {
typhaConnection.Finished.Wait() | 1 | // Copyright (c) 2017-2019 Tigera, Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package daemon
import (
"context"
"errors"
"fmt"
"math/rand"
"net/http"
"os"
"os/exec"
"os/signal"
"runtime"
"runtime/debug"
"syscall"
"time"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promhttp"
log "github.com/sirupsen/logrus"
"k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
"github.com/projectcalico/felix/buildinfo"
"github.com/projectcalico/felix/calc"
"github.com/projectcalico/felix/config"
_ "github.com/projectcalico/felix/config"
dp "github.com/projectcalico/felix/dataplane"
"github.com/projectcalico/felix/logutils"
"github.com/projectcalico/felix/policysync"
"github.com/projectcalico/felix/proto"
"github.com/projectcalico/felix/statusrep"
"github.com/projectcalico/felix/usagerep"
apiv3 "github.com/projectcalico/libcalico-go/lib/apis/v3"
"github.com/projectcalico/libcalico-go/lib/backend"
bapi "github.com/projectcalico/libcalico-go/lib/backend/api"
"github.com/projectcalico/libcalico-go/lib/backend/model"
"github.com/projectcalico/libcalico-go/lib/backend/syncersv1/felixsyncer"
"github.com/projectcalico/libcalico-go/lib/backend/syncersv1/updateprocessors"
"github.com/projectcalico/libcalico-go/lib/backend/watchersyncer"
cerrors "github.com/projectcalico/libcalico-go/lib/errors"
"github.com/projectcalico/libcalico-go/lib/health"
lclogutils "github.com/projectcalico/libcalico-go/lib/logutils"
"github.com/projectcalico/libcalico-go/lib/set"
"github.com/projectcalico/pod2daemon/binder"
"github.com/projectcalico/typha/pkg/syncclient"
)
const usage = `Felix, the Calico per-host daemon.
Usage:
calico-felix [options]
Options:
-c --config-file=<filename> Config file to load [default: /etc/calico/felix.cfg].
--version Print the version and exit.
`
const (
// Our default value for GOGC if it is not set. This is the percentage that heap usage must
// grow by to trigger a garbage collection. Go's default is 100, meaning that 50% of the
// heap can be lost to garbage. We reduce it to this value to trade increased CPU usage for
// lower occupancy.
defaultGCPercent = 20
// String sent on the failure report channel to indicate we're shutting down for config
// change.
reasonConfigChanged = "config changed"
// Process return code used to report a config change. This is the same as the code used
// by SIGHUP, which means that the wrapper script also restarts Felix on a SIGHUP.
configChangedRC = 129
)
// Run is the entry point to run a Felix instance.
//
// Its main role is to sequence Felix's startup by:
//
// Initialising early logging config (log format and early debug settings).
//
// Parsing command line parameters.
//
// Loading datastore configuration from the environment or config file.
//
// Loading more configuration from the datastore (this is retried until success).
//
// Starting the configured internal (golang) or external dataplane driver.
//
// Starting the background processing goroutines, which load and keep in sync with the
// state from the datastore, the "calculation graph".
//
// Starting the usage reporting and prometheus metrics endpoint threads (if configured).
//
// Then, it defers to monitorAndManageShutdown(), which blocks until one of the components
// fails, then attempts a graceful shutdown. At that point, all the processing is in
// background goroutines.
//
// To avoid having to maintain rarely-used code paths, Felix handles updates to its
// main config parameters by exiting and allowing itself to be restarted by the init
// daemon.
func Run(configFile string) {
// Go's RNG is not seeded by default. Do that now.
rand.Seed(time.Now().UTC().UnixNano())
// Special-case handling for environment variable-configured logging:
// Initialise early so we can trace out config parsing.
logutils.ConfigureEarlyLogging()
ctx := context.Background()
if os.Getenv("GOGC") == "" {
// Tune the GC to trade off a little extra CPU usage for significantly lower
// occupancy at high scale. This is worthwhile because Felix runs per-host so
// any occupancy improvement is multiplied by the number of hosts.
log.Debugf("No GOGC value set, defaulting to %d%%.", defaultGCPercent)
debug.SetGCPercent(defaultGCPercent)
}
buildInfoLogCxt := log.WithFields(log.Fields{
"version": buildinfo.GitVersion,
"buildDate": buildinfo.BuildDate,
"gitCommit": buildinfo.GitRevision,
"GOMAXPROCS": runtime.GOMAXPROCS(0),
})
buildInfoLogCxt.Info("Felix starting up")
// Health monitoring, for liveness and readiness endpoints. The following loop can take a
// while before the datastore reports itself as ready - for example when there is data that
// needs to be migrated from a previous version - and we still want Felix to report
// itself as live (but not ready) while we are waiting for that. So we create the
// aggregator upfront and will start serving health status over HTTP as soon as we see _any_
// config that indicates that.
healthAggregator := health.NewHealthAggregator()
const healthName = "felix-startup"
// Register this function as a reporter of liveness and readiness, with no timeout.
healthAggregator.RegisterReporter(healthName, &health.HealthReport{Live: true, Ready: true}, 0)
// Load the configuration from all the different sources including the
// datastore and merge. Keep retrying on failure. We'll sit in this
// loop until the datastore is ready.
log.Info("Loading configuration...")
var backendClient bapi.Client
var configParams *config.Config
var typhaAddr string
var numClientsCreated int
configRetry:
for {
if numClientsCreated > 60 {
// If we're in a restart loop, periodically exit (so we can be restarted) since
// - it may solve the problem if there's something wrong with our process
// - it prevents us from leaking connections to the datastore.
exitWithCustomRC(configChangedRC, "Restarting to avoid leaking datastore connections")
}
// Make an initial report that says we're live but not yet ready.
healthAggregator.Report(healthName, &health.HealthReport{Live: true, Ready: false})
// Load locally-defined config, including the datastore connection
// parameters. First the environment variables.
configParams = config.New()
envConfig := config.LoadConfigFromEnvironment(os.Environ())
// Then, the config file.
log.Infof("Loading config file: %v", configFile)
fileConfig, err := config.LoadConfigFile(configFile)
if err != nil {
log.WithError(err).WithField("configFile", configFile).Error(
"Failed to load configuration file")
time.Sleep(1 * time.Second)
continue configRetry
}
// Parse and merge the local config.
configParams.UpdateFrom(envConfig, config.EnvironmentVariable)
if configParams.Err != nil {
log.WithError(configParams.Err).WithField("configFile", configFile).Error(
"Failed to parse configuration environment variable")
time.Sleep(1 * time.Second)
continue configRetry
}
configParams.UpdateFrom(fileConfig, config.ConfigFile)
if configParams.Err != nil {
log.WithError(configParams.Err).WithField("configFile", configFile).Error(
"Failed to parse configuration file")
time.Sleep(1 * time.Second)
continue configRetry
}
// Each time round this loop, check that we're serving health reports if we should
// be, or cancel any existing server if we should not be serving any more.
healthAggregator.ServeHTTP(configParams.HealthEnabled, configParams.HealthHost, configParams.HealthPort)
// We should now have enough config to connect to the datastore
// so we can load the remainder of the config.
datastoreConfig := configParams.DatastoreConfig()
// Can't dump the whole config because it may have sensitive information...
log.WithField("datastore", datastoreConfig.Spec.DatastoreType).Info("Connecting to datastore")
backendClient, err = backend.NewClient(datastoreConfig)
if err != nil {
log.WithError(err).Error("Failed to create datastore client")
time.Sleep(1 * time.Second)
continue configRetry
}
log.Info("Created datastore client")
numClientsCreated++
for {
globalConfig, hostConfig, err := loadConfigFromDatastore(
ctx, backendClient, configParams.FelixHostname)
if err == ErrNotReady {
log.Warn("Waiting for datastore to be initialized (or migrated)")
time.Sleep(1 * time.Second)
healthAggregator.Report(healthName, &health.HealthReport{Live: true, Ready: true})
continue
} else if err != nil {
log.WithError(err).Error("Failed to get config from datastore")
time.Sleep(1 * time.Second)
continue configRetry
}
configParams.UpdateFrom(globalConfig, config.DatastoreGlobal)
configParams.UpdateFrom(hostConfig, config.DatastorePerHost)
break
}
configParams.Validate()
if configParams.Err != nil {
log.WithError(configParams.Err).Error(
"Failed to parse/validate configuration from datastore.")
time.Sleep(1 * time.Second)
continue configRetry
}
// We now have some config flags that affect how we configure the syncer.
// After loading the config from the datastore, reconnect, possibly with new
// config. We don't need to re-load the configuration _again_ because the
// calculation graph will spot if the config has changed since we were initialised.
datastoreConfig = configParams.DatastoreConfig()
backendClient, err = backend.NewClient(datastoreConfig)
if err != nil {
log.WithError(err).Error("Failed to (re)connect to datastore")
time.Sleep(1 * time.Second)
continue configRetry
}
numClientsCreated++
// If we're configured to discover Typha, do that now so we can retry if we fail.
typhaAddr, err = discoverTyphaAddr(configParams)
if err != nil {
log.WithError(err).Error("Typha discovery enabled but discovery failed.")
time.Sleep(1 * time.Second)
continue configRetry
}
break configRetry
}
if numClientsCreated > 2 {
// We don't have a way to close the datastore connection so, if we reconnected after
// a failure to load config, restart felix to avoid leaking connections.
exitWithCustomRC(configChangedRC, "Restarting to avoid leaking datastore connections")
}
// We're now both live and ready.
healthAggregator.Report(healthName, &health.HealthReport{Live: true, Ready: true})
// Enable or disable the health HTTP server according to coalesced config.
healthAggregator.ServeHTTP(configParams.HealthEnabled, configParams.HealthHost, configParams.HealthPort)
// If we get here, we've loaded the configuration successfully.
// Update log levels before we do anything else.
logutils.ConfigureLogging(configParams)
// Since we may have enabled more logging, log with the build context
// again.
buildInfoLogCxt.WithField("config", configParams).Info(
"Successfully loaded configuration.")
// Start up the dataplane driver. This may be the internal go-based driver or an external
// one.
var dpDriver dp.DataplaneDriver
var dpDriverCmd *exec.Cmd
failureReportChan := make(chan string)
configChangedRestartCallback := func() { failureReportChan <- reasonConfigChanged }
dpDriver, dpDriverCmd = dp.StartDataplaneDriver(configParams, healthAggregator, configChangedRestartCallback)
// Initialise the glue logic that connects the calculation graph to/from the dataplane driver.
log.Info("Connect to the dataplane driver.")
var connToUsageRepUpdChan chan map[string]string
if configParams.UsageReportingEnabled {
// Make a channel for the connector to use to send updates to the usage reporter.
// (Otherwise, we pass in a nil channel, which disables such updates.)
connToUsageRepUpdChan = make(chan map[string]string, 1)
}
dpConnector := newConnector(configParams, connToUsageRepUpdChan, backendClient, dpDriver, failureReportChan)
// If enabled, create a server for the policy sync API. This allows clients to connect to
// Felix over a socket and receive policy updates.
var policySyncServer *policysync.Server
var policySyncProcessor *policysync.Processor
var policySyncAPIBinder binder.Binder
calcGraphClientChannels := []chan<- interface{}{dpConnector.ToDataplane}
if configParams.PolicySyncPathPrefix != "" {
log.WithField("policySyncPathPrefix", configParams.PolicySyncPathPrefix).Info(
"Policy sync API enabled. Creating the policy sync server.")
toPolicySync := make(chan interface{})
policySyncUIDAllocator := policysync.NewUIDAllocator()
policySyncProcessor = policysync.NewProcessor(toPolicySync)
policySyncServer = policysync.NewServer(
policySyncProcessor.JoinUpdates,
policySyncUIDAllocator.NextUID,
)
policySyncAPIBinder = binder.NewBinder(configParams.PolicySyncPathPrefix)
policySyncServer.RegisterGrpc(policySyncAPIBinder.Server())
calcGraphClientChannels = append(calcGraphClientChannels, toPolicySync)
}
// Now create the calculation graph, which receives updates from the
// datastore and outputs dataplane updates for the dataplane driver.
//
// The Syncer has its own thread and we use an extra thread for the
// Validator, just to pipeline that part of the calculation; the
// main calculation graph then runs in a single thread for simplicity.
// The output of the calculation graph arrives at the dataplane
// connection via channel.
//
// Syncer -chan-> Validator -chan-> Calc graph -chan-> dataplane
// KVPair KVPair protobufs
// Get a Syncer from the datastore, or a connection to our remote sync daemon, Typha,
// which will feed the calculation graph with updates, bringing Felix into sync.
var syncer Startable
var typhaConnection *syncclient.SyncerClient
syncerToValidator := calc.NewSyncerCallbacksDecoupler()
if typhaAddr != "" {
// Use a remote Syncer, via the Typha server.
log.WithField("addr", typhaAddr).Info("Connecting to Typha.")
typhaConnection = syncclient.New(
typhaAddr,
buildinfo.GitVersion,
configParams.FelixHostname,
fmt.Sprintf("Revision: %s; Build date: %s",
buildinfo.GitRevision, buildinfo.BuildDate),
syncerToValidator,
&syncclient.Options{
ReadTimeout: configParams.TyphaReadTimeout,
WriteTimeout: configParams.TyphaWriteTimeout,
KeyFile: configParams.TyphaKeyFile,
CertFile: configParams.TyphaCertFile,
CAFile: configParams.TyphaCAFile,
ServerCN: configParams.TyphaCN,
ServerURISAN: configParams.TyphaURISAN,
},
)
} else {
// Use the syncer locally.
syncer = felixsyncer.New(backendClient, syncerToValidator)
}
log.WithField("syncer", syncer).Info("Created Syncer")
// Create the ipsets/active policy calculation graph, which will
// do the dynamic calculation of ipset memberships and active policies
// etc.
asyncCalcGraph := calc.NewAsyncCalcGraph(
configParams,
calcGraphClientChannels,
healthAggregator,
)
if configParams.UsageReportingEnabled {
// Usage reporting enabled, add stats collector to graph. When it detects an update
// to the stats, it makes a callback, which we use to send an update on a channel.
// We use a buffered channel here to avoid blocking the calculation graph.
statsChanIn := make(chan calc.StatsUpdate, 1)
statsCollector := calc.NewStatsCollector(func(stats calc.StatsUpdate) error {
statsChanIn <- stats
return nil
})
statsCollector.RegisterWith(asyncCalcGraph.CalcGraph)
// Rather than sending the updates directly to the usage reporting thread, we
// decouple with an extra goroutine. This prevents blocking the calculation graph
// goroutine if the usage reporting goroutine is blocked on IO, for example.
// Using a buffered channel wouldn't work here because the usage reporting
// goroutine can block for a long time on IO so we could build up a long queue.
statsChanOut := make(chan calc.StatsUpdate)
go func() {
var statsChanOutOrNil chan calc.StatsUpdate
var stats calc.StatsUpdate
for {
select {
case stats = <-statsChanIn:
// Got a stats update, activate the output channel.
log.WithField("stats", stats).Debug("Buffer: stats update received")
statsChanOutOrNil = statsChanOut
case statsChanOutOrNil <- stats:
// Passed on the update, deactivate the output channel until
// the next update.
log.WithField("stats", stats).Debug("Buffer: stats update sent")
statsChanOutOrNil = nil
}
}
}()
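// The select above relies on a standard Go idiom: sends on a nil channel
// block forever, so flipping statsChanOutOrNil between nil and statsChanOut
// enables or disables the send case. A minimal standalone sketch of the same
// "latest value" buffer (illustrative only; the function name and int payload
// are hypothetical, not part of Felix):
//
//	func latestValueBuffer(in <-chan int, out chan<- int) {
//	    var outOrNil chan<- int // nil until we have something to send
//	    var v int
//	    for {
//	        select {
//	        case v = <-in:
//	            outOrNil = out // got a value: enable the send case
//	        case outOrNil <- v:
//	            outOrNil = nil // sent it: disable until the next value
//	        }
//	    }
//	}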
usageRep := usagerep.New(
configParams.UsageReportingInitialDelaySecs,
configParams.UsageReportingIntervalSecs,
statsChanOut,
connToUsageRepUpdChan,
)
go usageRep.PeriodicallyReportUsage(context.Background())
} else {
// Usage reporting disabled, but we still want a stats collector for the
// felix_cluster_* metrics. Register a no-op function as the callback.
statsCollector := calc.NewStatsCollector(func(stats calc.StatsUpdate) error {
return nil
})
statsCollector.RegisterWith(asyncCalcGraph.CalcGraph)
}
// Create the validator, which sits between the syncer and the
// calculation graph.
validator := calc.NewValidationFilter(asyncCalcGraph)
// Start the background processing threads.
if syncer != nil {
log.Infof("Starting the datastore Syncer")
syncer.Start()
} else {
log.Infof("Starting the Typha connection")
err := typhaConnection.Start(context.Background())
if err != nil {
log.WithError(err).Fatal("Failed to connect to Typha")
}
go func() {
typhaConnection.Finished.Wait()
failureReportChan <- "Connection to Typha failed"
}()
}
go syncerToValidator.SendTo(validator)
asyncCalcGraph.Start()
log.Infof("Started the processing graph")
var stopSignalChans []chan<- bool
if configParams.EndpointReportingEnabled {
delay := configParams.EndpointReportingDelaySecs
log.WithField("delay", delay).Info(
"Endpoint status reporting enabled, starting status reporter")
dpConnector.statusReporter = statusrep.NewEndpointStatusReporter(
configParams.FelixHostname,
configParams.OpenstackRegion,
dpConnector.StatusUpdatesFromDataplane,
dpConnector.InSync,
dpConnector.datastore,
delay,
delay*180,
)
dpConnector.statusReporter.Start()
}
// Start communicating with the dataplane driver.
dpConnector.Start()
if policySyncProcessor != nil {
log.WithField("policySyncPathPrefix", configParams.PolicySyncPathPrefix).Info(
"Policy sync API enabled. Starting the policy sync server.")
policySyncProcessor.Start()
sc := make(chan bool)
stopSignalChans = append(stopSignalChans, sc)
go policySyncAPIBinder.SearchAndBind(sc)
}
// Send the opening message to the dataplane driver, giving it its
// config.
dpConnector.ToDataplane <- &proto.ConfigUpdate{
Config: configParams.RawValues(),
}
if configParams.PrometheusMetricsEnabled {
log.Info("Prometheus metrics enabled. Starting server.")
gaugeHost := prometheus.NewGauge(prometheus.GaugeOpts{
Name: "felix_host",
Help: "Configured Felix hostname (as a label), typically used in grouping/aggregating stats; the label defaults to the hostname of the host but can be overridden by configuration. The value of the gauge is always set to 1.",
ConstLabels: prometheus.Labels{"host": configParams.FelixHostname},
})
gaugeHost.Set(1)
prometheus.MustRegister(gaugeHost)
go servePrometheusMetrics(configParams)
}
// Register signal handlers to dump memory/CPU profiles.
logutils.RegisterProfilingSignalHandlers(configParams)
// Now monitor the worker process and our worker threads and shut
// down the process gracefully if they fail.
monitorAndManageShutdown(failureReportChan, dpDriverCmd, stopSignalChans)
}
func servePrometheusMetrics(configParams *config.Config) {
http.Handle("/metrics", promhttp.Handler()) // register once up front; http.Handle panics if the same pattern is registered twice
for {
log.WithField("port", configParams.PrometheusMetricsPort).Info("Starting prometheus metrics endpoint")
if configParams.PrometheusGoMetricsEnabled && configParams.PrometheusProcessMetricsEnabled {
log.Info("Including Golang & Process metrics")
} else {
if !configParams.PrometheusGoMetricsEnabled {
log.Info("Discarding Golang metrics")
prometheus.Unregister(prometheus.NewGoCollector())
}
if !configParams.PrometheusProcessMetricsEnabled {
log.Info("Discarding process metrics")
prometheus.Unregister(prometheus.NewProcessCollector(prometheus.ProcessCollectorOpts{}))
}
}
http.Handle("/metrics", promhttp.Handler())
err := http.ListenAndServe(fmt.Sprintf(":%v", configParams.PrometheusMetricsPort), nil)
log.WithError(err).Error(
"Prometheus metrics endpoint failed, trying to restart it...")
time.Sleep(1 * time.Second)
}
}
func monitorAndManageShutdown(failureReportChan <-chan string, driverCmd *exec.Cmd, stopSignalChans []chan<- bool) {
// Ask the runtime to tell us if we get a term/int signal.
signalChan := make(chan os.Signal, 1)
signal.Notify(signalChan, syscall.SIGTERM)
signal.Notify(signalChan, syscall.SIGINT)
signal.Notify(signalChan, syscall.SIGHUP)
// Start a background thread to tell us when the dataplane driver stops.
// If the driver stops unexpectedly, we'll terminate this process.
// If this process needs to stop, we'll kill the driver and then wait
// for the message from the background thread.
driverStoppedC := make(chan bool)
go func() {
if driverCmd == nil {
log.Info("No driver process to monitor")
return
}
err := driverCmd.Wait()
log.WithError(err).Warn("Driver process stopped")
driverStoppedC <- true
}()
// Wait for one of the channels to give us a reason to shut down.
driverAlreadyStopped := driverCmd == nil
receivedFatalSignal := false
var reason string
select {
case <-driverStoppedC:
reason = "Driver stopped"
driverAlreadyStopped = true
case sig := <-signalChan:
if sig == syscall.SIGHUP {
log.Warning("Received a SIGHUP, treating as a request to reload config")
reason = reasonConfigChanged
} else {
reason = fmt.Sprintf("Received OS signal %v", sig)
receivedFatalSignal = true
}
case reason = <-failureReportChan:
}
logCxt := log.WithField("reason", reason)
logCxt.Warn("Felix is shutting down")
// Notify other components to stop.
for _, c := range stopSignalChans {
select {
case c <- true:
default:
}
}
if !driverAlreadyStopped {
// The driver may still be running. Just in case it is
// unresponsive, start a thread to kill this process if we
// don't manage to kill the driver.
logCxt.Info("Driver still running, trying to shut it down...")
giveUpOnSigTerm := make(chan bool)
go func() {
time.Sleep(4 * time.Second)
giveUpOnSigTerm <- true
time.Sleep(1 * time.Second)
log.Fatal("Failed to wait for driver to exit, giving up.")
}()
// Signal to the driver to exit.
driverCmd.Process.Signal(syscall.SIGTERM)
select {
case <-driverStoppedC:
logCxt.Info("Driver shut down after SIGTERM")
case <-giveUpOnSigTerm:
logCxt.Error("Driver did not respond to SIGTERM, sending SIGKILL")
driverCmd.Process.Kill()
<-driverStoppedC
logCxt.Info("Driver shut down after SIGKILL")
}
}
if !receivedFatalSignal {
// We're exiting due to a failure or a config change; wait
// a couple of seconds to ensure that we don't go into a tight
// restart loop (which would make the init daemon in calico/node give
// up trying to restart us).
logCxt.Info("Sleeping to avoid tight restart loop.")
go func() {
time.Sleep(2 * time.Second)
if reason == reasonConfigChanged {
exitWithCustomRC(configChangedRC, "Exiting for config change")
return
}
logCxt.Fatal("Exiting.")
}()
for {
sig := <-signalChan
if sig == syscall.SIGHUP {
logCxt.Warning("Ignoring SIGHUP because we're already shutting down")
continue
}
logCxt.WithField("signal", sig).Fatal(
"Signal received while shutting down, exiting immediately")
}
}
logCxt.Fatal("Exiting immediately")
}
func exitWithCustomRC(rc int, message string) {
// Since log writing is done on a background thread, we set the force-flush flag on this log to ensure that
// all the in-flight logs get written before we exit.
log.WithFields(log.Fields{
"rc": rc,
lclogutils.FieldForceFlush: true,
}).Info(message)
os.Exit(rc)
}
var (
ErrNotReady = errors.New("datastore is not ready or has not been initialised")
)
func loadConfigFromDatastore(
ctx context.Context, client bapi.Client, hostname string,
) (globalConfig, hostConfig map[string]string, err error) {
// The configuration is split over 3 different resource types and 4 different resource
// instances in the v3 data model:
// - ClusterInformation (global): name "default"
// - FelixConfiguration (global): name "default"
// - FelixConfiguration (per-host): name "node.<hostname>"
// - Node (per-host): name: <hostname>
// Get the global values and host specific values separately. We re-use the updateprocessor
// logic to convert the single v3 resource to a set of v1 key/values.
hostConfig = make(map[string]string)
globalConfig = make(map[string]string)
var ready bool
err = getAndMergeConfig(
ctx, client, globalConfig,
apiv3.KindClusterInformation, "default",
updateprocessors.NewClusterInfoUpdateProcessor(),
&ready,
)
if err != nil {
return
}
if !ready {
// The ClusterInformation struct should contain the ready flag; if it is not set, abort.
err = ErrNotReady
return
}
err = getAndMergeConfig(
ctx, client, globalConfig,
apiv3.KindFelixConfiguration, "default",
updateprocessors.NewFelixConfigUpdateProcessor(),
&ready,
)
if err != nil {
return
}
err = getAndMergeConfig(
ctx, client, hostConfig,
apiv3.KindFelixConfiguration, "node."+hostname,
updateprocessors.NewFelixConfigUpdateProcessor(),
&ready,
)
if err != nil {
return
}
err = getAndMergeConfig(
ctx, client, hostConfig,
apiv3.KindNode, hostname,
updateprocessors.NewFelixNodeUpdateProcessor(),
&ready,
)
if err != nil {
return
}
return
}
// getAndMergeConfig gets the v3 resource configuration, extracts the separate config values
// (where each configuration value is stored in a field of the v3 resource Spec) and merges them
// into the supplied map, as required by our v1-style configuration loader.
func getAndMergeConfig(
ctx context.Context, client bapi.Client, config map[string]string,
kind string, name string,
configConverter watchersyncer.SyncerUpdateProcessor,
ready *bool,
) error {
logCxt := log.WithFields(log.Fields{"kind": kind, "name": name})
cfg, err := client.Get(ctx, model.ResourceKey{
Kind: kind,
Name: name,
Namespace: "",
}, "")
if err != nil {
switch err.(type) {
case cerrors.ErrorResourceDoesNotExist:
logCxt.Info("No config of this type")
return nil
default:
logCxt.WithError(err).Info("Failed to load config from datastore")
return err
}
}
// Re-use the update processor logic implemented for the Syncer. We give it a v3 config
// object in a KVPair and it uses the annotations defined on it to split it into v1-style
// KV pairs. Log any errors - but don't fail completely to avoid cyclic restarts.
v1kvs, err := configConverter.Process(cfg)
if err != nil {
logCxt.WithError(err).Error("Failed to convert configuration")
}
// Loop through the converted values and update our config map with values from either the
// Global or Host configs.
for _, v1KV := range v1kvs {
if _, ok := v1KV.Key.(model.ReadyFlagKey); ok {
logCxt.WithField("ready", v1KV.Value).Info("Loaded ready flag")
if v1KV.Value == true {
*ready = true
}
} else if v1KV.Value != nil {
switch k := v1KV.Key.(type) {
case model.GlobalConfigKey:
config[k.Name] = v1KV.Value.(string)
case model.HostConfigKey:
config[k.Name] = v1KV.Value.(string)
default:
logCxt.WithField("KV", v1KV).Debug("Skipping config - not required for initial loading")
}
}
}
return nil
}
type DataplaneConnector struct {
config *config.Config
configUpdChan chan<- map[string]string
ToDataplane chan interface{}
StatusUpdatesFromDataplane chan interface{}
InSync chan bool
failureReportChan chan<- string
dataplane dp.DataplaneDriver
datastore bapi.Client
statusReporter *statusrep.EndpointStatusReporter
datastoreInSync bool
firstStatusReportSent bool
}
type Startable interface {
Start()
}
func newConnector(configParams *config.Config,
configUpdChan chan<- map[string]string,
datastore bapi.Client,
dataplane dp.DataplaneDriver,
failureReportChan chan<- string,
) *DataplaneConnector {
felixConn := &DataplaneConnector{
config: configParams,
configUpdChan: configUpdChan,
datastore: datastore,
ToDataplane: make(chan interface{}),
StatusUpdatesFromDataplane: make(chan interface{}),
InSync: make(chan bool, 1),
failureReportChan: failureReportChan,
dataplane: dataplane,
}
return felixConn
}
func (fc *DataplaneConnector) readMessagesFromDataplane() {
defer func() {
fc.shutDownProcess("Failed to read messages from dataplane")
}()
log.Info("Reading from dataplane driver pipe...")
ctx := context.Background()
for {
payload, err := fc.dataplane.RecvMessage()
if err != nil {
log.WithError(err).Error("Failed to read from front-end socket")
fc.shutDownProcess("Failed to read from front-end socket")
}
log.WithField("payload", payload).Debug("New message from dataplane")
switch msg := payload.(type) {
case *proto.ProcessStatusUpdate:
fc.handleProcessStatusUpdate(ctx, msg)
case *proto.WorkloadEndpointStatusUpdate:
if fc.statusReporter != nil {
fc.StatusUpdatesFromDataplane <- msg
}
case *proto.WorkloadEndpointStatusRemove:
if fc.statusReporter != nil {
fc.StatusUpdatesFromDataplane <- msg
}
case *proto.HostEndpointStatusUpdate:
if fc.statusReporter != nil {
fc.StatusUpdatesFromDataplane <- msg
}
case *proto.HostEndpointStatusRemove:
if fc.statusReporter != nil {
fc.StatusUpdatesFromDataplane <- msg
}
default:
log.WithField("msg", msg).Warning("Unknown message from dataplane")
}
log.Debug("Finished handling message from front-end")
}
}
func (fc *DataplaneConnector) handleProcessStatusUpdate(ctx context.Context, msg *proto.ProcessStatusUpdate) {
log.Debugf("Status update from dataplane driver: %v", *msg)
statusReport := model.StatusReport{
Timestamp: msg.IsoTimestamp,
UptimeSeconds: msg.Uptime,
FirstUpdate: !fc.firstStatusReportSent,
}
kv := model.KVPair{
Key: model.ActiveStatusReportKey{Hostname: fc.config.FelixHostname, RegionString: model.RegionString(fc.config.OpenstackRegion)},
Value: &statusReport,
TTL: fc.config.ReportingTTLSecs,
}
applyCtx, cancel := context.WithTimeout(ctx, 2*time.Second)
_, err := fc.datastore.Apply(applyCtx, &kv)
cancel()
if err != nil {
if _, ok := err.(cerrors.ErrorOperationNotSupported); ok {
log.Debug("Datastore doesn't support status reports.")
return // and it won't support the last status key either.
} else {
log.Warningf("Failed to write status to datastore: %v", err)
}
} else {
fc.firstStatusReportSent = true
}
kv = model.KVPair{
Key: model.LastStatusReportKey{Hostname: fc.config.FelixHostname, RegionString: model.RegionString(fc.config.OpenstackRegion)},
Value: &statusReport,
}
applyCtx, cancel = context.WithTimeout(ctx, 2*time.Second)
_, err = fc.datastore.Apply(applyCtx, &kv)
cancel()
if err != nil {
log.Warningf("Failed to write status to datastore: %v", err)
}
}
var handledConfigChanges = set.From("CalicoVersion", "ClusterGUID", "ClusterType")
func (fc *DataplaneConnector) sendMessagesToDataplaneDriver() {
defer func() {
fc.shutDownProcess("Failed to send messages to dataplane")
}()
var config map[string]string
for {
msg := <-fc.ToDataplane
switch msg := msg.(type) {
case *proto.InSync:
log.Info("Datastore now in sync.")
if !fc.datastoreInSync {
log.Info("Datastore in sync for first time, sending message to status reporter.")
fc.datastoreInSync = true
fc.InSync <- true
}
case *proto.ConfigUpdate:
if config != nil {
log.WithFields(log.Fields{
"old": config,
"new": msg.Config,
}).Info("Config updated, checking whether we need to restart")
restartNeeded := false
for kNew, vNew := range msg.Config {
logCxt := log.WithFields(log.Fields{"key": kNew, "new": vNew})
if vOld, prs := config[kNew]; !prs {
logCxt = logCxt.WithField("updateType", "add")
} else if vNew != vOld {
logCxt = logCxt.WithFields(log.Fields{"old": vOld, "updateType": "update"})
} else {
continue
}
if handledConfigChanges.Contains(kNew) {
logCxt.Info("Config change can be handled without restart")
continue
}
logCxt.Warning("Config change requires restart")
restartNeeded = true
}
for kOld, vOld := range config {
logCxt := log.WithFields(log.Fields{"key": kOld, "old": vOld, "updateType": "delete"})
if _, prs := msg.Config[kOld]; prs {
// Key was present in the message so we've handled it above.
continue
}
if handledConfigChanges.Contains(kOld) {
logCxt.Info("Config change can be handled without restart")
continue
}
logCxt.Warning("Config change requires restart")
restartNeeded = true
}
if restartNeeded {
fc.shutDownProcess("config changed")
}
}
// Take a copy of the config to compare against next time.
config = make(map[string]string)
for k, v := range msg.Config {
config[k] = v
}
if fc.configUpdChan != nil {
// Send the config over to the usage reporter.
fc.configUpdChan <- config
}
case *calc.DatastoreNotReady:
log.Warn("Datastore became unready, need to restart.")
fc.shutDownProcess("datastore became unready")
}
if err := fc.dataplane.SendMessage(msg); err != nil {
fc.shutDownProcess("Failed to write to dataplane driver")
}
}
}
func (fc *DataplaneConnector) shutDownProcess(reason string) {
// Send a failure report to the managed shutdown thread then give it
// a few seconds to do the shutdown.
fc.failureReportChan <- reason
time.Sleep(5 * time.Second)
// The graceful shutdown failed, terminate the process.
log.Panic("Managed shutdown failed. Panicking.")
}
func (fc *DataplaneConnector) Start() {
// Start a background thread to write to the dataplane driver.
go fc.sendMessagesToDataplaneDriver()
// Start background thread to read messages from dataplane driver.
go fc.readMessagesFromDataplane()
}
var ErrServiceNotReady = errors.New("Kubernetes service missing IP or port.")
func discoverTyphaAddr(configParams *config.Config) (string, error) {
if configParams.TyphaAddr != "" {
// Explicit address; trumps other sources of config.
return configParams.TyphaAddr, nil
}
if configParams.TyphaK8sServiceName == "" {
// No explicit address, and no service name, not using Typha.
return "", nil
}
// If we get here, we need to look up the Typha service using the k8s API.
// TODO Typha: support Typha lookup without using rest.InClusterConfig().
k8sconf, err := rest.InClusterConfig()
if err != nil {
log.WithError(err).Error("Unable to create Kubernetes config.")
return "", err
}
clientset, err := kubernetes.NewForConfig(k8sconf)
if err != nil {
log.WithError(err).Error("Unable to create Kubernetes client set.")
return "", err
}
svcClient := clientset.CoreV1().Services(configParams.TyphaK8sNamespace)
svc, err := svcClient.Get(configParams.TyphaK8sServiceName, v1.GetOptions{})
if err != nil {
log.WithError(err).Error("Unable to get Typha service from Kubernetes.")
return "", err
}
host := svc.Spec.ClusterIP
log.WithField("clusterIP", host).Info("Found Typha ClusterIP.")
if host == "" {
log.WithError(err).Error("Typha service had no ClusterIP.")
return "", ErrServiceNotReady
}
for _, p := range svc.Spec.Ports {
if p.Name == "calico-typha" {
log.WithField("port", p).Info("Found Typha service port.")
typhaAddr := fmt.Sprintf("%s:%v", host, p.Port)
return typhaAddr, nil
}
}
log.Error("Didn't find Typha service port.")
return "", ErrServiceNotReady
}
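// The review comment attached to this record suggests short-circuiting a retry
// loop on success so the error log and sleep only run on failure. A minimal
// sketch of that shape (illustrative; doAttempt and maxRetries are hypothetical
// names, not part of this file):
//
//	var err error
//	for i := 0; i < maxRetries; i++ {
//	    err = doAttempt()
//	    if err == nil {
//	        break // success: skip the log and the sleep below
//	    }
//	    log.WithError(err).Warn("Attempt failed, will retry")
//	    time.Sleep(1 * time.Second)
//	}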
| 1 | 16,841 | Need an `if err == nil {break}` above this line so that we don't log/sleep if the retry succeeds. | projectcalico-felix | c |
@@ -0,0 +1,18 @@
+'use strict';
+
+const assert = require('assert');
+
+class ProvidedPromise {
+ set Promise(lib) {
+ assert.ok(typeof lib === 'function', `mongodb.Promise must be a function, got ${lib}`);
+ this._promise = lib;
+ }
+ get Promise() {
+ return this._promise;
+ }
+}
+
+const provided = new ProvidedPromise();
+provided.Promise = global.Promise;
+
+module.exports = provided; | 1 | 1 | 17,359 | to reiterate my point above, this class is not the actual provided Promise, but rather something a user can provide a Promise to/with. I think a name like `PromiseProvider` is more appropriate. | mongodb-node-mongodb-native | js |
|
@@ -738,6 +738,12 @@ func addDep(s *scope, args []pyObject) pyObject {
dep := core.ParseBuildLabelContext(string(args[1].(pyString)), s.pkg)
exported := args[2].IsTruthy()
target.AddMaybeExportedDependency(dep, exported, false, false)
+ // Queue this dependency if it'll be needed.
+ if target.State() > core.Inactive {
+ err := s.state.QueueTarget(dep, target.Label, true, false)
+ s.Assert(err == nil, "%s", err)
+ }
+ // TODO(peterebden): Do we even need the following any more?
s.pkg.MarkTargetModified(target)
return None
} | 1 | package asp
import (
"encoding/json"
"fmt"
"io"
"path"
"reflect"
"sort"
"strconv"
"strings"
"github.com/Masterminds/semver/v3"
"github.com/manifoldco/promptui"
"github.com/thought-machine/please/src/cli"
"github.com/thought-machine/please/src/core"
"github.com/thought-machine/please/src/fs"
)
// A few sneaky globals for when we don't have a scope handy
var stringMethods, dictMethods, configMethods map[string]*pyFunc
// A nativeFunc is a function that implements a builtin function natively.
type nativeFunc func(*scope, []pyObject) pyObject
// registerBuiltins sets up the "special" builtins that map to native code.
func registerBuiltins(s *scope) {
setNativeCode(s, "build_rule", buildRule)
setNativeCode(s, "subrepo", subrepo)
setNativeCode(s, "fail", builtinFail)
setNativeCode(s, "subinclude", subinclude).varargs = true
setNativeCode(s, "load", bazelLoad).varargs = true
setNativeCode(s, "package", pkg).kwargs = true
setNativeCode(s, "sorted", sorted)
setNativeCode(s, "isinstance", isinstance)
setNativeCode(s, "range", pyRange)
setNativeCode(s, "enumerate", enumerate)
setNativeCode(s, "zip", zip).varargs = true
setNativeCode(s, "len", lenFunc)
setNativeCode(s, "glob", glob)
setNativeCode(s, "bool", boolType)
setNativeCode(s, "int", intType)
setNativeCode(s, "str", strType)
setNativeCode(s, "join_path", joinPath).varargs = true
setNativeCode(s, "get_base_path", packageName)
setNativeCode(s, "package_name", packageName)
setNativeCode(s, "subrepo_name", subrepoName)
setNativeCode(s, "canonicalise", canonicalise)
setNativeCode(s, "get_labels", getLabels)
setNativeCode(s, "add_label", addLabel)
setNativeCode(s, "add_dep", addDep)
setNativeCode(s, "add_out", addOut)
setNativeCode(s, "get_outs", getOuts)
setNativeCode(s, "add_licence", addLicence)
setNativeCode(s, "get_licences", getLicences)
setNativeCode(s, "get_command", getCommand)
setNativeCode(s, "set_command", setCommand)
setNativeCode(s, "json", valueAsJSON)
setNativeCode(s, "breakpoint", breakpoint)
setNativeCode(s, "semver_check", semverCheck)
stringMethods = map[string]*pyFunc{
"join": setNativeCode(s, "join", strJoin),
"split": setNativeCode(s, "split", strSplit),
"replace": setNativeCode(s, "replace", strReplace),
"partition": setNativeCode(s, "partition", strPartition),
"rpartition": setNativeCode(s, "rpartition", strRPartition),
"startswith": setNativeCode(s, "startswith", strStartsWith),
"endswith": setNativeCode(s, "endswith", strEndsWith),
"lstrip": setNativeCode(s, "lstrip", strLStrip),
"rstrip": setNativeCode(s, "rstrip", strRStrip),
"removeprefix": setNativeCode(s, "removeprefix", strRemovePrefix),
"removesuffix": setNativeCode(s, "removesuffix", strRemoveSuffix),
"strip": setNativeCode(s, "strip", strStrip),
"find": setNativeCode(s, "find", strFind),
"rfind": setNativeCode(s, "find", strRFind),
"format": setNativeCode(s, "format", strFormat),
"count": setNativeCode(s, "count", strCount),
"upper": setNativeCode(s, "upper", strUpper),
"lower": setNativeCode(s, "lower", strLower),
}
stringMethods["format"].kwargs = true
dictMethods = map[string]*pyFunc{
"get": setNativeCode(s, "get", dictGet),
"setdefault": s.Lookup("setdefault").(*pyFunc),
"keys": setNativeCode(s, "keys", dictKeys),
"items": setNativeCode(s, "items", dictItems),
"values": setNativeCode(s, "values", dictValues),
"copy": setNativeCode(s, "copy", dictCopy),
}
configMethods = map[string]*pyFunc{
"get": setNativeCode(s, "config_get", configGet),
"setdefault": s.Lookup("setdefault").(*pyFunc),
}
if s.state.Config.Parse.GitFunctions {
setNativeCode(s, "git_branch", execGitBranch)
setNativeCode(s, "git_commit", execGitCommit)
setNativeCode(s, "git_show", execGitShow)
setNativeCode(s, "git_state", execGitState)
}
setLogCode(s, "debug", log.Debug)
setLogCode(s, "info", log.Info)
setLogCode(s, "notice", log.Notice)
setLogCode(s, "warning", log.Warning)
setLogCode(s, "error", log.Errorf)
setLogCode(s, "fatal", log.Fatalf)
}
// registerSubincludePackage sets up the package for remote subincludes.
func registerSubincludePackage(s *scope) {
// Another small hack - replace the code for this rule with native code; this must be done after the
// declaration, which is in misc_rules.
buildRule := s.Lookup("build_rule").(*pyFunc)
f := setNativeCode(s, "filegroup", filegroup)
f.args = buildRule.args
f.argIndices = buildRule.argIndices
f.defaults = buildRule.defaults
f.constants = buildRule.constants
f.types = buildRule.types
f.args = buildRule.args
f.argIndices = buildRule.argIndices
f.defaults = buildRule.defaults
f.constants = buildRule.constants
f.types = buildRule.types
}
func setNativeCode(s *scope, name string, code nativeFunc) *pyFunc {
f := s.Lookup(name).(*pyFunc)
f.nativeCode = code
f.code = nil // Might as well save a little memory here
return f
}
// setLogCode specialises setNativeCode for handling the log functions (of which there are a few)
func setLogCode(s *scope, name string, f func(format string, args ...interface{})) {
setNativeCode(s, name, func(s *scope, args []pyObject) pyObject {
if str, ok := args[0].(pyString); ok {
l := make([]interface{}, len(args))
for i, arg := range args {
l[i] = arg
}
f("//%s: %s", s.pkgFilename(), fmt.Sprintf(string(str), l[1:]...))
return None
}
f("//%s: %s", s.pkgFilename(), args)
return None
}).varargs = true
}
// buildRule implements the build_rule() builtin function.
// This is the main interface point; every build rule ultimately calls this to add
// new objects to the build graph.
func buildRule(s *scope, args []pyObject) pyObject {
s.NAssert(s.pkg == nil, "Cannot create new build rules in this context")
// We need to set various defaults from config here; it is useful to put it on the rule but not often so
// because most rules pass them through anyway.
// TODO(peterebden): when we get rid of the old parser, put these defaults on all the build rules and
// get rid of this.
args[visibilityBuildRuleArgIdx] = defaultFromConfig(s.config, args[visibilityBuildRuleArgIdx], "DEFAULT_VISIBILITY")
args[testOnlyBuildRuleArgIdx] = defaultFromConfig(s.config, args[testOnlyBuildRuleArgIdx], "DEFAULT_TESTONLY")
args[licencesBuildRuleArgIdx] = defaultFromConfig(s.config, args[licencesBuildRuleArgIdx], "DEFAULT_LICENCES")
args[sandboxBuildRuleArgIdx] = defaultFromConfig(s.config, args[sandboxBuildRuleArgIdx], "BUILD_SANDBOX")
args[testSandboxBuildRuleArgIdx] = defaultFromConfig(s.config, args[testSandboxBuildRuleArgIdx], "TEST_SANDBOX")
target := createTarget(s, args)
s.Assert(s.pkg.Target(target.Label.Name) == nil, "Duplicate build target in %s: %s", s.pkg.Name, target.Label.Name)
populateTarget(s, target, args)
s.state.AddTarget(s.pkg, target)
if s.Callback {
target.AddedPostBuild = true
s.pkg.MarkTargetModified(target)
}
return pyString(":" + target.Label.Name)
}
// defaultFromConfig sets a default value from the config if the property isn't set.
func defaultFromConfig(config *pyConfig, arg pyObject, name string) pyObject {
if arg == nil || arg == None {
return config.Get(name, arg)
}
return arg
}
// filegroup implements the filegroup() builtin.
func filegroup(s *scope, args []pyObject) pyObject {
args[1] = filegroupCommand
return buildRule(s, args)
}
// pkg implements the package() builtin function.
func pkg(s *scope, args []pyObject) pyObject {
s.Assert(s.pkg.NumTargets() == 0, "package() must be called before any build targets are defined")
for k, v := range s.locals {
k = strings.ToUpper(k)
s.Assert(s.config.Get(k, nil) != nil, "error calling package(): %s is not a known config value", k)
s.config.IndexAssign(pyString(k), v)
}
return None
}
// tagName applies the given tag to a target name.
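// For example (illustrative, following the logic below):
//	tagName("foo", "srcs")      -> "_foo#srcs"
//	tagName("_foo#bar", "srcs") -> "_foo#bar_srcs"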
func tagName(name, tag string) string {
if name[0] != '_' {
name = "_" + name
}
if strings.ContainsRune(name, '#') {
name += "_"
} else {
name += "#"
}
return name + tag
}
// bazelLoad implements the load() builtin, which is only available for Bazel compatibility.
func bazelLoad(s *scope, args []pyObject) pyObject {
s.Assert(s.state.Config.Bazel.Compatibility, "load() is only available in Bazel compatibility mode. See `plz help bazel` for more information.")
// The argument always looks like a build label, but it is not really one (i.e. there is no BUILD file that defines it).
// We do not support their legacy syntax here (i.e. "/tools/build_rules/build_test" etc).
l := core.ParseBuildLabelContext(string(args[0].(pyString)), s.contextPkg)
filename := path.Join(l.PackageName, l.Name)
if l.Subrepo != "" {
subrepo := s.state.Graph.Subrepo(l.Subrepo)
if subrepo == nil || (subrepo.Target != nil && subrepo != s.contextPkg.Subrepo) {
subincludeTarget(s, l)
subrepo = s.state.Graph.SubrepoOrDie(l.Subrepo)
}
filename = subrepo.Dir(filename)
}
s.SetAll(s.interpreter.Subinclude(filename, l, s.contextPkg), false)
return None
}
func (s *scope) WaitForBuiltTargetWithoutLimiter(l, dependent core.BuildLabel) *core.BuildTarget {
s.interpreter.limiter.Release()
defer s.interpreter.limiter.Acquire()
return s.state.WaitForBuiltTarget(l, dependent)
}
// builtinFail raises an immediate error that can't be intercepted.
func builtinFail(s *scope, args []pyObject) pyObject {
s.Error(string(args[0].(pyString)))
return None
}
func subinclude(s *scope, args []pyObject) pyObject {
s.NAssert(s.contextPkg == nil, "Cannot subinclude() from this context")
for _, arg := range args {
t := subincludeTarget(s, core.ParseBuildLabelContext(string(arg.(pyString)), s.contextPkg))
pkg := s.contextPkg
if t.Subrepo != s.contextPkg.Subrepo && t.Subrepo != nil {
pkg = &core.Package{
Name: "@" + t.Subrepo.Name,
SubrepoName: t.Subrepo.Name,
Subrepo: t.Subrepo,
}
}
l := pkg.Label()
s.Assert(l.CanSee(s.state, t), "Target %s isn't visible to be subincluded into %s", t.Label, l)
for _, out := range t.Outputs() {
s.SetAll(s.interpreter.Subinclude(path.Join(t.OutDir(), out), t.Label, pkg), false)
}
}
return None
}
// subincludeTarget returns the target for a subinclude() call to a label.
// It blocks until the target exists and is built.
func subincludeTarget(s *scope, l core.BuildLabel) *core.BuildTarget {
pkgLabel := s.contextPkg.Label()
if l.Subrepo == pkgLabel.Subrepo && l.PackageName == pkgLabel.PackageName {
// This is a subinclude in the same package, check the target exists.
s.NAssert(s.contextPkg.Target(l.Name) == nil, "Target :%s is not defined in this package; it has to be defined before the subinclude() call", l.Name)
}
s.NAssert(l.IsAllTargets() || l.IsAllSubpackages(), "Can't pass :all or /... to subinclude()")
// If we're including from a subrepo, or if we're in a subrepo and including from a different subrepo, make sure
// that package is parsed to avoid deadlocking. Deadlocks can occur when the target's package also subincludes that target.
//
// When this happens, both parse threads wait in WaitForBuiltTarget, each expecting the other to queue the target to be built.
//
// By parsing the package first, the subrepo package's subinclude will queue the subrepo target to be built before
// we call WaitForBuiltTargetWithoutLimiter below, avoiding the lockup.
if l.Subrepo != "" && l.SubrepoLabel().PackageName != s.contextPkg.Name && l.Subrepo != s.contextPkg.SubrepoName {
subrepoPackageLabel := core.BuildLabel{
PackageName: l.SubrepoLabel().PackageName,
Subrepo: l.SubrepoLabel().Subrepo,
Name: "all",
}
s.state.WaitForPackage(subrepoPackageLabel, pkgLabel)
}
// Temporarily release the parallelism limiter; this is important to keep us from deadlocking
// all available parser threads (easy to happen if they're all waiting on a single target which now can't start)
t := s.WaitForBuiltTargetWithoutLimiter(l, pkgLabel)
// This is not quite right: if you subinclude from another subinclude we can basically
// lose track of it later on. It's hard to know what better to do at this point though.
s.contextPkg.RegisterSubinclude(l)
return t
}
func lenFunc(s *scope, args []pyObject) pyObject {
return objLen(args[0])
}
func objLen(obj pyObject) pyInt {
switch t := obj.(type) {
case pyList:
return pyInt(len(t))
case pyDict:
return pyInt(len(t))
case pyString:
return pyInt(len(t))
}
panic("object of type " + obj.Type() + " has no len()")
}
func isinstance(s *scope, args []pyObject) pyObject {
obj := args[0]
types := args[1]
if f, ok := types.(*pyFunc); ok && isType(obj, f.name) {
// Special case for 'str' and so forth that are functions but also types.
return True
} else if l, ok := types.(pyList); ok {
for _, li := range l {
if lif, ok := li.(*pyFunc); ok && isType(obj, lif.name) {
return True
} else if reflect.TypeOf(obj) == reflect.TypeOf(li) {
return True
}
}
}
return newPyBool(reflect.TypeOf(obj) == reflect.TypeOf(types))
}
func isType(obj pyObject, name string) bool {
switch obj.(type) {
case pyBool:
return name == "bool" || name == "int" // N.B. For compatibility with old assert statements
case pyInt:
return name == "int"
case pyString:
return name == "str"
case pyList:
return name == "list"
case pyDict:
return name == "dict"
case *pyConfig:
return name == "config"
}
return false
}
func strJoin(s *scope, args []pyObject) pyObject {
self := string(args[0].(pyString))
seq := asStringList(s, args[1], "seq")
return pyString(strings.Join(seq, self))
}
func strSplit(s *scope, args []pyObject) pyObject {
self := args[0].(pyString)
on := args[1].(pyString)
return fromStringList(strings.Split(string(self), string(on)))
}
func strReplace(s *scope, args []pyObject) pyObject {
self := args[0].(pyString)
old := args[1].(pyString)
new := args[2].(pyString)
return pyString(strings.ReplaceAll(string(self), string(old), string(new)))
}
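// strPartition (below) splits around the first occurrence of the separator; for
// example (illustrative) "a=b=c".partition("=") returns ["a", "=", "b=c"], and if
// the separator is absent it returns [self, "", ""].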
func strPartition(s *scope, args []pyObject) pyObject {
self := args[0].(pyString)
sep := args[1].(pyString)
if idx := strings.Index(string(self), string(sep)); idx != -1 {
return pyList{self[:idx], self[idx : idx+len(sep)], self[idx+len(sep):]}
}
return pyList{self, pyString(""), pyString("")}
}
func strRPartition(s *scope, args []pyObject) pyObject {
self := args[0].(pyString)
sep := args[1].(pyString)
if idx := strings.LastIndex(string(self), string(sep)); idx != -1 {
return pyList{self[:idx], self[idx : idx+len(sep)], self[idx+len(sep):]}
}
return pyList{pyString(""), pyString(""), self}
}
func strStartsWith(s *scope, args []pyObject) pyObject {
self := args[0].(pyString)
x := args[1].(pyString)
return newPyBool(strings.HasPrefix(string(self), string(x)))
}
func strEndsWith(s *scope, args []pyObject) pyObject {
self := args[0].(pyString)
x := args[1].(pyString)
return newPyBool(strings.HasSuffix(string(self), string(x)))
}
func strLStrip(s *scope, args []pyObject) pyObject {
self := args[0].(pyString)
cutset := args[1].(pyString)
return pyString(strings.TrimLeft(string(self), string(cutset)))
}
func strRStrip(s *scope, args []pyObject) pyObject {
self := args[0].(pyString)
cutset := args[1].(pyString)
return pyString(strings.TrimRight(string(self), string(cutset)))
}
func strStrip(s *scope, args []pyObject) pyObject {
self := args[0].(pyString)
cutset := args[1].(pyString)
return pyString(strings.Trim(string(self), string(cutset)))
}
func strRemovePrefix(s *scope, args []pyObject) pyObject {
self := args[0].(pyString)
prefix := args[1].(pyString)
return pyString(strings.TrimPrefix(string(self), string(prefix)))
}
func strRemoveSuffix(s *scope, args []pyObject) pyObject {
self := args[0].(pyString)
suffix := args[1].(pyString)
return pyString(strings.TrimSuffix(string(self), string(suffix)))
}
func strFind(s *scope, args []pyObject) pyObject {
self := args[0].(pyString)
needle := args[1].(pyString)
return pyInt(strings.Index(string(self), string(needle)))
}
func strRFind(s *scope, args []pyObject) pyObject {
self := args[0].(pyString)
needle := args[1].(pyString)
return pyInt(strings.LastIndex(string(self), string(needle)))
}
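// strFormat (below) fills named placeholders from local variables first and then
// "{}" positionally; doubled braces escape to literal braces. For example
// (illustrative), with a local name = "x", "{name}-{}".format("y") gives "x-y".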
func strFormat(s *scope, args []pyObject) pyObject {
self := string(args[0].(pyString))
for k, v := range s.locals {
self = strings.ReplaceAll(self, "{"+k+"}", v.String())
}
for _, arg := range args[1:] {
self = strings.Replace(self, "{}", arg.String(), 1)
}
return pyString(strings.ReplaceAll(strings.ReplaceAll(self, "{{", "{"), "}}", "}"))
}
func strCount(s *scope, args []pyObject) pyObject {
self := string(args[0].(pyString))
needle := string(args[1].(pyString))
return pyInt(strings.Count(self, needle))
}
func strUpper(s *scope, args []pyObject) pyObject {
self := string(args[0].(pyString))
return pyString(strings.ToUpper(self))
}
func strLower(s *scope, args []pyObject) pyObject {
self := string(args[0].(pyString))
return pyString(strings.ToLower(self))
}
func boolType(s *scope, args []pyObject) pyObject {
return newPyBool(args[0].IsTruthy())
}
func intType(s *scope, args []pyObject) pyObject {
i, err := strconv.Atoi(string(args[0].(pyString)))
s.Assert(err == nil, "%s", err)
return pyInt(i)
}
func strType(s *scope, args []pyObject) pyObject {
return pyString(args[0].String())
}
func glob(s *scope, args []pyObject) pyObject {
include := asStringList(s, args[0], "include")
exclude := asStringList(s, args[1], "exclude")
hidden := args[2].IsTruthy()
exclude = append(exclude, s.state.Config.Parse.BuildFileName...)
if s.globber == nil {
s.globber = fs.NewGlobber(s.state.Config.Parse.BuildFileName)
}
return fromStringList(s.globber.Glob(s.pkg.SourceRoot(), include, exclude, hidden))
}
func asStringList(s *scope, arg pyObject, name string) []string {
l, ok := arg.(pyList)
s.Assert(ok, "argument %s must be a list", name)
sl := make([]string, len(l))
for i, x := range l {
sx, ok := x.(pyString)
s.Assert(ok, "%s must be a list of strings", name)
sl[i] = string(sx)
}
return sl
}
func fromStringList(l []string) pyList {
ret := make(pyList, len(l))
for i, s := range l {
ret[i] = pyString(s)
}
return ret
}
func configGet(s *scope, args []pyObject) pyObject {
self := args[0].(*pyConfig)
return self.Get(string(args[1].(pyString)), args[2])
}
func dictGet(s *scope, args []pyObject) pyObject {
self := args[0].(pyDict)
sk, ok := args[1].(pyString)
s.Assert(ok, "dict keys must be strings, not %s", args[1].Type())
if ret, present := self[string(sk)]; present {
return ret
}
return args[2]
}
func dictKeys(s *scope, args []pyObject) pyObject {
self := args[0].(pyDict)
ret := make(pyList, len(self))
for i, k := range self.Keys() {
ret[i] = pyString(k)
}
return ret
}
func dictValues(s *scope, args []pyObject) pyObject {
self := args[0].(pyDict)
ret := make(pyList, len(self))
for i, k := range self.Keys() {
ret[i] = self[k]
}
return ret
}
func dictItems(s *scope, args []pyObject) pyObject {
self := args[0].(pyDict)
ret := make(pyList, len(self))
for i, k := range self.Keys() {
ret[i] = pyList{pyString(k), self[k]}
}
return ret
}
func dictCopy(s *scope, args []pyObject) pyObject {
self := args[0].(pyDict)
ret := make(pyDict, len(self))
for k, v := range self {
ret[k] = v
}
return ret
}
func sorted(s *scope, args []pyObject) pyObject {
l, ok := args[0].(pyList)
s.Assert(ok, "unsortable type %s", args[0].Type())
l = append(pyList{}, l...) // take a copy: l[:] shares the backing array, so sort.Slice would mutate the caller's list
sort.Slice(l, func(i, j int) bool { return l[i].Operator(LessThan, l[j]).IsTruthy() })
return l
}
func joinPath(s *scope, args []pyObject) pyObject {
l := make([]string, len(args))
for i, arg := range args {
l[i] = string(arg.(pyString))
}
return pyString(path.Join(l...))
}
func packageName(s *scope, args []pyObject) pyObject {
if s.pkg != nil {
return pyString(s.pkg.Name)
}
if s.subincludeLabel != nil {
return pyString(s.subincludeLabel.PackageName)
}
s.Error("you cannot call package_name() from this context")
return nil
}
func subrepoName(s *scope, args []pyObject) pyObject {
if s.pkg != nil {
return pyString(s.pkg.SubrepoName)
}
if s.subincludeLabel != nil {
return pyString(s.subincludeLabel.Subrepo)
}
s.Error("you cannot call subrepo_name() from this context")
return nil
}
func canonicalise(s *scope, args []pyObject) pyObject {
s.Assert(s.pkg != nil, "Cannot call canonicalise() from this context")
label := core.ParseBuildLabel(string(args[0].(pyString)), s.pkg.Name)
return pyString(label.String())
}
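// pyRange implements range(); for example (illustrative, assuming the declared
// default step of 1) range(3) gives [0, 1, 2] and range(1, 6, 2) gives [1, 3, 5].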
func pyRange(s *scope, args []pyObject) pyObject {
start := args[0].(pyInt)
stop, isInt := args[1].(pyInt)
step := args[2].(pyInt)
if !isInt {
// Stop not passed so we start at 0 and start is the stop.
stop = start
start = 0
}
ret := make(pyList, 0, stop-start)
for i := start; i < stop; i += step {
ret = append(ret, i)
}
return ret
}
func enumerate(s *scope, args []pyObject) pyObject {
l, ok := args[0].(pyList)
s.Assert(ok, "Argument to enumerate must be a list, not %s", args[0].Type())
ret := make(pyList, len(l))
for i, li := range l {
ret[i] = pyList{pyInt(i), li}
}
return ret
}
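// zip transposes its list arguments; for example (illustrative)
// zip([1, 2], ["a", "b"]) gives [[1, "a"], [2, "b"]]. All arguments must be
// lists of the same length.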
func zip(s *scope, args []pyObject) pyObject {
lastLen := 0
for i, seq := range args {
si, ok := seq.(pyList)
s.Assert(ok, "Arguments to zip must be lists, not %s", si.Type())
// This isn't a restriction in Python but I can't be bothered handling all the stuff that real zip does.
s.Assert(i == 0 || lastLen == len(si), "All arguments to zip must have the same length")
lastLen = len(si)
}
ret := make(pyList, lastLen)
for i := range ret {
r := make(pyList, len(args))
for j, li := range args {
r[j] = li.(pyList)[i]
}
ret[i] = r
}
return ret
}
// getLabels returns the set of labels for a build target and its transitive dependencies.
// The labels are filtered by the given prefix, which is stripped from the returned labels.
// Two formats are supported here: either passing just the name of a target in the current
// package, or a build label referring specifically to one.
func getLabels(s *scope, args []pyObject) pyObject {
name := string(args[0].(pyString))
prefix := string(args[1].(pyString))
all := args[2].IsTruthy()
if core.LooksLikeABuildLabel(name) {
label := core.ParseBuildLabel(name, s.pkg.Name)
return getLabelsInternal(s.state.Graph.TargetOrDie(label), prefix, core.Built, all)
}
target := getTargetPost(s, name)
return getLabelsInternal(target, prefix, core.Building, all)
}
// addLabel adds a set of labels to the named rule
func addLabel(s *scope, args []pyObject) pyObject {
name := string(args[0].(pyString))
var target *core.BuildTarget
if core.LooksLikeABuildLabel(name) {
label := core.ParseBuildLabel(name, s.pkg.Name)
target = s.state.Graph.TargetOrDie(label)
} else {
target = getTargetPost(s, name)
}
target.AddLabel(args[1].String())
return None
}
func getLabelsInternal(target *core.BuildTarget, prefix string, minState core.BuildTargetState, all bool) pyObject {
if target.State() < minState {
log.Fatalf("get_labels called on a target that is not yet built: %s", target.Label)
}
labels := map[string]bool{}
done := map[*core.BuildTarget]bool{}
var getLabels func(*core.BuildTarget)
getLabels = func(t *core.BuildTarget) {
for _, label := range t.Labels {
if strings.HasPrefix(label, prefix) {
labels[strings.TrimSpace(strings.TrimPrefix(label, prefix))] = true
}
}
done[t] = true
if !t.OutputIsComplete || t == target || all {
for _, dep := range t.Dependencies() {
if !done[dep] {
getLabels(dep)
}
}
}
}
getLabels(target)
ret := make([]string, len(labels))
i := 0
for label := range labels {
ret[i] = label
i++
}
sort.Strings(ret)
return fromStringList(ret)
}
// getTargetPost is called by various functions to get a target from the current package.
// Panics if the target is not in the current package or has already been built.
func getTargetPost(s *scope, name string) *core.BuildTarget {
target := s.pkg.Target(name)
//nolint:staticcheck
s.Assert(target != nil, "Unknown build target %s in %s", name, s.pkg.Name)
// It'd be cheating to try to modify targets that're already built.
// Prohibit this because it'd likely end up with nasty race conditions.
s.Assert(target.State() < core.Built, "Attempted to modify target %s, but it's already built", target.Label) //nolint:staticcheck
return target
}
// addDep adds a dependency to a target.
func addDep(s *scope, args []pyObject) pyObject {
s.Assert(s.Callback, "can only be called from a pre- or post-build callback")
target := getTargetPost(s, string(args[0].(pyString)))
dep := core.ParseBuildLabelContext(string(args[1].(pyString)), s.pkg)
exported := args[2].IsTruthy()
target.AddMaybeExportedDependency(dep, exported, false, false)
s.pkg.MarkTargetModified(target)
return None
}
// addOut adds an output to a target.
func addOut(s *scope, args []pyObject) pyObject {
target := getTargetPost(s, string(args[0].(pyString)))
name := string(args[1].(pyString))
out := string(args[2].(pyString))
if out == "" {
target.AddOutput(name)
s.pkg.MustRegisterOutput(name, target)
} else {
_, ok := target.EntryPoints[name]
s.NAssert(ok, "Named outputs can't have the same name as entry points")
target.AddNamedOutput(name, out)
s.pkg.MustRegisterOutput(out, target)
}
return None
}
// getOuts gets the outputs of a target
func getOuts(s *scope, args []pyObject) pyObject {
name := args[0].String()
var target *core.BuildTarget
if core.LooksLikeABuildLabel(name) {
label := core.ParseBuildLabel(name, s.pkg.Name)
target = s.state.Graph.TargetOrDie(label)
} else {
target = getTargetPost(s, name)
}
outs := target.Outputs()
ret := make(pyList, len(outs))
for i, out := range outs {
ret[i] = pyString(out)
}
return ret
}
// addLicence adds a licence to a target.
func addLicence(s *scope, args []pyObject) pyObject {
target := getTargetPost(s, string(args[0].(pyString)))
target.AddLicence(string(args[1].(pyString)))
return None
}
// getLicences returns the licences for a single target.
func getLicences(s *scope, args []pyObject) pyObject {
return fromStringList(getTargetPost(s, string(args[0].(pyString))).Licences)
}
// getCommand gets the command of a target, optionally for a configuration.
func getCommand(s *scope, args []pyObject) pyObject {
target := getTargetPost(s, string(args[0].(pyString)))
config := string(args[1].(pyString))
if config != "" {
return pyString(target.GetCommandConfig(config))
}
if len(target.Commands) > 0 {
commands := pyDict{}
for config, cmd := range target.Commands {
commands[config] = pyString(cmd)
}
return commands
}
return pyString(target.Command)
}
// valueAsJSON returns a JSON-formatted string representation of a plz value.
func valueAsJSON(s *scope, args []pyObject) pyObject {
js, err := json.Marshal(args[0])
if err != nil {
s.Error("Could not marshal object as JSON")
return None
}
return pyString(js)
}
// setCommand sets the command of a target, optionally for a configuration.
func setCommand(s *scope, args []pyObject) pyObject {
target := getTargetPost(s, string(args[0].(pyString)))
config := string(args[1].(pyString))
command := string(args[2].(pyString))
if command == "" {
target.Command = config
} else {
target.AddCommand(config, command)
}
return None
}
// selectFunc implements the select() builtin.
func selectFunc(s *scope, args []pyObject) pyObject {
d, _ := asDict(args[0])
var def pyObject
// This is not really the same as Bazel's order-of-matching rules, but is at least deterministic.
keys := d.Keys()
for i := len(keys) - 1; i >= 0; i-- {
k := keys[i]
if k == "//conditions:default" || k == "default" {
def = d[k]
} else if selectTarget(s, core.ParseBuildLabelContext(k, s.contextPkg)).HasLabel("config:on") {
return d[k]
}
}
s.NAssert(def == nil, "None of the select() conditions matched")
return def
}
// selectTarget returns the target to be used for a select() call.
// It panics appropriately if the target isn't built yet.
func selectTarget(s *scope, l core.BuildLabel) *core.BuildTarget {
if s.pkg != nil && l.PackageName == s.pkg.Name {
t := s.pkg.Target(l.Name)
s.NAssert(t == nil, "Target %s in select() call has not been defined yet", l.Name)
return t
}
return subincludeTarget(s, l)
}
// subrepo implements the subrepo() builtin that adds a new repository.
func subrepo(s *scope, args []pyObject) pyObject {
s.NAssert(s.pkg == nil, "Cannot create new subrepos in this context")
name := string(args[0].(pyString))
dep := string(args[1].(pyString))
var target *core.BuildTarget
root := name
if dep != "" {
// N.B. The target must be already registered on this package.
target = s.pkg.TargetOrDie(core.ParseBuildLabelContext(dep, s.pkg).Name)
root = path.Join(target.OutDir(), name)
} else if args[2] != None {
root = string(args[2].(pyString))
}
state := s.state
if args[3] != None { // arg 3 is the config file to load
state = state.ForConfig(path.Join(s.pkg.Name, string(args[3].(pyString))))
} else if args[4].IsTruthy() { // arg 4 is bazel_compat
state = state.ForConfig()
state.Config.Bazel.Compatibility = true
state.Config.Parse.BuildFileName = append(state.Config.Parse.BuildFileName, "BUILD.bazel")
}
isCrossCompile := s.pkg.Subrepo != nil && s.pkg.Subrepo.IsCrossCompile
arch := cli.HostArch()
if args[5] != None { // arg 5 is arch-string, for arch-subrepos.
givenArch := string(args[5].(pyString))
if err := arch.UnmarshalFlag(givenArch); err != nil {
log.Fatalf("Could not interpret architecture '%s' for subrepo '%s'", givenArch, name)
}
state = state.ForArch(arch)
isCrossCompile = true
}
sr := &core.Subrepo{
Name: s.pkg.SubrepoArchName(path.Join(s.pkg.Name, name)),
Root: root,
Target: target,
State: state,
Arch: arch,
IsCrossCompile: isCrossCompile,
}
if s.state.Config.Bazel.Compatibility && s.pkg.Name == "workspace" {
sr.Name = s.pkg.SubrepoArchName(name)
}
log.Debug("Registering subrepo %s in package %s", sr.Name, s.pkg.Label())
s.state.Graph.MaybeAddSubrepo(sr)
return pyString("///" + sr.Name)
}
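// Hypothetical BUILD-file usage of the builtin above; the keyword names are
// assumed from the positional arguments this function reads (name, dep):
//
//	subrepo(name = "tools", dep = ":tools_tarball")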
// breakpoint implements an interactive debugger for the breakpoint() builtin.
func breakpoint(s *scope, args []pyObject) pyObject {
// Take this mutex to ensure only one debugger runs at a time
s.interpreter.breakpointMutex.Lock()
defer s.interpreter.breakpointMutex.Unlock()
fmt.Printf("breakpoint() encountered in %s, entering interactive debugger...\n", s.contextPkg.Filename)
// This is a small hack to get the return value back from an ident statement, which
// is normally not available since we don't have implicit returns.
interpretStatements := func(stmts []*Statement) (ret pyObject, err error) {
if len(stmts) == 1 && stmts[0].Ident != nil {
defer func() {
if r := recover(); r != nil {
err = fmt.Errorf("%s", r)
}
}()
return s.interpretIdentStatement(stmts[0].Ident), nil
}
return s.interpreter.interpretStatements(s, stmts)
}
for {
prompt := promptui.Prompt{
Label: "plz",
Validate: func(input string) error {
_, err := s.interpreter.parser.ParseData([]byte(input), "<stdin>")
return err
},
}
if input, err := prompt.Run(); err != nil {
if err == io.EOF {
break
} else if err.Error() != "^C" {
log.Error("%s", err)
}
} else if stmts, err := s.interpreter.parser.ParseData([]byte(input), "<stdin>"); err != nil {
log.Error("Syntax error: %s", err)
} else if ret, err := interpretStatements(stmts); err != nil {
log.Error("%s", err)
} else if ret != nil && ret != None {
fmt.Printf("%s\n", ret)
} else {
fmt.Printf("\n")
}
}
fmt.Printf("Debugger exited, continuing...\n")
return None
}
func semverCheck(s *scope, args []pyObject) pyObject {
v, err := semver.NewVersion(string(args[0].(pyString)))
if err != nil {
s.Error("failed to parse version: %v", err)
return newPyBool(false)
}
c, err := semver.NewConstraint(string(args[1].(pyString)))
if err != nil {
s.Error("failed to parse constraint: %v", err)
return newPyBool(false)
}
return newPyBool(c.Check(v))
}
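The semverCheck builtin above is a thin wrapper over a semver library whose API matches Masterminds semver; a minimal standalone sketch of the same parse-then-check flow, assuming the github.com/Masterminds/semver import path:

package main

import (
	"fmt"

	"github.com/Masterminds/semver"
)

func main() {
	// Parse the version, then the constraint, exactly as semverCheck does.
	v, err := semver.NewVersion("1.4.2")
	if err != nil {
		panic(err)
	}
	c, err := semver.NewConstraint(">= 1.0.0, < 2.0.0")
	if err != nil {
		panic(err)
	}
	fmt.Println(c.Check(v)) // true: 1.4.2 satisfies the constraint
}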
| 1 | 9,929 | I guess we can only call this from a post-build function, but we might need to check that this target is to be built? I guess target A depends on B which has a post build. We `plz build :B` which adds C as a dep of A. Won't we queue C to be built even though it only needs to build if A needs to be built? That's kinda why I approached this the way I did. I trust you've thought about this but would be good to understand this a bit better. | thought-machine-please | go |
@@ -123,7 +123,7 @@ def sndrcv(pks, pkt, timeout = None, inter = 0, verbose=None, chainCC=0, retry=0
if remaintime <= 0:
break
r = None
- if arch.FREEBSD or arch.DARWIN:
+ if not (pks.__class__.__name__ == 'StreamSocket') and (arch.FREEBSD or arch.DARWIN):
inp, out, err = select(inmask,[],[], 0.05)
if len(inp) == 0 or pks in inp:
r = pks.nonblock_recv() | 1 | ## This file is part of Scapy
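The patched condition identifies the socket type by comparing its class name to a string; a minimal sketch of the isinstance-based alternative (assuming StreamSocket is importable from the supersocket module alongside this file), which also matches subclasses instead of only the exact class name:

from supersocket import StreamSocket

if not isinstance(pks, StreamSocket) and (arch.FREEBSD or arch.DARWIN):
    inp, out, err = select(inmask, [], [], 0.05)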
## See http://www.secdev.org/projects/scapy for more information
## Copyright (C) Philippe Biondi <phil@secdev.org>
## This program is published under a GPLv2 license
"""
Functions to send and receive packets.
"""
import errno
import cPickle,os,sys,time,subprocess
import itertools
from select import select
from data import *
import arch
from config import conf
from packet import Gen
from utils import warning,get_temp_file,PcapReader,wrpcap
import plist
from error import log_runtime,log_interactive
from base_classes import SetGen
#################
## Debug class ##
#################
class debug:
recv=[]
sent=[]
match=[]
####################
## Send / Receive ##
####################
def sndrcv(pks, pkt, timeout = None, inter = 0, verbose=None, chainCC=0, retry=0, multi=0):
if not isinstance(pkt, Gen):
pkt = SetGen(pkt)
if verbose is None:
verbose = conf.verb
debug.recv = plist.PacketList([],"Unanswered")
debug.sent = plist.PacketList([],"Sent")
debug.match = plist.SndRcvList([])
nbrecv=0
ans = []
# do it here to fix random fields, so that parent and child have the same values
all_stimuli = tobesent = [p for p in pkt]
notans = len(tobesent)
hsent={}
for i in tobesent:
h = i.hashret()
if h in hsent:
hsent[h].append(i)
else:
hsent[h] = [i]
if retry < 0:
retry = -retry
autostop=retry
else:
autostop=0
while retry >= 0:
found=0
if timeout < 0:
timeout = None
rdpipe,wrpipe = os.pipe()
rdpipe=os.fdopen(rdpipe)
wrpipe=os.fdopen(wrpipe,"w")
pid=1
try:
pid = os.fork()
if pid == 0:
try:
sys.stdin.close()
rdpipe.close()
try:
i = 0
if verbose:
print "Begin emission:"
for p in tobesent:
pks.send(p)
i += 1
time.sleep(inter)
if verbose:
print "Finished to send %i packets." % i
except SystemExit:
pass
except KeyboardInterrupt:
pass
except:
log_runtime.exception("--- Error in child %i" % os.getpid())
log_runtime.info("--- Error in child %i" % os.getpid())
finally:
try:
os.setpgrp() # Change process group to avoid ctrl-C
sent_times = [p.sent_time for p in all_stimuli if p.sent_time]
cPickle.dump( (conf.netcache,sent_times), wrpipe )
wrpipe.close()
except:
pass
elif pid < 0:
log_runtime.error("fork error")
else:
wrpipe.close()
stoptime = 0
remaintime = None
inmask = [rdpipe,pks]
try:
try:
while 1:
if stoptime:
remaintime = stoptime-time.time()
if remaintime <= 0:
break
r = None
if arch.FREEBSD or arch.DARWIN:
inp, out, err = select(inmask,[],[], 0.05)
if len(inp) == 0 or pks in inp:
r = pks.nonblock_recv()
else:
inp = []
try:
inp, out, err = select(inmask,[],[], remaintime)
except IOError, exc:
if exc.errno != errno.EINTR:
raise
if len(inp) == 0:
break
if pks in inp:
r = pks.recv(MTU)
if rdpipe in inp:
if timeout:
stoptime = time.time()+timeout
del(inmask[inmask.index(rdpipe)])
if r is None:
continue
ok = 0
h = r.hashret()
if h in hsent:
hlst = hsent[h]
for i, sentpkt in enumerate(hlst):
if r.answers(sentpkt):
ans.append((sentpkt, r))
if verbose > 1:
os.write(1, "*")
ok = 1
if not multi:
del hlst[i]
notans -= 1
else:
if not hasattr(sentpkt, '_answered'):
notans -= 1
sentpkt._answered = 1
break
if notans == 0 and not multi:
break
if not ok:
if verbose > 1:
os.write(1, ".")
nbrecv += 1
if conf.debug_match:
debug.recv.append(r)
except KeyboardInterrupt:
if chainCC:
raise
finally:
try:
nc,sent_times = cPickle.load(rdpipe)
except EOFError:
warning("Child died unexpectedly. Packets may not have been sent %i"%os.getpid())
else:
conf.netcache.update(nc)
for p,t in zip(all_stimuli, sent_times):
p.sent_time = t
os.waitpid(pid,0)
finally:
if pid == 0:
os._exit(0)
remain = list(itertools.chain(*hsent.itervalues()))
if multi:
remain = [p for p in remain if not hasattr(p, '_answered')]
if autostop and len(remain) > 0 and len(remain) != len(tobesent):
retry = autostop
tobesent = remain
if len(tobesent) == 0:
break
retry -= 1
if conf.debug_match:
debug.sent=plist.PacketList(remain[:],"Sent")
debug.match=plist.SndRcvList(ans[:])
# clean the ans list to delete the field _answered
if (multi):
for s,r in ans:
if hasattr(s, '_answered'):
del(s._answered)
if verbose:
print "\nReceived %i packets, got %i answers, remaining %i packets" % (nbrecv+len(ans), len(ans), notans)
return plist.SndRcvList(ans),plist.PacketList(remain,"Unanswered")
def __gen_send(s, x, inter=0, loop=0, count=None, verbose=None, realtime=None, return_packets=False, *args, **kargs):
if type(x) is str:
x = conf.raw_layer(load=x)
if not isinstance(x, Gen):
x = SetGen(x)
if verbose is None:
verbose = conf.verb
n = 0
if count is not None:
loop = -count
elif not loop:
loop = -1
if return_packets:
sent_packets = plist.PacketList()
try:
while loop:
dt0 = None
for p in x:
if realtime:
ct = time.time()
if dt0:
st = dt0+p.time-ct
if st > 0:
time.sleep(st)
else:
dt0 = ct-p.time
s.send(p)
if return_packets:
sent_packets.append(p)
n += 1
if verbose:
os.write(1,".")
time.sleep(inter)
if loop < 0:
loop += 1
except KeyboardInterrupt:
pass
s.close()
if verbose:
print "\nSent %i packets." % n
if return_packets:
return sent_packets
@conf.commands.register
def send(x, inter=0, loop=0, count=None, verbose=None, realtime=None, return_packets=False, *args, **kargs):
"""Send packets at layer 3
send(packets, [inter=0], [loop=0], [verbose=conf.verb]) -> None"""
return __gen_send(conf.L3socket(*args, **kargs), x, inter=inter, loop=loop, count=count,verbose=verbose,
realtime=realtime, return_packets=return_packets)
@conf.commands.register
def sendp(x, inter=0, loop=0, iface=None, iface_hint=None, count=None, verbose=None, realtime=None,
return_packets=False, *args, **kargs):
"""Send packets at layer 2
sendp(packets, [inter=0], [loop=0], [verbose=conf.verb]) -> None"""
if iface is None and iface_hint is not None:
iface = conf.route.route(iface_hint)[0]
return __gen_send(conf.L2socket(iface=iface, *args, **kargs), x, inter=inter, loop=loop, count=count,
verbose=verbose, realtime=realtime, return_packets=return_packets)
@conf.commands.register
def sendpfast(x, pps=None, mbps=None, realtime=None, loop=0, file_cache=False, iface=None):
"""Send packets at layer 2 using tcpreplay for performance
pps: packets per second
mbps: MBits per second
realtime: use packet's timestamp, bending time with realtime value
loop: number of times to process the packet list
file_cache: cache packets in RAM instead of reading from disk at each iteration
iface: output interface """
if iface is None:
iface = conf.iface
argv = [conf.prog.tcpreplay, "--intf1=%s" % iface ]
if pps is not None:
argv.append("--pps=%i" % pps)
elif mbps is not None:
argv.append("--mbps=%f" % mbps)
elif realtime is not None:
argv.append("--multiplier=%f" % realtime)
else:
argv.append("--topspeed")
if loop:
argv.append("--loop=%i" % loop)
if file_cache:
argv.append("--enable-file-cache")
f = get_temp_file()
argv.append(f)
wrpcap(f, x)
try:
subprocess.check_call(argv)
except KeyboardInterrupt:
log_interactive.info("Interrupted by user")
except Exception,e:
log_interactive.error("while trying to exec [%s]: %s" % (argv[0],e))
finally:
os.unlink(f)
@conf.commands.register
def sr(x,filter=None, iface=None, nofilter=0, *args,**kargs):
"""Send and receive packets at layer 3
nofilter: put 1 to avoid use of bpf filters
retry: if positive, how many times to resend unanswered packets
if negative, how many times to retry when no more packets are answered
timeout: how much time to wait after the last packet has been sent
verbose: set verbosity level
multi: whether to accept multiple answers for the same stimulus
filter: provide a BPF filter
iface: listen answers only on the given interface"""
if not kargs.has_key("timeout"):
kargs["timeout"] = -1
s = conf.L3socket(filter=filter, iface=iface, nofilter=nofilter)
a,b=sndrcv(s,x,*args,**kargs)
s.close()
return a,b
@conf.commands.register
def sr1(x,filter=None,iface=None, nofilter=0, *args,**kargs):
"""Send packets at layer 3 and return only the first answer
nofilter: put 1 to avoid use of bpf filters
retry: if positive, how many times to resend unanswered packets
if negative, how many times to retry when no more packets are answered
timeout: how much time to wait after the last packet has been sent
verbose: set verbosity level
multi: whether to accept multiple answers for the same stimulus
filter: provide a BPF filter
iface: listen answers only on the given interface"""
if not kargs.has_key("timeout"):
kargs["timeout"] = -1
s=conf.L3socket(filter=filter, nofilter=nofilter, iface=iface)
a,b=sndrcv(s,x,*args,**kargs)
s.close()
if len(a) > 0:
return a[0][1]
else:
return None
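# Hypothetical illustration (not part of the original file): sr1() returns
# only the first matching answer, so a one-shot ICMP probe looks like:
#
#   ans = sr1(IP(dst="192.0.2.1")/ICMP(), timeout=2)
#   if ans is not None:
#       ans.summary()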
@conf.commands.register
def srp(x,iface=None, iface_hint=None, filter=None, nofilter=0, type=ETH_P_ALL, *args,**kargs):
"""Send and receive packets at layer 2
nofilter: put 1 to avoid use of bpf filters
retry: if positive, how many times to resend unanswered packets
if negative, how many times to retry when no more packets are answered
timeout: how much time to wait after the last packet has been sent
verbose: set verbosity level
multi: whether to accept multiple answers for the same stimulus
filter: provide a BPF filter
iface: work only on the given interface"""
if not kargs.has_key("timeout"):
kargs["timeout"] = -1
if iface is None and iface_hint is not None:
iface = conf.route.route(iface_hint)[0]
s = conf.L2socket(iface=iface, filter=filter, nofilter=nofilter, type=type)
a,b=sndrcv(s ,x,*args,**kargs)
s.close()
return a,b
@conf.commands.register
def srp1(*args,**kargs):
"""Send and receive packets at layer 2 and return only the first answer
nofilter: put 1 to avoid use of bpf filters
retry: if positive, how many times to resend unanswered packets
if negative, how many times to retry when no more packets are answered
timeout: how much time to wait after the last packet has been sent
verbose: set verbosity level
multi: whether to accept multiple answers for the same stimulus
filter: provide a BPF filter
iface: work only on the given interface"""
if not kargs.has_key("timeout"):
kargs["timeout"] = -1
a,b=srp(*args,**kargs)
if len(a) > 0:
return a[0][1]
else:
return None
def __sr_loop(srfunc, pkts, prn=lambda x:x[1].summary(), prnfail=lambda x:x.summary(), inter=1, timeout=None, count=None, verbose=None, store=1, *args, **kargs):
n = 0
r = 0
ct = conf.color_theme
if verbose is None:
verbose = conf.verb
parity = 0
ans=[]
unans=[]
if timeout is None:
timeout = min(2*inter, 5)
try:
while 1:
parity ^= 1
col = [ct.even,ct.odd][parity]
if count is not None:
if count == 0:
break
count -= 1
start = time.time()
print "\rsend...\r",
res = srfunc(pkts, timeout=timeout, verbose=0, chainCC=1, *args, **kargs)
n += len(res[0])+len(res[1])
r += len(res[0])
if verbose > 1 and prn and len(res[0]) > 0:
msg = "RECV %i:" % len(res[0])
print "\r"+ct.success(msg),
for p in res[0]:
print col(prn(p))
print " "*len(msg),
if verbose > 1 and prnfail and len(res[1]) > 0:
msg = "fail %i:" % len(res[1])
print "\r"+ct.fail(msg),
for p in res[1]:
print col(prnfail(p))
print " "*len(msg),
if verbose > 1 and not (prn or prnfail):
print "recv:%i fail:%i" % tuple(map(len, res[:2]))
if store:
ans += res[0]
unans += res[1]
end=time.time()
if end-start < inter:
time.sleep(inter+start-end)
except KeyboardInterrupt:
pass
if verbose and n>0:
print ct.normal("\nSent %i packets, received %i packets. %3.1f%% hits." % (n,r,100.0*r/n))
return plist.SndRcvList(ans),plist.PacketList(unans)
@conf.commands.register
def srloop(pkts, *args, **kargs):
"""Send a packet at layer 3 in loop and print the answer each time
srloop(pkts, [prn], [inter], [count], ...) --> None"""
return __sr_loop(sr, pkts, *args, **kargs)
@conf.commands.register
def srploop(pkts, *args, **kargs):
"""Send a packet at layer 2 in loop and print the answer each time
srloop(pkts, [prn], [inter], [count], ...) --> None"""
return __sr_loop(srp, pkts, *args, **kargs)
def sndrcvflood(pks, pkt, prn=lambda (s,r):r.summary(), chainCC=0, store=1, unique=0):
if not isinstance(pkt, Gen):
pkt = SetGen(pkt)
tobesent = [p for p in pkt]
received = plist.SndRcvList()
seen = {}
hsent={}
for i in tobesent:
h = i.hashret()
if h in hsent:
hsent[h].append(i)
else:
hsent[h] = [i]
def send_in_loop(tobesent):
while 1:
for p in tobesent:
yield p
packets_to_send = send_in_loop(tobesent)
ssock = rsock = pks.fileno()
try:
while 1:
readyr,readys,_ = select([rsock],[ssock],[])
if ssock in readys:
pks.send(packets_to_send.next())
if rsock in readyr:
p = pks.recv(MTU)
if p is None:
continue
h = p.hashret()
if h in hsent:
hlst = hsent[h]
for i in hlst:
if p.answers(i):
res = prn((i,p))
if unique:
if res in seen:
continue
seen[res] = None
if res is not None:
print res
if store:
received.append((i,p))
except KeyboardInterrupt:
if chainCC:
raise
return received
@conf.commands.register
def srflood(x,filter=None, iface=None, nofilter=None, *args,**kargs):
"""Flood and receive packets at layer 3
prn: function applied to packets received. Ret val is printed if not None
store: if 1 (default), store answers and return them
unique: only consider packets whose printed result is unique
nofilter: put 1 to avoid use of bpf filters
filter: provide a BPF filter
iface: listen answers only on the given interface"""
s = conf.L3socket(filter=filter, iface=iface, nofilter=nofilter)
r=sndrcvflood(s,x,*args,**kargs)
s.close()
return r
@conf.commands.register
def srpflood(x,filter=None, iface=None, iface_hint=None, nofilter=None, *args,**kargs):
"""Flood and receive packets at layer 2
prn: function applied to packets received. Ret val is printed if not None
store: if 1 (default), store answers and return them
unique: only consider packets whose printed result is unique
nofilter: put 1 to avoid use of bpf filters
filter: provide a BPF filter
iface: listen answers only on the given interface"""
if iface is None and iface_hint is not None:
iface = conf.route.route(iface_hint)[0]
s = conf.L2socket(filter=filter, iface=iface, nofilter=nofilter)
r=sndrcvflood(s,x,*args,**kargs)
s.close()
return r
@conf.commands.register
def sniff(count=0, store=1, offline=None, prn=None, lfilter=None,
L2socket=None, timeout=None, opened_socket=None,
stop_filter=None, iface=None, *arg, **karg):
"""Sniff packets
sniff([count=0,] [prn=None,] [store=1,] [offline=None,]
[lfilter=None,] + L2ListenSocket args) -> list of packets
count: number of packets to capture. 0 means infinity
store: whether to store sniffed packets or discard them
prn: function to apply to each packet. If something is returned,
it is displayed, e.g.:
prn = lambda x: x.summary()
lfilter: python function applied to each packet to determine
if further action may be done
ex: lfilter = lambda x: x.haslayer(Padding)
offline: pcap file to read packets from, instead of sniffing them
timeout: stop sniffing after a given time (default: None)
L2socket: use the provided L2socket
opened_socket: provide an object ready to use .recv() on
stop_filter: python function applied to each packet to determine
if we have to stop the capture after this packet
ex: stop_filter = lambda x: x.haslayer(TCP)
iface: interface or list of interfaces (default: None for sniffing on all
interfaces)
"""
c = 0
label = {}
sniff_sockets = []
if opened_socket is not None:
sniff_sockets = [opened_socket]
else:
if offline is None:
if L2socket is None:
L2socket = conf.L2listen
if type(iface) is list:
for i in iface:
s = L2socket(type=ETH_P_ALL, iface=i, *arg, **karg)
label[s] = i
sniff_sockets.append(s)
else:
sniff_sockets = [L2socket(type=ETH_P_ALL, iface=iface, *arg,
**karg)]
else:
sniff_sockets = [PcapReader(offline)]
lst = []
if timeout is not None:
stoptime = time.time()+timeout
remain = None
try:
stop_event = False
while not stop_event:
if timeout is not None:
remain = stoptime-time.time()
if remain <= 0:
break
sel = select(sniff_sockets, [], [], remain)
for s in sel[0]:
p = s.recv()
if p is not None:
if lfilter and not lfilter(p):
continue
if s in label:
p.sniffed_on = label[s]
if store:
lst.append(p)
c += 1
if prn:
r = prn(p)
if r is not None:
print r
if stop_filter and stop_filter(p):
stop_event = True
break
if 0 < count <= c:
stop_event = True
break
except KeyboardInterrupt:
pass
if opened_socket is None:
for s in sniff_sockets:
s.close()
return plist.PacketList(lst,"Sniffed")
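# Hypothetical illustration (not part of the original file): a typical
# sniff() call using the parameters documented above -- capture ten TCP
# packets and print a one-line summary of each:
#
#   sniff(count=10, filter="tcp", prn=lambda p: p.summary())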
@conf.commands.register
def bridge_and_sniff(if1, if2, count=0, store=1, offline=None, prn=None,
lfilter=None, L2socket=None, timeout=None,
stop_filter=None, *args, **kargs):
"""Forward traffic between two interfaces and sniff packets exchanged
bridge_and_sniff([count=0,] [prn=None,] [store=1,] [offline=None,]
[lfilter=None,] + L2Socket args) -> list of packets
count: number of packets to capture. 0 means infinity
store: whether to store sniffed packets or discard them
prn: function to apply to each packet. If something is returned,
it is displayed, e.g.:
prn = lambda x: x.summary()
lfilter: python function applied to each packet to determine
if further action may be done
ex: lfilter = lambda x: x.haslayer(Padding)
timeout: stop sniffing after a given time (default: None)
L2socket: use the provided L2socket
stop_filter: python function applied to each packet to determine
if we have to stop the capture after this packet
ex: stop_filter = lambda x: x.haslayer(TCP)
"""
c = 0
if L2socket is None:
L2socket = conf.L2socket
s1 = L2socket(iface=if1)
s2 = L2socket(iface=if2)
peerof={s1:s2,s2:s1}
label={s1:if1, s2:if2}
lst = []
if timeout is not None:
stoptime = time.time()+timeout
remain = None
try:
stop_event = False
while not stop_event:
if timeout is not None:
remain = stoptime-time.time()
if remain <= 0:
break
ins, outs, errs = select([s1, s2], [], [], remain)
for s in ins:
p = s.recv()
if p is not None:
peerof[s].send(p.original)
if lfilter and not lfilter(p):
continue
if store:
p.sniffed_on = label[s]
lst.append(p)
c += 1
if prn:
r = prn(p)
if r is not None:
print r
if stop_filter and stop_filter(p):
stop_event = True
break
if 0 < count <= c:
stop_event = True
break
except KeyboardInterrupt:
pass
finally:
return plist.PacketList(lst,"Sniffed")
@conf.commands.register
def tshark(*args,**kargs):
"""Sniff packets and print them calling pkt.show(), a bit like text wireshark"""
sniff(prn=lambda x: x.display(),*args,**kargs)
| 1 | 8,509 | You should use `isinstance()` instead of comparing the class name to a string. | secdev-scapy | py |
@@ -15,6 +15,11 @@ axe.utils.isHidden = function isHidden(el, recursed) {
return false;
}
+ // do not exclude `video` or `audio` el's
+ if ([`VIDEO`, `AUDIO`].includes(el.nodeName.toUpperCase())) {
+ return false;
+ }
+
// 11 === Node.DOCUMENT_FRAGMENT_NODE
if (el.nodeType === 11) {
el = el.host; // grab the host Node | 1 | /**
* Determine whether an element is visible
* @method isHidden
* @memberof axe.utils
* @param {HTMLElement} el The HTMLElement
* @param {Boolean} recursed
* @return {Boolean} The element's visibility status
*/
axe.utils.isHidden = function isHidden(el, recursed) {
'use strict';
const node = axe.utils.getNodeFromTree(el);
// 9 === Node.DOCUMENT_NODE
if (el.nodeType === 9) {
return false;
}
// 11 === Node.DOCUMENT_FRAGMENT_NODE
if (el.nodeType === 11) {
el = el.host; // grab the host Node
}
if (node && node._isHidden !== null) {
return node._isHidden;
}
const style = window.getComputedStyle(el, null);
if (
!style ||
(!el.parentNode ||
(style.getPropertyValue('display') === 'none' ||
(!recursed &&
// visibility is only accurate on the first element
style.getPropertyValue('visibility') === 'hidden') ||
el.getAttribute('aria-hidden') === 'true'))
) {
return true;
}
const parent = el.assignedSlot ? el.assignedSlot : el.parentNode;
const isHidden = axe.utils.isHidden(parent, true);
// cache the results of the isHidden check on the parent tree
// so we don't have to look at the parent tree again for all its
// descendants
if (node) {
node._isHidden = isHidden;
}
return isHidden;
};
| 1 | 15,190 | Why should audio and video elements always return `false` for `isHidden`? | dequelabs-axe-core | js |
@@ -27,7 +27,7 @@ namespace Playground.Core
Mvx.IoCProvider.RegisterSingleton<IMvxTextProvider>(new TextProviderBuilder().TextProvider);
- RegisterAppStart<RootViewModel>();
+ // RegisterAppStart<RootViewModel>();
}
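Commenting the registration out of the shared Initialize leaves every platform without a default app start; a minimal sketch of keeping the shared default while still allowing a platform-specific override (RegisterCustomAppStart and the MyPlatformAppStart name are assumptions for illustration):

public override void Initialize()
{
    // ...service registrations as above...

    // Shared default so every platform still gets an app start.
    RegisterAppStart<RootViewModel>();
}

// A platform needing different startup behaviour could instead register a
// custom IMvxAppStart from its own setup code, e.g.:
// RegisterCustomAppStart<MyPlatformAppStart>();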
/// <summary> | 1 | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MS-PL license.
// See the LICENSE file in the project root for more information.
using System.Threading.Tasks;
using MvvmCross;
using MvvmCross.IoC;
using MvvmCross.Localization;
using MvvmCross.ViewModels;
using Playground.Core.Services;
using Playground.Core.ViewModels;
namespace Playground.Core
{
public class App : MvxApplication
{
/// <summary>
/// Breaking change in v6: This method is called on a background thread. Use
/// Startup for any UI bound actions
/// </summary>
public override void Initialize()
{
CreatableTypes()
.EndingWith("Service")
.AsInterfaces()
.RegisterAsLazySingleton();
Mvx.IoCProvider.RegisterSingleton<IMvxTextProvider>(new TextProviderBuilder().TextProvider);
RegisterAppStart<RootViewModel>();
}
/// <summary>
/// Do any UI bound startup actions here
/// </summary>
public override Task Startup()
{
return base.Startup();
}
/// <summary>
/// If the application is restarted (eg primary activity on Android
/// can be restarted) this method will be called before Startup
/// is called again
/// </summary>
public override void Reset()
{
base.Reset();
}
}
}
| 1 | 15,623 | This obviously won't work for all the other platforms. | MvvmCross-MvvmCross | .cs |
@@ -15,7 +15,7 @@ public class ManipulationTest extends BasicJBehaveTest {
@Override
public InjectableStepsFactory stepsFactory() {
- Map<String, Object> state = new HashMap<String, Object>();
+ Map<String, Object> state = new HashMap<>();
return new InstanceStepsFactory(configuration(),
new SharedSteps(state), | 1 | package com.github.javaparser.bdd;
import com.github.javaparser.bdd.steps.ManipulationSteps;
import com.github.javaparser.bdd.steps.SharedSteps;
import de.codecentric.jbehave.junit.monitoring.JUnitReportingRunner;
import org.jbehave.core.steps.InjectableStepsFactory;
import org.jbehave.core.steps.InstanceStepsFactory;
import org.junit.runner.RunWith;
import java.util.HashMap;
import java.util.Map;
@RunWith(JUnitReportingRunner.class)
public class ManipulationTest extends BasicJBehaveTest {
@Override
public InjectableStepsFactory stepsFactory() {
Map<String, Object> state = new HashMap<String, Object>();
return new InstanceStepsFactory(configuration(),
new SharedSteps(state),
new ManipulationSteps(state));
}
public ManipulationTest() {
super("**/bdd/manipulation*.story");
}
}
| 1 | 8,876 | Isn't he diamond operator Java7+? | javaparser-javaparser | java |
@@ -159,6 +159,17 @@ public final class Require {
return number;
}
+ public static double positive(String argName, double number, String message) {
+ if (number <= 0) {
+ if (message == null) {
+ throw new IllegalArgumentException(argName + " must be greater than 0");
+ } else {
+ throw new IllegalArgumentException(message);
+ }
+ }
+ return number;
+ }
+
public static int positive(String argName, Integer number) {
return positive(argName, number, null);
} | 1 | // Licensed to the Software Freedom Conservancy (SFC) under one
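A sketch of the companion two-argument overload the reviewer asks for, mirroring the Integer pair above and delegating to the new three-parameter version so call sites never pass null themselves:

public static double positive(String argName, double number) {
  return positive(argName, number, null);
}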
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The SFC licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.openqa.selenium.internal;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Path;
import java.time.Duration;
import java.util.Objects;
/**
* A utility class to check arguments (preconditions) and state.
* <p>
* Examples of use:
* <pre>
* public void setActionWithTimeout(Action action, int timeout) {
* this.action = Require.nonNull("Action", action);
* this.timeout = Require.positive("Timeout", timeout);
* }
* </pre>
*/
public final class Require {
private static final String ARG_MUST_BE_SET = "%s must be set";
private static final String MUST_EXIST = "%s must exist: %s";
private static final String MUST_BE_DIR = "%s must be a directory: %s";
private static final String MUST_BE_FILE = "%s must be a regular file: %s";
private Require() {
// A utility class
}
public static void precondition(boolean condition, String message, Object... args) {
if (!condition) {
throw new IllegalArgumentException(String.format(message, args));
}
}
public static <T> T nonNull(String argName, T arg) {
if (arg == null) {
throw new IllegalArgumentException(String.format(ARG_MUST_BE_SET, argName));
}
return arg;
}
public static <T> T nonNull(String argName, T arg, String message, Object... args) {
if (arg == null) {
throw new IllegalArgumentException(
String.join(" ", argName, String.format(message, args)));
}
return arg;
}
public static <T> ArgumentChecker<T> argument(String argName, T arg) {
return new ArgumentChecker<>(argName, arg);
}
public static class ArgumentChecker<T> {
private final String argName;
private final T arg;
ArgumentChecker(String argName, T arg) {
this.argName = argName;
this.arg = arg;
}
public T nonNull() {
if (arg == null) {
throw new IllegalArgumentException(String.format(ARG_MUST_BE_SET, argName));
}
return arg;
}
public T nonNull(String message, Object... args) {
if (arg == null) {
throw new IllegalArgumentException(String.format(message, args));
}
return arg;
}
public T equalTo(Object other) {
if (arg == null) {
throw new IllegalArgumentException(String.format(ARG_MUST_BE_SET, argName));
}
if (!Objects.equals(arg, other)) {
throw new IllegalArgumentException(argName + " must be equal to `" + other + "`");
}
return arg;
}
public T instanceOf(Class<?> cls) {
if (arg == null) {
throw new IllegalArgumentException(String.format(ARG_MUST_BE_SET, argName));
}
if (!cls.isInstance(arg)) {
throw new IllegalArgumentException(argName + " must be an instance of " + cls);
}
return arg;
}
}
public static Duration nonNegative(String argName, Duration arg) {
if (arg == null) {
throw new IllegalArgumentException(String.format(ARG_MUST_BE_SET, argName));
}
if (arg.isNegative()) {
throw new IllegalArgumentException(argName + " must be set to 0 or more");
}
return arg;
}
public static Duration nonNegative(Duration arg) {
if (arg == null) {
throw new IllegalArgumentException("Duration must be set");
}
if (arg.isNegative()) {
throw new IllegalArgumentException("Duration must be set to 0 or more");
}
return arg;
}
public static int nonNegative(String argName, Integer number) {
if (number == null) {
throw new IllegalArgumentException(String.format(ARG_MUST_BE_SET, argName));
}
if (number < 0) {
throw new IllegalArgumentException(argName + " cannot be less than 0");
}
return number;
}
public static int positive(String argName, Integer number, String message) {
if (number == null) {
throw new IllegalArgumentException(String.format(ARG_MUST_BE_SET, argName));
}
if (number <= 0) {
if (message == null) {
throw new IllegalArgumentException(argName + " must be greater than 0");
} else {
throw new IllegalArgumentException(message);
}
}
return number;
}
public static int positive(String argName, Integer number) {
return positive(argName, number, null);
}
public static IntChecker argument(String argName, Integer number) {
return new IntChecker(argName, number);
}
public static class IntChecker {
private final String argName;
private final Integer number;
IntChecker(String argName, Integer number) {
this.argName = argName;
this.number = number;
}
public int greaterThan(int max, String message) {
if (number == null) {
throw new IllegalArgumentException(String.format(ARG_MUST_BE_SET, argName));
}
if (number <= max) {
throw new IllegalArgumentException(message);
}
return number;
}
}
public static FileChecker argument(String argName, File file) {
return new FileChecker(argName, file);
}
public static class FileChecker {
private final String argName;
private final File file;
FileChecker(String argName, File file) {
this.argName = argName;
this.file = file;
}
public File isFile() {
if (file == null) {
throw new IllegalArgumentException(String.format(ARG_MUST_BE_SET, argName));
}
if (!file.exists()) {
throw new IllegalArgumentException(
String.format(MUST_EXIST, argName, file.getAbsolutePath()));
}
if (!file.isFile()) {
throw new IllegalArgumentException(
String.format(MUST_BE_FILE, argName, file.getAbsolutePath()));
}
return file;
}
public File isDirectory() {
if (file == null) {
throw new IllegalArgumentException(String.format(ARG_MUST_BE_SET, argName));
}
if (!file.exists()) {
throw new IllegalArgumentException(
String.format(MUST_EXIST, argName, file.getAbsolutePath()));
}
if (!file.isDirectory()) {
throw new IllegalArgumentException(
String.format(MUST_BE_DIR, argName, file.getAbsolutePath()));
}
return file;
}
}
public static void stateCondition(boolean state, String message, Object... args) {
if (!state) {
throw new IllegalStateException(String.format(message, args));
}
}
public static <T> StateChecker<T> state(String name, T state) {
return new StateChecker<>(name, state);
}
public static class StateChecker<T> {
private final String name;
private final T state;
StateChecker(String name, T state) {
this.name = name;
this.state = state;
}
public T nonNull() {
if (state == null) {
throw new IllegalStateException(name + " must not be null");
}
return state;
}
public T nonNull(String message, Object... args) {
if (state == null) {
throw new IllegalStateException(String.join(" ", name, String.format(message, args)));
}
return state;
}
public T instanceOf(Class<?> cls) {
if (state == null) {
throw new IllegalStateException(String.format(ARG_MUST_BE_SET, name));
}
if (!cls.isInstance(state)) {
throw new IllegalStateException(name + " must be an instance of " + cls);
}
return state;
}
}
public static FileStateChecker state(String name, File file) {
return new FileStateChecker(name, file);
}
public static class FileStateChecker {
private final String name;
private final File file;
FileStateChecker(String name, File file) {
this.name = name;
this.file = file;
}
public File isFile() {
if (file == null) {
throw new IllegalStateException(String.format(ARG_MUST_BE_SET, name));
}
if (!file.exists()) {
throw new IllegalStateException(String.format(MUST_EXIST, name, file.getAbsolutePath()));
}
if (!file.isFile()) {
throw new IllegalStateException(String.format(MUST_BE_FILE, name, file.getAbsolutePath()));
}
return file;
}
public File isDirectory() {
if (file == null) {
throw new IllegalStateException(String.format(ARG_MUST_BE_SET, name));
}
if (!file.exists()) {
throw new IllegalStateException(String.format(MUST_EXIST, name, file.getAbsolutePath()));
}
if (!file.isDirectory()) {
throw new IllegalStateException(String.format(MUST_BE_DIR, name, file.getAbsolutePath()));
}
return file;
}
}
public static PathStateChecker state(String name, Path path) {
return new PathStateChecker(name, path);
}
public static class PathStateChecker {
private final String name;
private final Path path;
PathStateChecker(String name, Path path) {
this.name = name;
this.path = path;
}
public Path isFile() {
if (path == null) {
throw new IllegalStateException(String.format(ARG_MUST_BE_SET, name));
}
if (!Files.exists(path)) {
throw new IllegalStateException(String.format(MUST_EXIST, name, path));
}
if (!Files.isRegularFile(path)) {
throw new IllegalStateException(String.format(MUST_BE_FILE, name, path));
}
return path;
}
public Path isDirectory() {
if (path == null) {
throw new IllegalStateException(String.format(ARG_MUST_BE_SET, name));
}
if (!Files.exists(path)) {
throw new IllegalStateException(String.format(MUST_EXIST, name, path));
}
if (!Files.isDirectory(path)) {
throw new IllegalStateException(String.format(MUST_BE_DIR, name, path));
}
return path;
}
}
}
| 1 | 18,469 | Prefer adding a second `positive(String, double)` that delegates down to this three-param version. Using `null` in code is generally Not A Great Idea, and it looks ugly. | SeleniumHQ-selenium | rb |
@@ -474,7 +474,7 @@ func (rule removeCount) Name() string {
func (rule removeCount) Pattern() plan.Pattern {
return plan.Pat(universe.CountKind, plan.Any())
}
-func (rule removeCount) Rewrite(ctx context.Context, node plan.Node) (plan.Node, bool, error) {
+func (rule removeCount) Rewrite(ctx context.Context, node plan.Node, nextNodeId *int) (plan.Node, bool, error) {
return node.Predecessors()[0], true, nil
}
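Rewrite is part of the plan.Rule interface, so widening its signature forces every rule implementation to accept the extra parameter even when it goes unused; a hypothetical rule adapted to the new shape (the noopRule name is invented for illustration):

type noopRule struct{}

func (rule noopRule) Name() string { return "noopRule" }

func (rule noopRule) Pattern() plan.Pattern { return plan.Any() }

func (rule noopRule) Rewrite(ctx context.Context, node plan.Node, nextNodeId *int) (plan.Node, bool, error) {
	// nextNodeId is accepted but unused: this rule allocates no new nodes.
	return node, false, nil
}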
| 1 | package lang_test
import (
"bytes"
"context"
"encoding/json"
"math"
"strings"
"testing"
"time"
"github.com/google/go-cmp/cmp"
"github.com/influxdata/flux"
"github.com/influxdata/flux/ast"
_ "github.com/influxdata/flux/builtin"
fcsv "github.com/influxdata/flux/csv"
"github.com/influxdata/flux/dependencies/dependenciestest"
"github.com/influxdata/flux/execute"
"github.com/influxdata/flux/execute/executetest"
"github.com/influxdata/flux/lang"
"github.com/influxdata/flux/memory"
"github.com/influxdata/flux/mock"
"github.com/influxdata/flux/parser"
"github.com/influxdata/flux/plan"
"github.com/influxdata/flux/plan/plantest"
"github.com/influxdata/flux/runtime"
"github.com/influxdata/flux/stdlib/csv"
"github.com/influxdata/flux/stdlib/influxdata/influxdb"
"github.com/influxdata/flux/stdlib/universe"
"github.com/opentracing/opentracing-go"
"github.com/opentracing/opentracing-go/mocktracer"
)
func init() {
execute.RegisterSource(influxdb.FromKind, mock.CreateMockFromSource)
plan.RegisterLogicalRules(
influxdb.DefaultFromAttributes{
Org: &influxdb.NameOrID{Name: "influxdata"},
Host: func(v string) *string { return &v }("http://localhost:9999"),
},
)
}
func TestFluxCompiler(t *testing.T) {
ctx := context.Background()
for _, tc := range []struct {
name string
now time.Time
extern *ast.File
q string
jsonCompiler []byte
compilerErr string
startErr string
}{
{
name: "simple",
q: `from(bucket: "foo") |> range(start: -5m)`,
},
{
name: "syntax error",
q: `t={]`,
compilerErr: "expected RBRACE",
},
{
name: "type error",
q: `t=0 t.s`,
startErr: "type error @1:5-1:6",
},
{
name: "from with no streaming data",
q: `x = from(bucket: "foo") |> range(start: -5m)`,
startErr: "no streaming data",
},
{
name: "from with yield",
q: `x = from(bucket: "foo") |> range(start: -5m) |> yield()`,
},
{
name: "extern",
extern: &ast.File{
Body: []ast.Statement{
&ast.OptionStatement{
Assignment: &ast.VariableAssignment{
ID: &ast.Identifier{Name: "twentySix"},
Init: &ast.IntegerLiteral{Value: 26},
},
},
},
},
q: `twentySeven = twentySix + 1
twentySeven
from(bucket: "foo") |> range(start: -5m)`,
},
{
name: "extern with error",
extern: &ast.File{
Body: []ast.Statement{
&ast.OptionStatement{
Assignment: &ast.VariableAssignment{
ID: &ast.Identifier{Name: "twentySix"},
Init: &ast.IntegerLiteral{Value: 26},
},
},
},
},
q: `twentySeven = twentyFive + 2
twentySeven
from(bucket: "foo") |> range(start: -5m)`,
startErr: "undefined identifier twentyFive",
},
{
name: "with now",
now: time.Unix(1000, 0),
q: `from(bucket: "foo") |> range(start: -5m)`,
},
{
name: "extern that uses null keyword",
now: parser.MustParseTime("2020-03-24T14:24:46.15933241Z").Value,
jsonCompiler: []byte(`
{
"Now": "2020-03-24T14:24:46.15933241Z",
"extern": null,
"query": "from(bucket: \"apps\")\n |> range(start: -30s)\n |> filter(fn: (r) => r._measurement == \"query_control_queueing_active\")\n |> filter(fn: (r) => r._field == \"gauge\")\n |> filter(fn: (r) => r.env == \"acc\")\n |> group(columns: [\"host\"])\n |> last()\n |> group()\n |> mean()\n // Rename \"_value\" to \"metricValue\" for properly unmarshaling the result.\n |> rename(columns: {_value: \"metricvalue\"})\n |> keep(columns: [\"metricvalue\"])\n"
}`),
},
} {
tc := tc
t.Run(tc.name, func(t *testing.T) {
var c lang.FluxCompiler
{
if tc.q != "" {
var extern json.RawMessage
if tc.extern != nil {
var err error
extern, err = json.Marshal(tc.extern)
if err != nil {
t.Fatal(err)
}
}
c = lang.FluxCompiler{
Now: tc.now,
Extern: extern,
Query: tc.q,
}
} else if len(tc.jsonCompiler) > 0 {
if err := json.Unmarshal(tc.jsonCompiler, &c); err != nil {
t.Fatal(err)
}
} else {
t.Fatal("expected either a query, or a jsonCompiler in test case")
}
}
// serialize and deserialize and make sure they are equal
bs, err := json.Marshal(c)
if err != nil {
t.Error(err)
}
cc := lang.FluxCompiler{}
err = json.Unmarshal(bs, &cc)
if err != nil {
t.Error(err)
}
if diff := cmp.Diff(c, cc); diff != "" {
t.Errorf("compiler serialized/deserialized does not match: -want/+got:\n%v", diff)
}
program, err := c.Compile(ctx, runtime.Default)
if err != nil {
if tc.compilerErr != "" {
if !strings.Contains(err.Error(), tc.compilerErr) {
t.Fatalf(`expected query to error with "%v" but got "%v"`, tc.compilerErr, err)
} else {
return
}
}
t.Fatalf("failed to compile AST: %v", err)
} else if tc.compilerErr != "" {
t.Fatalf("expected query to error with %q, but got no error", tc.compilerErr)
}
astProg := program.(*lang.AstProgram)
if astProg.Now != tc.now {
t.Errorf(`unexpected value for now, want "%v", got "%v"`, tc.now, astProg.Now)
}
// we need to start the program to get compile errors derived from AST evaluation
ctx := executetest.NewTestExecuteDependencies().Inject(context.Background())
if _, err = program.Start(ctx, &memory.Allocator{}); tc.startErr == "" && err != nil {
t.Errorf("expected query %q to start successfully but got error %v", tc.q, err)
} else if tc.startErr != "" && err == nil {
t.Errorf("expected query %q to start with error but got no error", tc.q)
} else if tc.startErr != "" && err != nil && !strings.Contains(err.Error(), tc.startErr) {
t.Errorf(`expected query to error with "%v" but got "%v"`, tc.startErr, err)
}
})
}
}
func TestCompilationError(t *testing.T) {
program, err := lang.Compile(`illegal query`, runtime.Default, time.Unix(0, 0))
if err != nil {
// This shouldn't happen, as the script should be evaluated at program Start.
t.Fatal(err)
}
ctx := executetest.NewTestExecuteDependencies().Inject(context.Background())
_, err = program.Start(ctx, &memory.Allocator{})
if err == nil {
t.Fatal("compilation error expected, got none")
}
}
func TestASTCompiler(t *testing.T) {
testcases := []struct {
name string
now time.Time
file *ast.File
script string
jsonCompiler []byte
want plantest.PlanSpec
startErr string
}{
{
name: "override now time using now option",
now: time.Unix(1, 1),
script: `
import "csv"
option now = () => 2017-10-10T00:01:00Z
csv.from(csv: "foo,bar") |> range(start: 2017-10-10T00:00:00Z)
`,
want: plantest.PlanSpec{
Nodes: []plan.Node{
&plan.PhysicalPlanNode{Spec: &csv.FromCSVProcedureSpec{}},
&plan.PhysicalPlanNode{Spec: &universe.RangeProcedureSpec{}},
&plan.PhysicalPlanNode{Spec: &universe.YieldProcedureSpec{}},
},
Edges: [][2]int{
{0, 1},
{1, 2},
},
Resources: flux.ResourceManagement{ConcurrencyQuota: 1, MemoryBytesQuota: math.MaxInt64},
Now: parser.MustParseTime("2017-10-10T00:01:00Z").Value,
},
},
{
name: "get now time from compiler",
now: parser.MustParseTime("2018-10-10T00:00:00Z").Value,
script: `
import "csv"
csv.from(csv: "foo,bar") |> range(start: 2017-10-10T00:00:00Z)
`,
want: plantest.PlanSpec{
Nodes: []plan.Node{
&plan.PhysicalPlanNode{Spec: &csv.FromCSVProcedureSpec{}},
&plan.PhysicalPlanNode{Spec: &universe.RangeProcedureSpec{}},
&plan.PhysicalPlanNode{Spec: &universe.YieldProcedureSpec{}},
},
Edges: [][2]int{
{0, 1},
{1, 2},
},
Resources: flux.ResourceManagement{ConcurrencyQuota: 1, MemoryBytesQuota: math.MaxInt64},
Now: parser.MustParseTime("2018-10-10T00:00:00Z").Value,
},
},
{
name: "extern",
file: &ast.File{
Body: []ast.Statement{
&ast.OptionStatement{
Assignment: &ast.VariableAssignment{
ID: &ast.Identifier{Name: "now"},
Init: &ast.FunctionExpression{
Body: &ast.DateTimeLiteral{
Value: parser.MustParseTime("2018-10-10T00:00:00Z").Value,
},
},
},
},
},
},
script: `
import "csv"
csv.from(csv: "foo,bar") |> range(start: 2017-10-10T00:00:00Z)
`,
want: plantest.PlanSpec{
Nodes: []plan.Node{
&plan.PhysicalPlanNode{Spec: &csv.FromCSVProcedureSpec{}},
&plan.PhysicalPlanNode{Spec: &universe.RangeProcedureSpec{}},
&plan.PhysicalPlanNode{Spec: &universe.YieldProcedureSpec{}},
},
Edges: [][2]int{
{0, 1},
{1, 2},
},
Resources: flux.ResourceManagement{ConcurrencyQuota: 1, MemoryBytesQuota: math.MaxInt64},
Now: parser.MustParseTime("2018-10-10T00:00:00Z").Value,
},
},
{
name: "simple case",
now: parser.MustParseTime("2018-10-10T00:00:00Z").Value,
script: `x = 1`,
startErr: "no streaming data",
},
{
name: "json compiler with null keyword",
jsonCompiler: []byte(`
{
"extern": null,
"ast": {
"type": "Package",
"package": "main",
"files": [
{
"type": "File",
"metadata": "parser-type=rust",
"package": null,
"imports": [],
"body": [
{
"type": "VariableAssignment",
"id": {
"name": "x"
},
"init": {
"type": "IntegerLiteral",
"value": "1"
}
}
]
}
]
},
"Now": "2018-10-10T00:00:00Z"
}
`),
startErr: "no streaming data",
},
}
rt := runtime.Default
for _, tc := range testcases {
t.Run(tc.name, func(t *testing.T) {
var c lang.ASTCompiler
{
if tc.script != "" {
astPkg, err := rt.Parse(tc.script)
if err != nil {
t.Fatalf("failed to parse script: %v", err)
}
var jsonPkg json.RawMessage
jsonPkg, err = parser.HandleToJSON(astPkg)
if err != nil {
t.Fatal(err)
}
// The JSON produced by Rust does not escape characters like ">", but
// Go does, so we need to use HTMLEscape to make the roundtrip the same.
var buf bytes.Buffer
json.HTMLEscape(&buf, jsonPkg)
jsonPkg = buf.Bytes()
c = lang.ASTCompiler{
AST: jsonPkg,
Now: tc.now,
}
if tc.file != nil {
bs, err := json.Marshal(tc.file)
if err != nil {
t.Fatal(err)
}
c.Extern = bs
}
} else if len(tc.jsonCompiler) > 0 {
var bb bytes.Buffer
if err := json.Compact(&bb, tc.jsonCompiler); err != nil {
t.Fatal(err)
}
if err := json.Unmarshal(bb.Bytes(), &c); err != nil {
t.Fatal(err)
}
} else {
t.Fatal("expected either script of jsonCompiler in test case")
}
}
// serialize and deserialize and make sure they are equal
bs, err := json.Marshal(c)
if err != nil {
t.Error(err)
}
cc := lang.ASTCompiler{}
err = json.Unmarshal(bs, &cc)
if err != nil {
t.Error(err)
}
if diff := cmp.Diff(c, cc); diff != "" {
t.Errorf("compiler serialized/deserialized does not match: -want/+got:\n%v", diff)
}
program, err := c.Compile(context.Background(), runtime.Default)
if err != nil {
t.Fatalf("failed to compile AST: %v", err)
}
ctx := executetest.NewTestExecuteDependencies().Inject(context.Background())
// we need to start the program to get compile errors derived from AST evaluation
if _, err := program.Start(ctx, &memory.Allocator{}); err != nil {
if tc.startErr == "" {
t.Fatalf("failed to start program: %v", err)
} else {
// We expect an error, did we get the right one?
if !strings.Contains(err.Error(), tc.startErr) {
t.Fatalf("expected to get an error containing %q but got %q", tc.startErr, err.Error())
}
return
}
}
got := program.(*lang.AstProgram).PlanSpec
want := plantest.CreatePlanSpec(&tc.want)
if err := plantest.ComparePlansShallow(want, got); err != nil {
t.Error(err)
}
})
}
}
func TestCompileOptions(t *testing.T) {
src := `import "csv"
csv.from(csv: "foo,bar")
|> range(start: 2017-10-10T00:00:00Z)
|> count()`
now := parser.MustParseTime("2018-10-10T00:00:00Z").Value
opt := lang.WithLogPlanOpts(plan.OnlyLogicalRules(removeCount{}))
program, err := lang.Compile(src, runtime.Default, now, opt)
if err != nil {
t.Fatalf("failed to compile script: %v", err)
}
// start program in order to evaluate planner options
ctx := executetest.NewTestExecuteDependencies().Inject(context.Background())
if _, err := program.Start(ctx, &memory.Allocator{}); err != nil {
t.Fatalf("failed to start program: %v", err)
}
want := plantest.CreatePlanSpec(&plantest.PlanSpec{
Nodes: []plan.Node{
&plan.PhysicalPlanNode{Spec: &csv.FromCSVProcedureSpec{}},
&plan.PhysicalPlanNode{Spec: &universe.RangeProcedureSpec{}},
&plan.PhysicalPlanNode{Spec: &universe.YieldProcedureSpec{}},
},
Edges: [][2]int{
{0, 1},
{1, 2},
},
Resources: flux.ResourceManagement{ConcurrencyQuota: 1, MemoryBytesQuota: math.MaxInt64},
Now: parser.MustParseTime("2018-10-10T00:00:00Z").Value,
})
if err := plantest.ComparePlansShallow(want, program.PlanSpec); err != nil {
t.Fatalf("unexpected plans: %v", err)
}
}
type removeCount struct{}
func (rule removeCount) Name() string {
return "removeCountRule"
}
func (rule removeCount) Pattern() plan.Pattern {
return plan.Pat(universe.CountKind, plan.Any())
}
func (rule removeCount) Rewrite(ctx context.Context, node plan.Node) (plan.Node, bool, error) {
return node.Predecessors()[0], true, nil
}
func TestCompileOptions_FromFluxOptions(t *testing.T) {
nowFn := func() time.Time {
return parser.MustParseTime("2018-10-10T00:00:00Z").Value
}
plan.RegisterLogicalRules(&removeCount{})
tcs := []struct {
name string
files []string
want *plan.Spec
wantErr string
}{
{
name: "no planner option set",
files: []string{`
import "planner"
from(bucket: "bkt") |> range(start: 0) |> filter(fn: (r) => r._value > 0) |> count()`},
want: plantest.CreatePlanSpec(&plantest.PlanSpec{
Nodes: []plan.Node{
&plan.PhysicalPlanNode{Spec: &influxdb.FromRemoteProcedureSpec{}},
&plan.PhysicalPlanNode{Spec: &universe.YieldProcedureSpec{}},
},
Edges: [][2]int{
{0, 1},
},
Resources: flux.ResourceManagement{ConcurrencyQuota: 1, MemoryBytesQuota: math.MaxInt64},
Now: nowFn(),
}),
},
{
name: "remove push down filter",
files: []string{`
import "planner"
option planner.disablePhysicalRules = ["influxdata/influxdb.MergeRemoteFilterRule"]
from(bucket: "bkt") |> range(start: 0) |> filter(fn: (r) => r._value > 0) |> count()`},
want: plantest.CreatePlanSpec(&plantest.PlanSpec{
Nodes: []plan.Node{
&plan.PhysicalPlanNode{Spec: &influxdb.FromRemoteProcedureSpec{}},
&plan.PhysicalPlanNode{Spec: &universe.FilterProcedureSpec{}},
&plan.PhysicalPlanNode{Spec: &universe.YieldProcedureSpec{}},
},
Edges: [][2]int{
{0, 1},
{1, 2},
},
Resources: flux.ResourceManagement{ConcurrencyQuota: 1, MemoryBytesQuota: math.MaxInt64},
Now: nowFn(),
}),
},
{
name: "remove push down filter and count",
files: []string{`
import "planner"
option planner.disablePhysicalRules = ["influxdata/influxdb.MergeRemoteFilterRule"]
option planner.disableLogicalRules = ["removeCountRule"]
from(bucket: "bkt") |> range(start: 0) |> filter(fn: (r) => r._value > 0) |> count()`},
want: plantest.CreatePlanSpec(&plantest.PlanSpec{
Nodes: []plan.Node{
&plan.PhysicalPlanNode{Spec: &influxdb.FromRemoteProcedureSpec{}},
&plan.PhysicalPlanNode{Spec: &universe.FilterProcedureSpec{}},
&plan.PhysicalPlanNode{Spec: &universe.CountProcedureSpec{}},
&plan.PhysicalPlanNode{Spec: &universe.YieldProcedureSpec{}},
},
Edges: [][2]int{
{0, 1},
{1, 2},
{2, 3},
},
Resources: flux.ResourceManagement{ConcurrencyQuota: 1, MemoryBytesQuota: math.MaxInt64},
Now: nowFn(),
}),
},
{
name: "remove push down filter and count - with non existent rule",
files: []string{`
import "planner"
option planner.disablePhysicalRules = ["influxdata/influxdb.MergeRemoteFilterRule", "non_existent"]
option planner.disableLogicalRules = ["removeCountRule", "non_existent"]
from(bucket: "bkt") |> range(start: 0) |> filter(fn: (r) => r._value > 0) |> count()`},
want: plantest.CreatePlanSpec(&plantest.PlanSpec{
Nodes: []plan.Node{
&plan.PhysicalPlanNode{Spec: &influxdb.FromRemoteProcedureSpec{}},
&plan.PhysicalPlanNode{Spec: &universe.FilterProcedureSpec{}},
&plan.PhysicalPlanNode{Spec: &universe.CountProcedureSpec{}},
&plan.PhysicalPlanNode{Spec: &universe.YieldProcedureSpec{}},
},
Edges: [][2]int{
{0, 1},
{1, 2},
{2, 3},
},
Resources: flux.ResourceManagement{ConcurrencyQuota: 1, MemoryBytesQuota: math.MaxInt64},
Now: nowFn(),
}),
},
{
name: "remove non existent rules does not produce any effect",
files: []string{`
import "planner"
option planner.disablePhysicalRules = ["foo", "bar", "mew", "buz", "foxtrot"]
option planner.disableLogicalRules = ["foo", "bar", "mew", "buz", "foxtrot"]
from(bucket: "bkt") |> range(start: 0) |> filter(fn: (r) => r._value > 0) |> count()`},
want: plantest.CreatePlanSpec(&plantest.PlanSpec{
Nodes: []plan.Node{
&plan.PhysicalPlanNode{Spec: &influxdb.FromRemoteProcedureSpec{}},
&plan.PhysicalPlanNode{Spec: &universe.YieldProcedureSpec{}},
},
Edges: [][2]int{
{0, 1},
},
Resources: flux.ResourceManagement{ConcurrencyQuota: 1, MemoryBytesQuota: math.MaxInt64},
Now: nowFn(),
}),
},
{
name: "empty planner option does not produce any effect",
files: []string{`
import "planner"
option planner.disablePhysicalRules = [""]
option planner.disableLogicalRules = [""]
from(bucket: "bkt") |> range(start: 0) |> filter(fn: (r) => r._value > 0) |> count()`},
want: plantest.CreatePlanSpec(&plantest.PlanSpec{
Nodes: []plan.Node{
&plan.PhysicalPlanNode{Spec: &influxdb.FromRemoteProcedureSpec{}},
&plan.PhysicalPlanNode{Spec: &universe.YieldProcedureSpec{}},
},
Edges: [][2]int{
{0, 1},
},
Resources: flux.ResourceManagement{ConcurrencyQuota: 1, MemoryBytesQuota: math.MaxInt64},
Now: nowFn(),
}),
},
{
name: "logical planner option must be an array",
files: []string{`
import "planner"
option planner.disableLogicalRules = "not an array"
// remember to return streaming data
from(bucket: "does_not_matter")`},
wantErr: `type error @4:38-4:52: expected [string] but found string`,
},
{
name: "physical planner option must be an array",
files: []string{`
import "planner"
option planner.disablePhysicalRules = "not an array"
// remember to return streaming data
from(bucket: "does_not_matter")`},
wantErr: `type error @4:39-4:53: expected [string] but found string`,
},
{
name: "logical planner option must be an array of strings",
files: []string{`
import "planner"
option planner.disableLogicalRules = [1.0]
// remember to return streaming data
from(bucket: "does_not_matter")`},
wantErr: `type error @4:38-4:43: expected string but found float`,
},
{
name: "physical planner option must be an array of strings",
files: []string{`
import "planner"
option planner.disablePhysicalRules = [1.0]
// remember to return streaming data
from(bucket: "does_not_matter")`},
wantErr: `type error @4:39-4:44: expected string but found float`,
},
{
name: "planner is an object defined by the user",
files: []string{`
planner = {
disablePhysicalRules: ["fromRangeRule"],
disableLogicalRules: ["removeCountRule"]
}
from(bucket: "bkt") |> range(start: 0) |> filter(fn: (r) => r._value > 0) |> count()`},
// This shouldn't change the plan.
want: plantest.CreatePlanSpec(&plantest.PlanSpec{
Nodes: []plan.Node{
&plan.PhysicalPlanNode{Spec: &influxdb.FromRemoteProcedureSpec{}},
&plan.PhysicalPlanNode{Spec: &universe.YieldProcedureSpec{}},
},
Edges: [][2]int{
{0, 1},
},
Resources: flux.ResourceManagement{ConcurrencyQuota: 1, MemoryBytesQuota: math.MaxInt64},
Now: nowFn(),
}),
},
{
name: "use planner option with alias",
files: []string{`
import pl "planner"
option pl.disablePhysicalRules = ["influxdata/influxdb.MergeRemoteFilterRule"]
option pl.disableLogicalRules = ["removeCountRule"]
from(bucket: "bkt") |> range(start: 0) |> filter(fn: (r) => r._value > 0) |> count()`},
want: plantest.CreatePlanSpec(&plantest.PlanSpec{
Nodes: []plan.Node{
&plan.PhysicalPlanNode{Spec: &influxdb.FromRemoteProcedureSpec{}},
&plan.PhysicalPlanNode{Spec: &universe.FilterProcedureSpec{}},
&plan.PhysicalPlanNode{Spec: &universe.CountProcedureSpec{}},
&plan.PhysicalPlanNode{Spec: &universe.YieldProcedureSpec{}},
},
Edges: [][2]int{
{0, 1},
{1, 2},
{2, 3},
},
Resources: flux.ResourceManagement{ConcurrencyQuota: 1, MemoryBytesQuota: math.MaxInt64},
Now: nowFn(),
}),
},
{
name: "multiple files - splitting options setting",
files: []string{
`package main
import pl "planner"
option pl.disablePhysicalRules = ["influxdata/influxdb.MergeRemoteFilterRule"]
from(bucket: "bkt") |> range(start: 0) |> filter(fn: (r) => r._value > 0) |> count()`,
`package foo
import "planner"
option planner.disableLogicalRules = ["removeCountRule"]`},
want: plantest.CreatePlanSpec(&plantest.PlanSpec{
Nodes: []plan.Node{
&plan.PhysicalPlanNode{Spec: &influxdb.FromRemoteProcedureSpec{}},
&plan.PhysicalPlanNode{Spec: &universe.FilterProcedureSpec{}},
&plan.PhysicalPlanNode{Spec: &universe.CountProcedureSpec{}},
&plan.PhysicalPlanNode{Spec: &universe.YieldProcedureSpec{}},
},
Edges: [][2]int{
{0, 1},
{1, 2},
{2, 3},
},
Resources: flux.ResourceManagement{ConcurrencyQuota: 1, MemoryBytesQuota: math.MaxInt64},
Now: nowFn(),
}),
},
}
for _, tc := range tcs {
t.Run(tc.name, func(t *testing.T) {
if strings.HasPrefix(tc.name, "multiple files") {
t.Skip("how should options behave with multiple files?")
}
if len(tc.files) == 0 {
t.Fatal("the test should have at least one file")
}
astPkg, err := runtime.Parse(tc.files[0])
if err != nil {
t.Fatal(err)
}
if len(tc.files) > 1 {
for _, file := range tc.files[1:] {
otherPkg, err := runtime.Parse(file)
if err != nil {
t.Fatal(err)
}
if err := runtime.MergePackages(astPkg, otherPkg); err != nil {
t.Fatal(err)
}
}
}
program := lang.CompileAST(astPkg, runtime.Default, nowFn())
ctx := executetest.NewTestExecuteDependencies().Inject(context.Background())
if _, err := program.Start(ctx, &memory.Allocator{}); err != nil {
if tc.wantErr == "" {
t.Fatalf("failed to start program: %v", err)
} else if got := getRootErr(err); tc.wantErr != got.Error() {
t.Fatalf("wrong error -want/+got:\n\t- %s\n\t+ %s", tc.wantErr, got)
}
return
} else if tc.wantErr != "" {
t.Fatalf("expected error, got none")
}
if err := plantest.ComparePlansShallow(tc.want, program.PlanSpec); err != nil {
t.Errorf("unexpected plans: %v", err)
}
})
}
}
func TestQueryTracing(t *testing.T) {
// temporarily install a mock tracer to see which spans are created.
oldTracer := opentracing.GlobalTracer()
defer opentracing.SetGlobalTracer(oldTracer)
mockTracer := mocktracer.New()
opentracing.SetGlobalTracer(mockTracer)
plainCtx := context.Background()
for _, ctx := range []context.Context{flux.WithQueryTracingEnabled(plainCtx), plainCtx} {
// Clear spans from previous run
mockTracer.Reset()
var name string
if flux.IsQueryTracingEnabled(ctx) {
name = "tracing enabled"
} else {
name = "tracing disabled"
}
t.Run(name, func(t *testing.T) {
// Run a query
c := lang.FluxCompiler{
Query: `
import "experimental/array"
array.from(rows: [{key: 1, value: 2}, {key: 3, value: 4}])
|> filter(fn: (r) => r.value == 2)
|> map(fn: (r) => ({r with foo: "hi"}))`,
}
prog, err := c.Compile(ctx, runtime.Default)
if err != nil {
t.Fatal(err)
}
q, err := prog.Start(ctx, &memory.Allocator{})
if err != nil {
t.Fatal(err)
}
defer q.Done()
for r := range q.Results() {
if err := r.Tables().Do(func(flux.Table) error {
return nil
}); err != nil {
t.Fatal(err)
}
}
if err := q.Err(); err != nil {
t.Fatal(err)
}
// If tracing was enabled, then we should see spans for each
// source and transformation. If tracing is not enabled, we should
// not have those spans.
gotOps := make(map[string]struct{})
for _, span := range mockTracer.FinishedSpans() {
gotOps[span.OperationName] = struct{}{}
}
wantOps := []string{
"*array.tableSource",
"*universe.filterTransformation",
"*universe.mapTransformation",
}
for _, wantOp := range wantOps {
_, ok := gotOps[wantOp]
if flux.IsQueryTracingEnabled(ctx) {
if !ok {
t.Errorf("expected to find span %q but it wasn't there", wantOp)
}
} else {
if ok {
t.Errorf("did not expect to find span %q but it was found", wantOp)
}
}
}
})
}
}
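// getRootErr unwraps nested *flux.Error values, returning the innermost
// wrapped error (or the error itself when it is not a *flux.Error).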
func getRootErr(err error) error {
if err == nil {
return err
}
fe, ok := err.(*flux.Error)
if !ok {
return err
}
if fe == nil {
return fe
}
if fe.Err == nil {
return fe
}
return getRootErr(fe.Err)
}
// TestTableObjectCompiler evaluates a simple `from |> range |> filter` script on csv data and
// extracts the TableObjects produced by evaluation. It then compiles each TableObject and
// compares the results it obtains with the expected ones (decoded from the raw csv data).
func TestTableObjectCompiler(t *testing.T) {
dataRaw := `#datatype,string,long,dateTime:RFC3339,long,string,string,string,string
#group,false,false,false,false,false,false,true,true
#default,_result,,,,,,,
,result,table,_time,_value,_field,_measurement,host,name
,,0,2018-05-22T19:53:26Z,15204688,io_time,diskio,host.local,disk0
,,0,2018-05-22T19:53:36Z,15204894,io_time,diskio,host.local,disk0
,,0,2018-05-22T19:53:46Z,15205102,io_time,diskio,host.local,disk0
,,0,2018-05-22T19:53:56Z,15205226,io_time,diskio,host.local,disk0
,,0,2018-05-22T19:54:06Z,15205499,io_time,diskio,host.local,disk0
,,0,2018-05-22T19:54:16Z,15205755,io_time,diskio,host.local,disk0
,,1,2018-05-22T19:53:26Z,648,io_time,diskio,host.local,disk2
,,1,2018-05-22T19:53:36Z,648,io_time,diskio,host.local,disk2
,,1,2018-05-22T19:53:46Z,648,io_time,diskio,host.local,disk2
,,1,2018-05-22T19:53:56Z,648,io_time,diskio,host.local,disk2
,,1,2018-05-22T19:54:06Z,648,io_time,diskio,host.local,disk2
,,1,2018-05-22T19:54:16Z,648,io_time,diskio,host.local,disk2
`
rangedDataRaw := `#datatype,string,long,dateTime:RFC3339,dateTime:RFC3339,dateTime:RFC3339,long,string,string,string,string
#group,false,false,true,true,false,false,false,false,true,true
#default,_result,,,,,,,,,
,result,table,_start,_stop,_time,_value,_field,_measurement,host,name
,,0,2017-10-10T00:00:00Z,2018-05-22T19:54:00Z,2018-05-22T19:53:26Z,15204688,io_time,diskio,host.local,disk0
,,0,2017-10-10T00:00:00Z,2018-05-22T19:54:00Z,2018-05-22T19:53:36Z,15204894,io_time,diskio,host.local,disk0
,,0,2017-10-10T00:00:00Z,2018-05-22T19:54:00Z,2018-05-22T19:53:46Z,15205102,io_time,diskio,host.local,disk0
,,0,2017-10-10T00:00:00Z,2018-05-22T19:54:00Z,2018-05-22T19:53:56Z,15205226,io_time,diskio,host.local,disk0
,,1,2017-10-10T00:00:00Z,2018-05-22T19:54:00Z,2018-05-22T19:53:26Z,648,io_time,diskio,host.local,disk2
,,1,2017-10-10T00:00:00Z,2018-05-22T19:54:00Z,2018-05-22T19:53:36Z,648,io_time,diskio,host.local,disk2
,,1,2017-10-10T00:00:00Z,2018-05-22T19:54:00Z,2018-05-22T19:53:46Z,648,io_time,diskio,host.local,disk2
,,1,2017-10-10T00:00:00Z,2018-05-22T19:54:00Z,2018-05-22T19:53:56Z,648,io_time,diskio,host.local,disk2
`
filteredDataRaw := `#datatype,string,long,dateTime:RFC3339,dateTime:RFC3339,dateTime:RFC3339,long,string,string,string,string
#group,false,false,true,true,false,false,false,false,true,true
#default,_result,,,,,,,,,
,result,table,_start,_stop,_time,_value,_field,_measurement,host,name
,,1,2017-10-10T00:00:00Z,2018-05-22T19:54:00Z,2018-05-22T19:53:26Z,648,io_time,diskio,host.local,disk2
,,1,2017-10-10T00:00:00Z,2018-05-22T19:54:00Z,2018-05-22T19:53:36Z,648,io_time,diskio,host.local,disk2
,,1,2017-10-10T00:00:00Z,2018-05-22T19:54:00Z,2018-05-22T19:53:46Z,648,io_time,diskio,host.local,disk2
,,1,2017-10-10T00:00:00Z,2018-05-22T19:54:00Z,2018-05-22T19:53:56Z,648,io_time,diskio,host.local,disk2
`
script := `import "csv"
data = "` + dataRaw + `"
csv.from(csv: data)
|> range(start: 2017-10-10T00:00:00Z, stop: 2018-05-22T19:54:00Z)
|> filter(fn: (r) => r._value < 1000)`
wantFrom := getTablesFromRawOrFail(t, dataRaw)
wantRange := getTablesFromRawOrFail(t, rangedDataRaw)
wantFilter := getTablesFromRawOrFail(t, filteredDataRaw)
vs, _, err := runtime.Eval(dependenciestest.Default().Inject(context.Background()), script)
if err != nil {
t.Fatal(err)
}
if len(vs) != 1 {
t.Fatalf("wrong number of side effect values, got %d", len(vs))
}
to, ok := vs[0].Value.(*flux.TableObject)
if !ok {
t.Fatalf("expected TableObject but instead got %T", vs[0].Value)
}
tos := flattenTableObjects(to, []*flux.TableObject{})
fromCsvTO := tos[0]
if fromCsvTO.Kind != csv.FromCSVKind {
t.Fatalf("unexpected kind for fromCSV: %s", fromCsvTO.Kind)
}
rangeTO := tos[1]
if rangeTO.Kind != universe.RangeKind {
t.Fatalf("unexpected kind for range: %s", rangeTO.Kind)
}
filterTO := tos[2]
if filterTO.Kind != universe.FilterKind {
t.Fatalf("unexpected kind for filter: %s", filterTO.Kind)
}
compareTableObjectWithTables(t, fromCsvTO, wantFrom)
compareTableObjectWithTables(t, rangeTO, wantRange)
compareTableObjectWithTables(t, filterTO, wantFilter)
// run it twice to ensure compilation is idempotent and there are no side-effects
compareTableObjectWithTables(t, fromCsvTO, wantFrom)
compareTableObjectWithTables(t, rangeTO, wantRange)
compareTableObjectWithTables(t, filterTO, wantFilter)
}
func compareTableObjectWithTables(t *testing.T, to *flux.TableObject, want []*executetest.Table) {
t.Helper()
got := getTableObjectTablesOrFail(t, to)
if !cmp.Equal(want, got) {
t.Fatalf("unexpected result -want/+got\n\n%s\n\n", cmp.Diff(want, got))
}
}
func getTablesFromRawOrFail(t *testing.T, rawData string) []*executetest.Table {
t.Helper()
b := bytes.NewReader([]byte(rawData))
result, err := fcsv.NewResultDecoder(fcsv.ResultDecoderConfig{}).Decode(b)
if err != nil {
t.Fatal(err)
}
return getTablesFromResultOrFail(t, result)
}
func getTableObjectTablesOrFail(t *testing.T, to *flux.TableObject) []*executetest.Table {
t.Helper()
toc := lang.TableObjectCompiler{
Tables: to,
}
program, err := toc.Compile(context.Background())
if err != nil {
t.Fatal(err)
}
ctx := executetest.NewTestExecuteDependencies().Inject(context.Background())
q, err := program.Start(ctx, &memory.Allocator{})
if err != nil {
t.Fatal(err)
}
result := <-q.Results()
if _, ok := <-q.Results(); ok {
t.Fatalf("got more then one result for %s", to.Kind)
}
tables := getTablesFromResultOrFail(t, result)
q.Done()
if err := q.Err(); err != nil {
t.Fatal(err)
}
return tables
}
func getTablesFromResultOrFail(t *testing.T, result flux.Result) []*executetest.Table {
t.Helper()
tables := make([]*executetest.Table, 0)
if err := result.Tables().Do(func(table flux.Table) error {
converted, err := executetest.ConvertTable(table)
if err != nil {
return err
}
tables = append(tables, converted)
return nil
}); err != nil {
t.Fatal(err)
}
executetest.NormalizeTables(tables)
return tables
}
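// flattenTableObjects walks the TableObject graph depth-first, appending
// each node after its parents, so upstream operations (e.g. the source)
// precede downstream ones in the returned slice.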
func flattenTableObjects(to *flux.TableObject, arr []*flux.TableObject) []*flux.TableObject {
for _, parent := range to.Parents {
arr = flattenTableObjects(parent, arr)
}
return append(arr, to)
}
| 1 | 15,288 | This pattern, where we add a new parameter without using it, often indicates to me that we've got a leaky interface or abstraction. I see this pattern _a lot_ in this patch, so I'm wondering if you have thoughts about it. | influxdata-flux | go |
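Editor's note on the review comment above: the smell it describes is a general API-design point rather than anything visible in this row alone. As a hedged, purely hypothetical Go sketch (none of these names come from flux), compare a constructor that accepts a parameter it never reads with one that takes optional dependencies through a small struct, so that adding a knob later does not ripple through every signature:

package main

import "fmt"

// Smell: every caller must pass metaClient even though this
// constructor ignores it entirely.
func newReaderLeaky(bucket string, metaClient interface{}) string {
	_ = metaClient // accepted only to satisfy the widened signature
	return "reader:" + bucket
}

// Alternative: optional dependencies travel in a struct; a field added
// later is zero-valued at existing call sites instead of forcing edits.
type readerDeps struct {
	MetaClient interface{} // consumed only by readers that need it
}

func newReader(bucket string, deps readerDeps) string {
	return "reader:" + bucket
}

func main() {
	fmt.Println(newReaderLeaky("bkt", nil))
	fmt.Println(newReader("bkt", readerDeps{}))
}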
@@ -164,10 +164,13 @@ func WriteWordpressConfig(wordpressConfig *WordpressConfig, filePath string) err
return err
}
- // Ensure target directory is writable.
+ // Ensure target directory exists and is writeable
dir := filepath.Dir(filePath)
- err = os.Chmod(dir, 0755)
- if err != nil {
+ if err = os.Chmod(dir, 0755); os.IsNotExist(err) {
+ if err = os.MkdirAll(dir, 0755); err != nil {
+ return err
+ }
+ } else if err != nil {
return err
}
| 1 | package ddevapp
import (
"os"
"path/filepath"
"text/template"
"fmt"
"github.com/Masterminds/sprig"
"github.com/drud/ddev/pkg/archive"
"github.com/drud/ddev/pkg/fileutil"
"github.com/drud/ddev/pkg/output"
"github.com/drud/ddev/pkg/util"
)
// WordpressConfig encapsulates all the configurations for a WordPress site.
type WordpressConfig struct {
WPGeneric bool
DeployName string
DeployURL string
DatabaseName string
DatabaseUsername string
DatabasePassword string
DatabaseHost string
AuthKey string
SecureAuthKey string
LoggedInKey string
NonceKey string
AuthSalt string
SecureAuthSalt string
LoggedInSalt string
NonceSalt string
Docroot string
TablePrefix string
Signature string
}
// NewWordpressConfig produces a WordpressConfig object with defaults.
func NewWordpressConfig() *WordpressConfig {
return &WordpressConfig{
WPGeneric: false,
DatabaseName: "db",
DatabaseUsername: "db",
DatabasePassword: "db",
DatabaseHost: "db",
Docroot: "/var/www/html/docroot",
TablePrefix: "wp_",
AuthKey: util.RandString(64),
AuthSalt: util.RandString(64),
LoggedInKey: util.RandString(64),
LoggedInSalt: util.RandString(64),
NonceKey: util.RandString(64),
NonceSalt: util.RandString(64),
SecureAuthKey: util.RandString(64),
SecureAuthSalt: util.RandString(64),
Signature: DdevFileSignature,
}
}
// wordPressHooks adds a wp-specific hooks example for post-import-db
const wordPressHooks = `
# Un-comment and enter the production url and local url
# to replace in your database after import.
# post-import-db:
# - exec: wp search-replace <production-url> <local-url>`
// getWordpressHooks for appending as byte array
func getWordpressHooks() []byte {
return []byte(wordPressHooks)
}
// getWordpressUploadDir just returns a static upload files directory string.
func getWordpressUploadDir(app *DdevApp) string {
return "wp-content/uploads"
}
const (
wordpressTemplate = `<?php
{{ $config := . }}
/**
{{ $config.Signature }}: Automatically generated WordPress wp-config.php file.
ddev manages this file and may delete or overwrite the file unless this comment is removed.
*/
// ** MySQL settings - You can get this info from your web host ** //
/** The name of the database for WordPress */
define('DB_NAME', '{{ $config.DatabaseName }}');
/** MySQL database username */
define('DB_USER', '{{ $config.DatabaseUsername }}');
/** MySQL database password */
define('DB_PASSWORD', '{{ $config.DatabasePassword }}');
/** MySQL hostname */
define('DB_HOST', '{{ $config.DatabaseHost }}');
/** Database Charset to use in creating database tables. */
define('DB_CHARSET', 'utf8mb4');
/** The Database Collate type. Don't change this if in doubt. */
define('DB_COLLATE', '');
/**
* WordPress Database Table prefix.
*/
$table_prefix = '{{ $config.TablePrefix }}';
/**
* For developers: WordPress debugging mode.
*/
define('WP_DEBUG', false);
/**#@+
* Authentication Unique Keys and Salts.
*/
define( 'AUTH_KEY', '{{ $config.AuthKey }}' );
define( 'SECURE_AUTH_KEY', '{{ $config.SecureAuthKey }}' );
define( 'LOGGED_IN_KEY', '{{ $config.LoggedInKey }}' );
define( 'NONCE_KEY', '{{ $config.NonceKey }}' );
define( 'AUTH_SALT', '{{ $config.AuthSalt }}' );
define( 'SECURE_AUTH_SALT', '{{ $config.SecureAuthSalt }}' );
define( 'LOGGED_IN_SALT', '{{ $config.LoggedInSalt }}' );
define( 'NONCE_SALT', '{{ $config.NonceSalt }}' );
/* That's all, stop editing! Happy blogging. */
/** Absolute path to the WordPress directory. */
if ( !defined('ABSPATH') )
define('ABSPATH', dirname(__FILE__) . '/');
/**
Sets up WordPress vars and included files.
wp-settings.php is typically included in wp-config.php. This check ensures it is not
included again if this file is written to wp-config-local.php.
*/
if (basename(__FILE__) == "wp-config.php") {
require_once(ABSPATH . '/wp-settings.php');
}
`
)
// createWordpressSettingsFile creates a wordpress settings file from a
// template. Returns the full path of the settings file, plus any error.
func createWordpressSettingsFile(app *DdevApp) (string, error) {
settingsFilePath := app.SiteSettingsPath
if settingsFilePath == "" {
settingsFilePath = app.SiteLocalSettingsPath
}
output.UserOut.Printf("Generating %s file for database connection.", filepath.Base(settingsFilePath))
wpConfig := NewWordpressConfig()
wpConfig.DeployURL = app.GetHTTPURL()
err := WriteWordpressConfig(wpConfig, settingsFilePath)
return settingsFilePath, err
}
// WriteWordpressConfig dynamically produces valid wp-config.php file by combining a configuration
// object with a data-driven template.
func WriteWordpressConfig(wordpressConfig *WordpressConfig, filePath string) error {
tmpl, err := template.New("wordpressConfig").Funcs(sprig.TxtFuncMap()).Parse(wordpressTemplate)
if err != nil {
return err
}
// Ensure target directory is writable.
dir := filepath.Dir(filePath)
err = os.Chmod(dir, 0755)
if err != nil {
return err
}
file, err := os.Create(filePath)
if err != nil {
return err
}
err = tmpl.Execute(file, wordpressConfig)
if err != nil {
return err
}
util.CheckClose(file)
return nil
}
// setWordpressSiteSettingsPaths sets the expected settings files paths for
// a wordpress site.
func setWordpressSiteSettingsPaths(app *DdevApp) {
settingsFileBasePath := filepath.Join(app.AppRoot, app.Docroot)
var settingsFilePath, localSettingsFilePath string
settingsFilePath = filepath.Join(settingsFileBasePath, "wp-config.php")
localSettingsFilePath = filepath.Join(settingsFileBasePath, "wp-config-local.php")
app.SiteSettingsPath = settingsFilePath
app.SiteLocalSettingsPath = localSettingsFilePath
}
// isWordpressApp returns true if the app is of type wordpress
func isWordpressApp(app *DdevApp) bool {
if _, err := os.Stat(filepath.Join(app.AppRoot, app.Docroot, "wp-login.php")); err == nil {
return true
}
return false
}
// wordpressPostImportDBAction just emits a warning about updating URLs as is
// required with wordpress when running on a different URL.
func wordpressPostImportDBAction(app *DdevApp) error {
util.Warning("Wordpress sites require a search/replace of the database when the URL is changed. You can run \"ddev exec wp search-replace [http://www.myproductionsite.example] %s\" to update the URLs across your database. For more information, see http://wp-cli.org/commands/search-replace/", app.GetHTTPURL())
return nil
}
// wordpressImportFilesAction defines the Wordpress workflow for importing project files.
// The Wordpress workflow is currently identical to the Drupal import-files workflow.
func wordpressImportFilesAction(app *DdevApp, importPath, extPath string) error {
destPath := filepath.Join(app.GetAppRoot(), app.GetDocroot(), app.GetUploadDir())
// parent of destination dir should exist
if !fileutil.FileExists(filepath.Dir(destPath)) {
return fmt.Errorf("unable to import to %s: parent directory does not exist", destPath)
}
// parent of destination dir should be writable.
if err := os.Chmod(filepath.Dir(destPath), 0755); err != nil {
return err
}
// If the destination path exists, remove it, as the user was warned
if fileutil.FileExists(destPath) {
if err := os.RemoveAll(destPath); err != nil {
return fmt.Errorf("failed to cleanup %s before import: %v", destPath, err)
}
}
if isTar(importPath) {
if err := archive.Untar(importPath, destPath, extPath); err != nil {
return fmt.Errorf("failed to extract provided archive: %v", err)
}
return nil
}
if isZip(importPath) {
if err := archive.Unzip(importPath, destPath, extPath); err != nil {
return fmt.Errorf("failed to extract provided archive: %v", err)
}
return nil
}
if err := fileutil.CopyDir(importPath, destPath); err != nil {
return err
}
return nil
}
| 1 | 13,064 | This seems like an improved pattern :) | drud-ddev | go |
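Editor's note: the "improved pattern" the reviewer is endorsing is the chmod-then-create fallback in the patch above. The following is a minimal standalone Go sketch of the same control flow (ensureWritableDir is a hypothetical helper, not ddev's actual code):

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// ensureWritableDir mirrors the patched logic: try to make the target's
// parent directory writable, create it (with parents) only when it does
// not exist, and surface any other chmod failure to the caller.
func ensureWritableDir(filePath string) error {
	dir := filepath.Dir(filePath)
	if err := os.Chmod(dir, 0755); os.IsNotExist(err) {
		return os.MkdirAll(dir, 0755)
	} else if err != nil {
		return err
	}
	return nil
}

func main() {
	if err := ensureWritableDir("/tmp/example-ddev/wp-config.php"); err != nil {
		fmt.Println("failed:", err)
		return
	}
	fmt.Println("directory ready")
}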
@@ -127,16 +127,16 @@ class UserController < ApplicationController
# valid OpenID and one the user has control over before saving
# it as a password equivalent for the user.
session[:new_user_settings] = params
- openid_verify(params[:user][:openid_url], @user)
+ federated_verify(params[:user][:openid_url], @user)
else
update_user(@user, params)
end
- elsif using_open_id?
+ elsif using_federated_login?
# The redirect from the OpenID provider reenters here
# again and we need to pass the parameters through to
# the open_id_authentication function
settings = session.delete(:new_user_settings)
- openid_verify(nil, @user) do |user|
+ federated_verify(nil, @user) do |user|
update_user(user, settings)
end
end | 1 | class UserController < ApplicationController
layout :choose_layout
skip_before_filter :verify_authenticity_token, :only => [:api_read, :api_details, :api_gpx_files]
before_filter :disable_terms_redirect, :only => [:terms, :save, :logout, :api_details]
before_filter :authorize, :only => [:api_details, :api_gpx_files]
before_filter :authorize_web, :except => [:api_read, :api_details, :api_gpx_files]
before_filter :set_locale, :except => [:api_read, :api_details, :api_gpx_files]
before_filter :require_user, :only => [:account, :go_public, :make_friend, :remove_friend]
before_filter :require_self, :only => [:account]
before_filter :check_database_readable, :except => [:login, :api_read, :api_details, :api_gpx_files]
before_filter :check_database_writable, :only => [:new, :account, :confirm, :confirm_email, :lost_password, :reset_password, :go_public, :make_friend, :remove_friend]
before_filter :check_api_readable, :only => [:api_read, :api_details, :api_gpx_files]
before_filter :require_allow_read_prefs, :only => [:api_details]
before_filter :require_allow_read_gpx, :only => [:api_gpx_files]
before_filter :require_cookies, :only => [:new, :login, :confirm]
before_filter :require_administrator, :only => [:set_status, :delete, :list]
around_filter :api_call_handle_error, :only => [:api_read, :api_details, :api_gpx_files]
before_filter :lookup_user_by_id, :only => [:api_read]
before_filter :lookup_user_by_name, :only => [:set_status, :delete]
def terms
@legale = params[:legale] || OSM.IPToCountry(request.remote_ip) || DEFAULT_LEGALE
@text = OSM.legal_text_for_country(@legale)
if request.xhr?
render :partial => "terms"
else
@title = t 'user.terms.title'
if @user and @user.terms_agreed?
# Already agreed to terms, so just show settings
redirect_to :action => :account, :display_name => @user.display_name
elsif @user.nil? and session[:new_user].nil?
redirect_to :action => :login, :referer => request.fullpath
end
end
end
def save
@title = t 'user.new.title'
if params[:decline]
if @user
@user.terms_seen = true
if @user.save
flash[:notice] = t 'user.new.terms declined', :url => t('user.new.terms declined url')
end
if params[:referer]
redirect_to params[:referer]
else
redirect_to :action => :account, :display_name => @user.display_name
end
else
redirect_to t('user.terms.declined')
end
elsif @user
if !@user.terms_agreed?
@user.consider_pd = params[:user][:consider_pd]
@user.terms_agreed = Time.now.getutc
@user.terms_seen = true
if @user.save
flash[:notice] = t 'user.new.terms accepted'
end
end
if params[:referer]
redirect_to params[:referer]
else
redirect_to :action => :account, :display_name => @user.display_name
end
else
@user = session.delete(:new_user)
if check_signup_allowed(@user.email)
@user.data_public = true
@user.description = "" if @user.description.nil?
@user.creation_ip = request.remote_ip
@user.languages = http_accept_language.user_preferred_languages
@user.terms_agreed = Time.now.getutc
@user.terms_seen = true
@user.openid_url = nil if @user.openid_url and @user.openid_url.empty?
if @user.save
flash[:piwik_goal] = PIWIK["goals"]["signup"] if defined?(PIWIK)
referer = welcome_path
begin
uri = URI(session[:referer])
/map=(.*)\/(.*)\/(.*)/.match(uri.fragment) do |m|
editor = Rack::Utils.parse_query(uri.query).slice('editor')
referer = welcome_path({'zoom' => m[1],
'lat' => m[2],
'lon' => m[3]}.merge(editor))
end
rescue
# Use default
end
if @user.status == "active"
session[:referer] = referer
successful_login(@user)
else
session[:token] = @user.tokens.create.token
Notifier.signup_confirm(@user, @user.tokens.create(:referer => referer)).deliver
redirect_to :action => 'confirm', :display_name => @user.display_name
end
else
render :action => 'new', :referer => params[:referer]
end
end
end
end
def account
@title = t 'user.account.title'
@tokens = @user.oauth_tokens.authorized
if params[:user] and params[:user][:display_name] and params[:user][:description]
if params[:user][:openid_url] and
params[:user][:openid_url].length > 0 and
params[:user][:openid_url] != @user.openid_url
# If the OpenID has changed, we want to check that it is a
# valid OpenID and one the user has control over before saving
# it as a password equivalent for the user.
session[:new_user_settings] = params
openid_verify(params[:user][:openid_url], @user)
else
update_user(@user, params)
end
elsif using_open_id?
# The redirect from the OpenID provider reenters here
# again and we need to pass the parameters through to
# the open_id_authentication function
settings = session.delete(:new_user_settings)
openid_verify(nil, @user) do |user|
update_user(user, settings)
end
end
end
def go_public
@user.data_public = true
@user.save
flash[:notice] = t 'user.go_public.flash success'
redirect_to :controller => 'user', :action => 'account', :display_name => @user.display_name
end
def lost_password
@title = t 'user.lost_password.title'
if params[:user] and params[:user][:email]
user = User.visible.find_by_email(params[:user][:email])
if user.nil?
users = User.visible.where("LOWER(email) = LOWER(?)", params[:user][:email])
if users.count == 1
user = users.first
end
end
if user
token = user.tokens.create
Notifier.lost_password(user, token).deliver
flash[:notice] = t 'user.lost_password.notice email on way'
redirect_to :action => 'login'
else
flash.now[:error] = t 'user.lost_password.notice email cannot find'
end
end
end
def reset_password
@title = t 'user.reset_password.title'
if params[:token]
token = UserToken.find_by_token(params[:token])
if token
@user = token.user
if params[:user]
@user.pass_crypt = params[:user][:pass_crypt]
@user.pass_crypt_confirmation = params[:user][:pass_crypt_confirmation]
@user.status = "active" if @user.status == "pending"
@user.email_valid = true
if @user.save
token.destroy
flash[:notice] = t 'user.reset_password.flash changed'
redirect_to :action => 'login'
end
end
else
flash[:error] = t 'user.reset_password.flash token bad'
redirect_to :action => 'lost_password'
end
else
render :text => "", :status => :bad_request
end
end
def new
@title = t 'user.new.title'
@referer = params[:referer] || session[:referer]
if using_open_id?
# The redirect from the OpenID provider reenters here
# again and we need to pass the parameters through to
# the open_id_authentication function
@user = session.delete(:new_user)
openid_verify(nil, @user) do |user, verified_email|
user.status = "active" if user.email == verified_email
end
if @user.openid_url.nil? or @user.invalid?
render :action => 'new'
else
session[:new_user] = @user
redirect_to :action => 'terms'
end
elsif @user
# The user is logged in already, so don't show them the signup
# page, instead send them to the home page
if @referer
redirect_to @referer
else
redirect_to :controller => 'site', :action => 'index'
end
elsif params.key?(:openid)
@user = User.new(:email => params[:email],
:email_confirmation => params[:email],
:display_name => params[:nickname],
:openid_url => params[:openid])
flash.now[:notice] = t 'user.new.openid association'
else
check_signup_allowed
end
end
def create
@user = User.new(user_params)
if check_signup_allowed(@user.email)
session[:referer] = params[:referer]
@user.status = "pending"
if @user.openid_url.present? && @user.pass_crypt.empty?
# We are creating an account with OpenID and no password
# was specified so create a random one
@user.pass_crypt = SecureRandom.base64(16)
@user.pass_crypt_confirmation = @user.pass_crypt
end
if @user.invalid?
# Something is wrong with a new user, so rerender the form
render :action => "new"
elsif @user.openid_url.present?
# Verify OpenID before moving on
session[:new_user] = @user
openid_verify(@user.openid_url, @user)
else
# Save the user record
session[:new_user] = @user
redirect_to :action => :terms
end
end
end
def login
if params[:username] or using_open_id?
session[:remember_me] ||= params[:remember_me]
session[:referer] ||= params[:referer]
if using_open_id?
openid_authentication(params[:openid_url])
else
password_authentication(params[:username], params[:password])
end
end
end
def logout
@title = t 'user.logout.title'
if params[:session] == request.session_options[:id]
if session[:token]
token = UserToken.find_by_token(session[:token])
if token
token.destroy
end
session.delete(:token)
end
session.delete(:user)
session_expires_automatically
if params[:referer]
redirect_to params[:referer]
else
redirect_to :controller => 'site', :action => 'index'
end
end
end
def confirm
if request.post?
token = UserToken.find_by_token(params[:confirm_string])
if token && token.user.active?
flash[:error] = t('user.confirm.already active')
redirect_to :action => 'login'
elsif !token || token.expired?
flash[:error] = t('user.confirm.unknown token')
redirect_to :action => 'confirm'
else
user = token.user
user.status = "active"
user.email_valid = true
user.save!
referer = token.referer
token.destroy
if session[:token]
token = UserToken.find_by_token(session[:token])
session.delete(:token)
else
token = nil
end
if token.nil? or token.user != user
flash[:notice] = t('user.confirm.success')
redirect_to :action => :login, :referer => referer
else
token.destroy
session[:user] = user.id
redirect_to referer || welcome_path
end
end
else
user = User.find_by_display_name(params[:display_name])
if !user || user.active?
redirect_to root_path
end
end
end
def confirm_resend
if user = User.find_by_display_name(params[:display_name])
Notifier.signup_confirm(user, user.tokens.create).deliver
flash[:notice] = t 'user.confirm_resend.success', :email => user.email
else
flash[:notice] = t 'user.confirm_resend.failure', :name => params[:display_name]
end
redirect_to :action => 'login'
end
def confirm_email
if request.post?
token = UserToken.find_by_token(params[:confirm_string])
if token and token.user.new_email?
@user = token.user
@user.email = @user.new_email
@user.new_email = nil
@user.email_valid = true
if @user.save
flash[:notice] = t 'user.confirm_email.success'
else
flash[:errors] = @user.errors
end
token.destroy
session[:user] = @user.id
redirect_to :action => 'account', :display_name => @user.display_name
else
flash[:error] = t 'user.confirm_email.failure'
redirect_to :action => 'account', :display_name => @user.display_name
end
end
end
def api_read
render :text => "", :status => :gone unless @this_user.visible?
end
def api_details
@this_user = @user
render :action => :api_read
end
def api_gpx_files
doc = OSM::API.new.get_xml_doc
@user.traces.each do |trace|
doc.root << trace.to_xml_node() if trace.public? or trace.user == @user
end
render :text => doc.to_s, :content_type => "text/xml"
end
def view
@this_user = User.find_by_display_name(params[:display_name])
if @this_user and
(@this_user.visible? or (@user and @user.administrator?))
@title = @this_user.display_name
else
render_unknown_user params[:display_name]
end
end
def make_friend
@new_friend = User.find_by_display_name(params[:display_name])
if @new_friend
if request.post?
friend = Friend.new
friend.user_id = @user.id
friend.friend_user_id = @new_friend.id
unless @user.is_friends_with?(@new_friend)
if friend.save
flash[:notice] = t 'user.make_friend.success', :name => @new_friend.display_name
Notifier.friend_notification(friend).deliver
else
friend.add_error(t('user.make_friend.failed', :name => @new_friend.display_name))
end
else
flash[:warning] = t 'user.make_friend.already_a_friend', :name => @new_friend.display_name
end
if params[:referer]
redirect_to params[:referer]
else
redirect_to :controller => 'user', :action => 'view'
end
end
else
render_unknown_user params[:display_name]
end
end
def remove_friend
@friend = User.find_by_display_name(params[:display_name])
if @friend
if request.post?
if @user.is_friends_with?(@friend)
Friend.delete_all "user_id = #{@user.id} AND friend_user_id = #{@friend.id}"
flash[:notice] = t 'user.remove_friend.success', :name => @friend.display_name
else
flash[:error] = t 'user.remove_friend.not_a_friend', :name => @friend.display_name
end
if params[:referer]
redirect_to params[:referer]
else
redirect_to :controller => 'user', :action => 'view'
end
end
else
render_unknown_user params[:display_name]
end
end
##
# sets a user's status
def set_status
@this_user.status = params[:status]
@this_user.save
redirect_to :controller => 'user', :action => 'view', :display_name => params[:display_name]
end
##
# delete a user, marking them as deleted and removing personal data
def delete
@this_user.delete
redirect_to :controller => 'user', :action => 'view', :display_name => params[:display_name]
end
##
# display a list of users matching specified criteria
def list
if request.post?
ids = params[:user].keys.collect { |id| id.to_i }
User.update_all("status = 'confirmed'", :id => ids) if params[:confirm]
User.update_all("status = 'deleted'", :id => ids) if params[:hide]
redirect_to url_for(:status => params[:status], :ip => params[:ip], :page => params[:page])
else
conditions = Hash.new
conditions[:status] = params[:status] if params[:status]
conditions[:creation_ip] = params[:ip] if params[:ip]
@user_pages, @users = paginate(:users,
:conditions => conditions,
:order => :id,
:per_page => 50)
end
end
private
##
# handle password authentication
def password_authentication(username, password)
if user = User.authenticate(:username => username, :password => password)
successful_login(user)
elsif user = User.authenticate(:username => username, :password => password, :pending => true)
unconfirmed_login(user)
elsif User.authenticate(:username => username, :password => password, :suspended => true)
failed_login t('user.login.account is suspended', :webmaster => "mailto:webmaster@openstreetmap.org")
else
failed_login t('user.login.auth failure')
end
end
##
# handle OpenID authentication
def openid_authentication(openid_url)
# If we don't appear to have a user for this URL then ask the
# provider for some extra information to help with signup
if openid_url and User.find_by_openid_url(openid_url)
required = nil
else
required = [:nickname, :email, "http://axschema.org/namePerson/friendly", "http://axschema.org/contact/email"]
end
# Start the authentication
authenticate_with_open_id(openid_expand_url(openid_url), :method => :get, :required => required) do |result, identity_url, sreg, ax|
if result.successful?
# We need to use the openid url passed back from the OpenID provider
# rather than the one supplied by the user, as these can be different.
#
# For example, you can simply enter yahoo.com in the login box rather
# than a user specific url. Only once it comes back from the
# provider do we know the unique address for the user.
if user = User.find_by_openid_url(identity_url)
case user.status
when "pending" then
unconfirmed_login(user)
when "active", "confirmed" then
successful_login(user)
when "suspended" then
failed_login t('user.login.account is suspended', :webmaster => "mailto:webmaster@openstreetmap.org")
else
failed_login t('user.login.auth failure')
end
else
# Guard against not getting any extension data
sreg = Hash.new if sreg.nil?
ax = Hash.new if ax.nil?
# We don't have a user registered to this OpenID, so redirect
# to the create account page with username and email filled
# in if they have been given by the OpenID provider through
# the simple registration protocol.
nickname = sreg["nickname"] || ax["http://axschema.org/namePerson/friendly"].first
email = sreg["email"] || ax["http://axschema.org/contact/email"].first
redirect_to :controller => 'user', :action => 'new', :nickname => nickname, :email => email, :openid => identity_url
end
elsif result.missing?
failed_login t('user.login.openid missing provider')
elsif result.invalid?
failed_login t('user.login.openid invalid')
else
failed_login t('user.login.auth failure')
end
end
end
##
# verify an OpenID URL
def openid_verify(openid_url, user)
user.openid_url = openid_url
authenticate_with_open_id(openid_expand_url(openid_url), :method => :get, :required => [:email, "http://axschema.org/contact/email"]) do |result, identity_url, sreg, ax|
if result.successful?
# Do we trust the emails this provider returns?
if openid_email_verified(identity_url)
# Guard against not getting any extension data
sreg = Hash.new if sreg.nil?
ax = Hash.new if ax.nil?
# Get the verified email
verified_email = sreg["email"] || ax["http://axschema.org/contact/email"].first
end
# We need to use the openid url passed back from the OpenID provider
# rather than the one supplied by the user, as these can be different.
#
# For example, you can simply enter yahoo.com in the login box rather
# than a user specific url. Only once it comes back from the
# provider do we know the unique address for the user.
user.openid_url = identity_url
yield user, verified_email
elsif result.missing?
flash.now[:error] = t 'user.login.openid missing provider'
elsif result.invalid?
flash.now[:error] = t 'user.login.openid invalid'
else
flash.now[:error] = t 'user.login.auth failure'
end
end
end
##
# special case some common OpenID providers by applying heuristics to
# try and come up with the correct URL based on what the user entered
def openid_expand_url(openid_url)
if openid_url.nil?
return nil
elsif openid_url.match(/(.*)gmail.com(\/?)$/) or openid_url.match(/(.*)googlemail.com(\/?)$/)
# Special case gmail.com as it is potentially a popular OpenID
# provider and, unlike yahoo.com, where it works automatically, Google
# have hidden their OpenID endpoint somewhere obscure, thus making it
# somewhat less user friendly.
return 'https://www.google.com/accounts/o8/id'
else
return openid_url
end
end
##
# check if we trust an OpenID provider to return a verified
# email, so that we can skip verifying it ourselves
def openid_email_verified(openid_url)
openid_url.match(/https:\/\/www.google.com\/accounts\/o8\/id?(.*)/) or
openid_url.match(/https:\/\/me.yahoo.com\/(.*)/)
end
##
# process a successful login
def successful_login(user)
session[:user] = user.id
session_expires_after 28.days if session[:remember_me]
target = session[:referer] || url_for(:controller => :site, :action => :index)
# The user is logged in, so decide where to send them:
#
# - If they haven't seen the contributor terms, send them there.
# - If they have a block on them, show them that.
# - If they were referred to the login, send them back there.
# - Otherwise, send them to the home page.
if REQUIRE_TERMS_SEEN and not user.terms_seen
redirect_to :controller => :user, :action => :terms, :referer => target
elsif user.blocked_on_view
redirect_to user.blocked_on_view, :referer => target
else
redirect_to target
end
session.delete(:remember_me)
session.delete(:referer)
end
##
# process a failed login
def failed_login(message)
flash[:error] = message
redirect_to :action => 'login', :referer => session[:referer]
session.delete(:remember_me)
session.delete(:referer)
end
##
# process a login for an account that is still pending confirmation
def unconfirmed_login(user)
redirect_to :action => 'confirm', :display_name => user.display_name
session.delete(:remember_me)
session.delete(:referer)
end
##
# update a user's details
def update_user(user, params)
user.display_name = params[:user][:display_name]
user.new_email = params[:user][:new_email]
if params[:user][:pass_crypt].length > 0 or params[:user][:pass_crypt_confirmation].length > 0
user.pass_crypt = params[:user][:pass_crypt]
user.pass_crypt_confirmation = params[:user][:pass_crypt_confirmation]
end
if params[:user][:description] != user.description
user.description = params[:user][:description]
user.description_format = "markdown"
end
user.languages = params[:user][:languages].split(",")
case params[:image_action]
when "new" then
user.image = params[:user][:image]
user.image_use_gravatar = false
when "delete" then
user.image = nil
user.image_use_gravatar = false
when "gravatar" then
user.image = nil
user.image_use_gravatar = true
end
user.home_lat = params[:user][:home_lat]
user.home_lon = params[:user][:home_lon]
if params[:user][:preferred_editor] == "default"
user.preferred_editor = nil
else
user.preferred_editor = params[:user][:preferred_editor]
end
user.openid_url = nil if params[:user][:openid_url].blank?
if user.save
set_locale
if user.new_email.blank? or user.new_email == user.email
flash.now[:notice] = t 'user.account.flash update success'
else
user.email = user.new_email
if user.valid?
flash.now[:notice] = t 'user.account.flash update success confirm needed'
begin
Notifier.email_confirm(user, user.tokens.create).deliver
rescue
# Ignore errors sending email
end
else
@user.errors.set(:new_email, @user.errors.get(:email))
@user.errors.set(:email, [])
end
user.reset_email!
end
end
end
##
# require that the user is an administrator, or fill out a helpful error message
# and return them to the user page.
def require_administrator
if @user and not @user.administrator?
flash[:error] = t('user.filter.not_an_administrator')
if params[:display_name]
redirect_to :controller => 'user', :action => 'view', :display_name => params[:display_name]
else
redirect_to :controller => 'user', :action => 'login', :referer => request.fullpath
end
elsif not @user
redirect_to :controller => 'user', :action => 'login', :referer => request.fullpath
end
end
##
# require that the user in the URL is the logged in user
def require_self
if params[:display_name] != @user.display_name
render :text => "", :status => :forbidden
end
end
##
# ensure that there is a "this_user" instance variable
def lookup_user_by_id
@this_user = User.find(params[:id])
end
##
# ensure that there is a "this_user" instance variable
def lookup_user_by_name
@this_user = User.find_by_display_name(params[:display_name])
rescue ActiveRecord::RecordNotFound
redirect_to :controller => 'user', :action => 'view', :display_name => params[:display_name] unless @this_user
end
##
# Choose the layout to use. See
# https://rails.lighthouseapp.com/projects/8994/tickets/5371-layout-with-onlyexcept-options-makes-other-actions-render-without-layouts
def choose_layout
oauth_url = url_for(:controller => :oauth, :action => :authorize, :only_path => true)
if [ 'api_details' ].include? action_name
nil
elsif params[:referer] and URI.parse(params[:referer]).path == oauth_url
'slim'
else
'site'
end
end
##
# disable the "must agree to terms" redirect for this request
def disable_terms_redirect
# this is necessary because otherwise visiting the user terms page
# before having agreed would cause an infinite redirect loop.
# it's .now so that this doesn't propagate to other pages.
flash.now[:skip_terms] = true
end
##
# return permitted user parameters
def user_params
params.require(:user).permit(:email, :email_confirmation, :display_name, :openid_url, :pass_crypt, :pass_crypt_confirmation)
end
##
# check the signup ACLs to see whether account creation is blocked for this IP address or email domain
def check_signup_allowed(email = nil)
if email.nil?
domain = nil
else
domain = email.split("@").last
end
if blocked = Acl.no_account_creation(request.remote_ip, domain)
logger.info "Blocked signup from #{request.remote_ip} for #{email}"
render :action => 'blocked'
end
not blocked
end
end
| 1 | 8,054 | `open_id_authentication` is no longer the name of the function, and it's not OpenID specific | openstreetmap-openstreetmap-website | rb |
@@ -243,10 +243,10 @@ void rai_qt::accounts::refresh_wallet_balance ()
balance = balance + (this->wallet.node.ledger.account_balance (transaction, key));
pending = pending + (this->wallet.node.ledger.account_pending (transaction, key));
}
- auto final_text (std::string ("Wallet balance (XRB): ") + wallet.format_balance (balance));
+ auto final_text (std::string ("Balance: ") + wallet.format_balance (balance));
if (!pending.is_zero ())
{
- final_text += "\nWallet pending: " + wallet.format_balance (pending);
+ final_text += "\nPending: " + wallet.format_balance (pending);
}
wallet_balance_label->setText (QString (final_text.c_str ()));
this->wallet.node.alarm.add (std::chrono::system_clock::now () + std::chrono::seconds (60), [this]() { | 1 | #include <rai/qt/qt.hpp>
#include <boost/property_tree/json_parser.hpp>
#include <sstream>
namespace
{
void show_line_error (QLineEdit & line)
{
line.setStyleSheet ("QLineEdit { color: red }");
}
void show_line_ok (QLineEdit & line)
{
line.setStyleSheet ("QLineEdit { color: black }");
}
void show_line_success (QLineEdit & line)
{
line.setStyleSheet ("QLineEdit { color: blue }");
}
void show_label_error (QLabel & label)
{
label.setStyleSheet ("QLabel { color: red }");
}
void show_label_ok (QLabel & label)
{
label.setStyleSheet ("QLabel { color: black }");
}
void show_button_error (QPushButton & button)
{
button.setStyleSheet ("QPushButton { color: red }");
}
void show_button_ok (QPushButton & button)
{
button.setStyleSheet ("QPushButton { color: black }");
}
void show_button_success (QPushButton & button)
{
button.setStyleSheet ("QPushButton { color: blue }");
}
}
bool rai_qt::eventloop_processor::event (QEvent * event_a)
{
assert (dynamic_cast<rai_qt::eventloop_event *> (event_a) != nullptr);
static_cast<rai_qt::eventloop_event *> (event_a)->action ();
return true;
}
rai_qt::eventloop_event::eventloop_event (std::function<void()> const & action_a) :
QEvent (QEvent::Type::User),
action (action_a)
{
}
rai_qt::self_pane::self_pane (rai_qt::wallet & wallet_a, rai::account const & account_a) :
window (new QWidget),
layout (new QVBoxLayout),
self_layout (new QHBoxLayout),
self_window (new QWidget),
your_account_label (new QLabel ("Your RaiBlocks account:")),
account_window (new QWidget),
account_layout (new QHBoxLayout),
account_text (new QLineEdit),
copy_button (new QPushButton ("Copy")),
balance_window (new QWidget),
balance_layout (new QHBoxLayout),
balance_label (new QLabel),
wallet (wallet_a)
{
your_account_label->setStyleSheet ("font-weight: bold;");
version = new QLabel (boost::str (boost::format ("Version %1%.%2%") % RAIBLOCKS_VERSION_MAJOR % RAIBLOCKS_VERSION_MINOR).c_str ());
self_layout->addWidget (your_account_label);
self_layout->addStretch ();
self_layout->addWidget (version);
self_layout->setContentsMargins (0, 0, 0, 0);
self_window->setLayout (self_layout);
account_text->setReadOnly (true);
account_text->setStyleSheet ("QLineEdit{ background: #ddd; }");
account_layout->addWidget (account_text, 9);
account_layout->addWidget (copy_button, 1);
account_layout->setContentsMargins (0, 0, 0, 0);
account_window->setLayout (account_layout);
layout->addWidget (self_window);
layout->addWidget (account_window);
balance_label->setStyleSheet ("font-weight: bold;");
balance_layout->addWidget (balance_label);
balance_layout->addStretch ();
balance_layout->setContentsMargins (0, 0, 0, 0);
balance_window->setLayout (balance_layout);
layout->addWidget (balance_window);
layout->setContentsMargins (5, 5, 5, 5);
window->setLayout (layout);
QObject::connect (copy_button, &QPushButton::clicked, [this]() {
this->wallet.application.clipboard ()->setText (QString (this->wallet.account.to_account ().c_str ()));
copy_button->setText ("Copied!");
this->wallet.node.alarm.add (std::chrono::system_clock::now () + std::chrono::seconds (2), [this]() {
copy_button->setText ("Copy");
});
});
}
void rai_qt::self_pane::refresh_balance ()
{
auto balance (wallet.node.balance_pending (wallet.account));
auto final_text (std::string ("Balance: ") + wallet.format_balance (balance.first));
if (!balance.second.is_zero ())
{
final_text += "\nPending: " + wallet.format_balance (balance.second);
}
wallet.self.balance_label->setText (QString (final_text.c_str ()));
}
rai_qt::accounts::accounts (rai_qt::wallet & wallet_a) :
window (new QWidget),
layout (new QVBoxLayout),
wallet_balance_label (new QLabel),
model (new QStandardItemModel),
view (new QTableView),
use_account (new QPushButton ("Use account")),
create_account (new QPushButton ("Create account")),
import_wallet (new QPushButton ("Import wallet")),
backup_seed (new QPushButton ("Copy wallet seed to clipboard")),
separator (new QFrame),
account_key_line (new QLineEdit),
account_key_button (new QPushButton ("Import adhoc key")),
back (new QPushButton ("Back")),
wallet (wallet_a)
{
separator->setFrameShape (QFrame::HLine);
separator->setFrameShadow (QFrame::Sunken);
model->setHorizontalHeaderItem (0, new QStandardItem ("Balance"));
model->setHorizontalHeaderItem (1, new QStandardItem ("Account"));
view->setEditTriggers (QAbstractItemView::NoEditTriggers);
view->setModel (model);
view->verticalHeader ()->hide ();
view->setContextMenuPolicy (Qt::ContextMenuPolicy::CustomContextMenu);
view->horizontalHeader ()->setStretchLastSection (true);
layout->addWidget (wallet_balance_label);
layout->addWidget (view);
layout->addWidget (use_account);
layout->addWidget (create_account);
layout->addWidget (import_wallet);
layout->addWidget (backup_seed);
layout->addWidget (separator);
layout->addWidget (account_key_line);
layout->addWidget (account_key_button);
layout->addWidget (back);
window->setLayout (layout);
QObject::connect (use_account, &QPushButton::released, [this]() {
auto selection (view->selectionModel ()->selection ().indexes ());
if (selection.size () == 1)
{
auto error (this->wallet.account.decode_account (model->item (selection[0].row (), 1)->text ().toStdString ()));
assert (!error);
this->wallet.refresh ();
}
});
QObject::connect (account_key_button, &QPushButton::released, [this]() {
QString key_text_wide (account_key_line->text ());
std::string key_text (key_text_wide.toLocal8Bit ());
rai::raw_key key;
if (!key.data.decode_hex (key_text))
{
show_line_ok (*account_key_line);
account_key_line->clear ();
this->wallet.wallet_m->insert_adhoc (key);
this->wallet.accounts.refresh ();
this->wallet.accounts.refresh_wallet_balance ();
this->wallet.history.refresh ();
}
else
{
show_line_error (*account_key_line);
}
});
QObject::connect (back, &QPushButton::clicked, [this]() {
this->wallet.pop_main_stack ();
});
QObject::connect (create_account, &QPushButton::released, [this]() {
rai::transaction transaction (this->wallet.wallet_m->store.environment, nullptr, true);
if (this->wallet.wallet_m->store.valid_password (transaction))
{
this->wallet.wallet_m->deterministic_insert (transaction);
show_button_success (*create_account);
create_account->setText ("New account was created");
refresh ();
this->wallet.node.alarm.add (std::chrono::system_clock::now () + std::chrono::seconds (5), [this]() {
show_button_ok (*create_account);
create_account->setText ("Create account");
});
}
else
{
show_button_error (*create_account);
create_account->setText ("Wallet is locked, unlock it to create account");
this->wallet.node.alarm.add (std::chrono::system_clock::now () + std::chrono::seconds (5), [this]() {
show_button_ok (*create_account);
create_account->setText ("Create account");
});
}
});
QObject::connect (import_wallet, &QPushButton::released, [this]() {
this->wallet.push_main_stack (this->wallet.import.window);
});
QObject::connect (backup_seed, &QPushButton::released, [this]() {
rai::raw_key seed;
rai::transaction transaction (this->wallet.wallet_m->store.environment, nullptr, false);
if (this->wallet.wallet_m->store.valid_password (transaction))
{
this->wallet.wallet_m->store.seed (seed, transaction);
this->wallet.application.clipboard ()->setText (QString (seed.data.to_string ().c_str ()));
show_button_success (*backup_seed);
backup_seed->setText ("Seed was copied to clipboard");
this->wallet.node.alarm.add (std::chrono::system_clock::now () + std::chrono::seconds (5), [this]() {
show_button_ok (*backup_seed);
backup_seed->setText ("Copy wallet seed to clipboard");
});
}
else
{
this->wallet.application.clipboard ()->setText ("");
show_button_error (*backup_seed);
backup_seed->setText ("Wallet is locked, unlock it to enable the backup");
this->wallet.node.alarm.add (std::chrono::system_clock::now () + std::chrono::seconds (5), [this]() {
show_button_ok (*backup_seed);
backup_seed->setText ("Copy wallet seed to clipboard");
});
}
});
refresh_wallet_balance ();
}
void rai_qt::accounts::refresh_wallet_balance ()
{
rai::transaction transaction (this->wallet.wallet_m->store.environment, nullptr, false);
rai::uint128_t balance (0);
rai::uint128_t pending (0);
for (auto i (this->wallet.wallet_m->store.begin (transaction)), j (this->wallet.wallet_m->store.end ()); i != j; ++i)
{
rai::public_key key (i->first.uint256 ());
balance = balance + (this->wallet.node.ledger.account_balance (transaction, key));
pending = pending + (this->wallet.node.ledger.account_pending (transaction, key));
}
auto final_text (std::string ("Wallet balance (XRB): ") + wallet.format_balance (balance));
if (!pending.is_zero ())
{
final_text += "\nWallet pending: " + wallet.format_balance (pending);
}
wallet_balance_label->setText (QString (final_text.c_str ()));
this->wallet.node.alarm.add (std::chrono::system_clock::now () + std::chrono::seconds (60), [this]() {
this->wallet.application.postEvent (&this->wallet.processor, new eventloop_event ([this]() {
refresh_wallet_balance ();
}));
});
}
void rai_qt::accounts::refresh ()
{
model->removeRows (0, model->rowCount ());
rai::transaction transaction (wallet.wallet_m->store.environment, nullptr, false);
QBrush brush;
for (auto i (wallet.wallet_m->store.begin (transaction)), j (wallet.wallet_m->store.end ()); i != j; ++i)
{
rai::public_key key (i->first.uint256 ());
auto balance_amount (wallet.node.ledger.account_balance (transaction, key));
bool display (true);
switch (wallet.wallet_m->store.key_type (i->second))
{
case rai::key_type::adhoc:
{
brush.setColor ("red");
display = !balance_amount.is_zero ();
break;
}
default:
{
brush.setColor ("black");
break;
}
}
if (display)
{
QList<QStandardItem *> items;
std::string balance = wallet.format_balance (balance_amount);
items.push_back (new QStandardItem (balance.c_str ()));
auto account (new QStandardItem (QString (key.to_account ().c_str ())));
account->setForeground (brush);
items.push_back (account);
model->appendRow (items);
}
}
}
rai_qt::import::import (rai_qt::wallet & wallet_a) :
window (new QWidget),
layout (new QVBoxLayout),
seed_label (new QLabel ("Seed:")),
seed (new QLineEdit),
clear_label (new QLabel ("Modifying seed clears existing keys\nType 'clear keys' below to confirm:")),
clear_line (new QLineEdit),
import_seed (new QPushButton ("Import seed")),
separator (new QFrame),
filename_label (new QLabel ("Filename:")),
filename (new QLineEdit),
password_label (new QLabel ("Password:")),
password (new QLineEdit),
perform (new QPushButton ("Import")),
back (new QPushButton ("Back")),
wallet (wallet_a)
{
layout->addWidget (seed_label);
layout->addWidget (seed);
layout->addWidget (clear_label);
layout->addWidget (clear_line);
clear_line->setPlaceholderText ("clear keys");
layout->addWidget (import_seed);
layout->addWidget (separator);
layout->addWidget (filename_label);
layout->addWidget (filename);
layout->addWidget (password_label);
layout->addWidget (password);
layout->addWidget (perform);
layout->addStretch ();
layout->addWidget (back);
window->setLayout (layout);
QObject::connect (perform, &QPushButton::released, [this]() {
std::ifstream stream;
stream.open (filename->text ().toStdString ().c_str ());
if (!stream.fail ())
{
show_line_ok (*filename);
std::stringstream contents;
contents << stream.rdbuf ();
if (!this->wallet.wallet_m->import (contents.str (), password->text ().toStdString ().c_str ()))
{
show_line_ok (*password);
this->wallet.accounts.refresh ();
password->clear ();
filename->clear ();
}
else
{
show_line_error (*password);
}
}
else
{
show_line_error (*filename);
}
});
QObject::connect (back, &QPushButton::released, [this]() {
this->wallet.pop_main_stack ();
});
QObject::connect (import_seed, &QPushButton::released, [this]() {
if (clear_line->text ().toStdString () == "clear keys")
{
show_line_ok (*clear_line);
rai::raw_key seed_l;
if (!seed_l.data.decode_hex (seed->text ().toStdString ()))
{
bool successful (false);
{
rai::transaction transaction (this->wallet.wallet_m->store.environment, nullptr, true);
if (this->wallet.wallet_m->store.valid_password (transaction))
{
this->wallet.wallet_m->store.seed_set (transaction, seed_l);
successful = true;
}
else
{
show_line_error (*seed);
show_button_error (*import_seed);
import_seed->setText ("Wallet is locked, unlock it to enable the import");
this->wallet.node.alarm.add (std::chrono::system_clock::now () + std::chrono::seconds (10), [this]() {
show_line_ok (*seed);
show_button_ok (*import_seed);
import_seed->setText ("Import seed");
});
}
}
if (successful)
{
rai::transaction transaction (this->wallet.wallet_m->store.environment, nullptr, true);
this->wallet.account = this->wallet.wallet_m->deterministic_insert (transaction);
auto count (0);
for (uint32_t i (1), n (32); i < n; ++i)
{
rai::raw_key prv;
this->wallet.wallet_m->store.deterministic_key (prv, transaction, i);
rai::keypair pair (prv.data.to_string ());
auto latest (this->wallet.node.ledger.latest (transaction, pair.pub));
if (!latest.is_zero ())
{
count = i;
n = i + 32;
}
}
for (uint32_t i (0); i < count; ++i)
{
this->wallet.account = this->wallet.wallet_m->deterministic_insert (transaction);
}
}
if (successful)
{
seed->clear ();
clear_line->clear ();
show_line_ok (*seed);
show_button_success (*import_seed);
import_seed->setText ("Successful import of seed");
this->wallet.refresh ();
this->wallet.node.alarm.add (std::chrono::system_clock::now () + std::chrono::seconds (5), [this]() {
show_button_ok (*import_seed);
import_seed->setText ("Import seed");
});
}
}
else
{
show_line_error (*seed);
show_button_error (*import_seed);
if (seed->text ().toStdString ().size () != 64)
{
import_seed->setText ("Incorrect seed, length must be 64");
}
else
{
import_seed->setText ("Incorrect seed. Only HEX characters allowed");
}
this->wallet.node.alarm.add (std::chrono::system_clock::now () + std::chrono::seconds (5), [this]() {
show_button_ok (*import_seed);
import_seed->setText ("Import seed");
});
}
}
else
{
show_line_error (*clear_line);
show_button_error (*import_seed);
import_seed->setText ("Type words 'clear keys'");
this->wallet.node.alarm.add (std::chrono::system_clock::now () + std::chrono::seconds (5), [this]() {
show_button_ok (*import_seed);
import_seed->setText ("Import seed");
});
}
});
}
rai_qt::history::history (rai::ledger & ledger_a, rai::account const & account_a, rai_qt::wallet & wallet_a) :
window (new QWidget),
layout (new QVBoxLayout),
model (new QStandardItemModel),
view (new QTableView),
tx_window (new QWidget),
tx_layout (new QHBoxLayout),
tx_label (new QLabel ("Account history count:")),
tx_count (new QSpinBox),
ledger (ledger_a),
account (account_a),
wallet (wallet_a)
{ /*
tx_count->setRange (1, 256);
tx_layout->addWidget (tx_label);
tx_layout->addWidget (tx_count);
tx_layout->setContentsMargins (0, 0, 0, 0);
tx_window->setLayout (tx_layout);*/
model->setHorizontalHeaderItem (0, new QStandardItem ("Type"));
model->setHorizontalHeaderItem (1, new QStandardItem ("Account"));
model->setHorizontalHeaderItem (2, new QStandardItem ("Amount"));
model->setHorizontalHeaderItem (3, new QStandardItem ("Hash"));
view->setModel (model);
view->setEditTriggers (QAbstractItemView::NoEditTriggers);
view->verticalHeader ()->hide ();
view->horizontalHeader ()->setStretchLastSection (true);
// layout->addWidget (tx_window);
layout->addWidget (view);
layout->setContentsMargins (0, 0, 0, 0);
window->setLayout (layout);
tx_count->setValue (32);
}
namespace
{
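// Renders each ledger block type as a short (type label, counterparty
// account, amount) summary for display in the history table.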
class short_text_visitor : public rai::block_visitor
{
public:
short_text_visitor (MDB_txn * transaction_a, rai::ledger & ledger_a) :
transaction (transaction_a),
ledger (ledger_a)
{
}
void send_block (rai::send_block const & block_a)
{
type = "Send";
account = block_a.hashables.destination;
amount = ledger.amount (transaction, block_a.hash ());
}
void receive_block (rai::receive_block const & block_a)
{
type = "Receive";
account = ledger.account (transaction, block_a.source ());
amount = ledger.amount (transaction, block_a.source ());
}
void open_block (rai::open_block const & block_a)
{
type = "Receive";
if (block_a.hashables.source != rai::genesis_account)
{
account = ledger.account (transaction, block_a.hashables.source);
amount = ledger.amount (transaction, block_a.hash ());
}
else
{
account = rai::genesis_account;
amount = rai::genesis_amount;
}
}
void change_block (rai::change_block const & block_a)
{
type = "Change";
amount = 0;
account = block_a.hashables.representative;
}
MDB_txn * transaction;
rai::ledger & ledger;
std::string type;
rai::uint128_t amount;
rai::account account;
};
}
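// Rebuild the history table by walking the account's chain backwards from its
// latest block, one row per block, up to the number of entries configured in
// tx_count.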
void rai_qt::history::refresh ()
{
rai::transaction transaction (ledger.store.environment, nullptr, false);
model->removeRows (0, model->rowCount ());
auto hash (ledger.latest (transaction, account));
short_text_visitor visitor (transaction, ledger);
for (auto i (0), n (tx_count->value ()); i < n && !hash.is_zero (); ++i)
{
QList<QStandardItem *> items;
auto block (ledger.store.block_get (transaction, hash));
assert (block != nullptr);
block->visit (visitor);
items.push_back (new QStandardItem (QString (visitor.type.c_str ())));
items.push_back (new QStandardItem (QString (visitor.account.to_account ().c_str ())));
items.push_back (new QStandardItem (QString (wallet.format_balance (visitor.amount).c_str ())));
items.push_back (new QStandardItem (QString (hash.to_string ().c_str ())));
hash = block->previous ();
model->appendRow (items);
}
}
rai_qt::block_viewer::block_viewer (rai_qt::wallet & wallet_a) :
window (new QWidget),
layout (new QVBoxLayout),
hash_label (new QLabel ("Hash:")),
hash (new QLineEdit),
block_label (new QLabel ("Block:")),
block (new QPlainTextEdit),
successor_label (new QLabel ("Successor:")),
successor (new QLineEdit),
retrieve (new QPushButton ("Retrieve")),
rebroadcast (new QPushButton ("Rebroadcast")),
back (new QPushButton ("Back")),
wallet (wallet_a)
{
layout->addWidget (hash_label);
layout->addWidget (hash);
layout->addWidget (block_label);
layout->addWidget (block);
layout->addWidget (successor_label);
layout->addWidget (successor);
layout->addWidget (retrieve);
layout->addWidget (rebroadcast);
layout->addStretch ();
layout->addWidget (back);
window->setLayout (layout);
QObject::connect (back, &QPushButton::released, [this]() {
this->wallet.pop_main_stack ();
});
QObject::connect (retrieve, &QPushButton::released, [this]() {
rai::block_hash hash_l;
if (!hash_l.decode_hex (hash->text ().toStdString ()))
{
rai::transaction transaction (this->wallet.node.store.environment, nullptr, false);
auto block_l (this->wallet.node.store.block_get (transaction, hash_l));
if (block_l != nullptr)
{
std::string contents;
block_l->serialize_json (contents);
block->setPlainText (contents.c_str ());
auto successor_l (this->wallet.node.store.block_successor (transaction, hash_l));
successor->setText (successor_l.to_string ().c_str ());
}
else
{
block->setPlainText ("Block not found");
}
}
else
{
block->setPlainText ("Bad block hash");
}
});
QObject::connect (rebroadcast, &QPushButton::released, [this]() {
rai::block_hash block;
auto error (block.decode_hex (hash->text ().toStdString ()));
if (!error)
{
rai::transaction transaction (this->wallet.node.store.environment, nullptr, false);
if (this->wallet.node.store.block_exists (transaction, block))
{
rebroadcast->setEnabled (false);
this->wallet.node.background ([this, block]() {
rebroadcast_action (block);
});
}
}
});
rebroadcast->setToolTip ("Rebroadcast block into the network");
}
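// Republish the block identified by hash_a and, if it has a successor,
// schedule the same action for the successor one second later, so the whole
// chain is rebroadcast. The rebroadcast button is re-enabled once the end of
// the chain is reached.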
void rai_qt::block_viewer::rebroadcast_action (rai::uint256_union const & hash_a)
{
auto done (true);
rai::transaction transaction (wallet.node.ledger.store.environment, nullptr, false);
auto block (wallet.node.store.block_get (transaction, hash_a));
if (block != nullptr)
{
wallet.node.network.republish_block (transaction, std::move (block));
auto successor (wallet.node.store.block_successor (transaction, hash_a));
if (!successor.is_zero ())
{
done = false;
wallet.node.alarm.add (std::chrono::system_clock::now () + std::chrono::seconds (1), [this, successor]() {
rebroadcast_action (successor);
});
}
}
if (done)
{
rebroadcast->setEnabled (true);
}
}
rai_qt::account_viewer::account_viewer (rai_qt::wallet & wallet_a) :
window (new QWidget),
layout (new QVBoxLayout),
account_label (new QLabel ("Account:")),
account_line (new QLineEdit),
refresh (new QPushButton ("Refresh")),
balance_window (new QWidget),
balance_layout (new QHBoxLayout),
balance_label (new QLabel),
history (wallet_a.wallet_m->node.ledger, account, wallet_a),
back (new QPushButton ("Back")),
account (wallet_a.account),
wallet (wallet_a)
{
layout->addWidget (account_label);
layout->addWidget (account_line);
layout->addWidget (refresh);
balance_layout->addWidget (balance_label);
balance_layout->addStretch ();
balance_layout->setContentsMargins (0, 0, 0, 0);
balance_window->setLayout (balance_layout);
layout->addWidget (balance_window);
layout->addWidget (history.window);
layout->addWidget (back);
window->setLayout (layout);
QObject::connect (back, &QPushButton::released, [this]() {
this->wallet.pop_main_stack ();
});
QObject::connect (refresh, &QPushButton::released, [this]() {
account.clear ();
if (!account.decode_account (account_line->text ().toStdString ()))
{
show_line_ok (*account_line);
this->history.refresh ();
auto balance (this->wallet.node.balance_pending (account));
auto final_text (std::string ("Balance (XRB): ") + wallet.format_balance (balance.first));
if (!balance.second.is_zero ())
{
final_text += "\nPending: " + wallet.format_balance (balance.second);
}
balance_label->setText (QString (final_text.c_str ()));
}
else
{
show_line_error (*account_line);
balance_label->clear ();
}
});
}
rai_qt::status::status (rai_qt::wallet & wallet_a) :
wallet (wallet_a)
{
wallet.status->setToolTip ("Wallet status, block count (blocks downloaded)");
active.insert (rai_qt::status_types::nominal);
set_text ();
}
void rai_qt::status::erase (rai_qt::status_types status_a)
{
assert (status_a != rai_qt::status_types::nominal);
auto erased (active.erase (status_a));
(void)erased;
set_text ();
}
void rai_qt::status::insert (rai_qt::status_types status_a)
{
assert (status_a != rai_qt::status_types::nominal);
active.insert (status_a);
set_text ();
}
void rai_qt::status::set_text ()
{
wallet.status->setText (text ().c_str ());
wallet.status->setStyleSheet ((std::string ("QLabel {") + color () + "}").c_str ());
}
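// Compose the status line: the first entry of `active` (presumably an ordered
// set, so the lowest-valued status takes priority) followed by the block count
// and, while a bootstrap is in progress, the unchecked count in parentheses.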
std::string rai_qt::status::text ()
{
assert (!active.empty ());
std::string result;
size_t unchecked (0);
std::string count_string;
{
rai::transaction transaction (wallet.wallet_m->node.store.environment, nullptr, false);
auto size (wallet.wallet_m->node.store.block_count (transaction));
unchecked = wallet.wallet_m->node.store.unchecked_count (transaction);
count_string = std::to_string (size.sum ());
}
switch (*active.begin ())
{
case rai_qt::status_types::disconnected:
result = "Status: Disconnected";
break;
case rai_qt::status_types::working:
result = "Status: Generating proof of work";
break;
case rai_qt::status_types::synchronizing:
result = "Status: Synchronizing";
break;
case rai_qt::status_types::locked:
result = "Status: Wallet locked";
break;
case rai_qt::status_types::vulnerable:
result = "Status: Wallet password empty";
break;
case rai_qt::status_types::active:
result = "Status: Wallet active";
break;
case rai_qt::status_types::nominal:
result = "Status: Running";
break;
default:
assert (false);
break;
}
result += ", Block: ";
if (unchecked != 0 && wallet.wallet_m->node.bootstrap_initiator.in_progress ())
{
count_string += " (" + std::to_string (unchecked) + ")";
}
result += count_string;
return result;
}
std::string rai_qt::status::color ()
{
assert (!active.empty ());
std::string result;
switch (*active.begin ())
{
case rai_qt::status_types::disconnected:
result = "color: red";
break;
case rai_qt::status_types::working:
result = "color: blue";
break;
case rai_qt::status_types::synchronizing:
result = "color: blue";
break;
case rai_qt::status_types::locked:
result = "color: orange";
break;
case rai_qt::status_types::vulnerable:
result = "color: blue";
break;
case rai_qt::status_types::active:
result = "color: black";
break;
case rai_qt::status_types::nominal:
result = "color: black";
break;
default:
assert (false);
break;
}
return result;
}
rai_qt::wallet::wallet (QApplication & application_a, rai_qt::eventloop_processor & processor_a, rai::node & node_a, std::shared_ptr<rai::wallet> wallet_a, rai::account & account_a) :
rendering_ratio (rai::Mxrb_ratio),
node (node_a),
wallet_m (wallet_a),
account (account_a),
processor (processor_a),
history (node.ledger, account, *this),
accounts (*this),
self (*this, account_a),
settings (*this),
advanced (*this),
block_creation (*this),
block_entry (*this),
block_viewer (*this),
account_viewer (*this),
import (*this),
application (application_a),
status (new QLabel),
main_stack (new QStackedWidget),
client_window (new QWidget),
client_layout (new QVBoxLayout),
entry_window (new QWidget),
entry_window_layout (new QVBoxLayout),
separator (new QFrame),
account_history_label (new QLabel ("Account history:")),
send_blocks (new QPushButton ("Send")),
settings_button (new QPushButton ("Settings")),
accounts_button (new QPushButton ("Accounts")),
show_advanced (new QPushButton ("Advanced")),
send_blocks_window (new QWidget),
send_blocks_layout (new QVBoxLayout),
send_account_label (new QLabel ("Destination account:")),
send_account (new QLineEdit),
send_count_label (new QLabel ("Amount:")),
send_count (new QLineEdit),
send_blocks_send (new QPushButton ("Send")),
send_blocks_back (new QPushButton ("Back")),
active_status (*this)
{
update_connected ();
empty_password ();
settings.update_locked (true, true);
send_blocks_layout->addWidget (send_account_label);
send_account->setPlaceholderText (rai::zero_key.pub.to_account ().c_str ());
send_blocks_layout->addWidget (send_account);
send_blocks_layout->addWidget (send_count_label);
send_count->setPlaceholderText ("0");
send_blocks_layout->addWidget (send_count);
send_blocks_layout->addWidget (send_blocks_send);
send_blocks_layout->addStretch ();
send_blocks_layout->addWidget (send_blocks_back);
send_blocks_layout->setContentsMargins (0, 0, 0, 0);
send_blocks_window->setLayout (send_blocks_layout);
entry_window_layout->addWidget (account_history_label);
entry_window_layout->addWidget (history.window);
entry_window_layout->addWidget (send_blocks);
entry_window_layout->addWidget (settings_button);
entry_window_layout->addWidget (accounts_button);
entry_window_layout->addWidget (show_advanced);
entry_window_layout->setContentsMargins (0, 0, 0, 0);
entry_window_layout->setSpacing (5);
entry_window->setLayout (entry_window_layout);
main_stack->addWidget (entry_window);
status->setContentsMargins (5, 5, 5, 5);
status->setAlignment (Qt::AlignHCenter);
separator->setFrameShape (QFrame::HLine);
separator->setFrameShadow (QFrame::Sunken);
client_layout->addWidget (status);
client_layout->addWidget (self.window);
client_layout->addWidget (separator);
client_layout->addWidget (main_stack);
client_layout->setSpacing (0);
client_layout->setContentsMargins (0, 0, 0, 0);
client_window->setLayout (client_layout);
client_window->resize (320, 480);
client_window->setStyleSheet ("\
QLineEdit { padding: 3px; } \
");
refresh ();
}
void rai_qt::wallet::start ()
{
std::weak_ptr<rai_qt::wallet> this_w (shared_from_this ());
QObject::connect (settings_button, &QPushButton::released, [this_w]() {
if (auto this_l = this_w.lock ())
{
this_l->settings.activate ();
}
});
QObject::connect (accounts_button, &QPushButton::released, [this_w]() {
if (auto this_l = this_w.lock ())
{
this_l->push_main_stack (this_l->accounts.window);
}
});
QObject::connect (show_advanced, &QPushButton::released, [this_w]() {
if (auto this_l = this_w.lock ())
{
this_l->push_main_stack (this_l->advanced.window);
}
});
QObject::connect (send_blocks_send, &QPushButton::released, [this_w]() {
if (auto this_l = this_w.lock ())
{
show_line_ok (*this_l->send_count);
show_line_ok (*this_l->send_account);
rai::amount amount;
if (!amount.decode_dec (this_l->send_count->text ().toStdString ()))
{
rai::uint128_t actual (amount.number () * this_l->rendering_ratio);
if (actual / this_l->rendering_ratio == amount.number ())
{
QString account_text (this_l->send_account->text ());
std::string account_text_narrow (account_text.toLocal8Bit ());
rai::account account_l;
auto parse_error (account_l.decode_account (account_text_narrow));
if (!parse_error)
{
auto balance (this_l->node.balance (this_l->account));
if (actual <= balance)
{
rai::transaction transaction (this_l->wallet_m->store.environment, nullptr, false);
if (this_l->wallet_m->store.valid_password (transaction))
{
this_l->send_blocks_send->setEnabled (false);
this_l->node.background ([this_w, account_l, actual]() {
if (auto this_l = this_w.lock ())
{
this_l->wallet_m->send_async (this_l->account, account_l, actual, [this_w](std::shared_ptr<rai::block> block_a) {
if (auto this_l = this_w.lock ())
{
auto succeeded (block_a != nullptr);
this_l->application.postEvent (&this_l->processor, new eventloop_event ([this_w, succeeded]() {
if (auto this_l = this_w.lock ())
{
this_l->send_blocks_send->setEnabled (true);
if (succeeded)
{
this_l->send_count->clear ();
this_l->send_account->clear ();
this_l->accounts.refresh ();
}
else
{
show_line_error (*this_l->send_count);
}
}
}));
}
});
}
});
}
else
{
show_button_error (*this_l->send_blocks_send);
this_l->send_blocks_send->setText ("Wallet is locked, unlock it to send");
this_l->node.alarm.add (std::chrono::system_clock::now () + std::chrono::seconds (5), [this_w]() {
if (auto this_l = this_w.lock ())
{
show_button_ok (*this_l->send_blocks_send);
this_l->send_blocks_send->setText ("Send");
}
});
}
}
else
{
show_line_error (*this_l->send_count);
show_button_error (*this_l->send_blocks_send);
this_l->send_blocks_send->setText ("Not enough balance");
this_l->node.alarm.add (std::chrono::system_clock::now () + std::chrono::seconds (5), [this_w]() {
if (auto this_l = this_w.lock ())
{
show_button_ok (*this_l->send_blocks_send);
this_l->send_blocks_send->setText ("Send");
}
});
}
}
else
{
show_line_error (*this_l->send_account);
show_button_error (*this_l->send_blocks_send);
this_l->send_blocks_send->setText ("Bad destination account");
this_l->node.alarm.add (std::chrono::system_clock::now () + std::chrono::seconds (5), [this_w]() {
if (auto this_l = this_w.lock ())
{
show_button_ok (*this_l->send_blocks_send);
this_l->send_blocks_send->setText ("Send");
}
});
}
}
else
{
show_line_error (*this_l->send_count);
show_button_error (*this_l->send_blocks_send);
this_l->send_blocks_send->setText ("Amount too big");
this_l->node.alarm.add (std::chrono::system_clock::now () + std::chrono::seconds (5), [this_w]() {
if (auto this_l = this_w.lock ())
{
show_line_ok (*this_l->send_account);
show_button_ok (*this_l->send_blocks_send);
this_l->send_blocks_send->setText ("Send");
}
});
}
}
else
{
show_line_error (*this_l->send_count);
show_button_error (*this_l->send_blocks_send);
this_l->send_blocks_send->setText ("Bad amount number");
this_l->node.alarm.add (std::chrono::system_clock::now () + std::chrono::seconds (5), [this_w]() {
if (auto this_l = this_w.lock ())
{
show_button_ok (*this_l->send_blocks_send);
this_l->send_blocks_send->setText ("Send");
}
});
}
}
});
QObject::connect (send_blocks_back, &QPushButton::released, [this_w]() {
if (auto this_l = this_w.lock ())
{
this_l->pop_main_stack ();
}
});
QObject::connect (send_blocks, &QPushButton::released, [this_w]() {
if (auto this_l = this_w.lock ())
{
this_l->push_main_stack (this_l->send_blocks_window);
}
});
node.observers.blocks.add ([this_w](std::shared_ptr<rai::block>, rai::account const & account_a, rai::amount const &) {
if (auto this_l = this_w.lock ())
{
this_l->application.postEvent (&this_l->processor, new eventloop_event ([this_w, account_a]() {
if (auto this_l = this_w.lock ())
{
if (this_l->wallet_m->exists (account_a))
{
this_l->accounts.refresh ();
}
if (account_a == this_l->account)
{
this_l->history.refresh ();
this_l->self.refresh_balance ();
}
}
}));
}
});
node.observers.wallet.add ([this_w](bool active_a) {
if (auto this_l = this_w.lock ())
{
this_l->application.postEvent (&this_l->processor, new eventloop_event ([this_w, active_a]() {
if (auto this_l = this_w.lock ())
{
if (active_a)
{
this_l->active_status.insert (rai_qt::status_types::active);
}
else
{
this_l->active_status.erase (rai_qt::status_types::active);
}
}
}));
}
});
node.observers.endpoint.add ([this_w](rai::endpoint const &) {
if (auto this_l = this_w.lock ())
{
this_l->application.postEvent (&this_l->processor, new eventloop_event ([this_w]() {
if (auto this_l = this_w.lock ())
{
this_l->update_connected ();
}
}));
}
});
node.observers.disconnect.add ([this_w]() {
if (auto this_l = this_w.lock ())
{
this_l->application.postEvent (&this_l->processor, new eventloop_event ([this_w]() {
if (auto this_l = this_w.lock ())
{
this_l->update_connected ();
}
}));
}
});
node.bootstrap_initiator.add_observer ([this_w](bool active_a) {
if (auto this_l = this_w.lock ())
{
this_l->application.postEvent (&this_l->processor, new eventloop_event ([this_w, active_a]() {
if (auto this_l = this_w.lock ())
{
if (active_a)
{
this_l->active_status.insert (rai_qt::status_types::synchronizing);
}
else
{
this_l->active_status.erase (rai_qt::status_types::synchronizing);
}
}
}));
}
});
node.work.work_observers.add ([this_w](bool working) {
if (auto this_l = this_w.lock ())
{
this_l->application.postEvent (&this_l->processor, new eventloop_event ([this_w, working]() {
if (auto this_l = this_w.lock ())
{
if (working)
{
this_l->active_status.insert (rai_qt::status_types::working);
}
else
{
this_l->active_status.erase (rai_qt::status_types::working);
}
}
}));
}
});
wallet_m->lock_observer = [this_w](bool invalid, bool vulnerable) {
if (auto this_l = this_w.lock ())
{
this_l->application.postEvent (&this_l->processor, new eventloop_event ([this_w, invalid, vulnerable]() {
if (auto this_l = this_w.lock ())
{
this_l->settings.update_locked (invalid, vulnerable);
}
}));
}
};
settings_button->setToolTip ("Unlock wallet, set password, change representative");
}
void rai_qt::wallet::refresh ()
{
{
rai::transaction transaction (wallet_m->store.environment, nullptr, false);
assert (wallet_m->store.exists (transaction, account));
}
self.account_text->setText (QString (account.to_account ().c_str ()));
self.refresh_balance ();
accounts.refresh ();
history.refresh ();
account_viewer.history.refresh ();
settings.refresh_representative ();
}
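// Show the disconnected status whenever the node currently has no peers.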
void rai_qt::wallet::update_connected ()
{
if (node.peers.empty ())
{
active_status.insert (rai_qt::status_types::disconnected);
}
else
{
active_status.erase (rai_qt::status_types::disconnected);
}
}
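// Three seconds after startup, attempt to unlock with the empty password:
// wallets without a password become usable immediately, while
// password-protected wallets simply remain locked.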
void rai_qt::wallet::empty_password ()
{
this->node.alarm.add (std::chrono::system_clock::now () + std::chrono::seconds (3), [this]() {
wallet_m->enter_password (std::string (""));
});
}
void rai_qt::wallet::change_rendering_ratio (rai::uint128_t const & rendering_ratio_a)
{
application.postEvent (&processor, new eventloop_event ([this, rendering_ratio_a]() {
this->rendering_ratio = rendering_ratio_a;
this->refresh ();
}));
}
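// Scale a raw balance by the active rendering ratio and append the matching
// unit: "XRB" by default (Mxrb), "kxrb" for the kxrb ratio and "xrb" for the
// xrb ratio.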
std::string rai_qt::wallet::format_balance (rai::uint128_t const & balance) const
{
auto balance_str = rai::amount (balance).format_balance (rendering_ratio, 2, true, std::locale (""));
auto unit = std::string ("XRB");
if (rendering_ratio == rai::kxrb_ratio)
{
unit = std::string ("kxrb");
}
else if (rendering_ratio == rai::xrb_ratio)
{
unit = std::string ("xrb");
}
return balance_str + " " + unit;
}
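// The main stack acts as a simple navigation stack: push shows a new page on
// top, pop returns to the previous one. QStackedWidget::removeWidget detaches
// the current widget without deleting it.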
void rai_qt::wallet::push_main_stack (QWidget * widget_a)
{
main_stack->addWidget (widget_a);
main_stack->setCurrentIndex (main_stack->count () - 1);
}
void rai_qt::wallet::pop_main_stack ()
{
main_stack->removeWidget (main_stack->currentWidget ());
}
rai_qt::settings::settings (rai_qt::wallet & wallet_a) :
window (new QWidget),
layout (new QVBoxLayout),
password (new QLineEdit),
lock_toggle (new QPushButton ("Unlock")),
sep1 (new QFrame),
new_password (new QLineEdit),
retype_password (new QLineEdit),
change (new QPushButton ("Set/Change password")),
sep2 (new QFrame),
representative (new QLabel ("Account representative:")),
current_representative (new QLabel),
new_representative (new QLineEdit),
change_rep (new QPushButton ("Change representative")),
back (new QPushButton ("Back")),
wallet (wallet_a)
{
password->setPlaceholderText ("Password");
password->setEchoMode (QLineEdit::EchoMode::Password);
layout->addWidget (password);
layout->addWidget (lock_toggle);
sep1->setFrameShape (QFrame::HLine);
sep1->setFrameShadow (QFrame::Sunken);
layout->addWidget (sep1);
new_password->setEchoMode (QLineEdit::EchoMode::Password);
new_password->setPlaceholderText ("New password");
layout->addWidget (new_password);
retype_password->setEchoMode (QLineEdit::EchoMode::Password);
retype_password->setPlaceholderText ("Retype password");
layout->addWidget (retype_password);
layout->addWidget (change);
sep2->setFrameShape (QFrame::HLine);
sep2->setFrameShadow (QFrame::Sunken);
layout->addWidget (sep2);
layout->addWidget (representative);
current_representative->setTextInteractionFlags (Qt::TextSelectableByMouse);
layout->addWidget (current_representative);
new_representative->setPlaceholderText (rai::zero_key.pub.to_account ().c_str ());
layout->addWidget (new_representative);
layout->addWidget (change_rep);
layout->addStretch ();
layout->addWidget (back);
window->setLayout (layout);
QObject::connect (change, &QPushButton::released, [this]() {
rai::transaction transaction (this->wallet.wallet_m->store.environment, nullptr, true);
if (this->wallet.wallet_m->store.valid_password (transaction))
{
if (new_password->text ().isEmpty ())
{
new_password->clear ();
new_password->setPlaceholderText ("Empty Password - try again: New password");
retype_password->clear ();
retype_password->setPlaceholderText ("Empty Password - try again: Retype password");
}
else
{
if (new_password->text () == retype_password->text ())
{
this->wallet.wallet_m->store.rekey (transaction, std::string (new_password->text ().toLocal8Bit ()));
new_password->clear ();
retype_password->clear ();
retype_password->setPlaceholderText ("Retype password");
show_button_success (*change);
change->setText ("Password was changed");
update_locked (false, false);
this->wallet.node.alarm.add (std::chrono::system_clock::now () + std::chrono::seconds (5), [this]() {
show_button_ok (*change);
change->setText ("Set/Change password");
});
}
else
{
retype_password->clear ();
retype_password->setPlaceholderText ("Password mismatch");
}
}
}
else
{
show_button_error (*change);
change->setText ("Wallet is locked, unlock it");
this->wallet.node.alarm.add (std::chrono::system_clock::now () + std::chrono::seconds (5), [this]() {
show_button_ok (*change);
change->setText ("Set/Change password");
});
}
});
QObject::connect (change_rep, &QPushButton::released, [this]() {
rai::account representative_l;
if (!representative_l.decode_account (new_representative->text ().toStdString ()))
{
rai::transaction transaction (this->wallet.wallet_m->store.environment, nullptr, false);
if (this->wallet.wallet_m->store.valid_password (transaction))
{
change_rep->setEnabled (false);
{
rai::transaction transaction_l (this->wallet.wallet_m->store.environment, nullptr, true);
this->wallet.wallet_m->store.representative_set (transaction_l, representative_l);
}
auto block (this->wallet.wallet_m->change_sync (this->wallet.account, representative_l));
change_rep->setEnabled (true);
show_button_success (*change_rep);
change_rep->setText ("Represenative was changed");
current_representative->setText (QString (representative_l.to_account_split ().c_str ()));
new_representative->clear ();
this->wallet.node.alarm.add (std::chrono::system_clock::now () + std::chrono::seconds (5), [this]() {
show_button_ok (*change_rep);
change_rep->setText ("Change representative");
});
}
else
{
show_button_error (*change_rep);
change_rep->setText ("Wallet is locked, unlock it");
this->wallet.node.alarm.add (std::chrono::system_clock::now () + std::chrono::seconds (5), [this]() {
show_button_ok (*change_rep);
change_rep->setText ("Change representative");
});
}
}
else
{
show_line_error (*new_representative);
show_button_error (*change_rep);
change_rep->setText ("Invalid account");
this->wallet.node.alarm.add (std::chrono::system_clock::now () + std::chrono::seconds (5), [this]() {
show_line_ok (*new_representative);
show_button_ok (*change_rep);
change_rep->setText ("Change representative");
});
}
});
QObject::connect (back, &QPushButton::released, [this]() {
assert (this->wallet.main_stack->currentWidget () == window);
this->wallet.pop_main_stack ();
});
QObject::connect (lock_toggle, &QPushButton::released, [this]() {
rai::transaction transaction (this->wallet.wallet_m->store.environment, nullptr, true);
if (this->wallet.wallet_m->store.valid_password (transaction))
{
// lock wallet
rai::raw_key empty;
empty.data.clear ();
this->wallet.wallet_m->store.password.value_set (empty);
update_locked (true, true);
lock_toggle->setText ("Unlock");
password->setEnabled (true);
}
else
{
// try to unlock wallet
if (!this->wallet.wallet_m->enter_password (std::string (password->text ().toLocal8Bit ())))
{
password->clear ();
lock_toggle->setText ("Lock");
password->setDisabled (true);
}
else
{
show_line_error (*password);
show_button_error (*lock_toggle);
lock_toggle->setText ("Invalid password");
this->wallet.node.alarm.add (std::chrono::system_clock::now () + std::chrono::seconds (5), [this]() {
show_line_ok (*password);
show_button_ok (*lock_toggle);
// if wallet is still not unlocked by now, change button text
rai::transaction transaction (this->wallet.wallet_m->store.environment, nullptr, true);
if (!this->wallet.wallet_m->store.valid_password (transaction))
{
lock_toggle->setText ("Unlock");
}
});
}
}
});
// initial state for lock toggle button
rai::transaction transaction (this->wallet.wallet_m->store.environment, nullptr, true);
if (this->wallet.wallet_m->store.valid_password (transaction))
{
lock_toggle->setText ("Lock");
password->setDisabled (true);
}
representative->setToolTip ("In the infrequent case where the network needs to make a global decision,\nyour wallet software performs a balance-weighted vote to determine\nthe outcome. Since not everyone can remain online and perform this duty,\nyour wallet names a representative that can vote with, but cannot spend,\nyour balance.");
refresh_representative ();
}
void rai_qt::settings::refresh_representative ()
{
rai::transaction transaction (this->wallet.wallet_m->node.store.environment, nullptr, false);
rai::account_info info;
auto error (this->wallet.wallet_m->node.store.account_get (transaction, this->wallet.account, info));
if (!error)
{
auto block (this->wallet.wallet_m->node.store.block_get (transaction, info.rep_block));
assert (block != nullptr);
current_representative->setText (QString (block->representative ().to_account_split ().c_str ()));
}
else
{
current_representative->setText (this->wallet.wallet_m->store.representative (transaction).to_account_split ().c_str ());
}
}
void rai_qt::settings::activate ()
{
this->wallet.push_main_stack (window);
}
void rai_qt::settings::update_locked (bool invalid, bool vulnerable)
{
if (invalid)
{
this->wallet.active_status.insert (rai_qt::status_types::locked);
}
else
{
this->wallet.active_status.erase (rai_qt::status_types::locked);
}
if (vulnerable)
{
this->wallet.active_status.insert (rai_qt::status_types::vulnerable);
}
else
{
this->wallet.active_status.erase (rai_qt::status_types::vulnerable);
}
}
rai_qt::advanced_actions::advanced_actions (rai_qt::wallet & wallet_a) :
window (new QWidget),
layout (new QVBoxLayout),
show_ledger (new QPushButton ("Ledger")),
show_peers (new QPushButton ("Peers")),
search_for_receivables (new QPushButton ("Search for receivables")),
bootstrap (new QPushButton ("Initiate bootstrap")),
wallet_refresh (new QPushButton ("Refresh Wallet")),
create_block (new QPushButton ("Create Block")),
enter_block (new QPushButton ("Enter Block")),
block_viewer (new QPushButton ("Block Viewer")),
account_viewer (new QPushButton ("Account Viewer")),
scale_window (new QWidget),
scale_layout (new QHBoxLayout),
scale_label (new QLabel ("Scale:")),
ratio_group (new QButtonGroup),
mrai (new QRadioButton ("Mxrb")),
krai (new QRadioButton ("kxrb")),
rai (new QRadioButton ("xrb")),
back (new QPushButton ("Back")),
ledger_window (new QWidget),
ledger_layout (new QVBoxLayout),
ledger_model (new QStandardItemModel),
ledger_view (new QTableView),
ledger_refresh (new QPushButton ("Refresh")),
ledger_back (new QPushButton ("Back")),
peers_window (new QWidget),
peers_layout (new QVBoxLayout),
peers_model (new QStandardItemModel),
peers_view (new QTableView),
bootstrap_label (new QLabel ("IPV6:port \"::ffff:192.168.0.1:7075\"")),
bootstrap_line (new QLineEdit),
peers_bootstrap (new QPushButton ("Initiate Bootstrap")),
peers_refresh (new QPushButton ("Refresh")),
peers_back (new QPushButton ("Back")),
wallet (wallet_a)
{
ratio_group->addButton (mrai);
ratio_group->addButton (krai);
ratio_group->addButton (rai);
ratio_group->setId (mrai, 0);
ratio_group->setId (krai, 1);
ratio_group->setId (rai, 2);
scale_layout->addWidget (scale_label);
scale_layout->addWidget (mrai);
scale_layout->addWidget (krai);
scale_layout->addWidget (rai);
scale_window->setLayout (scale_layout);
ledger_model->setHorizontalHeaderItem (0, new QStandardItem ("Account"));
ledger_model->setHorizontalHeaderItem (1, new QStandardItem ("Balance"));
ledger_model->setHorizontalHeaderItem (2, new QStandardItem ("Block"));
ledger_view->setModel (ledger_model);
ledger_view->setEditTriggers (QAbstractItemView::NoEditTriggers);
ledger_view->verticalHeader ()->hide ();
ledger_view->horizontalHeader ()->setStretchLastSection (true);
ledger_layout->addWidget (ledger_view);
ledger_layout->addWidget (ledger_refresh);
ledger_layout->addWidget (ledger_back);
ledger_layout->setContentsMargins (0, 0, 0, 0);
ledger_window->setLayout (ledger_layout);
peers_model->setHorizontalHeaderItem (0, new QStandardItem ("IPv6 address:port"));
peers_model->setHorizontalHeaderItem (1, new QStandardItem ("Net version"));
peers_view->setEditTriggers (QAbstractItemView::NoEditTriggers);
peers_view->verticalHeader ()->hide ();
peers_view->setModel (peers_model);
peers_view->setColumnWidth (0, 220);
peers_view->setSortingEnabled (true);
peers_view->horizontalHeader ()->setStretchLastSection (true);
peers_layout->addWidget (peers_view);
peers_layout->addWidget (bootstrap_label);
peers_layout->addWidget (bootstrap_line);
peers_layout->addWidget (peers_bootstrap);
peers_layout->addWidget (peers_refresh);
peers_layout->addWidget (peers_back);
peers_layout->setContentsMargins (0, 0, 0, 0);
peers_window->setLayout (peers_layout);
layout->addWidget (show_ledger);
layout->addWidget (show_peers);
layout->addWidget (search_for_receivables);
layout->addWidget (bootstrap);
layout->addWidget (wallet_refresh);
layout->addWidget (create_block);
layout->addWidget (enter_block);
layout->addWidget (block_viewer);
layout->addWidget (account_viewer);
layout->addWidget (scale_window);
layout->addStretch ();
layout->addWidget (back);
window->setLayout (layout);
QObject::connect (mrai, &QRadioButton::toggled, [this]() {
if (mrai->isChecked ())
{
this->wallet.change_rendering_ratio (rai::Mxrb_ratio);
}
});
QObject::connect (krai, &QRadioButton::toggled, [this]() {
if (krai->isChecked ())
{
this->wallet.change_rendering_ratio (rai::kxrb_ratio);
}
});
QObject::connect (rai, &QRadioButton::toggled, [this]() {
if (rai->isChecked ())
{
this->wallet.change_rendering_ratio (rai::xrb_ratio);
}
});
mrai->click ();
QObject::connect (wallet_refresh, &QPushButton::released, [this]() {
this->wallet.accounts.refresh ();
this->wallet.accounts.refresh_wallet_balance ();
});
QObject::connect (show_peers, &QPushButton::released, [this]() {
refresh_peers ();
this->wallet.push_main_stack (peers_window);
});
QObject::connect (show_ledger, &QPushButton::released, [this]() {
this->wallet.push_main_stack (ledger_window);
});
QObject::connect (back, &QPushButton::released, [this]() {
this->wallet.pop_main_stack ();
});
QObject::connect (peers_back, &QPushButton::released, [this]() {
this->wallet.pop_main_stack ();
});
QObject::connect (peers_bootstrap, &QPushButton::released, [this]() {
rai::endpoint endpoint;
auto error (rai::parse_endpoint (bootstrap_line->text ().toStdString (), endpoint));
if (!error)
{
show_line_ok (*bootstrap_line);
bootstrap_line->clear ();
this->wallet.node.bootstrap_initiator.bootstrap (endpoint);
}
else
{
show_line_error (*bootstrap_line);
}
});
QObject::connect (peers_refresh, &QPushButton::released, [this]() {
refresh_peers ();
});
QObject::connect (ledger_refresh, &QPushButton::released, [this]() {
refresh_ledger ();
});
QObject::connect (ledger_back, &QPushButton::released, [this]() {
this->wallet.pop_main_stack ();
});
QObject::connect (search_for_receivables, &QPushButton::released, [this]() {
this->wallet.wallet_m->search_pending ();
});
QObject::connect (bootstrap, &QPushButton::released, [this]() {
this->wallet.node.bootstrap_initiator.bootstrap ();
});
QObject::connect (create_block, &QPushButton::released, [this]() {
this->wallet.push_main_stack (this->wallet.block_creation.window);
});
QObject::connect (enter_block, &QPushButton::released, [this]() {
this->wallet.push_main_stack (this->wallet.block_entry.window);
});
QObject::connect (block_viewer, &QPushButton::released, [this]() {
this->wallet.push_main_stack (this->wallet.block_viewer.window);
});
QObject::connect (account_viewer, &QPushButton::released, [this]() {
this->wallet.push_main_stack (this->wallet.account_viewer.window);
});
bootstrap->setToolTip ("Multi-connection bootstrap to random peers");
search_for_receivables->setToolTip ("Search for pending blocks");
create_block->setToolTip ("Create block in JSON format");
enter_block->setToolTip ("Enter block in JSON format");
}
void rai_qt::advanced_actions::refresh_peers ()
{
peers_model->removeRows (0, peers_model->rowCount ());
auto list (wallet.node.peers.list_version ());
for (auto i (list.begin ()), n (list.end ()); i != n; ++i)
{
std::stringstream endpoint;
endpoint << i->first.address ().to_string ();
endpoint << ':';
endpoint << i->first.port ();
QString qendpoint (endpoint.str ().c_str ());
QList<QStandardItem *> items;
items.push_back (new QStandardItem (qendpoint));
items.push_back (new QStandardItem (QString (std::to_string (i->second).c_str ())));
peers_model->appendRow (items);
}
}
void rai_qt::advanced_actions::refresh_ledger ()
{
ledger_model->removeRows (0, ledger_model->rowCount ());
rai::transaction transaction (wallet.node.store.environment, nullptr, false);
for (auto i (wallet.node.ledger.store.latest_begin (transaction)), j (wallet.node.ledger.store.latest_end ()); i != j; ++i)
{
QList<QStandardItem *> items;
items.push_back (new QStandardItem (QString (rai::block_hash (i->first.uint256 ()).to_account ().c_str ())));
rai::account_info info (i->second);
std::string balance;
rai::amount (info.balance.number () / wallet.rendering_ratio).encode_dec (balance);
items.push_back (new QStandardItem (QString (balance.c_str ())));
std::string block_hash;
info.head.encode_hex (block_hash);
items.push_back (new QStandardItem (QString (block_hash.c_str ())));
ledger_model->appendRow (items);
}
}
rai_qt::block_entry::block_entry (rai_qt::wallet & wallet_a) :
window (new QWidget),
layout (new QVBoxLayout),
block (new QPlainTextEdit),
status (new QLabel),
process (new QPushButton ("Process")),
back (new QPushButton ("Back")),
wallet (wallet_a)
{
layout->addWidget (block);
layout->addWidget (status);
layout->addWidget (process);
layout->addWidget (back);
window->setLayout (layout);
QObject::connect (process, &QPushButton::released, [this]() {
auto string (block->toPlainText ().toStdString ());
try
{
boost::property_tree::ptree tree;
std::stringstream istream (string);
boost::property_tree::read_json (istream, tree);
auto block_l (rai::deserialize_block_json (tree));
if (block_l != nullptr)
{
show_label_ok (*status);
this->status->setText ("");
this->wallet.node.process_active (std::move (block_l));
}
else
{
show_label_error (*status);
this->status->setText ("Unable to parse block");
}
}
catch (std::runtime_error const &)
{
show_label_error (*status);
this->status->setText ("Unable to parse block");
}
});
QObject::connect (back, &QPushButton::released, [this]() {
this->wallet.pop_main_stack ();
});
}
rai_qt::block_creation::block_creation (rai_qt::wallet & wallet_a) :
window (new QWidget),
layout (new QVBoxLayout),
group (new QButtonGroup),
button_layout (new QHBoxLayout),
send (new QRadioButton ("Send")),
receive (new QRadioButton ("Receive")),
change (new QRadioButton ("Change")),
open (new QRadioButton ("Open")),
account_label (new QLabel ("Account:")),
account (new QLineEdit),
source_label (new QLabel ("Source:")),
source (new QLineEdit),
amount_label (new QLabel ("Amount:")),
amount (new QLineEdit),
destination_label (new QLabel ("Destination:")),
destination (new QLineEdit),
representative_label (new QLabel ("Representative:")),
representative (new QLineEdit),
block (new QPlainTextEdit),
status (new QLabel),
create (new QPushButton ("Create")),
back (new QPushButton ("Back")),
wallet (wallet_a)
{
group->addButton (send);
group->addButton (receive);
group->addButton (change);
group->addButton (open);
group->setId (send, 0);
group->setId (receive, 1);
group->setId (change, 2);
group->setId (open, 3);
button_layout->addWidget (send);
button_layout->addWidget (receive);
button_layout->addWidget (open);
button_layout->addWidget (change);
layout->addLayout (button_layout);
layout->addWidget (account_label);
layout->addWidget (account);
layout->addWidget (source_label);
layout->addWidget (source);
layout->addWidget (amount_label);
layout->addWidget (amount);
layout->addWidget (destination_label);
layout->addWidget (destination);
layout->addWidget (representative_label);
layout->addWidget (representative);
layout->addWidget (block);
layout->addWidget (status);
layout->addWidget (create);
layout->addWidget (back);
window->setLayout (layout);
QObject::connect (send, &QRadioButton::toggled, [this]() {
if (send->isChecked ())
{
deactivate_all ();
activate_send ();
}
});
QObject::connect (receive, &QRadioButton::toggled, [this]() {
if (receive->isChecked ())
{
deactivate_all ();
activate_receive ();
}
});
QObject::connect (open, &QRadioButton::toggled, [this]() {
if (open->isChecked ())
{
deactivate_all ();
activate_open ();
}
});
QObject::connect (change, &QRadioButton::toggled, [this]() {
if (change->isChecked ())
{
deactivate_all ();
activate_change ();
}
});
QObject::connect (create, &QPushButton::released, [this]() {
switch (group->checkedId ())
{
case 0:
create_send ();
break;
case 1:
create_receive ();
break;
case 2:
create_change ();
break;
case 3:
create_open ();
break;
default:
assert (false);
break;
}
});
QObject::connect (back, &QPushButton::released, [this]() {
this->wallet.pop_main_stack ();
});
send->click ();
}
void rai_qt::block_creation::deactivate_all ()
{
account_label->hide ();
account->hide ();
source_label->hide ();
source->hide ();
amount_label->hide ();
amount->hide ();
destination_label->hide ();
destination->hide ();
representative_label->hide ();
representative->hide ();
}
void rai_qt::block_creation::activate_send ()
{
account_label->show ();
account->show ();
amount_label->show ();
amount->show ();
destination_label->show ();
destination->show ();
}
void rai_qt::block_creation::activate_receive ()
{
source_label->show ();
source->show ();
}
void rai_qt::block_creation::activate_open ()
{
source_label->show ();
source->show ();
representative_label->show ();
representative->show ();
}
void rai_qt::block_creation::activate_change ()
{
account_label->show ();
account->show ();
representative_label->show ();
representative->show ();
}
void rai_qt::block_creation::create_send ()
{
rai::account account_l;
auto error (account_l.decode_account (account->text ().toStdString ()));
if (!error)
{
rai::amount amount_l;
error = amount_l.decode_dec (amount->text ().toStdString ());
if (!error)
{
rai::account destination_l;
error = destination_l.decode_account (destination->text ().toStdString ());
if (!error)
{
rai::transaction transaction (wallet.node.store.environment, nullptr, false);
rai::raw_key key;
if (!wallet.wallet_m->store.fetch (transaction, account_l, key))
{
auto balance (wallet.node.ledger.account_balance (transaction, account_l));
if (amount_l.number () <= balance)
{
rai::account_info info;
auto error (wallet.node.store.account_get (transaction, account_l, info));
assert (!error);
rai::send_block send (info.head, destination_l, balance - amount_l.number (), key, account_l, wallet.wallet_m->work_fetch (transaction, account_l, info.head));
std::string block_l;
send.serialize_json (block_l);
block->setPlainText (QString (block_l.c_str ()));
show_label_ok (*status);
status->setText ("Created block");
}
else
{
show_label_error (*status);
status->setText ("Insufficient balance");
}
}
else
{
show_label_error (*status);
status->setText ("Account is not in wallet");
}
}
else
{
show_label_error (*status);
status->setText ("Unable to decode destination");
}
}
else
{
show_label_error (*status);
status->setText ("Unable to decode amount");
}
}
else
{
show_label_error (*status);
status->setText ("Unable to decode account");
}
}
void rai_qt::block_creation::create_receive ()
{
rai::block_hash source_l;
auto error (source_l.decode_hex (source->text ().toStdString ()));
if (!error)
{
rai::transaction transaction (wallet.node.store.environment, nullptr, false);
auto block_l (wallet.node.store.block_get (transaction, source_l));
if (block_l != nullptr)
{
auto send_block (dynamic_cast<rai::send_block *> (block_l.get ()));
if (send_block != nullptr)
{
rai::pending_key pending_key (send_block->hashables.destination, source_l);
rai::pending_info pending;
if (!wallet.node.store.pending_get (transaction, pending_key, pending))
{
rai::account_info info;
auto error (wallet.node.store.account_get (transaction, pending_key.account, info));
if (!error)
{
rai::raw_key key;
auto error (wallet.wallet_m->store.fetch (transaction, pending_key.account, key));
if (!error)
{
rai::receive_block receive (info.head, source_l, key, pending_key.account, wallet.wallet_m->work_fetch (transaction, pending_key.account, info.head));
std::string block_l;
receive.serialize_json (block_l);
block->setPlainText (QString (block_l.c_str ()));
show_label_ok (*status);
status->setText ("Created block");
}
else
{
show_label_error (*status);
status->setText ("Account is not in wallet");
}
}
else
{
show_label_error (*status);
status->setText ("Account not yet open");
}
}
else
{
show_label_error (*status);
status->setText ("Source block is not pending to receive");
}
}
else
{
show_label_error (*status);
status->setText ("Source is not a send block");
}
}
else
{
show_label_error (*status);
status->setText ("Source block not found");
}
}
else
{
show_label_error (*status);
status->setText ("Unable to decode source");
}
}
void rai_qt::block_creation::create_change ()
{
rai::account account_l;
auto error (account_l.decode_account (account->text ().toStdString ()));
if (!error)
{
rai::account representative_l;
error = representative_l.decode_account (representative->text ().toStdString ());
if (!error)
{
rai::transaction transaction (wallet.node.store.environment, nullptr, false);
rai::account_info info;
auto error (wallet.node.store.account_get (transaction, account_l, info));
if (!error)
{
rai::raw_key key;
auto error (wallet.wallet_m->store.fetch (transaction, account_l, key));
if (!error)
{
rai::change_block change (info.head, representative_l, key, account_l, wallet.wallet_m->work_fetch (transaction, account_l, info.head));
std::string block_l;
change.serialize_json (block_l);
block->setPlainText (QString (block_l.c_str ()));
show_label_ok (*status);
status->setText ("Created block");
}
else
{
show_label_error (*status);
status->setText ("Account is not in wallet");
}
}
else
{
show_label_error (*status);
status->setText ("Account not yet open");
}
}
else
{
show_label_error (*status);
status->setText ("Unable to decode representative");
}
}
else
{
show_label_error (*status);
status->setText ("Unable to decode account");
}
}
void rai_qt::block_creation::create_open ()
{
rai::block_hash source_l;
auto error (source_l.decode_hex (source->text ().toStdString ()));
if (!error)
{
rai::account representative_l;
error = representative_l.decode_account (representative->text ().toStdString ());
if (!error)
{
rai::transaction transaction (wallet.node.store.environment, nullptr, false);
auto block_l (wallet.node.store.block_get (transaction, source_l));
if (block_l != nullptr)
{
auto send_block (dynamic_cast<rai::send_block *> (block_l.get ()));
if (send_block != nullptr)
{
rai::pending_key pending_key (send_block->hashables.destination, source_l);
rai::pending_info pending;
if (!wallet.node.store.pending_get (transaction, pending_key, pending))
{
rai::account_info info;
auto error (wallet.node.store.account_get (transaction, pending_key.account, info));
if (error)
{
rai::raw_key key;
auto error (wallet.wallet_m->store.fetch (transaction, pending_key.account, key));
if (!error)
{
rai::open_block open (source_l, representative_l, pending_key.account, key, pending_key.account, wallet.wallet_m->work_fetch (transaction, pending_key.account, pending_key.account));
std::string block_l;
open.serialize_json (block_l);
block->setPlainText (QString (block_l.c_str ()));
show_label_ok (*status);
status->setText ("Created block");
}
else
{
show_label_error (*status);
status->setText ("Account is not in wallet");
}
}
else
{
show_label_error (*status);
status->setText ("Account already open");
}
}
else
{
show_label_error (*status);
status->setText ("Source block is not pending to receive");
}
}
else
{
show_label_error (*status);
status->setText ("Source is not a send block");
}
}
else
{
show_label_error (*status);
status->setText ("Source block not found");
}
}
else
{
show_label_error (*status);
status->setText ("Unable to decode representative");
}
}
else
{
show_label_error (*status);
status->setText ("Unable to decode source");
}
}
| 1 | 13,126 | This didn't require corresponding changes to the test case(s)? | nanocurrency-nano-node | cpp |
@@ -509,6 +509,7 @@ type TaskConfig struct {
Count Count `yaml:"count"`
ExecuteCommand ExecuteCommand `yaml:"exec"`
Variables map[string]string `yaml:"variables"`
+ EnvFile string `yaml:"env_file"`
Secrets map[string]string `yaml:"secrets"`
Storage Storage `yaml:"storage"`
} | 1 | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
// Package manifest provides functionality to create Manifest files.
package manifest
import (
"errors"
"fmt"
"path/filepath"
"strconv"
"strings"
"time"
"github.com/aws/copilot-cli/internal/pkg/docker/dockerengine"
"github.com/google/shlex"
"github.com/aws/aws-sdk-go/aws"
"gopkg.in/yaml.v3"
)
// Defaults for FireLens log routing and Dockerfile discovery.
const (
firelensContainerName = "firelens_log_router"
defaultFluentbitImage = "public.ecr.aws/aws-observability/aws-for-fluent-bit:latest"
defaultDockerfileName = "Dockerfile"
)
// Platform options.
const (
OSLinux = dockerengine.OSLinux
OSWindows = dockerengine.OSWindows
OSWindowsServer2019Core = "windows_server_2019_core"
OSWindowsServer2019Full = "windows_server_2019_full"
ArchAMD64 = dockerengine.ArchAMD64
ArchX86 = dockerengine.ArchX86
ArchARM = dockerengine.ArchARM
ArchARM64 = dockerengine.ArchARM64
// Minimum CPU and mem values required for Windows-based tasks.
MinWindowsTaskCPU = 1024
MinWindowsTaskMemory = 2048
)
var (
// AWS VPC subnet placement options.
PublicSubnetPlacement = Placement("public")
PrivateSubnetPlacement = Placement("private")
// WorkloadTypes holds all workload manifest types.
WorkloadTypes = append(ServiceTypes, JobTypes...)
// Acceptable strings for Windows operating systems.
WindowsOSFamilies = []string{OSWindows, OSWindowsServer2019Core, OSWindowsServer2019Full}
// ValidShortPlatforms are all of the os/arch combinations that the PlatformString field may accept.
ValidShortPlatforms = []string{
dockerengine.PlatformString(OSLinux, ArchAMD64),
dockerengine.PlatformString(OSLinux, ArchX86),
dockerengine.PlatformString(OSLinux, ArchARM),
dockerengine.PlatformString(OSLinux, ArchARM64),
dockerengine.PlatformString(OSWindows, ArchAMD64),
dockerengine.PlatformString(OSWindows, ArchX86),
}
defaultPlatform = platformString(OSLinux, ArchAMD64)
// validAdvancedPlatforms are all of the OsFamily/Arch combinations that the PlatformArgs field may accept.
validAdvancedPlatforms = []PlatformArgs{
{OSFamily: aws.String(OSLinux), Arch: aws.String(ArchX86)},
{OSFamily: aws.String(OSLinux), Arch: aws.String(ArchAMD64)},
{OSFamily: aws.String(OSLinux), Arch: aws.String(ArchARM)},
{OSFamily: aws.String(OSLinux), Arch: aws.String(ArchARM64)},
{OSFamily: aws.String(OSWindows), Arch: aws.String(ArchX86)},
{OSFamily: aws.String(OSWindows), Arch: aws.String(ArchAMD64)},
{OSFamily: aws.String(OSWindowsServer2019Core), Arch: aws.String(ArchX86)},
{OSFamily: aws.String(OSWindowsServer2019Core), Arch: aws.String(ArchAMD64)},
{OSFamily: aws.String(OSWindowsServer2019Full), Arch: aws.String(ArchX86)},
{OSFamily: aws.String(OSWindowsServer2019Full), Arch: aws.String(ArchAMD64)},
}
// All placement options.
subnetPlacements = []string{string(PublicSubnetPlacement), string(PrivateSubnetPlacement)}
// Error definitions.
ErrAppRunnerInvalidPlatformWindows = errors.New("Windows is not supported for App Runner services")
errUnmarshalBuildOpts = errors.New("unable to unmarshal build field into string or compose-style map")
errUnmarshalPlatformOpts = errors.New("unable to unmarshal platform field into string or compose-style map")
errUnmarshalCountOpts = errors.New(`unable to unmarshal "count" field to an integer or autoscaling configuration`)
errUnmarshalRangeOpts = errors.New(`unable to unmarshal "range" field`)
errUnmarshalExec = errors.New(`unable to unmarshal "exec" field into boolean or exec configuration`)
errUnmarshalEntryPoint = errors.New(`unable to unmarshal "entrypoint" into string or slice of strings`)
errUnmarshalAlias = errors.New(`unable to unmarshal "alias" into string or slice of strings`)
errUnmarshalCommand = errors.New(`unable to unmarshal "command" into string or slice of strings`)
)
// WorkloadManifest represents a workload manifest.
type WorkloadManifest interface {
ApplyEnv(envName string) (WorkloadManifest, error)
Validate() error
}
// WorkloadProps contains properties for creating a new workload manifest.
type WorkloadProps struct {
Name string
Dockerfile string
Image string
}
// Workload holds the basic data that every workload manifest file needs to have.
type Workload struct {
Name *string `yaml:"name"`
Type *string `yaml:"type"` // must be one of the supported manifest types.
}
// OverrideRule holds the manifest overriding rule for CloudFormation template.
type OverrideRule struct {
Path string `yaml:"path"`
Value yaml.Node `yaml:"value"`
}
// DependsOn represents a container dependency: it maps a container name to the
// condition this container waits for before starting.
type DependsOn map[string]string
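// For example, to delay this container until a sidecar named "nginx" has
// started (the condition name here is illustrative):
//
//   depends_on:
//     nginx: start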
// Image represents the workload's container image.
type Image struct {
Build BuildArgsOrString `yaml:"build"` // Build an image from a Dockerfile.
Location *string `yaml:"location"` // Use an existing image instead.
Credentials *string `yaml:"credentials"` // ARN of the secret containing the private repository credentials.
DockerLabels map[string]string `yaml:"labels,flow"` // Apply Docker labels to the container at runtime.
DependsOn DependsOn `yaml:"depends_on,flow"` // Add any sidecar dependencies.
}
// UnmarshalYAML overrides the default YAML unmarshaling logic for the Image
// struct, allowing it to perform more complex unmarshaling behavior.
// This method implements the yaml.Unmarshaler (v3) interface.
func (i *Image) UnmarshalYAML(value *yaml.Node) error {
type image Image
if err := value.Decode((*image)(i)); err != nil {
return err
}
if !i.Build.isEmpty() && i.Location != nil {
return &errFieldMutualExclusive{
firstField: "build",
secondField: "location",
mustExist: true,
}
}
return nil
}
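// For example, the following manifest snippet is rejected by the validation
// above because "build" and "location" are mutually exclusive (the image URI
// is illustrative):
//
//   image:
//     build: ./Dockerfile
//     location: public.ecr.aws/nginx/nginx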
// ImageWithHealthcheck represents a container image with health check.
type ImageWithHealthcheck struct {
Image Image `yaml:",inline"`
HealthCheck ContainerHealthCheck `yaml:"healthcheck"`
}
// ImageWithPortAndHealthcheck represents a container image with an exposed port and health check.
type ImageWithPortAndHealthcheck struct {
ImageWithPort `yaml:",inline"`
HealthCheck ContainerHealthCheck `yaml:"healthcheck"`
}
// ImageWithPort represents a container image with an exposed port.
type ImageWithPort struct {
Image Image `yaml:",inline"`
Port *uint16 `yaml:"port"`
}
// ImageWithHealthcheckAndOptionalPort represents a container image with an optional exposed port and health check.
type ImageWithHealthcheckAndOptionalPort struct {
ImageWithOptionalPort `yaml:",inline"`
HealthCheck ContainerHealthCheck `yaml:"healthcheck"`
}
// ImageWithOptionalPort represents a container image with an optional exposed port.
type ImageWithOptionalPort struct {
Image Image `yaml:",inline"`
Port *uint16 `yaml:"port"`
}
// GetLocation returns the location of the image.
func (i Image) GetLocation() string {
return aws.StringValue(i.Location)
}
// BuildConfig populates a docker.BuildArguments struct from the fields available in the manifest.
// Prefer the following hierarchy:
// 1. Specific dockerfile, specific context
// 2. Specific dockerfile, context = dockerfile dir
// 3. "Dockerfile" located in context dir
// 4. "Dockerfile" located in ws root.
func (i *Image) BuildConfig(rootDirectory string) *DockerBuildArgs {
df := i.dockerfile()
ctx := i.context()
dockerfile := aws.String(filepath.Join(rootDirectory, defaultDockerfileName))
context := aws.String(rootDirectory)
if df != "" && ctx != "" {
dockerfile = aws.String(filepath.Join(rootDirectory, df))
context = aws.String(filepath.Join(rootDirectory, ctx))
}
if df != "" && ctx == "" {
dockerfile = aws.String(filepath.Join(rootDirectory, df))
context = aws.String(filepath.Join(rootDirectory, filepath.Dir(df)))
}
if df == "" && ctx != "" {
dockerfile = aws.String(filepath.Join(rootDirectory, ctx, defaultDockerfileName))
context = aws.String(filepath.Join(rootDirectory, ctx))
}
return &DockerBuildArgs{
Dockerfile: dockerfile,
Context: context,
Args: i.args(),
Target: i.target(),
CacheFrom: i.cacheFrom(),
}
}
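// Illustration of the precedence rules above, with paths shown relative to
// rootDirectory:
//
//   build: {dockerfile: a/Dockerfile, context: b} -> Dockerfile a/Dockerfile, Context b
//   build: {dockerfile: a/Dockerfile}             -> Dockerfile a/Dockerfile, Context a
//   build: {context: b}                           -> Dockerfile b/Dockerfile, Context b
//   build: {}                                     -> Dockerfile Dockerfile,   Context .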
// dockerfile returns the path to the workload's Dockerfile. If no dockerfile is specified,
// returns "".
func (i *Image) dockerfile() string {
// Prefer to use the "Dockerfile" string in BuildArgs. Otherwise,
// "BuildString". If no dockerfile specified, return "".
if i.Build.BuildArgs.Dockerfile != nil {
return aws.StringValue(i.Build.BuildArgs.Dockerfile)
}
var dfPath string
if i.Build.BuildString != nil {
dfPath = aws.StringValue(i.Build.BuildString)
}
return dfPath
}
// context returns the build context directory if it exists, otherwise an empty string.
func (i *Image) context() string {
return aws.StringValue(i.Build.BuildArgs.Context)
}
// args returns the args section, if it exists, to override args in the dockerfile.
// Otherwise it returns an empty map.
func (i *Image) args() map[string]string {
return i.Build.BuildArgs.Args
}
// target returns the build target stage if it exists, otherwise nil.
func (i *Image) target() *string {
return i.Build.BuildArgs.Target
}
// cacheFrom returns the cache from build section, if it exists.
// Otherwise it returns nil.
func (i *Image) cacheFrom() []string {
return i.Build.BuildArgs.CacheFrom
}
// ImageOverride holds fields that override Dockerfile image defaults.
type ImageOverride struct {
EntryPoint EntryPointOverride `yaml:"entrypoint"`
Command CommandOverride `yaml:"command"`
}
// EntryPointOverride is a custom type which supports unmarshaling "entrypoint" yaml which
// can either be of type string or type slice of string.
type EntryPointOverride stringSliceOrString
// CommandOverride is a custom type which supports unmarshaling "command" yaml which
// can either be of type string or type slice of string.
type CommandOverride stringSliceOrString
// UnmarshalYAML overrides the default YAML unmarshaling logic for the EntryPointOverride
// struct, allowing it to perform more complex unmarshaling behavior.
// This method implements the yaml.Unmarshaler (v3) interface.
func (e *EntryPointOverride) UnmarshalYAML(value *yaml.Node) error {
if err := unmarshalYAMLToStringSliceOrString((*stringSliceOrString)(e), value); err != nil {
return errUnmarshalEntryPoint
}
return nil
}
// ToStringSlice converts an EntryPointOverride to a slice of string using shell-style rules.
func (e *EntryPointOverride) ToStringSlice() ([]string, error) {
out, err := toStringSlice((*stringSliceOrString)(e))
if err != nil {
return nil, err
}
return out, nil
}
// UnmarshalYAML overrides the default YAML unmarshaling logic for the CommandOverride
// struct, allowing it to perform more complex unmarshaling behavior.
// This method implements the yaml.Unmarshaler (v3) interface.
func (c *CommandOverride) UnmarshalYAML(value *yaml.Node) error {
if err := unmarshalYAMLToStringSliceOrString((*stringSliceOrString)(c), value); err != nil {
return errUnmarshalCommand
}
return nil
}
// ToStringSlice converts a CommandOverride to a slice of strings using shell-style rules.
func (c *CommandOverride) ToStringSlice() ([]string, error) {
out, err := toStringSlice((*stringSliceOrString)(c))
if err != nil {
return nil, err
}
return out, nil
}
type stringSliceOrString struct {
String *string
StringSlice []string
}
func unmarshalYAMLToStringSliceOrString(s *stringSliceOrString, value *yaml.Node) error {
if err := value.Decode(&s.StringSlice); err != nil {
switch err.(type) {
case *yaml.TypeError:
break
default:
return err
}
}
if s.StringSlice != nil {
// Unmarshaled successfully to s.StringSlice, unset s.String, and return.
s.String = nil
return nil
}
return value.Decode(&s.String)
}
func toStringSlice(s *stringSliceOrString) ([]string, error) {
if s.StringSlice != nil {
return s.StringSlice, nil
}
if s.String == nil {
return nil, nil
}
out, err := shlex.Split(*s.String)
if err != nil {
return nil, fmt.Errorf("convert string into tokens using shell-style rules: %w", err)
}
return out, nil
}
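// Editor's note: a hedged illustration (values assumed) of the two YAML shapes
// stringSliceOrString accepts, and how ToStringSlice normalizes the string
// form via shell-style splitting with shlex:
//
//	entrypoint: /bin/sh -c "echo hi"         -> ["/bin/sh", "-c", "echo hi"]
//	entrypoint: ["/bin/sh", "-c", "echo hi"] -> ["/bin/sh", "-c", "echo hi"]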
// BuildArgsOrString is a custom type which supports unmarshaling yaml which
// can either be of type string or type DockerBuildArgs.
type BuildArgsOrString struct {
BuildString *string
BuildArgs DockerBuildArgs
}
func (b *BuildArgsOrString) isEmpty() bool {
if aws.StringValue(b.BuildString) == "" && b.BuildArgs.isEmpty() {
return true
}
return false
}
// UnmarshalYAML overrides the default YAML unmarshaling logic for the BuildArgsOrString
// struct, allowing it to perform more complex unmarshaling behavior.
// This method implements the yaml.Unmarshaler (v3) interface.
func (b *BuildArgsOrString) UnmarshalYAML(value *yaml.Node) error {
if err := value.Decode(&b.BuildArgs); err != nil {
switch err.(type) {
case *yaml.TypeError:
break
default:
return err
}
}
if !b.BuildArgs.isEmpty() {
// Unmarshaled successfully to b.BuildArgs, unset b.BuildString, and return.
b.BuildString = nil
return nil
}
if err := value.Decode(&b.BuildString); err != nil {
return errUnmarshalBuildOpts
}
return nil
}
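// Editor's note: illustrative manifest snippets (assumed, not from the
// original source) that both decode through BuildArgsOrString; the string
// form is treated as the dockerfile path by Image.dockerfile above:
//
//	build: path/to/Dockerfile
//
//	build:
//	  dockerfile: path/to/Dockerfile
//	  context: path/to
//	  args: {GO_VERSION: "1.17"}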
// DockerBuildArgs represents the options specifiable under the "build" field
// of Docker Compose services. For more information, see:
// https://docs.docker.com/compose/compose-file/#build
type DockerBuildArgs struct {
Context *string `yaml:"context,omitempty"`
Dockerfile *string `yaml:"dockerfile,omitempty"`
Args map[string]string `yaml:"args,omitempty"`
Target *string `yaml:"target,omitempty"`
CacheFrom []string `yaml:"cache_from,omitempty"`
}
func (b *DockerBuildArgs) isEmpty() bool {
if b.Context == nil && b.Dockerfile == nil && b.Args == nil && b.Target == nil && b.CacheFrom == nil {
return true
}
return false
}
// ExecuteCommand is a custom type which supports unmarshaling yaml which
// can either be of type bool or type ExecuteCommandConfig.
type ExecuteCommand struct {
Enable *bool
Config ExecuteCommandConfig
}
// UnmarshalYAML overrides the default YAML unmarshaling logic for the ExecuteCommand
// struct, allowing it to perform more complex unmarshaling behavior.
// This method implements the yaml.Unmarshaler (v3) interface.
func (e *ExecuteCommand) UnmarshalYAML(value *yaml.Node) error {
if err := value.Decode(&e.Config); err != nil {
switch err.(type) {
case *yaml.TypeError:
break
default:
return err
}
}
if !e.Config.IsEmpty() {
return nil
}
if err := value.Decode(&e.Enable); err != nil {
return errUnmarshalExec
}
return nil
}
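// Editor's note: illustrative YAML (assumed) for the two ExecuteCommand
// shapes; the boolean form sets Enable, the map form sets Config:
//
//	exec: true
//
//	exec:
//	  enable: true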
// ExecuteCommandConfig represents the configuration for ECS Execute Command.
type ExecuteCommandConfig struct {
Enable *bool `yaml:"enable"`
// Reserved for future use.
}
// IsEmpty returns whether ExecuteCommandConfig is empty.
func (e ExecuteCommandConfig) IsEmpty() bool {
return e.Enable == nil
}
// Logging holds configuration for Firelens to route your logs.
type Logging struct {
Retention *int `yaml:"retention"`
Image *string `yaml:"image"`
Destination map[string]string `yaml:"destination,flow"`
EnableMetadata *bool `yaml:"enableMetadata"`
SecretOptions map[string]string `yaml:"secretOptions"`
ConfigFile *string `yaml:"configFilePath"`
Variables map[string]string `yaml:"variables"`
Secrets map[string]string `yaml:"secrets"`
}
// IsEmpty returns true if the struct has all zero members.
func (lc *Logging) IsEmpty() bool {
return lc.Image == nil && lc.Destination == nil && lc.EnableMetadata == nil &&
lc.SecretOptions == nil && lc.ConfigFile == nil && lc.Variables == nil && lc.Secrets == nil
}
// LogImage returns the default Fluent Bit image if not otherwise configured.
func (lc *Logging) LogImage() *string {
if lc.Image == nil {
return aws.String(defaultFluentbitImage)
}
return lc.Image
}
// GetEnableMetadata returns the configured value, or a sane default, for the EnableMetadata field.
func (lc *Logging) GetEnableMetadata() *string {
if lc.EnableMetadata == nil {
// Enable ecs log metadata by default.
return aws.String("true")
}
return aws.String(strconv.FormatBool(*lc.EnableMetadata))
}
// SidecarConfig represents the configurable options for setting up a sidecar container.
type SidecarConfig struct {
Port *string `yaml:"port"`
Image *string `yaml:"image"`
Essential *bool `yaml:"essential"`
CredsParam *string `yaml:"credentialsParameter"`
Variables map[string]string `yaml:"variables"`
Secrets map[string]string `yaml:"secrets"`
MountPoints []SidecarMountPoint `yaml:"mount_points"`
DockerLabels map[string]string `yaml:"labels"`
DependsOn DependsOn `yaml:"depends_on"`
HealthCheck ContainerHealthCheck `yaml:"healthcheck"`
ImageOverride `yaml:",inline"`
}
// TaskConfig represents the resource boundaries and environment variables for the containers in the task.
type TaskConfig struct {
CPU *int `yaml:"cpu"`
Memory *int `yaml:"memory"`
Platform PlatformArgsOrString `yaml:"platform,omitempty"`
Count Count `yaml:"count"`
ExecuteCommand ExecuteCommand `yaml:"exec"`
Variables map[string]string `yaml:"variables"`
Secrets map[string]string `yaml:"secrets"`
Storage Storage `yaml:"storage"`
}
// ContainerPlatform returns the platform for the service.
func (t *TaskConfig) ContainerPlatform() string {
if t.Platform.IsEmpty() {
return ""
}
if t.IsWindows() {
return platformString(OSWindows, t.Platform.Arch())
}
return platformString(t.Platform.OS(), t.Platform.Arch())
}
// IsWindows returns whether or not the service is building with a Windows OS.
func (t TaskConfig) IsWindows() bool {
return isWindowsPlatform(t.Platform)
}
// IsARM returns whether or not the service is building with an ARM Arch.
func (t TaskConfig) IsARM() bool {
return IsArmArch(t.Platform.Arch())
}
// PublishConfig represents the configurable options for setting up publishers.
type PublishConfig struct {
Topics []Topic `yaml:"topics"`
}
// Topic represents the configurable options for setting up a SNS Topic.
type Topic struct {
Name *string `yaml:"name"`
}
// NetworkConfig represents options for network connection to AWS resources within a VPC.
type NetworkConfig struct {
VPC vpcConfig `yaml:"vpc"`
}
// IsEmpty returns true if the struct has all zero members.
func (c *NetworkConfig) IsEmpty() bool {
return c.VPC.isEmpty()
}
// UnmarshalYAML ensures that a NetworkConfig always defaults to public subnets.
// If the user specified a placement that's not valid then throw an error.
func (c *NetworkConfig) UnmarshalYAML(value *yaml.Node) error {
type networkWithDefaults NetworkConfig
publicPlacement := Placement(PublicSubnetPlacement)
defaultVPCConf := vpcConfig{
Placement: &publicPlacement,
}
conf := networkWithDefaults{
VPC: defaultVPCConf,
}
if err := value.Decode(&conf); err != nil {
return err
}
if conf.VPC.isEmpty() { // If after unmarshaling the user did not specify VPC configuration then reset it to public.
conf.VPC = defaultVPCConf
}
*c = NetworkConfig(conf)
return nil
}
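// Editor's note: an assumed decoding sketch for the defaulting above,
// taking PublicSubnetPlacement to render as "public" and "private" to be a
// valid Placement:
//
//	network: {}            -> placement defaults to the public subnets
//	network:
//	  vpc:
//	    placement: private -> placement stays "private"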
// Placement represents where to place tasks (public or private subnets).
type Placement string
// vpcConfig represents the security groups and subnets attached to a task.
type vpcConfig struct {
*Placement `yaml:"placement"`
SecurityGroups []string `yaml:"security_groups"`
}
func (c *vpcConfig) isEmpty() bool {
return c.Placement == nil && c.SecurityGroups == nil
}
// UnmarshalWorkload deserializes the YAML input stream into a workload manifest object.
// If an error occurs during deserialization, then returns the error.
// If the workload type in the manifest is invalid, then returns an ErrInvalidWorkloadType.
func UnmarshalWorkload(in []byte) (WorkloadManifest, error) {
type manifest interface {
WorkloadManifest
}
am := Workload{}
if err := yaml.Unmarshal(in, &am); err != nil {
return nil, fmt.Errorf("unmarshal to workload manifest: %w", err)
}
typeVal := aws.StringValue(am.Type)
var m manifest
switch typeVal {
case LoadBalancedWebServiceType:
m = newDefaultLoadBalancedWebService()
case RequestDrivenWebServiceType:
m = newDefaultRequestDrivenWebService()
case BackendServiceType:
m = newDefaultBackendService()
case WorkerServiceType:
m = newDefaultWorkerService()
case ScheduledJobType:
m = newDefaultScheduledJob()
default:
return nil, &ErrInvalidWorkloadType{Type: typeVal}
}
if err := yaml.Unmarshal(in, m); err != nil {
return nil, fmt.Errorf("unmarshal manifest for %s: %w", typeVal, err)
}
return m, nil
}
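// Editor's note: a hedged usage sketch; the manifest contents and the
// "Backend Service" type string are assumptions for illustration:
//
//	m, err := UnmarshalWorkload([]byte("name: api\ntype: Backend Service"))
//	// On success m holds the backend-service manifest defaults merged with
//	// the YAML; an unrecognized type yields *ErrInvalidWorkloadType.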
// ContainerHealthCheck holds the configuration to determine if the service container is healthy.
// See https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ecs-taskdefinition-healthcheck.html
type ContainerHealthCheck struct {
Command []string `yaml:"command"`
Interval *time.Duration `yaml:"interval"`
Retries *int `yaml:"retries"`
Timeout *time.Duration `yaml:"timeout"`
StartPeriod *time.Duration `yaml:"start_period"`
}
// NewDefaultContainerHealthCheck returns container health check configuration
// that's identical to a load balanced web service's defaults.
func NewDefaultContainerHealthCheck() *ContainerHealthCheck {
return &ContainerHealthCheck{
Command: []string{"CMD-SHELL", "curl -f http://localhost/ || exit 1"},
Interval: durationp(10 * time.Second),
Retries: aws.Int(2),
Timeout: durationp(5 * time.Second),
StartPeriod: durationp(0 * time.Second),
}
}
// IsEmpty checks if the health check is empty.
func (hc ContainerHealthCheck) IsEmpty() bool {
return hc.Command == nil && hc.Interval == nil && hc.Retries == nil && hc.Timeout == nil && hc.StartPeriod == nil
}
// ApplyIfNotSet changes the healthcheck's fields only if they were not set and the other healthcheck has them set.
func (hc *ContainerHealthCheck) ApplyIfNotSet(other *ContainerHealthCheck) {
if hc.Command == nil && other.Command != nil {
hc.Command = other.Command
}
if hc.Interval == nil && other.Interval != nil {
hc.Interval = other.Interval
}
if hc.Retries == nil && other.Retries != nil {
hc.Retries = other.Retries
}
if hc.Timeout == nil && other.Timeout != nil {
hc.Timeout = other.Timeout
}
if hc.StartPeriod == nil && other.StartPeriod != nil {
hc.StartPeriod = other.StartPeriod
}
}
// PlatformArgsOrString is a custom type which supports unmarshaling yaml which
// can either be of type string or type PlatformArgs.
type PlatformArgsOrString struct {
*PlatformString
PlatformArgs PlatformArgs
}
// UnmarshalYAML overrides the default YAML unmarshaling logic for the PlatformArgsOrString
// struct, allowing it to perform more complex unmarshaling behavior.
// This method implements the yaml.Unmarshaler (v3) interface.
func (p *PlatformArgsOrString) UnmarshalYAML(value *yaml.Node) error {
if err := value.Decode(&p.PlatformArgs); err != nil {
switch err.(type) {
case *yaml.TypeError:
break
default:
return err
}
}
if !p.PlatformArgs.isEmpty() {
// Unmarshaled successfully to p.PlatformArgs, unset p.PlatformString, and return.
p.PlatformString = nil
return nil
}
if err := value.Decode(&p.PlatformString); err != nil {
return errUnmarshalPlatformOpts
}
return nil
}
// OS returns the operating system family.
func (p *PlatformArgsOrString) OS() string {
if p := aws.StringValue((*string)(p.PlatformString)); p != "" {
args := strings.Split(p, "/")
return strings.ToLower(args[0])
}
return strings.ToLower(aws.StringValue(p.PlatformArgs.OSFamily))
}
// Arch returns the architecture of PlatformArgsOrString.
func (p *PlatformArgsOrString) Arch() string {
if p := aws.StringValue((*string)(p.PlatformString)); p != "" {
args := strings.Split(p, "/")
return strings.ToLower(args[1])
}
return strings.ToLower(aws.StringValue(p.PlatformArgs.Arch))
}
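// Editor's note: illustrative only — both platform forms below are assumed
// to yield OS() == "linux" and Arch() == "amd64":
//
//	platform: linux/amd64
//
//	platform:
//	  osfamily: LINUX
//	  architecture: AMD64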
// PlatformArgs represents the specifics of a target OS.
type PlatformArgs struct {
OSFamily *string `yaml:"osfamily,omitempty"`
Arch *string `yaml:"architecture,omitempty"`
}
// PlatformString represents the string format of Platform.
type PlatformString string
// String implements the fmt.Stringer interface.
func (p *PlatformArgs) String() string {
return fmt.Sprintf("('%s', '%s')", aws.StringValue(p.OSFamily), aws.StringValue(p.Arch))
}
// IsEmpty returns if the platform field is empty.
func (p *PlatformArgsOrString) IsEmpty() bool {
return p.PlatformString == nil && p.PlatformArgs.isEmpty()
}
func (p *PlatformArgs) isEmpty() bool {
return p.OSFamily == nil && p.Arch == nil
}
func (p *PlatformArgs) bothSpecified() bool {
return (p.OSFamily != nil) && (p.Arch != nil)
}
// platformString returns a platform string of the format <os>/<arch>.
func platformString(os, arch string) string {
return fmt.Sprintf("%s/%s", os, arch)
}
// RedirectPlatform returns a platform that's supported for the given manifest type.
func RedirectPlatform(os, arch, wlType string) (platform string, err error) {
// Return an empty string if passed the default platform.
if platformString(os, arch) == defaultPlatform {
return "", nil
}
// Return an error if a platform cannot be redirected.
if wlType == RequestDrivenWebServiceType && os == OSWindows {
return "", ErrAppRunnerInvalidPlatformWindows
}
// All architectures are redirected to 'x86_64' (though 'arm64' is also supported); the OS is left as is.
// A non-empty return value means the platform is not the default but is still supported (aside from more obscure platforms).
return platformString(os, dockerengine.ArchX86), nil
}
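// Editor's note: an assumed behavior sketch, taking defaultPlatform to be
// "linux/x86_64" and dockerengine.ArchX86 to be "x86_64":
//
//	RedirectPlatform("linux", "x86_64", wlType)                    -> ("", nil)
//	RedirectPlatform("linux", "arm64", wlType)                     -> ("linux/x86_64", nil)
//	RedirectPlatform("windows", arch, RequestDrivenWebServiceType) -> error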
func isWindowsPlatform(platform PlatformArgsOrString) bool {
for _, win := range WindowsOSFamilies {
if platform.OS() == win {
return true
}
}
return false
}
// IsArmArch returns whether or not the arch is ARM.
func IsArmArch(arch string) bool {
return strings.ToLower(arch) == ArchARM || strings.ToLower(arch) == ArchARM64
}
func requiresBuild(image Image) (bool, error) {
noBuild, noURL := image.Build.isEmpty(), image.Location == nil
// Error if both of them are specified or neither is specified.
if noBuild == noURL {
return false, fmt.Errorf(`either "image.build" or "image.location" needs to be specified in the manifest`)
}
if image.Location == nil {
return true, nil
}
return false, nil
}
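// Editor's note: an illustrative truth table for the exclusive-or check above:
//
//	image.build set, image.location unset -> (true, nil)   // build locally
//	image.build unset, image.location set -> (false, nil)  // use existing image
//	both set or both unset                -> error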
func dockerfileBuildRequired(workloadType string, svc interface{}) (bool, error) {
type manifest interface {
BuildRequired() (bool, error)
}
mf, ok := svc.(manifest)
if !ok {
return false, fmt.Errorf("%s does not have required methods BuildRequired()", workloadType)
}
required, err := mf.BuildRequired()
if err != nil {
return false, fmt.Errorf("check if %s requires building from local Dockerfile: %w", workloadType, err)
}
return required, nil
}
func stringP(s string) *string {
if s == "" {
return nil
}
return &s
}
func uint16P(n uint16) *uint16 {
if n == 0 {
return nil
}
return &n
}
| 1 | 20,257 | A question! I think `string` totally works, but what do you think of `*string` for consistency? | aws-copilot-cli | go |
@@ -25,6 +25,12 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
+const (
+ AnnotationClusterInfrastructureReady = "aws.cluster.sigs.k8s.io/infrastructure-ready"
+ AnnotationControlPlaneReady = "aws.cluster.sigs.k8s.io/control-plane-ready"
+ ValueReady = "true"
+)
+
// AWSResourceReference is a reference to a specific AWS resource by ID, ARN, or filters.
// Only one of ID, ARN or Filters may be specified. Specifying more than one will result in
// a validation error. | 1 | /*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha1
import (
"fmt"
"sort"
"time"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// AWSResourceReference is a reference to a specific AWS resource by ID, ARN, or filters.
// Only one of ID, ARN or Filters may be specified. Specifying more than one will result in
// a validation error.
type AWSResourceReference struct {
// ID of resource
// +optional
ID *string `json:"id,omitempty"`
// ARN of resource
// +optional
ARN *string `json:"arn,omitempty"`
// Filters is a set of key/value pairs used to identify a resource
// They are applied according to the rules defined by the AWS API:
// https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Filtering.html
// +optional
Filters []Filter `json:"filters,omitempty"`
}
// Filter is a filter used to identify an AWS resource
type Filter struct {
// Name of the filter. Filter names are case-sensitive.
Name string `json:"name"`
// Values includes one or more filter values. Filter values are case-sensitive.
Values []string `json:"values"`
}
// AWSMachineProviderConditionType is a valid value for AWSMachineProviderCondition.Type
type AWSMachineProviderConditionType string
// Valid conditions for an AWS machine instance
const (
// MachineCreated indicates whether the machine has been created or not. If not,
// it should include a reason and message for the failure.
MachineCreated AWSMachineProviderConditionType = "MachineCreated"
)
// AWSMachineProviderCondition is a condition in a AWSMachineProviderStatus
type AWSMachineProviderCondition struct {
// Type is the type of the condition.
Type AWSMachineProviderConditionType `json:"type"`
// Status is the status of the condition.
Status corev1.ConditionStatus `json:"status"`
// LastProbeTime is the last time we probed the condition.
// +optional
LastProbeTime metav1.Time `json:"lastProbeTime"`
// LastTransitionTime is the last time the condition transitioned from one status to another.
// +optional
LastTransitionTime metav1.Time `json:"lastTransitionTime"`
// Reason is a unique, one-word, CamelCase reason for the condition's last transition.
// +optional
Reason string `json:"reason"`
// Message is a human-readable message indicating details about last transition.
// +optional
Message string `json:"message"`
}
// Network encapsulates AWS networking resources.
type Network struct {
// SecurityGroups is a map from the role/kind of the security group to its unique name, if any.
SecurityGroups map[string]SecurityGroup `json:"securityGroups,omitempty"`
// APIServerELB is the Kubernetes api server classic load balancer.
APIServerELB ClassicELB `json:"apiServerElb,omitempty"`
}
// ClassicELBScheme defines the scheme of a classic load balancer.
type ClassicELBScheme string
var (
// ClassicELBSchemeInternetFacing defines an internet-facing, publicly
// accessible AWS Classic ELB scheme
ClassicELBSchemeInternetFacing = ClassicELBScheme("Internet-facing")
// ClassicELBSchemeInternal defines an internal-only, VPC-facing
// AWS Classic ELB scheme.
ClassicELBSchemeInternal = ClassicELBScheme("internal")
)
// ClassicELBProtocol defines listener protocols for a classic load balancer.
type ClassicELBProtocol string
var (
// ClassicELBProtocolTCP defines the ELB API string representing the TCP protocol
ClassicELBProtocolTCP = ClassicELBProtocol("TCP")
// ClassicELBProtocolSSL defines the ELB API string representing the TLS protocol
ClassicELBProtocolSSL = ClassicELBProtocol("SSL")
// ClassicELBProtocolHTTP defines the ELB API string representing the HTTP protocol at L7
ClassicELBProtocolHTTP = ClassicELBProtocol("HTTP")
// ClassicELBProtocolHTTPS defines the ELB API string representing the HTTPS protocol at L7
ClassicELBProtocolHTTPS = ClassicELBProtocol("HTTPS")
)
// ClassicELB defines an AWS classic load balancer.
type ClassicELB struct {
// The name of the load balancer. It must be unique within the set of load balancers
// defined in the region. It also serves as identifier.
Name string `json:"name,omitempty"`
// DNSName is the dns name of the load balancer.
DNSName string `json:"dnsName,omitempty"`
// Scheme is the load balancer scheme, either internet-facing or private.
Scheme ClassicELBScheme `json:"scheme,omitempty"`
// SubnetIDs is an array of subnets in the VPC attached to the load balancer.
SubnetIDs []string `json:"subnetIds,omitempty"`
// SecurityGroupIDs is an array of security groups assigned to the load balancer.
SecurityGroupIDs []string `json:"securityGroupIds,omitempty"`
// Listeners is an array of classic elb listeners associated with the load balancer. There must be at least one.
Listeners []*ClassicELBListener `json:"listeners,omitempty"`
// HealthCheck is the classic elb health check associated with the load balancer.
HealthCheck *ClassicELBHealthCheck `json:"healthChecks,omitempty"`
// Attributes defines extra attributes associated with the load balancer.
Attributes ClassicELBAttributes `json:"attributes,omitempty"`
// Tags is a map of tags associated with the load balancer.
Tags map[string]string `json:"tags,omitempty"`
}
// ClassicELBAttributes defines extra attributes associated with a classic load balancer.
type ClassicELBAttributes struct {
// IdleTimeout is time that the connection is allowed to be idle (no data
// has been sent over the connection) before it is closed by the load balancer.
IdleTimeout time.Duration `json:"idleTimeout,omitempty"`
}
// ClassicELBListener defines an AWS classic load balancer listener.
type ClassicELBListener struct {
Protocol ClassicELBProtocol `json:"protocol"`
Port int64 `json:"port"`
InstanceProtocol ClassicELBProtocol `json:"instanceProtocol"`
InstancePort int64 `json:"instancePort"`
}
// ClassicELBHealthCheck defines an AWS classic load balancer health check.
type ClassicELBHealthCheck struct {
Target string `json:"target"`
Interval time.Duration `json:"interval"`
Timeout time.Duration `json:"timeout"`
HealthyThreshold int64 `json:"healthyThreshold"`
UnhealthyThreshold int64 `json:"unhealthyThreshold"`
}
// Subnets is a slice of Subnet.
type Subnets []*SubnetSpec
// ToMap returns a map from id to subnet.
func (s Subnets) ToMap() map[string]*SubnetSpec {
res := make(map[string]*SubnetSpec)
for _, x := range s {
res[x.ID] = x
}
return res
}
// FindByID returns a single subnet matching the given id or nil.
func (s Subnets) FindByID(id string) *SubnetSpec {
for _, x := range s {
if x.ID == id {
return x
}
}
return nil
}
// FilterPrivate returns a slice containing all subnets marked as private.
func (s Subnets) FilterPrivate() (res Subnets) {
for _, x := range s {
if !x.IsPublic {
res = append(res, x)
}
}
return
}
// FilterPublic returns a slice containing all subnets marked as public.
func (s Subnets) FilterPublic() (res Subnets) {
for _, x := range s {
if x.IsPublic {
res = append(res, x)
}
}
return
}
// FilterByZone returns a slice containing all subnets that live in the availability zone specified.
func (s Subnets) FilterByZone(zone string) (res Subnets) {
for _, x := range s {
if x.AvailabilityZone == zone {
res = append(res, x)
}
}
return
}
// RouteTable defines an AWS routing table.
type RouteTable struct {
ID string `json:"id"`
}
// SecurityGroupRole defines the unique role of a security group.
type SecurityGroupRole string
var (
// SecurityGroupBastion defines an SSH bastion role
SecurityGroupBastion = SecurityGroupRole("bastion")
// SecurityGroupNode defines a Kubernetes workload node role
SecurityGroupNode = SecurityGroupRole("node")
// SecurityGroupControlPlane defines a Kubernetes control plane node role
SecurityGroupControlPlane = SecurityGroupRole("controlplane")
// SecurityGroupLB defines a container for the cloud provider to inject its load balancer ingress rules
SecurityGroupLB = SecurityGroupRole("lb")
)
// SecurityGroup defines an AWS security group.
type SecurityGroup struct {
// ID is a unique identifier.
ID string `json:"id"`
// Name is the security group name.
Name string `json:"name"`
// IngressRules is the inbound rules associated with the security group.
IngressRules IngressRules `json:"ingressRule"`
// Tags is a map of tags associated with the security group.
Tags Tags `json:"tags,omitempty"`
}
// String returns a string representation of the security group.
func (s *SecurityGroup) String() string {
return fmt.Sprintf("id=%s/name=%s", s.ID, s.Name)
}
// SecurityGroupProtocol defines the protocol type for a security group rule.
type SecurityGroupProtocol string
var (
// SecurityGroupProtocolAll is a wildcard for all IP protocols
SecurityGroupProtocolAll = SecurityGroupProtocol("-1")
// SecurityGroupProtocolIPinIP represents the IP in IP protocol in ingress rules
SecurityGroupProtocolIPinIP = SecurityGroupProtocol("4")
// SecurityGroupProtocolTCP represents the TCP protocol in ingress rules
SecurityGroupProtocolTCP = SecurityGroupProtocol("tcp")
// SecurityGroupProtocolUDP represents the UDP protocol in ingress rules
SecurityGroupProtocolUDP = SecurityGroupProtocol("udp")
// SecurityGroupProtocolICMP represents the ICMP protocol in ingress rules
SecurityGroupProtocolICMP = SecurityGroupProtocol("icmp")
// SecurityGroupProtocolICMPv6 represents the ICMPv6 protocol in ingress rules
SecurityGroupProtocolICMPv6 = SecurityGroupProtocol("58")
)
// IngressRule defines an AWS ingress rule for security groups.
type IngressRule struct {
Description string `json:"description"`
Protocol SecurityGroupProtocol `json:"protocol"`
FromPort int64 `json:"fromPort"`
ToPort int64 `json:"toPort"`
// List of CIDR blocks to allow access from. Cannot be specified with SourceSecurityGroupID.
CidrBlocks []string `json:"cidrBlocks"`
// The security group id to allow access from. Cannot be specified with CidrBlocks.
SourceSecurityGroupIDs []string `json:"sourceSecurityGroupIds"`
}
// String returns a string representation of the ingress rule.
func (i *IngressRule) String() string {
return fmt.Sprintf("protocol=%s/range=[%d-%d]/description=%s", i.Protocol, i.FromPort, i.ToPort, i.Description)
}
// IngressRules is a slice of AWS ingress rules for security groups.
type IngressRules []*IngressRule
// Difference returns the ingress rules present in this slice but not in the other slice.
func (i IngressRules) Difference(o IngressRules) (out IngressRules) {
for _, x := range i {
found := false
for _, y := range o {
if x.Equals(y) {
found = true
break
}
}
if !found {
out = append(out, x)
}
}
return
}
// Equals returns true if two IngressRule are equal
func (i *IngressRule) Equals(o *IngressRule) bool {
if len(i.CidrBlocks) != len(o.CidrBlocks) {
return false
}
sort.Strings(i.CidrBlocks)
sort.Strings(o.CidrBlocks)
for i, v := range i.CidrBlocks {
if v != o.CidrBlocks[i] {
return false
}
}
if len(i.SourceSecurityGroupIDs) != len(o.SourceSecurityGroupIDs) {
return false
}
sort.Strings(i.SourceSecurityGroupIDs)
sort.Strings(o.SourceSecurityGroupIDs)
for i, v := range i.SourceSecurityGroupIDs {
if v != o.SourceSecurityGroupIDs[i] {
return false
}
}
return i.Description == o.Description &&
i.FromPort == o.FromPort &&
i.ToPort == o.ToPort &&
i.Protocol == o.Protocol
}
// InstanceState describes the state of an AWS instance.
type InstanceState string
var (
// InstanceStatePending is the string representing an instance in a pending state
InstanceStatePending = InstanceState("pending")
// InstanceStateRunning is the string representing an instance in a running state
InstanceStateRunning = InstanceState("running")
// InstanceStateShuttingDown is the string representing an instance shutting down
InstanceStateShuttingDown = InstanceState("shutting-down")
// InstanceStateTerminated is the string representing an instance that has been terminated
InstanceStateTerminated = InstanceState("terminated")
// InstanceStateStopping is the string representing an instance
// that is in the process of being stopped and can be restarted
InstanceStateStopping = InstanceState("stopping")
// InstanceStateStopped is the string representing an instance
// that has been stopped and can be restarted
InstanceStateStopped = InstanceState("stopped")
)
// Instance describes an AWS instance.
type Instance struct {
ID string `json:"id"`
// The current state of the instance.
State InstanceState `json:"instanceState,omitempty"`
// The instance type.
Type string `json:"type,omitempty"`
// The ID of the subnet of the instance.
SubnetID string `json:"subnetId,omitempty"`
// The ID of the AMI used to launch the instance.
ImageID string `json:"imageId,omitempty"`
// The name of the SSH key pair.
KeyName *string `json:"keyName,omitempty"`
// SecurityGroupIDs are one or more security group IDs this instance belongs to.
SecurityGroupIDs []string `json:"securityGroupIds,omitempty"`
// UserData is the raw data script passed to the instance which is run upon bootstrap.
// This field must not be base64 encoded and should only be used when running a new instance.
UserData *string `json:"userData,omitempty"`
// The name of the IAM instance profile associated with the instance, if applicable.
IAMProfile string `json:"iamProfile,omitempty"`
// The private IPv4 address assigned to the instance.
PrivateIP *string `json:"privateIp,omitempty"`
// The public IPv4 address assigned to the instance, if applicable.
PublicIP *string `json:"publicIp,omitempty"`
// Specifies whether enhanced networking with ENA is enabled.
ENASupport *bool `json:"enaSupport,omitempty"`
// Indicates whether the instance is optimized for Amazon EBS I/O.
EBSOptimized *bool `json:"ebsOptimized,omitempty"`
// Specifies size (in Gi) of the root storage device
RootDeviceSize int64 `json:"rootDeviceSize,omitempty"`
// The tags associated with the instance.
Tags map[string]string `json:"tags,omitempty"`
}
const (
AnnotationClusterInfrastructureReady = "aws.cluster.sigs.k8s.io/infrastructure-ready"
ValueReady = "true"
AnnotationControlPlaneReady = "aws.cluster.sigs.k8s.io/control-plane-ready"
)
| 1 | 10,005 | Should this be: `aws.infrastructure.cluster.sigs.k8s.io` instead? | kubernetes-sigs-cluster-api-provider-aws | go |
@@ -142,7 +142,7 @@ void PairLubricate::compute(int eflag, int vflag)
Ef[2][2] = h_rate[2]/domain->zprd;
Ef[0][1] = Ef[1][0] = 0.5 * h_rate[5]/domain->yprd;
Ef[0][2] = Ef[2][0] = 0.5 * h_rate[4]/domain->zprd;
- Ef[1][2] = Ef[2][1] = 0.5 * h_rate[3]/domain->zprd;
+ Ef[1][2] = Ef[2][1] = 0.5 * h_rate[3]/domain->xprd;
// copy updated velocity/omega/angmom to the ghost particles
// no need to do this if not shearing since comm->ghost_velocity is set | 1 | /* ----------------------------------------------------------------------
LAMMPS - Large-scale Atomic/Molecular Massively Parallel Simulator
http://lammps.sandia.gov, Sandia National Laboratories
Steve Plimpton, sjplimp@sandia.gov
Copyright (2003) Sandia Corporation. Under the terms of Contract
DE-AC04-94AL85000 with Sandia Corporation, the U.S. Government retains
certain rights in this software. This software is distributed under
the GNU General Public License.
See the README file in the top-level LAMMPS directory.
------------------------------------------------------------------------- */
/* ----------------------------------------------------------------------
Contributing authors: Randy Schunk (SNL)
Amit Kumar and Michael Bybee (UIUC)
------------------------------------------------------------------------- */
#include "pair_lubricate.h"
#include <mpi.h>
#include <cmath>
#include <cstring>
#include "atom.h"
#include "comm.h"
#include "force.h"
#include "neighbor.h"
#include "neigh_list.h"
#include "domain.h"
#include "modify.h"
#include "fix.h"
#include "fix_deform.h"
#include "fix_wall.h"
#include "input.h"
#include "variable.h"
#include "math_const.h"
#include "memory.h"
#include "error.h"
#include "utils.h"
using namespace LAMMPS_NS;
using namespace MathConst;
// same as fix_wall.cpp
enum{NONE=0,EDGE,CONSTANT,VARIABLE};
/* ---------------------------------------------------------------------- */
PairLubricate::PairLubricate(LAMMPS *lmp) : Pair(lmp)
{
single_enable = 0;
// set comm size needed by this Pair
comm_forward = 6;
}
/* ---------------------------------------------------------------------- */
PairLubricate::~PairLubricate()
{
if (allocated) {
memory->destroy(setflag);
memory->destroy(cutsq);
memory->destroy(cut);
memory->destroy(cut_inner);
}
}
/* ---------------------------------------------------------------------- */
void PairLubricate::compute(int eflag, int vflag)
{
int i,j,ii,jj,inum,jnum,itype,jtype;
double xtmp,ytmp,ztmp,delx,dely,delz,fx,fy,fz,tx,ty,tz;
double rsq,r,h_sep,radi;
double vr1,vr2,vr3,vnnr,vn1,vn2,vn3;
double vt1,vt2,vt3,wt1,wt2,wt3,wdotn;
double vRS0;
double vi[3],vj[3],wi[3],wj[3],xl[3];
double a_sq,a_sh,a_pu;
int *ilist,*jlist,*numneigh,**firstneigh;
double lamda[3],vstream[3];
double vxmu2f = force->vxmu2f;
ev_init(eflag,vflag);
double **x = atom->x;
double **v = atom->v;
double **f = atom->f;
double **omega = atom->omega;
double **torque = atom->torque;
double *radius = atom->radius;
int *type = atom->type;
int nlocal = atom->nlocal;
int newton_pair = force->newton_pair;
inum = list->inum;
ilist = list->ilist;
numneigh = list->numneigh;
firstneigh = list->firstneigh;
// subtract streaming component of velocity, omega, angmom
// assume fluid streaming velocity = box deformation rate
// vstream = (ux,uy,uz)
// ux = h_rate[0]*x + h_rate[5]*y + h_rate[4]*z
// uy = h_rate[1]*y + h_rate[3]*z
// uz = h_rate[2]*z
// omega_new = omega - curl(vstream)/2
// angmom_new = angmom - I*curl(vstream)/2
// Ef = (grad(vstream) + (grad(vstream))^T) / 2
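// editor's note (illustrative derivation, not in the original source):
// with lamda = (x/xprd, y/yprd, z/zprd), differentiating vstream below gives
// d(ux)/dy = h_rate[5]/yprd, d(ux)/dz = h_rate[4]/zprd,
// d(uy)/dz = h_rate[3]/zprd,
// which is where the off-diagonal Ef terms further down come from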
if (shearing) {
double *h_rate = domain->h_rate;
double *h_ratelo = domain->h_ratelo;
for (ii = 0; ii < inum; ii++) {
i = ilist[ii];
itype = type[i];
radi = radius[i];
domain->x2lamda(x[i],lamda);
vstream[0] = h_rate[0]*lamda[0] + h_rate[5]*lamda[1] +
h_rate[4]*lamda[2] + h_ratelo[0];
vstream[1] = h_rate[1]*lamda[1] + h_rate[3]*lamda[2] + h_ratelo[1];
vstream[2] = h_rate[2]*lamda[2] + h_ratelo[2];
v[i][0] -= vstream[0];
v[i][1] -= vstream[1];
v[i][2] -= vstream[2];
omega[i][0] += 0.5*h_rate[3];
omega[i][1] -= 0.5*h_rate[4];
omega[i][2] += 0.5*h_rate[5];
}
// set Ef from h_rate in strain units
Ef[0][0] = h_rate[0]/domain->xprd;
Ef[1][1] = h_rate[1]/domain->yprd;
Ef[2][2] = h_rate[2]/domain->zprd;
Ef[0][1] = Ef[1][0] = 0.5 * h_rate[5]/domain->yprd;
Ef[0][2] = Ef[2][0] = 0.5 * h_rate[4]/domain->zprd;
Ef[1][2] = Ef[2][1] = 0.5 * h_rate[3]/domain->zprd;
// copy updated velocity/omega/angmom to the ghost particles
// no need to do this if not shearing since comm->ghost_velocity is set
comm->forward_comm_pair(this);
}
// This section of code adjusts R0/RT0/RS0 if necessary due to changes
// in the volume fraction as a result of fix deform or moving walls
double dims[3], wallcoord;
if (flagVF) // Flag for volume fraction corrections
if (flagdeform || flagwall == 2){ // Possible changes in volume fraction
if (flagdeform && !flagwall)
for (j = 0; j < 3; j++)
dims[j] = domain->prd[j];
else if (flagwall == 2 || (flagdeform && flagwall == 1)){
double wallhi[3], walllo[3];
for (int j = 0; j < 3; j++){
wallhi[j] = domain->prd[j];
walllo[j] = 0;
}
for (int m = 0; m < wallfix->nwall; m++){
int dim = wallfix->wallwhich[m] / 2;
int side = wallfix->wallwhich[m] % 2;
if (wallfix->xstyle[m] == VARIABLE){
wallcoord = input->variable->compute_equal(wallfix->xindex[m]);
}
else wallcoord = wallfix->coord0[m];
if (side == 0) walllo[dim] = wallcoord;
else wallhi[dim] = wallcoord;
}
for (int j = 0; j < 3; j++)
dims[j] = wallhi[j] - walllo[j];
}
double vol_T = dims[0]*dims[1]*dims[2];
double vol_f = vol_P/vol_T;
if (flaglog == 0) {
R0 = 6*MY_PI*mu*rad*(1.0 + 2.16*vol_f);
RT0 = 8*MY_PI*mu*pow(rad,3.0);
RS0 = 20.0/3.0*MY_PI*mu*pow(rad,3.0)*
(1.0 + 3.33*vol_f + 2.80*vol_f*vol_f);
} else {
R0 = 6*MY_PI*mu*rad*(1.0 + 2.725*vol_f - 6.583*vol_f*vol_f);
RT0 = 8*MY_PI*mu*pow(rad,3.0)*(1.0 + 0.749*vol_f - 2.469*vol_f*vol_f);
RS0 = 20.0/3.0*MY_PI*mu*pow(rad,3.0)*
(1.0 + 3.64*vol_f - 6.95*vol_f*vol_f);
}
}
// end of R0 adjustment code
for (ii = 0; ii < inum; ii++) {
i = ilist[ii];
xtmp = x[i][0];
ytmp = x[i][1];
ztmp = x[i][2];
itype = type[i];
radi = radius[i];
jlist = firstneigh[i];
jnum = numneigh[i];
// angular velocity
wi[0] = omega[i][0];
wi[1] = omega[i][1];
wi[2] = omega[i][2];
// FLD contribution to force and torque due to isotropic terms
// FLD contribution to stress from isotropic RS0
if (flagfld) {
f[i][0] -= vxmu2f*R0*v[i][0];
f[i][1] -= vxmu2f*R0*v[i][1];
f[i][2] -= vxmu2f*R0*v[i][2];
torque[i][0] -= vxmu2f*RT0*wi[0];
torque[i][1] -= vxmu2f*RT0*wi[1];
torque[i][2] -= vxmu2f*RT0*wi[2];
if (shearing && vflag_either) {
vRS0 = -vxmu2f * RS0;
v_tally_tensor(i,i,nlocal,newton_pair,
vRS0*Ef[0][0],vRS0*Ef[1][1],vRS0*Ef[2][2],
vRS0*Ef[0][1],vRS0*Ef[0][2],vRS0*Ef[1][2]);
}
}
if (!flagHI) continue;
for (jj = 0; jj < jnum; jj++) {
j = jlist[jj];
j &= NEIGHMASK;
delx = xtmp - x[j][0];
dely = ytmp - x[j][1];
delz = ztmp - x[j][2];
rsq = delx*delx + dely*dely + delz*delz;
jtype = type[j];
if (rsq < cutsq[itype][jtype]) {
r = sqrt(rsq);
// angular momentum = I*omega = 2/5 * M*R^2 * omega
wj[0] = omega[j][0];
wj[1] = omega[j][1];
wj[2] = omega[j][2];
// xl = point of closest approach on particle i from its center
xl[0] = -delx/r*radi;
xl[1] = -dely/r*radi;
xl[2] = -delz/r*radi;
// velocity at the point of closest approach on both particles
// v = v + omega_cross_xl - Ef.xl
// particle i
vi[0] = v[i][0] + (wi[1]*xl[2] - wi[2]*xl[1])
- (Ef[0][0]*xl[0] + Ef[0][1]*xl[1] + Ef[0][2]*xl[2]);
vi[1] = v[i][1] + (wi[2]*xl[0] - wi[0]*xl[2])
- (Ef[1][0]*xl[0] + Ef[1][1]*xl[1] + Ef[1][2]*xl[2]);
vi[2] = v[i][2] + (wi[0]*xl[1] - wi[1]*xl[0])
- (Ef[2][0]*xl[0] + Ef[2][1]*xl[1] + Ef[2][2]*xl[2]);
// particle j
vj[0] = v[j][0] - (wj[1]*xl[2] - wj[2]*xl[1])
+ (Ef[0][0]*xl[0] + Ef[0][1]*xl[1] + Ef[0][2]*xl[2]);
vj[1] = v[j][1] - (wj[2]*xl[0] - wj[0]*xl[2])
+ (Ef[1][0]*xl[0] + Ef[1][1]*xl[1] + Ef[1][2]*xl[2]);
vj[2] = v[j][2] - (wj[0]*xl[1] - wj[1]*xl[0])
+ (Ef[2][0]*xl[0] + Ef[2][1]*xl[1] + Ef[2][2]*xl[2]);
// scalar resistances XA and YA
h_sep = r - 2.0*radi;
// if less than the minimum gap use the minimum gap instead
if (r < cut_inner[itype][jtype])
h_sep = cut_inner[itype][jtype] - 2.0*radi;
// scale h_sep by radi
h_sep = h_sep/radi;
// scalar resistances
if (flaglog) {
a_sq = 6.0*MY_PI*mu*radi*(1.0/4.0/h_sep + 9.0/40.0*log(1.0/h_sep));
a_sh = 6.0*MY_PI*mu*radi*(1.0/6.0*log(1.0/h_sep));
a_pu = 8.0*MY_PI*mu*pow(radi,3.0)*(3.0/160.0*log(1.0/h_sep));
} else
a_sq = 6.0*MY_PI*mu*radi*(1.0/4.0/h_sep);
// relative velocity at the point of closest approach
// includes fluid velocity
vr1 = vi[0] - vj[0];
vr2 = vi[1] - vj[1];
vr3 = vi[2] - vj[2];
// normal component (vr.n)n
vnnr = (vr1*delx + vr2*dely + vr3*delz)/r;
vn1 = vnnr*delx/r;
vn2 = vnnr*dely/r;
vn3 = vnnr*delz/r;
// tangential component vr - (vr.n)n
vt1 = vr1 - vn1;
vt2 = vr2 - vn2;
vt3 = vr3 - vn3;
// force due to squeeze type motion
fx = a_sq*vn1;
fy = a_sq*vn2;
fz = a_sq*vn3;
// force due to all shear kind of motions
if (flaglog) {
fx = fx + a_sh*vt1;
fy = fy + a_sh*vt2;
fz = fz + a_sh*vt3;
}
// scale forces for appropriate units
fx *= vxmu2f;
fy *= vxmu2f;
fz *= vxmu2f;
// add to total force
f[i][0] -= fx;
f[i][1] -= fy;
f[i][2] -= fz;
if (newton_pair || j < nlocal) {
f[j][0] += fx;
f[j][1] += fy;
f[j][2] += fz;
}
// torque due to this force
if (flaglog) {
tx = xl[1]*fz - xl[2]*fy;
ty = xl[2]*fx - xl[0]*fz;
tz = xl[0]*fy - xl[1]*fx;
torque[i][0] -= vxmu2f*tx;
torque[i][1] -= vxmu2f*ty;
torque[i][2] -= vxmu2f*tz;
if (newton_pair || j < nlocal) {
torque[j][0] -= vxmu2f*tx;
torque[j][1] -= vxmu2f*ty;
torque[j][2] -= vxmu2f*tz;
}
// torque due to a_pu
wdotn = ((wi[0]-wj[0])*delx + (wi[1]-wj[1])*dely +
(wi[2]-wj[2])*delz)/r;
wt1 = (wi[0]-wj[0]) - wdotn*delx/r;
wt2 = (wi[1]-wj[1]) - wdotn*dely/r;
wt3 = (wi[2]-wj[2]) - wdotn*delz/r;
tx = a_pu*wt1;
ty = a_pu*wt2;
tz = a_pu*wt3;
torque[i][0] -= vxmu2f*tx;
torque[i][1] -= vxmu2f*ty;
torque[i][2] -= vxmu2f*tz;
if (newton_pair || j < nlocal) {
torque[j][0] += vxmu2f*tx;
torque[j][1] += vxmu2f*ty;
torque[j][2] += vxmu2f*tz;
}
}
if (evflag) ev_tally_xyz(i,j,nlocal,newton_pair,
0.0,0.0,-fx,-fy,-fz,delx,dely,delz);
}
}
}
// restore streaming component of velocity, omega, angmom
if (shearing) {
double *h_rate = domain->h_rate;
double *h_ratelo = domain->h_ratelo;
for (ii = 0; ii < inum; ii++) {
i = ilist[ii];
itype = type[i];
radi = radius[i];
domain->x2lamda(x[i],lamda);
vstream[0] = h_rate[0]*lamda[0] + h_rate[5]*lamda[1] +
h_rate[4]*lamda[2] + h_ratelo[0];
vstream[1] = h_rate[1]*lamda[1] + h_rate[3]*lamda[2] + h_ratelo[1];
vstream[2] = h_rate[2]*lamda[2] + h_ratelo[2];
v[i][0] += vstream[0];
v[i][1] += vstream[1];
v[i][2] += vstream[2];
omega[i][0] -= 0.5*h_rate[3];
omega[i][1] += 0.5*h_rate[4];
omega[i][2] -= 0.5*h_rate[5];
}
}
if (vflag_fdotr) virial_fdotr_compute();
}
/* ----------------------------------------------------------------------
allocate all arrays
------------------------------------------------------------------------- */
void PairLubricate::allocate()
{
allocated = 1;
int n = atom->ntypes;
memory->create(setflag,n+1,n+1,"pair:setflag");
for (int i = 1; i <= n; i++)
for (int j = i; j <= n; j++)
setflag[i][j] = 0;
memory->create(cutsq,n+1,n+1,"pair:cutsq");
memory->create(cut,n+1,n+1,"pair:cut");
memory->create(cut_inner,n+1,n+1,"pair:cut_inner");
}
/* ----------------------------------------------------------------------
global settings
------------------------------------------------------------------------- */
void PairLubricate::settings(int narg, char **arg)
{
if (narg != 5 && narg != 7) error->all(FLERR,"Illegal pair_style command");
mu = force->numeric(FLERR,arg[0]);
flaglog = force->inumeric(FLERR,arg[1]);
flagfld = force->inumeric(FLERR,arg[2]);
cut_inner_global = force->numeric(FLERR,arg[3]);
cut_global = force->numeric(FLERR,arg[4]);
flagHI = flagVF = 1;
if (narg == 7) {
flagHI = force->inumeric(FLERR,arg[5]);
flagVF = force->inumeric(FLERR,arg[6]);
}
if (flaglog == 1 && flagHI == 0) {
error->warning(FLERR,"Cannot include log terms without 1/r terms; "
"setting flagHI to 1");
flagHI = 1;
}
// reset cutoffs that have been explicitly set
if (allocated) {
for (int i = 1; i <= atom->ntypes; i++)
for (int j = i; j <= atom->ntypes; j++)
if (setflag[i][j]) {
cut_inner[i][j] = cut_inner_global;
cut[i][j] = cut_global;
}
}
}
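// editor's note: an assumed input-script sketch for the arguments parsed
// above (numeric values are illustrative only):
// pair_style lubricate 1.0 1 1 2.01 2.5 -> mu flaglog flagfld cutinner cutoff
// pair_style lubricate 1.0 1 1 2.01 2.5 1 0 -> same, plus flagHI and flagVF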
/* ----------------------------------------------------------------------
set coeffs for one or more type pairs
------------------------------------------------------------------------- */
void PairLubricate::coeff(int narg, char **arg)
{
if (narg != 2 && narg != 4)
error->all(FLERR,"Incorrect args for pair coefficients");
if (!allocated) allocate();
int ilo,ihi,jlo,jhi;
force->bounds(FLERR,arg[0],atom->ntypes,ilo,ihi);
force->bounds(FLERR,arg[1],atom->ntypes,jlo,jhi);
double cut_inner_one = cut_inner_global;
double cut_one = cut_global;
if (narg == 4) {
cut_inner_one = force->numeric(FLERR,arg[2]);
cut_one = force->numeric(FLERR,arg[3]);
}
int count = 0;
for (int i = ilo; i <= ihi; i++) {
for (int j = MAX(jlo,i); j <= jhi; j++) {
cut_inner[i][j] = cut_inner_one;
cut[i][j] = cut_one;
setflag[i][j] = 1;
count++;
}
}
if (count == 0) error->all(FLERR,"Incorrect args for pair coefficients");
}
/* ----------------------------------------------------------------------
init specific to this pair style
------------------------------------------------------------------------- */
void PairLubricate::init_style()
{
if (!atom->sphere_flag)
error->all(FLERR,"Pair lubricate requires atom style sphere");
if (comm->ghost_velocity == 0)
error->all(FLERR,"Pair lubricate requires ghost atoms store velocity");
neighbor->request(this,instance_me);
// require that atom radii are identical within each type
// require monodisperse system with same radii for all types
double radtype;
for (int i = 1; i <= atom->ntypes; i++) {
if (!atom->radius_consistency(i,radtype))
error->all(FLERR,"Pair lubricate requires monodisperse particles");
if (i > 1 && radtype != rad)
error->all(FLERR,"Pair lubricate requires monodisperse particles");
rad = radtype;
}
// check for fix deform, if exists it must use "remap v"
// If box will change volume, set appropriate flag so that volume
// and v.f. corrections are re-calculated at every step.
//
// If available volume is different from box volume
// due to walls, set volume appropriately; if walls will
// move, set appropriate flag so that volume and v.f. corrections
// are re-calculated at every step.
shearing = flagdeform = flagwall = 0;
for (int i = 0; i < modify->nfix; i++){
if (strcmp(modify->fix[i]->style,"deform") == 0) {
shearing = flagdeform = 1;
if (((FixDeform *) modify->fix[i])->remapflag != Domain::V_REMAP)
error->all(FLERR,"Using pair lubricate with inconsistent "
"fix deform remap option");
}
if (strstr(modify->fix[i]->style,"wall") != NULL) {
if (flagwall)
error->all(FLERR,
"Cannot use multiple fix wall commands with pair lubricate");
flagwall = 1; // Walls exist
wallfix = (FixWall *) modify->fix[i];
if (wallfix->xflag) flagwall = 2; // Moving walls exist
}
}
// set the isotropic constants that depend on the volume fraction
// vol_T = total volume
double vol_T;
double wallcoord;
if (!flagwall) vol_T = domain->xprd*domain->yprd*domain->zprd;
else {
double wallhi[3], walllo[3];
for (int j = 0; j < 3; j++){
wallhi[j] = domain->prd[j];
walllo[j] = 0;
}
for (int m = 0; m < wallfix->nwall; m++){
int dim = wallfix->wallwhich[m] / 2;
int side = wallfix->wallwhich[m] % 2;
if (wallfix->xstyle[m] == VARIABLE){
wallfix->xindex[m] = input->variable->find(wallfix->xstr[m]);
//Since fix->wall->init happens after pair->init_style
wallcoord = input->variable->compute_equal(wallfix->xindex[m]);
}
else wallcoord = wallfix->coord0[m];
if (side == 0) walllo[dim] = wallcoord;
else wallhi[dim] = wallcoord;
}
vol_T = (wallhi[0] - walllo[0]) * (wallhi[1] - walllo[1]) *
(wallhi[2] - walllo[2]);
}
// vol_P = volume of particles, assuming monodispersity
// vol_f = volume fraction
vol_P = atom->natoms*(4.0/3.0)*MY_PI*pow(rad,3.0);
double vol_f = vol_P/vol_T;
if (!flagVF) vol_f = 0;
// set isotropic constants for FLD
if (flaglog == 0) {
R0 = 6*MY_PI*mu*rad*(1.0 + 2.16*vol_f);
RT0 = 8*MY_PI*mu*pow(rad,3.0);
RS0 = 20.0/3.0*MY_PI*mu*pow(rad,3.0)*(1.0 + 3.33*vol_f + 2.80*vol_f*vol_f);
} else {
R0 = 6*MY_PI*mu*rad*(1.0 + 2.725*vol_f - 6.583*vol_f*vol_f);
RT0 = 8*MY_PI*mu*pow(rad,3.0)*(1.0 + 0.749*vol_f - 2.469*vol_f*vol_f);
RS0 = 20.0/3.0*MY_PI*mu*pow(rad,3.0)*(1.0 + 3.64*vol_f - 6.95*vol_f*vol_f);
}
// set Ef = 0 since used whether shearing or not
Ef[0][0] = Ef[0][1] = Ef[0][2] = 0.0;
Ef[1][0] = Ef[1][1] = Ef[1][2] = 0.0;
Ef[2][0] = Ef[2][1] = Ef[2][2] = 0.0;
}
/* ----------------------------------------------------------------------
init for one type pair i,j and corresponding j,i
------------------------------------------------------------------------- */
double PairLubricate::init_one(int i, int j)
{
if (setflag[i][j] == 0) {
cut_inner[i][j] = mix_distance(cut_inner[i][i],cut_inner[j][j]);
cut[i][j] = mix_distance(cut[i][i],cut[j][j]);
}
cut_inner[j][i] = cut_inner[i][j];
return cut[i][j];
}
/* ----------------------------------------------------------------------
proc 0 writes to restart file
------------------------------------------------------------------------- */
void PairLubricate::write_restart(FILE *fp)
{
write_restart_settings(fp);
int i,j;
for (i = 1; i <= atom->ntypes; i++)
for (j = i; j <= atom->ntypes; j++) {
fwrite(&setflag[i][j],sizeof(int),1,fp);
if (setflag[i][j]) {
fwrite(&cut_inner[i][j],sizeof(double),1,fp);
fwrite(&cut[i][j],sizeof(double),1,fp);
}
}
}
/* ----------------------------------------------------------------------
proc 0 reads from restart file, bcasts
------------------------------------------------------------------------- */
void PairLubricate::read_restart(FILE *fp)
{
read_restart_settings(fp);
allocate();
int i,j;
int me = comm->me;
for (i = 1; i <= atom->ntypes; i++)
for (j = i; j <= atom->ntypes; j++) {
if (me == 0) utils::sfread(FLERR,&setflag[i][j],sizeof(int),1,fp,NULL,error);
MPI_Bcast(&setflag[i][j],1,MPI_INT,0,world);
if (setflag[i][j]) {
if (me == 0) {
utils::sfread(FLERR,&cut_inner[i][j],sizeof(double),1,fp,NULL,error);
utils::sfread(FLERR,&cut[i][j],sizeof(double),1,fp,NULL,error);
}
MPI_Bcast(&cut_inner[i][j],1,MPI_DOUBLE,0,world);
MPI_Bcast(&cut[i][j],1,MPI_DOUBLE,0,world);
}
}
}
/* ----------------------------------------------------------------------
proc 0 writes to restart file
------------------------------------------------------------------------- */
void PairLubricate::write_restart_settings(FILE *fp)
{
fwrite(&mu,sizeof(double),1,fp);
fwrite(&flaglog,sizeof(int),1,fp);
fwrite(&flagfld,sizeof(int),1,fp);
fwrite(&cut_inner_global,sizeof(double),1,fp);
fwrite(&cut_global,sizeof(double),1,fp);
fwrite(&offset_flag,sizeof(int),1,fp);
fwrite(&mix_flag,sizeof(int),1,fp);
fwrite(&flagHI,sizeof(int),1,fp);
fwrite(&flagVF,sizeof(int),1,fp);
}
/* ----------------------------------------------------------------------
proc 0 reads from restart file, bcasts
------------------------------------------------------------------------- */
void PairLubricate::read_restart_settings(FILE *fp)
{
int me = comm->me;
if (me == 0) {
utils::sfread(FLERR,&mu,sizeof(double),1,fp,NULL,error);
utils::sfread(FLERR,&flaglog,sizeof(int),1,fp,NULL,error);
utils::sfread(FLERR,&flagfld,sizeof(int),1,fp,NULL,error);
utils::sfread(FLERR,&cut_inner_global,sizeof(double),1,fp,NULL,error);
utils::sfread(FLERR,&cut_global,sizeof(double),1,fp,NULL,error);
utils::sfread(FLERR,&offset_flag,sizeof(int),1,fp,NULL,error);
utils::sfread(FLERR,&mix_flag,sizeof(int),1,fp,NULL,error);
utils::sfread(FLERR,&flagHI,sizeof(int),1,fp,NULL,error);
utils::sfread(FLERR,&flagVF,sizeof(int),1,fp,NULL,error);
}
MPI_Bcast(&mu,1,MPI_DOUBLE,0,world);
MPI_Bcast(&flaglog,1,MPI_INT,0,world);
MPI_Bcast(&flagfld,1,MPI_INT,0,world);
MPI_Bcast(&cut_inner_global,1,MPI_DOUBLE,0,world);
MPI_Bcast(&cut_global,1,MPI_DOUBLE,0,world);
MPI_Bcast(&offset_flag,1,MPI_INT,0,world);
MPI_Bcast(&mix_flag,1,MPI_INT,0,world);
MPI_Bcast(&flagHI,1,MPI_INT,0,world);
MPI_Bcast(&flagVF,1,MPI_INT,0,world);
}
/* ---------------------------------------------------------------------- */
int PairLubricate::pack_forward_comm(int n, int *list, double *buf,
int /*pbc_flag*/, int * /*pbc*/)
{
int i,j,m;
double **v = atom->v;
double **omega = atom->omega;
m = 0;
for (i = 0; i < n; i++) {
j = list[i];
buf[m++] = v[j][0];
buf[m++] = v[j][1];
buf[m++] = v[j][2];
buf[m++] = omega[j][0];
buf[m++] = omega[j][1];
buf[m++] = omega[j][2];
}
return m;
}
/* ---------------------------------------------------------------------- */
void PairLubricate::unpack_forward_comm(int n, int first, double *buf)
{
int i,m,last;
double **v = atom->v;
double **omega = atom->omega;
m = 0;
last = first + n;
for (i = first; i < last; i++) {
v[i][0] = buf[m++];
v[i][1] = buf[m++];
v[i][2] = buf[m++];
omega[i][0] = buf[m++];
omega[i][1] = buf[m++];
omega[i][2] = buf[m++];
}
}
/* ----------------------------------------------------------------------
check if name is recognized, return integer index for that name
if name not recognized, return -1
if type pair setting, return -2 if no type pairs are set
------------------------------------------------------------------------- */
int PairLubricate::pre_adapt(char *name, int /*ilo*/, int /*ihi*/, int /*jlo*/, int /*jhi*/)
{
if (strcmp(name,"mu") == 0) return 0;
return -1;
}
/* ----------------------------------------------------------------------
adapt parameter indexed by which
change all pair variables affected by the reset parameter
if type pair setting, set I-J and J-I coeffs
------------------------------------------------------------------------- */
void PairLubricate::adapt(int /*which*/, int /*ilo*/, int /*ihi*/, int /*jlo*/, int /*jhi*/,
double value)
{
mu = value;
}
| 1 | 27,612 | Not clear on why this change is correct. | lammps-lammps | cpp |
@@ -193,5 +193,10 @@ public final class ByteBuffersIndexInput extends IndexInput implements RandomAcc
if (in == null) {
throw new AlreadyClosedException("Already closed.");
}
- }
+ }
+
+ @Override
+ public boolean isMMapped() {
+ return true;
+ }
} | 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.lucene.store;
import java.io.IOException;
import java.io.UncheckedIOException;
import java.util.Map;
import java.util.Set;
/**
* An {@link IndexInput} implementing {@link RandomAccessInput} and backed
* by a {@link ByteBuffersDataInput}.
*/
public final class ByteBuffersIndexInput extends IndexInput implements RandomAccessInput {
private ByteBuffersDataInput in;
public ByteBuffersIndexInput(ByteBuffersDataInput in, String resourceDescription) {
super(resourceDescription);
this.in = in;
}
@Override
public void close() throws IOException {
in = null;
}
@Override
public long getFilePointer() {
ensureOpen();
return in.position();
}
@Override
public void seek(long pos) throws IOException {
ensureOpen();
in.seek(pos);
}
@Override
public long length() {
ensureOpen();
return in.size();
}
@Override
public ByteBuffersIndexInput slice(String sliceDescription, long offset, long length) throws IOException {
ensureOpen();
return new ByteBuffersIndexInput(in.slice(offset, length),
"(sliced) offset=" + offset + ", length=" + length + " " + toString() + " [slice=" + sliceDescription + "]");
}
@Override
public byte readByte() throws IOException {
ensureOpen();
return in.readByte();
}
@Override
public void readBytes(byte[] b, int offset, int len) throws IOException {
ensureOpen();
in.readBytes(b, offset, len);
}
@Override
public RandomAccessInput randomAccessSlice(long offset, long length) throws IOException {
ensureOpen();
return slice("", offset, length);
}
@Override
public void readBytes(byte[] b, int offset, int len, boolean useBuffer) throws IOException {
ensureOpen();
in.readBytes(b, offset, len, useBuffer);
}
@Override
public short readShort() throws IOException {
ensureOpen();
return in.readShort();
}
@Override
public int readInt() throws IOException {
ensureOpen();
return in.readInt();
}
@Override
public int readVInt() throws IOException {
ensureOpen();
return in.readVInt();
}
@Override
public int readZInt() throws IOException {
ensureOpen();
return in.readZInt();
}
@Override
public long readLong() throws IOException {
ensureOpen();
return in.readLong();
}
@Override
public long readVLong() throws IOException {
ensureOpen();
return in.readVLong();
}
@Override
public long readZLong() throws IOException {
ensureOpen();
return in.readZLong();
}
@Override
public String readString() throws IOException {
ensureOpen();
return in.readString();
}
@Override
public Map<String, String> readMapOfStrings() throws IOException {
ensureOpen();
return in.readMapOfStrings();
}
@Override
public Set<String> readSetOfStrings() throws IOException {
ensureOpen();
return in.readSetOfStrings();
}
@Override
public void skipBytes(long numBytes) throws IOException {
ensureOpen();
super.skipBytes(numBytes);
}
@Override
public byte readByte(long pos) throws IOException {
ensureOpen();
return in.readByte(pos);
}
@Override
public short readShort(long pos) throws IOException {
ensureOpen();
return in.readShort(pos);
}
@Override
public int readInt(long pos) throws IOException {
ensureOpen();
return in.readInt(pos);
}
@Override
public long readLong(long pos) throws IOException {
ensureOpen();
return in.readLong(pos);
}
@Override
public IndexInput clone() {
ensureOpen();
ByteBuffersIndexInput cloned = new ByteBuffersIndexInput(in.slice(0, in.size()), "(clone of) " + toString());
try {
cloned.seek(getFilePointer());
} catch (IOException e) {
throw new UncheckedIOException(e);
}
return cloned;
}
private void ensureOpen() {
if (in == null) {
throw new AlreadyClosedException("Already closed.");
}
}
}
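/*
 * Usage sketch added for illustration; it is not part of the original file.
 * It assumes Lucene's ByteBuffersDataOutput (same package) and its
 * toDataInput() accessor to build the backing ByteBuffersDataInput.
 *
 *   ByteBuffersDataOutput out = new ByteBuffersDataOutput();
 *   out.writeInt(42);          // bytes 0..3
 *   out.writeLong(7L);         // bytes 4..11
 *   ByteBuffersIndexInput in =
 *       new ByteBuffersIndexInput(out.toDataInput(), "example");
 *   int i = in.readInt();      // sequential read, advances the file pointer
 *   long l = in.readLong(4);   // positional read, file pointer unchanged
 *   in.close();
 */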
| 1 | 28,783 | Hi Simon. Whether this should return true depends on what byte buffers are used? The same applies to ByteBufferIndexInput, actually... I don't think you can generally tell whether the ByteBuffers the input operates on come from a mmap call or from somewhere else (even direct buffers don't have to be a result of mmap). | apache-lucene-solr | java |
@@ -54,6 +54,7 @@ storiesOf( 'PageSpeed Insights Module/Settings', module )
decorators: [
withRegistry,
],
+ padding: 0,
} )
.add( 'View, open with all settings', ( args, { registry } ) => {
return <Settings isOpen={ true } registry={ registry } />; | 1 | /**
* PageSpeed Insights Settings stories.
*
* Site Kit by Google, Copyright 2021 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* External dependencies
*/
import { storiesOf } from '@storybook/react';
/**
* Internal dependencies
*/
import {
createTestRegistry,
provideModules,
provideModuleRegistrations,
} from '../tests/js/utils';
import createLegacySettingsWrapper from './utils/create-legacy-settings-wrapper';
const Settings = createLegacySettingsWrapper( 'pagespeed-insights' );
const withRegistry = ( Story ) => {
const registry = createTestRegistry();
provideModules( registry, [ {
slug: 'pagespeed-insights',
active: true,
connected: true,
} ] );
provideModuleRegistrations( registry );
return (
<Story registry={ registry } />
);
};
storiesOf( 'PageSpeed Insights Module/Settings', module )
.add( 'View, closed', ( args, { registry } ) => {
return <Settings isOpen={ false } registry={ registry } />;
}, {
decorators: [
withRegistry,
],
} )
.add( 'View, open with all settings', ( args, { registry } ) => {
return <Settings isOpen={ true } registry={ registry } />;
}, {
decorators: [
withRegistry,
],
} )
.add( 'Edit, open with all settings', ( args, { registry } ) => {
return <Settings isOpen={ true } isEditing={ true } registry={ registry } />;
}, {
decorators: [
withRegistry,
],
} )
;
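// Hypothetical sketch, not in the original file: if every story should carry
// the same `padding: 0` parameter that the diff above adds to a single story,
// a shared options object avoids repeating it per story. The `storyOptions`
// name is illustrative only.
//
// const storyOptions = {
// 	decorators: [ withRegistry ],
// 	padding: 0,
// };
//
// storiesOf( 'PageSpeed Insights Module/Settings', module )
// 	.add( 'View, closed', ( args, { registry } ) => {
// 		return <Settings isOpen={ false } registry={ registry } />;
// 	}, storyOptions );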
| 1 | 38,279 | All stories in this file also need to have the default padding. | google-site-kit-wp | js |
@@ -804,8 +804,10 @@ static void subsurface_handle_place_above(struct wl_client *client,
return;
}
+ assert(sibling->parent == subsurface->parent);
+
wl_list_remove(&subsurface->parent_pending_link);
- wl_list_insert(&sibling->parent_pending_link,
+ wl_list_insert(sibling->parent_pending_link.prev,
&subsurface->parent_pending_link);
subsurface->reordered = true; | 1 | #include <assert.h>
#include <stdlib.h>
#include <wayland-server-core.h>
#include <wlr/render/interface.h>
#include <wlr/types/wlr_buffer.h>
#include <wlr/types/wlr_compositor.h>
#include <wlr/types/wlr_matrix.h>
#include <wlr/types/wlr_region.h>
#include <wlr/types/wlr_surface.h>
#include <wlr/types/wlr_output.h>
#include <wlr/util/log.h>
#include <wlr/util/region.h>
#include "util/signal.h"
#include "util/time.h"
#define CALLBACK_VERSION 1
#define SURFACE_VERSION 4
#define SUBSURFACE_VERSION 1
static int min(int fst, int snd) {
if (fst < snd) {
return fst;
} else {
return snd;
}
}
static int max(int fst, int snd) {
if (fst > snd) {
return fst;
} else {
return snd;
}
}
static void surface_state_reset_buffer(struct wlr_surface_state *state) {
if (state->buffer_resource) {
wl_list_remove(&state->buffer_destroy.link);
state->buffer_resource = NULL;
}
}
static void surface_handle_buffer_destroy(struct wl_listener *listener,
void *data) {
struct wlr_surface_state *state =
wl_container_of(listener, state, buffer_destroy);
surface_state_reset_buffer(state);
}
static void surface_state_set_buffer(struct wlr_surface_state *state,
struct wl_resource *buffer_resource) {
surface_state_reset_buffer(state);
state->buffer_resource = buffer_resource;
if (buffer_resource != NULL) {
wl_resource_add_destroy_listener(buffer_resource,
&state->buffer_destroy);
state->buffer_destroy.notify = surface_handle_buffer_destroy;
}
}
static void surface_destroy(struct wl_client *client,
struct wl_resource *resource) {
wl_resource_destroy(resource);
}
static void surface_attach(struct wl_client *client,
struct wl_resource *resource,
struct wl_resource *buffer, int32_t dx, int32_t dy) {
struct wlr_surface *surface = wlr_surface_from_resource(resource);
surface->pending.committed |= WLR_SURFACE_STATE_BUFFER;
surface->pending.dx = dx;
surface->pending.dy = dy;
surface_state_set_buffer(&surface->pending, buffer);
}
static void surface_damage(struct wl_client *client,
struct wl_resource *resource,
int32_t x, int32_t y, int32_t width, int32_t height) {
struct wlr_surface *surface = wlr_surface_from_resource(resource);
if (width < 0 || height < 0) {
return;
}
surface->pending.committed |= WLR_SURFACE_STATE_SURFACE_DAMAGE;
pixman_region32_union_rect(&surface->pending.surface_damage,
&surface->pending.surface_damage,
x, y, width, height);
}
static void callback_handle_resource_destroy(struct wl_resource *resource) {
wl_list_remove(wl_resource_get_link(resource));
}
static void surface_frame(struct wl_client *client,
struct wl_resource *resource, uint32_t callback) {
struct wlr_surface *surface = wlr_surface_from_resource(resource);
struct wl_resource *callback_resource = wl_resource_create(client,
&wl_callback_interface, CALLBACK_VERSION, callback);
if (callback_resource == NULL) {
wl_resource_post_no_memory(resource);
return;
}
wl_resource_set_implementation(callback_resource, NULL, NULL,
callback_handle_resource_destroy);
wl_list_insert(surface->pending.frame_callback_list.prev,
wl_resource_get_link(callback_resource));
surface->pending.committed |= WLR_SURFACE_STATE_FRAME_CALLBACK_LIST;
}
static void surface_set_opaque_region(struct wl_client *client,
struct wl_resource *resource,
struct wl_resource *region_resource) {
struct wlr_surface *surface = wlr_surface_from_resource(resource);
surface->pending.committed |= WLR_SURFACE_STATE_OPAQUE_REGION;
if (region_resource) {
pixman_region32_t *region = wlr_region_from_resource(region_resource);
pixman_region32_copy(&surface->pending.opaque, region);
} else {
pixman_region32_clear(&surface->pending.opaque);
}
}
static void surface_set_input_region(struct wl_client *client,
struct wl_resource *resource,
struct wl_resource *region_resource) {
struct wlr_surface *surface = wlr_surface_from_resource(resource);
surface->pending.committed |= WLR_SURFACE_STATE_INPUT_REGION;
if (region_resource) {
pixman_region32_t *region = wlr_region_from_resource(region_resource);
pixman_region32_copy(&surface->pending.input, region);
} else {
pixman_region32_fini(&surface->pending.input);
pixman_region32_init_rect(&surface->pending.input,
INT32_MIN, INT32_MIN, UINT32_MAX, UINT32_MAX);
}
}
/**
 * Computes the surface viewport source size, i.e. the size after applying the
* surface's scale, transform and cropping (via the viewport's source
* rectangle) but before applying the viewport scaling (via the viewport's
* destination rectangle).
*/
static void surface_state_viewport_src_size(struct wlr_surface_state *state,
int *out_width, int *out_height) {
if (state->buffer_width == 0 && state->buffer_height == 0) {
*out_width = *out_height = 0;
return;
}
if (state->viewport.has_src) {
*out_width = state->viewport.src.width;
*out_height = state->viewport.src.height;
} else {
int width = state->buffer_width / state->scale;
int height = state->buffer_height / state->scale;
if ((state->transform & WL_OUTPUT_TRANSFORM_90) != 0) {
int tmp = width;
width = height;
height = tmp;
}
*out_width = width;
*out_height = height;
}
}
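/*
 * Worked example (added for illustration, not in the original source): with
 * buffer_width=200, buffer_height=100, scale=2, a 90-degree transform and no
 * viewport source rectangle, the intermediate size is 200/2 x 100/2 = 100x50,
 * which the transform swap then turns into out_width=50, out_height=100.
 */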
static void surface_state_finalize(struct wlr_surface *surface,
struct wlr_surface_state *state) {
if ((state->committed & WLR_SURFACE_STATE_BUFFER)) {
if (state->buffer_resource != NULL) {
wlr_resource_get_buffer_size(state->buffer_resource,
surface->renderer, &state->buffer_width, &state->buffer_height);
} else {
state->buffer_width = state->buffer_height = 0;
}
}
if (state->viewport.has_dst) {
if (state->buffer_width == 0 && state->buffer_height == 0) {
state->width = state->height = 0;
} else {
state->width = state->viewport.dst_width;
state->height = state->viewport.dst_height;
}
} else {
surface_state_viewport_src_size(state,
&state->width, &state->height);
}
pixman_region32_intersect_rect(&state->surface_damage,
&state->surface_damage, 0, 0, state->width, state->height);
pixman_region32_intersect_rect(&state->buffer_damage,
&state->buffer_damage, 0, 0, state->buffer_width,
state->buffer_height);
}
static void surface_update_damage(pixman_region32_t *buffer_damage,
struct wlr_surface_state *current, struct wlr_surface_state *pending) {
pixman_region32_clear(buffer_damage);
if (pending->width != current->width ||
pending->height != current->height) {
// Damage the whole buffer on resize
pixman_region32_union_rect(buffer_damage, buffer_damage, 0, 0,
pending->buffer_width, pending->buffer_height);
} else {
// Copy over surface damage + buffer damage
pixman_region32_t surface_damage;
pixman_region32_init(&surface_damage);
pixman_region32_copy(&surface_damage, &pending->surface_damage);
if (pending->viewport.has_dst) {
int src_width, src_height;
surface_state_viewport_src_size(pending, &src_width, &src_height);
float scale_x = (float)pending->viewport.dst_width / src_width;
float scale_y = (float)pending->viewport.dst_height / src_height;
wlr_region_scale_xy(&surface_damage, &surface_damage,
1.0 / scale_x, 1.0 / scale_y);
}
if (pending->viewport.has_src) {
// This is lossy: do a best-effort conversion
pixman_region32_translate(&surface_damage,
floor(pending->viewport.src.x),
floor(pending->viewport.src.y));
}
wlr_region_transform(&surface_damage, &surface_damage,
wlr_output_transform_invert(pending->transform),
pending->width, pending->height);
wlr_region_scale(&surface_damage, &surface_damage, pending->scale);
pixman_region32_union(buffer_damage,
&pending->buffer_damage, &surface_damage);
pixman_region32_fini(&surface_damage);
}
}
static void surface_state_copy(struct wlr_surface_state *state,
struct wlr_surface_state *next) {
state->width = next->width;
state->height = next->height;
state->buffer_width = next->buffer_width;
state->buffer_height = next->buffer_height;
if (next->committed & WLR_SURFACE_STATE_SCALE) {
state->scale = next->scale;
}
if (next->committed & WLR_SURFACE_STATE_TRANSFORM) {
state->transform = next->transform;
}
if (next->committed & WLR_SURFACE_STATE_BUFFER) {
state->dx = next->dx;
state->dy = next->dy;
} else {
state->dx = state->dy = 0;
}
if (next->committed & WLR_SURFACE_STATE_SURFACE_DAMAGE) {
pixman_region32_copy(&state->surface_damage, &next->surface_damage);
} else {
pixman_region32_clear(&state->surface_damage);
}
if (next->committed & WLR_SURFACE_STATE_BUFFER_DAMAGE) {
pixman_region32_copy(&state->buffer_damage, &next->buffer_damage);
} else {
pixman_region32_clear(&state->buffer_damage);
}
if (next->committed & WLR_SURFACE_STATE_OPAQUE_REGION) {
pixman_region32_copy(&state->opaque, &next->opaque);
}
if (next->committed & WLR_SURFACE_STATE_INPUT_REGION) {
pixman_region32_copy(&state->input, &next->input);
}
if (next->committed & WLR_SURFACE_STATE_VIEWPORT) {
memcpy(&state->viewport, &next->viewport, sizeof(state->viewport));
}
state->committed |= next->committed;
}
/**
* Append pending state to current state and clear pending state.
*/
static void surface_state_move(struct wlr_surface_state *state,
struct wlr_surface_state *next) {
surface_state_copy(state, next);
if (next->committed & WLR_SURFACE_STATE_BUFFER) {
surface_state_set_buffer(state, next->buffer_resource);
surface_state_reset_buffer(next);
next->dx = next->dy = 0;
}
if (next->committed & WLR_SURFACE_STATE_SURFACE_DAMAGE) {
pixman_region32_clear(&next->surface_damage);
}
if (next->committed & WLR_SURFACE_STATE_BUFFER_DAMAGE) {
pixman_region32_clear(&next->buffer_damage);
}
if (next->committed & WLR_SURFACE_STATE_FRAME_CALLBACK_LIST) {
wl_list_insert_list(&state->frame_callback_list,
&next->frame_callback_list);
wl_list_init(&next->frame_callback_list);
}
next->committed = 0;
}
static void surface_damage_subsurfaces(struct wlr_subsurface *subsurface) {
// XXX: This is probably the wrong way to do it, because this damage should
// come from the client, but weston doesn't do it correctly either and it
// seems to work ok. See the comment on weston_surface_damage for more info
// about a better approach.
struct wlr_surface *surface = subsurface->surface;
pixman_region32_union_rect(&surface->buffer_damage,
&surface->buffer_damage, 0, 0,
surface->current.buffer_width, surface->current.buffer_height);
subsurface->reordered = false;
struct wlr_subsurface *child;
wl_list_for_each(child, &subsurface->surface->subsurfaces, parent_link) {
surface_damage_subsurfaces(child);
}
}
static void surface_apply_damage(struct wlr_surface *surface) {
struct wl_resource *resource = surface->current.buffer_resource;
if (resource == NULL) {
// NULL commit
if (surface->buffer != NULL) {
wlr_buffer_unlock(&surface->buffer->base);
}
surface->buffer = NULL;
return;
}
if (surface->buffer != NULL && surface->buffer->resource_released) {
struct wlr_client_buffer *updated_buffer =
wlr_client_buffer_apply_damage(surface->buffer, resource,
&surface->buffer_damage);
if (updated_buffer != NULL) {
surface->buffer = updated_buffer;
return;
}
}
struct wlr_client_buffer *buffer =
wlr_client_buffer_import(surface->renderer, resource);
if (buffer == NULL) {
wlr_log(WLR_ERROR, "Failed to upload buffer");
return;
}
if (surface->buffer != NULL) {
wlr_buffer_unlock(&surface->buffer->base);
}
surface->buffer = buffer;
}
static void surface_update_opaque_region(struct wlr_surface *surface) {
struct wlr_texture *texture = wlr_surface_get_texture(surface);
if (texture == NULL) {
pixman_region32_clear(&surface->opaque_region);
return;
}
if (wlr_texture_is_opaque(texture)) {
pixman_region32_init_rect(&surface->opaque_region,
0, 0, surface->current.width, surface->current.height);
return;
}
pixman_region32_intersect_rect(&surface->opaque_region,
&surface->current.opaque,
0, 0, surface->current.width, surface->current.height);
}
static void surface_update_input_region(struct wlr_surface *surface) {
pixman_region32_intersect_rect(&surface->input_region,
&surface->current.input,
0, 0, surface->current.width, surface->current.height);
}
static void surface_commit_pending(struct wlr_surface *surface) {
surface_state_finalize(surface, &surface->pending);
if (surface->role && surface->role->precommit) {
surface->role->precommit(surface);
}
bool invalid_buffer = surface->pending.committed & WLR_SURFACE_STATE_BUFFER;
surface->sx += surface->pending.dx;
surface->sy += surface->pending.dy;
surface_update_damage(&surface->buffer_damage,
&surface->current, &surface->pending);
surface_state_copy(&surface->previous, &surface->current);
surface_state_move(&surface->current, &surface->pending);
if (invalid_buffer) {
surface_apply_damage(surface);
}
surface_update_opaque_region(surface);
surface_update_input_region(surface);
// commit subsurface order
struct wlr_subsurface *subsurface;
wl_list_for_each_reverse(subsurface, &surface->subsurface_pending_list,
parent_pending_link) {
wl_list_remove(&subsurface->parent_link);
wl_list_insert(&surface->subsurfaces, &subsurface->parent_link);
if (subsurface->reordered) {
// TODO: damage all the subsurfaces
surface_damage_subsurfaces(subsurface);
}
}
if (surface->role && surface->role->commit) {
surface->role->commit(surface);
}
wlr_signal_emit_safe(&surface->events.commit, surface);
}
static bool subsurface_is_synchronized(struct wlr_subsurface *subsurface) {
while (subsurface != NULL) {
if (subsurface->synchronized) {
return true;
}
if (!subsurface->parent) {
return false;
}
if (!wlr_surface_is_subsurface(subsurface->parent)) {
break;
}
subsurface = wlr_subsurface_from_wlr_surface(subsurface->parent);
}
return false;
}
/**
* Recursive function to commit the effectively synchronized children.
*/
static void subsurface_parent_commit(struct wlr_subsurface *subsurface,
bool synchronized) {
struct wlr_surface *surface = subsurface->surface;
if (synchronized || subsurface->synchronized) {
if (subsurface->has_cache) {
surface_state_move(&surface->pending, &subsurface->cached);
surface_commit_pending(surface);
subsurface->has_cache = false;
subsurface->cached.committed = 0;
}
		struct wlr_subsurface *child;
		wl_list_for_each(child, &surface->subsurfaces, parent_link) {
			subsurface_parent_commit(child, true);
}
}
}
static void subsurface_commit(struct wlr_subsurface *subsurface) {
struct wlr_surface *surface = subsurface->surface;
if (subsurface_is_synchronized(subsurface)) {
surface_state_move(&subsurface->cached, &surface->pending);
subsurface->has_cache = true;
} else {
if (subsurface->has_cache) {
surface_state_move(&surface->pending, &subsurface->cached);
surface_commit_pending(surface);
subsurface->has_cache = false;
} else {
surface_commit_pending(surface);
}
}
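/*
 * Illustration of the synchronized path above (comment added for clarity,
 * not in the original source): with parent P and synchronized subsurface C,
 * a wl_surface.commit on C only moves C's pending state into C's cache
 * (has_cache = true); a later wl_surface.commit on P applies that cache via
 * subsurface_parent_commit, so C's content updates atomically with P's.
 */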
}
static void surface_commit(struct wl_client *client,
struct wl_resource *resource) {
struct wlr_surface *surface = wlr_surface_from_resource(resource);
struct wlr_subsurface *subsurface = wlr_surface_is_subsurface(surface) ?
wlr_subsurface_from_wlr_surface(surface) : NULL;
if (subsurface != NULL) {
subsurface_commit(subsurface);
} else {
surface_commit_pending(surface);
}
wl_list_for_each(subsurface, &surface->subsurfaces, parent_link) {
subsurface_parent_commit(subsurface, false);
}
}
static void surface_set_buffer_transform(struct wl_client *client,
struct wl_resource *resource, int32_t transform) {
if (transform < WL_OUTPUT_TRANSFORM_NORMAL ||
transform > WL_OUTPUT_TRANSFORM_FLIPPED_270) {
wl_resource_post_error(resource, WL_SURFACE_ERROR_INVALID_TRANSFORM,
"Specified transform value (%d) is invalid", transform);
return;
}
struct wlr_surface *surface = wlr_surface_from_resource(resource);
surface->pending.committed |= WLR_SURFACE_STATE_TRANSFORM;
surface->pending.transform = transform;
}
static void surface_set_buffer_scale(struct wl_client *client,
struct wl_resource *resource, int32_t scale) {
if (scale <= 0) {
wl_resource_post_error(resource, WL_SURFACE_ERROR_INVALID_SCALE,
"Specified scale value (%d) is not positive", scale);
return;
}
struct wlr_surface *surface = wlr_surface_from_resource(resource);
surface->pending.committed |= WLR_SURFACE_STATE_SCALE;
surface->pending.scale = scale;
}
static void surface_damage_buffer(struct wl_client *client,
struct wl_resource *resource,
int32_t x, int32_t y, int32_t width,
int32_t height) {
struct wlr_surface *surface = wlr_surface_from_resource(resource);
if (width < 0 || height < 0) {
return;
}
surface->pending.committed |= WLR_SURFACE_STATE_BUFFER_DAMAGE;
pixman_region32_union_rect(&surface->pending.buffer_damage,
&surface->pending.buffer_damage,
x, y, width, height);
}
static const struct wl_surface_interface surface_interface = {
.destroy = surface_destroy,
.attach = surface_attach,
.damage = surface_damage,
.frame = surface_frame,
.set_opaque_region = surface_set_opaque_region,
.set_input_region = surface_set_input_region,
.commit = surface_commit,
.set_buffer_transform = surface_set_buffer_transform,
.set_buffer_scale = surface_set_buffer_scale,
.damage_buffer = surface_damage_buffer
};
struct wlr_surface *wlr_surface_from_resource(struct wl_resource *resource) {
assert(wl_resource_instance_of(resource, &wl_surface_interface,
&surface_interface));
return wl_resource_get_user_data(resource);
}
static void surface_state_init(struct wlr_surface_state *state) {
state->scale = 1;
state->transform = WL_OUTPUT_TRANSFORM_NORMAL;
wl_list_init(&state->frame_callback_list);
pixman_region32_init(&state->surface_damage);
pixman_region32_init(&state->buffer_damage);
pixman_region32_init(&state->opaque);
pixman_region32_init_rect(&state->input,
INT32_MIN, INT32_MIN, UINT32_MAX, UINT32_MAX);
}
static void surface_state_finish(struct wlr_surface_state *state) {
surface_state_reset_buffer(state);
struct wl_resource *resource, *tmp;
wl_resource_for_each_safe(resource, tmp, &state->frame_callback_list) {
wl_resource_destroy(resource);
}
pixman_region32_fini(&state->surface_damage);
pixman_region32_fini(&state->buffer_damage);
pixman_region32_fini(&state->opaque);
pixman_region32_fini(&state->input);
}
static void subsurface_unmap(struct wlr_subsurface *subsurface);
static void subsurface_destroy(struct wlr_subsurface *subsurface) {
if (subsurface == NULL) {
return;
}
subsurface_unmap(subsurface);
wlr_signal_emit_safe(&subsurface->events.destroy, subsurface);
wl_list_remove(&subsurface->surface_destroy.link);
surface_state_finish(&subsurface->cached);
if (subsurface->parent) {
wl_list_remove(&subsurface->parent_link);
wl_list_remove(&subsurface->parent_pending_link);
wl_list_remove(&subsurface->parent_destroy.link);
}
wl_resource_set_user_data(subsurface->resource, NULL);
if (subsurface->surface) {
subsurface->surface->role_data = NULL;
}
free(subsurface);
}
static void surface_output_destroy(struct wlr_surface_output *surface_output);
static void surface_handle_resource_destroy(struct wl_resource *resource) {
struct wlr_surface_output *surface_output, *tmp;
struct wlr_surface *surface = wlr_surface_from_resource(resource);
wl_list_for_each_safe(surface_output, tmp, &surface->current_outputs, link) {
surface_output_destroy(surface_output);
}
wlr_signal_emit_safe(&surface->events.destroy, surface);
wl_list_remove(wl_resource_get_link(surface->resource));
wl_list_remove(&surface->renderer_destroy.link);
surface_state_finish(&surface->pending);
surface_state_finish(&surface->current);
surface_state_finish(&surface->previous);
pixman_region32_fini(&surface->buffer_damage);
pixman_region32_fini(&surface->opaque_region);
pixman_region32_fini(&surface->input_region);
if (surface->buffer != NULL) {
wlr_buffer_unlock(&surface->buffer->base);
}
free(surface);
}
static void surface_handle_renderer_destroy(struct wl_listener *listener,
void *data) {
struct wlr_surface *surface =
wl_container_of(listener, surface, renderer_destroy);
wl_resource_destroy(surface->resource);
}
struct wlr_surface *wlr_surface_create(struct wl_client *client,
uint32_t version, uint32_t id, struct wlr_renderer *renderer,
struct wl_list *resource_list) {
assert(version <= SURFACE_VERSION);
struct wlr_surface *surface = calloc(1, sizeof(struct wlr_surface));
if (!surface) {
wl_client_post_no_memory(client);
return NULL;
}
surface->resource = wl_resource_create(client, &wl_surface_interface,
version, id);
if (surface->resource == NULL) {
free(surface);
wl_client_post_no_memory(client);
return NULL;
}
wl_resource_set_implementation(surface->resource, &surface_interface,
surface, surface_handle_resource_destroy);
wlr_log(WLR_DEBUG, "New wlr_surface %p (res %p)", surface, surface->resource);
surface->renderer = renderer;
surface_state_init(&surface->current);
surface_state_init(&surface->pending);
surface_state_init(&surface->previous);
wl_signal_init(&surface->events.commit);
wl_signal_init(&surface->events.destroy);
wl_signal_init(&surface->events.new_subsurface);
wl_list_init(&surface->subsurfaces);
wl_list_init(&surface->subsurface_pending_list);
wl_list_init(&surface->current_outputs);
pixman_region32_init(&surface->buffer_damage);
pixman_region32_init(&surface->opaque_region);
pixman_region32_init(&surface->input_region);
wl_signal_add(&renderer->events.destroy, &surface->renderer_destroy);
surface->renderer_destroy.notify = surface_handle_renderer_destroy;
struct wl_list *resource_link = wl_resource_get_link(surface->resource);
if (resource_list != NULL) {
wl_list_insert(resource_list, resource_link);
} else {
wl_list_init(resource_link);
}
return surface;
}
struct wlr_texture *wlr_surface_get_texture(struct wlr_surface *surface) {
if (surface->buffer == NULL) {
return NULL;
}
return surface->buffer->texture;
}
bool wlr_surface_has_buffer(struct wlr_surface *surface) {
return wlr_surface_get_texture(surface) != NULL;
}
bool wlr_surface_set_role(struct wlr_surface *surface,
const struct wlr_surface_role *role, void *role_data,
struct wl_resource *error_resource, uint32_t error_code) {
assert(role != NULL);
if (surface->role != NULL && surface->role != role) {
if (error_resource != NULL) {
wl_resource_post_error(error_resource, error_code,
"Cannot assign role %s to wl_surface@%" PRIu32 ", already has role %s\n",
role->name, wl_resource_get_id(surface->resource),
surface->role->name);
}
return false;
}
if (surface->role_data != NULL && surface->role_data != role_data) {
wl_resource_post_error(error_resource, error_code,
"Cannot reassign role %s to wl_surface@%" PRIu32 ","
"role object still exists", role->name,
wl_resource_get_id(surface->resource));
return false;
}
surface->role = role;
surface->role_data = role_data;
return true;
}
static const struct wl_subsurface_interface subsurface_implementation;
static struct wlr_subsurface *subsurface_from_resource(
struct wl_resource *resource) {
assert(wl_resource_instance_of(resource, &wl_subsurface_interface,
&subsurface_implementation));
return wl_resource_get_user_data(resource);
}
static void subsurface_resource_destroy(struct wl_resource *resource) {
struct wlr_subsurface *subsurface = subsurface_from_resource(resource);
wl_list_remove(wl_resource_get_link(resource));
subsurface_destroy(subsurface);
}
static void subsurface_handle_destroy(struct wl_client *client,
struct wl_resource *resource) {
wl_resource_destroy(resource);
}
static void subsurface_handle_set_position(struct wl_client *client,
struct wl_resource *resource, int32_t x, int32_t y) {
struct wlr_subsurface *subsurface = subsurface_from_resource(resource);
if (subsurface == NULL) {
return;
}
subsurface->pending.x = x;
subsurface->pending.y = y;
}
static struct wlr_subsurface *subsurface_find_sibling(
struct wlr_subsurface *subsurface, struct wlr_surface *surface) {
struct wlr_surface *parent = subsurface->parent;
struct wlr_subsurface *sibling;
wl_list_for_each(sibling, &parent->subsurfaces, parent_link) {
if (sibling->surface == surface && sibling != subsurface) {
return sibling;
}
}
return NULL;
}
static void subsurface_handle_place_above(struct wl_client *client,
struct wl_resource *resource, struct wl_resource *sibling_resource) {
struct wlr_subsurface *subsurface = subsurface_from_resource(resource);
if (subsurface == NULL) {
return;
}
struct wlr_surface *sibling_surface =
wlr_surface_from_resource(sibling_resource);
struct wlr_subsurface *sibling =
subsurface_find_sibling(subsurface, sibling_surface);
if (!sibling) {
wl_resource_post_error(subsurface->resource,
WL_SUBSURFACE_ERROR_BAD_SURFACE,
"%s: wl_surface@%" PRIu32 "is not a parent or sibling",
"place_above", wl_resource_get_id(sibling_surface->resource));
return;
}
wl_list_remove(&subsurface->parent_pending_link);
wl_list_insert(&sibling->parent_pending_link,
&subsurface->parent_pending_link);
subsurface->reordered = true;
}
static void subsurface_handle_place_below(struct wl_client *client,
struct wl_resource *resource, struct wl_resource *sibling_resource) {
struct wlr_subsurface *subsurface = subsurface_from_resource(resource);
if (subsurface == NULL) {
return;
}
struct wlr_surface *sibling_surface =
wlr_surface_from_resource(sibling_resource);
struct wlr_subsurface *sibling =
subsurface_find_sibling(subsurface, sibling_surface);
if (!sibling) {
wl_resource_post_error(subsurface->resource,
WL_SUBSURFACE_ERROR_BAD_SURFACE,
"%s: wl_surface@%" PRIu32 " is not a parent or sibling",
"place_below", wl_resource_get_id(sibling_surface->resource));
return;
}
wl_list_remove(&subsurface->parent_pending_link);
wl_list_insert(sibling->parent_pending_link.prev,
&subsurface->parent_pending_link);
subsurface->reordered = true;
}
static void subsurface_handle_set_sync(struct wl_client *client,
struct wl_resource *resource) {
struct wlr_subsurface *subsurface = subsurface_from_resource(resource);
if (subsurface == NULL) {
return;
}
subsurface->synchronized = true;
}
static void subsurface_handle_set_desync(struct wl_client *client,
struct wl_resource *resource) {
struct wlr_subsurface *subsurface = subsurface_from_resource(resource);
if (subsurface == NULL) {
return;
}
if (subsurface->synchronized) {
subsurface->synchronized = false;
if (!subsurface_is_synchronized(subsurface)) {
// TODO: do a synchronized commit to flush the cache
subsurface_parent_commit(subsurface, true);
}
}
}
static const struct wl_subsurface_interface subsurface_implementation = {
.destroy = subsurface_handle_destroy,
.set_position = subsurface_handle_set_position,
.place_above = subsurface_handle_place_above,
.place_below = subsurface_handle_place_below,
.set_sync = subsurface_handle_set_sync,
.set_desync = subsurface_handle_set_desync,
};
/**
 * Checks if this subsurface needs to be marked as mapped. That requires both:
 * - The subsurface has a buffer
 * - Its parent is mapped (only checked when the parent is itself a subsurface)
*/
static void subsurface_consider_map(struct wlr_subsurface *subsurface,
bool check_parent) {
if (subsurface->mapped || !wlr_surface_has_buffer(subsurface->surface)) {
return;
}
if (check_parent) {
if (subsurface->parent == NULL) {
return;
}
if (wlr_surface_is_subsurface(subsurface->parent)) {
struct wlr_subsurface *parent =
wlr_subsurface_from_wlr_surface(subsurface->parent);
if (parent == NULL || !parent->mapped) {
return;
}
}
}
// Now we can map the subsurface
wlr_signal_emit_safe(&subsurface->events.map, subsurface);
subsurface->mapped = true;
// Try mapping all children too
struct wlr_subsurface *child;
wl_list_for_each(child, &subsurface->surface->subsurfaces, parent_link) {
subsurface_consider_map(child, false);
}
}
static void subsurface_unmap(struct wlr_subsurface *subsurface) {
if (!subsurface->mapped) {
return;
}
wlr_signal_emit_safe(&subsurface->events.unmap, subsurface);
subsurface->mapped = false;
// Unmap all children
struct wlr_subsurface *child;
wl_list_for_each(child, &subsurface->surface->subsurfaces, parent_link) {
subsurface_unmap(child);
}
}
static void subsurface_role_commit(struct wlr_surface *surface) {
struct wlr_subsurface *subsurface =
wlr_subsurface_from_wlr_surface(surface);
if (subsurface == NULL) {
return;
}
if (subsurface->current.x != subsurface->pending.x ||
subsurface->current.y != subsurface->pending.y) {
// Subsurface has moved
int dx = subsurface->current.x - subsurface->pending.x;
int dy = subsurface->current.y - subsurface->pending.y;
subsurface->current.x = subsurface->pending.x;
subsurface->current.y = subsurface->pending.y;
if ((surface->current.transform & WL_OUTPUT_TRANSFORM_90) != 0) {
int tmp = dx;
dx = dy;
dy = tmp;
}
pixman_region32_union_rect(&surface->buffer_damage,
&surface->buffer_damage,
dx * surface->previous.scale, dy * surface->previous.scale,
surface->previous.buffer_width, surface->previous.buffer_height);
pixman_region32_union_rect(&surface->buffer_damage,
&surface->buffer_damage, 0, 0,
surface->current.buffer_width, surface->current.buffer_height);
}
subsurface_consider_map(subsurface, true);
}
static void subsurface_role_precommit(struct wlr_surface *surface) {
struct wlr_subsurface *subsurface =
wlr_subsurface_from_wlr_surface(surface);
if (subsurface == NULL) {
return;
}
if (surface->pending.committed & WLR_SURFACE_STATE_BUFFER &&
surface->pending.buffer_resource == NULL) {
// This is a NULL commit
subsurface_unmap(subsurface);
}
}
const struct wlr_surface_role subsurface_role = {
.name = "wl_subsurface",
.commit = subsurface_role_commit,
.precommit = subsurface_role_precommit,
};
static void subsurface_handle_parent_destroy(struct wl_listener *listener,
void *data) {
struct wlr_subsurface *subsurface =
wl_container_of(listener, subsurface, parent_destroy);
subsurface_unmap(subsurface);
wl_list_remove(&subsurface->parent_link);
wl_list_remove(&subsurface->parent_pending_link);
wl_list_remove(&subsurface->parent_destroy.link);
subsurface->parent = NULL;
}
static void subsurface_handle_surface_destroy(struct wl_listener *listener,
void *data) {
struct wlr_subsurface *subsurface =
wl_container_of(listener, subsurface, surface_destroy);
subsurface_destroy(subsurface);
}
struct wlr_subsurface *wlr_subsurface_create(struct wlr_surface *surface,
struct wlr_surface *parent, uint32_t version, uint32_t id,
struct wl_list *resource_list) {
assert(version <= SUBSURFACE_VERSION);
struct wl_client *client = wl_resource_get_client(surface->resource);
struct wlr_subsurface *subsurface =
calloc(1, sizeof(struct wlr_subsurface));
if (!subsurface) {
wl_client_post_no_memory(client);
return NULL;
}
surface_state_init(&subsurface->cached);
subsurface->synchronized = true;
subsurface->surface = surface;
subsurface->resource =
wl_resource_create(client, &wl_subsurface_interface, version, id);
if (subsurface->resource == NULL) {
surface_state_finish(&subsurface->cached);
free(subsurface);
wl_client_post_no_memory(client);
return NULL;
}
wl_resource_set_implementation(subsurface->resource,
&subsurface_implementation, subsurface,
subsurface_resource_destroy);
wl_signal_init(&subsurface->events.destroy);
wl_signal_init(&subsurface->events.map);
wl_signal_init(&subsurface->events.unmap);
wl_signal_add(&surface->events.destroy, &subsurface->surface_destroy);
subsurface->surface_destroy.notify = subsurface_handle_surface_destroy;
// link parent
subsurface->parent = parent;
wl_signal_add(&parent->events.destroy, &subsurface->parent_destroy);
subsurface->parent_destroy.notify = subsurface_handle_parent_destroy;
wl_list_insert(parent->subsurfaces.prev, &subsurface->parent_link);
wl_list_insert(parent->subsurface_pending_list.prev,
&subsurface->parent_pending_link);
surface->role_data = subsurface;
struct wl_list *resource_link = wl_resource_get_link(subsurface->resource);
if (resource_list != NULL) {
wl_list_insert(resource_list, resource_link);
} else {
wl_list_init(resource_link);
}
wlr_signal_emit_safe(&parent->events.new_subsurface, subsurface);
return subsurface;
}
struct wlr_surface *wlr_surface_get_root_surface(struct wlr_surface *surface) {
while (wlr_surface_is_subsurface(surface)) {
struct wlr_subsurface *subsurface =
wlr_subsurface_from_wlr_surface(surface);
if (subsurface == NULL) {
break;
}
if (subsurface->parent == NULL) {
return NULL;
}
surface = subsurface->parent;
}
return surface;
}
bool wlr_surface_point_accepts_input(struct wlr_surface *surface,
double sx, double sy) {
return sx >= 0 && sx < surface->current.width &&
sy >= 0 && sy < surface->current.height &&
pixman_region32_contains_point(&surface->current.input, floor(sx), floor(sy), NULL);
}
struct wlr_surface *wlr_surface_surface_at(struct wlr_surface *surface,
double sx, double sy, double *sub_x, double *sub_y) {
struct wlr_subsurface *subsurface;
wl_list_for_each_reverse(subsurface, &surface->subsurfaces, parent_link) {
double _sub_x = subsurface->current.x;
double _sub_y = subsurface->current.y;
struct wlr_surface *sub = wlr_surface_surface_at(subsurface->surface,
sx - _sub_x, sy - _sub_y, sub_x, sub_y);
if (sub != NULL) {
return sub;
}
}
if (wlr_surface_point_accepts_input(surface, sx, sy)) {
if (sub_x) {
*sub_x = sx;
}
if (sub_y) {
*sub_y = sy;
}
return surface;
}
return NULL;
}
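/*
 * Usage sketch (illustrative only, not in the original source): hit-testing
 * a surface tree from a compositor, with sx/sy in the root surface's
 * coordinate space.
 *
 *   double sub_x, sub_y;
 *   struct wlr_surface *hit =
 *       wlr_surface_surface_at(root, sx, sy, &sub_x, &sub_y);
 *   if (hit != NULL) {
 *       // hit is the topmost (sub)surface accepting input at that point;
 *       // sub_x/sub_y are the coordinates relative to hit.
 *   }
 */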
static void surface_output_destroy(struct wlr_surface_output *surface_output) {
wl_list_remove(&surface_output->bind.link);
wl_list_remove(&surface_output->destroy.link);
wl_list_remove(&surface_output->link);
free(surface_output);
}
static void surface_handle_output_bind(struct wl_listener *listener,
void *data) {
struct wlr_output_event_bind *evt = data;
struct wlr_surface_output *surface_output =
wl_container_of(listener, surface_output, bind);
struct wl_client *client = wl_resource_get_client(
surface_output->surface->resource);
if (client == wl_resource_get_client(evt->resource)) {
wl_surface_send_enter(surface_output->surface->resource, evt->resource);
}
}
static void surface_handle_output_destroy(struct wl_listener *listener,
void *data) {
struct wlr_surface_output *surface_output =
wl_container_of(listener, surface_output, destroy);
surface_output_destroy(surface_output);
}
void wlr_surface_send_enter(struct wlr_surface *surface,
struct wlr_output *output) {
struct wl_client *client = wl_resource_get_client(surface->resource);
struct wlr_surface_output *surface_output;
struct wl_resource *resource;
wl_list_for_each(surface_output, &surface->current_outputs, link) {
if (surface_output->output == output) {
return;
}
}
surface_output = calloc(1, sizeof(struct wlr_surface_output));
if (surface_output == NULL) {
return;
}
surface_output->bind.notify = surface_handle_output_bind;
surface_output->destroy.notify = surface_handle_output_destroy;
wl_signal_add(&output->events.bind, &surface_output->bind);
wl_signal_add(&output->events.destroy, &surface_output->destroy);
surface_output->surface = surface;
surface_output->output = output;
wl_list_insert(&surface->current_outputs, &surface_output->link);
wl_resource_for_each(resource, &output->resources) {
if (client == wl_resource_get_client(resource)) {
wl_surface_send_enter(surface->resource, resource);
}
}
}
void wlr_surface_send_leave(struct wlr_surface *surface,
struct wlr_output *output) {
struct wl_client *client = wl_resource_get_client(surface->resource);
struct wlr_surface_output *surface_output, *tmp;
struct wl_resource *resource;
wl_list_for_each_safe(surface_output, tmp,
&surface->current_outputs, link) {
if (surface_output->output == output) {
surface_output_destroy(surface_output);
wl_resource_for_each(resource, &output->resources) {
if (client == wl_resource_get_client(resource)) {
wl_surface_send_leave(surface->resource, resource);
}
}
break;
}
}
}
void wlr_surface_send_frame_done(struct wlr_surface *surface,
const struct timespec *when) {
struct wl_resource *resource, *tmp;
wl_resource_for_each_safe(resource, tmp,
&surface->current.frame_callback_list) {
wl_callback_send_done(resource, timespec_to_msec(when));
wl_resource_destroy(resource);
}
}
static void surface_for_each_surface(struct wlr_surface *surface, int x, int y,
wlr_surface_iterator_func_t iterator, void *user_data) {
iterator(surface, x, y, user_data);
struct wlr_subsurface *subsurface;
wl_list_for_each(subsurface, &surface->subsurfaces, parent_link) {
struct wlr_subsurface_state *state = &subsurface->current;
int sx = state->x;
int sy = state->y;
surface_for_each_surface(subsurface->surface, x + sx, y + sy,
iterator, user_data);
}
}
void wlr_surface_for_each_surface(struct wlr_surface *surface,
wlr_surface_iterator_func_t iterator, void *user_data) {
surface_for_each_surface(surface, 0, 0, iterator, user_data);
}
struct bound_acc {
int32_t min_x, min_y;
int32_t max_x, max_y;
};
static void handle_bounding_box_surface(struct wlr_surface *surface,
int x, int y, void *data) {
struct bound_acc *acc = data;
acc->min_x = min(x, acc->min_x);
acc->min_y = min(y, acc->min_y);
acc->max_x = max(x + surface->current.width, acc->max_x);
acc->max_y = max(y + surface->current.height, acc->max_y);
}
void wlr_surface_get_extends(struct wlr_surface *surface, struct wlr_box *box) {
struct bound_acc acc = {
.min_x = 0,
.min_y = 0,
.max_x = surface->current.width,
.max_y = surface->current.height,
};
wlr_surface_for_each_surface(surface, handle_bounding_box_surface, &acc);
box->x = acc.min_x;
box->y = acc.min_y;
box->width = acc.max_x - acc.min_x;
box->height = acc.max_y - acc.min_y;
}
static void crop_region(pixman_region32_t *dst, pixman_region32_t *src,
const struct wlr_box *box) {
pixman_region32_intersect_rect(dst, src,
box->x, box->y, box->width, box->height);
pixman_region32_translate(dst, -box->x, -box->y);
}
void wlr_surface_get_effective_damage(struct wlr_surface *surface,
pixman_region32_t *damage) {
pixman_region32_clear(damage);
// Transform and copy the buffer damage in terms of surface coordinates.
wlr_region_transform(damage, &surface->buffer_damage,
surface->current.transform, surface->current.buffer_width,
surface->current.buffer_height);
wlr_region_scale(damage, damage, 1.0 / (float)surface->current.scale);
if (surface->current.viewport.has_src) {
struct wlr_box src_box = {
.x = floor(surface->current.viewport.src.x),
.y = floor(surface->current.viewport.src.y),
.width = ceil(surface->current.viewport.src.width),
.height = ceil(surface->current.viewport.src.height),
};
crop_region(damage, damage, &src_box);
}
if (surface->current.viewport.has_dst) {
int src_width, src_height;
surface_state_viewport_src_size(&surface->current,
&src_width, &src_height);
float scale_x = (float)surface->current.viewport.dst_width / src_width;
float scale_y = (float)surface->current.viewport.dst_height / src_height;
wlr_region_scale_xy(damage, damage, scale_x, scale_y);
}
// On resize, damage the previous bounds of the surface. The current bounds
// have already been damaged in surface_update_damage.
if (surface->previous.width > surface->current.width ||
surface->previous.height > surface->current.height) {
pixman_region32_union_rect(damage, damage, 0, 0,
surface->previous.width, surface->previous.height);
}
// On move, damage where the surface was with its old dimensions.
if (surface->current.dx != 0 || surface->current.dy != 0) {
int prev_x = -surface->current.dx;
int prev_y = -surface->current.dy;
if ((surface->previous.transform & WL_OUTPUT_TRANSFORM_90) != 0) {
int temp = prev_x;
prev_x = prev_y;
prev_y = temp;
}
pixman_region32_union_rect(damage, damage, prev_x, prev_y,
surface->previous.width, surface->previous.height);
}
}
void wlr_surface_get_buffer_source_box(struct wlr_surface *surface,
struct wlr_fbox *box) {
box->x = box->y = 0;
box->width = surface->current.buffer_width;
box->height = surface->current.buffer_height;
if (surface->current.viewport.has_src) {
box->x = surface->current.viewport.src.x * surface->current.scale;
box->y = surface->current.viewport.src.y * surface->current.scale;
box->width = surface->current.viewport.src.width * surface->current.scale;
box->height = surface->current.viewport.src.height * surface->current.scale;
if ((surface->current.transform & WL_OUTPUT_TRANSFORM_90) != 0) {
double tmp = box->x;
box->x = box->y;
box->y = tmp;
tmp = box->width;
box->width = box->height;
box->height = tmp;
}
}
}
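/*
 * Worked example (added for illustration, not in the original source): with
 * viewport src = {x=10, y=20, w=100, h=50} and scale=2, the box is first
 * scaled to {20, 40, 200, 100}; a 90-degree transform then swaps x/y and
 * width/height, giving {40, 20, 100, 200}.
 */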
| 1 | 16,836 | I don't think these asserts are necessary, because `subsurface_find_sibling` already searches in the parent. Or am I missing something? | swaywm-wlroots | c |
@@ -0,0 +1,15 @@
+class Episode < ActiveRecord::Base
+ attr_accessible :title, :duration, :file, :description, :published_on, :notes,
+ :old_url, :file_size
+
+ validates_presence_of :title, :duration, :file, :file_size, :description,
+ :published_on
+
+ def self.published
+ where("published_on <= ?", Date.today).order('published_on desc')
+ end
+
+ def full_title
+ "Episode #{id}: #{title}"
+ end
+end | 1 | 1 | 6,629 | Should this be `number` instead of `id`? | thoughtbot-upcase | rb |
|
@@ -1569,12 +1569,11 @@ NATable *BindWA::getNATable(CorrName& corrName,
((QualifiedName&)(table->getTableName())).setIsVolatile(TRUE);
}
- // For now, do not allow access through the Trafodion external name created for
- // the HIVE object unless the inDDL flag is set. inDDL is set for drop
- // table and SHOWDDL statements.
- // TDB - may want to merge the Trafodion version with the HIVE version.
- // TDB - similar operation may be needed for external HBase tables
- if ((table) && (table->isExternalTable() && (! bindWA->inDDL())))
+ // For now, don't allow access through the Trafodion external name created for
+ // native HIVE or HBASE objects unless the allowExternalTables flag is set.
+ // allowExternalTables is set for drop table and SHOWDDL statements.
+ // TDB - may want to merge the Trafodion version with the native version.
+ if ((table) && (table->isExternalTable() && (! bindWA->allowExternalTables())))
{
*CmpCommon::diags() << DgSqlCode(-4258)
<< DgTableName(table->getTableName().getQualifiedNameAsAnsiString()); | 1 | /**********************************************************************
// @@@ START COPYRIGHT @@@
//
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
//
// @@@ END COPYRIGHT @@@
**********************************************************************/
/* -*-C++-*-
******************************************************************************
*
* File: BindRelExpr.C
* Description: Relational expressions (both physical and logical operators)
* Methods related to the SQL binder
*
* Created: 5/17/94
* Language: C++
*
*
*
* It is the secret sympathy,
* The silver link, the silken tie,
* Which heart to heart, and mind to mind,
* In body and in soul can bind.
* -- Sir Walter Scott,
* "The Lay of the Last Minstrel"
*
******************************************************************************
*/
#define SQLPARSERGLOBALS_FLAGS // must precede all #include's
#define SQLPARSERGLOBALS_NADEFAULTS
#include "Platform.h"
#include "NAWinNT.h"
#include "Sqlcomp.h"
#include "AllItemExpr.h"
#include "AllRelExpr.h"
#include "BindWA.h"
#include "ComOperators.h"
#include "ComTransInfo.h"
#include "ComLocationNames.h"
#include "ControlDB.h"
#include "Debug.h"
#include "ex_error.h"
#include "GroupAttr.h"
#include "ParNameLocList.h"
#include "parser.h"
#include "Rel3GL.h"
#include "RelDCL.h"
#include "RelPackedRows.h"
#include "RelSequence.h"
#include "ShowSchema.h" // GetControlDefaults class
#include "StmtDDLAddConstraintCheck.h"
#include "StmtDDLCreateView.h"
#include "ElemDDLColRefArray.h"
#include "ElemDDLSaltOptions.h"
#include "desc.h"
#include "UdrErrors.h"
#include "SequenceGeneratorAttributes.h"
#include "wstr.h"
#include "Inlining.h"
#include "Triggers.h"
#include "TriggerDB.h"
#include "MVInfo.h"
#include "Refresh.h"
#include "ChangesTable.h"
#include "MvRefreshBuilder.h"
#include "OptHints.h"
#include "CmpStatement.h"
#include "OptimizerSimulator.h"
#include "charinfo.h"
#include "UdfDllInteraction.h"
#include "SqlParserGlobals.h" // must be last #include
#include "ItmFlowControlFunction.h"
#include "ComSchemaName.h" // for ComSchemaName
#include "ItemSample.h"
#include "NAExecTrans.h"
#include "HDFSHook.h"
#include "CmpSeabaseDDL.h"
#include "ComUser.h"
#include "ComSqlId.h"
#include "PrivMgrCommands.h"
#include "PrivMgrComponentPrivileges.h"
#include "PrivMgrDefs.h"
#include "PrivMgrMD.h"
#define SLASH_C '/'
NAWchar *SQLTEXTW();
// -----------------------------------------------------------------------
// external declarations
// -----------------------------------------------------------------------
//
// -----------------------------------------------------------------------
// static functions
// -----------------------------------------------------------------------
#ifdef NDEBUG
THREAD_P NABoolean GU_DEBUG = FALSE;
#else
THREAD_P NABoolean GU_DEBUG;
#endif
static void GU_DEBUG_Display(BindWA *bindWA, GenericUpdate *gu,
const char *text,
RelExpr *reDown = NULL,
NABoolean preEndl = FALSE,
NABoolean postEndl = FALSE)
{
#ifndef NDEBUG
if (!GU_DEBUG)
return;
// LCOV_EXCL_START - dpm
if (preEndl) cerr << endl;
cerr << "---" << endl;
if (gu->getTableDesc()) {
NAString tmp;
ValueIdList vtmp(gu->getTableDesc()->getColumnList());
vtmp.unparse(tmp);
cerr << gu->getUpdTableNameText() << " this>td(" << text << ") "
<< gu->getTableDesc()->getCorrNameObj().getExposedNameAsAnsiString()
<< " " << tmp << endl;
}
RETDesc *rd = gu->getRETDesc();
if (rd) {
cerr << gu->getUpdTableNameText() << " this>grd(" << text << ") " << flush;
rd->display();
}
if (reDown) RETDesc::displayDown(reDown);
if (bindWA->getCurrentScope()->getRETDesc() &&
bindWA->getCurrentScope()->getRETDesc() != rd) {
cerr << gu->getUpdTableNameText() << " bwa>cs>grd(" << text << ") " <<flush;
bindWA->getCurrentScope()->getRETDesc()->display();
}
// LCOV_EXCL_STOP
if (postEndl) cerr << endl;
#endif
} // GU_DEBUG_Display()
#pragma nowarn(770) // warning elimination
static RETDesc *bindRowValues(BindWA *bindWA,
ItemExpr *exprTree,
ValueIdList &vidList,
RelExpr *parent,
NABoolean inTrueRoot)
{
// Before we convert the row value expressions into a ValueIdList, save the
// original value expression root nodes in an ItemExprList.
//
ItemExprList exprList(exprTree, bindWA->wHeap());
//
// Bind the row value expressions and create a ValueIdList.
//
exprTree->convertToValueIdList(vidList, bindWA, ITM_ITEM_LIST, parent);
if (bindWA->errStatus()) return NULL;
// Set up context flags.
// We are in a subquery if the previous scope's flag is set, note.
//
BindScope *currScope = bindWA->getCurrentScope();
BindScope *prevScope = bindWA->getPreviousScope(currScope);
NABoolean inSelectList = currScope->context()->inSelectList();
NABoolean inInsert = currScope->context()->inInsert();
NABoolean inSubquery = FALSE;
if (prevScope)
inSubquery = prevScope->context()->inSubquery();
// See if UDF_SUBQ_IN_AGGS_AND_GBYS is enabled. It is enabled if the
// default is ON, or if the default is SYSTEM and ALLOW_UDF is ON.
NABoolean udfSubqInAggGrby_Enabled = FALSE;
DefaultToken udfSubqTok = CmpCommon::getDefault(UDF_SUBQ_IN_AGGS_AND_GBYS);
if ((udfSubqTok == DF_ON) ||
(udfSubqTok == DF_SYSTEM))
udfSubqInAggGrby_Enabled = TRUE;
// See if ALLOW_MULTIDEGREE_SUBQ_IN_SELECTLIST is enabled. It is
// enabled if the default is ON, or if the default is SYSTEM and
// ALLOW_UDF is ON.
NABoolean allowMultiDegSubqInSelect_Enabled = FALSE;
DefaultToken allowMultiDegreeTok =
CmpCommon::getDefault(ALLOW_MULTIDEGREE_SUBQ_IN_SELECTLIST);
if ((allowMultiDegreeTok == DF_ON) ||
(allowMultiDegreeTok == DF_SYSTEM))
allowMultiDegSubqInSelect_Enabled = TRUE;
//
// Create the result table.
// If a row value expression is not a column reference and does not have
// a rename AS clause, the column is an unnamed expression.
//
RETDesc *resultTable = new (bindWA->wHeap()) RETDesc(bindWA);
CollIndex j = 0;
for (CollIndex i = 0; i < exprList.entries(); i++, j++)
{
ItemExpr *itemExpr = (ItemExpr *) exprList[i];
ValueId valId = itemExpr->getValueId();
ValueId boundValId = vidList[j];
CMPASSERT(boundValId != NULL_VALUE_ID);
if (inSelectList && inTrueRoot &&
(boundValId.getType().getTypeQualifier() == NA_UNKNOWN_TYPE)&&
(boundValId.getItemExpr()->getOperatorType() == ITM_CONSTANT))
{
ConstValue * constItemExpr = (ConstValue*) boundValId.getItemExpr();
if (constItemExpr->isNull())
boundValId.coerceType(NA_NUMERIC_TYPE) ;
}
switch (itemExpr->getOperatorType())
{
case ITM_REFERENCE: {
ColReference *colRef = (ColReference *) itemExpr;
const ColRefName &colRefName = colRef->getColRefNameObj();
CMPASSERT(valId != NULL_VALUE_ID || colRefName.isStar());
if (colRefName.isStar()) {
const ColumnDescList *star = colRef->getStarExpansion();
CMPASSERT(star != NULL);
const ColumnDescList &starExpansion = *star;
CMPASSERT(starExpansion.entries() > 0); // ColRef::bind chked this alrdy
CMPASSERT(inSelectList);
resultTable->addColumns(bindWA, starExpansion);
j += starExpansion.entries() - 1;
} // isStar
else {
// Do another xcnm lookup so the column we add to our resultTable
// will have its CorrName object correct
// (e.g., in "SELECT TL.B,* FROM TA TL,TA TR ORDER BY B;"
// colref TL.B will resolve to TL.B, not CAT.SCH.TL.B)
// and its heading (Genesis 10-980126-5495).
BindScope *bindScope;
ColumnNameMap *xcnmEntry = bindWA->findColumn(colRefName, bindScope);
if (NOT xcnmEntry) // ## I don't recall when this case occurs...
resultTable->addColumn(bindWA,
colRefName,
boundValId,
colRef->getTargetColumnClass());
else
resultTable->addColumn(bindWA,
xcnmEntry->getColRefNameObj(),
boundValId,
colRef->getTargetColumnClass(), // MV --
xcnmEntry->getColumnDesc()->getHeading());
}
break;
}
case ITM_RENAME_COL:
{
RenameCol *renameCol = (RenameCol *) itemExpr;
const ColRefName &colRefName = *renameCol->getNewColRefName();
CMPASSERT(NOT colRefName.isStar());
const char * heading = NULL;
// if this rename was for a BLOB/CLOB column from JDBC, return
// the heading of the child base column. This is needed for JDBC
// as it uses the heading to figure out if the column is a LOB
// column.
if (CmpCommon::getDefault(JDBC_PROCESS) == DF_ON)
{
ItemExpr * childExpr = itemExpr->child(0)->castToItemExpr();
if (childExpr->getOperatorType() == ITM_BASECOLUMN)
{
heading = ((BaseColumn *)childExpr)->getNAColumn()->getHeading();
if (heading)
{
if ((strcmp(heading, "JDBC_BLOB_COLUMN -") != 0) &&
(strcmp(heading, "JDBC_CLOB_COLUMN -") != 0))
heading = NULL;
}
}
}
// No heading is passed here (whole point of SQL derived-column is rename)
// unless it is a jdbc blob/clob heading.
resultTable->addColumn(bindWA,
colRefName,
boundValId,
renameCol->getTargetColumnClass(),
heading);
break;
}
case ITM_ROW_SUBQUERY:
case ITM_USER_DEF_FUNCTION: {
// Deal with multi Valued User Defined Functions or Subqueries with
// degree > 1.
//
// In order to have the correct degree during the bind phase,
// since we don't have all the information until after the transform
// phase, we need to put entries into the RETDesc early.
//
// Say you have a query like this:
// select mvf(a,b) from t1;
// and assume mvf outputs 2 values.
//
// at bind time, the select list will only have 1 entry in it, namely
// the ITM_USER_DEF_FUNCTION.
// Since we do degree checking at bind time, we need to know now that
// mvf() actually produces 2 values.
//
// So what we do here, is that we substitute the original
// ITM_USER_DEF_FUNCTION with ValueIdProxies. One for each output of
// the original function. The selectList of the RelRoot as well as the
// retDESC are updated with the additional elements.
//
// Similarly if we have a subquery like this:
//
// select (select max(a),max(b) from t2), a from t1;
//
  // we will wrap the subquery in a ValueIdProxy representing the
// subquery from a transformation point of view, but representing
// max(a) from an output point of view. A second ValueIdProxy will be
// added for max(b), so the select list of the outer query would look
// like this:
//
// [ ValueIdProxy(Subq:max(a)), ValueIdProxy(Subq:max(b)), a ]
//
// instead of just
//
// [ Subq, a ]
//
// like we are used to.
//
// At transform time the valueIdProxies, will disappear and we will
// transform the UDF/Subquery carried inside the valueIdProxy
// marked to be transformed. Some might hang around until Normalization.
// Only the ValueIdProxy representing the first output will be marked
// to be transformed, so we only transform the UDF/Subquery once.
//
// Similarly, we update the outer query's retDESC.
NABoolean isSubquery =
(itemExpr->getOperatorType() == ITM_ROW_SUBQUERY) ?
TRUE : FALSE;
NAColumnArray outCols;
ValueIdList outColVids;
CollIndex currIndex = j;
if (isSubquery)
{
Subquery * subq = (Subquery *) itemExpr;
const RETDesc *retDesc = subq->getSubquery()->getRETDesc();
if( retDesc )
{
retDesc->getColumnList()->getValueIdList(outColVids);
}
}
else
{
UDFunction * udf = (UDFunction *) itemExpr;
CMPASSERT(udf->getRoutineDesc());
const RoutineDesc *rDesc = udf->getRoutineDesc();
// Get the outputs of this UDF, these are as defined in metadata
// including names etc.
outCols = rDesc->getEffectiveNARoutine()->getOutParams();
outColVids = rDesc->getOutputColumnList();
}
if ( (outColVids.entries() == 1) ||
( isSubquery &&
(!allowMultiDegSubqInSelect_Enabled)
))
{
// Do exactly what we used to do if the degree is 1.
// or we have disallowed subqueries of degree > 1.
if (isSubquery)
{
// ## Here we ought to manufacture a unique name per Ansi 7.9 SR 9c.
ColRefName colRefName;
resultTable->addColumn(bindWA, colRefName, boundValId);
}
else
{
NAColumn *col = outCols[0];
const char * heading = col->getHeading();
ColRefName colRefName( col->getColName());
ColumnClass colClass( col->getColumnClass());
resultTable->addColumn(bindWA,
colRefName,
boundValId,
colClass,
heading);
}
break;
}
// Wrap all the outputs with a ValueIdProxy
// so that we can deal with multiple outputs
// If we didn't have a RETDesc or a RoutineDesc, outColVids
// will be empty and we don't do anything.
// Also we do not need to worry about recursing through the
// RETDesc entries as the call to convertToValueIdList() above
// did that already.
for (CollIndex idx = 0; idx < outColVids.entries(); idx++)
{
NAColumn *col;
NABoolean isRealOrRenameColumn =
(outColVids[idx].getItemExpr()->getOperatorType() ==
ITM_BASECOLUMN) ||
(outColVids[idx].getItemExpr()->getOperatorType() ==
ITM_RENAME_COL) ||
!isSubquery ? TRUE : FALSE;
if (isSubquery)
{
col = ((NAColumn *) outColVids[idx].getItemExpr());
}
else
{
col = ((NAColumn *) outCols[idx]);
}
const char * heading = isRealOrRenameColumn ?
col->getHeading() : "";
ColRefName colRefName( isRealOrRenameColumn ?
col->getColName() : "");
ColumnClass colClass( isRealOrRenameColumn ?
col->getColumnClass() : USER_COLUMN);
// We are wrapping the MVF/Subquery and its additional outputs
// with a ValueIdProxy. This way we don't end up flattening or
// expanding the outputs of the MVF multiple times.
// The valueId of the RoutineParam corresponding to the
// metadata column is used for the output valueId.
// So if you had a query like this:
//
// select swap2(a,b) from t1;
//
// and swap2() returns 2 outputs (basically the inputs swapped)
//
// The new select list for the query would be:
//
// 1: ValueIdProxy with the derivedNode being swap2, and output
// valueId containing the first output parameter of swap2.
// Also the transformDerivedFrom flag would be set
// 2: ValueIdProxy with the derivedNode being swap2, and output
// valueId containing the second output parameter of swap2.
//
// These ValueIdProxy nodes will go away at transform time.
ValueIdProxy *proxyOutput = new (CmpCommon::statementHeap())
ValueIdProxy( boundValId,
outColVids[idx],
idx);
// The type of the proxy is the same as the output valueId associated
// with it.
proxyOutput = (ValueIdProxy *) proxyOutput->bindNode(bindWA);
if (bindWA->errStatus()) return NULL;
// Make sure we transform the MVF
if (idx == 0) proxyOutput->setTransformChild(TRUE);
if (!isSubquery || isRealOrRenameColumn)
{
resultTable->addColumn(bindWA,
colRefName,
proxyOutput->getValueId(),
colClass,
heading);
}
else
{
resultTable->addColumn(bindWA, colRefName,
proxyOutput->getValueId());
}
if (idx == 0)
{
vidList.removeAt(currIndex); // we need to delete the old valueId
}
else
j++; // The first entry simply replaces the original
// Update the list with the new value.
// insertAt has the nice feature that it will push
// the residual elements to the right, so we do not need to
// manage the valueIds we haven't processed yet as long as we
// update the index (j++ above) correctly.
vidList.insertAt(currIndex++,proxyOutput->getValueId());
}
break;
}
default:
{
// ## Here we ought to manufacture a unique name per Ansi 7.9 SR 9c.
ColRefName colRefName;
resultTable->addColumn(bindWA, colRefName, boundValId);
break;
}
} // switch
} // for
// need this for static cursor declaration
cmpCurrentContext->saveRetrievedCols_ = resultTable->getDegree();
// Before we can return the result table, we need to check for the possible
// syntax error below, in which we can't use the definition of "inSubquery"
// that we calculate above. Our example case is, if we're directly below
// a GroupByAgg, then we need to look at the scope *before* the GroupByAgg
// to determine if we satisfy the error condition below. This is a problem
// with how our plan trees don't sync completely with SQL syntax.
// Here's the error case (Genesis 10-980518-0765):
//
// >> select (select distinct 1,2 from T1 t) from T1;
//
// First of all, yes, it's a really stupid query. Oh well! :-)
//
// It's pretty clear that the "1,2" is part of a "select list inside the
// subquery of a select list." However, the parser creates a GroupByAgg
// for the distinct keyword (sigh), which means that we have an
// additional scope between the scope of the SQLRecord (1,2) and the
// scope of the "TRUE" parent, the inner-select. This additional scope
// is for the GroupByAgg. So in the case of a GroupByAgg (and possibly
// another case will arise later ...?), we need to look at the
// GroupByAgg's parent to determine if we satisfy this error condition.
//
// To recap: To handle this one (stupid) case we've added a ton of
// comments and code here and in GroupByAgg::bindNode(), plus created
// the new functions/members BindWA::getSubqueryScope(), and
// BindContext::lookAboveToDecideSubquery_ (plus its accessor). Wonderful!
//
if (prevScope) {
BindScope *subQScope = bindWA->getSubqueryScope(currScope);
//
// subQScope should be non-NULL when prevScope is non-NULL
//
CMPASSERT(subQScope);
NABoolean inSubqueryInSelectList = subQScope->context()->inSubquery() &&
subQScope->context()->inSelectList();
NABoolean inSubqueryInGroupByClause = subQScope->context()->inSubquery() &&
subQScope->context()->inGroupByClause() &&
(CmpCommon::getDefault(UDF_SUBQ_IN_AGGS_AND_GBYS) == DF_ON);
//10-060602-6930 Begin
//Added a check to not enter this condition when we are in bindView scope
if (inSelectList &&
(inSubqueryInSelectList ||
inSubqueryInGroupByClause) &&
!bindWA->inViewExpansion()) {
//10-060602-6930 End
// We now can check for the syntax error that we've done so much work
// above (and in GroupByAgg::bindNode(), BindWA.h & BindWA.cpp)
// to detect:
if ((j > 1) &&
(!allowMultiDegSubqInSelect_Enabled) ) {
// 4019 The select list of a subquery in a select list must be scalar
*CmpCommon::diags() << DgSqlCode(-4019);
bindWA->setErrStatus();
return NULL;
}
}
} // prevScope
return resultTable;
} // bindRowValues()
#pragma warn(770) // warning elimination
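// Illustrative recap of the MVF/subquery expansion performed above (not
// original source; swap2 is the hypothetical two-output UDF from the
// comments in bindRowValues()):
//
//   select swap2(a,b), c from t1;
//
// leaves the binder with a three-entry select list and RETDesc,
//
//   [ ValueIdProxy(swap2:out1), ValueIdProxy(swap2:out2), c ]
//
// where only the first proxy has setTransformChild(TRUE), so the UDF
// itself is transformed exactly once.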
// Bind a constraint (MP Check Constraint).
// Returns NULL if error in constraint *OR* we can safely ignore the constraint
// (e.g., a NOT NULL NONDROPPABLE constraint); caller must check bindWA errsts.
//
static ItemExpr* bindCheckConstraint(
BindWA *bindWA,
CheckConstraint *constraint,
const NATable *naTable,
NABoolean catmanCollectUsages = FALSE,
ItemExpr *viewCheckPred = NULL)
{
ItemExpr *constraintPred = NULL;
if (viewCheckPred) {
// view WITH CHECK OPTION: the view's where-clause was already parsed
// in bindView
CMPASSERT(constraint->getConstraintText().isNull()); // sanity check
constraintPred = viewCheckPred;
}
else {
Parser parser(bindWA->currentCmpContext());
constraintPred = parser.getItemExprTree(constraint->getConstraintText().data(),
constraint->getConstraintText().length(),
CharInfo::UTF8 // ComGetNameInterfaceCharSet()
);
}
if (constraintPred) {
ParNameLocList *saveNameLocList = bindWA->getNameLocListPtr();
if (!catmanCollectUsages ||
!bindWA->getUsageParseNodePtr() ||
bindWA->getUsageParseNodePtr()->getOperatorType() == DDL_CREATE_VIEW)
bindWA->setNameLocListPtr(NULL);
CMPASSERT(!bindWA->getCurrentScope()->context()->inCheckConstraint());
bindWA->getCurrentScope()->context()->inCheckConstraint() = constraint;
constraintPred->bindNode(bindWA);
bindWA->setNameLocListPtr(saveNameLocList);
bindWA->getCurrentScope()->context()->inCheckConstraint() = NULL;
if (bindWA->errStatus()) {
delete constraintPred;
constraintPred = NULL;
}
}
// A NOT NULL constraint on a single column which never allows nulls
// (has no null indicator bytes)
// -- i.e., the common case of a column declared NOT NULL NONDROPPABLE --
// does not need to be separately enforced as a constraint, because
// Executor will raise a numeric-overflow error if someone tries to
// put a NULL into such a column.
//
// So we don't need to put this constraint into the list, but we do need
// to save its name, for run-time error diags.
//
// ##To be done:
// ## GenRelUpdate DP2Insert/Update: for each col in newRecExpr(),
// ## if getNotNullViolationCode(), then
// ## save the SqlCode and the getNotNullConstraintName()...asAnsiString()
// ## and some column identifier (pos or offset) in some per-query struct
// ## Executor: if error 8411, if truly a NULL violation, look up that column
// ## in the nnconstraint struct and populate diags with the info there.
//
if (constraintPred) {
ItemExprList nncols(bindWA->wHeap());
constraintPred->getColumnsIfThisIsISNOTNULL(nncols);
for (CollIndex i = 0; i < nncols.entries(); i++) {
NAColumn *nacol = nncols[i]->getValueId().getNAColumn();
if (!nacol->getType()->supportsSQLnullPhysical()) {
nacol->setNotNullNondroppable(constraint);
//
// DO *NOT* do: delete constraintPred;
// -- it deletes a whole tree of stuff referenced elsewhere!
//
constraintPred = NULL;
} else {
// Leaving the column's type's supportsSQLnullPhysical() as is (TRUE),
// set its supportsSQLnullLogical() to FALSE,
// for the Transform phase.
nacol->mutateType()->setNullable(TRUE/*supports physical nulls*/,
FALSE/*but not logical nulls */);
}
}
}
else {
*CmpCommon::diags() << DgSqlCode(-4025)
<< DgConstraintName(ToAnsiIdentifier(constraint->getConstraintName().getObjectName()))
<< DgTableName(naTable->getTableName().getQualifiedNameAsAnsiString());
bindWA->setErrStatus();
}
return constraintPred;
} // bindCheckConstraint()
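// Example of the NOT NULL absorption above (illustrative only): for a
// column declared
//   CREATE TABLE T (A INT NOT NULL NONDROPPABLE, ...)
// the bound predicate "A IS NOT NULL" is not returned as a separate
// check constraint; instead the NAColumn remembers the constraint via
// setNotNullNondroppable(), and Executor's numeric-overflow check
// enforces it at run time.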
// LCOV_EXCL_START - cnu
static ItemExpr *intersectColumns(const RETDesc &leftTable,
const RETDesc &rightTable,
BindWA* bindWA)
{
ItemExpr *predicate = NULL;
for (CollIndex i = 0; i < leftTable.getDegree(); i++) {
ItemExpr *leftExpr = leftTable.getValueId(i).getItemExpr();
ItemExpr *rightExpr = rightTable.getValueId(i).getItemExpr();
BiRelat *compare = new (bindWA->wHeap())
BiRelat(ITM_EQUAL, leftExpr, rightExpr);
if (predicate)
predicate = new (bindWA->wHeap()) BiLogic(ITM_AND, predicate, compare);
else
predicate = compare;
}
// Binding this predicate must be done in caller's context/scope, not here...
return predicate;
} // intersectColumns()
// LCOV_EXCL_STOP
static ItemExpr *joinCommonColumns(const RelExpr *const leftRelExpr,
const RelExpr *const rightRelExpr,
BindWA* bindWA)
{
const RETDesc &leftTable = *leftRelExpr->getRETDesc();
const RETDesc &rightTable = *rightRelExpr->getRETDesc();
//
// Find the common column names between two tables and create a predicate
// that joins the columns. For example, if tables T1 and T2 have common
// column names A and B, return the predicate T1.A = T2.A AND T1.B = T2.B.
// The checking for ambiguous common columns will be done when they
// are coalesced for the output list.
//
ItemExpr *predicate = NULL;
for (CollIndex i = 0; i < leftTable.getDegree(); i++) {
ColRefName simpleColRefName(leftTable.getColRefNameObj(i).getColName()); //
if (NOT simpleColRefName.isEmpty()) { //
ColumnNameMap *commonCol = rightTable.findColumn(simpleColRefName); //
if (commonCol) { //
ItemExpr *leftExpr = leftTable.getValueId(i).getItemExpr();
ItemExpr *rightExpr = commonCol->getValueId().getItemExpr(); //
bindWA->markAsReferencedColumn(leftExpr);
bindWA->markAsReferencedColumn(rightExpr);
BiRelat *compare = new (bindWA->wHeap())
BiRelat(ITM_EQUAL, leftExpr, rightExpr);
if (predicate)
predicate = new(bindWA->wHeap()) BiLogic(ITM_AND, predicate, compare);
else
predicate = compare;
}
}
}
// Binding this predicate is being done in caller, Join::bindNode()
return predicate;
} // joinCommonColumns()
// Functions findNonCommonColumns() and coalesceCommonColumns()
//
// These create the column descriptors for the result of a natural join.
// A natural join is equivalent to
//
// SELECT SLCC, SLT1, SLT2 FROM T1, T2
//
// where SLCC represents the list of coalesced common columns of T1 and T2,
// SLT1 represents the list of non-common columns of T1, and
// SLT2 represents the list of non-common columns of T2.
//
// A coalesced common column C is equivalent to
//
// COALESCE (T1.C, T2.C) AS C -- i.e. there is no table name; CorrName is ""
//
// where COALESCE (T1.C, T2.C) is equivalent to
//
// CASE WHEN T1.C IS NOT NULL THEN T1.C ELSE T2.C END
//
// Function findNonCommonColumns(), on the first call, coalesces common
// columns into the resultTable, and collects non-common columns.
// On the second call it continues to collect non-common columns.
//
// Function coalesceCommonColumns() adds SLCC, SLT1, SLT2 to the
// resultTable in the proper order.
//
static void findNonCommonColumns(BindWA *bindWA,
OperatorTypeEnum joinType,
const RETDesc &sourceTable,
const RETDesc &targetTable,
RETDesc &resultTable,
ColumnDescList &nonCommonCols)
{
// Used for ANSI 6.4 SR 3aii below.
CorrName implemDependCorr(bindWA->fabricateUniqueName(), TRUE);
//
for (CollIndex i = 0; i < sourceTable.getDegree(); i++) {
const ColRefName &sourceColRefName = sourceTable.getColRefNameObj(i);
ValueId sourceId = sourceTable.getValueId(i);
ColRefName simpleColRefName(sourceColRefName.getColName());
//
// If a column is an unnamed expression, it is a non-common column.
//
if (simpleColRefName.isEmpty())
nonCommonCols.insert(new (bindWA->wHeap())
ColumnDesc(sourceColRefName, sourceId, NULL, bindWA->wHeap()));
else {
ColumnNameMap *commonCol = targetTable.findColumn(simpleColRefName);
//
// If the named column does not have a corresponding column in the
// target table, it is a non-common column.
//
if (NOT commonCol)
nonCommonCols.insert(new (bindWA->wHeap())
ColumnDesc(sourceColRefName, sourceId, NULL, bindWA->wHeap()));
//
// If the target table has more than one corresponding column, error.
//
else if (commonCol->isDuplicate()) {
NAString fmtdList(bindWA->wHeap());
LIST(TableNameMap*) xtnmList(bindWA->wHeap());
targetTable.getTableList(xtnmList, &fmtdList); // Tables in the RETDesc
*CmpCommon::diags() << DgSqlCode(-4004)
<< DgColumnName(simpleColRefName.getColName())
<< DgTableName(commonCol->getColRefNameObj().getCorrNameObj().
getExposedNameAsAnsiString())
<< DgString0(fmtdList)
<< DgString1(bindWA->getDefaultSchema().getSchemaNameAsAnsiString());
bindWA->setErrStatus();
return;
}
else if (joinType != ITM_NO_OP) {
//
// Coalesce the common columns and add them to the result table.
//
ValueId resultId;
switch(joinType) {
case REL_JOIN:
case REL_LEFT_JOIN:
resultId = sourceId;
break;
case REL_RIGHT_JOIN:
resultId = commonCol->getValueId();
break;
default: {
ItemExpr *sourceExpr = sourceId.getItemExpr();
ItemExpr *targetExpr = commonCol->getValueId().getItemExpr();
UnLogic *test = new (bindWA->wHeap())
UnLogic(ITM_IS_NULL, sourceExpr);
ItemExpr *coalesce = new (bindWA->wHeap())
Case(NULL, new (bindWA->wHeap())
IfThenElse(test,
targetExpr,
sourceExpr));
coalesce = coalesce->bindNode(bindWA)->castToItemExpr();
if (bindWA->errStatus()) {
delete test;
delete coalesce;
return;
}
resultId = coalesce->getValueId();
break;
} // default case (braces required since vars are initialized here)
} // switch
//
// ANSI 6.4 SR 3aii:
// We've fabricated a unique implementation-dependent CorrName
// outside the loop; the common columns have this basically
// invisible CorrName, the point of which seems to be that
// select * from
// ta natural join tb
// join -- not natural!
// (ta tx natural join tb ty)
// on 1=1;
// should not generate an ambiguous column reference error
// from the star-expansion. So according to ANSI,
// the two natural joins produce, respectively,
// fab1.slcc, ta.slt1, tb.slt2
// fab2.slcc, tx.slt1, ty.slt2
// so the join produces
// fab1.slcc, ta.slt1, tb.slt2, fab2.slcc, tx.slt1, ty.slt2
// i.e. the two SLCC's are unambiguous.
//
ColRefName implemDepend(simpleColRefName.getColName(),implemDependCorr);
resultTable.addColumn(bindWA, implemDepend, resultId);
} // coalesce SLCC into resultTable
} // named column
} // for
} // findNonCommonColumns()
// Comments for this function can be found above the preceding function.
static void coalesceCommonColumns(BindWA *bindWA,
OperatorTypeEnum joinType,
const RETDesc &leftTable,
const RETDesc &rightTable,
RETDesc &resultTable)
{
ColumnDescList nonCommonCols(bindWA->wHeap());
// non-common columns of the left table
//
// Coalesce the common column names of the left and right tables and add
// them to the result table.
// Collect the non-common column names from the left.
//
findNonCommonColumns(bindWA,
joinType,
leftTable,
rightTable,
resultTable,
nonCommonCols);
if (bindWA->errStatus()) return;
//
// Collect the non-common column names from the right.
//
RETDesc irrelevantOnThisCall;
findNonCommonColumns(bindWA,
ITM_NO_OP, // do not add SLCC to resultTable
rightTable,
leftTable,
irrelevantOnThisCall,
nonCommonCols);
if (bindWA->errStatus()) return;
//
// Add the non-common columns from the left and right to the result table.
//
resultTable.addColumns(bindWA, nonCommonCols);
nonCommonCols.clearAndDestroy();
//
// Add the system columns from the left and right to the result table.
//
resultTable.addColumns(bindWA, *leftTable.getSystemColumnList(), SYSTEM_COLUMN);
resultTable.addColumns(bindWA, *rightTable.getSystemColumnList(), SYSTEM_COLUMN);
} // coalesceCommonColumns()
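// Worked example (illustrative only): for T1(A,B,X) NATURAL JOIN T2(A,B,Y)
// the result description built by the two functions above is
//
//   fab.A, fab.B, T1.X, T2.Y
//
// where fab is the fabricated unique CorrName and each coalesced common
// column c is
//
//   CASE WHEN T1.c IS NOT NULL THEN T1.c ELSE T2.c END
//
// (or simply the left/right ValueId for inner and one-sided outer joins,
// per the switch in findNonCommonColumns()).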
// For Catalog Manager, this function:
// 1) Fixes up the name location list to help with computing of the view text,
// check constraint search condition text, etc.
// 2) Collects the table (base table, view, etc.) usages information for
// view definitions, check constraint definitions, etc.
//
// ** Some of this could be implemented, perhaps more simply,
// ** using BindWA::viewCount() and BindWA::tableViewUsageList().
//
static void BindUtil_CollectTableUsageInfo(BindWA *bindWA,
const CorrName& corrName)
{
// Task (1)
//
ParNameLocList *pNameLocList = bindWA->getNameLocListPtr();
if (pNameLocList)
{
ParNameLoc * pNameLoc
= pNameLocList->getNameLocPtr(corrName.getNamePosition());
if (pNameLoc)
{
if (NOT pNameLoc->getExpandedName(FALSE).isNull())
CMPASSERT(pNameLoc->getExpandedName() ==
corrName.getQualifiedNameObj().getQualifiedNameAsAnsiString());
pNameLoc->setExpandedName(
corrName.getQualifiedNameObj().getQualifiedNameAsAnsiString());
}
//
// Task (2)
//
ExprNode *pUsageParseNode = bindWA->getUsageParseNodePtr();
if (pUsageParseNode)
{
if (pUsageParseNode->getOperatorType() == DDL_CREATE_VIEW)
{
StmtDDLCreateView &cvpn = *pUsageParseNode->castToElemDDLNode()
->castToStmtDDLCreateView();
ParTableUsageList &vtul = cvpn.getViewUsages().getViewTableUsageList();
vtul.insert(corrName.getExtendedQualNameObj());
}
else if (pUsageParseNode->getOperatorType()
== DDL_ALTER_TABLE_ADD_CONSTRAINT_CHECK)
{
StmtDDLAddConstraintCheck &node = *pUsageParseNode->castToElemDDLNode()
->castToStmtDDLAddConstraintCheck();
ParTableUsageList &tul = node.getTableUsageList();
tul.insert(corrName.getQualifiedNameObj());
}
}
} // if (pNameLocList)
} // BindUtil_CollectTableUsageInfo()
void castComputedColumnsToAnsiTypes(BindWA *bindWA,
RETDesc *rd,
ValueIdList &compExpr)
{
const ColumnDescList &cols = *rd->getColumnList();
CollIndex i = cols.entries();
CMPASSERT(i == compExpr.entries());
NAString tmp;
// For a SELECT query that is part of a CREATE VIEW statement, force use of IEEE floating-point
// because SQL/MX Catalog Manager does not support Tandem floating-point, and would return an
// internal error if it is encountered.
if (bindWA->inViewDefinition() || bindWA->inMVDefinition())
tmp = "IEEE";
else
CmpCommon::getDefault(FLOATTYPE, tmp, -1);
NABoolean outputFloattypeIEEE =
((tmp == "IEEE") ||
(CmpCommon::getDefault(ODBC_PROCESS) == DF_ON) ||
(CmpCommon::getDefault(JDBC_PROCESS) == DF_ON));
while (i--) {
ColumnDesc *col = cols[i];
if (col->getValueId().getType().getTypeQualifier() == NA_ROWSET_TYPE) {
return;
}
const NAType &naType = col->getValueId().getType();
//
// Note: the unsupported and DATETIME cases are mutually exclusive with the LARGEDEC case below.
//
if (!naType.isSupportedType()) {
// Unsupported types are displayed as strings of '#' to their display length
ItemExpr *theRepeat =
new (bindWA->wHeap()) Repeat(new (bindWA->wHeap()) SystemLiteral("#"),
new (bindWA->wHeap()) SystemLiteral(
naType.getDisplayLength(
naType.getFSDatatype(),
0,
naType.getPrecision(),
naType.getScale(),
0)));
theRepeat = theRepeat->bindNode(bindWA);
col->setValueId(theRepeat->getValueId());
compExpr[i] = theRepeat->getValueId();
}
else if ((CmpCommon::getDefault(MODE_SPECIAL_1) == DF_ON) &&
(NOT bindWA->inViewDefinition()) &&
(NOT bindWA->inMVDefinition()) &&
(NOT bindWA->inCTAS()) &&
(naType.getTypeQualifier()== NA_DATETIME_TYPE &&
((const DatetimeType &)naType).getSubtype() ==
DatetimeType::SUBTYPE_SQLDate) &&
(! CmpCommon::context()->getSqlmxRegress()) &&
(strcmp(ActiveSchemaDB()->getDefaults().getValue(OUTPUT_DATE_FORMAT),
"ANSI") != 0))
{ // Special1 DATE, return as YY/MM/DD
ItemExpr * newChild =
new (bindWA->wHeap())
Format(col->getValueId().getItemExpr(), "YY/MM/DD", FALSE);
newChild = newChild->bindNode(bindWA);
col->setValueId(newChild->getValueId());
compExpr[i] = newChild->getValueId();
}
// For dynamic queries that are not part of a CREATE VIEW, change the returned type based on the
// 'floattype' CQD. The default is Tandem type.
// This is done to be upward compatible with
// pre-R2 dynamic programs which are coded to expect tandem float
// types in dynamic statements (describe, get descriptor, etc...).
// The static statements are ok as we would convert from/to
// tandem float hostvariables at runtime.
// For the SELECT query that is part of a CREATE VIEW statement, do not convert to any
// Tandem floating-point type because SQL/MX catalog manager does not support Tandem floating-point
// and would give internal error.
if ((naType.getTypeQualifier() == NA_NUMERIC_TYPE) &&
(CmpCommon::context()->GetMode() == STMT_DYNAMIC))
{
NumericType &nTyp = (NumericType &)col->getValueId().getType();
if ((outputFloattypeIEEE &&
(nTyp.getFSDatatype() == REC_TDM_FLOAT32 ||
nTyp.getFSDatatype() == REC_TDM_FLOAT64)) ||
(! outputFloattypeIEEE &&
(nTyp.getFSDatatype() == REC_IEEE_FLOAT32 ||
nTyp.getFSDatatype() == REC_IEEE_FLOAT64)))
{
NAType *newTyp;
if (outputFloattypeIEEE)
{
// convert to IEEE floating point.
newTyp = new (bindWA->wHeap())
SQLDoublePrecision(nTyp.supportsSQLnull(),
bindWA->wHeap(),
nTyp.getBinaryPrecision());
}
else
{
// convert to Tandem floating point.
if (nTyp.getFSDatatype() == REC_IEEE_FLOAT32)
newTyp = new (bindWA->wHeap())
SQLRealTdm(nTyp.supportsSQLnull(),
bindWA->wHeap(),
nTyp.getBinaryPrecision());
else
newTyp = new (bindWA->wHeap())
SQLDoublePrecisionTdm(nTyp.supportsSQLnull(),
bindWA->wHeap(),
nTyp.getBinaryPrecision());
}
ItemExpr *ie = col->getValueId().getItemExpr();
ItemExpr *cast = new (bindWA->wHeap())
Cast(ie, newTyp, ITM_CAST);
cast = cast->bindNode(bindWA);
if (bindWA->errStatus()) return;
col->setValueId(cast->getValueId());
compExpr[i] = cast->getValueId();
}
}
if (naType.getTypeQualifier() == NA_NUMERIC_TYPE && !((NumericType &)col->getValueId().getType()).binaryPrecision()) {
NumericType &nTyp = (NumericType &)col->getValueId().getType();
ItemExpr * ie = col->getValueId().getItemExpr();
NAType *newTyp = NULL;
Lng32 newPrec;
Lng32 newScale;
Lng32 oflow = -1;
Lng32 bignumOflow = -1;
NABoolean bignumIO = FALSE;
if (CmpCommon::getDefault(BIGNUM_IO) == DF_ON)
bignumIO = TRUE; // explicitly set to ON
else if (CmpCommon::getDefault(BIGNUM_IO) == DF_OFF)
bignumIO = FALSE; // explicitly set to OFF
else if (CmpCommon::getDefault(BIGNUM_IO) == DF_SYSTEM)
{
if ((((NumericType &)col->getValueId().getType()).isBigNum()) &&
(((SQLBigNum &)col->getValueId().getType()).isARealBigNum()))
bignumIO = TRUE;
}
if (CmpCommon::getDefaultNumeric(MAX_NUMERIC_PRECISION_ALLOWED) ==
MAX_HARDWARE_SUPPORTED_SIGNED_NUMERIC_PRECISION)
bignumIO = FALSE;
if (bignumIO)
bignumOflow = nTyp.getPrecision() -
(Lng32)CmpCommon::getDefaultNumeric(MAX_NUMERIC_PRECISION_ALLOWED);
else
{
if (nTyp.isSigned())
oflow = nTyp.getPrecision() - MAX_HARDWARE_SUPPORTED_SIGNED_NUMERIC_PRECISION;
else
oflow = nTyp.getPrecision() - MAX_HARDWARE_SUPPORTED_UNSIGNED_NUMERIC_PRECISION;
}
if ((bignumOflow > 0) || (oflow > 0))
{
if (bignumOflow > 0) {
newPrec =
(Lng32)CmpCommon::getDefaultNumeric(MAX_NUMERIC_PRECISION_ALLOWED);
Lng32 orgMagnitude = nTyp.getPrecision() - nTyp.getScale();
// set the newScale
// IF there is overflow in magnitude set the scale to 0.
// ELSE set it to accommodate the magnitude part and truncate the scale
newScale = (orgMagnitude >= newPrec) ? 0 : newPrec - orgMagnitude ;
if (newScale > newPrec)
{
*CmpCommon::diags() << DgSqlCode(-3015)
<< DgInt0(newScale) << DgInt1(newPrec);
bindWA->setErrStatus();
return;
}
newTyp = new (bindWA->wHeap())
SQLBigNum(newPrec,
newScale,
((SQLBigNum &)col->getValueId().getType()).isARealBigNum(),
nTyp.isSigned(),
nTyp.supportsSQLnull(),
NULL);
}
else if (oflow > 0) {
// If it's not a computed expr, but a column w/ a legal type, re-loop
if (col->getValueId().getNAColumn(TRUE/*don't assert*/)) {
//CMPASSERT(!nTyp.isInternalType());
//continue;
}
CMPASSERT(nTyp.isInternalType());
OperatorTypeEnum op = ie->origOpType();
CMPASSERT(op != NO_OPERATOR_TYPE && // Init'd correctly?
op != ITM_RENAME_COL && // Expect these to have
op != ITM_REFERENCE); // been bound, vanished.
ItemExpr *ie2 = ie;
while (op == ITM_INSTANTIATE_NULL)
{
ie2 = ie2->child(0).getPtr();
op = ie2->origOpType();
}
// ANSI 6.5 SR 7 - 9: aggregates must be exact if column is exact.
newPrec = MAX_NUMERIC_PRECISION;
Lng32 orgMagnitude = (nTyp.getMagnitude() + 9) / 10;
// set the newScale
// IF there is overflow in magnitude set the scale to 0.
// ELSE set it to accommodate the magnitude part and truncate the scale
newScale = (orgMagnitude >= newPrec) ? 0 : newPrec - orgMagnitude ;
// Based on the CQD set the scale to MIN value.
// CQD specifies the MIN scale that has to be preserved in case
// of overflow.
NADefaults &defs = ActiveSchemaDB()->getDefaults();
Lng32 minScale = defs.getAsLong(PRESERVE_MIN_SCALE);
newScale = MAXOF(minScale, newScale);
if (op == ITM_SUM || op == ITM_AVG) {
// AVG = DIVIDE( SUM(), COUNT() )
ItemExpr *tmp = (op == ITM_SUM) ?
ie2 : ie2->child(0).getPtr();
//
// Now that we support OLAP functions, this may be
// a pointer to an ITM_NOTCOVERED node. If so, we
// need to check its child(0) node rather than
// the ITM_NOTCOVERED node.
//
if (tmp->getOperatorType() == ITM_NOTCOVERED )
tmp = (Aggregate *)(ItemExpr *)tmp->child(0);
CMPASSERT(tmp->isAnAggregate());
Aggregate *sum = (Aggregate *)tmp;
ItemExpr *arg = (sum->getOriginalChild()) ?
sum->getOriginalChild() : sum->child(0).getPtr();
if (arg->getValueId() == NULL_VALUE_ID)
arg = sum->child(0).getPtr();
CMPASSERT(arg->getValueId() != NULL_VALUE_ID);
Lng32 needScale = arg->getValueId().getType().getScale();
if (needScale > newPrec)
needScale = newPrec;
if (newScale < needScale || op == ITM_SUM) // ANSI 6.5 SR 9 b + c
newScale = needScale;
}
if (newScale == 0)
newTyp = new (bindWA->wHeap())
SQLLargeInt(TRUE, // hardware only supports signed
nTyp.supportsSQLnull());
else
newTyp = new (bindWA->wHeap())
SQLNumeric(sizeof(Int64),
newPrec,
newScale,
nTyp.isSigned(),
nTyp.supportsSQLnull());
} // overflow
ItemExpr *cast = new (bindWA->wHeap())
Cast(ie, newTyp, ITM_CAST, TRUE/*checkForTrunc*/);
cast = cast->bindNode(bindWA);
if (bindWA->errStatus()) return;
if (!col->getColRefNameObj().getColName().isNull()) {
// We get here via CREATE VIEW v AS SELECT (expr op expr) AS nam ...;
// ColumnDesc::setValueId() makes the RETDesc's XCNM inconsistent --
// but this is ok because name lookup over this XCNM doesn't happen
// after the point we've gotten to here --
// a) if caller is StmtDDLCreateView::bindNode via RelRoot::bindNode,
// there's no further lookup at all;
// b) if caller is bindView(), then thanks to the way RenameTable
// and RETDesc work, the inconsistent XCNM is not consulted
// so we don't have to worry about this issue ... (for now anyhow!)
}
col->setValueId(cast->getValueId());
compExpr[i] = cast->getValueId();
} // overflow (bignum or regular)
} // numeric
} // loop over cols in RETDesc
} // castComputedColumnsToAnsiTypes()
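// Worked example of the overflow rescaling above (illustrative only,
// assuming an effective maximum precision of 18): a computed
// NUMERIC(25,10) has magnitude 25 - 10 = 15, so
//   newPrec  = 18
//   newScale = 18 - 15 = 3
// and the expression is cast to NUMERIC(18,3), subject to the
// PRESERVE_MIN_SCALE floor and the ANSI 6.5 adjustments for SUM/AVG.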
desc_struct *generateSpecialDesc(const CorrName& corrName)
{
desc_struct * desc = NULL;
if (corrName.getSpecialType() == ExtendedQualName::VIRTUAL_TABLE)
{
if (corrName.getQualifiedNameObj().getObjectName() == ExplainFunc::getVirtualTableNameStr())
{
ExplainFunc ef;
desc = ef.createVirtualTableDesc();
}
else if (corrName.getQualifiedNameObj().getObjectName() == StatisticsFunc::getVirtualTableNameStr())
{
StatisticsFunc sf;
desc = sf.createVirtualTableDesc();
}
}
return desc;
} // generateSpecialDesc()
// -----------------------------------------------------------------------
// member functions for class BindWA
// -----------------------------------------------------------------------
// LCOV_EXCL_START - cnu
/*
static NABoolean checkForReservedObjectName(QualifiedName &inName)
{
if ((inName.getCatalogName() == "NEO") &&
(inName.getSchemaName() == "PUBLIC_ACCESS_SCHEMA") &&
(inName.getObjectName() == "_MAINTAIN_CONTROL_INFO_"))
{
return TRUE;
}
return FALSE;
}
*/
// LCOV_EXCL_STOP
NARoutine *BindWA::getNARoutine ( const QualifiedName &name )
{
NARoutineDBKey key(name, wHeap());
NARoutine * naRoutine = getSchemaDB()->getNARoutineDB()->get(this, &key);
if (!naRoutine)
{
desc_struct *udfMetadata = NULL;
CmpSeabaseDDL cmpSBD(STMTHEAP);
udfMetadata = cmpSBD.getSeabaseRoutineDesc(
name.getCatalogName(),
name.getSchemaName(),
name.getObjectName());
if (!udfMetadata)
return NULL;
NAHeap *routineHeap;
if (getSchemaDB()->getNARoutineDB()->cachingMetaData())
{
const Lng32 size = 16 * 1024; // The initial size
routineHeap = new CTXTHEAP NAHeap("NARoutine Heap", (NAHeap *)CTXTHEAP,
size);
routineHeap->setJmpBuf(CmpInternalErrorJmpBufPtr);
}
else
routineHeap=CmpCommon::statementHeap();
Int32 errors=0;
naRoutine = new (routineHeap)
NARoutine(name,
udfMetadata,
this,
errors,
routineHeap);
if ( NULL == naRoutine || errors != 0)
{
setErrStatus();
return NULL;
}
// Add NARoutine to the NARoutineDB cache.
if (getSchemaDB()->getNARoutineDB()->cachingMetaData())
getSchemaDB()->getNARoutineDB()->put(naRoutine);
}
return naRoutine;
}
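// Illustrative usage only (hypothetical caller; qualName is assumed),
// a minimal sketch:
/*
NARoutine *routine = bindWA->getNARoutine(qualName);
if (routine == NULL)
  return NULL;   // either metadata was missing or construction failed;
                 // errStatus()/diags are set only in the latter case
*/
// Note the cache-aside pattern above: a hit comes from NARoutineDB; on a
// miss the NARoutine is built from the Seabase routine descriptor and,
// when metadata caching is enabled, inserted back into NARoutineDB on a
// context-lifetime heap.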
NATable *BindWA::getNATable(CorrName& corrName,
NABoolean catmanCollectTableUsages, // default TRUE
desc_struct *inTableDescStruct) // default NULL
{
BindWA *bindWA = this; // for coding convenience
NATable * table = NULL;
// Search in volatile schema first. If not found, search in regular cat/sch.
NABoolean volatileTableFound = FALSE;
NAString userName;
if ((CmpCommon::context()->sqlSession()->volatileSchemaInUse()) &&
(! inTableDescStruct) &&
(corrName.getSpecialType() != ExtendedQualName::VIRTUAL_TABLE))
{
CorrName newCorrName =
CmpCommon::context()->sqlSession()->getVolatileCorrName
(corrName);
if (bindWA->errStatus())
return NULL;
//get NATable from cache
table = bindWA->getSchemaDB()->getNATableDB()->
get(newCorrName, bindWA, inTableDescStruct);
if (!table)
{
// now search in regular cat/sch.
// clear diags area.
CmpCommon::diags()->clear();
bindWA->resetErrStatus();
}
else
{
NABoolean isValid =
CmpCommon::context()->sqlSession()->validateVolatileCorrName
(corrName);
// if this table is found in volatile schema, then
// make sure it is a volatile table.
if ((isValid) &&
(NOT table->isVolatileTable()))
{
*CmpCommon::diags() << DgSqlCode(-4190) <<
DgTableName(table->getTableName().
getQualifiedNameAsAnsiString(TRUE));
bindWA->setErrStatus();
return NULL;
}
if (isValid)
{
newCorrName.setIsVolatile(TRUE);
corrName = newCorrName;
}
else
{
// table was found in the volatile schema but it is
// not a valid volatile name.
// Look for it in regular schema.
table = NULL;
CmpCommon::diags()->clear();
bindWA->resetErrStatus();
// remember that volatile table was found so we
// can generate a better error message later.
volatileTableFound = TRUE;
}
}
}
if (! table)
{
// Expand the table (base table, view, etc.) name with
// the default catalog and schema parts if the specified
// table name does not include these parts.
// This method will also first apply any prototype value (from a host var)
// into the corrName's qualifiedName.
//
NABoolean catNameSpecified =
(NOT corrName.getQualifiedNameObj().getCatalogName().isNull());
NABoolean schNameSpecified =
(NOT corrName.getQualifiedNameObj().getSchemaName().isNull());
// try PUBLIC SCHEMA only when no schema was specified
// and CQD PUBLIC_SCHEMA_NAME is specified
NAString publicSchema = "";
CmpCommon::getDefault(PUBLIC_SCHEMA_NAME, publicSchema, FALSE);
ComSchemaName pubSchema(publicSchema);
NAString pubSchemaIntName = "";
if ( !schNameSpecified && !pubSchema.getSchemaNamePart().isEmpty() )
{
pubSchemaIntName = pubSchema.getSchemaNamePart().getInternalName();
}
corrName.applyDefaults(bindWA, bindWA->getDefaultSchema());
if (bindWA->errStatus())
return NULL; // prototype value parse error
// override schema
if ( ( overrideSchemaEnabled() )
// not volatile table
&& ( ! volatileTableFound )
)
{
doOverrideSchema(corrName);
}
// if DEFAULT_SCHEMA_ACCESS_ONLY, can only access default and public schemas
if (corrName.getSpecialType()==ExtendedQualName::NORMAL_TABLE)
// NORMAL_TABLE also covers synonym, view and MV
{
if (violateAccessDefaultSchemaOnly(corrName.getQualifiedNameObj()))
return NULL;
}
// make sure that schema name is not a VOLATILE SCHEMA
if ((! bindWA->inDDL()) ||
((bindWA->inViewDefinition()) ||
(bindWA->inMVDefinition())))
{
if (! CmpCommon::context()->sqlSession()->validateVolatileQualifiedSchemaName
(corrName.getQualifiedNameObj()))
{
bindWA->setErrStatus();
return NULL;
}
}
//get NATable (from cache or from metadata)
table = bindWA->getSchemaDB()->getNATableDB()->
get(corrName, bindWA, inTableDescStruct);
//try the public schema if not found
if ( !table && !pubSchemaIntName.isNull() )
{
CorrName pCorrName(corrName);
pCorrName.getQualifiedNameObj().setSchemaName(pubSchemaIntName);
if ( !pubSchema.getCatalogNamePart().isEmpty() )
{
pCorrName.getQualifiedNameObj().setCatalogName(
pubSchema.getCatalogNamePart().getInternalName());
}
bindWA->resetErrStatus();
table = bindWA->getSchemaDB()->getNATableDB()->
get(pCorrName, bindWA, inTableDescStruct);
if ( !bindWA->errStatus() && table )
{ // if found in public schema, do not show previous error
// and replace corrName
CmpCommon::diags()->clear();
corrName.getQualifiedNameObj().setCatalogName(
pCorrName.getQualifiedNameObj().getCatalogName());
corrName.getQualifiedNameObj().setSchemaName(
pCorrName.getQualifiedNameObj().getSchemaName());
}
}
// move to here, after public schema try because BindUtil_CollectTableUsageInfo
// saves table info for mv definition, etc.
// Conditionally (usually) do stuff for Catalog Manager (static func above).
if (catmanCollectTableUsages)
if (corrName.getSpecialType() != ExtendedQualName::TRIGTEMP_TABLE)
BindUtil_CollectTableUsageInfo(bindWA, corrName);
if (!table)
{
if (volatileTableFound)
{
if ((CmpCommon::diags()->mainSQLCODE() == -1003) &&
(NOT catNameSpecified))
{
// the name is in true USER_NAME.VOL_TAB_NAME form
// where the USER_NAME doesn't match the current user name.
// Clear errors and return an appropriate message.
CmpCommon::diags()->clear();
CmpCommon::context()->sqlSession()->validateVolatileCorrName
(corrName);
bindWA->setErrStatus();
}
}
return NULL;
}
}
// if a volatile table is found, make sure that volatile schema is in
// use and volatile tables are allowed.
if ((table) && (table->isVolatileTable()))
{
// set volatile table indication in table's tablename
((QualifiedName&)(table->getTableName())).setIsVolatile(TRUE);
}
// For now, do not allow access through the Trafodion external name created for
// the HIVE object unless the inDDL flag is set. inDDL is set for drop
// table and SHOWDDL statements.
// TBD - may want to merge the Trafodion version with the HIVE version.
// TBD - similar operation may be needed for external HBase tables
if ((table) && (table->isExternalTable() && (! bindWA->inDDL())))
{
*CmpCommon::diags() << DgSqlCode(-4258)
<< DgTableName(table->getTableName().getQualifiedNameAsAnsiString());
bindWA->setErrStatus();
return NULL;
}
HostVar *proto = corrName.getPrototype();
if (proto && proto->isPrototypeValid())
corrName.getPrototype()->bindNode(bindWA);
// Solution 10-040518-6149: When we bind the view as part of the compound
// create schema statement, we need to reset referenceCount_ of the base
// table to zero. Otherwise, error 1109 would be reported.
if ( bindWA->isCompoundCreateSchema() && bindWA->inViewDefinition() )
table->resetReferenceCount();
// This test is not "inAnyConstraint()" because we DO want to increment
// the count for View With Check Option constraints.
if (!getCurrentScope()->context()->inTableCheckConstraint() &&
!getCurrentScope()->context()->inRIConstraint())
table->incrReferenceCount();
if (table)
OSIM_captureTableOrView(table);
return table;
} // BindWA::getNATable()
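// Summary of the name resolution order implemented above (descriptive
// note, not original source):
//   1. the volatile schema, when one is in use and no table descriptor
//      was supplied;
//   2. the name with default catalog/schema applied (and the override
//      schema, when enabled);
//   3. the PUBLIC_SCHEMA_NAME schema, when no schema was specified.
// A name found in the volatile schema must denote a volatile table,
// otherwise error 4190 is raised.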
static TableDesc *createTableDesc2(BindWA *bindWA,
const NATable *naTable,
CorrName &corrName, Hint *hint)
{
// Allocate a base table descriptor.
//
TableDesc *tdesc = new (bindWA->wHeap()) TableDesc(bindWA, naTable, corrName);
// Insert the table name into the XTNM.
//
bindWA->getCurrentScope()->getXTNM()->insertNames(bindWA, corrName);
if (bindWA->errStatus()) return NULL;
// For each NAColumn, allocate a BaseColumn, bind the BaseColumn, and
// add the ValueId to the TableDesc.
//
CollIndex i = 0;
for (i = 0; i < naTable->getColumnCount(); i++) {
BaseColumn *baseCol = new (bindWA->wHeap()) BaseColumn(tdesc, i);
baseCol->bindNode(bindWA);
if (bindWA->errStatus())
return NULL;
ValueId valId = baseCol->getValueId();
tdesc->addToColumnList(valId);
}
// set primary key for this table
tdesc->setPrimaryKeyColumns();
// For each index, create an IndexDesc.
//
NAString indexChoice;
NADefaults &defs = ActiveSchemaDB()->getDefaults();
defs.getValue(HIDE_INDEXES,indexChoice);
for (i = 0; i < naTable->getIndexList().entries(); i++)
{
NAFileSet *nfs=naTable->getIndexList()[i];
IndexDesc *idesc = new (bindWA->wHeap())
IndexDesc(tdesc, nfs, bindWA->currentCmpContext());
if (naTable->getClusteringIndex()->getFileSetName() ==
idesc->getIndexName()) {
tdesc->setClusteringIndex(idesc);
idesc->markAsClusteringIndex();
}
if(indexChoice.compareTo("NONE") ==0
OR indexChoice.compareTo("VERTICAL") ==0
OR (indexChoice.compareTo("KEYINDEXES") ==0 AND
tdesc->isKeyIndex(idesc))
OR naTable->getClusteringIndex()->getFileSetName() ==
nfs->getFileSetName())
{
tdesc->addIndex(idesc);
// implementation of optimizer hints
if (hint AND hint->hasIndexHint
(idesc->getNAFileSet()->getExtFileSetName()))
{
tdesc->addHintIndex(idesc);
}
if (idesc->isUniqueIndex() )
tdesc->addUniqueIndex(idesc);
}
else
{
delete idesc;
}
}
// For each vertical partition, create an IndexDesc.
// Add this VP to the list of VPs for the TableDesc.
for (i = 0; i < naTable->getVerticalPartitionList().entries(); i++) {
if(indexChoice.compareTo("NONE") ==0
OR indexChoice.compareTo("INDEXES")==0
OR indexChoice.compareTo("KEYINDEXES")==0)
{
IndexDesc *idesc = new (bindWA->wHeap())
IndexDesc(tdesc, naTable->getVerticalPartitionList()[i],
bindWA->currentCmpContext());
tdesc->addVerticalPartition(idesc);
}
}
// Allocate a RETDesc, attach it to the BindScope.
//
bindWA->getCurrentScope()->setRETDesc(new (bindWA->wHeap())
RETDesc(bindWA, tdesc));
// Do not include tables-referenced-in-a-constraint (when/if we allow them)
// in the view-contains-table list; if we did include them, then
// TableViewUsageList::getViewsOnTable() would give wrong results
// for where it's used to prevent the Halloween problem.
//
// If we end up needing this extra info, I advise either a separate list,
// or a new field in TableViewUsage indicating usage type (containment
// versus reference), enhancing method getViewsOnTable() accordingly.
//
if (!bindWA->getCurrentScope()->context()->inAnyConstraint())
bindWA->tableViewUsageList().insert(new (bindWA->wHeap())
TableViewUsage(
tdesc->getCorrNameObj().getQualifiedNameObj(),
tdesc->getCorrNameObj().getSpecialType(),
naTable->getViewText() != NULL,
bindWA->viewCount()));
return tdesc;
} // static createTableDesc2()
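// Descriptive note (not original source): the HIDE_INDEXES default
// consulted above recognizes the values seen in this function:
//   'NONE'       -- keep all indexes and vertical partitions
//   'KEYINDEXES' -- keep key indexes and all vertical partitions
//   'VERTICAL'   -- keep indexes but hide vertical partitions
//   'INDEXES'    -- hide secondary indexes, keep vertical partitions
// The clustering index is always kept.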
TableDesc *BindWA::createTableDesc(const NATable *naTable,
CorrName &corrName,
NABoolean catmanCollectUsages, Hint *hint)
{
BindWA *bindWA = this; // for coding convenience
TableDesc *tdesc = createTableDesc2(bindWA, naTable, corrName, hint);
if (bindWA->errStatus()) return NULL;
// Now bind any table check constraints and attach them to our new tdesc.
// These constraints must be processed for UPDATE and INSERT.
// DELETEs must clear them; see Delete::bindNode.
//
// For SELECTs, NOT NULL constraints are marked on the NAColumn::allowsNulls
// allowing more elaborate Transformations. For SELECTs, other types of
// constraints are not currently used, but could be in future,
// to optimize by providing additional predicate/selectivity info.
//
// ## We ought to write some regression test cases like
// INSERT INTO T (SELECT * FROM S) -- T's constraints yes, S irrelevant
// INSERT INTO T VALUES ((SELECT A FROM S WHERE..),..)
// INSERT INTO V3 ... -- underlying basetbl's constrts yes
// -- V3 atop VA atop T: let the views be
// -- WITH CHECK OPTION, then viewpred-constrt yes
//
const CheckConstraintList &ccl = naTable->getCheckConstraints();
if (ccl.entries()) {
// Table check constraint text is stored in the metadata tables
// with the underlying table/view name (e.g. "CHECK (C.S.T.COL > 0)"),
// whereas any correlation name in a query
// (e.g. "SELECT * FROM C.S.T FOO WHERE COL < 10")
// is irrelevant to the persistent constraint text --
// when binding the check constraint, we want to find column C.S.T.COL,
// while the TableDesc/RETDesc just built only exposes the column
// under names COL and FOO.COL.
//
// So, if we have a correlation name, we must:
// - rename our TableDesc (rename FOO to C.S.T)
// - create a temporary table name scope for C.S.T that will hide FOO
// - construct a temporary RETDesc with names COL, T.COL, S.T.COL, C.S.T.COL
// but the same ValueId's they had before
//
// Then we bind the constraints using that RETDesc for name lookups.
//
// Then for the non-empty correlation, reset/undo the temporary stuff.
RETDesc *savedRETDesc = NULL;
NABoolean corrNameIsNonEmpty = !corrName.getCorrNameAsString().isNull();
CorrName synonymReferenceCorrName;
if(naTable->getIsSynonymTranslationDone()){
QualifiedName baseQualifiedName(naTable->getSynonymReferenceName(),3);
synonymReferenceCorrName=baseQualifiedName;
}
if ((corrNameIsNonEmpty) || (naTable->getIsSynonymTranslationDone())) {
CorrName baseCorrName;
baseCorrName = (naTable->getIsSynonymTranslationDone()) ? synonymReferenceCorrName : naTable->getTableName();
tdesc->setCorrName(baseCorrName);
bindWA->getCurrentScope()->xtnmStack()->createXTNM();
bindWA->getCurrentScope()->getXTNM()->insertNames(bindWA, baseCorrName);
if (bindWA->errStatus()) return NULL;
savedRETDesc = bindWA->getCurrentScope()->getRETDesc();
bindWA->getCurrentScope()->setRETDesc(new (bindWA->wHeap())
RETDesc(bindWA, tdesc));
if (bindWA->errStatus()) return NULL;
}
for (CollIndex i = 0; i < ccl.entries(); i++) {
ItemExpr *constraintPred =
bindCheckConstraint(bindWA, ccl[i], naTable, catmanCollectUsages);
if (constraintPred)
tdesc->addCheckConstraint(bindWA, naTable, ccl[i], constraintPred);
else if (bindWA->errStatus())
break;
}
if ((corrNameIsNonEmpty) || (naTable->getIsSynonymTranslationDone())){ // reset temporaries
tdesc->setCorrName(corrName);
delete bindWA->getCurrentScope()->getRETDesc();
bindWA->getCurrentScope()->setRETDesc(savedRETDesc);
bindWA->getCurrentScope()->xtnmStack()->removeXTNM();
}
} // check constraint processing required
// if the table contains computed columns, bind the expressions to compute the columns
for (CollIndex c = 0; c < naTable->getColumnCount(); c++) {
NAColumn *nac = tdesc->getNATable()->getNAColumnArray()[c];
if (nac->isComputedColumn()) {
ItemExpr *computedColumnExpr = NULL;
Parser parser(bindWA->currentCmpContext());
// parse the text stored in the NAColumn
computedColumnExpr = parser.getItemExprTree(
nac->getComputedColumnExprString(),
str_len(nac->getComputedColumnExprString()),
CharInfo::UTF8);
if (computedColumnExpr) {
ParNameLocList *saveNameLocList = bindWA->getNameLocListPtr();
bindWA->setNameLocListPtr(NULL);
bindWA->getCurrentScope()->context()->inComputedColumnExpr() = TRUE;
computedColumnExpr = computedColumnExpr->bindNode(bindWA);
bindWA->setNameLocListPtr(saveNameLocList);
bindWA->getCurrentScope()->context()->inComputedColumnExpr() = FALSE;
if (bindWA->errStatus()) {
delete computedColumnExpr;
computedColumnExpr = NULL;
return NULL;
}
else {
// Store the expression tree in the base column
((BaseColumn *) tdesc->getColumnList()[c].getItemExpr())->
setComputedColumnExpr(computedColumnExpr->getValueId());
}
}
}
}
return tdesc;
} // BindWA::createTableDesc()
// QSTUFF - helper for BindWA::bindView.
static void propagateDeleteAndStream(RelExpr *re, GroupAttributes *ga)
{
if (ga->isEmbeddedUpdateOrDelete())
re->getGroupAttr()->setEmbeddedIUD(
ga->getEmbeddedIUD());
if (ga->isStream())
re->getGroupAttr()->setStream(TRUE);
if (ga->isSkipInitialScan())
re->getGroupAttr()->setSkipInitialScan(TRUE);
Int32 arity = re->getArity();
for (Int32 i = 0; i < arity; i++) {
if (re->child(i))
propagateDeleteAndStream(re->child(i), ga);
}
}
RelExpr *BindWA::bindView(const CorrName &viewName,
const NATable *naTable,
const StmtLevelAccessOptions &accessOptions,
ItemExpr *predicate,
GroupAttributes *groupAttrs,
NABoolean catmanCollectUsages)
{
BindWA *bindWA = this; // for coding convenience
CMPASSERT(viewName.getQualifiedNameObj() == naTable->getTableName());
NABoolean inViewExpansion = bindWA->setInViewExpansion(TRUE); // QSTUFF
// set a flag for override_schema
//if (overrideSchemaEnabled())
bindWA->getCurrentScope()->setInViewExpansion(TRUE);
if (!bindWA->getCurrentScope()->context()->inAnyConstraint())
bindWA->tableViewUsageList().insert(new (bindWA->wHeap())
TableViewUsage(
viewName.getQualifiedNameObj(),
viewName.getSpecialType(),
TRUE/*isView*/,
bindWA->viewCount()));
// save the current parserflags setting
ULng32 savedParserFlags = Get_SqlParser_Flags (0xFFFFFFFF);
// allow funny characters in the tablenames used in the select list.
// This enables views to be created on 'internal' secret tables
// so they can be accessed.
// At view creation time, the caller still needs to set this
// parserflag from the sql interface (mxci, etc.), otherwise the view
// creation will fail. Since parserflags can only be set by super
// users, the view with special tablenames could only have been created
// by a super user.
Set_SqlParser_Flags(ALLOW_FUNNY_IDENTIFIER);
// Parse the view text.
//
// isolation level and order by are allowed in create view, if
// the corresponding cqds are set.
// These cqds are only valid during 'create view' time. Once the views
// are created, we don't need to look at them.
// During view expansion when we reach this method, turn the cqds on if
// they are not already on, so parser doesn't return an error.
// Reset them back, if they were set here.
NABoolean allowIsolationLevelWasSet = FALSE;
NABoolean allowOrderByWasSet = FALSE;
if (CmpCommon::getDefault(ALLOW_ISOLATION_LEVEL_IN_CREATE_VIEW) == DF_OFF)
{
allowIsolationLevelWasSet = TRUE;
NAString op("ON");
ActiveSchemaDB()->getDefaults().validateAndInsert
("ALLOW_ISOLATION_LEVEL_IN_CREATE_VIEW", op, FALSE);
}
if (CmpCommon::getDefault(ALLOW_ORDER_BY_IN_CREATE_VIEW) == DF_OFF)
{
allowOrderByWasSet = TRUE;
NAString op("ON");
ActiveSchemaDB()->getDefaults().validateAndInsert
("ALLOW_ORDER_BY_IN_CREATE_VIEW", op, FALSE);
}
Parser parser(bindWA->currentCmpContext());
ExprNode *viewTree = parser.parseDML(naTable->getViewText(),
naTable->getViewLen(),
naTable->getViewTextCharSet());
// Restore parser flags settings to what they originally were
Set_SqlParser_Flags (savedParserFlags);
if (allowIsolationLevelWasSet)
{
NAString op("OFF");
ActiveSchemaDB()->getDefaults().validateAndInsert
("ALLOW_ISOLATION_LEVEL_IN_CREATE_VIEW", op, FALSE);
}
if (allowOrderByWasSet)
{
NAString op("OFF");
ActiveSchemaDB()->getDefaults().validateAndInsert
("ALLOW_ORDER_BY_IN_CREATE_VIEW", op, FALSE);
}
if (NOT viewTree) {
bindWA->setErrStatus();
return NULL;
}
// Remove the StmtQuery node.
// Clear the root flag in the RelRoot node since this is not the topmost
// RelRoot in the query tree.
//
CMPASSERT(viewTree->getOperatorType() == STM_QUERY);
RelExpr *queryTree = viewTree->castToStatementExpr()->getQueryExpression();
CMPASSERT(queryTree->getOperatorType() == REL_ROOT);
((RelRoot *)queryTree)->setRootFlag(FALSE);
CMPASSERT(queryTree->getChild(0)->getOperatorType() == REL_DDL);
StmtDDLCreateView *createViewTree = ((DDLExpr *)(queryTree->getChild(0)))->
getDDLNode()->castToStmtDDLNode()->castToStmtDDLCreateView();
CMPASSERT(createViewTree);
queryTree = createViewTree->getQueryExpression();
CMPASSERT(queryTree->getOperatorType() == REL_ROOT);
((RelRoot *)queryTree)->setRootFlag(FALSE);
RelRoot *viewRoot = (RelRoot *)queryTree; // save for add'l binding below
ParNameLocList *saveNameLocList = bindWA->getNameLocListPtr();
// This was put here for Genesis 10-980217-0467.
// Now with the fix for 10-980408-5149, we even more strongly need to bypass
// or ignore any accessOpts from the view, for a consistent access model.
if ((CmpCommon::getDefault(ALLOW_ISOLATION_LEVEL_IN_CREATE_VIEW) == DF_OFF) ||
(viewRoot->accessOptions().accessType() == ACCESS_TYPE_NOT_SPECIFIED_))
{
// if the cqd is set and view options were explicitly specified,
// then do not overwrite them with accessOptions.
viewRoot->accessOptions() = accessOptions;
}
// Set the WCO context (Genesis 10-971112-7028 + 10-990518-8420):
// If this view is WITH CHECK OPTION, then all views below it acquire
// check-option-ness, per Ansi 11.19 GR 9-11a
// (we implement only CASCADED -- see further notes later on in this func);
// if some view above this one is WCO, then this view effectively is too,
// regardless of its getViewCheck() value.
// Genesis 10-990518-8420 fix in particular:
// with-check-option views of the form
// SELECT..FROM(SELECT..WHERE p1)REN WHERE p2
// were emitting a bind error on pred p1, and ignoring pred p2!
//
NABoolean topmostViewWithCheckOption = FALSE;
if (naTable->getViewCheck() &&
bindWA->getCurrentScope()->context()->inUpdateOrInsert() &&
!bindWA->inViewWithCheckOption()) {
topmostViewWithCheckOption = TRUE;
bindWA->inViewWithCheckOption() = naTable;
}
// QSTUFF
// Give the new query tree the pubsub group attrs before
// binding, so that binder checks are applied to the new tree.
if ((groupAttrs) &&
(groupAttrs->isEmbeddedUpdateOrDelete() || groupAttrs->isStream()))
propagateDeleteAndStream(queryTree,groupAttrs);
// ************ THE FIRST OF TWO BINDNODE'S ************
// Bind the basic queryTree first (before Rename), for stoi_ security stuff.
// Cascade the WCO-ness down to RelExpr::bindSelf which captures predicates.
// On this bind, unconditionally we never collect usages.
//
bindWA->viewCount()++;
bindWA->setNameLocListPtr(NULL); // do not collect usages for catman
queryTree = queryTree->bindNode(bindWA);
if (bindWA->errStatus())
return NULL;
bindWA->setNameLocListPtr(saveNameLocList);
bindWA->viewCount()--;
if (bindWA->errStatus())
return NULL;
// if RelRoot has an order by, insert a Logical Sort node below it
// and move the order by expr from view root to this sort node.
// The view root node is eliminated during transformation/normalization
// and the sortlogical node provides a place to 'hold' the order by expr.
// During transformation, this sort key is moved from the sortlogical node
// to the root node of the query, if there is no explicit order by
// specified as part of the query.
// SortLogical is a short-lived node and is eliminated during
// the normalization phase.
if (viewRoot->hasOrderBy())
{
RelExpr * sortNode = new (bindWA->wHeap())
SortLogical(queryTree->child(0)->castToRelExpr(),
viewRoot->reqdOrder(),
bindWA->wHeap());
sortNode = sortNode->bindNode(bindWA);
if (bindWA->errStatus())
return NULL;
viewRoot->removeOrderByTree();
viewRoot->reqdOrder().clear();
viewRoot->setChild(0, sortNode);
}
// Insert a RenameTable node above the view tree.
//
const NAColumnArray &columns = naTable->getNAColumnArray();
ItemExpr *columnList = new (bindWA->wHeap())
RenameCol(NULL, new (bindWA->wHeap())
ColRefName(columns[0]->getColName(), bindWA->wHeap()));
//
CollIndex i = 1;
for (i = 1; i < naTable->getColumnCount(); i++)
columnList = new (bindWA->wHeap())
ItemList(columnList, new (bindWA->wHeap())
RenameCol(NULL, new (bindWA->wHeap())
ColRefName(columns[i]->getColName(), bindWA->wHeap())));
//
queryTree = new (bindWA->wHeap())
RenameTable(TRUE/*copy tableName as is*/,
queryTree->castToRelExpr(),
viewName,
columnList,
bindWA->wHeap(),
TRUE/*isView*/);
if (predicate) queryTree->addSelPredTree(predicate);
((RenameTable *) queryTree)->setViewNATable(naTable);
// this query used this view
appendViewName
(viewName.getQualifiedNameObj().getQualifiedNameAsAnsiString().data());
// set a flag for override_schema
// with the call to bindNode below, only the Rename node will be bound.
// Since the view has already been expanded we reset the viewExpansion flag here.
//if (overrideSchemaEnabled())
bindWA->getCurrentScope()->setInViewExpansion(inViewExpansion);
// ************ THE SECOND OF TWO BINDNODE'S ************
// Bind the view tree whose top is this new RenameTable.
// If we are the topmost WCO, then do NOT cascade the incoming predicate!
// Collect usages only if CatMan caller requested it.
//
if (topmostViewWithCheckOption) bindWA->inViewWithCheckOption() = NULL;
if (!catmanCollectUsages) bindWA->setNameLocListPtr(NULL);
queryTree = queryTree->bindNode(bindWA);
bindWA->setNameLocListPtr(saveNameLocList);
if (bindWA->errStatus()) return NULL;
((RenameTable *) queryTree)->setViewNATable(NULL);
// Genesis 10-980126-5495:
// Now that we have the RenameTable's RETDesc, set its view column headings.
// We know that the NATable and the RenameTable column lists are in lockstep.
//
const ColumnDescList &columnsRET = *queryTree->getRETDesc()->getColumnList();
CMPASSERT(columns.entries() == naTable->getColumnCount() &&
columns.entries() == columnsRET.entries());
for (i = 0; i < naTable->getColumnCount(); i++)
{
columnsRET[i]->setHeading(columns[i]->getHeading());
}
// If it's a view that is WITH CHECK OPTION, and this is an UPDATE/INSERT,
// bind/transform/normalize the view predicate and place it as a constraint
// on the base table's TableDesc. This is equivalent to the default kind
// of check clause, WITH CASCADED CHECK OPTION, which is all we need provide
// up through Intermediate-Level SQL'92.
//
// (ANSI says that all CHECK OPTION views must be updatable (11.19 SR12)
// which means it references exactly one updatable view or, at bottom,
// exactly one base table (7.9 SR12).
// MP guarantees that all CHECK OPTION views must be protection views, and
// all pviews reference exactly one base table.)
//
// Notice that since (Genesis 10-990518-8420) we now bind and collect the
// view preds in bindSelf -- i.e. pushed down below here --
// only this topmost WCO can set up the constraint(s).
// Thus we have lost the nice, but not mandated by Ansi, ability to specify
// which cascaded-down-to view causes which exact pred violation --
// i.e. error EXE_CHECK_OPTION_VIOLATION_CASCADED (8104)
// no longer appears, only EXE_CHECK_OPTION_VIOLATION (8105).
if (topmostViewWithCheckOption) {
CheckConstraint *constraint = NULL;
ItemExpr *viewCheckPred = NULL;
if (bindWA->predsOfViewWithCheckOption().entries()) {
constraint = new (bindWA->wHeap())
CheckConstraint(viewName.getQualifiedNameObj(), // this view name
naTable->getTableName(), // no parsing needed
bindWA->wHeap());
viewCheckPred = bindWA->predsOfViewWithCheckOption().rebuildExprTree();
}
// if at least one predicate exists in the view or what underlies it
if (constraint) {
RelExpr *underlyingTableOrView = viewRoot->child(0);
CMPASSERT(underlyingTableOrView);
RETDesc *saveRETDesc = bindWA->getCurrentScope()->getRETDesc();
RETDesc *underlyingRETDesc = underlyingTableOrView->getRETDesc();
bindWA->getCurrentScope()->setRETDesc(underlyingRETDesc);
CMPASSERT(underlyingTableOrView->getOperatorType() == REL_RENAME_TABLE ||
underlyingTableOrView->getOperatorType() == REL_SCAN);
ItemExpr *constraintPred =
bindCheckConstraint(bindWA,
constraint,
naTable,
catmanCollectUsages,
viewCheckPred);
if (constraintPred)
queryTree->getScanNode()->getTableDesc()->addCheckConstraint(
bindWA,
naTable, // topmost WCO view
constraint, // this view name
constraintPred);
bindWA->getCurrentScope()->setRETDesc(saveRETDesc);
} // at least one predicate exists
bindWA->inViewWithCheckOption() = NULL;
bindWA->predsOfViewWithCheckOption().clear();
} // topmost WCO view
// QSTUFF
bindWA->setInViewExpansion(inViewExpansion);
bindWA->getUpdateToScanValueIds().clear();
// QSTUFF
return queryTree;
} // BindWA::bindView()
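// Recap of the two bindNode() passes above (descriptive note): pass 1
// binds the parsed view tree itself (security/stoi checks, never
// collecting usages); pass 2 binds the RenameTable wrapper that exposes
// the view's column names to the outer query, collecting usages only
// when the Catalog Manager caller asked for them. WITH CHECK OPTION
// predicates gathered during pass 1 become check constraints on the
// underlying TableDesc.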
// -----------------------------------------------------------------------
// member functions for class RelExpr
// -----------------------------------------------------------------------
void RelExpr::bindChildren(BindWA *bindWA)
{
// Increment the trigger recursion counter.
if (getInliningInfo().isTriggerRoot())
getInliningInfo().getTriggerObject()->incRecursionCounter();
// TSJ's flow their data from left child to right child;
// some can also share binding scope column info from left to right.
Int32 arity = getArity();
for (Int32 i = 0; i < arity; i++) {
if (child(i)) {
// If doing a non-first child and the operator is
// NOT one in which values/names can flow from one scope
// to the sibling scope, then we must clear the current RETDesc
// (so as to disallow the illegal query in the Binder internals document,
// section 1.5.3, also in TEST028).
//
if (i && !getOperator().match(REL_ANY_TSJ))
bindWA->getCurrentScope()->setRETDesc(NULL);
child(i) = child(i)->bindNode(bindWA);
if (bindWA->errStatus()) return;
}
}
synthPropForBindChecks(); // QSTUFF
// Decrement the trigger recursion counter.
if (getInliningInfo().isTriggerRoot())
getInliningInfo().getTriggerObject()->decRecursionCounter();
} // RelExpr::bindChildren()
void RelExpr::synthPropForBindChecks() // QSTUFF
{
// Synthesize delete and stream properties to
// allow for binder checks. We assume that any
// operator -- except UNION -- in which more than one
// child has any of those attributes is rejected when
// binding the respective node. If both attributes are
// specified, both must be specified for the same
// result-set/base table.
for (Int32 j = 0; j < getArity(); j++) {
if (child(j)) {
if (child(j)->getGroupAttr()->isStream())
{
getGroupAttr()->setStream(TRUE);
if (child(j)->getGroupAttr()->isSkipInitialScan())
getGroupAttr()->setSkipInitialScan(TRUE);
}
if (child(j)->getGroupAttr()->isEmbeddedUpdateOrDelete() ||
child(j)->getGroupAttr()->isEmbeddedInsert())
getGroupAttr()->setEmbeddedIUD(
child(j)->getGroupAttr()->getEmbeddedIUD());
if (child(j)->getGroupAttr()->reorderNeeded())
getGroupAttr()->setReorderNeeded(TRUE);
}
}
}
RelExpr *RelExpr::bindSelf(BindWA *bindWA)
{
// create the group attributes
//
if (NOT getGroupAttr())
setGroupAttr(new (bindWA->wHeap()) GroupAttributes);
//
// Detach the item expression tree for the predicate, bind it, convert it to
// a ValueIdSet, and attach it to the RelExpr node.
//
ItemExpr *predTree = removeSelPredTree();
if (predTree) {
bindWA->getCurrentScope()->context()->inWhereClause() = TRUE;
predTree->convertToValueIdSet(selectionPred(), bindWA, ITM_AND);
bindWA->getCurrentScope()->context()->inWhereClause() = FALSE;
if (bindWA->errStatus()) return this;
// If this is an embedded insert, then subquery predicates are not
// allowed.
// For example: To handle this query and issue an error stating
// subqueries are not allowed in embedded inserts
//
// select a from (insert into t901t01 values(22,22,222))t(a,b,c)
// where t.a IN (select m from t901t03 where t901t03.m = 77);
if (getGroupAttr()->isEmbeddedInsert())
{
if (!selectionPred().isEmpty() && selectionPred().containsSubquery())
{
*CmpCommon::diags() << DgSqlCode(-4337);
bindWA->setErrStatus();
return this;
}
}
// Genesis 10-990518-8420.
if (bindWA->inViewWithCheckOption())
bindWA->predsOfViewWithCheckOption() += selectionPred();
}
// ++MV
// Bind the uniqueColumnsTree expression.
//
ItemExpr *uniqueColumnsTree = removeUniqueColumnsTree();
if (uniqueColumnsTree)
{
uniqueColumnsTree->
convertToValueIdSet(getUniqueColumns(), bindWA, ITM_ITEM_LIST);
if (bindWA->errStatus()) return this;
}
// --MV
// set flag here if an Insert/Update/Delete operation is below this node
if( bindWA->isBindingIUD() )
{
setSeenIUD();
}
//
// This mechanism is used to set InliningInfo flags on an entire subtree.
getInliningInfo().setFlags(bindWA->getInliningInfoFlagsToSetRecursivly());
//
// Add the values in the Outer References Set as the input values
// that must be supplied to this RelExpr.
//
getGroupAttr()->addCharacteristicInputs(bindWA->getCurrentScope()->getOuterRefs());
markAsBound();
return this;
} // RelExpr::bindSelf()
RelExpr *RelExpr::bindNode(BindWA *bindWA)
{
if (nodeIsBound())
{
bindWA->getCurrentScope()->setRETDesc(getRETDesc());
return this;
}
bindChildren(bindWA);
if (bindWA->errStatus())
return this;
return bindSelf(bindWA);
}
RETDesc *RelExpr::getRETDesc() const
{
if (RETDesc_)
return RETDesc_;
if (getArity() == 1)
return child(0)->getRETDesc();
else
return NULL;
}
// When there is a view atop a view atop a ... atop a single base table,
// this will follow the chain of RenameTable-RelRoot-... down till it finds
// the bottom, the single base table's Scan node.
//
// This method does check to ensure exactly one single base table.
//
Scan *RelExpr::getScanNode(NABoolean assertExactlyOneScanNode) const
{
RelExpr *result = (RelExpr *)this; // cast away constness, big whoop
while (result) {
if ((result->getOperatorType() == REL_SCAN) ||
(result->getOperatorType() == REL_HBASE_ACCESS))
break;
if (result->getArity() > 1) {
if (assertExactlyOneScanNode)
{
CMPASSERT(result->getArity() <= 1);
}
else return NULL;
}
result = result->child(0);
}
if (assertExactlyOneScanNode) { CMPASSERT(result); }
return (Scan *)result;
}
Scan *RelExpr::getLeftmostScanNode() const
{
RelExpr *result = (RelExpr *)this; // cast away constness, big whoop
while (result) {
if (result->getOperatorType() == REL_SCAN) break;
result = result->child(0);
}
return (Scan *)result;
}
// QSTUFF
// We use this method for finding the scan node of an updatable view.
// This may either be a base table scan or a RenameTable node inserted
// by a previous index expansion.
RelExpr *RelExpr::getViewScanNode(NABoolean isTopLevelUpdateInView) const
{
RelExpr *result = (RelExpr *)this; // cast away constness, big whoop
while (result) {
if (result->getOperatorType() == REL_SCAN) break;
if (result->getOperatorType() == REL_RENAME_TABLE &&
((RenameTable *)result)->isView()) break;
result = result->child(0);
}
return result;
}
// -----------------------------------------------------------------------
// getFirstIUDNode
//
// Return the first node that is an insert, update, or delete.
// Only search down left side from the starting point (currentNode)
//
// If an IUD node is not found, return NULL
// -----------------------------------------------------------------------
GenericUpdate * Join::getFirstIUDNode(RelExpr *currentNode)
{
while(currentNode)
{
if( currentNode->getOperator().match(REL_ANY_GEN_UPDATE))
{
break;
}
currentNode = currentNode->child(0);
}
return (GenericUpdate*)currentNode;
}
// -----------------------------------------------------------------------
// member functions for class Join
//
// When we implement "JOIN USING (column list)", we need to: ##
// - disallow both NATURAL and USING in the same query (syntax err in Parser?)
// - ensure that the named USING cols are indeed common cols
// - coalesce common cols for USING just as we do for NATURAL,
// including ensuring that common cols are marked as referenced
// (as done in joinCommonColumns)
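// For example (hypothetical): T1(a,b) JOIN T2(a,c) USING (a) should
// expose the columns (a,b,c), with a single coalesced column a, just
// as T1 NATURAL JOIN T2 does.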
// -----------------------------------------------------------------------
RelExpr *Join::bindNode(BindWA *bindWA)
{
if (nodeIsBound())
{
bindWA->getCurrentScope()->setRETDesc(getRETDesc());
return this;
}
// Not supported for general NEO users.
if ( (getOperatorType() == REL_FULL_JOIN) &&
(CmpCommon::getDefault(COMP_BOOL_192) == DF_ON) ) {
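// Rewrite the full outer join as, roughly:
//   (T1 LEFT JOIN T2)
//   UNION ALL
//   (T1 RIGHT JOIN T2, keeping only rows whose left side was
//    null-instantiated)
// The IS NULL filter built below, on a null-instantiated non-nullable
// constant, keeps exactly the right rows that have no left match,
// so no row is produced twice.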
RelExpr *leftJoin = this;
leftJoin->setOperatorType(REL_LEFT_JOIN);
RelExpr *antiJoin = leftJoin->copyTree(bindWA->wHeap());
antiJoin->setOperatorType(REL_RIGHT_JOIN);
NAString leftName("ALJ", bindWA->wHeap());
// Make it unique.
//
leftName += bindWA->fabricateUniqueName();
RelExpr *rename = new (bindWA->wHeap())
RenameTable(antiJoin, leftName);
RelExpr *unionAll = new (bindWA->wHeap()) Union(leftJoin, rename);
unionAll->bindNode(bindWA);
if (bindWA->errStatus()) return this;
// Make sure there is at least one null instantiated
// value that is suitable for use as a filter.
// To be suitable, it must be null instantiated and
// its child must not be nullable. We want to filter
// the NULLs that are a result of null instantiation, not
// original null values.
//
ItemExpr *cval = new (bindWA->wHeap()) SystemLiteral(1);
cval->bindNode(bindWA);
if (bindWA->errStatus()) return this;
// Null instantiate the value.
//
ValueId niCval = cval->getValueId().nullInstantiate(bindWA, TRUE);
// Add it to the RETDesc of the Join.
//
ColRefName cvalName("", bindWA->wHeap());
antiJoin->getRETDesc()->addColumn(bindWA, cvalName , niCval, USER_COLUMN);
// Add it to the list of null instantiated outputs.
//
((Join *)antiJoin)->nullInstantiatedOutput().insert(niCval);
ItemExpr *nullCheck = niCval.getItemExpr();
CMPASSERT(nullCheck);
ItemExpr *filter = new (bindWA->wHeap())
UnLogic(ITM_IS_NULL, nullCheck );
filter->bindNode(bindWA);
if (bindWA->errStatus()) return this;
// Add filter to Join
//
antiJoin->selectionPred() += filter->getValueId();
return unionAll;
}
Join *saveInJ = bindWA->getCurrentScope()->context()->inJoin();
bindWA->getCurrentScope()->context()->inJoin() = this;
NABoolean savedPrivSetting = FALSE;
// Bind the child nodes.
//
bindChildren(bindWA);
if (bindWA->errStatus()) return this;
// MV logging push-down
if( getInliningInfo().isDrivingMvLogInsert() )
{
GenericUpdate *rightSideIUD = getFirstIUDNode(this->child(1));
if( NULL != rightSideIUD )
{
TableDesc *tdesc = rightSideIUD->getTableDesc();
CMPASSERT(tdesc);
const NATable *table = tdesc->getNATable();
// only for MV logs
if( ExtendedQualName::IUD_LOG_TABLE == table->getSpecialType() )
{
updateTableDesc_ = tdesc;
updateSelectValueIdMap_ = new (bindWA->wHeap())
ValueIdMap(rightSideIUD->updateToSelectMap());
}
}
}
// Controlled availability of Full Outer Join support.
// COMP_BOOL_199 must be removed when full outer join
// becomes generally available.
// Full outer joins are not currently supported,
// but can be enabled by setting COMP_BOOL_199 to ON.
if ((getOperatorType() == REL_FULL_JOIN &&
(CmpCommon::getDefault(COMP_BOOL_199) == DF_OFF))
|| //OR
(getOperatorType() == REL_UNION_JOIN )){
// 3022 Feature not yet supported
*CmpCommon::diags() << DgSqlCode(-3022)
<< DgString0(
(getOperatorType() == REL_FULL_JOIN) ?
"FULL OUTER JOIN" : "UNION JOIN");
bindWA->setErrStatus();
return this;
}
//
// Bind the ON clause of the join.
//
RelExpr *leftRelExpr = child(0).getPtr();
RelExpr *rightRelExpr = child(1).getPtr();
RETDesc *leftTable = child(0)->getRETDesc();
RETDesc *rightTable = child(1)->getRETDesc();
ItemExpr *joinPredx;
if (isNaturalJoin()) {
// Since the common column references need to fetch histograms, the where
// flag is set here so that when we call markAsReferencedColumn()
// in the joinCommonColumns() method it will mark the common
// columns as referenced by looking at the inWhereClause_ flag.
NABoolean orig = bindWA->getCurrentScope()->context()->inWhereClause();
bindWA->getCurrentScope()->context()->inWhereClause() = TRUE;
joinPredx = joinCommonColumns(leftRelExpr, rightRelExpr, bindWA);
bindWA->getCurrentScope()->context()->inWhereClause() = orig;
}
else
joinPredx = removeJoinPredTree();
if (joinPredx) {
ItemExpr *saveInJP = bindWA->getCurrentScope()->context()->inJoinPred();
bindWA->getCurrentScope()->context()->inJoinPred() = joinPredx;
RETDesc preJoinResult;
preJoinResult.addColumns(bindWA, *leftTable);
preJoinResult.addColumns(bindWA, *rightTable);
bindWA->getCurrentScope()->setRETDesc(&preJoinResult);
joinPredx->convertToValueIdSet(joinPred(), bindWA, ITM_AND);
bindWA->getCurrentScope()->context()->inJoinPred() = saveInJP;
if (bindWA->errStatus()) return this;
}
//
// Create the output list.
// The TRUE's in the nullInstantiate() force a Cast expression to be set up,
// as required by the Normalizer.
//
NABoolean newTables = TRUE;
ValueIdList &nullOutputList = nullInstantiatedOutput();
ValueIdList &nullOutputForRightJoinList = nullInstantiatedForRightJoinOutput();
switch(getOperatorType()) {
case REL_LEFT_JOIN:
leftTable = new (bindWA->wHeap()) RETDesc(bindWA, *leftTable);
rightTable = rightTable->nullInstantiate(bindWA, TRUE, nullOutputList);
break;
case REL_RIGHT_JOIN:
leftTable = leftTable->nullInstantiate(bindWA, TRUE, nullOutputList);
rightTable = new (bindWA->wHeap()) RETDesc(bindWA, *rightTable);
break;
case REL_FULL_JOIN:
case REL_UNION_JOIN:
{
leftTable = leftTable->nullInstantiate(bindWA, TRUE, nullOutputForRightJoinList);
rightTable = rightTable->nullInstantiate(bindWA, TRUE, nullOutputList);
// COMP_BOOL_198 = 'ON' enables the FullOuter transformation
// to inner, left, or right joins
if (CmpCommon::getDefault(COMP_BOOL_198) == DF_OFF) //don't enable FOJ transformation
{
ItemExpr * instNull = NULL;
CollIndex index = 0;
// disable the FOJ Transformation.
for (index = 0; index < nullInstantiatedOutput().entries(); index++)
{
instNull = nullInstantiatedOutput()[index].getItemExpr();
CMPASSERT(instNull->getOperatorType() == ITM_INSTANTIATE_NULL);
((InstantiateNull *)instNull)->NoCheckforLeftToInnerJoin = TRUE;
} // endfor
instNull = NULL;
for (index = 0;
index < nullInstantiatedForRightJoinOutput().entries(); index++)
{
instNull = nullInstantiatedForRightJoinOutput()[index].getItemExpr();
CMPASSERT(instNull->getOperatorType() == ITM_INSTANTIATE_NULL);
((InstantiateNull *)instNull)->NoCheckforLeftToInnerJoin = TRUE;
} // endfor
} // COMP_BOOL_198 == DF_OFF (FOJ transformation disabled)
break;
}
case REL_JOIN:
default:
newTables = FALSE;
break;
}
RETDesc *resultTable = new (bindWA->wHeap()) RETDesc(bindWA);
Int32 rowSet = (child(0)->getOperatorType() == REL_RENAME_TABLE) &&
(child(0)->child(0)->getOperatorType() == REL_UNPACKROWS) &&
(child(1)->getOperatorType() == REL_ROOT);
if (NOT isNaturalJoin()) {
if ((!rowSet) &&
(getOperatorType() != REL_TSJ_FLOW)) {
resultTable->addColumns(bindWA, *leftTable);
}
// ++MV -- bug fixing for semi-joins
if (!isSemiJoin())
{
resultTable->addColumns(bindWA, *rightTable);
}
// --MV -- bug fixing for semi-joins
} else {
coalesceCommonColumns(bindWA,
getOperatorType(),
*leftTable,
*rightTable,
*resultTable);
if (bindWA->errStatus()) return this;
}
setRETDesc(resultTable);
bindWA->getCurrentScope()->setRETDesc(resultTable);
// QSTUFF
NAString fmtdList(bindWA->wHeap());
LIST(TableNameMap*) xtnmList(bindWA->wHeap());
bindWA->getTablesInScope(xtnmList, &fmtdList);
if ((child(0)->getGroupAttr()->isStream()) &&
(child(1)->getGroupAttr()->isStream())){
bindWA->getTablesInScope(xtnmList, &fmtdList);
*CmpCommon::diags() << DgSqlCode(-4158)
<< DgString0(fmtdList);
bindWA->setErrStatus();
return this;
}
// Disallowing joins for EMBEDDED...INSERT
//
if (getGroupAttr()->isEmbeddedInsert() &&
!isTSJForWrite() // the tsjForWrite flag is set for
// those joins which are created by
// the Binder during inlining (e.g. IndexMaintenance).
// Here we only want to disallow user-specified joins,
// not joins introduced as part of inlining.
){
*CmpCommon::diags() << DgSqlCode(-4336)
<< DgString0(fmtdList)
<< DgString1(getGroupAttr()->getOperationWithinGroup());
bindWA->setErrStatus();
return this;
}
if ( ((child(0)->getGroupAttr()->isEmbeddedUpdateOrDelete()) &&
(child(1)->getGroupAttr()->isEmbeddedUpdateOrDelete())) ||
((child(0)->getGroupAttr()->isEmbeddedInsert()) &&
(child(1)->getGroupAttr()->isEmbeddedInsert())) ||
(bindWA->isEmbeddedIUDStatement()) ) {
NAString type0,type1;
if (child(0)->getGroupAttr()->isEmbeddedUpdate())
type0 = "UPDATE";
else
{
if (child(0)->getGroupAttr()->isEmbeddedInsert())
type0 = "INSERT";
else
type0 = "DELETE";
}
if (child(1)->getGroupAttr()->isEmbeddedUpdate())
type1 = "UPDATE";
else
{
if (child(1)->getGroupAttr()->isEmbeddedInsert())
type1 = "INSERT";
else
type1 = "DELETE";
}
*CmpCommon::diags() << DgSqlCode(-4175)
<< DgString0(fmtdList)
<< DgString1(type0)
<< DgString2(type1);
bindWA->setErrStatus();
return this;
}
if ((child(0)->getGroupAttr()->isEmbeddedUpdateOrDelete() ||
child(0)->getGroupAttr()->isStream()) &&
(child(1)->getGroupAttr()->isEmbeddedUpdateOrDelete() ||
child(1)->getGroupAttr()->isStream())){
*CmpCommon::diags() << DgSqlCode(-4176)
<< DgString0(fmtdList)
<< (getGroupAttr()->isEmbeddedUpdate() ?
DgString1("UPDATE"):DgString1("DELETE"));
bindWA->setErrStatus();
return this;
}
if (getOperatorType() == REL_LEFT_JOIN){
if (child(1)->getGroupAttr()->isEmbeddedUpdateOrDelete()){
*CmpCommon::diags() << DgSqlCode(-4156)
<< DgString0(fmtdList)
<< (child(1)->getGroupAttr()->isEmbeddedUpdate() ?
DgString1("UPDATE"):DgString1("DELETE"));
bindWA->setErrStatus();
return this;
}
if (child(1)->getGroupAttr()->isStream()){
*CmpCommon::diags() << DgSqlCode(-4157)
<< DgString0(fmtdList);
bindWA->setErrStatus();
return this;
}
}
if (getOperatorType() == REL_RIGHT_JOIN){
if (child(0)->getGroupAttr()->isEmbeddedUpdateOrDelete()){
*CmpCommon::diags() << DgSqlCode(-4164)
<< DgString0(fmtdList)
<< (child(0)->getGroupAttr()->isEmbeddedUpdate() ?
DgString1("UPDATE"):DgString1("DELETE"));
bindWA->setErrStatus();
return this;
}
if (child(0)->getGroupAttr()->isStream()){
*CmpCommon::diags() << DgSqlCode(-4165)
<< DgString0(fmtdList);
bindWA->setErrStatus();
return this;
}
}
// we need to move stream and nested updates to the
// left to ensure correct execution. This causes the statement
// to be rejected if the user specified join_order_by_user and
// the query must be reordered
if (child(1)->getGroupAttr()->isStream() ||
child(1)->getGroupAttr()->isEmbeddedUpdateOrDelete()){
getGroupAttr()->setReorderNeeded(TRUE);
}
// QSTUFF
if (newTables) {
delete leftTable;
delete rightTable;
}
bindWA->getCurrentScope()->context()->inJoin() = saveInJ;
if (getOperatorType() == REL_TSJ){
// Using rowsets in a predicate with embedded update/delete results
// in a NestedJoin subtree after normalization. This NestedJoin subtree
// has the embedded update/delete as its right child, which is not allowed
// during optimization. Here we disallow this usage at binding time,
// when a REL_TSJ subtree has rowsets as the left child and embedded
// update/delete as the right child. Error 4213 is signaled.
if (rowSet && getGroupAttr()->isEmbeddedUpdateOrDelete()){
*CmpCommon::diags() << DgSqlCode(-4213);
bindWA->setErrStatus();
return this;
}
}
// transfer rowsetRowCountArraySize from HostArrayWA to this node.
if (bindWA->getHostArraysArea() && isRowsetIterator())
setRowsetRowCountArraySize(bindWA->getHostArraysArea()->getRowsetRowCountArraySize());
// Bind the base class.
//
return bindSelf(bindWA);
} // Join::bindNode()
//++MV
// This function builds the ValueIdMap that is used for translating the
// required sort key to the right child sort key and back
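// For example (hypothetical): if the required sort key contains the
// null-instantiated column InstantiateNull(T2.a), the map lets us
// rewrite it to T2.a when pushing the sort order to the right child,
// and map it back when synthesizing the parent's order.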
void Join::BuildRightChildMapForLeftJoin()
{
ValueIdMap &map = rightChildMapForLeftJoin();
for (CollIndex j = 0; j < nullInstantiatedOutput().entries(); j++)
{
ValueId instNullId, rightChildId;
instNullId = nullInstantiatedOutput_[j];
assert(instNullId.getItemExpr()->getOperatorType() == ITM_INSTANTIATE_NULL);
// Access the operand of the InstantiateNull
rightChildId = (((InstantiateNull *)(instNullId.getItemExpr()))->getExpr()->getValueId());
map.addMapEntry(instNullId, rightChildId);
}
}
//--MV
//++MV
// This function builds the ValueIdMap that is used for translating the
// required sort key to the left child sort key and back
void Join::BuildLeftChildMapForRightJoin()
{
ValueIdMap &map = leftChildMapForRightJoin();
for (CollIndex j = 0; j < nullInstantiatedForRightJoinOutput().entries(); j++)
{
ValueId instNullId, leftChildId;
instNullId = nullInstantiatedForRightJoinOutput_[j];
assert(instNullId.getItemExpr()->getOperatorType() == ITM_INSTANTIATE_NULL);
// Access the operand of the InstantiateNull
leftChildId = (((InstantiateNull *)(instNullId.getItemExpr()))->getExpr()->getValueId());
map.addMapEntry(instNullId, leftChildId);
}
}
//--MV
// -----------------------------------------------------------------------
// member functions for class Intersect
// -----------------------------------------------------------------------
// LCOV_EXCL_START - cnu
RelExpr *Intersect::bindNode(BindWA *bindWA)
{
if (nodeIsBound())
{
bindWA->getCurrentScope()->setRETDesc(getRETDesc());
return this;
}
// Bind the child nodes.
//
bindChildren(bindWA);
if (bindWA->errStatus()) return this;
// Check that there are an equal number of select items on both sides.
//
const RETDesc &leftTable = *child(0)->getRETDesc();
const RETDesc &rightTable = *child(1)->getRETDesc();
if (leftTable.getDegree() != rightTable.getDegree()) {
// 4014 The operands of an intersect must be of equal degree.
*CmpCommon::diags() << DgSqlCode(-4014);
bindWA->setErrStatus();
return this;
}
// Join the columns of both sides. This is wrong semantics tho! ##
//
*CmpCommon::diags() << DgSqlCode(-3022) // ## INTERSECT not yet supported
<< DgString0("INTERSECT"); // ##
bindWA->setErrStatus(); // ##
if (bindWA->errStatus()) return NULL; // ##
//
ItemExpr *predicate = intersectColumns(leftTable, rightTable, bindWA);
RelExpr *join = new (bindWA->wHeap())
Join(child(0)->castToRelExpr(),
child(1)->castToRelExpr(),
REL_JOIN,
predicate);
// Bind the join.
//
join = join->bindNode(bindWA)->castToRelExpr();
if (bindWA->errStatus()) return join;
// Change the output of the join to just the left side.
//
delete join->getRETDesc();
join->setRETDesc(new (bindWA->wHeap()) RETDesc(bindWA, leftTable));
bindWA->getCurrentScope()->setRETDesc(join->getRETDesc());
// QSTUFF
NAString fmtdList1(bindWA->wHeap());
LIST(TableNameMap*) xtnmList1(bindWA->wHeap());
NAString fmtdList2(bindWA->wHeap());
LIST(TableNameMap*) xtnmList2(bindWA->wHeap());
leftTable.getTableList(xtnmList1, &fmtdList1);
rightTable.getTableList(xtnmList2, &fmtdList2);
if (child(0)->getGroupAttr()->isStream() &&
child(1)->getGroupAttr()->isStream()){
*CmpCommon::diags() << DgSqlCode(-4159)
<< DgString0(fmtdList1) << DgString1(fmtdList2);
bindWA->setErrStatus();
return this;
}
// Needs to be removed when supporting get_next for INTERSECT
if (getGroupAttr()->isEmbeddedUpdateOrDelete()) {
*CmpCommon::diags() << DgSqlCode(-4160)
<< DgString0(fmtdList1)
<< DgString1(fmtdList2)
<< (child(0)->getGroupAttr()->isEmbeddedUpdate() ?
DgString2("UPDATE"):DgString2("DELETE"))
<< (child(1)->getGroupAttr()->isEmbeddedUpdate() ?
DgString3("UPDATE"):DgString3("DELETE"));
bindWA->setErrStatus();
return this;
}
// QSTUFF
return join;
} // Intersect::bindNode()
// LCOV_EXCL_STOP
// -----------------------------------------------------------------------
// member functions for class Union
// -----------------------------------------------------------------------
RelExpr *Union::bindNode(BindWA *bindWA)
{
if (nodeIsBound())
{
bindWA->getCurrentScope()->setRETDesc(getRETDesc());
return this;
}
//
// Bind the conditional expression.
//
ItemExpr *condExprTree = removeCondExprTree();
if (condExprTree)
{
condExprTree->convertToValueIdList(condExpr(), bindWA, ITM_ITEM_LIST);
if (bindWA->errStatus()) {
return NULL;
}
}
//
// Bind the triggered action exception expression.
//
ItemExpr *trigExprTree = removeTrigExceptExprTree();
if (trigExprTree)
{
// The assumption in the binder (in Union::addValueIdUnion) is that
// unionMap_->count_ is always less than or equal to one, but trigger
// code might increment this count during binding because of
// recursive triggers or triggers that are used more than once
// in the statement. This check fixes the unionMap_ for triggers.
if ((unionMap_ != NULL) && (unionMap_->count_ > 1))
{
unionMap_->count_--;
unionMap_ = new (CmpCommon::statementHeap()) UnionMap;
}
trigExprTree->convertToValueIdList(trigExceptExpr(), bindWA, ITM_ITEM_LIST);
if (bindWA->errStatus()) {
return NULL;
}
}
AssignmentStArea *assignArea = NULL;
// We store a pointer to this Union node in the assignment statements area.
// This is needed for compound statements project, in particular when we have
// assignment statements within an IF statement
if (getUnionForIF()) {
assignArea = bindWA->getAssignmentStArea();
setPreviousIF(assignArea->getCurrentIF());
assignArea->setCurrentIF(this);
}
//
// Bind the child nodes.
//
bindWA->getCurrentScope()->context()->inUnion() = TRUE;
currentChild() = 0;
child(0) = child(0)->bindNode(bindWA);
if (bindWA->errStatus()) return this;
// If we have assignment statements of compound statements, we need to get rid
// of the value ids generated while binding the first child. Also, we create a
// list of the value ids of the variables that are on the left side of a SET
// statement
if (getUnionForIF() && leftList() && assignArea) {
assignArea->removeLastValueIds(leftList(), this);
}
if (getCondUnary()) {
CollIndex leftDegree = child(0)->getRETDesc()->getDegree();
ItemExpr *tupleExpr = new (bindWA->wHeap()) ConstValue();
for (CollIndex i=0; i+1<leftDegree; i++) {
ItemExpr *con = new (bindWA->wHeap()) ConstValue();
ItemList *list = new (bindWA->wHeap()) ItemList(con, tupleExpr);
tupleExpr = list;
}
RelExpr *tuple = new (bindWA->wHeap()) Tuple(tupleExpr);
// create the selection predicate (1=0) for the Tuple node
ItemExpr *predicate = new (bindWA->wHeap())
BiRelat(ITM_EQUAL,
new (bindWA->wHeap()) ConstValue(1),
new (bindWA->wHeap()) ConstValue(0));
tuple->addSelPredTree(predicate);
RelExpr *tupleRoot = new (bindWA->wHeap()) RelRoot(tuple);
setChild (1, tupleRoot);
}
if (child(1)) {
if (!(child(1)->getOperator().match(REL_ANY_TSJ))) {
bindWA->getCurrentScope()->setRETDesc(NULL);
}
currentChild() = 1;
child(1) = child(1)->bindNode(bindWA);
if (bindWA->errStatus()) return this;
// If we have assignment statements of compound statements,
// we need to get rid of the value ids generated while binding
// the second child
if (getUnionForIF() && rightList() && assignArea) {
assignArea->removeLastValueIds(rightList(), this);
}
}
// check for & warn against UNIONs that have inconsistent access/lock modes.
// flag "select * from t1 union select * from t2 for <access> mode"
// with a warning that t1 and t2 may have inconsistent access/lock modes.
checkAccessLockModes();
// Copy the leftList and rightList of this conditional union to the
// appropriate list of the conditional union node pointed to by the
// previousIF argument.
Union * previousIF = getPreviousIF();
if (previousIF && getUnionForIF()) {
copyLeftRightListsToPreviousIF(previousIF, bindWA);
}
synthPropForBindChecks();
// QSTUFF
bindWA->getCurrentScope()->context()->inUnion() = FALSE;
//
// Check that there are an equal number of select items on both sides.
//
const RETDesc &leftTable = *child(0)->getRETDesc();
const RETDesc &rightTable = *child(1)->getRETDesc();
RETDesc *resultTable = NULL;
RelRoot * root = bindWA->getTopRoot() ;
if (root) {
if (getGroupAttr()->isStream() && root->hasOrderBy()){
NAString fmtdList1(bindWA->wHeap());
LIST(TableNameMap*) xtnmList1(bindWA->wHeap());
NAString fmtdList2(bindWA->wHeap());
LIST(TableNameMap*) xtnmList2(bindWA->wHeap());
leftTable.getTableList(xtnmList1, &fmtdList1);
rightTable.getTableList(xtnmList2, &fmtdList2);
*CmpCommon::diags() << DgSqlCode(-4166)
<< DgString0(fmtdList1)
<< DgString1(fmtdList2) ;
bindWA->setErrStatus();
return this;
}
}
if (leftTable.getDegree() != rightTable.getDegree()) {
#ifndef NDEBUG
dumpChildrensRETDescs(leftTable, rightTable);
#endif
if ( (!getUnionForIF()) &&
(!getCondUnary()) //for triggers
) {
// 4126 The row-value-ctors of a VALUES must be of equal degree.
// 4066 The operands of a union must be of equal degree.
// This is not necessary if we are in an assignment stmt.
Lng32 sqlcode = bindWA->getCurrentScope()->context()->inTupleList() ?
-4126 : -4066;
*CmpCommon::diags() << DgSqlCode(sqlcode);
bindWA->setErrStatus();
return this;
}
}
//
// For each select item on both sides, create a ValueIdUnion and insert its
// ValueId into the select list for the union.
//
// We check to see if there were assignments on either side
if ( !getUnionForIF() ) {
resultTable = new (bindWA->wHeap()) RETDesc(bindWA);
for (CollIndex i = 0; i < leftTable.getDegree(); i++) {
ValueIdUnion *vidUnion = new (bindWA->wHeap())
ValueIdUnion(leftTable.getValueId(i),
rightTable.getValueId(i),
NULL_VALUE_ID,
#pragma nowarn(1506) // warning elimination
getUnionFlags());
#pragma warn(1506) // warning elimination
vidUnion->setIsTrueUnion(TRUE);
vidUnion->bindNode(bindWA);
if (bindWA->errStatus()) {
delete vidUnion;
delete resultTable;
return this;
}
ValueId valId = vidUnion->getValueId();
addValueIdUnion(valId, bindWA->wHeap());
resultTable->addColumn(bindWA, leftTable.getColRefNameObj(i), valId);
}
}
else {
// Case in which we have assignment statements below this node.
// We have to carefully match the value ids in the IF and ELSE parts.
// For instance, if SET :a = ... occurs in both branches or only in one.
if (getUnionForIF() && assignArea) {
resultTable = createReturnTable(assignArea, bindWA);
}
}
setRETDesc(resultTable);
bindWA->getCurrentScope()->setRETDesc(resultTable);
// We are done binding this node. The current IF node is now the closest
// IF node that is also an ancestor of this node
if (getUnionForIF() && assignArea) {
assignArea->setCurrentIF(getPreviousIF());
}
// QSTUFF
// this is not a hard restriction. Once the get_next protocol supports unions
// similar to the split-top operator, this check can be removed.
if (getGroupAttr()->isEmbeddedUpdateOrDelete() ||
(getGroupAttr()->isEmbeddedInsert() && !isSystemGenerated_) ||
(bindWA->isEmbeddedIUDStatement())) {
if (getUnionForIF()) {
*CmpCommon::diags() << DgSqlCode(-4210);
bindWA->setErrStatus();
return this;
}
NAString fmtdList1(bindWA->wHeap());
LIST(TableNameMap*) xtnmList1(bindWA->wHeap());
NAString fmtdList2(bindWA->wHeap());
LIST(TableNameMap*) xtnmList2(bindWA->wHeap());
leftTable.getTableList(xtnmList1, &fmtdList1);
rightTable.getTableList(xtnmList2, &fmtdList2);
// Fix for Solution 10-070117-1834.
// The error message for -4161 assumed that both sides
// of the UNION are embedded operations. For a
// query such as,
// select * from (delete from t709t1)as x union all (select * from t709t1)
// the right side of the UNION is not an embedded operation.
// Hence, changing the text for 4161 to a more generic one so
// that all cases are covered in this one text message.
*CmpCommon::diags() << DgSqlCode(-4161)
<< DgString0(fmtdList1)
<< DgString1(fmtdList2);
bindWA->setErrStatus();
return this;
}
// QSTUFF
// ++MV
// Bind the alternateRightChildOrderExprTree expression.
//
ItemExpr *alternateRightChildOrderExprTree = removeAlternateRightChildOrderExprTree();
if (alternateRightChildOrderExprTree)
{
alternateRightChildOrderExprTree->
convertToValueIdList(alternateRightChildOrderExpr(), bindWA, ITM_ITEM_LIST);
if (bindWA->errStatus()) {
return NULL;
}
}
// --MV
// Bind the base class.
//
RelExpr *boundExpr = bindSelf(bindWA);
if (bindWA->errStatus()) {
delete resultTable;
return boundExpr;
}
return boundExpr;
} // Union::bindNode()
// check for & warn against UNIONs that have inconsistent access/lock modes
void Union::checkAccessLockModes()
{
Scan *left = child(0)->getAnyScanNode();
Scan *right = child(1)->getAnyScanNode();
if (!left || !right) return; // no-op.
// Warn only if the UNION is user-specified, as opposed to system-generated
// (e.g., by triggers/RI in GenericUpdate::inlinePipelineActions).
if (isSystemGenerated_) {
return;
}
Lng32 lockFlagSession = CmpCommon::transMode()->getDP2LockFlags().getValue();
StmtLevelAccessOptions optionsLeft = left->accessOptions();
StmtLevelAccessOptions optionsRight = right->accessOptions();
Lng32 lockFlagLeft = lockFlagSession;
Lng32 lockFlagRight = lockFlagSession;
if (optionsLeft.userSpecified()) {
lockFlagLeft = optionsLeft.getDP2LockFlags().getValue();
}
if (optionsRight.userSpecified()) {
lockFlagRight = optionsRight.getDP2LockFlags().getValue();
}
if (lockFlagLeft != lockFlagRight) {
*CmpCommon::diags()
<< DgSqlCode(3192)
<< DgString0(left->getTableName().getQualifiedNameAsString())
<< DgString1(right->getTableName().getQualifiedNameAsString());
}
} // Union::checkAccessLockModes()
void Union::copyLeftRightListsToPreviousIF(Union * previousIF, BindWA * bindWA)
{
AssignmentStHostVars *thisLeftList = leftList();
AssignmentStHostVars *thisRightList = rightList();
// If the previous IF node does not have a left list, we copy the left and right
// lists to that left list
if (previousIF->currentChild() == 0 && !(previousIF->leftList())) {
AssignmentStHostVars *leftListOfPreviousIF = previousIF->getCurrentList(bindWA);
// Copy the leftList of this node to the left list of the previous IF
leftListOfPreviousIF->addAllToListInIF(thisLeftList) ;
// Copy the rightList of this node to the left list of the previous IF
leftListOfPreviousIF->addAllToListInIF(thisRightList) ;
}
// If the previous IF node does not have a right list, we copy the left and right
// lists to that right list
if (previousIF->currentChild() == 1 && !(previousIF->rightList())) {
AssignmentStHostVars *rightListOfPreviousIF = previousIF->getCurrentList(bindWA);
// Copy the leftList of this node to the right list of the previous IF
rightListOfPreviousIF->addAllToListInIF(thisLeftList) ;
// Copy the rightList of this node to the right list of the previous IF
rightListOfPreviousIF->addAllToListInIF(thisRightList) ;
}
} // Union::copyLeftRightListsToPreviousIF
// -----------------------------------------------------------------------
// MV --
// A debugging method for dumping the columns in the RETDesc of both
// children when they do not match.
void Union::dumpChildrensRETDescs(const RETDesc& leftTable,
const RETDesc& rightTable)
{
#ifndef NDEBUG
// -- MVs. Debugging code !!!!! TBD
fprintf(stdout, " # Left Right\n");
CollIndex maxIndex, minIndex;
NABoolean leftIsBigger;
if (leftTable.getDegree() > rightTable.getDegree())
{
maxIndex = leftTable.getDegree();
minIndex = rightTable.getDegree();
leftIsBigger = TRUE;
}
else
{
maxIndex = rightTable.getDegree();
minIndex = leftTable.getDegree();
leftIsBigger = FALSE;
}
for (CollIndex i=0; i<minIndex; i++)
{
ColumnDesc *leftColDesc = leftTable.getColumnList()->at(i);
ColumnDesc *rightColDesc = rightTable.getColumnList()->at(i);
NAString leftCol (leftColDesc->getColRefNameObj().getColRefAsString());
NAString rightCol(rightColDesc->getColRefNameObj().getColRefAsString());
fprintf(stdout, " %3d %-55s %-55s \n",
i, leftCol.data(), rightCol.data());
}
if (leftIsBigger)
{
for (CollIndex j=minIndex; j<maxIndex; j++)
{
ColumnDesc *leftColDesc = leftTable.getColumnList()->at(j);
NAString leftCol(leftColDesc->getColRefNameObj().getColRefAsString());
fprintf(stdout, " %3d %-35s\n",
j, leftCol.data());
}
}
else
{
for (CollIndex k=minIndex; k<maxIndex; k++)
{
ColumnDesc *rightColDesc = rightTable.getColumnList()->at(k);
NAString rightCol(rightColDesc->getColRefNameObj().getColRefAsString());
fprintf(stdout, " %3d %-35s \n",
k, rightCol.data());
}
}
#endif
}
// ----------------------------------------------------------------------
// static helper functions for classes RelRoot and GroupByAgg
// ----------------------------------------------------------------------
static NABoolean containsGenericUpdate(const RelExpr *re)
{
if (re->getOperator().match(REL_ANY_GEN_UPDATE)) return TRUE;
for (Int32 i = 0; i < re->getArity(); ++i ) {
if (re->child(i) && containsGenericUpdate(re->child(i))) return TRUE;
}
return FALSE;
}
static NABoolean containsUpdateOrDelete(const RelExpr *re)
{
if (re->getOperator().match(REL_ANY_UPDATE_DELETE))
return TRUE;
for (Int32 i = 0; i < re->getArity(); ++i ) {
if (re->child(i) && containsUpdateOrDelete(re->child(i)))
return TRUE;
}
return FALSE;
}
// QSTUFF
static GenericUpdate *getGenericUpdate(RelExpr *re)
{
if (re) {
if (re->getOperatorType() == REL_UNARY_UPDATE ||
re->getOperatorType() == REL_UNARY_DELETE)
return (GenericUpdate *)re;
for (Int32 i = 0; i < re->getArity(); ++i) { // check all children (both sides)
GenericUpdate *gu = getGenericUpdate(re->child(i));
if (gu) return gu;
}
}
return NULL;
}
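// Report error 4015 for any aggregates left unresolved in the current
// scope; a hypothetical example of such a misplacement is an aggregate
// in a WHERE clause:
//   SELECT a FROM t WHERE SUM(b) > 10;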
static NABoolean checkUnresolvedAggregates(BindWA *bindWA)
{
const ValueIdSet &aggs = bindWA->getCurrentScope()->getUnresolvedAggregates();
if (aggs.isEmpty()) return FALSE; // no error
NAString unparsed(bindWA->wHeap());
for (ValueId vid = aggs.init(); aggs.next(vid); aggs.advance(vid)) {
const ItemExpr *ie = vid.getItemExpr();
CMPASSERT(ie->isAnAggregate());
Aggregate *agg = (Aggregate *)ie;
// Don't display the COUNT() part of SUM()/COUNTxxx(), our implementation
// of AVG(). Display only the COUNT_NONULL() of our implementation of
// VARIANCE and STDDEV. This is to avoid printing the aggregate
// functions more than once.
if((agg->origOpType() != ITM_AVG || agg->getOperatorType() == ITM_SUM) &&
(!(agg->origOpType() == ITM_STDDEV || agg->origOpType() == ITM_VARIANCE)
|| agg->getOperatorType() == ITM_COUNT_NONULL)){
unparsed += ", ";
if (agg->origOpType() == ITM_COUNT_STAR__ORIGINALLY)
unparsed += "COUNT(*)";
else
agg->unparse(unparsed, DEFAULT_PHASE, USER_FORMAT_DELUXE);
}
}
unparsed.remove(0,2); // remove initial ", "
// 4015 Aggregate functions placed incorrectly.
*CmpCommon::diags() << DgSqlCode(-4015) << DgString0(unparsed);
bindWA->setErrStatus();
return TRUE;
} // checkUnresolvedAggregates()
// ----------------------------------------------------------------------
// member functions for class RelRoot
// ----------------------------------------------------------------------
static NABoolean isRenamedColInSelList(BindWA * bindWA, ItemExpr * col,
ItemExprList &origSelectList,
CollIndex &indx,
RETDesc * childRETDesc)
{
if (col->getOperatorType() != ITM_REFERENCE)
return FALSE;
ColReference * havingColReference = (ColReference*)col;
CollIndex j = 0;
NABoolean found = FALSE;
while (j < origSelectList.entries())
{
ItemExpr * selectListEntry = origSelectList[j];
if (selectListEntry->getOperatorType() == ITM_RENAME_COL)
{
const ColRefName &selectListColRefName =
*((RenameCol *)selectListEntry)->getNewColRefName();
if (havingColReference->getColRefNameObj() == selectListColRefName)
{
if (found)
{
// multiple entries with the same name. Error.
*CmpCommon::diags() << DgSqlCode(-4195)
<< DgString0(selectListColRefName.getColName());
bindWA->setErrStatus();
return FALSE;
}
ColumnNameMap *baseColExpr = NULL;
if (childRETDesc)
baseColExpr = childRETDesc->findColumn(selectListColRefName);
if ( NOT baseColExpr)
{
found = TRUE;
indx = j;
}
}
} // rename col
j++;
} // while
return found;
}
static short replaceRenamedColInHavingWithSelIndex(
BindWA * bindWA,
ItemExpr * expr,
ItemExprList &origSelectList,
NABoolean &replaced,
NABoolean ¬AllowedWithSelIndexInHaving,
RETDesc * childRETDesc)
{
if (((expr->getOperatorType() >= ITM_ROW_SUBQUERY) &&
(expr->getOperatorType() <= ITM_GREATER_EQ_ANY)) ||
((expr->getOperatorType() >= ITM_AVG) &&
(expr->getOperatorType() <= ITM_VARIANCE)) ||
((expr->getOperatorType() >= ITM_DIFF1) &&
(expr->getOperatorType() <= ITM_NOT_THIS)))
{
notAllowedWithSelIndexInHaving = TRUE;
return 0;
}
for (Int32 i = 0; i < expr->getArity(); i++)
{
CollIndex j = 0;
if (isRenamedColInSelList(bindWA, expr->child(i), origSelectList,
j, childRETDesc))
{
SelIndex * selIndex = new(bindWA->wHeap()) SelIndex(j+1);
expr->setChild(i, selIndex);
replaced = TRUE;
}
else if (bindWA->errStatus())
return -1;
else if (replaceRenamedColInHavingWithSelIndex(
bindWA, expr->child(i), origSelectList, replaced,
notAllowedWithSelIndexInHaving, childRETDesc))
return -1;
}
return 0;
}
static short setValueIdForRenamedColsInHaving(BindWA * bindWA,
ItemExpr * expr,
ValueIdList &compExpr)
{
if (((expr->getOperatorType() >= ITM_ROW_SUBQUERY) &&
(expr->getOperatorType() <= ITM_GREATER_EQ_ANY)) ||
((expr->getOperatorType() >= ITM_AVG) &&
(expr->getOperatorType() <= ITM_VARIANCE)) ||
((expr->getOperatorType() >= ITM_DIFF1) &&
(expr->getOperatorType() <= ITM_NOT_THIS)))
{
return 0;
}
for (Int32 i = 0; i < expr->getArity(); i++)
{
if (expr->child(i)->getOperatorType() == ITM_SEL_INDEX)
{
SelIndex * si = (SelIndex*)expr->child(i)->castToItemExpr();
si->setValueId(compExpr[si->getSelIndex()-1]);
}
else
setValueIdForRenamedColsInHaving(bindWA, expr->child(i), compExpr);
}
return 0;
}
// Method to update the SelIndex items after we have gone through a
// select-list expansion due to MVFs or subqueries with degree > 1;
// used to update the orderByTree.
//
// Populates updatedIndecies with the ValueIds of the SelIndex items
// that were updated.
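// For example (hypothetical): if select-list item 3 is an MVF that
// expands into two outputs, every SelIndex greater than 3 must be
// bumped by an offset of 1 so it still names the same select item.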
static void fixUpSelectIndecies(ItemExpr * expr, ValueIdSet &updatedIndecies,
CollIndex idx, CollIndex offset)
{
if (expr == NULL ) return;
for (Int32 i = 0; i < expr->getArity(); i++)
{
// Only update ones that we haven't already done.
if ((expr->child(i)->getOperatorType() == ITM_SEL_INDEX) &&
!updatedIndecies.contains(expr->child(i)->getValueId()))
{
SelIndex * si = (SelIndex*)expr->child(i)->castToItemExpr();
if (si->getSelIndex() > idx)
{
si->setSelIndex(si->getSelIndex() + offset);
updatedIndecies += si->getValueId();
}
}
else
fixUpSelectIndecies(expr->child(i), updatedIndecies, idx, offset);
}
// Now check myself..
// Only update ones that we haven't already done.
if ((expr->getOperatorType() == ITM_SEL_INDEX) &&
!updatedIndecies.contains(expr->getValueId()))
{
SelIndex * si = (SelIndex*)expr->castToItemExpr();
if (si->getSelIndex() > idx)
{
si->setSelIndex(si->getSelIndex() + offset);
updatedIndecies += si->getValueId();
}
}
}
// Method to update the SelIndex items after we have gone through a
// select-list expansion due to MVFs or subqueries with degree > 1;
// used to update the GroupByList.
//
// Populates updatedIndecies with the ValueIds of the SelIndex items
// that were updated.
static void fixUpSelectIndeciesInSet(ValueIdSet & expr,
ValueIdSet &updatedIndecies,
CollIndex idx,
CollIndex offset)
{
for (ValueId vid = expr.init(); expr.next(vid); expr.advance(vid))
{
// Only update ones that we haven't already done.
if (((ItemExpr *)vid.getItemExpr())->getOperatorType() == ITM_SEL_INDEX &&
!updatedIndecies.contains(vid))
{
SelIndex * si = (SelIndex*) vid.getItemExpr();
if (si->getSelIndex() > idx)
{
si->setSelIndex(si->getSelIndex() + offset);
updatedIndecies += si->getValueId();
}
}
}
}
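// In MODE_SPECIAL_4, transformOrderByWithExpr rewrites an ORDER BY
// expression that duplicates a select-list expression into an ordinal
// reference; a hypothetical example:
//   SELECT a+b FROM t ORDER BY a+b;   -- becomes ORDER BY 1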
RelRoot * RelRoot::transformOrderByWithExpr(BindWA *bindWA)
{
NABoolean specialMode = (CmpCommon::getDefault(MODE_SPECIAL_4) == DF_ON);
if (NOT specialMode)
return this;
ItemExprList origSelectList(bindWA->wHeap());
ItemExprList origOrderByList(bindWA->wHeap());
CollIndex origSelectListCount ;
if ((getCompExprTree() == NULL) &&
(child(0)->getOperatorType() != REL_GROUPBY))
{
return this;
}
ItemExpr *orderByTree = getOrderByTree();
if (!orderByTree)
return this;
origOrderByList.insertTree(orderByTree);
if (getCompExprTree())
origSelectList.insertTree(getCompExprTree());
else if (child(0)->getOperatorType() == REL_GROUPBY)
{
// this is the case: select distinct <expr> from t order by <expr>
GroupByAgg * grby = (GroupByAgg *)(child(0)->castToRelExpr());
if (grby->child(0) && grby->child(0)->getOperatorType() == REL_ROOT)
{
RelRoot * selRoot = (RelRoot*)grby->child(0)->castToRelExpr();
if (selRoot->getCompExprTree())
origSelectList.insertTree(selRoot->getCompExprTree());
}
}
Lng32 selListCount = origSelectList.entries();
// if there is an expression in the order by list and this expression matches
// a select list expression, then replace it with the index of that select list item.
ItemExprList newOrderByList((Lng32)origOrderByList.entries(), bindWA->wHeap());
NABoolean orderByExprFound = FALSE;
for (Lng32 i = 0; i < origOrderByList.entries(); i++)
{
ItemExpr * currOrderByItemExpr = origOrderByList[i];
NABoolean isDesc = FALSE;
if (currOrderByItemExpr->getOperatorType() == ITM_INVERSE)
{
currOrderByItemExpr = currOrderByItemExpr->child(0)->castToItemExpr();
isDesc = TRUE;
}
if (NOT ((currOrderByItemExpr->getOperatorType() == ITM_SEL_INDEX) ||
(currOrderByItemExpr->getOperatorType() == ITM_REFERENCE) ||
(currOrderByItemExpr->getOperatorType() == ITM_CONSTANT)))
{
NABoolean found = FALSE;
Lng32 selListIndex = 0;
ItemExpr * selItem = NULL;
while ((NOT found) && (selListIndex < selListCount))
{
selItem = origSelectList[selListIndex];
found = currOrderByItemExpr->duplicateMatch(*selItem);
if (NOT found)
selListIndex++;
}
if (NOT found)
{
*CmpCommon::diags() << DgSqlCode(-4197)
<< DgString0("ORDER BY");
bindWA->setErrStatus();
return NULL;
}
selItem->setInOrderByOrdinal(TRUE);
currOrderByItemExpr = new(bindWA->wHeap()) SelIndex(selListIndex+1);
if (isDesc)
{
currOrderByItemExpr = new(bindWA->wHeap()) InverseOrder(currOrderByItemExpr);
}
orderByExprFound = TRUE;
} // if order by expr
newOrderByList.insert(currOrderByItemExpr);
}
if ((orderByExprFound) &&
(newOrderByList.entries() > 0))
{
removeOrderByTree();
addOrderByTree(newOrderByList.convertToItemExpr());
}
return this;
}
///////////////////////////////////////////////////////////////////////////
//
// This methods performs the following in this order:
//
// If groupby name refers to a renamed col name in the select list,
// replace group by entry with ordinal position of that sel list entry.
//
// If groupby ordinal exceeds the number of select list elements,
// return error.
//
// If groupby ordinal refers to a '*', return error.
//
// If groupby ordinal refers to a column(ITM_REFERENCE) or a renamed
// col name(ITM_RENAME_COL) whose child is a column(ITM_REFERENCE),
// replace ordinal with actual col name.
//
// If there are ordinals in group by list, mark RelRoot indicating
// phase2 transformation is needed.
//
// Mark all select list item exprs which are referenced as an ordinal to
// indicate that groupby check to validate grouping columns is not needed
// for the subtree rooted below that select list item.
//
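// Hypothetical examples of the two directions:
//   SELECT a AS x, SUM(b) FROM t GROUP BY x;  -- x becomes ordinal 1
//   SELECT a, SUM(b) FROM t GROUP BY 1;       -- ordinal 1 becomes col a
//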
///////////////////////////////////////////////////////////////////////////
RelRoot * RelRoot::transformGroupByWithOrdinalPhase1(BindWA *bindWA)
{
NABoolean specialMode =
((CmpCommon::getDefault(MODE_SPECIAL_1) == DF_ON) ||
(CmpCommon::getDefault(MODE_SPECIAL_2) == DF_ON) ||
(CmpCommon::getDefault(MODE_SPECIAL_4) == DF_ON));
if ((CmpCommon::getDefault(GROUP_BY_USING_ORDINAL) == DF_OFF) &&
(NOT specialMode))
return this;
// Make sure the child of the root is a groupby node, or a sequence node
// whose child is a groupby node,
// and has a groupby clause if in specialMode.
if (child(0)->getOperatorType() != REL_GROUPBY &&
(child(0)->getOperatorType() != REL_SEQUENCE ||
(child(0)->child(0) && child(0)->child(0)->getOperatorType()!=REL_GROUPBY)))
return this;
NABoolean compExprTreeIsNull = FALSE;
CollIndex origSelectListCount ;
if (getCompExprTree() == NULL)
{
compExprTreeIsNull = TRUE;
origSelectListCount = 0;
// return this;
}
GroupByAgg * grby;
if (child(0)->getOperatorType() == REL_GROUPBY)
{
grby = (GroupByAgg *)(child(0)->castToRelExpr());
}
else
{// sequence node above group by
grby = (GroupByAgg *)(child(0)->child(0)->castToRelExpr());
}
DCMPASSERT(grby != NULL);
if ((NOT specialMode) &&
(grby->getGroupExprTree() == NULL))
return this;
ItemExpr * groupExprTree = grby->getGroupExprTree();
ItemExprList origSelectList(bindWA->wHeap());
ItemExprList origGrbyList(bindWA->wHeap());
if (groupExprTree)
{
origGrbyList.insertTree(groupExprTree);
}
if (NOT compExprTreeIsNull)
{
origSelectList.insertTree(getCompExprTree());
origSelectListCount = origSelectList.entries();
}
ItemExprList newGroupByList((Lng32)origGrbyList.entries(), bindWA->wHeap());
NABoolean foundSelIndex = FALSE;
NABoolean lookForRenamedCols = TRUE;
if ((CmpCommon::getDefault(GROUP_BY_USING_ORDINAL) != DF_ALL) &&
(NOT specialMode))
lookForRenamedCols = FALSE;
NABoolean lookForExprInGroupByClause = TRUE;
if (CmpCommon::getDefault(COMP_BOOL_92) == DF_ON)
lookForExprInGroupByClause = FALSE;
// See if UDF_SUBQ_IN_AGGS_AND_GBYS is enabled; here it is considered
// enabled if the default is ON or SYSTEM.
NABoolean udfSubqInAggGrby_Enabled = FALSE;
DefaultToken udfSubqTok = CmpCommon::getDefault(UDF_SUBQ_IN_AGGS_AND_GBYS);
if ((udfSubqTok == DF_ON) ||
(udfSubqTok == DF_SYSTEM))
udfSubqInAggGrby_Enabled = TRUE;
// This list will store duplicate expressions specified in the select list
// and the GroupBy clause. It helps with assigning the select index as
// well as marking the InGroupByOrdinal flag correctly (Gen Sol:10-100129-7836)
NAList<CollIndex> listOfExpressions(CmpCommon::statementHeap());
for (CollIndex i = 0; (i < (CollIndex) origGrbyList.entries());i++)
{
ItemExpr * currGroupByItemExpr =
((ItemExpr *) origGrbyList[i])->castToItemExpr();
ItemExpr * newGroupByItemExpr = NULL;
NABoolean selIndexError = FALSE;
Int64 selIndex = -1;
if (currGroupByItemExpr->getOperatorType() == ITM_CONSTANT)
{
ConstValue * cv = (ConstValue*)currGroupByItemExpr;
if ((cv->canGetExactNumericValue()) &&
(cv->getType()->getScale() == 0))
{
selIndex = cv->getExactNumericValue();
if ((selIndex >= 0) && (selIndex < MAX_COMSINT32))
{
if (selIndex == 0 || selIndex > origSelectListCount)
{
// remember that this select index is in error.
// Look for this constant in the select list.
// If it is not found, then this const will be
// treated as a select index and an error will
// be returned. If it is found in the select list,
// then it will be treated as a group by expression.
selIndexError = TRUE;
}
else
currGroupByItemExpr =
new(bindWA->wHeap()) SelIndex((Lng32)selIndex);
}
}
}
NABoolean found = FALSE;
if ((currGroupByItemExpr->getOperatorType() != ITM_REFERENCE) &&
(currGroupByItemExpr->getOperatorType() != ITM_SEL_INDEX) &&
(lookForExprInGroupByClause))
{
Int32 selListIndex = -1, lastMatch = -1;
CollIndex j = 0;
while ((NOT found) && (j < origSelectListCount))
{
ItemExpr * selectListEntry = origSelectList[j];
if ((selectListEntry->getOperatorType() != ITM_REFERENCE) &&
((selectListEntry->getOperatorType() != ITM_RENAME_COL) ||
((selectListEntry->child(0)) &&
(selectListEntry->child(0)->getOperatorType() != ITM_REFERENCE))))
{
ItemExpr * renameColEntry = NULL;
if (selectListEntry->getOperatorType() == ITM_RENAME_COL)
{
renameColEntry = selectListEntry;
selectListEntry = selectListEntry->child(0);
}
found =
currGroupByItemExpr->duplicateMatch(*selectListEntry);
if (found)
{
lastMatch = j;
if(!listOfExpressions.contains(j))
{
selListIndex = j;
listOfExpressions.insert(j);
selectListEntry->setInGroupByOrdinal(TRUE);
if (renameColEntry)
renameColEntry->setInGroupByOrdinal(TRUE);
}
else
found = FALSE;
}
}
j++;
} // while
if(lastMatch != -1)
{
found = TRUE;
if(selListIndex == -1)
selListIndex = lastMatch;
if (bindWA->inViewDefinition())
currGroupByItemExpr =
new(bindWA->wHeap()) SelIndex(selListIndex+1,
currGroupByItemExpr);
else
currGroupByItemExpr = new(bindWA->wHeap()) SelIndex(selListIndex+1);
}
} // expr in group by clause
if ((NOT found) &&
(selIndexError) &&
(selIndex > 0))
{
// this const was not found in the select list and it was
// not a valid select index.
// Return an error.
*CmpCommon::diags() << DgSqlCode(-4007)
<< DgInt0((Lng32)selIndex)
<< DgInt1((Lng32)origSelectList.entries());
bindWA->setErrStatus();
return NULL;
}
if (compExprTreeIsNull)
return this;
if (currGroupByItemExpr->getOperatorType() == ITM_SEL_INDEX)
{
SelIndex * si = (SelIndex*)currGroupByItemExpr;
if (si->getSelIndex() > origSelectList.entries())
{
*CmpCommon::diags() << DgSqlCode(-4007)
<< DgInt0((Lng32)si->getSelIndex())
<< DgInt1((Lng32)origSelectList.entries());
bindWA->setErrStatus();
return NULL;
}
ItemExpr * selectListEntry = origSelectList[si->getSelIndex()-1];
if ((selectListEntry->getOperatorType() == ITM_RENAME_COL) &&
(selectListEntry->child(0)->getOperatorType() == ITM_REFERENCE))
{
// make a copy of this entry's child
newGroupByItemExpr =
selectListEntry->child(0)->
castToItemExpr()->copyTopNode(NULL, bindWA->wHeap());
}
else if (selectListEntry->getOperatorType() == ITM_REFERENCE)
{
if (((ColReference*)selectListEntry)-> getColRefNameObj().isStar())
{
*CmpCommon::diags() << DgSqlCode(-4185) ;
bindWA->setErrStatus();
return NULL;
}
// make a copy of this entry
newGroupByItemExpr =
selectListEntry->copyTopNode(NULL, bindWA->wHeap());
}
else
{
selectListEntry->setInGroupByOrdinal(TRUE);
newGroupByItemExpr = currGroupByItemExpr;
}
foundSelIndex = TRUE;
} // group by ordinal
else if (currGroupByItemExpr->getOperatorType() == ITM_REFERENCE)
{
ColReference * groupByColReference =
(ColReference*)currGroupByItemExpr;
// find out if this ColReference name is a renamed col in the
// select list.
if (lookForRenamedCols &&
groupByColReference->getCorrNameObj().getQualifiedNameObj().getObjectName().length() == 0)
{
NABoolean renamedColsInSelectList = FALSE;
CollIndex j = 0;
NABoolean found = FALSE;
while (j < origSelectList.entries())
{
ItemExpr * selectListEntry = origSelectList[j];
if (selectListEntry->getOperatorType() == ITM_RENAME_COL)
{
renamedColsInSelectList = TRUE;
const ColRefName &selectListColRefName =
*((RenameCol *)selectListEntry)->getNewColRefName();
if (groupByColReference->getColRefNameObj().getColName()
== selectListColRefName.getColName())
{
if (found)
{
// multiple entries with the same name. Error.
*CmpCommon::diags() << DgSqlCode(-4195)
<< DgString0(selectListColRefName.getColName());
bindWA->setErrStatus();
return NULL;
}
foundSelIndex = TRUE;
selectListEntry->setInGroupByOrdinal(TRUE);
newGroupByItemExpr =
new(bindWA->wHeap()) SelIndex(j+1);
((SelIndex *) newGroupByItemExpr)->
setRenamedColNameInGrbyClause(TRUE);
found = TRUE;
}
} // rename col
j++;
} // while
if ((NOT renamedColsInSelectList) &&
(j == origSelectList.entries()))
lookForRenamedCols = FALSE;
} // lookForRenamedCols
if (! newGroupByItemExpr)
newGroupByItemExpr = currGroupByItemExpr;
} // else foundSelIndex
else if ((currGroupByItemExpr->getOperatorType() == ITM_USER_DEF_FUNCTION) &&
(udfSubqInAggGrby_Enabled))
newGroupByItemExpr = currGroupByItemExpr;
else if ((currGroupByItemExpr->getOperatorType() == ITM_ROW_SUBQUERY) &&
(udfSubqInAggGrby_Enabled))
newGroupByItemExpr = currGroupByItemExpr;
else
{
*CmpCommon::diags() << DgSqlCode(-4197)
<< DgString0("GROUP BY");
bindWA->setErrStatus();
return NULL;
}
newGroupByList.insert(newGroupByItemExpr);
} // for
if ((foundSelIndex) &&
(newGroupByList.entries() > 0))
{
grby->removeGroupExprTree();
grby->addGroupExprTree(newGroupByList.convertToItemExpr());
}
if ((CmpCommon::getDefault(GROUP_BY_USING_ORDINAL) != DF_OFF) ||
(specialMode)) {
grby->setParentRootSelectList(getCompExprTree());
}
// if order by and group by are specified, check to see that
// all columns specified in the order by clause are also present
// in the group by clause.
allOrderByRefsInGby_ = FALSE;
if ((specialMode) &&
(getOrderByTree()) &&
(grby->getGroupExprTree() != NULL))
{
ItemExpr *orderByTree = getOrderByTree();
ItemExprList orderByList(orderByTree, bindWA->wHeap());
ItemExprList groupByList(grby->getGroupExprTree(), bindWA->wHeap());
allOrderByRefsInGby_ = TRUE;
for (CollIndex ii = 0; ii < orderByList.entries(); ii++)
{
ItemExpr * colRef = orderByList[ii];
if (colRef->getOperatorType() == ITM_INVERSE)
colRef = colRef->child(0)->castToItemExpr();
if (colRef && colRef->getOperatorType() == ITM_REFERENCE)
{
ColReference * obyColRef = (ColReference*)colRef;
NABoolean found = FALSE;
for (CollIndex j = 0; j < groupByList.entries(); j++)
{
ItemExpr * gbyExpr = groupByList[j];
if (gbyExpr->getOperatorType() == ITM_REFERENCE)
{
ColReference * gbyColRef = (ColReference*)gbyExpr;
if (obyColRef->getColRefNameObj().getColName() ==
gbyColRef->getColRefNameObj().getColName())
{
found = TRUE;
break;
}
} // if
} // for
if (NOT found)
{
allOrderByRefsInGby_ = FALSE;
break;
}
} // if
} // for
} // if
return this;
}
RelRoot * RelRoot::transformGroupByWithOrdinalPhase2(BindWA *bindWA)
{
NABoolean specialMode =
((CmpCommon::getDefault(MODE_SPECIAL_1) == DF_ON) ||
(CmpCommon::getDefault(MODE_SPECIAL_2) == DF_ON) ||
(CmpCommon::getDefault(MODE_SPECIAL_4) == DF_ON));
// Make sure the child of the root is a groupby node, or a sequence node
// whose child is a groupby node.
if (child(0)->getOperatorType() != REL_GROUPBY &&
(child(0)->getOperatorType() != REL_SEQUENCE ||
(child(0)->child(0) && child(0)->child(0)->getOperatorType()!=REL_GROUPBY)))
return this;
GroupByAgg * grby;
RelSequence * seqNode=NULL;
if (child(0)->getOperatorType() == REL_GROUPBY )
{
grby=(GroupByAgg *)(child(0)->castToRelExpr());
}
else
{//sequence node above group by
grby=(GroupByAgg *)(child(0)->child(0)->castToRelExpr());
seqNode=(RelSequence *)(child(0)->castToRelExpr());
}
DCMPASSERT(grby != NULL);
ValueIdSet &groupExpr = grby->groupExpr();
// copy of groupExpr used to identify the changed
// value ids
ValueIdSet groupExprCpy(grby->groupExpr());
// When we encounter subqueries or MVFs in the select list,
// these get expanded at bind time, so the select indexes have to
// be offset by the expansion amount, since the sel_index numbers
// reflect the select list at parse time.
for (ValueId vid = groupExpr.init();
groupExpr.next(vid);
groupExpr.advance(vid))
{
if (vid.getItemExpr()->getOperatorType() == ITM_SEL_INDEX)
{
CollIndex selIndexExpansionOffset = 0;
SelIndex * si = (SelIndex*)(vid.getItemExpr());
ValueId grpById =
compExpr()[si->getSelIndex() -1];
si->setValueId(grpById);
if (child(0)->getOperatorType() != REL_SEQUENCE)
{
groupExprCpy.remove(vid);
groupExprCpy.insert(grpById);
}
else
{ //sequence
CMPASSERT(seqNode);
const ValueIdSet seqCols = ((const RelSequence*)seqNode)->sequencedColumns();
ItemExpr * ie = grpById.getItemExpr();
ItemExpr::removeNotCoveredFromExprTree(ie,seqCols);
//ie = ie->copyTree(bindWA->wHeap());
//ie = ie->bindNode(bindWA);
if (bindWA->errStatus())
return NULL;
groupExprCpy.remove(vid);
groupExprCpy.insert(ie->getValueId());
ie = new (bindWA->wHeap()) NotCovered(ie);
ie->synthTypeAndValueId();
compExpr()[si->getSelIndex()-1] = ie->getValueId();
seqNode->addSequencedColumn(ie->getValueId());
}
switch (grpById.getItemExpr()->getOperatorType())
{
case ITM_VALUEID_PROXY:
{
ValueId derivedId =
(( ValueIdProxy *)(grpById.getItemExpr()))->isDerivedFrom();
// If this is not the ValueIdProxy that represents the MVF or Subq
// skip the expansion.
if ((( ValueIdProxy *)(grpById.getItemExpr()))->
needToTransformChild() != TRUE) break;
ValueIdList outputs;
switch (derivedId.getItemExpr()->getOperatorType())
{
case ITM_USER_DEF_FUNCTION:
{
// When we reference a UDF in the groupBy clause,
// if the UDF is a MVF(has multiple outputs), we need to add
// the other elements from the MVF's outputs.
// These elements have already been expanded into the
// select list, so all we need to do is to add them to the
// groupby expression.
// By default, we associate the valueId of the MVF with
// its first output, so we just need to copy the rest of the
// outputs.
UDFunction *udf = (UDFunction *) derivedId.getItemExpr();
const RoutineDesc *rDesc = udf->getRoutineDesc();
outputs = rDesc->getOutputColumnList();
break;
}
case ITM_ROW_SUBQUERY:
{
// When we reference a subquery in the groupBy clause,
// if the subquery has a degree > 1, we need to add the other
// elements from the subquery's select list.
Subquery *subq = (Subquery *) derivedId.getItemExpr();
RelRoot *subqRoot = (RelRoot *) subq->getSubquery();
outputs = subqRoot->compExpr();
break;
}
default:
CMPASSERT(0); // we don't support anything else
}
// Add in the other outputs from the MVF/Subquery
for (CollIndex i=1; i < outputs.entries(); i++)
{
selIndexExpansionOffset ++;
groupExprCpy.insert(outputs[i]);
}
          // Need to check the groupBy and orderBy lists
          // for selIndexes with an index greater than this one.
          // If we find one, bump its index into the select list by
          // the expansion.
ValueIdSet fixedUpIndecies;
fixUpSelectIndeciesInSet(grby->groupExpr(),fixedUpIndecies,
si->getSelIndex(),
selIndexExpansionOffset);
fixUpSelectIndecies(getOrderByTree(), fixedUpIndecies,
si->getSelIndex(),
selIndexExpansionOffset);
break;
}
}
// Now that we have swapped the vid list from grouping
// expression to the corresponding one from select list
// go thru each expression, collect the base columns
// and mark each column as referenced for histogram.
// Since this is only for group by, we will get only single
// interval histograms - 10-081015-6557
ValueIdSet columns;
grpById.getItemExpr()->findAll(ITM_BASECOLUMN, columns, TRUE, TRUE);
for (ValueId id = columns.init();
columns.next(id);
columns.advance(id))
{
NAColumn *nacol = id.getNAColumn();
if (nacol->isReferencedForHistogram())
continue;
nacol->setReferencedForSingleIntHist();
}
} // found Sel Index
}
// recreate the groupExpr expression after updating the value ids
grby->setGroupExpr (groupExprCpy);
if (((CmpCommon::getDefault(GROUP_BY_USING_ORDINAL) != DF_OFF) ||
(specialMode)) &&
(grby->selPredTree()) &&
(grby->selIndexInHaving()))
{
setValueIdForRenamedColsInHaving(bindWA, grby->selPredTree(),
compExpr());
BindScope *currScope = bindWA->getCurrentScope();
ItemExpr *havingPred = grby->removeSelPredTree();
currScope->context()->inHavingClause() = TRUE;
havingPred->convertToValueIdSet(grby->selectionPred(),
bindWA, ITM_AND);
currScope->context()->inHavingClause() = FALSE;
if (bindWA->errStatus())
return this;
}
if (orderByTree_ && seqNode && grby)
{
ItemExprList origOrderByList(bindWA->wHeap());
origOrderByList.insertTree(orderByTree_);
ItemExprList newOrderByList((Lng32)origOrderByList.entries(), bindWA->wHeap());
for (CollIndex i = 0; (i < (CollIndex) origOrderByList.entries());i++)
{
ItemExpr * currOrderByItemExpr =
((ItemExpr *) origOrderByList[i])->castToItemExpr();
ItemExpr * newOrderByItemExpr = currOrderByItemExpr;
if (currOrderByItemExpr->getOperatorType() == ITM_SEL_INDEX)
{
SelIndex * si = (SelIndex*)(currOrderByItemExpr);
if (compExpr()[si->getSelIndex()-1].getItemExpr()->getOperatorType() != ITM_BASECOLUMN)
{
newOrderByItemExpr = compExpr()[si->getSelIndex()-1].getItemExpr();
}
}
newOrderByList.insert(newOrderByItemExpr);
}
orderByTree_ = newOrderByList.convertToItemExpr();
}
return this;
}
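// transformTDPartitionOrdinals -
// For TD-style OLAP functions, resolve constants in the PARTITION BY
// clause of the Sequence node as ordinal references into the select
// list, so that "partition by 1" refers to the first select list item.
// An out-of-range ordinal raises error 4366.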
void RelRoot::transformTDPartitionOrdinals(BindWA *bindWA)
{
if(!getHasTDFunctions())
return ;
if (getCompExprTree() == NULL)
return ;
BindScope *currScope = bindWA->getCurrentScope();
RelExpr * realChildNode = NULL;
if (child(0)->getOperatorType() == REL_FIRST_N)
{
realChildNode = child(0)->child(0);
}
else
{
realChildNode = child(0);
}
if(realChildNode->getOperatorType() != REL_SEQUENCE )
{
return;
}
RelSequence * seqNode = (RelSequence *)realChildNode;
if (!seqNode->getPartitionBy())
{
return;
}
ItemExpr * partitionBy = seqNode->getPartitionBy()->copyTree(bindWA->wHeap());
ItemExprList origSelectList(getCompExprTree(), bindWA->wHeap());
ItemExprList origPartitionByList(bindWA->wHeap());
if (partitionBy)
{
origPartitionByList.insertTree(partitionBy);
}
for (CollIndex i = 0; (i < (CollIndex) origPartitionByList.entries());i++)
{
ItemExpr * currPartitionByItemExpr =
((ItemExpr *) origPartitionByList[i])->castToItemExpr();
NABoolean selIndexError = FALSE;
Int64 selIndex = -1;
if (currPartitionByItemExpr->getOperatorType() == ITM_CONSTANT)
{
ConstValue * cv = (ConstValue*)currPartitionByItemExpr;
if ((cv->canGetExactNumericValue()) &&
(cv->getType()->getScale() == 0))
{
selIndex = cv->getExactNumericValue();
if (selIndex <= 0 || selIndex > origSelectList.entries())
{ //index in error -- produce error message
//in TD mode group by <constant> -- constant is purely positional
//selIndexError = TRUE;
*CmpCommon::diags() << DgSqlCode(-4366);
bindWA->setErrStatus();
return;
}
else
{
origPartitionByList.usedEntry( i )=
origSelectList.usedEntry((CollIndex)selIndex-1)->copyTree(bindWA->wHeap());
}
}
}
}
seqNode->setPartitionBy(origPartitionByList.convertToItemExpr());
}
// resolveAggregates -
// If aggregate functions have been found in the select list, then
// either attach the aggregate functions to the existing GroupBy below
// this RelRoot, or if there is no GroupBy create a GroupBy with an
// empty groupby list (scalar) and attach the aggregate functions to
// this GroupBy.
//
void RelRoot::resolveAggregates(BindWA *bindWA)
{
BindScope *currScope = bindWA->getCurrentScope();
if (NOT currScope->getUnresolvedAggregates().isEmpty()) {
if (getHasTDFunctions())
{ //Using rank function and aggregate functions in the same scope is not supported.
*CmpCommon::diags() << DgSqlCode(-4365);
bindWA->setErrStatus();
return;
}
RelExpr *sequence = currScope->getSequenceNode();
// The aggregates were used without a GROUP BY or HAVING
// clause, i.e. an implicit aggregation is performed
// (with a NULL result for an empty input table).
NABoolean implicitGrouping = (child(0)->getOperatorType() != REL_GROUPBY);
if(getHasOlapFunctions()) {
implicitGrouping = (sequence->child(0)->getOperatorType() != REL_GROUPBY);
}
GroupByAgg *groupByAgg = NULL;
if (implicitGrouping) {
RelExpr * realChildNode = NULL;
// if my child is a FIRST_N node, then add the GroupByAgg below it.
// Otherwise, add the GroupByAgg below me.
if (child(0)->getOperatorType() == REL_FIRST_N)
{
realChildNode = child(0)->child(0);
}
else
realChildNode = child(0);
if(getHasOlapFunctions()) {
realChildNode = sequence->child(0);
}
groupByAgg =
new (bindWA->wHeap()) GroupByAgg(realChildNode,REL_GROUPBY);
realChildNode->setBlockStmt(isinBlockStmt());
if(getHasOlapFunctions())
sequence->setChild(0, groupByAgg);
else if (child(0)->getOperatorType() == REL_FIRST_N)
child(0)->setChild(0, groupByAgg);
else
setChild(0, groupByAgg);
groupByAgg->setBlockStmt(isinBlockStmt());
}
else {
if(getHasOlapFunctions()) {
groupByAgg = (GroupByAgg *)sequence->child(0).getPtr();
} else {
groupByAgg = (GroupByAgg *)child(0).getPtr();
}
}
NAString colName(bindWA->wHeap());
Lng32 sqlCode = 0;
ValueId valId = NULL_VALUE_ID;
if (currScope->context()->unaggColRefInSelectList()) {
sqlCode = -4021;
valId = currScope->context()->unaggColRefInSelectList()->getValueId();
}
else if (implicitGrouping) {
// Genesis 10-000414-9410: "SELECT SUM(A),* FROM T; --no GROUP BY"
// cannot be flagged with err 4012 in ColReference::bindNode
// because table not marked "grouped" yet.
//
const ColumnDescList &cols = *currScope->getRETDesc()->getColumnList();
CollIndex i, n = cols.entries();
for (i=0; i<n; i++) {
const ColumnDesc *col = cols[i];
if (!col->isGrouped())
if (col->getColRefNameObj().isStar() ||
col->getValueId().getNAColumn(TRUE/*okIfNotColumn*/)) {
sqlCode = -4012;
valId = col->getValueId();
colName = col->getColRefNameObj().getColRefAsAnsiString();
break;
}
}
}
// Table has no GROUP BY (so no grouping columns exist at all)
// but is grouped by dint of a column reference within an aggregate,
// making any unaggregated column references illegal, by ANSI 7.9 SR 7.
if (sqlCode) {
if (colName.isNull()) {
const NAColumn *nacol = valId.getNAColumn(TRUE/*okIfNotColumn*/);
if (nacol)
colName = nacol->getFullColRefNameAsAnsiString();
else
colName = "_unnamed_column_";
}
// 4012 Col ref must be grouping or aggregated -- no star ref allowed!
// 4021 The select list contains a non-grouping non-aggregated column.
*CmpCommon::diags() << DgSqlCode(sqlCode) << DgColumnName(colName);
bindWA->setErrStatus();
return;
}
// Move the unresolved aggregates into the groupby node and bind
// (simply returns if "groupByAgg" isn't new).
groupByAgg->aggregateExpr() += currScope->getUnresolvedAggregates();
currScope->getUnresolvedAggregates().clear();
groupByAgg->bindNode(bindWA);
}
}
// resolveSequenceFunctions -
// Add the unresolvedSequenceFunctions to the Sequence node for this
// scope. If there are sequence functions, but no sequence node, it
// is an error. Also if there is a sequence node, but no sequence
// functions, it is an error.
//
//
void RelRoot::resolveSequenceFunctions(BindWA *bindWA)
{
BindScope *currScope = bindWA->getCurrentScope();
// If we have a Sequence Node associated with the RelRoot node,
//
RelSequence *sequenceNode = (RelSequence *)currScope->getSequenceNode();
currScope->getSequenceNode() = NULL;
if (sequenceNode) {
if (getHasTDFunctions() && sequenceNode->child(0)->getOperatorType() == REL_GROUPBY)
{ //Using rank function and group by clause in the same scope is not supported.
*CmpCommon::diags() << DgSqlCode(-4366);
bindWA->setErrStatus();
return;
}
CMPASSERT(sequenceNode->getOperatorType() == REL_SEQUENCE);
// Do not allow sequence functions or OLAP Window functions
// with Embedded Updates.
//
if (getGroupAttr()->isEmbeddedUpdateOrDelete()){
*CmpCommon::diags() << DgSqlCode(-4202)
<< (getGroupAttr()->isEmbeddedUpdate() ?
DgString0("UPDATE"):DgString0("DELETE"));
bindWA->setErrStatus();
return;
}
// If there are some sequence functions that have not been attached
// to the Sequence node, do so now. These were found when binding
// the select list.
//
sequenceNode->
addUnResolvedSeqFunctions(currScope->getUnresolvedSequenceFunctions(),
bindWA);
currScope->getUnresolvedSequenceFunctions().clear();
currScope->getAllSequenceFunctions().clear();
if (bindWA->errStatus()) return;
// Make sure the sequence function has some work to do.
// The cast is needed since the compiler will attempt to pick the
    // protected (writable) version of 'sequenceFunctions()'. (Is this
    // a compiler bug?)
//
if ((((const RelSequence *)sequenceNode)->sequenceFunctions().isEmpty() )
&&
( !getHasOlapFunctions() &&
((const RelSequence *)sequenceNode)->requiredOrder().entries() != 0 )) {
// Can't have a sequence by clause without
// sequence functions.
//
*CmpCommon::diags() << DgSqlCode(-4111);
bindWA->setErrStatus();
return;
}
} else if (! currScope->getUnresolvedSequenceFunctions().isEmpty()) {
// Can't have sequence functions without a
// sequence by clause.
// First, loop through the list of functions.
//
ValueIdSet &unresolved = currScope->getUnresolvedSequenceFunctions();
NAString unparsed(bindWA->wHeap());
for (ValueId vid = unresolved.init(); unresolved.next(vid); unresolved.advance(vid)) {
ItemExpr *ie = vid.getItemExpr();
CMPASSERT(ie->isASequenceFunction());
unparsed += ", ";
ie->unparse(unparsed, DEFAULT_PHASE, USER_FORMAT_DELUXE);
}
unparsed.remove(0,2); // remove initial ", "
*CmpCommon::diags() << DgSqlCode(-4110) << DgString0(unparsed);
bindWA->setErrStatus();
return;
}
}
// if a where pred is specified on an immediate child scan or rename node,
// and it contains an 'and'ed rownum() predicate of the form:
// rownum < val, or rownum <= val, or rownum = val
// then get the val and make it the firstN value.
// Also, remove this predicate from selPredTree.
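// E.g. in MODE_SPECIAL_4, "select * from t where rownum <= 10" is
// treated as [FIRST 10]; "rownum < 10" becomes [FIRST 9] and
// "rownum = 1" becomes [FIRST 1].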
void RelRoot::processRownum(BindWA * bindWA)
{
NABoolean specialMode = (CmpCommon::getDefault(MODE_SPECIAL_4) == DF_ON);
if (NOT specialMode)
return;
if (! child(0))
return;
if ((child(0)->getOperatorType() != REL_SCAN) &&
(child(0)->getOperatorType() != REL_RENAME_TABLE))
return;
if (! child(0)->selPredTree())
return;
ItemExpr * wherePred = child(0)->selPredTree();
ItemExprList iel(wherePred, bindWA->wHeap(), ITM_AND, FALSE, FALSE);
NABoolean found = FALSE;
for (Lng32 i = 0; ((NOT found) && (i < iel.entries())); i++)
{
ItemExpr * ie = iel[i];
if (ie->getArity() != 2)
continue;
if (NOT ((ie->getOperatorType() == ITM_LESS) ||
(ie->getOperatorType() == ITM_EQUAL) ||
(ie->getOperatorType() == ITM_LESS_EQ)))
continue;
ItemExpr * child0 = ie->child(0)->castToItemExpr();
ItemExpr * child1 = ie->child(1)->castToItemExpr();
if (NOT ((child0->getOperatorType() == ITM_REFERENCE) &&
(child1->getOperatorType() == ITM_CONSTANT)))
continue;
ColReference * col = (ColReference*)child0;
ColRefName &colRefName = col->getColRefNameObj();
CorrName &cn = col->getCorrNameObj();
const NAString &catName = cn.getQualifiedNameObj().getCatalogName();
const NAString &schName = cn.getQualifiedNameObj().getSchemaName();
const NAString &objName = cn.getQualifiedNameObj().getObjectName();
const NAString &colName = colRefName.getColName();
if (NOT ((catName.isNull()) &&
(schName.isNull()) &&
(objName.isNull()) &&
(colName == "ROWNUM")))
continue;
ConstValue * cv = (ConstValue*)child1;
if (NOT cv->canGetExactNumericValue())
continue;
Int64 val = cv->getExactNumericValue();
if (val < 0)
continue;
if ((ie->getOperatorType() == ITM_EQUAL) &&
(val != 1))
continue;
if ((ie->getOperatorType() == ITM_LESS) &&
(val > 0))
val--;
setFirstNRows(val);
// remove this pred from the list
iel.removeAt(i);
found = TRUE;
}
if (found)
{
// convert the list back to selection pred.
ItemExpr * ie = iel.convertToItemExpr();
child(0)->removeSelPredTree();
child(0)->addSelPredTree(ie);
}
return;
}
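// RelRoot::bindNode -
// Main binder entry point for the root of a query tree. For a true root
// this first applies root-level transformations (coprocessor aggregates,
// rowset output handling, ROWNUM predicates, groupby/orderby ordinals),
// then opens a bind scope, binds the children and the select list,
// resolves aggregates and sequence functions, processes the ORDER BY
// clause, and finally handles access options, required shapes and
// privilege checking.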
RelExpr *RelRoot::bindNode(BindWA *bindWA)
{
if (nodeIsBound())
{
bindWA->getCurrentScope()->setRETDesc(getRETDesc());
return this;
}
if (isTrueRoot())
{
// if this is simple scalar aggregate on a seabase table
// (of the form: select count(*), sum(a) from t; )
    // then transform it so it can be evaluated using the hbase co-processor.
if ((CmpCommon::getDefault(HBASE_COPROCESSORS) == DF_ON) &&
(child(0) && child(0)->getOperatorType() == REL_SCAN))
{
Scan * scan = (Scan*)child(0)->castToRelExpr();
if ((getCompExprTree()) &&
(NOT hasOrderBy()) &&
(! getSelPredTree()) &&
(! scan->getSelPredTree()) &&
(scan->selectionPred().isEmpty()) &&
((scan->getTableName().getSpecialType() == ExtendedQualName::NORMAL_TABLE) ||
(scan->getTableName().getSpecialType() == ExtendedQualName::INDEX_TABLE)) &&
!scan->getTableName().isPartitionNameSpecified() &&
!scan->getTableName().isPartitionRangeSpecified() &&
(NOT bindWA->inViewDefinition()))
{
ItemExprList selList(bindWA->wHeap());
selList.insertTree(getCompExprTree());
// for now, only count(*) can be co-proc'd
if ((selList.entries() == 1) &&
(selList[0]->getOperatorType() == ITM_COUNT) &&
(selList[0]->origOpType() == ITM_COUNT_STAR__ORIGINALLY))
{
NATable *naTable = bindWA->getNATable(scan->getTableName());
if (bindWA->errStatus())
return this;
if (((naTable->getObjectType() == COM_BASE_TABLE_OBJECT) ||
(naTable->getObjectType() == COM_INDEX_OBJECT)) &&
((naTable->isSeabaseTable()) ||
((naTable->isHiveTable()) &&
(naTable->getClusteringIndex()->getHHDFSTableStats()->isOrcFile()))))
{
Aggregate * agg =
new(bindWA->wHeap()) Aggregate(ITM_COUNT,
new (bindWA->wHeap()) SystemLiteral(1),
FALSE /*i.e. not distinct*/,
ITM_COUNT_STAR__ORIGINALLY,
'!');
agg->bindNode(bindWA);
if (bindWA->errStatus())
{
return this;
}
ValueIdSet aggrSet;
aggrSet.insert(agg->getValueId());
ExeUtilExpr * eue = NULL;
if (naTable->isSeabaseTable())
eue =
new(CmpCommon::statementHeap())
ExeUtilHbaseCoProcAggr(scan->getTableName(),
aggrSet);
else
eue =
new(CmpCommon::statementHeap())
ExeUtilOrcFastAggr(scan->getTableName(),
aggrSet);
eue->bindNode(bindWA);
if (bindWA->errStatus())
{
return this;
}
setChild(0, eue);
removeCompExprTree();
addCompExprTree(agg);
} // if seabaseTable
} // count aggr
}
} // coproc on
if (child(0) &&
((child(0)->getOperatorType() == REL_INSERT) ||
(child(0)->getOperatorType() == REL_UNARY_INSERT) ||
(child(0)->getOperatorType() == REL_LEAF_INSERT)))
{
Insert * ins = (Insert*)child(0)->castToRelExpr();
if (ins->isNoRollback())
{
if ((CmpCommon::getDefault(AQR_WNR)
!= DF_OFF) &&
(CmpCommon::getDefault(AQR_WNR_INSERT_CLEANUP)
!= DF_OFF))
ins->enableAqrWnrEmpty() = TRUE;
}
if (CmpCommon::transMode()->anyNoRollback())
{
// tbd - may need to integrate these two.
if ((CmpCommon::getDefault(AQR_WNR)
!= DF_OFF) &&
(CmpCommon::getDefault(AQR_WNR_INSERT_CLEANUP)
!= DF_OFF))
ins->enableAqrWnrEmpty() = TRUE;
}
}
// if lob is being extracted as chunks of string, then only one
// such expression could be specified in the select list.
// If this is the case, then insert ExeUtilLobExtract operator.
// This operator reads lob contents and returns them to caller as
// multiple rows.
    // This lobextract function can only be used in the outermost select
// list and must be converted at this point.
// It is not evaluated on its own.
if (getCompExprTree())
{
ItemExprList selList(bindWA->wHeap());
selList.insertTree(getCompExprTree());
if ((selList.entries() == 1) &&
(selList[0]->getOperatorType() == ITM_LOBEXTRACT))
{
LOBextract * lef = (LOBextract*)selList[0];
ExeUtilLobExtract * le =
new (PARSERHEAP()) ExeUtilLobExtract
(lef, ExeUtilLobExtract::TO_STRING_,
NULL, NULL, lef->getTgtSize(), 0,
NULL, NULL, NULL, child(0), PARSERHEAP());
le->setHandleInStringFormat(FALSE);
setChild(0, le);
}
}
processRownum(bindWA);
} // isTrueRoot
if (getHasTDFunctions())
{
transformTDPartitionOrdinals(bindWA);
if (bindWA->errStatus()) return NULL;
}
RelRoot * returnedRoot =
transformGroupByWithOrdinalPhase1(bindWA);
if (! returnedRoot)
return NULL;
returnedRoot =
transformOrderByWithExpr(bindWA);
if (! returnedRoot)
return NULL;
if (bindWA->getCurrentScope()->context()->inTableCheckConstraint()) {
// See ANSI 11.9 Leveling Rule 1a (Intermediate Sql).
// 4089 A check constraint cannot contain a subquery.
*CmpCommon::diags() << DgSqlCode(-4089)
<< DgConstraintName(
bindWA->getCurrentScope()->context()->inCheckConstraint()->
getConstraintName().getQualifiedNameAsAnsiString());
bindWA->setErrStatus();
}
if (isTrueRoot())
bindWA->setTopRoot(this);
bindWA->setBindTrueRoot(isTrueRoot());
if (!bindWA->getAssignmentStArea()) {
bindWA->getAssignmentStArea() =
new (bindWA->wHeap()) AssignmentStArea(bindWA);
bindWA->getAssignmentStArea()->getAssignmentStHostVars() =
new (bindWA->wHeap()) AssignmentStHostVars(bindWA);
}
// If there are one or more output rowset variables, then we introduce
// a RowsetInto node below this Root node. The RowsetInto node will
  // create a Pack node later on when it is bound, so that we can
// insert values into the rowset output variables.
// We don't do this transformation if we are inside a compound statement.
//
if (isTrueRoot() && assignmentStTree()) {
ItemExpr *outputVar = getOutputVarTree();
if (outputVar) {
CMPASSERT(outputVar->getChild(0)->getOperatorType() == ITM_HOSTVAR);
HostVar *hostVar = (HostVar *) outputVar->getChild(0);
if (hostVar->getType()->getTypeQualifier() == NA_ROWSET_TYPE) {
ItemExpr *outputVar = removeOutputVarTree();
assignmentStTree() = NULL;
// Get the output size expression. It may be a constant or a variable.
ItemExpr * sizeExpr = getHostArraysArea()->outputSize();
// set the SelectIntoRowsets flag
getHostArraysArea()->setHasSelectIntoRowsets(TRUE);
// Create INTO node. Its child is the current root
RelExpr *intoNode =
new (bindWA->wHeap()) RowsetInto(this, outputVar, sizeExpr);
        //In case of FIRST N with ORDER BY, the generator introduces the FIRST N
        //operator. For rowsets, the FIRST N node needs to be introduced below the
        //PACK node and not below the top root. So set FIRST N rows for the INTO
        //node and not the top root.
if (hasOrderBy()) {
intoNode->setFirstNRows(getFirstNRows());
setFirstNRows(-1);
}
// Create a new root node that will go above the RowsetInto node
setRootFlag(FALSE);
RelRoot *newRoot = new (bindWA->wHeap()) RelRoot(intoNode);
newRoot->setRootFlag(TRUE);
// copy the display flag from this true Root to the new root.
// newRoot->setDisplayTree(getDisplayTree());
newRoot->setDisplayTree(TRUE);
newRoot->addInputVarTree(removeInputVarTree());
newRoot->outputVarCnt() = outputVarCnt();
NABoolean defaultSortedRows = newRoot->needFirstSortedRows();
//Int64 defaultFirstNRows = newRoot->getFirstNRows();
newRoot->needFirstSortedRows() = needFirstSortedRows();
//newRoot->setFirstNRows(getFirstNRows());
needFirstSortedRows() = defaultSortedRows;
// setFirstNRows(defaultFirstNRows);
newRoot->rollbackOnError() = rollbackOnError();
// migrate hostArraysArea to newroot, and tell bindWA about it
newRoot->setHostArraysArea(getHostArraysArea());
bindWA->setHostArraysArea(getHostArraysArea());
setSubRoot(FALSE); // old root is no longer the root
newRoot->setSubRoot(TRUE); // newRoot becomes the root
return newRoot->bindNode(bindWA);
}
}
}
if (assignmentStTree() && child(0)->getOperatorType() != REL_ROWSET_INTO) {
AssignmentStHostVars *ptr =
new (bindWA->wHeap()) AssignmentStHostVars(bindWA);
if (ptr->containsRowsets(assignmentStTree())) {
ItemExpr *outputSizeExpr = NULL;
// The user may have used the ROWSET FOR OUTPUT SIZE construct
// set the SelectIntoRowsets flag.
if (getHostArraysArea()) {
outputSizeExpr = getHostArraysArea()->outputSize();
getHostArraysArea()->setHasSelectIntoRowsets(TRUE);
}
// Create RowsetInto node. Its child is the current root
RelExpr *intoNode = new (bindWA->wHeap())
RowsetInto(this, assignmentStTree(), outputSizeExpr);
      //In case of FIRST N with ORDER BY, the generator introduces the FIRST N
      //operator. For rowsets, the FIRST N node needs to be introduced below the
      //PACK node and not below the top root. So set FIRST N rows for the INTO
      //node and not the top root.
if (hasOrderBy()) {
intoNode->setFirstNRows(getFirstNRows());
setFirstNRows(-1);
}
RelRoot *newRoot = new (bindWA->wHeap()) RelRoot(*this);
newRoot->child(0) = intoNode;
newRoot->removeCompExprTree();
setRootFlag(FALSE);
removeInputVarTree();
assignmentStTree() = NULL;
return newRoot->bindNode(bindWA);
}
}
// Create a new scope.
//
if (!isDontOpenNewScope()) // -- Triggers.
{
bindWA->initNewScope();
// MV --
if(TRUE == hasMvBindContext())
{
// Copy the MvBindContext object from the RelRoot node to the
// current BindContext.
bindWA->markScopeWithMvBindContext(getMvBindContext());
}
if (getInliningInfo().isTriggerRoot())
{
CMPASSERT(getInliningInfo().getTriggerObject() != NULL);
bindWA->getCurrentScope()->context()->triggerObj() =
getInliningInfo().getTriggerObject()->getCreateTriggerNode();
}
if (getInliningInfo().isActionOfRI())
bindWA->getCurrentScope()->context()->inRIConstraint() = TRUE;
}
// Save whether the user specified SQL/MP-style access options in the query
// (this is always true for the LOCK stmt, which we must maximize).
//
if (child(0)->getOperatorType() == REL_LOCK) {
accessOptions().updateAccessOptions(
TransMode::ILtoAT(TransMode::READ_COMMITTED_),
((RelLock *)child(0).getPtr())->getLockMode());
accessOptions().updateAccessOptions(
TransMode::ILtoAT(CmpCommon::transMode()->getIsolationLevel()));
}
// QSTUFF: the updateOrDelete flag is set to ensure that scans done as
// part of a generic update cause an exclusive lock to be set to ensure
// a consistent completion of the following update or delete.
if (containsUpdateOrDelete(this))
{
accessOptions().setUpdateOrDelete(TRUE);
}
else if (isTrueRoot())
{
// if the query does not contain any Generic Update nodes, mark it
// as read only query. In that case, we have freedom not to include
// some indexes in the indexes list.
bindWA->setReadOnlyQuery();
}
// This block of code used to be in RelRoot::propagateAccessOptions() which
// used to be called from here. We've since replaced this old 'push' call
// with the 'pull' of BindWA->findUserSpecifiedAccessOption() calls from
// RelRoot, Scan, and GenericUpdate.
// QSTUFF
// We decided to stick with READ COMMITTED as the default access
// (even for streams). However, if we change our mind again, this is
// the place to do it.
// if (getGroupAttr()->isStream() &&
// (accessOptions().accessType() == ACCESS_TYPE_NOT_SPECIFIED_))
// accessOptions().accessType() = SKIP_CONFLICT_;
// Set the flag to indicate to DP2 that this executes an
// embedded update or delete.
if (getGroupAttr()->isEmbeddedUpdateOrDelete())
accessOptions().setUpdateOrDelete(TRUE);
// QSTUFF
if (accessOptions().userSpecified())
bindWA->getCurrentScope()->context()->setStmtLevelAccessOptions(accessOptions());
if (isSubRoot() && getHostArraysArea())
getHostArraysArea()->setRoot(this);
if (isTrueRoot()) {
// If this were false, then SynthType's ValueDesc::create()
// would use a DIFFERENT SchemaDB than BindItemExpr's createValueDesc()
// -- wrong! Assert this only once per query.
CMPASSERT(ActiveSchemaDB() == bindWA->getSchemaDB());
// set the upDateCurrentOf_ attribute for the root if possible
if (child(0)->getOperatorType() == REL_UNARY_UPDATE ||
child(0)->getOperatorType() == REL_UNARY_DELETE) {
GenericUpdate *gu = (GenericUpdate *)child(0)->castToRelExpr();
if (gu->updateCurrentOf()) {
updateCurrentOf() = gu->updateCurrentOf();
currOfCursorName() = gu->currOfCursorName();
}
}
// If we are processing a rowset,
// then the child operator is a REL_TSJ.
// If this is the case, and the operation is
// an update or delete, we need to search
    // further to determine its correct child
// operator type.
// Otherwise, the child operator type is correct.
if (bindWA->getHostArraysArea() &&
bindWA->getHostArraysArea()->hasHostArraysInWhereClause() &&
bindWA->getHostArraysArea()->hasInputRowsetsInSelectPredicate() == HostArraysWA::NO_ &&
NOT bindWA->getHostArraysArea()->hasHostArraysInTuple())
// ensure that we don't flag rowset selects or insert selects with rowsets in the predicate
{
if (bindWA->getHostArraysArea()->hasHostArraysInSetClause()) // includes rowset merge statements too
childOperType() = REL_UNARY_UPDATE;
else
childOperType() = REL_UNARY_DELETE;
}
else
childOperType() = child(0)->getOperator();
// see if we can potentially optimize the buffer sizes for
// oltp queries. Done for update/delete/insert-values/select-unique.
// if scan, olt opt is possible.
if (childOperType() == REL_SCAN)
oltOptInfo().setOltOpt(TRUE);
/*
// For Denali release 1, compound statements are restricted
// to yield at most one row; so olt opt is possible for CS.
// If a compound statement is not pushed down to DP2, then
// OLT optimization will be turned off in generator.
//
// Turn it off for Compound statement as insertion with tuple list
// is possible in a CS.
*/
else if (childOperType() == REL_COMPOUND_STMT)
oltOptInfo().setOltOpt(TRUE);
// if INSERT...VALUES, olt opt is possible.
else if ((childOperType() == REL_UNARY_INSERT) &&
(NOT child(0)->child(0) ||
child(0)->child(0)->getOperatorType() == REL_TUPLE))
oltOptInfo().setOltOpt(TRUE);
} // isTrueRoot
else if (checkFirstNRowsNotAllowed(bindWA)) {
*CmpCommon::diags() << DgSqlCode(-4102);
bindWA->setErrStatus();
return NULL;
}
BindScope *currScope = bindWA->getCurrentScope();
// -- MVs
// Check for the Refresh node before binding, because after binding it
// will be gone.
if (child(0)->getOperatorType() == REL_REFRESH)
setRootOfInternalRefresh();
  // set the correct host area in bindWA for non-root stmt.
// fix 10-031106-4430 (RG: mxcmp failed to compile INSERT
// statement with rowsets within IF statement)
HostArraysWA *tempWA = NULL;
if ( NOT isTrueRoot() && getHostArraysArea() )
{
tempWA = bindWA->getHostArraysArea();
bindWA->setHostArraysArea(getHostArraysArea());
}
bindWA->setBindTrueRoot(FALSE);
// Bind the children here to determine if we need to rollback on error
// for embedded update/delete's.
//
bindChildren(bindWA);
if ( tempWA )
{
// Restore previous environment
bindWA->setHostArraysArea(tempWA);
}
if (bindWA->errStatus()) return NULL;
// For SPJ, store the spOutParams_ from the bindWA in RelRoot,
// We need it at codegen
if ( bindWA->getSpOutParams ().entries ())
spOutParams_ = &( bindWA->getSpOutParams ());
if (isTrueRoot()) {
if (child(0)->getGroupAttr()->isEmbeddedUpdateOrDelete()) {
// Olt optimization is now supported for embedded updates/deletes (pub/sub
// thingy) for now.
oltOptInfo().setOltOpt(TRUE);
if (getFirstNRows() != -1) {
// [FIRST/ANY n] syntax cannot be used with an embedded update or embedded delete.
*CmpCommon::diags() << DgSqlCode(-4216);
bindWA->setErrStatus();
return NULL;
}
}
// If updateCurrentOf_ not set yet
// Check the tree for a GenericUpdate RelExpr (anywhere in the tree)
// so we can set the root node accordingly.
GenericUpdate *gu = getGenericUpdate(this);
if (!updateCurrentOf() && gu && gu->updateCurrentOf()) {
updateCurrentOf() = gu->updateCurrentOf();
currOfCursorName() = gu->currOfCursorName();
}
// if standalone update/delete(no update where current of),
// olt opt is possible.
if (((childOperType() == REL_UNARY_UPDATE) ||
(childOperType() == REL_UNARY_DELETE)) &&
(NOT updateCurrentOf()))
oltOptInfo().setOltOpt(TRUE);
// If transaction statement (begin/commit/rollback/set xn,
// olt opt is possible.
if (childOperType() == REL_TRANSACTION)
oltOptInfo().setOltOpt(TRUE);
// Set indication whether transaction need to be aborted on error
// during an IUD query.
// Rollback will be done for a query that contains
// rowsets, or an insert which is
// not an 'insert...values' with a single value.
//
// There are more cases when a transaction will be rolled back on
// an IUD error. These are set in GenericUpdate::preCodeGen,
// and DP2(IUD)::preCodeGen.
// These include embedded update or delete, stream access, non-unique
// update or delete... See ::preCodeGen methods for details.
rollbackOnError() = FALSE;
if (childOperType().match(REL_ANY_GEN_UPDATE))
{
if (bindWA->getHostArraysArea() &&
bindWA->getHostArraysArea()->done()) // rowsets
rollbackOnError() = TRUE;
else if ((childOperType() == REL_UNARY_INSERT) &&
(child(0)->child(0) &&
child(0)->child(0)->getOperatorType() != REL_TUPLE))
rollbackOnError() = TRUE;
}
if (bindWA->getHostArraysArea() &&
bindWA->getHostArraysArea()->getTolerateNonFatalError())
{
setTolerateNonFatalError(RelExpr::NOT_ATOMIC_);
}
}
CMPASSERT(currScope == bindWA->getCurrentScope()); // sanity check
  // do not do olt query optimization if rowsets are present.
if (bindWA->getHostArraysArea() && bindWA->getHostArraysArea()->done())
{
oltOptInfo().setOltOpt(FALSE);
if (bindWA->getHostArraysArea()->getTolerateNonFatalError()) {
// we also cannot do dp2 level olt optimization if this is a non-atomic rowset insert
oltOptInfo().setOltEidOpt(FALSE);
}
else {
// but can do dp2 level olt optimization if this is "regular" rowset insert
oltOptInfo().setOltEidOpt(TRUE);
}
}
// If unresolved aggregate functions have been found in the children of the
// root node, that would mean that we are referencing aggregates before
// the groupby operation is performed
if (checkUnresolvedAggregates(bindWA)) return this;
// A RelRoot does not have a select list for SQL update, delete, insert
// statements as well as when the query contains an SQL union. If a
// select list is absent, assign the select list of its child to it.
// This will propagate the selection lists of the children of the
// union up to the root.
//
// Detach the item expression tree for the select list and bind it.
//
ItemExpr *compExprTree = removeCompExprTree();
if (NOT compExprTree) {
// -- for RI and Triggers
if (isEmptySelectList())
setRETDesc(new (bindWA->wHeap()) RETDesc(bindWA));
else {
setRETDesc(child(0)->getRETDesc());
getRETDesc()->getValueIdList(compExpr());
}
}
else {
CMPASSERT(!currScope->context()->inSelectList());
currScope->context()->inSelectList() = TRUE;
// QSTUFF
// in case we are binding an embedded generic update within a view
// we have to rename column references using OLD or NEW as
// table names since we adopted the RETDesc/TableDesc of the
// scan node or the view scan node, i.e. the RenameTable node
// at the root of an expanded view.
if (bindWA->renameToScanTable()){
ColReference * cr = NULL;
ItemExpr * itm = compExprTree;
NABoolean done = FALSE;
const CorrName corr =
(getViewScanNode()->getOperatorType() == REL_RENAME_TABLE) ?
((RenameTable *)getViewScanNode())->getTableName() :
((Scan *)getViewScanNode())->getTableDesc()->getCorrNameObj();
while (NOT done){
if (itm->getOperatorType() == ITM_ITEM_LIST){
cr = (ColReference *) itm->getChild(0);
itm = itm->getChild(1)->castToItemExpr();
}
else {
cr = (ColReference *) itm;
done = TRUE;
}
cr->getCorrNameObj().getQualifiedNameObj().
setObjectName(corr.getQualifiedNameObj().getObjectName());
}
}
// QSTUFF
RelRoot *viewQueryRoot = NULL;
StmtDDLCreateView *pCreateView = NULL;
if (bindWA->inViewDefinition()) {
pCreateView = bindWA->getCreateViewParseNode();
if (pCreateView->getQueryExpression() == this) {
viewQueryRoot = this;
CMPASSERT(isTrueRoot());
pCreateView->setCurViewColNum((CollIndex)0);
}
}
// charset inference
compExprTree->setResolveIncompleteTypeStatus(TRUE);
HostArraysWA * arrayWA = bindWA->getHostArraysArea() ;
if (arrayWA && arrayWA->hasHostArraysInTuple()) {
CollIndex counterRowVals = 0;
CMPASSERT(!bindWA->getCurrentScope()->context()->counterForRowValues());
bindWA->getCurrentScope()->context()->counterForRowValues() = &counterRowVals;
// If this query (scope) contains OLAP Window functions, then add
// a Sequence Operator just below the Root node. Also, if aggregates
// exist, resolve them now.
//
setRETDesc(bindRowValues(bindWA, compExprTree, compExpr(), this, isTrueRoot()));
bindWA->getCurrentScope()->context()->counterForRowValues() = NULL;
}
else {
setRETDesc(bindRowValues(bindWA, compExprTree, compExpr(), viewQueryRoot, isTrueRoot()));
}
if (bindWA->errStatus()) return NULL;
if (viewQueryRoot) pCreateView->resetCurViewColNum();
currScope->context()->inSelectList() = FALSE;
}
// MVs --
if (bindWA->isPropagateOpAndSyskeyColumns() &&
child(0)->getOperatorType()!=REL_GROUPBY &&
child(0)->getOperatorType()!=REL_AGGREGATE &&
currScope->getUnresolvedAggregates().isEmpty() &&
!isEmptySelectList() &&
!isTrueRoot())
getRETDesc()->propagateOpAndSyskeyColumns(bindWA, TRUE);
CMPASSERT(currScope == bindWA->getCurrentScope()); // sanity check
currScope->setRETDesc(getRETDesc());
bindWA->setRenameToScanTable(FALSE); // QSTUFF
// Genesis 10-980106-2038 + 10-990202-1098.
//
if (isTrueRoot()) {
castComputedColumnsToAnsiTypes(bindWA, getRETDesc(), compExpr());
if (bindWA->errStatus()) return NULL;
}
// Genesis 10-970822-2581. See finalize() in SqlParser.y.
//
// If we are in a compound statement (an IF's UNION), do not issue an error.
//
// Added condition for CALL StoredProcedures
// If we invoke a CALL statement, the #out params do not match the
// # columns, we make that check in the CallSP::bindNode, so ignore it
// for now.
if (isTrueRoot() &&
(child(0)->getOperatorType() != REL_CALLSP &&
(child(0)->getOperatorType() != REL_COMPOUND_STMT &&
(child(0)->getOperatorType() != REL_TUPLE &&
(Int32)getRETDesc()->getDegree() != 0))) &&
(child(0)->getOperatorType() != REL_UNION ||
(!((Union *) (RelExpr *) child(0))->getUnionForIF())) &&
outputVarCntValid() &&
outputVarCnt() != (Int32)getRETDesc()->getDegree() &&
(outputVarCnt() ||
CmpCommon::context()->GetMode() != STMT_DYNAMIC)) {
// 4093 The number of output parameters ($0) must equal the number of cols
// 4094 The number of output host vars ($0) must equal the number of cols
Lng32 sqlcode = (CmpCommon::context()->GetMode() == STMT_DYNAMIC) ?
-4093 : -4094;
*CmpCommon::diags() << DgSqlCode(sqlcode)
#pragma nowarn(1506) // warning elimination
<< DgInt0(outputVarCnt()) << DgInt1(getRETDesc()->getDegree());
#pragma warn(1506) // warning elimination
bindWA->setErrStatus();
return NULL;
}
ItemExpr *inputVarTree = removeInputVarTree();
if (inputVarTree) {
inputVarTree->convertToValueIdList(inputVars(), bindWA, ITM_ITEM_LIST);
if (bindWA->errStatus()) return NULL;
// If DYNAMIC SQL compilation, then
// remove from the input var list (list of HostVars and DynamicParams)
// any env vars that were found to have a equivalence value which is
// valid (parseable) for the context it appears in
// (i.e., we've already bound the env var name's dynamic value,
// so we no longer need the env var name at all).
// Right now, this means that in sqlci you can say
// set envvar xyz cat.sch.tbl;
// select * from $xyz;
//
if (CmpCommon::context()->GetMode() == STMT_DYNAMIC) {
for (CollIndex i = inputVars().entries(); i--; ) {
HostVar *hostVar = (HostVar *)inputVars()[i].getItemExpr();
if (hostVar->getOperatorType() == ITM_HOSTVAR &&
hostVar->isPrototypeValid() &&
(hostVar->isEnvVar() ||
hostVar->isDefine()))
inputVars().removeAt(i);
}
} // STMT_DYNAMIC
} // inputVarTree
// add to the inputVars, any user functions that are to be treated
  // like input values, that is, evaluated once and used thereafter.
// Do not insert duplicate value ids.
for (CollIndex i = 0; i < bindWA->inputFunction().entries(); i++ ) {
if (NOT inputVars().contains(bindWA->inputFunction()[i]))
inputVars().insert(bindWA->inputFunction()[i]);
}
// If aggregate functions have been found in the select list, then
// create a groupby node with an empty groupby list, if the child is not
// already a groupby node.
//
resolveAggregates(bindWA);
if (bindWA->errStatus()) return NULL;
// Add the unresolvedSequenceFunctions to the Sequence node for this
// scope. If there are sequence functions, but no sequence node, it
// is an error. Also if there is a sequence node, but no sequence
// functions, it is an error.
// If OLAP Window functions exist for this scope, they will have been
// translated into sequence functions by this point and so will be added
// to the Sequence node here.
//
resolveSequenceFunctions(bindWA);
if (bindWA->errStatus()) return NULL;
BindScope *prevScope = bindWA->getPreviousScope(currScope);
NABoolean inRowSubquery = FALSE;
if (prevScope)
inRowSubquery = prevScope->context()->inRowSubquery();
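  // If this root is directly under a row subquery, add one-row
  // aggregates to enforce the subquery's scalar semantics (this can be
  // disabled with COMP_BOOL_137).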
if (inRowSubquery && (CmpCommon::getDefault(COMP_BOOL_137) == DF_OFF))
addOneRowAggregates(bindWA);
returnedRoot =
transformGroupByWithOrdinalPhase2(bindWA);
if (! returnedRoot)
return NULL;
ItemExpr *orderByTree = removeOrderByTree();
if (orderByTree) {
//
// Tandem extension to ANSI (done only if source table is not grouped!):
// Allow the ORDER BY clause to reference columns in the source table even
// if the columns are not referenced in the select list. Treat the extra
// columns as *system* columns so that they can be referenced by name
// (ORDER BY name) but not by position in select list (ORDER BY n).
// Thus, select-list columns have precedence, as they should since ANSI
// allows only them in ORDER BY to begin with!
//
// Add all source columns to system column list of temporary orderBy;
// remove select-list columns from this system column list;
// insert select-list columns into the *user* column list
// (these must be in separate loops to set up the orderBy XCNM correctly!).
// Then bind the temporary (convert to ValueId list), reset the RETDesc.
//
bindWA->getCurrentScope()->context()->inOrderBy() = TRUE;
CollIndex i;
RETDesc orderBy;
const RETDesc &select = *getRETDesc();
const RETDesc &source = *child(0)->getRETDesc();
// if the source is grouped, then the ORDER BY columns must be in
// the select list. So, don't add any other columns that aren't
// in the select list...
if (source.isGrouped()) {
orderBy.setGroupedFlag();
//10-031125-1549 -begin
      //Since we are processing a groupby we should
      //certainly have some node below it. Further, if
      //that node is a REL_ROOT we will certainly have
      //a child. So this rather unusual call sequence
      //is safe. We are actually looking for a pattern
      //like REL_GROUPBY(REL_ROOT(*)) introduced to handle
      //the DISTINCT qualifier.
      //For example, if we have a query like
      //select distinct j as jcol from t1 order by j;
      //the tree will look like
      //REL_ROOT(REL_GROUPBY(REL_ROOT(REL_SCAN(t1))))
      //This is a NON-ANSI query. To support queries like this
      //we need to expose "J" as a system column. To do that we need
      //to get hold of the RETDesc of the node below the REL_ROOT
      //(not the actual REL_ROOT).
RETDesc *src = NULL;
if(child(0)->child(0)&&
child(0)->child(0)->getOperatorType() == REL_ROOT)
{
src = child(0)->child(0)->child(0)->getRETDesc();
}
else
{
src = child(0)->getRETDesc();
}
const ColumnDescList &sysColList = *src->getSystemColumnList();
const ColumnDescList &usrColList = *src->getColumnList();
ValueId vid;
for(i = 0; i < select.getDegree(); i++) {
vid = select.getValueId(i);
for(CollIndex j = 0; j < sysColList.entries(); j++){
if( vid == sysColList[j]->getValueId()){
orderBy.addColumn(bindWA, sysColList[j]->getColRefNameObj()
, sysColList[j]->getValueId()
, SYSTEM_COLUMN);
}
}
for(CollIndex k = 0; k < usrColList.entries(); k++){
if(vid == usrColList[k]->getValueId()){
orderBy.addColumn(bindWA, usrColList[k]->getColRefNameObj()
, usrColList[k]->getValueId()
, SYSTEM_COLUMN);
}
}
}
//10-031125-1549 -end
NABoolean specialMode =
((CmpCommon::getDefault(MODE_SPECIAL_1) == DF_ON) ||
(CmpCommon::getDefault(MODE_SPECIAL_2) == DF_ON) ||
(CmpCommon::getDefault(MODE_SPECIAL_4) == DF_ON));
      // In specialMode, we want to support order by on columns
      // which are not explicitly specified in the select list.
      // Ex: select a+1 from t group by a order by a;
      // Find all the column references in the orderByTree which are
      // also in the group by list but are not explicitly specified
      // in the select list.
// This code path is for cases when both GROUP BY and ORDER BY are
// specified.
// If order by is specified without the group by, then that case
// is already covered in the 'else' portion.
if ((specialMode) &&
(child(0)->getOperatorType() == REL_GROUPBY) &&
(allOrderByRefsInGby_)) // already validated that all order by cols
// are also in group by clause
{
ItemExprList orderByList(orderByTree, bindWA->wHeap());
GroupByAgg * grby=(GroupByAgg *)(child(0)->castToRelExpr());
for (CollIndex ii = 0; ii < orderByList.entries(); ii++)
{
ItemExpr * colRef = orderByList[ii];
if (colRef->getOperatorType() == ITM_INVERSE)
colRef = colRef->child(0)->castToItemExpr();
if (colRef && colRef->getOperatorType() == ITM_REFERENCE)
{
ColReference * obyColRef = (ColReference*)colRef;
for (CollIndex k = 0; k < usrColList.entries(); k++)
{
if (obyColRef->getColRefNameObj().getColName() ==
usrColList[k]->getColRefNameObj().getColName())
{
orderBy.delColumn(bindWA,
usrColList[k]->getColRefNameObj(),
SYSTEM_COLUMN);
orderBy.addColumn(bindWA,
usrColList[k]->getColRefNameObj(),
usrColList[k]->getValueId(),
SYSTEM_COLUMN);
break;
} // if
} // for
} // if
} // for
}
for (i = 0; i < select.getDegree(); i++)
orderBy.delColumn(bindWA, select.getColRefNameObj(i), SYSTEM_COLUMN);
}
else {
      // add the potential ORDER BY columns... omitting the ones that will
      // be in the select list anyway.
orderBy.addColumns(bindWA, *source.getColumnList(), SYSTEM_COLUMN);
orderBy.addColumns(bindWA, *source.getSystemColumnList(), SYSTEM_COLUMN);
for (i = 0; i < select.getDegree(); i++)
orderBy.delColumn(bindWA, select.getColRefNameObj(i), SYSTEM_COLUMN);
}
for (i = 0; i < select.getDegree(); i++)
orderBy.addColumn(bindWA, select.getColRefNameObj(i),
select.getValueId(i), USER_COLUMN);
bindWA->getCurrentScope()->setRETDesc(&orderBy);
// fix for defect 10-010522-2978
// If we need to move this OrderBy to the RelRoot above this one...
// move it to the rowsetReqdOrder_ of that RelRoot, otherwise keep
// it at this level... in the current RelRoot's reqdOrder_
ValueIdList & pRRO = getParentForRowsetReqdOrder() ?
getParentForRowsetReqdOrder()->rowsetReqdOrder_ :
reqdOrder();
// Replace any selIndexies in the orderByTree with what it refers to
// before we expand it.
// This is done so that we can deal with subqueries with degree > 1
// and MVFs.
ItemExpr *sPtr = orderByTree, *ePtr = orderByTree;
Int32 childIdx = 0;
NABoolean onlyOneEntry(TRUE);
CollIndex selListCount = compExpr().entries();
while (sPtr != NULL)
{
if (sPtr->getOperatorType() == ITM_ITEM_LIST)
{
ePtr = sPtr;
sPtr = ePtr->child(0);
childIdx = 0;
onlyOneEntry = FALSE;
}
if (sPtr->getOperatorType() == ITM_SEL_INDEX)
{
SelIndex * si = (SelIndex*)(sPtr);
CollIndex selIndex = si->getSelIndex();
if(selIndex == 0 || selIndex > selListCount)
{
*CmpCommon::diags() << DgSqlCode(-4007)
<< DgInt0((Lng32)si->getSelIndex())
<< DgInt1(selListCount);
bindWA->setErrStatus();
return NULL;
}
ValueId orderById = compExpr()[si->getSelIndex()-1];
if (ePtr->getOperatorType() == ITM_ITEM_LIST)
ePtr->child(childIdx) = orderById.getItemExpr();
else
ePtr = orderById.getItemExpr();
orderById.getItemExpr()->setInOrderByOrdinal(TRUE);
}
if ((ePtr->getArity() == 2) && ePtr->child(1) != NULL &&
ePtr->child(1)->getOperatorType() != ITM_ITEM_LIST &&
childIdx != 1)
childIdx = 1;
else
childIdx = 0;
sPtr = (childIdx == 1) ? ePtr->child(1) : NULL;
}
if (onlyOneEntry)
orderByTree = ePtr;
// If we had any ordinal expressions expand them in case there
// are any UDFs or subquery of degree > 1.
// Also expand any directly referenced UDFs and subqueries of degree > 1.
ItemExprList origOrderByList(orderByTree, bindWA->wHeap());
origOrderByList.convertToItemExpr()->
convertToValueIdList(pRRO, bindWA, ITM_ITEM_LIST);
// end fix for defect 10-010522-2978
if (bindWA->errStatus())
return NULL;
bindWA->getCurrentScope()->setRETDesc(getRETDesc());
bindWA->getCurrentScope()->context()->inOrderBy() = FALSE;
}
// validate that select list doesn't contain any expressions that cannot be
// grouped or ordered.
for (Lng32 selIndex = 0; selIndex < compExpr().entries(); selIndex++)
{
ItemExpr * ie = compExpr()[selIndex].getItemExpr();
if ((ie->inGroupByOrdinal()) || (ie->inOrderByOrdinal()))
{
if (NOT ie->canBeUsedInGBorOB(TRUE))
{
return NULL;
}
}
}
if (hasPartitionBy())
{
ItemExpr *partByTree = removePartitionByTree();
partByTree->convertToValueIdSet(partArrangement_, bindWA, ITM_ITEM_LIST);
if (bindWA->errStatus()) return NULL;
}
// fix for defect 10-010522-2978
// If we're the upper level RelRoot, we must check to see if we have
// any entries that need to be added to reqdOrder() and add them if
// there are any...
if ( rowsetReqdOrder_.entries() ) {
// We never expect for reqdOrder to contain any entries. But
// if it ever does, we want to be able to take a look at this
// code again to decide whether we should be appending to the
// reqdOrder list. Currently the code is written to append to
// the end of the reqdOrder list, which is likely to be the correct
// behavior even if there are entries in reqdOrder; we just think
// that someone should have the chance to rethink this in the event
// there are entries in reqdOrder and so we're making it fail here
// to allow/force someone to make the decision.
CMPASSERT(reqdOrder().entries() == 0);
// note: NAList<ValueIdList>::insert(const NAList<ValueIdList> &)
// actually does an append to the END of the list (not an
// insert at the head or after the current position).
reqdOrder().insert( rowsetReqdOrder_ );
}
// end fix for defect 10-010522-2978
// Bind the update column specification of a cursor declaration.
// Don't remove the tree: leave it for possible error 4118 in NormRelExpr.
if (updateColTree_) {
updateColTree_->convertToValueIdList(updateCol(), bindWA, ITM_ITEM_LIST);
if (bindWA->errStatus()) {
if (CmpCommon::diags()->contains(-4001))
*CmpCommon::diags() << DgSqlCode(-4117);
return NULL;
}
if (getGroupAttr()->isEmbeddedDelete()) { // QSTUFF
*CmpCommon::diags() << DgSqlCode(-4169);
bindWA->setErrStatus() ;
return NULL;
}
}
// check whether a CONTROL QUERY SHAPE statement is in effect.
// Do not do if this is a control query statement.
if (ActiveControlDB()->getRequiredShape()) {
OperatorTypeEnum op = child(0)->getOperatorType();
if (!child(0)->isAControlStatement() &&
op != REL_DESCRIBE &&
op != REL_EXPLAIN &&
op != REL_DDL &&
op != REL_LOCK &&
op != REL_UNLOCK &&
op != REL_SET_TIMEOUT &&
op != REL_STATISTICS &&
op != REL_TRANSACTION &&
op != REL_EXE_UTIL)
reqdShape_ = ActiveControlDB()->getRequiredShape()->getShape();
}
// If this is a parallel extract producer query:
// * the number of requested streams must be greater than one and
// not more than the number of configured CPUs
// * force a shape with an ESP exchange node immediately below
// the root
ComUInt32 numExtractStreams = getNumExtractStreams();
if (numExtractStreams_ > 0)
{
// Check the number of requested streams
NADefaults &defs = bindWA->getSchemaDB()->getDefaults();
NABoolean fakeEnv = FALSE;
ComUInt32 numConfiguredESPs = defs.getTotalNumOfESPsInCluster(fakeEnv);
if ((numExtractStreams == 1) || (numExtractStreams > numConfiguredESPs))
{
*CmpCommon::diags() << DgSqlCode(-4119)
<< DgInt0((Lng32) numConfiguredESPs);
bindWA->setErrStatus();
return NULL;
}
// Force the shape. There are three cases to consider:
// a. there is no required shape in the ControlDB
// b. there is a required shape and it is acceptable for this
// parallel extract.
// c. there is a required shape and it is not acceptable.
if (reqdShape_ == NULL)
{
// Case a.
// Manufacture an esp_exchange(cut,N) shape
reqdShape_ = new (bindWA->wHeap())
ExchangeForceWildCard(new (bindWA->wHeap()) CutOp(0),
ExchangeForceWildCard::FORCED_ESP_EXCHANGE,
ExchangeForceWildCard::ANY_LOGPART,
(Lng32) numExtractStreams_);
}
else
{
NABoolean reqdShapeIsOK = FALSE;
if (reqdShape_->getOperatorType() == REL_FORCE_EXCHANGE)
{
ExchangeForceWildCard *exch = (ExchangeForceWildCard *) reqdShape_;
ExchangeForceWildCard::forcedExchEnum whichType = exch->getWhich();
Lng32 howMany = exch->getHowMany();
if (whichType == ExchangeForceWildCard::FORCED_ESP_EXCHANGE &&
howMany == (Lng32) numExtractStreams_)
{
reqdShapeIsOK = TRUE;
}
}
if (reqdShapeIsOK)
{
// Case b.
// Do nothing
}
else
{
// Case c.
// Add an esp_exchange to the top of the required shape
RelExpr *child = reqdShape_;
reqdShape_ = new (bindWA->wHeap())
ExchangeForceWildCard(child,
ExchangeForceWildCard::FORCED_ESP_EXCHANGE,
ExchangeForceWildCard::ANY_LOGPART,
(Lng32) numExtractStreams_);
}
} // if (reqdShape_ == NULL) else ...
} // if (numExtractStreams_ > 0)
// Bind the base class.
//
RelExpr *boundExpr = bindSelf(bindWA);
if (bindWA->errStatus()) return boundExpr;
// If we have dynamic rowsets, we want to replace
// dynamic parameters with available inputs.
if (isTrueRoot() && bindWA->hasDynamicRowsetsInQuery()) {
ValueIdSet inputs = getGroupAttr()->getCharacteristicInputs();
UInt32 j = 0;
// this for loop is over the list of available inputs. We are replacing array
// parameters with hostvars introduced during HostArraysWA::processArrayHostVar
// The hostvars introduced in that method are contained in the inputs() list.
for (ValueId id = inputs.init(); inputs.next(id); inputs.advance(id)) {
if (id.getItemExpr()->getOperatorType() == ITM_DYN_PARAM) {
continue;
}
// We are assuming here that the hostvars introduced are in the same order as
// the parameter arrays in inputVars(), i.e. (hv_A, hv_B) corresponds to
// (?,?,?(as A), ?(as B))
while (j < inputVars().entries()) {
ItemExpr *ie = inputVars()[j].getItemExpr() ;
OperatorTypeEnum ieType = ie->getOperatorType() ;
if (( ieType != ITM_DYN_PARAM) ||
(((DynamicParam *) ie)->getRowsetSize() == 0))
{
          // If ie is not a DynamicParam, or is a scalar dynamic param, do not remove
          // it from inputVars_. From embedded SQL it is possible to have scalar and array
          // dynamic params in the same statement. This is not possible from ODBC.
j++;
}
else
break ;
}
if (j < inputVars().entries()) {
inputVars().removeAt(j);
inputVars().insertAt(j, id);
j++;
}
}
}
// RelRoot::codeGen() and Statement::execute() use TOPMOST root's accessOpts.
//
if (bindWA->getCurrentScope()->context()->stmtLevelAccessOptions())
if (!accessOptions().userSpecified()) // seems redundant
accessOptions() = *bindWA->getCurrentScope()->context()->stmtLevelAccessOptions();
// Update operations currently require SERIALIZABLE (== MP REPEATABLE_)
// locking level -- the QSTUFF-enabled DP2 now does this, supporting a true
// READ_COMMITTED that is STABLE rather than merely CLEAN.
if (!containsGenericUpdate(this)) {
// Genesis 10-990114-6293:
// This flag tells RelRoot::codeGen to set a flagbit in the root-tdb which
// cli/Statement::execute + compareTransModes() will look at --
// if set, then this "read-write" stmt will be allowed to execute
// in a run-time transmode of read-only W/O HAVING TO BE RECOMPILED.
readOnlyTransIsOK() = TRUE;
}
if (isTrueRoot()) {
if (updateCurrentOf()) {
// add child genericupdate's primary key hostvars to pkeyList.
// The getLeftmostScanNode() method will return the leftmost Scan node
// as the original scan node may have moved due to the IM tree.
pkeyList().insert(child(0)->castToRelExpr()->getLeftmostScanNode()->pkeyHvarList());
}
for(Int32 st=0; st < (Int32)bindWA->getStoiList().entries(); st++)
{
if(bindWA->getStoiList()[st]->getStoi()->isView())
viewStoiList_.insert(bindWA->getStoiList()[st]);
}
if(bindWA->inDDL())
ddlStoiList_.insert(bindWA->getStoiList());
// populate the list of all the routines open information of this query
stoiUdrList_.insert(bindWA->getUdrStoiList());
// populate the list of all the UDF information of this query
udfList_.insert(bindWA->getUDFList());
// check privileges
if (!checkPrivileges(bindWA))
{
bindWA->setErrStatus();
return NULL;
}
// store the trigger's list in the root
if (bindWA->getTriggersList())
{
triggersList_ =
new (bindWA->wHeap()) LIST(ComTimestamp)
(bindWA->wHeap(), bindWA->getTriggersList()->entries());
triggersList_->insert(*(bindWA->getTriggersList()));
// Don't allow OLT optimization when triggers are involved.
oltOptInfo().setOltOpt(FALSE);
}
// store the uninitialized mv list if there are any
// entries
if( bindWA->getUninitializedMvList() )
{
uninitializedMvList_ = new (bindWA->wHeap()) UninitializedMvNameList
(bindWA->wHeap(), bindWA->getUninitializedMvList()->entries());
uninitializedMvList_->insert( *(bindWA->getUninitializedMvList()) );
}
DBG( if (getenv("TVUSG_DEBUG")) bindWA->tableViewUsageList().display(); )
} // isTrueRoot
// Don't allow OLT optimization when ON STATEMENT MV refresh is involved.
if (bindWA->isBindingOnStatementMv())
oltOptInfo().setOltOpt(FALSE);
// disable esp parallelism for merge statements.
// See class RelRoot for details about this.
if ((isTrueRoot()) &&
(bindWA->isMergeStatement()))
{
setDisableESPParallelism(TRUE);
}
// Remove the current scope.
//
if (!isDontOpenNewScope()) // -- Triggers
bindWA->removeCurrentScope();
// In case we have a query of the form
// SET <host var list> = <select statement>
// we must update the value ids of the host variables in that list.
// See Assignment Statement Internal Spec (a project of Compound Statements).
if (assignmentStTree() &&
bindWA->getAssignmentStArea() &&
bindWA->getAssignmentStArea()->getAssignmentStHostVars() &&
!bindWA->getAssignmentStArea()->getAssignmentStHostVars()->
updateValueIds(compExpr(), assignmentStTree())) {
bindWA->setErrStatus();
return NULL;
}
if (getPredExprTree())
{
CMPASSERT(isTrueRoot());
ItemExpr * ie = removePredExprTree();
ie = ie->bindNode(bindWA);
if (bindWA->errStatus())
return NULL;
addPredExprTree(ie);
}
if (getFirstNRowsParam())
{
firstNRowsParam_ = firstNRowsParam_->bindNode(bindWA);
if (bindWA->errStatus())
return this;
const SQLInt si(FALSE, FALSE);
ValueId vid = firstNRowsParam_->castToItemExpr()->getValueId();
vid.coerceType(si, NA_NUMERIC_TYPE);
if (vid.getType().getTypeQualifier() != NA_NUMERIC_TYPE)
{
// 4045 must be numeric.
*CmpCommon::diags() << DgSqlCode(-4045) << DgString0(getTextUpper());
bindWA->setErrStatus();
return this;
}
}
if ((NOT hasOrderBy()) &&
((getFirstNRows() != -1) ||
(getFirstNRowsParam())))
{
// create a firstN node to retrieve firstN rows.
FirstN * firstn = new(bindWA->wHeap())
FirstN(child(0), getFirstNRows(), getFirstNRowsParam());
firstn->bindNode(bindWA);
if (bindWA->errStatus())
return NULL;
setChild(0, firstn);
// reset firstN indication in the root node.
setFirstNRows(-1);
setFirstNRowsParam(NULL);
}
// if we have no user-specified access options then
// get it from nearest enclosing scope that has one (if any)
if (!accessOptions().userSpecified()) {
StmtLevelAccessOptions *axOpts = bindWA->findUserSpecifiedAccessOption();
if (axOpts) {
accessOptions() = *axOpts;
}
}
if (bindWA->getHoldableType() == SQLCLIDEV_ANSI_HOLDABLE)
{
if (accessOptions().accessType() != ACCESS_TYPE_NOT_SPECIFIED_)
{
if (accessOptions().accessType() == REPEATABLE_)
{
*CmpCommon::diags() << DgSqlCode(-4381);
bindWA->setErrStatus();
return NULL;
}
}
else
{
TransMode::IsolationLevel il=CmpCommon::transMode()->getIsolationLevel();
if (CmpCommon::transMode()->ILtoAT(il) == REPEATABLE_ )
{
*CmpCommon::diags() << DgSqlCode(-4381);
bindWA->setErrStatus();
return NULL;
}
}
}
// The above code is in Scan::bindNode also.
// It would be nice to refactor this common code; someday.
return boundExpr;
} // RelRoot::bindNode()
// Present the select list as a tree of Item Expressions
ItemExpr *RelRoot::selectList()
{
return compExpr().rebuildExprTree(ITM_ITEM_LIST);
} // RelRoot::selectList()
// Returns current place that assignmentStTree_ points to and
// sets that pointer to NULL
// LCOV_EXCL_START - cnu
ItemExpr * RelRoot::removeAssignmentStTree()
{
ItemExpr* tempTree = assignmentStTree_;
assignmentStTree_ = NULL;
return tempTree;
}
// LCOV_EXCL_STOP
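// ----------------------------------------------------------------------------
// Method: checkColPriv
//
// Checks whether pPrivInfo grants privType on every column of this table
// that the query references (the insert, update or select column list).
// Returns true if the user holds the privilege on all referenced columns;
// otherwise returns false and reports error 4481, listing the missing
// columns when the user holds the privilege on at least one column.
// ----------------------------------------------------------------------------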
bool OptSqlTableOpenInfo::checkColPriv(const PrivType privType,
const PrivMgrUserPrivs *pPrivInfo)
{
CMPASSERT (pPrivInfo);
NATable* table = getTable();
NAString columns = "";
if (!isColumnPrivType(privType))
{
*CmpCommon::diags() << DgSqlCode(-4481)
<< DgString0(PrivMgrUserPrivs::convertPrivTypeToLiteral(privType).c_str())
<< DgString1(table->getTableName().getQualifiedNameAsAnsiString())
<< DgString2(columns);
return false;
}
bool hasPriv = true;
// initialize to something, gets set appropriately below
LIST (Lng32) * colList = NULL ;
switch (privType)
{
case INSERT_PRIV:
{
colList = (LIST (Lng32) *)&(getInsertColList());
break;
}
case UPDATE_PRIV:
{
colList = (LIST (Lng32) *)&(getUpdateColList());
break;
}
case SELECT_PRIV:
{
colList = (LIST (Lng32) *)&(getSelectColList());
break;
}
default:
CMPASSERT(FALSE); // delete has no column privileges.
}
bool collectColumnNames = false;
if (pPrivInfo->hasAnyColPriv(privType))
{
collectColumnNames = true;
columns += "(columns:" ;
}
bool firstColumn = true;
for(size_t i = 0; i < colList->entries(); i++)
{
size_t columnNumber = (*colList)[i];
if (!(pPrivInfo->hasColPriv(privType,columnNumber)))
{
hasPriv = false;
if (firstColumn && collectColumnNames)
{
columns += " ";
firstColumn = false;
}
else if (collectColumnNames)
columns += ", ";
if (collectColumnNames)
columns += table->getNAColumnArray()[columnNumber]->getColName();
}
}
if (collectColumnNames)
columns += ")" ;
// (colList->entries() == 0) ==> we have a select count(*) type query or a
// select 1 from T type query. In other words the table needs to be accessed
// but no column has been explicitly referenced.
// For such queries if the user has privilege on any one column that is
// sufficient. collectColumnNames indicates whether the user has privilege
// on at least one column. The following if statement applies only to selects.
// For update and insert we do not expect colList to be empty.
if ((colList->entries() == 0) && !collectColumnNames)
{
hasPriv = false;
columns = "";
}
if (!hasPriv)
*CmpCommon::diags() << DgSqlCode(-4481)
<< DgString0(PrivMgrUserPrivs::convertPrivTypeToLiteral(privType).c_str())
<< DgString1(table->getTableName().getQualifiedNameAsAnsiString())
<< DgString2(columns);
return hasPriv;
}
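// ----------------------------------------------------------------------------
// Method: checkFirstNRowsNotAllowed
//
// Called on non-true roots only. Returns TRUE when this root carries a
// [FIRST/ANY n] or [LAST 0/1] specification in a context where it is not
// allowed (e.g. a subquery), so the caller can raise error 4102. FirstN is
// tolerated with a rowset subroot, inside a compound statement, when
// inlining enables it, or when CQD ALLOW_FIRSTN_IN_SUBQUERIES is ON.
// ----------------------------------------------------------------------------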
NABoolean RelRoot::checkFirstNRowsNotAllowed(BindWA *bindWA)
{
// do not call this method on a true root.
CMPASSERT(NOT isTrueRoot());
//*****************************************************************
// FirstNRows >= 0 (for FirstN)
// == -2 For Last 0
// == -3 For Last 1
// These values are set in parser; see the code SqlParser.y under
// Non-Terminal querySpecification when firstN is specified
//******************************************************************
if ( (getFirstNRows() >= 0 ||
getFirstNRows() == -2 ||
getFirstNRows() == -3) && // this root has firstn
(!((getInliningInfo().isEnableFirstNRows()) ||
(getHostArraysArea() && getHostArraysArea()->getHasSelectIntoRowsets()) || //firstn is allowed with a rowset subroot
(assignmentStTree())))) // first n is allowed in a CS. Presence of assignmentStTree
// on a non true root implies presence of select into statement
// within a cs
{
// 4102 The [FIRST/ANY n] syntax can only be used in an outermost SELECT statement.
if (CmpCommon::getDefault(ALLOW_FIRSTN_IN_SUBQUERIES) == DF_OFF)
return TRUE;
}
return FALSE;
}
// ----------------------------------------------------------------------------
// Method: checkPrivileges
//
// This method:
// - Verifies that the user executing the query has the necessary privileges
// - Adds security keys to RelRoot class that need to be checked when priv
// changes (revokes) are performed. Security keys are part of the Query
// Invalidation feature.
// - Also, removes any previously cached entries if the user has no priv
//
// Input: pointer to the binder work area
// Output: result of the check
// TRUE - user has priv
// FALSE - user does not have priv or unexpected error occurred
//
// The ComDiags area is populated with error details
// The BindWA flag setFailedForPrivileges is set to TRUE if priv check fails
// ----------------------------------------------------------------------------
NABoolean RelRoot::checkPrivileges(BindWA* bindWA)
{
// If internal caller and not part of explain, then return
if (Get_SqlParser_Flags(INTERNAL_QUERY_FROM_EXEUTIL))
return TRUE;
// If qiPath (used for testing) is not 0, skip root user check
NAString qiPath = "";
CmpCommon::getDefault(QI_PATH, qiPath, FALSE);
if (qiPath.length() == 0 && ComUser::isRootUserID())
return TRUE;
// See if there is anything to check
// StoiList contains any tables used in the query
// UdrStoiList contains any routines used in the query
// CoProcAggrList contains any queries using the aggregate co-processor
// SeqValList contains any sequences
if (bindWA->getStoiList().entries() == 0 &&
bindWA->getUdrStoiList().entries() == 0 &&
bindWA->getCoProcAggrList().entries() == 0 &&
bindWA->getSeqValList().entries() == 0)
return TRUE;
// If authorization is not enabled, then return TRUE
if (!CmpCommon::context()->isAuthorizationEnabled())
return TRUE;
ComBoolean QI_enabled = (CmpCommon::getDefault(CAT_ENABLE_QUERY_INVALIDATION) == DF_ON);
NABoolean RemoveNATableEntryFromCache = FALSE ;
// Have the ComSecurityKey constructor compute the hash value for the User's ID.
// Note: The following code doesn't care about the object's hash value or the resulting
// ComSecurityKey's ActionType....we just need the hash value for the User's ID.
int64_t objectUID = 12345;
Int32 thisUserID = ComUser::getCurrentUser();
ComSecurityKey userKey( thisUserID , objectUID
, SELECT_PRIV
, ComSecurityKey::OBJECT_IS_OBJECT
);
uint32_t userHashValue = userKey.getSubjectHashValue();
// Set up a PrivMgrCommands class in case we need to get privilege information
NAString privMDLoc;
CONCAT_CATSCH(privMDLoc,CmpSeabaseDDL::getSystemCatalogStatic(),SEABASE_PRIVMGR_SCHEMA);
PrivMgrCommands privInterface(privMDLoc.data(), CmpCommon::diags(), PrivMgr::PRIV_INITIALIZED);
PrivStatus retcode = STATUS_GOOD;
// ==> Check privileges for tables used in the query.
SqlTableOpenInfo * stoi = NULL ;
OptSqlTableOpenInfo * optStoi = NULL;
for(Int32 i=0; i<(Int32)bindWA->getStoiList().entries(); i++)
{
RemoveNATableEntryFromCache = FALSE ; // Initialize each time through loop
optStoi = (bindWA->getStoiList())[i];
stoi = optStoi->getStoi();
NATable* tab = optStoi->getTable();
// System metadata tables do not, by default, have privileges stored in the
// NATable structure. Go ahead and retrieve them now.
PrivMgrUserPrivs *pPrivInfo = tab->getPrivInfo();
PrivMgrUserPrivs privInfo;
if (!pPrivInfo)
{
CmpSeabaseDDL cmpSBD(STMTHEAP);
if (cmpSBD.switchCompiler(CmpContextInfo::CMPCONTEXT_TYPE_META))
{
if (CmpCommon::diags()->getNumber(DgSqlCode::ERROR_) == 0)
*CmpCommon::diags() << DgSqlCode( -4400 );
return FALSE;
}
retcode = privInterface.getPrivileges( tab->objectUid().get_value(),
tab->getObjectType(), thisUserID,
privInfo);
cmpSBD.switchBackCompiler();
if (retcode != STATUS_GOOD)
{
tab->setRemoveFromCacheBNC(TRUE);
bindWA->setFailedForPrivileges(TRUE);
*CmpCommon::diags() << DgSqlCode( -1034 );
return FALSE;
}
pPrivInfo = &privInfo;
}
// Check each primary DML privilege to see if the query requires it. If
// so, verify that the user has the privilege
bool insertQIKeys = (QI_enabled && tab->getSecKeySet().entries() > 0);
for (Int32 i = FIRST_DML_PRIV; i <= LAST_PRIMARY_DML_PRIV; i++)
{
if (stoi->getPrivAccess((PrivType)i))
{
if (!pPrivInfo->hasPriv((PrivType)i) && !optStoi->checkColPriv((PrivType)i, pPrivInfo))
RemoveNATableEntryFromCache = TRUE;
else
if (insertQIKeys)
findKeyAndInsertInOutputList(tab->getSecKeySet(),userHashValue,(PrivType)(i));
}
}
// wait until all the primary DML privileges have been checked before
// setting failure information
if ( RemoveNATableEntryFromCache )
{
bindWA->setFailedForPrivileges( TRUE );
tab->setRemoveFromCacheBNC(TRUE); // To be removed by CmpMain before Compilation retry
}
} // for loop over tables in stoi list
// ==> Check privileges for functions and procedures used in the query.
NABoolean RemoveNARoutineEntryFromCache = FALSE ;
if (bindWA->getUdrStoiList().entries())
{
for(Int32 i=0; i<(Int32)bindWA->getUdrStoiList().entries(); i++)
{
// Privilege info for the user/routine combination is stored in the
// NARoutine object.
OptUdrOpenInfo *udrStoi = (bindWA->getUdrStoiList())[i];
NARoutine* rtn = udrStoi->getNARoutine();
PrivMgrUserPrivs *pPrivInfo = rtn->getPrivInfo();
NABoolean insertQIKeys = FALSE;
if (QI_enabled && (rtn->getSecKeySet().entries() > 0))
insertQIKeys = TRUE;
if (pPrivInfo == NULL)
{
RemoveNARoutineEntryFromCache = TRUE ;
*CmpCommon::diags() << DgSqlCode( -1034 );
}
// Verify that the user has execute priv
else
{
if (pPrivInfo->hasPriv(EXECUTE_PRIV))
{
// do this only if QI is enabled and object has security keys defined
if ( insertQIKeys )
findKeyAndInsertInOutputList(rtn->getSecKeySet(), userHashValue, EXECUTE_PRIV);
}
// plan requires privilege but user has none, report an error
else
{
RemoveNARoutineEntryFromCache = TRUE ;
*CmpCommon::diags()
<< DgSqlCode( -4482 )
<< DgString0( "EXECUTE" )
<< DgString1( udrStoi->getUdrName() );
}
}
if ( RemoveNARoutineEntryFromCache )
{
bindWA->setFailedForPrivileges(TRUE);
// If routine exists in cache, add it to the list to remove
NARoutineDB *pRoutineDBCache = bindWA->getSchemaDB()->getNARoutineDB();
NARoutineDBKey key(rtn->getSqlName(), bindWA->wHeap());
NARoutine *cachedNARoutine = pRoutineDBCache->get(bindWA, &key);
if (cachedNARoutine != NULL)
pRoutineDBCache->moveRoutineToDeleteList(cachedNARoutine, &key);
}
} // for loop over UDRs
} // end if any UDRs.
// ==> Check privs on any CoprocAggrs used in the query.
for (Int32 i=0; i<(Int32)bindWA->getCoProcAggrList().entries(); i++)
{
RemoveNATableEntryFromCache = FALSE ; // Initialize each time through loop
ExeUtilHbaseCoProcAggr *coProcAggr = (bindWA->getCoProcAggrList())[i];
NATable* tab = bindWA->getSchemaDB()->getNATableDB()->
get(coProcAggr->getCorrName(), bindWA, NULL);
Int32 numSecKeys = 0;
// Privilege info for the user/table combination is stored in the NATable
// object.
PrivMgrUserPrivs* pPrivInfo = tab->getPrivInfo();
PrivMgrUserPrivs privInfo;
// System metadata tables do not, by default, have privileges stored in the
// NATable structure. Go ahead and retrieve them now.
if (!pPrivInfo)
{
CmpSeabaseDDL cmpSBD(STMTHEAP);
if (cmpSBD.switchCompiler(CmpContextInfo::CMPCONTEXT_TYPE_META))
{
if (CmpCommon::diags()->getNumber(DgSqlCode::ERROR_) == 0)
*CmpCommon::diags() << DgSqlCode( -4400 );
return FALSE;
}
retcode = privInterface.getPrivileges( tab->objectUid().get_value(),
tab->getObjectType(), thisUserID,
privInfo);
cmpSBD.switchBackCompiler();
if (retcode != STATUS_GOOD)
{
bindWA->setFailedForPrivileges( TRUE );
RemoveNATableEntryFromCache = TRUE;
*CmpCommon::diags() << DgSqlCode( -1034 );
return FALSE;
}
pPrivInfo = &privInfo;
}
// Verify that the user has select priv
// Select priv is needed for EXPLAIN requests, so no special check is done
NABoolean insertQIKeys = FALSE;
if (QI_enabled && (tab->getSecKeySet().entries()) > 0)
insertQIKeys = TRUE;
if (pPrivInfo->hasPriv(SELECT_PRIV))
{
// do this only if QI is enabled and object has security keys defined
if ( insertQIKeys )
findKeyAndInsertInOutputList(tab->getSecKeySet(), userHashValue, SELECT_PRIV );
}
// plan requires privilege but user has none, report an error
else
{
bindWA->setFailedForPrivileges( TRUE );
tab->setRemoveFromCacheBNC(TRUE); // To be removed by CmpMain before Compilation retry
*CmpCommon::diags()
<< DgSqlCode( -4481 )
<< DgString0( "SELECT" )
<< DgString1( tab->getTableName().getQualifiedNameAsAnsiString() );
}
} // for loop over coprocs
// ==> Check privs on any sequence generators used in the query.
for (Int32 i=0; i<(Int32)bindWA->getSeqValList().entries(); i++)
{
SequenceValue *seqVal = (bindWA->getSeqValList())[i];
NATable* tab = const_cast<NATable*>(seqVal->getNATable());
// No need to save priv info in NATable object representing a sequence;
// these NATables are not cached.
PrivMgrUserPrivs privInfo;
CmpSeabaseDDL cmpSBD(STMTHEAP);
if (cmpSBD.switchCompiler(CmpContextInfo::CMPCONTEXT_TYPE_META))
{
if (CmpCommon::diags()->getNumber(DgSqlCode::ERROR_) == 0)
*CmpCommon::diags() << DgSqlCode( -4400 );
return FALSE;
}
retcode = privInterface.getPrivileges(tab->objectUid().get_value(),
COM_SEQUENCE_GENERATOR_OBJECT,
thisUserID, privInfo);
cmpSBD.switchBackCompiler();
if (retcode != STATUS_GOOD)
{
bindWA->setFailedForPrivileges(TRUE);
RemoveNATableEntryFromCache = TRUE;
*CmpCommon::diags() << DgSqlCode( -1034 );
return FALSE;
}
// Verify that the user has usage priv
if (privInfo.hasPriv(USAGE_PRIV))
{
// Do we need to add any QI keys to the plan?
}
// plan requires privilege but user has none, report an error
else
{
bindWA->setFailedForPrivileges( TRUE );
RemoveNATableEntryFromCache = TRUE;
*CmpCommon::diags()
<< DgSqlCode( -4491 )
<< DgString0( "USAGE" )
<< DgString1( tab->getTableName().getQualifiedNameAsAnsiString());
}
} // for loop over sequences
return !bindWA->failedForPrivileges() ;
}
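// ----------------------------------------------------------------------------
// Method: findKeyAndInsertInOutputList
//
// From a table's security key set, picks the best ComSecurityKey for the
// given privilege: a key whose subject is the user (or PUBLIC) is preferred
// over one whose subject is a role. The chosen key is added to
// securityKeySet_. If the privilege came through a role, the
// COM_QI_USER_GRANT_ROLE key(s) linking the user to that role are added as
// well, so that revoking the role invalidates the plan. An empty key set
// results in nothing being added.
// ----------------------------------------------------------------------------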
void RelRoot::findKeyAndInsertInOutputList( ComSecurityKeySet KeysForTab
, const uint32_t userHashValue
, const PrivType which
)
{
ComSecurityKey dummyKey;
ComQIActionType objectActionType =
dummyKey.convertBitmapToQIActionType ( which, ComSecurityKey::OBJECT_IS_OBJECT );
ComSecurityKey * UserSchemaKey = NULL;
ComSecurityKey * UserObjectKey = NULL;
ComSecurityKey * RoleSchemaKey = NULL;
ComSecurityKey * RoleObjectKey = NULL;
ComSecurityKey * BestKey = NULL;
ComSecurityKey * thisKey = &(KeysForTab[0]);
uint32_t hashValueOfPublic = 0;
// NOTE: hashValueOfPublic will be the same for all keys, so we generate it only once.
if ( KeysForTab.entries() > 0 )
hashValueOfPublic = thisKey->generateHash(PUBLIC_USER);
// Traverse List looking for ANY appropriate ComSecurityKey
for ( Int32 ii = 0; ii < (Int32)(KeysForTab.entries()); ii++ )
{
thisKey = &(KeysForTab[ii]);
if ( thisKey->getSecurityKeyType() == objectActionType )
{
if ( thisKey->getSubjectHashValue() == hashValueOfPublic ||
thisKey->getSubjectHashValue() == userHashValue )
{
if ( ! UserObjectKey ) UserObjectKey = thisKey;
}
else if ( ! RoleObjectKey ) RoleObjectKey = thisKey;
}
else {;} // Not right action type, just continue traversing.
}
if ( UserObjectKey ) BestKey = UserObjectKey ;
else if ( RoleObjectKey ) BestKey = RoleObjectKey ;
if ( BestKey == NULL)
return; // Sometimes there aren't any security keys
securityKeySet_.insert(*BestKey);
uint32_t SubjHashValue = BestKey->getSubjectHashValue();
hashValueOfPublic = BestKey->generateHash(PUBLIC_USER);
// Check whether this privilege was granted to PUBLIC. If so, nothing more to check.
if ( SubjHashValue == hashValueOfPublic )
return;
while ( SubjHashValue != userHashValue ) //While we see a ComSecurityKey for a Role
{
NABoolean found = FALSE;
for ( Int32 ii = 0; ii < (Int32)(KeysForTab.entries()); ii++ )
{
// If this ComSecurityKey is a GRANT type and the grantee (the object)
// is the Role specified by SubjHashValue, then break out of inner loop.
ComSecurityKey * thisKey = &(KeysForTab[ii]);
if ( ( thisKey->getObjectHashValue() == SubjHashValue ) &&
( (thisKey->getSecurityKeyType() == COM_QI_USER_GRANT_ROLE ) ) )
{
securityKeySet_.insert(*thisKey); // Insert this GRANT type ComSecurityKey into the Plan
found = TRUE;
SubjHashValue = thisKey->getSubjectHashValue();
break; // We found the user or Role which granted the user the privilege
}
}
// found should never be FALSE
CMPASSERT(found);
}
}
// -----------------------------------------------------------------------
// member functions for class GroupByAgg
// -----------------------------------------------------------------------
RelExpr *GroupByAgg::bindNode(BindWA *bindWA)
{
NABoolean specialMode =
((CmpCommon::getDefault(MODE_SPECIAL_1) == DF_ON) ||
(CmpCommon::getDefault(MODE_SPECIAL_2) == DF_ON));
if (nodeIsBound())
{
bindWA->getCurrentScope()->setRETDesc(getRETDesc());
return this;
}
//
// add any aggregate functions found in the parent node(s)
//
BindScope *currScope = bindWA->getCurrentScope();
aggregateExpr_ += currScope->getUnresolvedAggregates();
currScope->getUnresolvedAggregates().clear();
//
// Bind the child nodes.
//
currScope->context()->lookAboveToDecideSubquery() = TRUE;
bindChildren(bindWA);
currScope->context()->lookAboveToDecideSubquery() = FALSE;
if (bindWA->errStatus()) return this;
bindWA->getCurrentScope()->setRETDesc(getRETDesc());
// QSTUFF
NAString fmtdList(bindWA->wHeap());
LIST(TableNameMap*) xtnmList(bindWA->wHeap());
bindWA->getTablesInScope(xtnmList, &fmtdList);
// can be removed when supporting aggregates on streams
if (getGroupAttr()->isStream()){
*CmpCommon::diags() << DgSqlCode(-4162) << DgString0(fmtdList);
bindWA->setErrStatus();
return this;
}
if ((getGroupAttr()->isEmbeddedUpdateOrDelete()) ||
(bindWA->isEmbeddedIUDStatement())) {
*CmpCommon::diags() << DgSqlCode(-4163) << DgString0(fmtdList)
<< (getGroupAttr()->isEmbeddedUpdate() ?
DgString1("UPDATE"):DgString1("DELETE"));
bindWA->setErrStatus();
return this;
}
// QSTUFF
// if unresolved aggregate functions have been found in the children of the
// Groupby node, that would mean that we are referencing aggregates before
// the groupby operation is performed
//
if (checkUnresolvedAggregates(bindWA))
return this;
//
// Detach the item expression tree for the grouping column list, bind it,
// convert it to a ValueIdSet, and attach it to the GroupByAgg node.
//
ItemExpr *groupExprTree = removeGroupExprTree();
if (groupExprTree) {
currScope->context()->inGroupByClause() = TRUE;
groupExprTree->convertToValueIdSet(groupExpr(), bindWA, ITM_ITEM_LIST);
currScope->context()->inGroupByClause() = FALSE;
if (bindWA->errStatus()) return this;
ValueIdList groupByList(groupExpr());
for (CollIndex i = 0; i < groupByList.entries(); i++)
{
ValueId vid = groupByList[i];
vid.getItemExpr()->setIsGroupByExpr(TRUE);
}
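// When GROUP_BY_USING_ORDINAL (or a special mode) is in effect, a grouping
// item may have been bound as a SelIndex that refers to a renamed
// select-list column. Illustrative example:
//   SELECT a+1 AS x FROM t GROUP BY x;
// "x" binds to SelIndex(1); if the underlying base column is visible in the
// child RETDesc, the SelIndex is replaced below with the base column's
// value id and the column is marked as grouped.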
if (((CmpCommon::getDefault(GROUP_BY_USING_ORDINAL) != DF_OFF) ||
(specialMode)) &&
(groupExprTree != NULL) &&
(getParentRootSelectList() != NULL))
{
RETDesc * childRETDesc = child(0)->getRETDesc();
ItemExprList origSelectList(getParentRootSelectList(), bindWA->wHeap());
for (CollIndex i = 0; i < groupByList.entries(); i++)
{
ValueId vid = groupByList[i];
if((vid.getItemExpr()->getOperatorType() == ITM_SEL_INDEX)&&
(((SelIndex*)(vid.getItemExpr()))->renamedColNameInGrbyClause()))
{
ULng32 indx = ((SelIndex*)(vid.getItemExpr()))->getSelIndex() - 1;
if (origSelectList.entries() > indx &&
origSelectList[indx]->getOperatorType() == ITM_RENAME_COL)
{
const ColRefName &selectListColRefName =
*((RenameCol *)origSelectList[indx])->getNewColRefName();
ColumnNameMap *baseColExpr =
childRETDesc->findColumn(selectListColRefName);
if (baseColExpr)
{
groupExpr().remove(vid);
groupExpr().insert(baseColExpr->getValueId());
baseColExpr->getColumnDesc()->setGroupedFlag();
origSelectList[indx]->setInGroupByOrdinal(FALSE);
}
}
}
}
if (getSelPredTree())
{
ItemExpr * havingPred = (ItemExpr *) getSelPredTree();
// see if having expr refers to any renamed col in the select list.
// that is NOT a name exposed by child RETDesc.
// If it does, replace it with SelIndex.
// For now, do this for special1 mode and only if the having
// is a simple pred of the form: col <op> value.
// Later, we can extend this to all kind of having pred by
// traversing the having pred tree and replacing renamed cols.
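// Illustrative example:
//   SELECT a AS x FROM t GROUP BY 1 HAVING x > 5;
// "x" is not exposed by the child RETDesc, so the reference is replaced
// with the matching SelIndex before the having predicate is bound.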
NABoolean replaced = FALSE;
NABoolean notAllowedWithSelIndexInHaving = FALSE;
replaceRenamedColInHavingWithSelIndex(
bindWA, havingPred, origSelectList, replaced,
notAllowedWithSelIndexInHaving,child(0)->getRETDesc());
if (bindWA->errStatus())
return this;
if (replaced)
{
if (notAllowedWithSelIndexInHaving)
{
*CmpCommon::diags() << DgSqlCode(-4196) ;
bindWA->setErrStatus();
return this;
}
setSelIndexInHaving(TRUE);
}
}
setParentRootSelectList(NULL);
}
// Indicate that we are not in a scalar groupby. Any aggregate
// functions found in the select list or having clause cannot
// evaluate to NULL unless their argument is null.
currScope->context()->inScalarGroupBy() = FALSE;
}
//
// bind the having predicates and attach the resulting value id set
// to the node (as a selection predicate on the groupby node)
//
ItemExpr *havingPred = removeSelPredTree();
if (havingPred && NOT selIndexInHaving())
{
currScope->context()->inHavingClause() = TRUE;
havingPred->convertToValueIdSet(selectionPred(), bindWA, ITM_AND);
currScope->context()->inHavingClause() = FALSE;
if (bindWA->errStatus())
return this;
}
//
// Bind the base class.
//
RelExpr *boundExpr = bindSelf(bindWA);
if (bindWA->errStatus()) return boundExpr;
if ((havingPred) &&
(selIndexInHaving()))
{
addSelPredTree(havingPred);
}
//
// Get the aggregate expressions from the list that has accumulated
// in the current bind scope and clear the list in the bind scope --
// but first, if Tuple::bindNode()/checkTupleElementsAreAllScalar()
// created this node, add the subquery aggregate expr
// (Genesis 10-000221-6676).
//
if (aggregateExprTree_) { // only Binder, not Parser, should put anything here
// CMPASSERT(bindWA->getCurrentScope()->context()->inTupleList());
CMPASSERT(aggregateExprTree_->nodeIsBound() ||
aggregateExprTree_->child(0)->nodeIsBound());
aggregateExprTree_ = aggregateExprTree_->bindNode(bindWA);
if (bindWA->errStatus()) return boundExpr;
aggregateExpr_ += aggregateExprTree_->getValueId();
aggregateExprTree_ = NULL;
}
aggregateExpr_ += currScope->getUnresolvedAggregates();
currScope->getUnresolvedAggregates().clear();
getRETDesc()->setGroupedFlag();
return boundExpr;
} // GroupByAgg::bindNode()
// -----------------------------------------------------------------------
// member functions for class Scan
// -----------------------------------------------------------------------
////////////////////////////////////////////////////////////////////////
// A list of 'fabricated' hostvar representing the hostvars is generated
// that will contain the primary key values. These primary key
// values are retrieved at runtime from the cursor statement
// specified in the 'current of' clause. A predicate of the
// form 'where pkey1 = :pkey1 and pkey2 = :pkey2...' is attached
// to the selection pred of this node. The hostvar values are
// then passed in by the root node to its child and they reach
// this node at runtime where the 'where' predicate is evaluated.
////////////////////////////////////////////////////////////////////////
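// Illustrative example: for a cursor on a table with primary key (k1, k2),
// UPDATE ... WHERE CURRENT OF c produces the fabricated predicate
//   k1 = :_upd_pkey_HostVar0 AND k2 = :_upd_pkey_HostVar1
// and, for updates, an additional CASE expression that raises
// EXE_CURSOR_UPDATE_CONFLICT when the non-key column values no longer
// match those fetched by the cursor.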
void Scan::bindUpdateCurrentOf(BindWA *bindWA, NABoolean updateQry)
{
ValueIdList keyList = getTableDesc()->getClusteringIndex()->getIndexKey();
ItemExpr * rootPtr = NULL;
char hvName[30];
CollIndex i = 0;
for (i = 0; i < keyList.entries(); i++)
{
ValueId vid = keyList[i];
// Fabricate a name for the i'th host variable,
// make a hostvar,add it to pkeyHvarList.
sprintf(hvName,"_upd_pkey_HostVar%d",i);
HostVar *hv = new(bindWA->wHeap()) HostVar(hvName, &vid.getType(), TRUE);
hv->bindNode(bindWA);
pkeyHvarList().insert(hv->getValueId());
// Build a 'pkey = pkey_hvar' predicate.
ItemExpr * eqPred = new(bindWA->wHeap())
BiRelat(ITM_EQUAL, vid.getItemExpr(), hv);
if (!rootPtr)
rootPtr = eqPred;
else
rootPtr = new(bindWA->wHeap()) BiLogic(ITM_AND, rootPtr, eqPred);
} // loop over all pkey columns
if (updateQry)
{
ItemExpr * updCheckPtr = NULL;
ValueIdList nonKeyColList;
getTableDesc()->getClusteringIndex()->getNonKeyColumnList(nonKeyColList);
for (i = 0; i < nonKeyColList.entries(); i++)
{
ValueId vid = nonKeyColList[i];
// Fabricate a name for the i'th host variable,
// make a hostvar,add it to pkeyHvarList.
sprintf(hvName,"_upd_col_HostVar%d",i);
HostVar *hv = new(bindWA->wHeap()) HostVar(hvName, &vid.getType(), TRUE);
hv->bindNode(bindWA);
pkeyHvarList().insert(hv->getValueId());
// Build a 'col = col_hvar' predicate.
ItemExpr * eqPred = new(bindWA->wHeap())
BiRelat(ITM_EQUAL, vid.getItemExpr(), hv, TRUE);
if (!updCheckPtr)
updCheckPtr = eqPred;
else
updCheckPtr =
new(bindWA->wHeap()) BiLogic(ITM_AND, updCheckPtr, eqPred);
} // loop over all pkey columns
if (updCheckPtr)
{
updCheckPtr = new (bindWA->wHeap())
Case(NULL,
new (bindWA->wHeap())
IfThenElse(updCheckPtr,
new (bindWA->wHeap()) BoolVal(ITM_RETURN_TRUE),
new (bindWA->wHeap())
BoolVal(ITM_RETURN_TRUE,
new (bindWA->wHeap())
RaiseError(-(Lng32)EXE_CURSOR_UPDATE_CONFLICT))));
rootPtr = new(bindWA->wHeap()) BiLogic(ITM_AND, rootPtr, updCheckPtr);
}
}
// rootPtr->bindNode(bindWA);
// add this new tree to the existing selection predicate
addSelPredTree(rootPtr);
bindSelf(bindWA); // just in case
} // Scan::bindUpdateCurrentOf()
// Every Scan and every GenericUpdate has its own stoi,
// plus copies of some of these stoi's are copied to the BindWA
//
// The scan/gu stoi's will become ex_partn_access stoi's
//
// The stoiList copies in BindWA will have their security
// checked in the binder, in RelRoot::checkPrivileges
//
// Stoi's must exist for every table/view/MV/index.
// Stoi's that are not copied to the BindWA are those for which Ansi mandates
// that no security checking be done (e.g., indexes).
//
OptSqlTableOpenInfo *setupStoi(OptSqlTableOpenInfo *&optStoi_,
BindWA *bindWA,
const RelExpr *re,
const NATable *naTable,
const CorrName &corrName,
NABoolean noSecurityCheck)
{
// Get the PHYSICAL (non-Ansi/non-delimited) filename of the table or view.
CMPASSERT(!naTable->getViewText() || naTable->getViewFileName());
NAString fileName( naTable->getViewText() ?
(NAString)naTable->getViewFileName() :
naTable->getClusteringIndex()->
getFileSetName().getQualifiedNameAsString(),
bindWA->wHeap());
SqlTableOpenInfo * stoi_ = new (bindWA->wHeap()) SqlTableOpenInfo;
optStoi_ = new(bindWA->wHeap()) OptSqlTableOpenInfo(stoi_,
corrName,
bindWA->wHeap());
stoi_->setFileName(convertNAString(fileName, bindWA->wHeap()));
if (naTable->getIsSynonymTranslationDone())
{
stoi_->setAnsiName(convertNAString(
naTable->getSynonymReferenceName(),
bindWA->wHeap()));
}
else
{
stoi_->setAnsiName(convertNAString(
naTable->getTableName().getQualifiedNameAsAnsiString(),
bindWA->wHeap()));
}
if(naTable->isUMDTable() || naTable->isSMDTable()
|| naTable->isMVUMDTable() || naTable->isTrigTempTable())
{
stoi_->setIsMXMetadataTable(1);
}
if (NOT corrName.getCorrNameAsString().isNull())
{
// a correlation name was specified
stoi_->setCorrName(convertNAString(
corrName.getCorrNameAsString(),
bindWA->wHeap()));
}
// Materialized-View is considered as a regular table
stoi_->setSpecialTable(naTable->getSpecialType() != ExtendedQualName::NORMAL_TABLE &&
naTable->getSpecialType() != ExtendedQualName::MV_TABLE);
stoi_->setIsView(naTable->getViewText() ? TRUE : FALSE);
if (naTable->isHbaseTable())
stoi_->setIsHbase(TRUE);
stoi_->setLocationSpecified(corrName.isLocationNameSpecified() ||
corrName.isPartitionNameSpecified() );
stoi_->setUtilityOpen(corrName.isUtilityOpenIdSpecified());
stoi_->setUtilityOpenId(corrName.getUtilityOpenId());
stoi_->setIsNSAOperation(corrName.isNSAOperation());
if (! naTable->getViewText())
stoi_->setIsAudited(naTable->getClusteringIndex()->isAudited());
switch (re->getOperatorType())
{
case REL_UNARY_INSERT:
case REL_LEAF_INSERT:
stoi_->setInsertAccess();
break;
case REL_UNARY_UPDATE:
{
stoi_->setUpdateAccess();
if (((GenericUpdate*)re)->isMerge())
stoi_->setInsertAccess();
}
break;
case REL_UNARY_DELETE:
case REL_LEAF_DELETE:
{
stoi_->setDeleteAccess();
if (((GenericUpdate*)re)->isMerge())
stoi_->setInsertAccess();
if (((Delete*)re)->isFastDelete())
stoi_->setSelectAccess();
}
break;
case REL_SCAN:
case REL_LOCK:
case REL_UNLOCK:
case REL_HBASE_COPROC_AGGR:
stoi_->setSelectAccess();
break;
case REL_EXE_UTIL:
stoi_->setSelectAccess();
stoi_->setInsertAccess();
stoi_->setUpdateAccess();
stoi_->setDeleteAccess();
break;
default:
CMPASSERT(FALSE);
}
NABoolean validateTS = TRUE;
if ((naTable->getClusteringIndex() &&
naTable->getClusteringIndex()->isSystemTable()) ||
(NOT validateTS))
stoi_->setValidateTimestamp(FALSE);
else
stoi_->setValidateTimestamp(TRUE);
// MV --
// For INTERNAL REFRESH statements, leave only the insert on the MV itself.
if (re->getInliningInfo().isAvoidSecurityCheck() ||
(bindWA->isBindingMvRefresh() &&
(!naTable->isAnMV() || !stoi_->getInsertAccess())))
{
return NULL;
}
// In a SCAN, only the topmost view is inserted into BindWA StoiList
// (thus there will be no security check on underlying views/basetables,
// as Ansi says there shouldn't).
if (re->getOperatorType() == REL_SCAN && bindWA->viewCount())
{
return NULL;
}
// Genesis 10-980306-4309:
// Ansi says not supposed to be any security check on referenced tables,
// nor of course on indexes, RIs and temp tables which are not an Ansi
// notion to begin with.
if ((naTable->getSpecialType() == ExtendedQualName::TRIGTEMP_TABLE) ||
(naTable->getSpecialType() == ExtendedQualName::IUD_LOG_TABLE) ||
(naTable->getSpecialType() == ExtendedQualName::INDEX_TABLE) ||
(naTable->getSpecialType() == ExtendedQualName::RESOURCE_FORK))
{
return NULL;
}
if (noSecurityCheck)
{
return NULL;
}
if (re->getOperator().match(REL_ANY_GEN_UPDATE)&&
(((GenericUpdate*)re)->getUpdateCKorUniqueIndexKey()))
{
return NULL;
}
OptSqlTableOpenInfo *stoiInList = NULL;
for (CollIndex i=0; i < bindWA->getStoiList().entries(); i++)
if (strcmp(bindWA->getStoiList()[i]->getStoi()->fileName(), fileName) == 0) {
stoiInList = bindWA->getStoiList()[i];
break;
}
if (!stoiInList) {
stoiInList =
new(bindWA->wHeap()) OptSqlTableOpenInfo(
new (bindWA->wHeap()) SqlTableOpenInfo(*stoi_),
corrName,
bindWA->wHeap());
stoiInList->setTable((NATable*)naTable);
bindWA->getStoiList().insert(stoiInList);
bindWA->hbaseColUsageInfo()->insert((QualifiedName*)&naTable->getTableName());
} else {
// This is conceptually equivalent to
// stoiInList->AccessFlags |= stoi_->AccessFlags :
if (stoi_->getInsertAccess()) stoiInList->getStoi()->setInsertAccess();
if (stoi_->getUpdateAccess()) stoiInList->getStoi()->setUpdateAccess();
if (stoi_->getDeleteAccess()) stoiInList->getStoi()->setDeleteAccess();
if (stoi_->getSelectAccess()) stoiInList->getStoi()->setSelectAccess();
}
return stoiInList;
} // setupStoi()
//----------------------------------------------------------------------------
RelExpr *Scan::bindNode(BindWA *bindWA)
{
if (nodeIsBound())
{
bindWA->getCurrentScope()->setRETDesc(getRETDesc());
return this;
}
// -- Triggers
// Is this a Scan on a temporary table inside the action of a statement trigger?
if (getTableName().isATriggerTransitionName(bindWA))
return buildTriggerTransitionTableView(bindWA); // Located in Inlining.cpp
// -- MV
// Is this a Scan on a log inside the select statement of a Materialized View?
// If so - maybe we need to replace this Scan with some other RelExpr tree.
// Ignore when inDDL() because the log may not exist yet.
if (!bindWA->inDDL() &&
getTableName().getSpecialType() == ExtendedQualName::NORMAL_TABLE)
{
const MvBindContext *pMvBindContext = bindWA->getClosestMvBindContext();
if (NULL != pMvBindContext)
{
RelExpr *replacementTree =
pMvBindContext->getReplacementFor(getTableName().getQualifiedNameObj());
if (replacementTree != NULL)
{
// We need to replace the Scan on the base table by some other tree.
// Make sure this tree has the same name as the Scan.
const CorrName& baseCorrName = getTableName();
replacementTree = new(bindWA->wHeap())
RenameTable(TRUE, replacementTree, baseCorrName);
// Move any selection predicates on the Scan to the tree.
replacementTree->addSelPredTree(removeSelPredTree());
// Bind the tree and return instead of the tree.
return replacementTree->bindNode(bindWA);
}
}
}
bindChildren(bindWA);
if (bindWA->errStatus()) return this;
// Get the NATable for this object.
//
NATable *naTable = bindWA->getNATable(getTableName());
if (bindWA->errStatus())
return this;
// Set up stoi. bindWA->viewCount is altered during expanding the view.
setupStoi(stoi_, bindWA, this, naTable, getTableName(), noSecurityCheck());
// If the object is a view, expand the view.
//
if (naTable->getViewText()) {
// Allow view on exception_table or any other special_table_name objects
ComBoolean specialTableFlagOn = Get_SqlParser_Flags(ALLOW_SPECIALTABLETYPE);
if (specialTableFlagOn == FALSE)
{
Set_SqlParser_Flags(ALLOW_SPECIALTABLETYPE);
SQL_EXEC_SetParserFlagsForExSqlComp_Internal(ALLOW_SPECIALTABLETYPE);
}
RelExpr * boundView = bindWA->bindView(getTableName(),
naTable,
accessOptions(),
removeSelPredTree(),
getGroupAttr(),
TRUE/*catmanCollectUsages*/);
// QSTUFF
// First we check whether it's a view and if so it must be updatable
// when using it for stream access or an embedded update or delete
if (!naTable->isUpdatable() && getGroupAttr()->isEmbeddedUpdateOrDelete()){
*CmpCommon::diags() << DgSqlCode(-4206)
<< DgTableName(naTable->getTableName().getQualifiedNameAsAnsiString())
<< (getGroupAttr()->isEmbeddedUpdate() ?
DgString0("UPDATE") : DgString0("DELETE"));
bindWA->setErrStatus();
// restore ALLOW_SPECIALTABLETYPE setting
if (specialTableFlagOn == FALSE)
Reset_SqlParser_Flags(ALLOW_SPECIALTABLETYPE);
return NULL;
}
if (!naTable->isUpdatable() && getGroupAttr()->isStream()){
*CmpCommon::diags() << DgSqlCode(-4151)
<< DgTableName(naTable->getTableName().getQualifiedNameAsAnsiString());
bindWA->setErrStatus();
if (specialTableFlagOn == FALSE)
Reset_SqlParser_Flags(ALLOW_SPECIALTABLETYPE);
return NULL;
}
// Second we make sure that the underlying base table is key sequenced
// in case of embedded d/u and streams
// -- for as long as we don't support entry sequenced tables
if (boundView->getLeftmostScanNode()) {
// this is not a "create view V(a) as values(3)" kind of a view
const NATable * baseTable =
boundView->getLeftmostScanNode()->getTableDesc()->getNATable();
if (getGroupAttr()->isStream()) {
if (!baseTable->getClusteringIndex()->isKeySequenced()) {
*CmpCommon::diags() << DgSqlCode(-4204)
<< DgTableName(
baseTable->getTableName().getQualifiedNameAsAnsiString());
bindWA->setErrStatus();
if (specialTableFlagOn == FALSE)
Reset_SqlParser_Flags(ALLOW_SPECIALTABLETYPE);
return NULL;
}
}
if (getGroupAttr()->isEmbeddedUpdateOrDelete()){
if (!baseTable->getClusteringIndex()->isKeySequenced()){
*CmpCommon::diags() << DgSqlCode(-4205)
<< DgTableName(
baseTable->getTableName().getQualifiedNameAsAnsiString())
<< (getGroupAttr()->isEmbeddedUpdate() ?
DgString0("UPDATE") : DgString0("DELETE"));
bindWA->setErrStatus();
if (specialTableFlagOn == FALSE)
Reset_SqlParser_Flags(ALLOW_SPECIALTABLETYPE);
return NULL;
}
}
}
// QSTUFF
// restore ALLOW_SPECIALTABLETYPE setting
if (specialTableFlagOn == FALSE)
Reset_SqlParser_Flags(ALLOW_SPECIALTABLETYPE);
return boundView;
}
// -- MV
// If this is the expanded tree pass during CREATE MV, expand the MV into
// its SELECT tree, just like a regular view.
// Do this only for incremental MVs, otherwise they may introduce unsupported
// operators such as Union.
if (naTable->isAnMV() &&
bindWA->isExpandMvTree() &&
naTable->getMVInfo(bindWA)->isIncremental())
{
CMPASSERT(bindWA->inDDL());
return bindExpandedMaterializedView(bindWA, naTable);
}
// Do not allow selecting from an uninitialized MV
if (naTable->isAnMV() && !bindWA->inDDL() && !bindWA->isBindingMvRefresh())
{
if (naTable->verifyMvIsInitializedAndAvailable(bindWA))
return NULL;
}
// Allocate a TableDesc and attach it to the Scan node.
// This call also allocates a RETDesc, attached to the BindScope,
// which we want to attach also to the Scan.
//
// disable override schema for synonym
NABoolean os = FALSE;
if ( ( bindWA->overrideSchemaEnabled() )
&& ( ! naTable->getSynonymReferenceName().isNull() ) )
{
os = bindWA->getToOverrideSchema();
bindWA->setToOverrideSchema(FALSE);
}
TableDesc * tableDesc = NULL;
if ((NOT isHbaseScan()) || (! getTableDesc()))
{
tableDesc = bindWA->createTableDesc(naTable, getTableName(),
FALSE, getHint());
}
else
tableDesc = getTableDesc();
// restore override schema setting
if ( ( bindWA->overrideSchemaEnabled() )
&& ( ! naTable->getSynonymReferenceName().isNull() ) )
bindWA->setToOverrideSchema(os);
// before attaching set the selectivity hint defined by the user for this
// table
if (tableDesc && getHint() &&
getTableName().getSpecialType() == ExtendedQualName::NORMAL_TABLE)
{
double s;
s = getHint()->getSelectivity();
if (0.0 <= s && s <= 1.0) {
SelectivityHint *selHint = new (STMTHEAP) SelectivityHint();
selHint->setScanSelectivityFactor(s);
tableDesc->setSelectivityHint(selHint);
}
if (getHint()->getCardinality() >= 1.0) {
s = getHint()->getCardinality();
CostScalar scanCard(s);
if((scanCard.getValue() - floor(scanCard.getValue())) > 0.00001)
scanCard = ceil(scanCard.getValue());
CardinalityHint *cardHint = new (STMTHEAP) CardinalityHint();
cardHint->setScanCardinality(scanCard);
tableDesc->setCardinalityHint(cardHint);
}
}
setTableDesc(tableDesc);
if (bindWA->errStatus()) return this;
setRETDesc(bindWA->getCurrentScope()->getRETDesc());
if ((CmpCommon::getDefault(ALLOW_DML_ON_NONAUDITED_TABLE) == DF_OFF) &&
(naTable && naTable->getClusteringIndex() && !naTable->getClusteringIndex()->isAudited()))
{
*CmpCommon::diags() << DgSqlCode(-4211)
<< DgTableName(
naTable->getTableName().getQualifiedNameAsAnsiString());
bindWA->setErrStatus();
return NULL;
}
// restricted partitions for HBase table
if (naTable->isHbaseTable() &&
(naTable->isPartitionNameSpecified() ||
naTable->isPartitionRangeSpecified()))
{
PartitioningFunction * partFunc = naTable->getClusteringIndex()->getPartitioningFunction();
// find the salt column and apply a predicate on the salt column.
// For Hash2, since the partition key columns are the columns used to build
// the _SALT_ column, we need to search all columns for the _SALT_ column.
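// Illustrative example (table salted into 8 partitions): scanning only
// partition 3 yields the predicate "_SALT_" = 2, and partitions 2..4 yield
// "_SALT_" BETWEEN 1 AND 3 (partition numbers are 1-based externally and
// 0-based in the salt value).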
const NAColumnArray &ccCols =
(partFunc && partFunc->castToHash2PartitioningFunction())?
naTable->getClusteringIndex()->getAllColumns()
:
naTable->getClusteringIndex()->getPartitioningKeyColumns();
NABoolean saltColFound = FALSE;
for (CollIndex i=0; i<ccCols.entries() && !saltColFound; i++)
{
if (ccCols[i]->isComputedColumn() &&
ccCols[i]->getColName() ==
ElemDDLSaltOptionsClause::getSaltSysColName())
{
saltColFound = TRUE;
// create a predicate "_SALT_" = <num> or
// "_SALT_" between <num> and <num>
Int32 beginPartNum = partFunc->getRestrictedBeginPartNumber() - 1;
Int32 endPartNum = partFunc->getRestrictedEndPartNumber() - 1;
// fill in defaults, indicated by -1 (-2 after subtraction above)
if (beginPartNum < 0)
beginPartNum = 0;
if (endPartNum < 0)
endPartNum = partFunc->getCountOfPartitions() - 1;
ItemExpr *partPred = NULL;
ColReference *saltColRef = new(bindWA->wHeap()) ColReference(
new(bindWA->wHeap()) ColRefName(
ccCols[i]->getFullColRefName(), bindWA->wHeap()));
if (beginPartNum == endPartNum)
{
partPred = new(bindWA->wHeap()) BiRelat
(ITM_EQUAL,
saltColRef,
new(bindWA->wHeap()) ConstValue(beginPartNum,bindWA->wHeap()));
}
else
{
partPred = new(bindWA->wHeap()) Between
(saltColRef,
new(bindWA->wHeap()) ConstValue(beginPartNum,bindWA->wHeap()),
new(bindWA->wHeap()) ConstValue(endPartNum,bindWA->wHeap()));
}
ItemExpr *newSelPred = removeSelPredTree();
if (newSelPred)
newSelPred = new(bindWA->wHeap()) BiLogic(ITM_AND,
newSelPred,
partPred);
else
newSelPred = partPred;
// now add the partition predicates
addSelPredTree(newSelPred->bindNode(bindWA));
}
}
if (!saltColFound)
{
// not allowed to select individual partitions from HBase tables
// unless they are salted
char buf[20];
snprintf(buf, 20, "%d", partFunc->getRestrictedBeginPartNumber());
*CmpCommon::diags() << DgSqlCode(-1276)
<< DgString0(buf)
<< DgTableName(
naTable->getTableName().getQualifiedNameAsAnsiString());
bindWA->setErrStatus();
return NULL;
}
}
// Bind the base class.
//
RelExpr *boundExpr = bindSelf(bindWA);
if (bindWA->errStatus()) return this;
//
// Assign the set of columns that belong to the table to be scanned
// as the output values that can be produced by this scan.
//
getGroupAttr()->addCharacteristicOutputs(getTableDesc()->getColumnList());
getGroupAttr()->addCharacteristicOutputs(getTableDesc()->hbaseTSList());
// MV --
if (getInliningInfo().isMVLoggingInlined())
projectCurrentEpoch(bindWA);
// QSTUFF
// Second we make sure that the underlying base table is key sequenced in case
// of embedded d/u and streams
// -- for as long as we don't support entry sequenced tables
if (getGroupAttr()->isStream()){
if (!naTable->getClusteringIndex()->isKeySequenced() ||
naTable->hasVerticalPartitions()){
*CmpCommon::diags() << DgSqlCode(-4204)
<< DgTableName(naTable->getTableName().getQualifiedNameAsAnsiString());
bindWA->setErrStatus();
return NULL;
}
if (!getTableDesc()->getClusteringIndex()->getNAFileSet()->isAudited()) {
// Stream access not allowed on a non-audited table
*CmpCommon::diags() << DgSqlCode(-4215)
<< DgTableName(
naTable->getTableName().getQualifiedNameAsAnsiString());
bindWA->setErrStatus();
return NULL;
}
}
if (getGroupAttr()->isEmbeddedUpdateOrDelete()){
if (!naTable->getClusteringIndex()->isKeySequenced()
|| naTable->hasVerticalPartitions()){
*CmpCommon::diags() << DgSqlCode(-4205)
<< DgTableName(naTable->getTableName().getQualifiedNameAsAnsiString())
<< (getGroupAttr()->isEmbeddedUpdate() ?
DgString0("UPDATE") : DgString0("DELETE"));
bindWA->setErrStatus();
return NULL;
}
}
// QSTUFF
// Fix "browse access mode incorrectly starts transaction" genesis case
// 10-021111-1080. Here's a glimpse at what may have been the original
// intent of the old code (taken from RelExpr.h comment for the now
// defunct RelExpr::propagateAccessOptions):
//
// At parse time, user can specify statement level access options.
// (See SQL/ARK Language spec). These options are attached to the
// RelRoot node and could be different for different Scans in the query.
// All Scan and Update nodes under a RelRoot have the same Access
// type and the Lock Mode.
//
// The problem is propagateAccessOptions did not visit all the Scans,
// eg, it did not propagate to subquery Scans, and it did not propagate
// to internal RelRoots. This "push" model seems harder to understand
// and to do correctly.
//
// So, we go with the "pull" model. An interesting node such as a Scan,
// GenericUpdate, RelRoot that needs a user-specified access/lock mode
// can "pull" one from BindWA. BindWA already implements SQL scoping
// and visibility rules. It's easier to explain also. Each table
// reference inherits the user-specified access/lock mode of the
// nearest SQL scope, going from the table outwards. If the entire
// query has no user-specified access/lock mode, then it uses the
// session-level default access/lock mode.
//
// if we have no user-specified access options then
// get it from nearest enclosing scope that has one (if any)
if (!accessOptions().userSpecified()) {
StmtLevelAccessOptions *axOpts = bindWA->findUserSpecifiedAccessOption();
if (axOpts) {
accessOptions() = *axOpts;
}
}
// The above code is in RelRoot::bindNode also.
// It would be nice to refactor this common code; someday.
// See Halloween handling code in GenericUpdate::bindNode
if (accessOptions().userSpecified()) {
if ( accessOptions().accessType() == REPEATABLE_ ||
accessOptions().accessType() == STABLE_ ||
accessOptions().accessType() == BROWSE_
) {
naTable->setRefsIncompatibleDP2Halloween();
}
}
else {
TransMode::IsolationLevel il = CmpCommon::transMode()->getIsolationLevel();
if((CmpCommon::transMode()->ILtoAT(il) == REPEATABLE_ ) ||
(CmpCommon::transMode()->ILtoAT(il) == STABLE_ ) ||
(CmpCommon::transMode()->ILtoAT(il) == BROWSE_ )) {
naTable->setRefsIncompatibleDP2Halloween();
}
}
const NAString * tableLockVal =
ActiveControlDB()->getControlTableValue(
getTableName().getUgivenName(), "TABLELOCK");
if (*tableLockVal == "ON")
naTable->setRefsIncompatibleDP2Halloween();
// Embedded update/delete queries on a partitioned table generate an
// assertion when the ATTEMPT_ASYNCHRONOUS_ACCESS flag is OFF. This is
// because a split operator is used, and removing the split top operator
// causes some problems (error 66 from the file system is one of them).
// So, for now, the compiler will generate an error if these
// conditions occur.
if (getGroupAttr()->isEmbeddedUpdateOrDelete() &&
naTable->getClusteringIndex()->isPartitioned() &&
(CmpCommon::getDefault(ATTEMPT_ASYNCHRONOUS_ACCESS) == DF_OFF)) {
*CmpCommon::diags() << DgSqlCode(-4321)
<< DgTableName(naTable->getTableName().getQualifiedNameAsAnsiString());
bindWA->setErrStatus();
return NULL;
}
// Stream access is not allowed on multi-partitioned access paths when
// CQD ATTEMPT_ASYNCHRONOUS_ACCESS is set to OFF. If we find
// that all access paths are partitioned we give an error.
if (getGroupAttr()->isStream() &&
(CmpCommon::getDefault(ATTEMPT_ASYNCHRONOUS_ACCESS) == DF_OFF)) {
NABoolean atleastonenonpartitionedaccess = FALSE;
NAFileSetList idescList = naTable->getIndexList();
for(CollIndex i = 0;
i < idescList.entries() && !atleastonenonpartitionedaccess; i++)
if(!(idescList[i]->isPartitioned()) )
atleastonenonpartitionedaccess = TRUE;
if (!atleastonenonpartitionedaccess) {
*CmpCommon::diags() << DgSqlCode(-4320)
<< DgTableName(naTable->getTableName().getQualifiedNameAsAnsiString());
bindWA->setErrStatus();
return NULL;
}
}
if (hbaseAccessOptions_)
{
if (hbaseAccessOptions_->isMaxVersions())
{
hbaseAccessOptions_->setHbaseVersions
(
getTableDesc()->getClusteringIndex()->getNAFileSet()->numMaxVersions()
);
}
}
return boundExpr;
} // Scan::bindNode()
//----------------------------------------------------------------------------
RelExpr *Scan::bindExpandedMaterializedView(BindWA *bindWA, NATable *naTable)
{
CollHeap *heap = bindWA->wHeap();
MVInfoForDML *mvInfo = naTable->getMVInfo(bindWA);
QualifiedName mvName(mvInfo->getNameOfMV(), 3, heap, bindWA);
CorrName mvCorrName(mvName, heap, getTableName().getCorrNameAsString());
RelExpr *viewTree = mvInfo->buildMVSelectTree();
viewTree = new(heap) RenameTable(TRUE, viewTree, mvCorrName);
viewTree->addSelPredTree(removeSelPredTree());
RelExpr *boundExpr = viewTree->bindNode(bindWA);
if (bindWA->errStatus())
return this;
if (naTable->getClusteringIndex()->hasSyskey())
{
// In case the MV on top of this MV is an MJV, it needs the SYSKEY
// column of this MV. Since the SYSKEY column is not projected from
// the select list of this MV, just fake it. Its value will never be
// used anyway - just its existence.
ConstValue *dummySyskey = new(heap) ConstValue(0);
dummySyskey->changeType(new(heap) SQLLargeInt());
ItemExpr *dummySyskeyCol = dummySyskey->bindNode(bindWA);
if (bindWA->errStatus())
return this;
ColRefName syskeyName("SYSKEY", mvCorrName);
boundExpr->getRETDesc()->addColumn(bindWA,
syskeyName,
dummySyskeyCol->getValueId(),
SYSTEM_COLUMN);
}
bindWA->getCurrentScope()->setRETDesc(boundExpr->getRETDesc());
return boundExpr;
}
//----------------------------------------------------------------------------
// This Scan needs to project the CurrentEpoch column.
// Create and bind the CurrentEpoch function
void Scan::projectCurrentEpoch(BindWA *bindWA)
{
ItemExpr *currEpoch =
new(bindWA->wHeap()) GenericUpdateOutputFunction(ITM_CURRENTEPOCH);
currEpoch->bindNode(bindWA);
// Add it to the RETDesc
ColRefName virtualColName(InliningInfo::getEpochVirtualColName());
getRETDesc()->addColumn(bindWA, virtualColName, currEpoch->getValueId());
// And force the generator to project it even though it is not
// a column in the IndexDesc.
ValueIdSet loggingCols;
loggingCols.insert(currEpoch->getValueId());
setExtraOutputColumns(loggingCols);
}
// -----------------------------------------------------------------------
// methods for class Tuple
// -----------------------------------------------------------------------
// Genesis 10-990226-4329 and 10-000221-6676.
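// Check that every subquery appearing in a VALUES clause is scalar when the
// tuple has more than one element (else error 4125). A single-element tuple
// that is a row subquery is rewritten instead: into a ONE_ROW groupby on the
// subquery when COMP_BOOL_137 is ON, or into the subquery tree itself.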
static RelExpr *checkTupleElementsAreAllScalar(BindWA *bindWA, RelExpr *re)
{
if (!re) return NULL;
RETDesc *rd = re->getRETDesc();
CMPASSERT(rd);
// an empty tuple is okay (dummy for Triggers, e.g.)
const ColumnDescList &cols = *rd->getColumnList();
for (CollIndex i = cols.entries(); i--; ) {
ColumnDesc *col = cols[i];
Subquery *subq = (Subquery *)cols[i]->getValueId().getItemExpr();
if (subq->isASubquery()) {
if (cols.entries() > 1 && subq->getDegree() > 1) {
// 4125 The select list of a subquery in a VALUES clause must be scalar.
*CmpCommon::diags() << DgSqlCode(-4125);
bindWA->setErrStatus();
return NULL;
}
else if (cols.entries() == 1) { // a single-element tuple that is a row subquery:
// we do not want to make the transformation below. We want to keep the
// values clause, so that it can be attached by a tsj to the subquery
// during transform.
CMPASSERT(subq->isARowSubquery());
if (CmpCommon::getDefault(COMP_BOOL_137) == DF_ON)
{
ValueIdList subqSelectList;
RETDesc *subqRD = subq->getSubquery()->getRETDesc()->nullInstantiate(
bindWA, TRUE/*forceCast for GenRelGrby*/, subqSelectList);
subq->getSubquery()->setRETDesc(subqRD);
ItemExpr *agg = new(bindWA->wHeap())
Aggregate(ITM_ONE_ROW, subqSelectList.rebuildExprTree());
RelExpr * gby = new(bindWA->wHeap())
GroupByAgg(subq->getSubquery(), REL_GROUPBY, NULL, agg);
NABoolean save = bindWA->getCurrentScope()->context()->inTupleList();
bindWA->getCurrentScope()->context()->inTupleList() = TRUE;
gby = gby->bindNode(bindWA);
bindWA->getCurrentScope()->context()->inTupleList() = save;
return gby;
}
else
{
return subq->getSubquery();
}
}
}
}
return re;
}
RelExpr *Tuple::bindNode(BindWA *bindWA)
{
if (nodeIsBound())
{
bindWA->getCurrentScope()->setRETDesc(getRETDesc());
return this;
}
// Used by rowsets. We search for occurrences of arrays within this node to
// replace them with scalar variables
if (bindWA->getHostArraysArea() && !bindWA->getHostArraysArea()->done())
{
RelExpr *boundExpr = bindWA->getHostArraysArea()->modifyTupleNode(this);
if (boundExpr)
return checkTupleElementsAreAllScalar(bindWA, boundExpr);
}
bindChildren(bindWA);
if (bindWA->errStatus()) return this;
// Detach the item expression tree for the value list and bind it.
// We use counterForRowValues() and pass in parent, for DEFAULT processing
// (Ansi 7.1 SR 1).
//
CollIndex counterRowVals = 0;
CMPASSERT(!bindWA->getCurrentScope()->context()->counterForRowValues());
bindWA->getCurrentScope()->context()->counterForRowValues() = &counterRowVals;
//
setRETDesc(bindRowValues(bindWA, removeTupleExprTree(), tupleExpr(), this, FALSE));
if (bindWA->errStatus()) return this;
//
bindWA->getCurrentScope()->context()->counterForRowValues() = NULL;
// Do NOT set currently scoped RETDesc to this VALUES(...) RETDesc --
// makes "select * from t where ((values(1)),a) = (1,2);"
// fail with error 4001 "column A not found, no named tables in scope"
//
// bindWA->getCurrentScope()->setRETDesc(getRETDesc());
// Bind the base class.
//
RelExpr *boundExpr = bindSelf(bindWA);
// -- Trigger
if (bindWA->errStatus()) return this;
//
//for case 10-020716-5497
RelExpr *newExpr = checkTupleElementsAreAllScalar(bindWA, boundExpr);
// before doing anything with newExpr make sure it is not null; it can
// be null if there is an error in checkTupleElementsAreAllScalar.
getGroupAttr()->addCharacteristicOutputs(tupleExpr());
return newExpr;
} // Tuple::bindNode()
// -----------------------------------------------------------------------
// methods for class TupleList
// -----------------------------------------------------------------------
RelExpr *TupleList::bindNode(BindWA *bindWA)
{
if (nodeIsBound())
{
bindWA->getCurrentScope()->setRETDesc(getRETDesc());
return this;
}
RelExpr * boundExpr = NULL;
bindChildren(bindWA);
if (bindWA->errStatus()) return this;
ExprValueId eVid(tupleExprTree());
ItemExprTreeAsList tupleList(&eVid, ITM_ITEM_LIST);
NABoolean castTo = castToList().entries() > 0;
if (tupleExprTree()->containsSubquery() ||
tupleExprTree()->containsUDF()
#ifndef NDEBUG
||
getenv("UNIONED_TUPLES")
#endif
) {
// Make a union'ed tree of all the tuples in tupleList. ##
// This is done because TupleList doesn't handle transformation ##
// of subqueries in tuples correctly yet. ##
CollIndex nTupleListEntries = (CollIndex)tupleList.entries();
for (CollIndex i = 0; i < nTupleListEntries ; i++) {
ItemExpr *ituple = tupleList[i]->child(0)->castToItemExpr();
RelExpr *rtuple = new(bindWA->wHeap()) Tuple(ituple);
rtuple = rtuple->bindNode(bindWA);
if (bindWA->errStatus()) return this;
// If INSERTing a TupleList, make some Assign's (even tmp's work!)
// to do some error-checking for MP-NCHAR-as-single-byte target columns.
//
// Similar code exists in
// (a) the loop further down, (b) TupleList::codeGen()
// and yes, it needs to be in all three places.
//
// NOTE: tmpAssign MUST BE ON HEAP --
// Cannot be done with a stack-allocated tmpAssign
// because ItemExpr destructor will delete children,
// which we (and parent) are still referencing!
if (castTo) {
const ColumnDescList &itms = *rtuple->getRETDesc()->getColumnList();
for (CollIndex j = 0; j < (CollIndex)itms.entries(); j++) {
ValueId src = itms[j]->getValueId();
Assign *tmpAssign = new(bindWA->wHeap())
Assign(castToList()[j].getItemExpr(), src.getItemExpr());
tmpAssign = (Assign *)tmpAssign->bindNode(bindWA);
if (bindWA->errStatus()) return this;
}
}
if (!boundExpr)
boundExpr = rtuple;
else
boundExpr = new(bindWA->wHeap()) Union(boundExpr, rtuple);
} // for loop over tupleList
CMPASSERT(boundExpr);
return boundExpr->bindNode(bindWA);
} // containsSubquery
// Detach the item expression tree for the value list and bind it.
// We use counterForRowValues() and pass in parent, for DEFAULT processing
// (Ansi 7.1 SR 1).
//
CollIndex counterRowVals = 0;
CMPASSERT(!bindWA->getCurrentScope()->context()->counterForRowValues());
bindWA->getCurrentScope()->context()->counterForRowValues() = &counterRowVals;
// tupleExprTree() contains a list of tuples.
// Each tuple is also a list of values (this list may contain one item).
// Bind all values in all the tuples.
// Check that the number of elements in each tuple is the same,
// and that the types of corresponding elements are compatible.
//
numberOfTuples_ = tupleList.entries();
CollIndex prevTupleNumEntries = NULL_COLL_INDEX;
// A list of ValueIdUnions nodes. Will create as many as there are
// entries in each tuple. The valIds from corresponding elements of
// the tuples will be added so that each ValueIdUnion represents a
// column of the tuple virtual table. Used to determine the
// union-compatible type to be used for the result type produced by
// the tuplelist.
//
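// Illustrative example: VALUES (1, 'a'), (2, 'b') builds two ValueIdUnion
// nodes, one per column; the first unions {1, 2} and the second unions
// {'a', 'b'}, each synthesizing the union-compatible type for its column
// of the virtual table.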
ItemExprList vidUnions(bindWA->wHeap());
ValueIdUnion *vidUnion;
CollIndex i = 0;
CollIndex nEntries = (CollIndex)tupleList.entries() ;
for (i = 0; i < nEntries ; i++) {
counterRowVals = 0;
ValueIdList vidList;
ItemExpr *tuple = tupleList[i]->child(0)->castToItemExpr();
tuple->convertToValueIdList(vidList, bindWA, ITM_ITEM_LIST, this);
if (bindWA->errStatus())
return NULL;
if (prevTupleNumEntries == NULL_COLL_INDEX) {
prevTupleNumEntries = vidList.entries();
}
else if (prevTupleNumEntries != vidList.entries()) {
// 4126 The row-value-ctors of a VALUES must be of equal degree.
*CmpCommon::diags() << DgSqlCode(-4126);
bindWA->setErrStatus();
return NULL;
}
// Genesis 10-980611-7153
if (castTo && prevTupleNumEntries != castToList().entries()) break;
for (CollIndex j = 0; j < prevTupleNumEntries; j++) {
// If any unknown type in the tuple, coerce it to the target type.
// Also do same MP-NCHAR magic as above.
if (castTo) {
ValueId src = vidList[j];
src.coerceType(castToList()[j].getType());
// tmpAssign MUST BE ON HEAP -- see note above!
Assign *tmpAssign = new(bindWA->wHeap())
Assign(castToList()[j].getItemExpr(), src.getItemExpr());
tmpAssign = (Assign *)tmpAssign->bindNode(bindWA);
if (bindWA->errStatus())
return this;
}
if(i == 0) {
ValueIdList vids;
// Create an empty ValueIdUnion. Will create as many as there
// are entries in each tuple. Add the valIds from
// corresponding elements of the tuples so that each
// ValueIdUnion represents a column of the tuple virtual
// table.
//
vidUnion = new(bindWA->wHeap())
ValueIdUnion(vids, NULL_VALUE_ID);
vidUnion->setWasDefaultClause(TRUE);
vidUnions.insertAt(j, vidUnion);
}
// Add the valIds from corresponding elements of the tuples so
// that each ValueIdUnion represents a column of the tuple
// virtual table.
//
vidUnion = (ValueIdUnion *)vidUnions[j];
vidUnion->setSource((Lng32)i, vidList[j]);
if (NOT vidList[j].getItemExpr()->wasDefaultClause())
vidUnion->setWasDefaultClause(FALSE);
} // for loop over entries in tuple
} // for loop over tupleList
if (castTo && prevTupleNumEntries != castToList().entries())
{
// 4023 degree of row value constructor must equal that of target table
*CmpCommon::diags() << DgSqlCode(-4023)
<< DgInt0((Lng32)prevTupleNumEntries)
<< DgInt1((Lng32)castToList().entries());
bindWA->setErrStatus();
return NULL;
}
// do INFER_CHARSET fixup
if (!doInferCharSetFixup(bindWA, CharInfo::ISO88591, prevTupleNumEntries,
tupleList.entries())) {
return NULL;
}
ItemExpr * outputList = NULL;
for (CollIndex j = 0; j < prevTupleNumEntries; j++) {
// Get the ValueIdUnion node corresponding to this column of the
// tuple list virtual table
//
vidUnion = (ValueIdUnion *)vidUnions[j];
if (castTo) {
// Make sure the place holder type can support all the values in
// the tuple list and target column
//
vidUnion->setSource(numTuples(), castToList()[j]);
}
vidUnion->bindNode(bindWA);
if (bindWA->errStatus())
return NULL;
if (castTo) {
// Check that the source and target types are compatible.
// Cannot be done with a stack-allocated tmpAssign
// because ItemExpr destructor will delete children,
// which we (and parent) are still referencing!
Assign *tmpAssign = new(bindWA->wHeap())
Assign(castToList()[j].getItemExpr(), vidUnion);
if ( CmpCommon::getDefault(ALLOW_IMPLICIT_CHAR_CASTING) == DF_ON )
{
tmpAssign->tryToDoImplicitCasting(bindWA);
}
const NAType *targetType = tmpAssign->synthesizeType();
if (!targetType) {
bindWA->setErrStatus();
return NULL;
}
}
NAType *phType = vidUnion->getValueId().getType().newCopy(bindWA->wHeap());
NATypeToItem *placeHolder = new(bindWA->wHeap()) NATypeToItem(phType);
Cast * cnode;
if (castTo)
{
cnode = new(bindWA->wHeap()) Cast(placeHolder, phType, ITM_CAST, TRUE);
if (vidUnion->getValueId().getItemExpr()->wasDefaultClause())
cnode->setWasDefaultClause(TRUE);
}
else
cnode = new(bindWA->wHeap()) Cast(placeHolder, phType);
cnode->setConstFoldingDisabled(TRUE);
cnode->bindNode(bindWA);
if (!outputList)
outputList = cnode;
else
outputList = new(bindWA->wHeap()) ItemList(outputList, cnode);
}
setRETDesc(bindRowValues(bindWA, outputList, tupleExpr(), this, FALSE));
if (bindWA->errStatus()) return this;
bindWA->getCurrentScope()->context()->counterForRowValues() = NULL;
// Bind the base class.
//
boundExpr = bindSelf(bindWA);
if (bindWA->errStatus()) return this;
// need to add system columns as well....?
NABoolean inSubquery = FALSE;
BindScope *currScope = bindWA->getCurrentScope();
BindScope *prevScope = bindWA->getPreviousScope(currScope);
if (prevScope)
inSubquery = prevScope->context()->inSubquery();
if (inSubquery)
{
// need to change tupleExpr() & make it null-instantiated as RETDesc stores
// null instantiated columns (most probably these are constants, but not
// necessarily)
const ColumnDescList *viewColumns = getRETDesc()->getColumnList();
tupleExpr().clear();
for (CollIndex k=0; k < viewColumns->entries(); k++)
{
ValueId vid = (*viewColumns)[k]->getValueId();
// Special logic in Normalizer to optimize away a LEFT JOIN is not to
// be explored there, as this is not a LEFT JOIN
// Genesis case: 10-010312-1675
// If the query were to be a LEFT JOIN, we would not be here
if (vid.getItemExpr()->getOperatorType() == ITM_INSTANTIATE_NULL)
{
((InstantiateNull *)vid.getItemExpr())->NoCheckforLeftToInnerJoin
= TRUE;
}
tupleExpr().insert(vid);
}
}
getGroupAttr()->addCharacteristicOutputs(tupleExpr());
return boundExpr;
} // TupleList::bindNode()
// set vidList = ith tuple of this tuple list; return this on success, NULL on error
RelExpr* TupleList::getTuple
(BindWA *bindWA, ValueIdList& vidList, CollIndex i)
{
ExprValueId eVid(tupleExprTree());
ItemExprTreeAsList tupleList(&eVid, ITM_ITEM_LIST);
ItemExpr *tuple = tupleList[i]->child(0)->castToItemExpr();
tuple->convertToValueIdList(vidList, bindWA, ITM_ITEM_LIST, this);
return bindWA->errStatus() ? NULL : this;
}
// set needsFixup to TRUE iff tuplelist needs INFER_CHARSET fixup
RelExpr*
TupleList::needsCharSetFixup(BindWA *bindWA,
CollIndex arity,
CollIndex nTuples,
NAList<NABoolean> &strNeedsFixup,
NABoolean &needsFixup)
{
// assume it needs no INFER_CHARSET fixup until proven otherwise
needsFixup = FALSE;
if (CmpCommon::wantCharSetInference()) {
CollIndex t, x;
for (x = 0; x < arity; x++) { // initialize
strNeedsFixup.insert(FALSE);
}
// go thru tuplelist looking for unprefixed string literals
for (t = 0; t < nTuples; t++) {
// get tuple
ValueIdList tup;
if (!getTuple(bindWA, tup, t)) {
return NULL; // something wrong
}
else {
// go thru columns of tuple looking for unprefixed string literals
for (x = 0; x < arity; x++) {
if (!strNeedsFixup[x] && tup[x].inferableCharType()) {
strNeedsFixup[x] = TRUE;
needsFixup = TRUE;
}
}
}
}
}
return this; // all OK
}
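// Illustration of the INFER_CHARSET fixup below: in
// VALUES ('abc'), (_UCS2'xyz'), the unprefixed literal 'abc' has an
// inferable char type; pushDownCharType() samples the known char set
// (UCS2 here), and doInferCharSetFixup() coerces 'abc' to it.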
// find fixable strings' inferredCharTypes
RelExpr*
TupleList::pushDownCharType(BindWA *bindWA,
enum CharInfo::CharSet cs,
NAList<const CharType*> &inferredCharType,
NAList<NABoolean> &strNeedsFixup,
CollIndex arity,
CollIndex nTuples)
{
// mimic CharType::findPushDownCharType() logic
const CharType* dctp = CharType::desiredCharType(cs);
NAList<const CharType*> sampleCharType(CmpCommon::statementHeap(),arity);
NAList<Int32> total(CmpCommon::statementHeap(),arity);
NAList<Int32> ct (CmpCommon::statementHeap(),arity);
CollIndex t, x;
for (x = 0; x < arity; x++) { // initialize
total.insert(0);
ct.insert(0);
sampleCharType.insert(NULL);
}
// go thru tuplelist looking for fixable strings' inferredCharType
for (t = 0; t < nTuples; t++) {
// get tuple
ValueIdList tup;
if (!getTuple(bindWA, tup, t)) {
return NULL; // something wrong
}
else {
// go thru tuple looking for fixable strings' inferredCharType
for (x = 0; x < arity; x++) {
if (strNeedsFixup[x]) {
total[x] += 1;
const CharType *ctp;
if (tup[x].hasKnownCharSet(&ctp)) {
ct[x] += 1;
if (sampleCharType[x] == NULL) {
sampleCharType[x] = ctp;
}
}
}
}
}
}
for (x = 0; x < arity; x++) {
if (ct[x] == total[x]) {
// all have known char set or none need fixup
inferredCharType.insert(NULL); // nothing to fix
}
else {
inferredCharType.insert(sampleCharType[x] ? sampleCharType[x] : dctp);
}
}
return this; // all OK
}
// do INFER_CHARSET fixup
RelExpr*
TupleList::doInferCharSetFixup(BindWA *bindWA,
enum CharInfo::CharSet cs,
CollIndex arity,
CollIndex nTuples)
{
NABoolean needsFixup;
NAList<NABoolean> strNeedsFixup(CmpCommon::statementHeap(),arity);
RelExpr *result = needsCharSetFixup
(bindWA, arity, nTuples, strNeedsFixup, needsFixup);
if (!result || // something went wrong
!needsFixup) { // no INFER_CHARSET fixup needed
return result;
}
else { // some string literal needs INFER_CHARSET fixup
NAList<const CharType*> inferredCharType(CmpCommon::statementHeap(),arity);
if (!pushDownCharType(bindWA, cs, inferredCharType,
strNeedsFixup, arity, nTuples)) {
return NULL; // something went wrong
}
else {
// go thru tuplelist fixing up literals' char sets
CollIndex t, x;
for (t = 0; t < nTuples; t++) {
// get tuple
ValueIdList tup;
if (!getTuple(bindWA, tup, t)) {
return NULL; // something went wrong
}
else {
// go thru tuple fixing up literals' char sets
for (x = 0; x < arity; x++) {
if (strNeedsFixup[x] && tup[x].inferableCharType()) {
// coerce literal to have column's inferred char set
tup[x].coerceType(*(inferredCharType[x]), NA_CHARACTER_TYPE);
}
}
}
}
}
}
return this;
}
// -----------------------------------------------------------------------
// member functions for class RenameTable
// -----------------------------------------------------------------------
RelExpr *RenameTable::bindNode(BindWA *bindWA)
{
if (nodeIsBound())
{
bindWA->getCurrentScope()->setRETDesc(getRETDesc()); // -- Triggers
return this;
}
//
// Create a new table name scope.
//
bindWA->getCurrentScope()->xtnmStack()->createXTNM();
// Code to enforce the specification that, if an index expression is
// specified with a rowset and the index is included in the derived table,
// the index must be the last column of the derived column list.
if((getTableName().getCorrNameAsString() != "Rowset___") && (getArity() != 0))
{
if(child(0)->getOperatorType() == REL_ROWSET)
{
NAString indexExpr(bindWA->wHeap());
NAString lastString("", bindWA->wHeap());
ItemExpr *tempPtr;
indexExpr = ((Rowset *)getChild(0))->getIndexName();
if((indexExpr != "") && newColNamesTree_)
{
for (tempPtr = newColNamesTree_; tempPtr; tempPtr=tempPtr->child(1))
{
Int32 arity = tempPtr->getArity();
if(arity == 1)
{
lastString = ((RenameCol *)tempPtr)->getNewColRefName()->getColName();
}
}
if(indexExpr != lastString)
{
*CmpCommon::diags() << DgSqlCode(-30012)
<< DgString0(indexExpr)
<< DgString1(getTableName().getCorrNameAsString());
bindWA->setErrStatus();
return NULL;
}
}
}
}
//
// Bind the child nodes.
//
bindChildren(bindWA);
if (bindWA->errStatus()) return this;
//
// Remove the table name scope.
//
bindWA->getCurrentScope()->xtnmStack()->removeXTNM();
//
// Create the result table.
//
RETDesc *resultTable = new (bindWA->wHeap()) RETDesc(bindWA);
const RETDesc &sourceTable = *child(0)->getRETDesc();
const CorrName &tableName = getTableName();
ItemExpr *derivedColTree = removeColNameTree();
ItemExprList derivedColList(bindWA->wHeap());
const NAString *simpleColNameStr;
CollIndex i;
//
// Check that there are an equal number of columns to values.
//
if (derivedColTree) {
derivedColList.insertTree(derivedColTree);
if (derivedColList.entries() != sourceTable.getDegree()) {
// 4016 The number of derived columns must equal the degree of the derived table.
*CmpCommon::diags() << DgSqlCode(-4016)
#pragma nowarn(1506) // warning elimination
<< DgInt0(derivedColList.entries()) << DgInt1(sourceTable.getDegree());
#pragma warn(1506) // warning elimination
bindWA->setErrStatus();
delete resultTable;
return this;
}
}
//
// Insert the derived column names into the result table.
// By ANSI 6.3 SR 6 (applies to explicit derived column list),
// duplicate names are not allowed.
// If user did not specify a derived column name list,
// expose the select list's column names (implicit derived column list);
// ANSI does not say that these cannot be duplicates --
// if there's a later (i.e. in an outer scope) reference to a duplicately
// named column, ColReference::bindNode will issue an error
// (in particular, if all references are to constants, e.g. "count(*)",
// then duplicates are not disallowed in the implicit derived column list!).
//
// When Create View DDL uses this Binder, we must enforce
// ANSI 11.19 SR 8 + 9, clearly disallowing dups/ambigs
// (and disallowing implem-dependent names, i.e. our unnamed '(expr)' cols!).
//
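// Illustration: in SELECT * FROM (SELECT a, a FROM t) AS d, the implicit
// derived column list exposes the duplicate name "a" without error here;
// only a later outer reference to d.a would be rejected by
// ColReference::bindNode.
//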
for (i = 0; i < sourceTable.getDegree(); i++) {
//
if (derivedColTree) { // explicit derived column list
CMPASSERT(derivedColList[i]->getOperatorType() == ITM_RENAME_COL);
simpleColNameStr = &((RenameCol *) derivedColList[i])->
getNewColRefName()->getColName();
if (*simpleColNameStr != "") { // named column, not an expression
if (resultTable->findColumn(*simpleColNameStr)) {
ColRefName errColName(*simpleColNameStr, tableName);
// 4017 Derived column name $ColumnName was specified more than once.
*CmpCommon::diags() << DgSqlCode(-4017)
<< DgColumnName(errColName.getColRefAsAnsiString());
bindWA->setErrStatus();
delete resultTable;
return this;
}
}
} else // implicit derived column list
simpleColNameStr = &sourceTable.getColRefNameObj(i).getColName();
//
ColRefName colRefName(*simpleColNameStr, tableName);
ValueId valId = sourceTable.getValueId(i);
resultTable->addColumn(bindWA, colRefName, valId);
} // for-loop
//
// Insert system columns similarly, completely ignoring dup names.
//
const ColumnDescList &sysColList = *sourceTable.getSystemColumnList();
for (i = 0; i < sysColList.entries(); i++) {
simpleColNameStr = &sysColList[i]->getColRefNameObj().getColName();
if (NOT resultTable->findColumn(*simpleColNameStr)) {
ColRefName colRefName(*simpleColNameStr, tableName);
ValueId valId = sysColList[i]->getValueId(); // (slight diff from the
resultTable->addColumn(bindWA, colRefName, valId, SYSTEM_COLUMN); //above)
}
}
setRETDesc(resultTable);
// MVs --
// When binding INTERNAL REFRESH commands, the SYSKEY and @OP columns should
// be propagated to the scope above, even when they are not specified in the
// select list.
if (bindWA->isPropagateOpAndSyskeyColumns())
getRETDesc()->propagateOpAndSyskeyColumns(bindWA, FALSE);
bindWA->getCurrentScope()->setRETDesc(resultTable);
//
// Insert the table name into the XTNM,
// casting away constness on the correlation name
// in order to have default cat+sch filled in.
//
bindWA->getCurrentScope()->getXTNM()->insertNames(bindWA,
(CorrName &)tableName);
if (bindWA->errStatus()) {
delete resultTable;
return this;
}
if (getViewNATable())
{
const NATable * natable = getViewNATable() ;
const ColumnDescList &columnsRET = *(resultTable->getColumnList());
for (i = 0; i < natable->getColumnCount(); i++)
{
columnsRET[i]->setViewColPosition(
((natable->getNAColumnArray())[i])->getPosition());
columnsRET[i]->setViewFileName((const char*)natable->getViewFileName());
}
}
//
// Bind the base class.
//
return bindSelf(bindWA);
} // RenameTable::bindNode()
// -----------------------------------------------------------------------
// member functions for class RenameReference
// -----------------------------------------------------------------------
// This method replaces the RETDesc of the current scope with a new RETDesc
// that contains the columns of the transition values (OLD@ and NEW@) but
// with correlation names specified by the user in the REFERENCING clause
// of the row trigger.
void RenameReference::prepareRETDescWithTableRefs(BindWA *bindWA)
{
CollIndex refsToFind = getRefList().entries();
CollIndex refsFound = 0;
RETDesc *retDesc;
// First find the NEW@ and OLD@ tables in one of the scopes.
BindScope *scope = bindWA->getCurrentScope();
// For each BindScope,
while ((scope!=NULL) && (refsToFind > refsFound))
{ // until we find all the references.
retDesc = scope->getRETDesc();
// Skip if an empty RETDesc
if ((retDesc!=NULL) && !retDesc->isEmpty())
{
// For each reference to change
for (CollIndex i=0; i<refsToFind; i++)
// Find the table name in the RETDesc, and save a pointer to its
// column list in the TableRefName object.
if(getRefList().at(i).lookupTableName(retDesc))
refsFound++;
}
// Get the next BindScope to search.
scope = bindWA->getPreviousScope(scope);
} // while not done
// Create an empty RETDesc for the current scope.
RETDesc *resultTable = new (bindWA->wHeap()) RETDesc(bindWA);
bindWA->getCurrentScope()->setRETDesc(resultTable);
// For each table reference, add to the RETDesc of the current scope
// the columns of the referenced tables, with the new referencing
// names as correlation names.
for (CollIndex i=0; i<refsToFind; i++)
getRefList()[i].bindRefColumns(bindWA);
}
// The RenameReference node renames values flowing down through it.
// It is used above a row trigger body, to implement the REFERENCING clause
// of the trigger definition - renaming the OLD and NEW transition variables
// to user specified names.
//
// This bind is top-down, so we first prepare the RETDesc, and then bind
// the children using this RETDesc.
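// Illustration: for CREATE TRIGGER ... REFERENCING OLD AS MY_OLD
// NEW AS MY_NEW, values that flow into the trigger body under the
// internal names OLD@ and NEW@ are seen by the trigger action as
// MY_OLD and MY_NEW.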
RelExpr *RenameReference::bindNode(BindWA *bindWA)
{
if (nodeIsBound())
{
bindWA->getCurrentScope()->setRETDesc(getRETDesc());
return this;
}
// Save the current RETDesc.
RETDesc *prevRETDesc = bindWA->getCurrentScope()->getRETDesc();
// Replace the RETDesc of the current scope with one that contains the user
// names (MY_NEW, MY_OLD) instead of the reference names (NEW@, OLD@).
prepareRETDescWithTableRefs(bindWA);
// Bind the child nodes, in a new BindScope.
// If we don't open a new scope here, the bindChildren() method will
// overwrite the RETDesc of the current scope with NULL.
bindWA->initNewScope();
bindChildren(bindWA);
if (bindWA->errStatus())
return this;
// Bind the base class.
RelExpr *boundNode = bindSelf(bindWA);
// Save this scope's outer references before removing the scope.
const ValueIdSet myOuterRefs = bindWA->getCurrentScope()->getOuterRefs();
setRETDesc(bindWA->getCurrentScope()->getRETDesc());
bindWA->removeCurrentScope();
bindWA->getCurrentScope()->setRETDesc(prevRETDesc);
// Now merge the outer references into the previous scope.
bindWA->getCurrentScope()->mergeOuterRefs(myOuterRefs);
return boundNode;
} // RenameReference::bindNode()
// -----------------------------------------------------------------------
// member functions for class BeforeTrigger
// -----------------------------------------------------------------------
//////////////////////////////////////////////////////////////////////////////
// Find the name and position of a column SET to by this before trigger.
// The targetColName is an output parameter, saving the bindSetClause()
// method the work of finding the column name.
// The naTable parameter is NULL during DML, and is only used for DDL
// semantic checks.
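// For example, for the i'th expression SET MY_NEW.SALARY = <expr>,
// targetColName is set to SALARY; when naTable is supplied, the column's
// position is returned (-1 if SALARY is not a column of the table).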
//////////////////////////////////////////////////////////////////////////////
Lng32 BeforeTrigger::getTargetColumn(CollIndex i, // Index of Assign expr.
ColRefName* targetColName,
const NATable *naTable)
{
ItemExpr *currentAssign = setList_->at(i);
CMPASSERT(currentAssign->getOperatorType() == ITM_ASSIGN);
ItemExpr *targetColReference = currentAssign->child(0);
CMPASSERT(targetColReference->getOperatorType() == ITM_REFERENCE);
ColRefName& targetColRefName =
((ColReference *)targetColReference)->getColRefNameObj();
if (targetColName != NULL) // return the column name to the binder.
*targetColName = targetColRefName;
const NAString& colName = targetColRefName.getColName();
// If called during DML binding of the BeforeTrigger node, the
// column position will not be used, because the check for duplicate
// SET columns was done in DDL time.
if (naTable == NULL)
return 0;
// We get here from DDL binding of the BeforeTrigger node, or from
// the Inlining code.
NAColumn *colObj = naTable->getNAColumnArray().getColumn(colName);
// If colObj is NULL, it's a bad column name.
if (colObj == NULL)
return -1;
return colObj->getPosition();
}
//////////////////////////////////////////////////////////////////////////////
// This method is called only during DDL (CREATE TRIGGER) of a before trigger
// with a SET clause.
// Each of the columns updated by the SET clause goes through several
// semantic checks, that cannot be done in the parser.
//////////////////////////////////////////////////////////////////////////////
void BeforeTrigger::doSetSemanticChecks(BindWA *bindWA, RETDesc *origRETDesc)
{
UpdateColumns localCols = UpdateColumns(FALSE);
ColRefName currentCol;
const NATable *scanNaTable = NULL;
NABoolean isUpdateOp=FALSE;
Scan *scanNode = getLeftmostScanNode();
CMPASSERT(scanNode != NULL);
scanNaTable = scanNode->getTableDesc()->getNATable();
CorrName oldCorr(OLDCorr);
if (origRETDesc->getQualColumnList(oldCorr))
isUpdateOp = TRUE;
for (CollIndex i=0; i<setList_->entries(); i++)
{
// Get the name and position of the Assign target column.
Lng32 targetColPosition = getTargetColumn(i, &currentCol, scanNaTable);
if (!currentCol.getCorrNameObj().isATriggerTransitionName(bindWA, TRUE))
{
// 11017 Left hand of SET assignment must be qualified with the name of the NEW transition variable
*CmpCommon::diags() << DgSqlCode(-11017) ; // must be NEW name
bindWA->setErrStatus();
return;
}
if (targetColPosition == -1)
{
// 11022 Column $0~ColumnName is not a column in table $0~TableName
NAString tableName = scanNaTable->getTableName().getQualifiedNameAsString();
*CmpCommon::diags() << DgSqlCode(-11022)
<< DgColumnName(currentCol.getColName())
<< DgTableName(tableName);
bindWA->setErrStatus();
return;
}
// We need to check for duplicate SET columns in DDL time only.
if (localCols.contains(targetColPosition))
{
// 4022 column specified more than once
*CmpCommon::diags() << DgSqlCode(-4022)
<< DgColumnName(currentCol.getColName());
bindWA->setErrStatus();
return;
}
localCols.addColumn(targetColPosition);
// Is this a SET into a column that is part of the clustering key?
// This is only allowed on Inserts, not on Updates (Deletes never get here).
if (isUpdateOp &&
scanNaTable->getNAColumnArray().getColumn(targetColPosition)->isClusteringKey())
{
// 4033 Column $0~ColumnName is a primary or clustering key column and cannot be updated.
*CmpCommon::diags() << DgSqlCode(-4033)
<< DgColumnName(currentCol.getColName());
bindWA->setErrStatus();
return;
}
}
}
//////////////////////////////////////////////////////////////////////////////
// This method is called for before triggers that use the SET clause.
// For each column to be set using SET MYNEW.<colname> = <setExpr> do:
// 1. Find NEW@.<colname> in origRETDesc.
// 2. Verify that there is such a column, and that the user is allowed to
// change it.
// 3. Get the column's ItemExpr expression, and save it in passThruExpr.
// 4. Create an ItemExpr tree as follows:
// case
// |
// IfThenElse
// / | \
// condition setExpr passThruExpr
//
// where condition is the WHEN clause expression.
// 5. Bind this new expression in the RETDesc of the current scope.
// 6. remove NEW@.<colname> from origRETDesc, and re-insert it as the new
// expression.
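//
// Illustration: SET MY_NEW.A = <setExpr> with WHEN <cond> effectively
// becomes A = CASE WHEN <cond> THEN <setExpr> ELSE NEW@.A END, where
// NEW@.A is the passThruExpr.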
//////////////////////////////////////////////////////////////////////////////
void BeforeTrigger::bindSetClause(BindWA *bindWA, RETDesc *origRETDesc, CollHeap *heap)
{
// Semantic checks are only needed during DDL.
if (bindWA->inDDL())
{
doSetSemanticChecks(bindWA, origRETDesc);
if (bindWA->errStatus())
return;
}
CorrName newCorr(NEWCorr);
const TableRefName *newRefName = getRefList().findTable(newCorr);
CMPASSERT(newRefName!=NULL);
CorrName newRef = newRefName->getTableCorr();
ColRefName currentCol;
// For each Assign expression in the list.
for (CollIndex i=0; i<setList_->entries(); i++)
{
// Get the name and position of the Assign target column.
Lng32 targetColPosition = getTargetColumn(i, &currentCol, NULL);
currentCol.getCorrNameObj() = newRef;
ItemExpr *setExpr = setList_->at(i)->child(1);
// Find the current value of this NEW@ column.
ColumnNameMap *currentColExpr = origRETDesc->findColumn(currentCol);
CMPASSERT(currentColExpr != NULL); // Otherwise error 11022 would have been raised - see above.
ItemExpr *passThruExpr = currentColExpr->getValueId().getItemExpr();
ItemExpr *colExpr = NULL;
if (whenClause_ == NULL)
// After we add the support for reading the trigger status from
// the resource fork, and adding it to the condition, we should
// never get here.
colExpr = setExpr;
else
{
IfThenElse *ifExpr = new(heap)
IfThenElse(whenClause_, setExpr, passThruExpr);
colExpr = new(heap) Case(NULL, ifExpr);
}
colExpr = colExpr->bindNode(bindWA);
if (bindWA->errStatus())
return;
// Now remove and re-insert the column to the original RETDesc,
// that will be restored at the bottom of the method.
currentCol.getCorrNameObj() = newCorr;
origRETDesc->delColumn(bindWA, currentCol, USER_COLUMN);
origRETDesc->addColumn(bindWA, currentCol, colExpr->getValueId());
// force binding of the assign here so that type incompatibility is caught
// during DDL
if (bindWA->inDDL())
{
ItemExpr *currentAssign = setList_->at(i);
CMPASSERT(currentAssign->getOperatorType() == ITM_ASSIGN);
currentAssign->bindNode(bindWA);
}
}
}
//////////////////////////////////////////////////////////////////////////////
// This method is called for before triggers that use the SIGNAL clause.
// 1. Find the "virtual execId column" in origRETDesc.
// 2. Get the column's ItemExpr expression, and save it in passThruExpr.
// 3. Create an ItemExpr tree as follows:
// case
// |
// IfThenElse
// / | \
// AND passThruExpr passThruExpr
// / \
// condition RaiseError
//
// where condition is the WHEN clause expression, and RaiseError is the
// SIGNAL expression.
// 4. Bind this new expression in the RETDesc of the current scope.
// 5. Remove the "virtual execId column" from origRETDesc, and re-insert it as
// the new expression.
//
// The value of the expression is always the passThruExpr, for type
// compatibility, since if the SIGNAL fires, the actual value returned does
// not matter. The AND will evaluate the RaiseError only if the condition
// evaluates to TRUE.
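//
// Illustration: with a WHEN clause, the execId column effectively becomes
// CASE WHEN <cond> AND RaiseError(...) THEN execId ELSE execId END
// (with <cond> first wrapped in IS TRUE when it is an AND/OR expression),
// so the signal is raised only when the condition holds.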
//////////////////////////////////////////////////////////////////////////////
void BeforeTrigger::bindSignalClause(BindWA *bindWA, RETDesc *origRETDesc, CollHeap *heap)
{
if (bindWA->inDDL())
{
// In DDL time (CREATE TRIGGER) all we need is to bind the signal
// expression for semantic checks.
signal_->bindNode(bindWA);
if (bindWA->errStatus())
return;
}
else
{
// The SIGNAL expression is piggy-backed on the Unique ExecuteID
// value inserted into the temp table.
ColumnNameMap *execIdCol =
origRETDesc->findColumn(InliningInfo::getExecIdVirtualColName());
CMPASSERT(execIdCol != NULL);
const ColRefName& ExecIdColName = execIdCol->getColRefNameObj();
ItemExpr *passThruExpr = execIdCol->getValueId().getItemExpr();
ItemExpr *whenAndSignal = NULL;
// Case 10-040604-5021:
// General AND logic uses "short circuiting" as follows: if the
// left side is FALSE, evaluation of the right side is skipped, and
// the result returned is FALSE. The following expression depends on
// evaluation of the right side being skipped whenever the left side
// is NOT TRUE, (i.e., FALSE or NULL). Therefore, an IS TRUE unary
// predicate must be placed above the actual WHEN condition. Otherwise,
// the signal will fire when the WHEN condition evaluates to NULL.
if (whenClause_ != NULL)
{
if (whenClause_->getOperatorType() == ITM_AND ||
whenClause_->getOperatorType() == ITM_OR)
{
ItemExpr *isTrueExpr = new (heap) UnLogic(ITM_IS_TRUE, whenClause_);
whenAndSignal = new(heap) BiLogic(ITM_AND, isTrueExpr, signal_);
}
else
{
whenAndSignal = new(heap) BiLogic(ITM_AND, whenClause_, signal_);
}
}
else
// After we add the support for reading the trigger status from
// the resource fork, and adding it to the condition, we should
// never get here.
whenAndSignal = signal_;
// For type compatibility, the original value is used no matter what the
// WHEN clause evaluates to. However, if it evaluates to TRUE, the
// evaluation of the signal expression will throw an SQLERROR.
ItemExpr *condSignalExpr = new(heap)
Case(NULL, new(heap)
IfThenElse(whenAndSignal, passThruExpr, passThruExpr));
condSignalExpr = condSignalExpr->bindNode(bindWA);
if (bindWA->errStatus())
return;
// Now delete the original "virtual column" from the RETDesc, and
// re-insert it with the new value.
origRETDesc->delColumn(bindWA, ExecIdColName, USER_COLUMN);
origRETDesc->addColumn(bindWA, ExecIdColName, condSignalExpr->getValueId());
}
}
//////////////////////////////////////////////////////////////////////////////
// This bind is bottom-up, so we first bind the children, and then use
// and change the RETDesc they created.
//////////////////////////////////////////////////////////////////////////////
RelExpr *BeforeTrigger::bindNode(BindWA *bindWA)
{
if (nodeIsBound())
{
bindWA->getCurrentScope()->setRETDesc(getRETDesc());
return this;
}
bindChildren(bindWA);
if (bindWA->errStatus())
return this;
// Now we know that we have the columns of OLD@ and NEW@ in the RETDesc
// of the current scope. Save this scope so we can update it and restore
// it when we are done.
RETDesc *origRETDesc = bindWA->getCurrentScope()->getRETDesc();
CollHeap *heap = bindWA->wHeap();
CollIndex refsToFind = getRefList().entries();
// For each reference to change, Find the table name in the RETDesc,
// and save a pointer to its column list in the TableRefName object.
CollIndex i=0;
for (i=0; i<refsToFind; i++)
getRefList().at(i).lookupTableName(origRETDesc);
// Create an empty RETDesc for the current scope.
// It will contain the names the user specified (MY_NEW, MY_OLD) for the
// OLD@ and NEW@ transition variables, and will be used to bind this
// node only.
bindWA->getCurrentScope()->setRETDesc(new(heap) RETDesc(bindWA));
// For each table reference, add to the RETDesc of the current scope,
// the columns of the referenced tables with the new referencing names
// as correlation names.
for (i=0; i<refsToFind; i++)
getRefList().at(i).bindRefColumns(bindWA);
// First bind the condition. The ValueId will be used later (possibly
// multiple times) so that during execution, the expression will be
// evaluated only once.
if (whenClause_ != NULL)
{
whenClause_ = whenClause_->bindNode(bindWA);
if (bindWA->errStatus())
return this;
}
// Use the bound condition to prepare the conditional expression
// for each column modified by the trigger (SET MY_NEW.a = ...)
if (setList_ != NULL)
bindSetClause(bindWA, origRETDesc, heap);
// Use the bound condition to prepare the conditional SIGNAL
// expression, on the ExecuteId "virtual column".
if (signal_ != NULL)
bindSignalClause(bindWA, origRETDesc, heap);
if (bindWA->errStatus())
return this;
// We don't need the RETDesc of the current scope anymore. Restore the
// original RETDesc with the updated columns.
bindWA->getCurrentScope()->setRETDesc(origRETDesc);
if (parentTSJ_ != NULL)
{
// If this is the top most before trigger, save a copy of the RETDesc
// for use by the transformNode() pass.
RETDesc *savedRETDesc = new(heap) RETDesc(bindWA, *origRETDesc);
setRETDesc(savedRETDesc);
}
//
// Bind the base class.
//
RelExpr *boundNode = bindSelf(bindWA);
return boundNode;
} // BeforeTrigger::bindNode()
// -----------------------------------------------------------------------
// member functions for class Insert
// -----------------------------------------------------------------------
// LCOV_EXCL_START - cnu
static void bindInsertRRKey(BindWA *bindWA, Insert *insert,
ValueIdList &sysColList, CollIndex i)
{
// For a KS round-robin partitioned table, the system column
// (for now there is only one, SYSKEY) is initialized via the expression
// "ProgDistribKey(partNum, rowPos, totalNumParts)".
//
const NAFileSet *fs =
insert->getTableDesc()->getClusteringIndex()->getNAFileSet();
// For now, round-robin partitioned tables are always stored in
// key-sequenced files, and there is only one system column (SYSKEY)
// which is at the beginning of the record.
CMPASSERT(fs->isKeySequenced() && i==0);
CollHeap *heap = bindWA->wHeap();
// Host variables that provide access to partition number,
// row position, and total number of partitions --
// supplied at run-time by the executor insert node.
//
ItemExpr *partNum = new (heap)
HostVar("_sys_hostVarInsertPartNum",
new (heap) SQLInt(FALSE,FALSE), // int unsigned not null
TRUE // is system-generated
);
partNum->synthTypeAndValueId();
insert->partNumInput() = partNum->getValueId(); // for later use in codeGen
ItemExpr *rowPos = new (heap)
HostVar("_sys_hostVarInsertRowPos",
new (heap) SQLInt(FALSE,FALSE), // int unsigned not null
TRUE // is system-generated
);
rowPos->synthTypeAndValueId();
insert->rowPosInput() = rowPos->getValueId(); // for later use in codeGen
ItemExpr *totNumParts = new (heap)
HostVar("_sys_hostVarInsertTotNumParts",
new (heap) SQLInt(FALSE,FALSE), // int unsigned not null
TRUE // is system-generated
);
totNumParts->synthTypeAndValueId();
insert->totalNumPartsInput() = totNumParts->getValueId(); // for later use
// Generate expression to compute a round-robin key. Parameters to
// ProgDistribKey are the partition number, the row position (which
// is chosen randomly; the insert node will retry if a number is
// selected that is already in use), and the total number of
// partitions.
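// Illustration: the bound expression is effectively
// SYSKEY = ProgDistribKey(partNum, rowPos, totNumParts), with all three
// inputs supplied at run time by the executor insert node.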
ItemExpr *rrKey = new (heap) ProgDistribKey(partNum, rowPos, totNumParts);
// Build and set round-robin key expression.
Assign *assign = new (heap)
Assign(sysColList[i].getItemExpr(), rrKey, FALSE /*not user-specified*/);
assign->bindNode(bindWA);
insert->rrKeyExpr() = assign->getValueId();
} // bindInsertRRKey
// LCOV_EXCL_STOP
RelExpr *Insert::bindNode(BindWA *bindWA)
{
if (nodeIsBound())
{
bindWA->getCurrentScope()->setRETDesc(getRETDesc());
return this;
}
// Set local binding flags
setInUpdateOrInsert(bindWA, this, REL_INSERT);
// The 8108 (unique constraint on an ID column) error must be raised
// only for system generated IDENTITY values and not for
// user generated ID values. We use the GenericUpdate::identityColumnUniqueIndex_
// to indicate to the EID that 8108 should be raised in place of 8102.
// This variable is used to indicate that there is an IDENTITY column
// in the table for which the system is generating the value
// This is NULL if "DEFAULT VALUES" was specified,
// non-NULL if a query-expr child was specified: VALUES.., TABLE.., SELECT..
RelExpr *someNonDefaultValuesSpecified = child(0);
// Set flag for firstN in context
if (child(0) && child(0)->getOperatorType() == REL_ROOT) // Indicating subquery
if (child(0)->castToRelExpr() &&
child(0)->castToRelExpr()->getFirstNRows() >= 0)
if (bindWA &&
bindWA->getCurrentScope() &&
bindWA->getCurrentScope()->context())
bindWA->getCurrentScope()->context()->firstN() = TRUE;
if (NOT someNonDefaultValuesSpecified) { // "DEFAULT VALUES" specified
// Kludge up a dummy child before binding the GenericUpdate tree
setChild(0, new(bindWA->wHeap()) Tuple(new(bindWA->wHeap()) SystemLiteral(0)));
}
// Bind the GenericUpdate tree.
//
RETDesc *incomingRETDescForSource = bindWA->getCurrentScope()->getRETDesc();
RelExpr *boundExpr = GenericUpdate::bindNode(bindWA);
if (bindWA->errStatus())
return boundExpr;
const NAFileSet* fileset = getTableDesc()->getNATable()->getClusteringIndex();
const NAColumnArray& partKeyCols = fileset->getPartitioningKeyColumns();
if (getTableDesc()->getNATable()->isHiveTable())
{
if (partKeyCols.entries() > 0)
{
// Insert into partitioned tables would require computing the target
// partition directory name, something we don't support yet.
*CmpCommon::diags() << DgSqlCode(-4222)
<< DgString0("Insert into partitioned Hive tables");
bindWA->setErrStatus();
return this;
}
RelExpr * mychild = child(0);
const HHDFSTableStats* hTabStats =
getTableDesc()->getNATable()->getClusteringIndex()->getHHDFSTableStats();
const char * hiveTablePath;
NAString hostName;
Int32 hdfsPort;
NAString tableDir;
NABoolean result;
char fldSep[2];
char recSep[2];
memset(fldSep,'\0',2);
memset(recSep,'\0',2);
fldSep[0] = hTabStats->getFieldTerminator();
recSep[0] = hTabStats->getRecordTerminator();
// don't rely on timeouts to invalidate the HDFS stats for the target table,
// make sure that we invalidate them right after compiling this statement,
// at least for this process
((NATable*)(getTableDesc()->getNATable()))->setClearHDFSStatsAfterStmt(TRUE);
// inserting into tables with multiple partitions is not yet supported
CMPASSERT(hTabStats->entries() == 1);
hiveTablePath = (*hTabStats)[0]->getDirName();
result = ((HHDFSTableStats* )hTabStats)->splitLocation
(hiveTablePath, hostName, hdfsPort, tableDir) ;
if (!result) {
*CmpCommon::diags() << DgSqlCode(-4224)
<< DgString0(hiveTablePath);
bindWA->setErrStatus();
return this;
}
// NABoolean isSequenceFile = (*hTabStats)[0]->isSequenceFile();
const NABoolean isSequenceFile = hTabStats->isSequenceFile();
RelExpr * unloadRelExpr =
new (bindWA->wHeap())
FastExtract( mychild,
new (bindWA->wHeap()) NAString(hiveTablePath),
new (bindWA->wHeap()) NAString(hostName),
hdfsPort,
TRUE,
new (bindWA->wHeap()) NAString(getTableName().getQualifiedNameObj().getObjectName()),
FastExtract::FILE,
bindWA->wHeap());
RelExpr * boundUnloadRelExpr = unloadRelExpr->bindNode(bindWA);
if (bindWA->errStatus())
return NULL;
((FastExtract*)boundUnloadRelExpr)->setRecordSeparator(recSep);
((FastExtract*)boundUnloadRelExpr)->setDelimiter(fldSep);
((FastExtract*)boundUnloadRelExpr)->setOverwriteHiveTable(getOverwriteHiveTable());
((FastExtract*)boundUnloadRelExpr)->setSequenceFile(isSequenceFile);
if (getOverwriteHiveTable())
{
RelExpr * newRelExpr = new (bindWA->wHeap())
ExeUtilFastDelete(getTableName(),
NULL,
(char*)"hive_truncate",
CharInfo::ISO88591,
FALSE,
TRUE,
TRUE,
TRUE,
bindWA->wHeap(),
TRUE,
new (bindWA->wHeap()) NAString(tableDir),
new (bindWA->wHeap()) NAString(hostName),
hdfsPort);
//new root to prevent error 4056 when binding
newRelExpr = new (bindWA->wHeap()) RelRoot(newRelExpr);
RelExpr *blockedUnion = new (bindWA->wHeap()) Union(newRelExpr, boundUnloadRelExpr);
((Union*)blockedUnion)->setBlockedUnion();
((Union*)blockedUnion)->setSerialUnion();
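// The blocked (serial) union evaluates its left child (the truncate)
// to completion before its right child (the FastExtract load) starts.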
RelExpr *boundBlockedUnion = blockedUnion->bindNode(bindWA);
if (bindWA->errStatus())
return NULL;
return boundBlockedUnion;
}
return boundUnloadRelExpr;
}
if(!(getOperatorType() == REL_UNARY_INSERT &&
(child(0)->getOperatorType() == REL_TUPLE || // VALUES (1,'b')
child(0)->getOperatorType() == REL_TUPLE_LIST || // VALUES (1,'b'),(2,'Y')
child(0)->getOperatorType() == REL_UNION)) && // VALUES with subquery
(getOperatorType() != REL_LEAF_INSERT))
{
setInsertSelectQuery(TRUE);
}
// if table has a lob column, then fix up any reference to LOBinsert
// function in the source values list.
//
if ((getOperatorType() == REL_UNARY_INSERT) &&
(getTableDesc()->getNATable()->hasLobColumn()) &&
(child(0)->getOperatorType() == REL_TUPLE || // VALUES (1,'b')
child(0)->getOperatorType() == REL_TUPLE_LIST)) // VALUES (1,'b'),(2,'Y')
{
if (child(0)->getOperatorType() == REL_TUPLE_LIST)
{
TupleList * tl = (TupleList*)(child(0)->castToRelExpr());
for (CollIndex x = 0; x < (UInt32)tl->numTuples(); x++)
{
ValueIdList tup;
if (!tl->getTuple(bindWA, tup, x))
{
bindWA->setErrStatus();
return boundExpr; // something went wrong
}
for (CollIndex n = 0; n < tup.entries(); n++)
{
ItemExpr * ie = tup[n].getItemExpr();
if (ie->getOperatorType() == ITM_LOBINSERT)
{
// cannot have this function in a values list with multiple
// tuples. Use a single tuple.
*CmpCommon::diags() << DgSqlCode(-4483);
bindWA->setErrStatus();
return boundExpr;
}
} // for
} // for
} // if tuplelist
} // if
// Prepare for any IDENTITY column checking later on
NAString identityColumnName;
NABoolean identityColumnGeneratedAlways = FALSE;
identityColumnGeneratedAlways =
getTableDesc()->isIdentityColumnGeneratedAlways(&identityColumnName);
if ((getTableName().isVolatile()) &&
(CmpCommon::context()->sqlSession()->volatileSchemaInUse()) &&
(getTableName().getSpecialType() == ExtendedQualName::NORMAL_TABLE) &&
((ActiveSchemaDB()->getDefaults()).getAsLong(IMPLICIT_UPD_STATS_THRESHOLD) > -1) &&
(bindWA->isInsertSelectStatement()) &&
(NOT getTableDesc()->getNATable()->isVolatileTableMaterialized()))
{
if (NOT Get_SqlParser_Flags(INTERNAL_QUERY_FROM_EXEUTIL))
// if (NOT Get_SqlParser_Flags(NO_IMPLICIT_VOLATILE_TABLE_UPD_STATS))
{
// treat this insert as a volatile load stmt.
RelExpr * loadVolTab =
new (bindWA->wHeap())
ExeUtilLoadVolatileTable(getTableName(),
this,
bindWA->wHeap());
boundExpr = loadVolTab->bindNode(bindWA);
if (bindWA->errStatus())
return boundExpr;
return boundExpr;
}
else
{
NATable * nat = (NATable*)(getTableDesc()->getNATable());
nat->setIsVolatileTableMaterialized(TRUE);
}
}
// Now we have to create the following three collections:
//
// - newRecExpr()
// An unordered set of Assign nodes of the form
// "col1 = value1, col2 = value2, ..." which is used by Norm/Optimizer.
//
// - newRecExprArray()
// An ordered array of Assign nodes of the same form,
// ordered by column position, which is used by Generator.
// This array must have the following properties:
//
// - All columns not specified in the insert statement must be
// Assign'ed with their default values.
//
// - If this is a key-sequenced table with a (non-RR) SYSKEY column,
// we must create the first entry in the newRecExprArray
// to be "SYSKEY_COL = 0". This is a placeholder where the timestamp
// value will be moved at runtime. Round-robin SYSKEY columns are
// initialized via an expression of the form "SYSKEY_COL =
// ProgDistribKey(..params..)". SYSKEY columns for other table
// organizations are handled by the file system or disk process.
//
// - updateToSelectMap()
// A ValueIdMap that can be used to rewrite value ids of the
// target table in terms of the source table and vice versa.
// The top value ids are target value ids, the bottom value ids
// are those of the source.
//
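// Illustration: for INSERT INTO T(c2) VALUES(5) on a table T(c1,c2,c3),
// newRecExprArray() is ordered by column position, effectively
// [c1 = <default>, c2 = 5, c3 = <default>], and updateToSelectMap()
// maps target column c2 to the source value 5.
//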
NABoolean view = bindWA->getNATable(getTableName())->getViewText() != NULL;
ValueIdList tgtColList, userColList, sysColList, *userColListPtr;
CollIndexList colnoList;
CollIndex totalColCount, defaultColCount, i;
getTableDesc()->getSystemColumnList(sysColList);
//
// Detach the column list and bind the columns to the target table.
// Set up "colnoList" to map explicitly specified columns to where
// in the ordered array we will be inserting later.
//
ItemExpr *columnTree = removeInsertColTree();
CMPASSERT(NOT columnTree || someNonDefaultValuesSpecified);
if (columnTree || (view && someNonDefaultValuesSpecified)) {
//
// INSERT INTO t(colx,coly,...) query-expr;
// INSERT INTO v(cola,colb,...) query-expr;
// INSERT INTO v query-expr;
// where query-expr is VALUES..., TABLE..., or SELECT...,
// but not DEFAULT VALUES.
// userColList is the full list of columns in the target table
// colnoList contains, for those columns specified in tgtColList,
// their ordinal position in the target table user column list
// (i.e., not counting system columns, which can't be specified
// in the insert column list); e.g. '(Z,X,Y)' -> [3,1,2]
//
CMPASSERT(NOT columnTree ||
columnTree->getOperatorType() == ITM_REFERENCE ||
columnTree->getOperatorType() == ITM_ITEM_LIST);
getTableDesc()->getUserColumnList(userColList);
userColListPtr = &userColList;
RETDesc *columnLkp;
if (columnTree) {
// bindRowValues will bind using the currently scoped RETDesc left in
// by GenericUpdate::bindNode, which will be that of the naTableTop
// (topmost view or table), *not* that of the base table (getTableDesc()).
columnLkp = bindRowValues(bindWA, columnTree, tgtColList, this, FALSE);
if (bindWA->errStatus()) return boundExpr;
}
else
{
columnLkp = bindWA->getCurrentScope()->getRETDesc();
columnLkp->getColumnList()->getValueIdList(tgtColList);
}
if (GU_DEBUG) {
// LCOV_EXCL_START - dpm
cerr << "columnLkp " << flush;
columnLkp->display();
// LCOV_EXCL_STOP
}
for (i = 0; i < columnLkp->getDegree(); i++) {
// Describes column in the base table:
ValueId source = columnLkp->getValueId(i);
const NAColumn *nacol = source.getNAColumn();
// Gets name of the column in this (possibly view) table:
const ColRefName colName = columnLkp->getColRefNameObj(i);
// solution 10-081114-7315
if (bindWA->inDDL() && bindWA->isInTrigger ())
{
if (!userColListPtr->contains(source))
{
// 4001 column not found
*CmpCommon::diags() << DgSqlCode(-4001)
<< DgColumnName(colName.getColName())
<< DgString0(getTableName().getQualifiedNameObj().getQualifiedNameAsAnsiString())
<< DgString1(bindWA->getDefaultSchema().getSchemaNameAsAnsiString());
bindWA->setErrStatus();
delete columnLkp;
return boundExpr;
}
}
if (columnLkp->findColumn(colName)->isDuplicate()) {
// 4022 column specified more than once
*CmpCommon::diags() << DgSqlCode(-4022)
<< DgColumnName(colName.getColName());
bindWA->setErrStatus();
delete columnLkp;
return boundExpr;
}
colnoList.insert(nacol->getPosition());
// Commented out this assert, as Assign::bindNode below emits nicer errmsg
// CMPASSERT((long)nacol->getPosition() - (long)firstColNumOnDisk >= 0);
}
if (columnTree) {
delete columnLkp;
columnLkp = NULL;
}
bindWA->getCurrentScope()->setRETDesc(getRETDesc());
}
else {
//
// INSERT INTO t query-expr;
// INSERT INTO t DEFAULT VALUES;
// INSERT INTO v DEFAULT VALUES;
// userColListPtr points to tgtColList (which is the full list)
// userColList not used (because tgtColList already is the full list)
// colnoList remains empty (because tgtColList is already in order)
// if no system columns, set to list of user cols otherwise
getTableDesc()->getUserColumnList(tgtColList);
userColListPtr = &tgtColList;
if (sysColList.entries()) {
// set up colnoList to indicate the user columns, to help
// binding DEFAULT clauses in DefaultSpecification::bindNode()
for (CollIndex uc=0; uc<tgtColList.entries(); uc++) {
colnoList.insert(tgtColList[uc].getNAColumn()->getPosition());
}
}
}
// Compute total number of columns. Note that there may be some unused
// entries in newRecExprArray(), in the following cases:
// - An SQL/MP entry sequenced table, entry 0 will not be used as
// the syskey (col 0) is not stored in that type of table
// - For computed columns that are not stored on disk
totalColCount = userColListPtr->entries() + sysColList.entries();
newRecExprArray().resize(totalColCount);
// Make sure children are bound -- GenericUpdate::bindNode defers
// their binding to now if this is an INSERT..VALUES(..),
// because only now do we have target column position info for
// correct binding of INSERT..VALUES(..,DEFAULT,..)
// in DefaultSpecification::bindNode.
//
// Save current RETDesc and XTNM.
// Bind the source in terms of the original RETDesc,
// with target column position info available through
// bindWA->getCurrentScope()->context()->updateOrInsertNode()
// (see DefaultSpecification::bindNode, calls Insert::getColDefaultValue).
// Restore RETDesc and XTNM.
//
RETDesc *currRETDesc = bindWA->getCurrentScope()->getRETDesc();
bindWA->getCurrentScope()->setRETDesc(incomingRETDescForSource);
bindWA->getCurrentScope()->xtnmStack()->createXTNM();
setTargetUserColPosList(colnoList);
// if my child is a TupleList, then all tuples are to be converted/cast
// to the corresponding target type of the tgtColList.
// Pass on the tgtColList to TupleList so it can generate the Cast nodes
// with the target types during the TupleList::bindNode.
TupleList *tl = NULL;
if (child(0)->getOperatorType() == REL_TUPLE_LIST) {
tl = (TupleList *)child(0)->castToRelExpr();
tl->castToList() = tgtColList;
}
if (getTolerateNonFatalError() != RelExpr::UNSPECIFIED_) {
HostArraysWA * arrayWA = bindWA->getHostArraysArea() ;
if (arrayWA && arrayWA->hasHostArraysInTuple()) {
if (getTolerateNonFatalError() == RelExpr::NOT_ATOMIC_)
arrayWA->setTolerateNonFatalError(TRUE);
else
arrayWA->setTolerateNonFatalError(FALSE); // Insert::tolerateNonfatalError == ATOMIC_
}
else if (arrayWA && NOT arrayWA->getRowwiseRowset()) {
// NOT ATOMIC only for rowset inserts
*CmpCommon::diags() << DgSqlCode(-30025) ;
bindWA->setErrStatus();
return boundExpr;
}
}
bindChildren(bindWA);
if (bindWA->errStatus()) return this;
// if this is an insert into native hbase table in _ROW_ format, then
// validate that only REL_TUPLE or REL_TUPLE_LIST is being used.
if ((getOperatorType() == REL_UNARY_INSERT) &&
(getTableDesc()->getNATable()->isHbaseRowTable()))
{
NABoolean isError = FALSE;
if (NOT (child(0)->getOperatorType() == REL_TUPLE || // VALUES (1,'b')
child(0)->getOperatorType() == REL_TUPLE_LIST)) // VALUES (1,'b'),(2,'Y')
{
isError = TRUE;
}
// Also make sure that inserts into column_details field of _ROW_ format
// hbase virtual table are being done through column_create function.
// For ex: insert into hbase."_ROW_".hb values ('1', column_create('cf:a', '100'))
//
if ((NOT isError) && (child(0)->getOperatorType() == REL_TUPLE))
{
ValueIdList &tup = ((Tuple*)(child(0)->castToRelExpr()))->tupleExpr();
if (tup.entries() == 2) // can only have 2 entries
{
ItemExpr * ie = tup[1].getItemExpr();
if (ie && ie->getOperatorType() != ITM_HBASE_COLUMN_CREATE)
{
isError = TRUE;
}
}
else
isError = TRUE;
}
if ((NOT isError) && (child(0)->getOperatorType() == REL_TUPLE_LIST))
{
TupleList * tl = (TupleList*)(child(0)->castToRelExpr());
for (CollIndex x = 0; x < (UInt32)tl->numTuples(); x++)
{
ValueIdList tup;
if (!tl->getTuple(bindWA, tup, x))
{
isError = TRUE;
}
if (NOT isError)
{
if (tup.entries() == 2) // must have 2 entries
{
ItemExpr * ie = tup[1].getItemExpr();
if (ie->getOperatorType() != ITM_HBASE_COLUMN_CREATE)
{
isError = TRUE;
}
}
else
isError = TRUE;
} // if
} // for
} // if
if (isError)
{
*CmpCommon::diags() << DgSqlCode(-1429);
bindWA->setErrStatus();
return boundExpr;
}
}
// The only time that tgtColList.entries() (Insert's colList) differs from
// tl->castToList().entries() (TupleList's colList) is when DEFAULTs are
// removed in TupleList::bindNode() for an insert into a table with an
// IDENTITY column, where the system generates the values for it using
// SG (Sequence Generator).
// See TupleList::bindNode() for detailed comments.
// When tgtColList.entries() (Insert's col list) is not
// equal to tl->castToList().entries() (TupleList's column list),
// make sure the correct colList is used during binding.
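// Illustration: for T(i IDENTITY GENERATED ALWAYS, a, b),
// INSERT INTO T VALUES (DEFAULT, 1, 2) can leave tgtColList with three
// entries while the TupleList's castToList() has two, once the DEFAULT
// for the IDENTITY column has been removed.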
ValueIdList newTgtColList;
if(tl && (tgtColList.entries() != tl->castToList().entries()))
{
newTgtColList = tl->castToList();
CMPASSERT(newTgtColList.entries() == (tgtColList.entries() -1));
}
else
newTgtColList = tgtColList;
setTargetUserColPosList();
bindWA->getCurrentScope()->xtnmStack()->removeXTNM();
bindWA->getCurrentScope()->setRETDesc(currRETDesc);
NABoolean bulkLoadIndex = bindWA->isTrafLoadPrep() && noIMneeded() ;
if (someNonDefaultValuesSpecified)
// query-expr child specified
{
const RETDesc &sourceTable = *child(0)->getRETDesc();
if ((sourceTable.getDegree() != newTgtColList.entries())&& !bulkLoadIndex) {
// 4023 degree of row value constructor must equal that of target table
*CmpCommon::diags() << DgSqlCode(-4023)
#pragma nowarn(1506) // warning elimination
<< DgInt0(sourceTable.getDegree()) << DgInt1(tgtColList.entries());
#pragma warn(1506) // warning elimination
bindWA->setErrStatus();
return boundExpr;
}
OptSqlTableOpenInfo* stoiInList = NULL;
for (CollIndex ii=0; ii < bindWA->getStoiList().entries(); ii++)
{
if (getOptStoi() && getOptStoi()->getStoi())
{
if (strcmp((bindWA->getStoiList())[ii]->getStoi()->fileName(),
getOptStoi()->getStoi()->fileName()) == 0)
{
stoiInList = bindWA->getStoiList()[ii];
break;
}
}
}
// Combine the ValueIdLists for the column list and value list into a
// ValueIdSet (unordered) of Assign nodes and a ValueIdArray (ordered).
// Maintain a ValueIdMap between the source and target value ids.
CollIndex i2 = 0;
const ColumnDescList *viewColumns = NULL;
if (getBoundView())
viewColumns = getBoundView()->getRETDesc()->getColumnList();
if (bulkLoadIndex) {
setRETDesc(child(0)->getRETDesc());
}
for (i = 0; i < tgtColList.entries() && i2 < newTgtColList.entries(); i++) {
if(tgtColList[i] != newTgtColList[i2])
continue;
ValueId target = tgtColList[i];
ValueId source ;
if (!bulkLoadIndex)
source = sourceTable.getValueId(i2);
else {
ColRefName & cname = ((ColReference *)(baseColRefs()[i2]))->getColRefNameObj();
source = sourceTable.findColumn(cname)->getValueId();
}
CMPASSERT(target != source);
const NAColumn *nacol = target.getNAColumn();
const NAType &sourceType = source.getType();
const NAType &targetType = target.getType();
if ( DFS2REC::isFloat(sourceType.getFSDatatype()) &&
DFS2REC::isNumeric(targetType.getFSDatatype()) &&
(getTableDesc()->getNATable()->getPartitioningScheme() ==
COM_HASH_V1_PARTITIONING ||
getTableDesc()->getNATable()->getPartitioningScheme() ==
COM_HASH_V2_PARTITIONING) )
{
const NAColumnArray &partKeyCols = getTableDesc()->getNATable()
->getClusteringIndex()->getPartitioningKeyColumns();
for (CollIndex j=0; j < partKeyCols.entries(); j++)
{
if (partKeyCols[j]->getPosition() == nacol->getPosition())
{
ItemExpr *ie = source.getItemExpr();
ItemExpr *cast = new (bindWA->wHeap())
Cast(ie, &targetType, ITM_CAST);
cast = cast->bindNode(bindWA);
if (bindWA->errStatus())
return NULL;
source = cast->getValueId();
}
}
}
Assign *assign = new (bindWA->wHeap())
Assign(target.getItemExpr(), source.getItemExpr());
assign->bindNode(bindWA);
if(bindWA->errStatus())
return NULL;
if (stoiInList && !getUpdateCKorUniqueIndexKey())
{
if(!getBoundView())
stoiInList->addInsertColumn(nacol->getPosition());
else
{
NABoolean found = FALSE;
for (CollIndex k=0; k < viewColumns->entries(); k++) {
if ((*viewColumns)[k]->getValueId() == target) {
stoiInList->addInsertColumn((Lng32) k);
found = TRUE;
// Updatable views cannot have any underlying basetable column
// appear more than once, so it's safe to break out of the loop.
break;
}
} // loop k
CMPASSERT(found);
}
}
//
// Check for automatically inserted TRANSLATE nodes.
// Such nodes are inserted by the Implicit Casting And Translation feature.
// If this node has a child TRANSLATE node, then that TRANSLATE node
// is the real "source" that we must use from here on.
//
ItemExpr *assign_child = assign->child(1);
if ( assign_child->getOperatorType() == ITM_CAST )
{
const NAType& type = assign_child->getValueId().getType();
if ( type.getTypeQualifier() == NA_CHARACTER_TYPE )
{
ItemExpr *assign_grndchld = assign_child->child(0);
if ( assign_grndchld->getOperatorType() == ITM_TRANSLATE )
{
source = assign_grndchld->getValueId();
CMPASSERT(target != source);
}
}
}
const NAType *colType = nacol->getType();
if (!colType->isSupportedType()) {
*CmpCommon::diags() << DgSqlCode(-4027) // 4027 table not insertable
<< DgTableName(nacol->getNATable()->getTableName().getQualifiedNameAsAnsiString());
bindWA->setErrStatus();
}
if (bindWA->errStatus()) return boundExpr;
newRecExprArray().insertAt(nacol->getPosition(), assign->getValueId());
newRecExpr().insert(assign->getValueId());
const NAType& assignSrcType = assign->getSource().getType();
// if ( <we added some type of conversion> AND
// ( <tgt and src are both character> AND
// (<they are big and errors can occur> OR <charsets differ> OR <difference between tgt and src lengths is large>)))
// OR
// ( <we changed the basic type and we allow incompatible types> )
// )
// <then incorporate this added conversion into the updateToSelectMap>
if ( source != assign->getSource() &&
((assignSrcType.getTypeQualifier() == NA_CHARACTER_TYPE &&
sourceType.getTypeQualifier() == NA_CHARACTER_TYPE &&
((assign->getSource().getItemExpr()->getOperatorType() == ITM_CAST &&
sourceType.errorsCanOccur(assignSrcType) &&
sourceType.getNominalSize() >
CmpCommon::getDefaultNumeric(LOCAL_MESSAGE_BUFFER_SIZE)*1024) ||
// Temporary code to fix QC4395 in M6. For M7, try to set source
// to the right child of the assign after calling assign->bindNode.
// We should then be able to eliminate this entire if statement
// as well as the code to check for TRANSLATE nodes above.
((CharType &) assignSrcType).getCharSet() !=
((CharType &) sourceType).getCharSet() ||
// The optimizer may ask for source data to be partitioned or sorted on original source columns.
// This is the reason we need to choose the else branch below unless we have a particular reason
// to do otherwise. Each of the conditions in this if statement reflects one of those particular
// conditions. The bottomValues of updateToSelectMap will be placed in their entirety in the
// characteristic outputs of the source node. Outputs of the source node may be used to allocate
// buffers at runtime and therefore we would like to keep the output as small as possible.
// If the source cannot be partitioned/sorted on a column because we have assign->getSource() in the bottomValues,
// then the cost is that data will be repartitioned with an additional exchange node. If the difference in
// length between source and assignSrc is large, then the cost of repartition is less than the cost of
// allocating and using large buffers.
sourceType.getNominalSize() > (assignSrcType.getNominalSize() +
(ActiveSchemaDB()->getDefaults()).getAsLong(COMP_INT_98)) // default value is 512
))
||
// If we allow incompatible type assignments, also include the
// added cast into the updateToSelectMap
(assignSrcType.getTypeQualifier() != sourceType.getTypeQualifier() &&
CmpCommon::getDefault(ALLOW_INCOMPATIBLE_ASSIGNMENT) == DF_ON)))
{
updateToSelectMap().addMapEntry(target,assign->getSource());
}
else
{
updateToSelectMap().addMapEntry(target,source);
}
i2++;
}
}
setBoundView(NULL);
// Is the table round-robin (horizontal) partitioned?
PartitioningFunction *partFunc =
getTableDesc()->getClusteringIndex()->getNAFileSet()->
getPartitioningFunction();
NABoolean isRRTable =
partFunc && partFunc->isARoundRobinPartitioningFunction();
// Fill in default values for any columns not explicitly specified.
//
if (someNonDefaultValuesSpecified) // query-expr child specified, set system cols
defaultColCount = totalColCount - newTgtColList.entries();
else // "DEFAULT VALUES" specified
defaultColCount = totalColCount;
if (identityColumnGeneratedAlways)
defaultColCount = totalColCount;
if (defaultColCount) {
NAWchar zero_w_Str[2]; zero_w_Str[0] = L'0'; zero_w_Str[1] = L'\0'; // wide version
CollIndex sysColIx = 0, usrColIx = 0;
for (i = 0; i < totalColCount; i++) {
ValueId target;
NABoolean isASystemColumn = FALSE;
const NAColumn *nacol = NULL;
// find column on position i in the system or user column lists
if (sysColIx < sysColList.entries() &&
sysColList[sysColIx].getNAColumn()->getPosition() == i)
{
isASystemColumn = TRUE;
target = sysColList[sysColIx];
}
else
{
CMPASSERT((*userColListPtr)[usrColIx].getNAColumn()->getPosition() == i);
target = (*userColListPtr)[usrColIx];
}
nacol = target.getNAColumn();
// if we need to add the default value, we don't have a new rec expr yet
if (NOT newRecExprArray().used(i)) {
// check for SQL/MP entry sequenced tables omitted above
const char* defaultValueStr = NULL;
ItemExpr * defaultValueExpr = NULL;
NABoolean needToDeallocateColDefaultValueStr = FALSE;
// Used for datetime columns with COM_CURRENT_DEFAULT.
//
NAType *castType = NULL;
if (isASystemColumn) {
if (isRRTable) {
bindInsertRRKey(bindWA, this, sysColList, sysColIx);
if (bindWA->errStatus()) return boundExpr;
}
if (nacol->isComputedColumn())
{
CMPASSERT(target.getItemExpr()->getOperatorType() == ITM_BASECOLUMN);
ValueId defaultExprValId = ((BaseColumn *) target.getItemExpr())->
getComputedColumnExpr();
ValueIdMap updateToSelectMapCopy(updateToSelectMap());
// Use a copy to rewrite the value, to avoid requesting additional
// values from the child. We ask the child for all entries in this
// map in GenericUpdate::pushdownCoveredExpr().
updateToSelectMapCopy.rewriteValueIdDown(defaultExprValId, defaultExprValId);
defaultValueExpr = defaultExprValId.getItemExpr();
}
else
defaultValueStr = (char *)zero_w_Str;
}
else { // a user column (cf. Insert::getColDefaultValue)
CMPASSERT(NOT nacol->isComputedColumn()); // computed user cols not yet supported
defaultValueStr = nacol->getDefaultValue();
}
if (NOT defaultValueStr && NOT defaultValueExpr) {
// 4024 column has neither a default nor an explicit value.
*CmpCommon::diags() << DgSqlCode(-4024) << DgColumnName(nacol->getColName());
bindWA->setErrStatus();
return boundExpr;
}
if (defaultValueStr) {
// If the column has a default class of COM_CURRENT_DEFAULT,
// cast the default value (which is CURRENT_TIMESTAMP) to
// the type of the column. Here we capture the type of the
// column. COM_CURRENT_DEFAULT is only used for Datetime
// columns.
//
if (nacol->getDefaultClass() == COM_CURRENT_DEFAULT) {
castType = nacol->getType()->newCopy(bindWA->wHeap());
}
else if ((nacol->getDefaultClass() == COM_IDENTITY_GENERATED_ALWAYS) ||
(nacol->getDefaultClass() == COM_IDENTITY_GENERATED_BY_DEFAULT)) {
setSystemGeneratesIdentityValue(TRUE);
}
// Bind the default value, make an Assign, etc, as above
Parser parser(bindWA->currentCmpContext());
// save the current parserflags setting
ULng32 savedParserFlags = Get_SqlParser_Flags (0xFFFFFFFF);
Set_SqlParser_Flags(INTERNAL_QUERY_FROM_EXEUTIL);
Set_SqlParser_Flags(ALLOW_VOLATILE_SCHEMA_IN_TABLE_NAME);
defaultValueExpr = parser.getItemExprTree(defaultValueStr);
CMPASSERT(defaultValueExpr);
// Restore parser flags settings to what they originally were
Assign_SqlParser_Flags (savedParserFlags);
} // defaultValueStr != NULL
Assign *assign = NULL;
// If the default value string was successfully parsed,
// create an ASSIGN node and bind it.
//
if (defaultValueExpr) {
// If there is a non-NULL castType, then cast the default
// value to the castType. This is used in the case of
// datetime value with COM_CURRENT_DEFAULT. The default
// value will be CURRENT_TIMESTAMP for all datetime types,
// so must cast the CURRENT_TIMESTAMP to the type of the
// column.
//
if(castType) {
defaultValueExpr = new (bindWA->wHeap())
Cast(defaultValueExpr, castType);
}
// system generates value for IDENTITY column.
if (defaultValueExpr->getOperatorType() == ITM_IDENTITY &&
(CmpCommon::getDefault(COMP_BOOL_210) == DF_ON))
{
// SequenceGenerator::createSequenceSubqueryExpression()
// is called for introducing the subquery in
// defaultValueExpr::bindNode() (IdentityVar::bindNode()).
// We bind here to make sure the correct subquery
// is used.
defaultValueExpr = defaultValueExpr->bindNode(bindWA);
}
if (((isUpsertLoad()) ||
((isUpsert()) && (getTableDesc()->getNATable()-> isSQLMXAlignedTable()))) &&
(defaultValueExpr->getOperatorType() != ITM_IDENTITY) &&
(NOT isASystemColumn))
{
// for the 'upsert using load' construct, all values must be specified so
// the data can be loaded using inserts.
// If some values are missing, then it becomes an update.
*CmpCommon::diags() << DgSqlCode(-4246) ;
bindWA->setErrStatus();
return boundExpr;
}
assign = new (bindWA->wHeap())
Assign(target.getItemExpr(), defaultValueExpr,
FALSE /*not user-specified*/);
assign->bindNode(bindWA);
}
//
// Note: Parser or Binder errors from MP texts are possible.
//
if (!defaultValueExpr || bindWA->errStatus()) {
// 7001 Error preparing default on <column> for <table>.
*CmpCommon::diags() << DgSqlCode(-7001)
<< DgString0(defaultValueStr)
<< DgString1(nacol->getFullColRefNameAsAnsiString());
bindWA->setErrStatus();
return boundExpr;
}
newRecExprArray().insertAt(i, assign->getValueId());
newRecExpr().insert(assign->getValueId());
updateToSelectMap().addMapEntry(target,defaultValueExpr->getValueId());
if (needToDeallocateColDefaultValueStr && defaultValueStr != NULL)
{
NADELETEBASIC((NAWchar*)defaultValueStr, bindWA->wHeap());
defaultValueStr = NULL;
}
if (--defaultColCount == 0)
break; // tiny performance hack
} // NOT newRecExprArray().used(i)
else
{
if (nacol->getDefaultClass() == COM_IDENTITY_GENERATED_ALWAYS)
{
Assign * assign = (Assign*)newRecExprArray()[i].getItemExpr();
ItemExpr * ie = assign->getSource().getItemExpr();
if (NOT ie->wasDefaultClause())
{
*CmpCommon::diags() << DgSqlCode(-3428)
<< DgString0(nacol->getColName());
bindWA->setErrStatus();
return boundExpr;
}
}
}
if (isASystemColumn)
sysColIx++;
else
usrColIx++;
} // for i < totalColCount
} // defaultColCount
// Now add the default values created as part of the Assigns above
// to the characteristic inputs. The user-specified values are added
// to the characteristic inputs during GenericUpdate::bindNode
// executed earlier as part of this method.
getGroupAttr()->addCharacteristicInputs(bindWA->
getCurrentScope()->
getOuterRefs());
if (isRRTable) {
// LCOV_EXCL_START -
const LIST(IndexDesc *) indexes = getTableDesc()->getIndexes();
for(i = 0; i < indexes.entries(); i++) {
indexes[i]->getPartitioningFunction()->setAssignPartition(TRUE);
}
// LCOV_EXCL_STOP
}
// It is a system-generated identity value if
// identityColumn() != NULL_VALUE_ID. The identityColumn()
// is set in two places: (1) earlier in this method, and
// (2) DefaultSpecification::bindNode().
// The IDENTITY column of type GENERATED ALWAYS AS IDENTITY
// must be specified in the values list as (DEFAULT) or
// must be excluded from the values list forcing the default.
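// Illustrative example (table and column names are hypothetical): with a
// column C declared GENERATED ALWAYS AS IDENTITY,
// "INSERT INTO T(C, X) VALUES (DEFAULT, 1)" and "INSERT INTO T(X) VALUES (1)"
// are both accepted, while "INSERT INTO T(C, X) VALUES (10, 1)" raises
// error 3428 unless the OVERRIDE_GENERATED_IDENTITY_VALUES CQD is ON.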
if (identityColumnGeneratedAlways &&
NOT systemGeneratesIdentityValue())
{
// The IDENTITY column type of GENERATED ALWAYS AS IDENTITY
// can not be used with user specified values.
// However, if the override CQD is set, then
// allow user specified values to be added
// for a GENERATED ALWAYS AS IDENTITY column.
if (CmpCommon::getDefault(OVERRIDE_GENERATED_IDENTITY_VALUES) == DF_OFF)
{
*CmpCommon::diags() << DgSqlCode(-3428)
<< DgString0(identityColumnName.data());
bindWA->setErrStatus();
return boundExpr;
}
}
ItemExpr *orderByTree = removeOrderByTree();
if (orderByTree) {
bindWA->getCurrentScope()->context()->inOrderBy() = TRUE;
bindWA->getCurrentScope()->setRETDesc(child(0)->getRETDesc());
orderByTree->convertToValueIdList(reqdOrder(), bindWA, ITM_ITEM_LIST);
bindWA->getCurrentScope()->context()->inOrderBy() = FALSE;
if (bindWA->errStatus()) return NULL;
bindWA->getCurrentScope()->setRETDesc(getRETDesc());
}
setInUpdateOrInsert(bindWA);
// Triggers --
NABoolean insertFromValuesList =
getOperatorType() == REL_UNARY_INSERT &&
(child(0)->getOperatorType() == REL_TUPLE || // VALUES (1,'b')
child(0)->getOperatorType() == REL_TUPLE_LIST || // VALUES (1,'b'),(2,'Y')
child(0)->getOperatorType() == REL_UNION); // VALUES with subquery
// An insert from a values list that gets input from above should not use flow,
// for performance. Cases other than TUPLE should be investigated.
if (bindWA->findNextScopeWithTriggerInfo() != NULL
&& (getGroupAttr()->getCharacteristicInputs() != NULL)
&& (insertFromValuesList))
setNoFlow(TRUE);
if (getUpdateCKorUniqueIndexKey())
{
SqlTableOpenInfo * scanStoi = getLeftmostScanNode()->getOptStoi()->getStoi();
short updateColsCount = scanStoi->getColumnListCount();
getOptStoi()->getStoi()->setColumnListCount(updateColsCount);
getOptStoi()->getStoi()->setColumnList(new (bindWA->wHeap()) short[updateColsCount]);
for (short i=0; i<updateColsCount; i++)
getOptStoi()->getStoi()->setUpdateColumn(i,scanStoi->getUpdateColumn(i));
}
if ((getIsTrafLoadPrep()) &&
(getTableDesc()->getCheckConstraints().entries() != 0 ||
getTableDesc()->getNATable()->getRefConstraints().entries() != 0 ))
{
// enabling/disabling constraints is not supported yet
// 4486 -- Constraints not supported with bulk load. Disable the constraints and try again.
*CmpCommon::diags() << DgSqlCode(-4486)
<< DgString0("bulk load") ;
}
if (getIsTrafLoadPrep())
{
PartitioningFunction *pf = getTableDesc()->getClusteringIndex()->getPartitioningFunction();
const NodeMap* np;
Lng32 partns = 1;
if ( pf && (np = pf->getNodeMap()) )
{
partns = np->getNumEntries();
if(partns > 1 && CmpCommon::getDefault(ATTEMPT_ESP_PARALLELISM) == DF_OFF)
// 4490 - BULK LOAD into a salted table is not supported if ESP parallelism is turned off
*CmpCommon::diags() << DgSqlCode(-4490);
}
}
if (isUpsertThatNeedsMerge()) {
boundExpr = xformUpsertToMerge(bindWA);
return boundExpr;
}
else if (NOT (isMerge() || noIMneeded()))
boundExpr = handleInlining(bindWA, boundExpr);
// turn OFF non-atomic inserts for ODBC if we have detected that inlining is needed;
// the necessary warnings have been generated in the handleInlining method.
if (CmpCommon::getDefault(ODBC_PROCESS) == DF_ON) {
if (bindWA->getHostArraysArea() &&
(NOT bindWA->getHostArraysArea()->getRowwiseRowset()) &&
!(bindWA->getHostArraysArea()->getTolerateNonFatalError()))
setTolerateNonFatalError(RelExpr::UNSPECIFIED_);
}
// When mtsStatement_ or bulkLoadIndex is set, the Insert needs to return rows,
// so potential outputs are added (note that they are added, not replaced) to
// the Insert node. Currently mtsStatement_ is set
// for MTS queries and embedded insert queries.
if (isMtsStatement() || bulkLoadIndex)
{
if(isMtsStatement())
setRETDesc(new (bindWA->wHeap()) RETDesc(bindWA, getTableDesc()));
bindWA->getCurrentScope()->setRETDesc(getRETDesc());
ValueIdList outputs;
getRETDesc()->getValueIdList(outputs, USER_AND_SYSTEM_COLUMNS);
ValueIdSet potentialOutputs;
getPotentialOutputValues(potentialOutputs);
potentialOutputs.insertList(outputs);
setPotentialOutputValues(potentialOutputs);
// this flag is set to tell the optimizer not to pick the
// TupleFlow operator
setNoFlow(TRUE);
}
return boundExpr;
} // Insert::bindNode()
/* Upsert into a table with an index is converted into a Merge to avoid
the problem described in LP 1460771. An upsert may overwrite an existing row
in the base table (identical to the UPDATE WHEN MATCHED clause of Merge) or
it may insert a new row into the base table (identical to the INSERT WHEN NOT
MATCHED clause of Merge). If the upsert caused a row to be updated in the
base table, then the old version of the row will have to be deleted from
indexes and a new version inserted. Upsert is transformed to Merge
so that we can delete the old version of an updated row from the index.
*/
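// Illustrative sketch (table and column names are hypothetical): given
// "UPSERT INTO T VALUES (k, v)" on a table T(k PRIMARY KEY, v) with a
// secondary index on v, the transformation below behaves like
//   MERGE INTO T ON k = <source k>
//   WHEN MATCHED THEN UPDATE SET v = <source v>
//   WHEN NOT MATCHED THEN INSERT (k, v) VALUES (<source k>, <source v>)
// which lets index maintenance delete the old index row of an updated row.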
NABoolean Insert::isUpsertThatNeedsMerge() const
{
if (!isUpsert() || getIsTrafLoadPrep() ||
(getTableDesc()->isIdentityColumnGeneratedAlways() &&
getTableDesc()->hasIdentityColumnInClusteringKey()) ||
getTableDesc()->getClusteringIndex()->getNAFileSet()->hasSyskey() ||
!(getTableDesc()->hasSecondaryIndexes()))
return FALSE;
return TRUE;
}
RelExpr* Insert::xformUpsertToMerge(BindWA *bindWA)
{
const ValueIdList &tableCols = updateToSelectMap().getTopValues();
const ValueIdList &sourceVals = updateToSelectMap().getBottomValues();
Scan * inputScan =
new (bindWA->wHeap())
Scan(CorrName(getTableDesc()->getCorrNameObj(), bindWA->wHeap()));
ItemExpr * keyPred = NULL;
ItemExpr * keyPredPrev = NULL;
ItemExpr * setAssign = NULL;
ItemExpr * setAssignPrev = NULL;
ItemExpr * insertVal = NULL;
ItemExpr * insertValPrev = NULL;
ItemExpr * insertCol = NULL;
ItemExpr * insertColPrev = NULL;
BaseColumn* baseCol;
ColReference * targetColRef;
int predCount = 0;
int setCount = 0;
ValueIdSet myOuterRefs;
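// Walk all target table columns (system columns are skipped): clustering
// key columns become equality predicates for the ON clause of the merge,
// non-key columns become SET assignments, and every source value also
// feeds the column/value lists of the WHEN NOT MATCHED insert.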
for (CollIndex i = 0; i<tableCols.entries(); i++)
{
baseCol = (BaseColumn *)(tableCols[i].getItemExpr()) ;
if (baseCol->getNAColumn()->isSystemColumn())
continue;
targetColRef = new(bindWA->wHeap()) ColReference(
new(bindWA->wHeap()) ColRefName(
baseCol->getNAColumn()->getFullColRefName(), bindWA->wHeap()));
if (baseCol->getNAColumn()->isClusteringKey())
{
keyPredPrev = keyPred;
keyPred = new (bindWA->wHeap())
BiRelat(ITM_EQUAL, targetColRef,
sourceVals[i].getItemExpr());
predCount++;
if (predCount > 1)
{
keyPred = new(bindWA->wHeap()) BiLogic(ITM_AND,
keyPredPrev,
keyPred);
}
}
else
{
setAssignPrev = setAssign;
setAssign = new (bindWA->wHeap())
Assign(targetColRef, sourceVals[i].getItemExpr());
setCount++;
if (setCount > 1)
{
setAssign = new(bindWA->wHeap()) ItemList(setAssign,setAssignPrev);
}
}
myOuterRefs += sourceVals[i];
insertValPrev = insertVal;
insertColPrev = insertCol ;
insertVal = sourceVals[i].getItemExpr();
insertCol = new(bindWA->wHeap()) ColReference(
new(bindWA->wHeap()) ColRefName(
baseCol->getNAColumn()->getFullColRefName(), bindWA->wHeap()));
if (i > 0)
{
insertVal = new(bindWA->wHeap()) ItemList(insertVal,insertValPrev);
insertCol = new(bindWA->wHeap()) ItemList(insertCol,insertColPrev);
}
}
inputScan->addSelPredTree(keyPred);
RelExpr * re = NULL;
re = new (bindWA->wHeap())
MergeUpdate(CorrName(getTableDesc()->getCorrNameObj(), bindWA->wHeap()),
NULL,
REL_UNARY_UPDATE,
inputScan,
setAssign,
insertCol,
insertVal,
bindWA->wHeap(),
NULL);
((MergeUpdate *)re)->setXformedUpsert();
ValueIdSet debugSet;
if (child(0) && (child(0)->getOperatorType() != REL_TUPLE))
{
RelExpr * mu = re;
re = new(bindWA->wHeap()) Join
(child(0), re, REL_TSJ_FLOW, NULL);
((Join*)re)->doNotTransformToTSJ();
((Join*)re)->setTSJForMerge(TRUE);
((Join*)re)->setTSJForMergeWithInsert(TRUE);
((Join*)re)->setTSJForWrite(TRUE);
if (bindWA->hasDynamicRowsetsInQuery())
mu->getGroupAttr()->addCharacteristicInputs(myOuterRefs);
else
re->getGroupAttr()->addCharacteristicInputs(myOuterRefs);
}
re = re->bindNode(bindWA);
if (bindWA->errStatus())
return NULL;
return re;
}
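// HBaseBulkLoadPrep binds by rewriting itself into an equivalent Insert
// marked as UPSERT_LOAD and as a bulk-load preparation step, then binding
// that Insert in its place.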
RelExpr *HBaseBulkLoadPrep::bindNode(BindWA *bindWA)
{
//CMPASSERT((CmpCommon::getDefault(TRAF_LOAD) == DF_ON &&
// CmpCommon::getDefault(TRAF_LOAD_HFILE) == DF_ON));
if (nodeIsBound())
{
return this;
}
Insert * newInsert = new (bindWA->wHeap())
Insert(getTableName(),
NULL,
REL_UNARY_INSERT,
child(0)->castToRelExpr());
newInsert->setInsertType(UPSERT_LOAD);
newInsert->setIsTrafLoadPrep(true);
newInsert->setCreateUstatSample(getCreateUstatSample());
// Pass the flag to bindWA to guarantee that a range partitioning is
// always used for all source and target tables.
bindWA->setIsTrafLoadPrep(TRUE);
RelExpr *boundNewInsert = newInsert->bindNode(bindWA);
if (bindWA->errStatus())
return NULL;
return boundNewInsert;
}
// This is a callback from DefaultSpecification::bindNode
// called from Insert::bindNode
// (you need to understand the latter to understand this).
//
const char *Insert::getColDefaultValue(BindWA *bindWA, CollIndex i) const
{
CMPASSERT(canBindDefaultSpecification());
CollIndexList &colnoList = *targetUserColPosList_;
CollIndex pos = colnoList.entries() ? colnoList[i] : i;
const ValueIdList &colList = getTableDesc()->getColumnList();
if (colList.entries() <= pos) {
// 4023 degree of row value constructor must equal that of target table
*CmpCommon::diags() << DgSqlCode(-4023)
#pragma nowarn(1506) // warning elimination
<< DgInt0(++pos)
#pragma warn(1506) // warning elimination
#pragma nowarn(1506) // warning elimination
<< DgInt1(colList.entries());
#pragma warn(1506) // warning elimination
bindWA->setErrStatus();
return NULL;
}
ValueId target = colList[pos];
const NAColumn *nacol = target.getNAColumn();
const char* defaultValueStr = nacol->getDefaultValue();
CharInfo::CharSet mapCS = CharInfo::ISO88591;
NABoolean mapCS_hasVariableWidth = CharInfo::isVariableWidthMultiByteCharSet(mapCS);
size_t defaultValueWcsLen = 0;
NAWchar *defaultValueWcs = (NAWchar *) defaultValueStr;
NABoolean ucs2StrLitPrefix = FALSE;
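// Illustrative sketch of the conversion path below (hypothetical literal):
// a default stored as the wide string _UCS2'abc' has its _UCS2 prefix
// stripped, is converted to the mapping character set and back, and is
// re-tagged as _ISO88591'abc' so the subsequent CAST sees a narrow-charset
// literal; a default already written as _ISO88591'...' keeps its prefix
// through the round trip.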
if (nacol->getDefaultClass() == COM_USER_DEFINED_DEFAULT &&
nacol->getType() &&
nacol->getType()->getTypeQualifier() == NA_CHARACTER_TYPE &&
((CharType*)(nacol->getType()))->getCharSet() == CharInfo::ISO88591 &&
mapCS_hasVariableWidth &&
defaultValueWcs != NULL &&
nacol->getNATable()->getObjectSchemaVersion() >= COM_VERS_2300 &&
(defaultValueWcsLen = NAWstrlen(defaultValueWcs)) > 6 &&
( ( ucs2StrLitPrefix = ( NAWstrncmp(defaultValueWcs, NAWSTR("_UCS2\'"), 6) == 0 )) ||
( defaultValueWcsLen > 10 &&
NAWstrncmp(defaultValueWcs, NAWSTR("_ISO88591\'"), 10) == 0 )) &&
defaultValueWcs[defaultValueWcsLen-1] == NAWCHR('\''))
{
NAWcharBuf *pWcharBuf = NULL;
if (ucs2StrLitPrefix)
{
// Strip the leading _UCS2 prefix.
pWcharBuf =
new (bindWA->wHeap()) NAWcharBuf(&defaultValueWcs[5],
defaultValueWcsLen - 5,
bindWA->wHeap());
}
else
{
// Keep the leading _ISO88591 prefix.
pWcharBuf =
new (bindWA->wHeap()) NAWcharBuf(defaultValueWcs,
defaultValueWcsLen,
bindWA->wHeap());
}
charBuf *pCharBuf = NULL; // must set this variable to NULL so the
// following function call will allocate
// space for the output literal string
Int32 errorcode = 0;
pCharBuf = unicodeTocset(*pWcharBuf, bindWA->wHeap(),
pCharBuf, mapCS, errorcode);
// Earlier releases treated the converted multibyte character
// string, in the ISO_MAPPING character set, as if it were a string of
// ISO88591 characters and then converted it back to UCS-2 format;
// i.e., for each byte in the string, an extra byte
// containing the binary zero value was inserted.
NADELETE(pWcharBuf, NAWcharBuf, bindWA->wHeap());
pWcharBuf = NULL; // must set this variable to NULL to force the
// following call to allocate space for the
// output literal string
pWcharBuf = ISO88591ToUnicode(*pCharBuf, bindWA->wHeap(), pWcharBuf);
// Prepare the converted literal string for the following CAST
// function by setting defaultValueStr to point to the string
NAWchar *pWcs = NULL;
if (ucs2StrLitPrefix)
{
pWcs = new (bindWA->wHeap()) NAWchar[10+NAWstrlen(pWcharBuf->data())];
NAWstrcpy(pWcs, NAWSTR("_ISO88591"));
}
else
{
pWcs = new (bindWA->wHeap()) NAWchar[1+NAWstrlen(pWcharBuf->data())];
pWcs[0] = NAWCHR('\0');
}
NAWstrcat(pWcs, pWcharBuf->data());
defaultValueStr = (char *)pWcs;
NADELETE(pWcharBuf, NAWcharBuf, bindWA->wHeap());
NADELETE(pCharBuf, charBuf, bindWA->wHeap());
}
if (NOT defaultValueStr AND bindWA) {
// 4107 column has no default so DEFAULT cannot be specified.
*CmpCommon::diags() << DgSqlCode(-4107) << DgColumnName(nacol->getColName());
bindWA->setErrStatus();
}
return defaultValueStr;
} // Insert::getColDefaultValue()
// -----------------------------------------------------------------------
// member functions for class Update
// -----------------------------------------------------------------------
RelExpr *Update::bindNode(BindWA *bindWA)
{
if (nodeIsBound())
{
bindWA->getCurrentScope()->setRETDesc(getRETDesc());
return this;
}
// Set flag for firstN in context
if (child(0) && child(0)->getOperatorType() == REL_SCAN)
if (child(0)->castToRelExpr() &&
((Scan *)(child(0)->castToRelExpr()))->getFirstNRows() >= 0)
if (bindWA &&
bindWA->getCurrentScope() &&
bindWA->getCurrentScope()->context())
bindWA->getCurrentScope()->context()->firstN() = TRUE;
setInUpdateOrInsert(bindWA, this, REL_UPDATE);
RelExpr * boundExpr = GenericUpdate::bindNode(bindWA);
if (bindWA->errStatus()) return NULL;
setInUpdateOrInsert(bindWA);
if (getTableDesc()->getNATable()->isHbaseCellTable())
{
*CmpCommon::diags() << DgSqlCode(-1425)
<< DgTableName(getTableDesc()->getNATable()->getTableName().
getQualifiedNameAsAnsiString())
<< DgString0("Reason: Cannot update an hbase table in CELL format. Use ROW format for this operation.");
bindWA->setErrStatus();
return this;
}
// QSTUFF
if (getGroupAttr()->isStream() &&
!getGroupAttr()->isEmbeddedUpdateOrDelete()) {
*CmpCommon::diags() << DgSqlCode(-4173);
bindWA->setErrStatus();
return this;
}
// QSTUFF
if (NOT bindWA->errStatus() AND
NOT getTableDesc()->getVerticalPartitions().isEmpty())
{
// 4058 UPDATE query cannot be used on a vertically partitioned table.
*CmpCommon::diags() << DgSqlCode(-4058) <<
DgTableName(getTableDesc()->getNATable()->getTableName().
getQualifiedNameAsAnsiString());
bindWA->setErrStatus();
return this;
}
// make sure a scan done as part of an update runs in serializable mode so a
// tsj(scan,update) implementation of an update runs as an atomic operation
if (child(0)->getOperatorType() == REL_SCAN) {
Scan *scanNode = (Scan*)(child(0)->castToRelExpr());
if (!scanNode->accessOptions().userSpecified()) {
scanNode->accessOptions().updateAccessOptions
(TransMode::ILtoAT(TransMode::SERIALIZABLE_));
}
}
// if FIRST_N is requested, insert a FirstN node.
if ((getOperatorType() == REL_UNARY_UPDATE) &&
(child(0)->getOperatorType() == REL_SCAN))
{
Scan * scanNode = (Scan *)(child(0)->castToRelExpr());
if ((scanNode->getFirstNRows() != -1) &&
(getGroupAttr()->isEmbeddedUpdateOrDelete()))
{
*CmpCommon::diags() << DgSqlCode(-4216);
bindWA->setErrStatus();
return NULL;
}
if (scanNode->getFirstNRows() >= 0)
{
FirstN * firstn = new(bindWA->wHeap())
FirstN(scanNode, scanNode->getFirstNRows(), NULL);
firstn->bindNode(bindWA);
if (bindWA->errStatus())
return NULL;
setChild(0, firstn);
}
}
// if a rowset is used in the set clause, a direct rowset that is not in a
// subquery must be present in the where clause
if ((bindWA->getHostArraysArea()) &&
(bindWA->getHostArraysArea()->hasHostArraysInSetClause()) &&
(!(bindWA->getHostArraysArea()->hasHostArraysInWhereClause()))) {
*CmpCommon::diags() << DgSqlCode(-30021) ;
bindWA->setErrStatus();
return this;
}
NABoolean transformUpdateKey = updatesClusteringKeyOrUniqueIndexKey(bindWA);
if (bindWA->errStatus()) // error occurred in updatesCKOrUniqueIndexKey()
return this;
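// Decide whether an HBase (non-merge) update should be transformed into a
// delete + insert pair: forced ON by the CQD
// HBASE_TRANSFORM_UPDATE_TO_DELETE_INSERT, chosen automatically under
// DF_SYSTEM when the table has secondary indexes, and also required for
// Halloween protection or when check constraints exist.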
NABoolean xnsfrmHbaseUpdate = FALSE;
if ((hbaseOper()) && (NOT isMerge()))
{
if (CmpCommon::getDefault(HBASE_TRANSFORM_UPDATE_TO_DELETE_INSERT) == DF_ON)
{
xnsfrmHbaseUpdate = TRUE;
}
else if ((CmpCommon::getDefault(HBASE_TRANSFORM_UPDATE_TO_DELETE_INSERT) == DF_SYSTEM) &&
(getTableDesc()->getNATable()->hasSecondaryIndexes()))
{
xnsfrmHbaseUpdate = TRUE;
}
else if (avoidHalloween())
{
xnsfrmHbaseUpdate = TRUE;
}
else if (getCheckConstraints().entries())
{
xnsfrmHbaseUpdate = TRUE;
}
}
if (xnsfrmHbaseUpdate)
{
boundExpr = transformHbaseUpdate(bindWA);
}
else if ((transformUpdateKey) && (NOT isMerge()))
{
boundExpr = transformUpdatePrimaryKey(bindWA);
}
else
boundExpr = handleInlining(bindWA, boundExpr);
if (bindWA->errStatus()) // error occurred in transformUpdatePrimaryKey()
return this; // or handleInlining()
return boundExpr;
} // Update::bindNode()
// -----------------------------------------------------------------------
// member functions for class MergeUpdate
// -----------------------------------------------------------------------
RelExpr *MergeUpdate::bindNode(BindWA *bindWA)
{
if (nodeIsBound())
{
bindWA->getCurrentScope()->setRETDesc(getRETDesc());
return this;
}
bindWA->initNewScope();
if ((isMerge()) &&
(child(0)))
{
ItemExpr *selPred = child(0)->castToRelExpr()->selPredTree();
if (selPred || where_)
{
NABoolean ONhasSubquery = (selPred && selPred->containsSubquery());
NABoolean ONhasAggr = (selPred && selPred->containsAnAggregate());
NABoolean whrHasSubqry = FALSE;
if (ONhasSubquery || ONhasAggr ||
(where_ && ((whrHasSubqry=where_->containsSubquery()) ||
where_->containsAnAggregate())))
{
*CmpCommon::diags() << DgSqlCode(-3241)
<< DgString0
(ONhasSubquery ? "Subquery in ON clause not allowed." :
(ONhasAggr ? "aggregate function in ON clause not allowed." :
(whrHasSubqry ?
"subquery in UPDATE ... WHERE clause not allowed." :
"aggregate function in UPDATE ... WHERE clause not allowed.")));
bindWA->setErrStatus();
return this;
}
ItemExpr *ONhasUDF = (selPred ? selPred->containsUDF() : NULL);
ItemExpr *whereHasUDF = (where_ ? where_->containsUDF() : NULL);
if (ONhasUDF || whereHasUDF)
{
*CmpCommon::diags() << DgSqlCode(-4471)
<< DgString0
(((UDFunction *)(ONhasUDF ? ONhasUDF : whereHasUDF))->
getFunctionName().getExternalName());
bindWA->setErrStatus();
return this;
}
}
}
if ((isMerge()) &&
(recExprTree()))
{
if (recExprTree()->containsSubquery())
{
*CmpCommon::diags() << DgSqlCode(-3241)
<< DgString0(" Subquery in SET clause not allowed.");
bindWA->setErrStatus();
return this;
}
if (recExprTree()->containsUDF())
{
*CmpCommon::diags() << DgSqlCode(-4471)
<< DgString0(((UDFunction *)recExprTree()->containsUDF())->
getFunctionName().getExternalName());
bindWA->setErrStatus();
return this;
}
}
// if insertValues, then this is an upsert stmt.
if (insertValues())
{
if (insertValues()->containsSubquery())
{
*CmpCommon::diags() << DgSqlCode(-3241)
<< DgString0(" Subquery in INSERT clause not allowed.");
bindWA->setErrStatus();
return this;
}
if (insertValues()->containsUDF())
{
*CmpCommon::diags() << DgSqlCode(-4471)
<< DgString0(((UDFunction *)insertValues()->containsUDF())->
getFunctionName().getExternalName());
bindWA->setErrStatus();
return this;
}
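// Bind the WHEN NOT MATCHED insert values by building a temporary Insert
// over a Tuple of those values; the bound new-record expressions of that
// Insert are then captured as the merge-insert expressions of this node.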
Tuple * tuple = new (bindWA->wHeap()) Tuple(insertValues());
Insert * ins = new (bindWA->wHeap())
Insert(getTableName(),
NULL,
REL_UNARY_INSERT,
tuple,
insertCols(),
NULL);
ins->setInsertType(Insert::SIMPLE_INSERT);
if (isMergeUpdate())
ins->setIsMergeUpdate(TRUE);
else
ins->setIsMergeDelete(TRUE);
ins->setTableDesc(getTableDesc());
bindWA->getCurrentScope()->xtnmStack()->createXTNM();
ins = (Insert*)ins->bindNode(bindWA);
if (bindWA->errStatus())
return NULL;
bindWA->getCurrentScope()->xtnmStack()->removeXTNM();
mergeInsertRecExpr() = ins->newRecExpr();
mergeInsertRecExprArray() = ins->newRecExprArray();
}
NATable *naTable = bindWA->getNATable(getTableName());
if (bindWA->errStatus())
return NULL;
if (naTable->getViewText() != NULL)
{
*CmpCommon::diags() << DgSqlCode(-3241)
<< DgString0(" View not allowed.");
bindWA->setErrStatus();
return NULL;
}
if ((naTable->isHbaseCellTable()) ||
(naTable->isHbaseRowTable()))
{
*CmpCommon::diags() << DgSqlCode(-3241)
<< DgString0("Hbase tables not supported.");
bindWA->setErrStatus();
return NULL;
}
if (naTable->isHiveTable())
{
*CmpCommon::diags() << DgSqlCode(-3241)
<< DgString0("Hive tables not supported.");
bindWA->setErrStatus();
return NULL;
}
bindWA->setMergeStatement(TRUE);
RelExpr * boundExpr = Update::bindNode(bindWA);
if (bindWA->errStatus())
return NULL;
if (checkForMergeRestrictions(bindWA))
return NULL;
if (where_) {
bindWA->getCurrentScope()->context()->inWhereClause() = TRUE;
where_->convertToValueIdSet(mergeUpdatePred(), bindWA, ITM_AND);
bindWA->getCurrentScope()->context()->inWhereClause() = FALSE;
if (bindWA->errStatus()) return NULL;
// any values added by where_ to Outer References Set should be
// added to input values that must be supplied to this MergeUpdate
getGroupAttr()->addCharacteristicInputs
(bindWA->getCurrentScope()->getOuterRefs());
}
bindWA->removeCurrentScope();
bindWA->setMergeStatement(TRUE);
return boundExpr;
} // MergeUpdate::bindNode()
// -----------------------------------------------------------------------
// member functions for class Delete
// -----------------------------------------------------------------------
RelExpr *Delete::bindNode(BindWA *bindWA)
{
if (nodeIsBound())
{
bindWA->getCurrentScope()->setRETDesc(getRETDesc());
return this;
}
// Save the current scope and node for children to peruse if necessary.
BindContext *context = bindWA->getCurrentScope()->context();
if (context) {
context->deleteScope() = bindWA->getCurrentScope();
context->deleteNode() = this;
if (getFirstNRows() >= 0) context->firstN() = TRUE;
}
RelExpr * boundExpr = GenericUpdate::bindNode(bindWA);
if (bindWA->errStatus()) return boundExpr;
if ((csl_) &&
(NOT getTableDesc()->getNATable()->isHbaseRowTable()))
{
*CmpCommon::diags() << DgSqlCode(-1425)
<< DgTableName(getTableDesc()->getNATable()->getTableName().
getQualifiedNameAsAnsiString());
bindWA->setErrStatus();
return this;
}
if (getTableDesc()->getNATable()->isHbaseCellTable())
{
*CmpCommon::diags() << DgSqlCode(-1425)
<< DgTableName(getTableDesc()->getNATable()->getTableName().
getQualifiedNameAsAnsiString())
<< DgString0("Reason: Cannot delete from an hbase table in CELL format. Use ROW format for this operation.");
bindWA->setErrStatus();
return this;
}
// QSTUFF
if (getGroupAttr()->isStream() &&
!getGroupAttr()->isEmbeddedUpdateOrDelete()) {
*CmpCommon::diags() << DgSqlCode(-4180);
bindWA->setErrStatus();
return this;
}
// QSTUFF
// Not only are check constraints on a DELETE nonsensical,
// but they can cause VEGReference::replaceVEGReference to assert
// with valuesToBeBound.isEmpty (Genesis 10-980202-0718).
//
// in case we are binding a generic update within a generic update
// due to view expansion we would like to ensure that all constraints
// are checked properly for the update operation performed on the
// underlying base table
if (NOT (bindWA->inViewExpansion() && bindWA->inGenericUpdate())) { // QSTUFF
getTableDesc()->checkConstraints().clear();
checkConstraints().clear();
}
if (getTableDesc()->getClusteringIndex()->getNAFileSet()->isEntrySequenced())
{
// 4018 DELETE query cannot be used against an Entry-Seq table.
*CmpCommon::diags() << DgSqlCode(-4018) <<
DgTableName(getTableDesc()->getNATable()->getTableName().
getQualifiedNameAsAnsiString());
bindWA->setErrStatus();
return this;
}
if (NOT getTableDesc()->getVerticalPartitions().isEmpty())
{
// 4029 DELETE query cannot be used on a vertically partitioned table.
*CmpCommon::diags() << DgSqlCode(-4029) <<
DgTableName(getTableDesc()->getNATable()->getTableName().
getQualifiedNameAsAnsiString());
bindWA->setErrStatus();
return this;
}
Scan *scanNode = NULL;
// make sure a scan done as part of a delete runs in serializable mode so a
// tsj(scan,delete) implementation of a delete runs as an atomic operation
if (child(0)->getOperatorType() == REL_SCAN) {
scanNode = (Scan*)(child(0)->castToRelExpr());
if (!scanNode->accessOptions().userSpecified()) {
scanNode->accessOptions().updateAccessOptions
(TransMode::ILtoAT(TransMode::SERIALIZABLE_));
}
}
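// Walk the enclosing bind scopes to detect whether this DELETE is nested
// under a UNION; FIRST N deletes use this to raise union-specific errors
// below.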
BindScope *prevScope = NULL;
BindScope *currScope = bindWA->getCurrentScope();
NABoolean inUnion = FALSE;
while (currScope && !inUnion)
{
BindContext *currContext = currScope->context();
if (currContext->inUnion())
{
inUnion = TRUE;
}
prevScope = currScope;
currScope = bindWA->getPreviousScope(prevScope);
}
RelRoot *root = bindWA->getTopRoot();
if (getFirstNRows() >= 0) // First N Delete
{
CMPASSERT(getOperatorType() == REL_UNARY_DELETE);
// First N Delete on a partitioned table. Not considered an MTS delete.
if (getTableDesc()->getClusteringIndex()->isPartitioned())
{
if (root->getCompExprTree() || inUnion ) // for unions we know there is a select
{ // an outer select is not allowed for a "non-MTS" first N delete
*CmpCommon::diags() << DgSqlCode(-4216);
bindWA->setErrStatus();
return this;
}
RelExpr * childNode = child(0)->castToRelExpr();
FirstN * firstn = new(bindWA->wHeap())
FirstN(childNode, getFirstNRows(), NULL);
firstn->bindNode(bindWA);
if (bindWA->errStatus())
return NULL;
setChild(0, firstn);
setFirstNRows(-1);
}
else
{
// First N delete on a single partition. This is considered an MTS Delete.
if ((bindWA->getHostArraysArea()) &&
((bindWA->getHostArraysArea()->hasHostArraysInWhereClause()) ||
(bindWA->getHostArraysArea()->getHasSelectIntoRowsets())))
{ // MTS delete not supported with rowsets
*CmpCommon::diags() << DgSqlCode(-30037);
bindWA->setErrStatus();
return this;
}
if (scanNode && scanNode->getSelectionPred().containsSubquery())
{
// MTS Delete not supported with subquery in where clause
*CmpCommon::diags() << DgSqlCode(-4138);
bindWA->setErrStatus();
return this;
}
if (root->hasOrderBy())
{ // mts delete not supported with order by
*CmpCommon::diags() << DgSqlCode(-4189);
bindWA->setErrStatus();
return this;
}
if (root->getCompExprTree() || // MTS Delete has an outer select
bindWA->isInsertSelectStatement() || // Delete inside an Insert Select statement, Soln:10-061103-0274
inUnion ) // for unions we know there is a select
{
if (root->getFirstNRows() < -1 || // the outer select has a Last 1/0 clause
inUnion) // for unions we wish to raise a union-
{ // specific error later, so set the flag now
setMtsStatement(TRUE);
}
else
{ // raise an error if no Last 1 clause is found.
*CmpCommon::diags() << DgSqlCode(-4136);
bindWA->setErrStatus();
return this;
}
}
}
}
// Triggers --
if ((NOT isFastDelete()) && (NOT noIMneeded()))
boundExpr = handleInlining(bindWA, boundExpr);
else if (hbaseOper() && (getGroupAttr()->isEmbeddedUpdateOrDelete()))
{
setRETDesc(new (bindWA->wHeap()) RETDesc(bindWA));
CorrName corrOLDTable (getScanNode(TRUE)->getTableDesc()->getCorrNameObj().getQualifiedNameObj(),
bindWA->wHeap(),"OLD");
// expose OLD table columns
getRETDesc()->addColumns(bindWA, *child(0)->getRETDesc(), &corrOLDTable);
ValueIdList outputs;
getRETDesc()->getValueIdList(outputs, USER_AND_SYSTEM_COLUMNS);
addPotentialOutputValues(outputs);
bindWA->getCurrentScope()->setRETDesc(getRETDesc());
}
if (isMtsStatement())
bindWA->setEmbeddedIUDStatement(TRUE);
if (getFirstNRows() > 0)
{
// create a firstN node to delete FIRST N rows, if no such node was created
// during handleInlining. This occurs when DELETE FIRST N is used on a table
// with no dependent objects.
FirstN * firstn = new(bindWA->wHeap())
FirstN(boundExpr, getFirstNRows());
if (NOT(scanNode && scanNode->getSelectionPred().containsSubquery()))
firstn->setCanExecuteInDp2(TRUE);
firstn->bindNode(bindWA);
if (bindWA->errStatus())
return NULL;
setFirstNRows(-1);
boundExpr = firstn;
}
if (csl())
{
for (Lng32 i = 0; i < csl()->entries(); i++)
{
NAString * nas = (NAString*)(*csl())[i];
bindWA->hbaseColUsageInfo()->insert
((QualifiedName*)&getTableDesc()->getNATable()->getTableName(), nas);
}
}
return boundExpr;
} // Delete::bindNode()
// -----------------------------------------------------------------------
// member functions for class MergeDelete
// -----------------------------------------------------------------------
RelExpr *MergeDelete::bindNode(BindWA *bindWA)
{
if (nodeIsBound())
{
bindWA->getCurrentScope()->setRETDesc(getRETDesc());
return this;
}
bindWA->initNewScope();
if ((isMerge()) &&
(child(0)) &&
(child(0)->castToRelExpr()->selPredTree()))
{
if (child(0)->castToRelExpr()->selPredTree()->containsSubquery())
{
*CmpCommon::diags() << DgSqlCode(-3241)
<< DgString0(" Subquery in ON clause not allowed.");
bindWA->setErrStatus();
return this;
}
if (child(0)->castToRelExpr()->selPredTree()->containsUDF())
{
*CmpCommon::diags() << DgSqlCode(-4471)
<< DgString0(((UDFunction *)child(0)->
castToRelExpr()->selPredTree()->
containsUDF())->
getFunctionName().getExternalName());
bindWA->setErrStatus();
return this;
}
}
// if insertValues, then this is an upsert stmt.
if (insertValues())
{
if (insertValues()->containsSubquery())
{
*CmpCommon::diags() << DgSqlCode(-3241)
<< DgString0(" Subquery in INSERT clause not allowed.");
bindWA->setErrStatus();
return this;
}
if (insertValues()->containsUDF())
{
*CmpCommon::diags() << DgSqlCode(-4471)
<< DgString0(((UDFunction *)insertValues()->
containsUDF())->
getFunctionName().getExternalName());
bindWA->setErrStatus();
return this;
}
Tuple * tuple = new (bindWA->wHeap()) Tuple(insertValues());
Insert * ins = new (bindWA->wHeap())
Insert(getTableName(),
NULL,
REL_UNARY_INSERT,
tuple,
insertCols(),
NULL);
ins->setInsertType(Insert::SIMPLE_INSERT);
ins->setIsMergeDelete(TRUE);
ins->setTableDesc(getTableDesc());
bindWA->getCurrentScope()->xtnmStack()->createXTNM();
ins = (Insert*)ins->bindNode(bindWA);
if (bindWA->errStatus())
return NULL;
bindWA->getCurrentScope()->xtnmStack()->removeXTNM();
mergeInsertRecExpr() = ins->newRecExpr();
mergeInsertRecExprArray() = ins->newRecExprArray();
}
NATable *naTable = bindWA->getNATable(getTableName());
if (bindWA->errStatus())
return NULL;
if (naTable->getViewText() != NULL)
{
*CmpCommon::diags() << DgSqlCode(-3241)
<< DgString0(" View not allowed.");
bindWA->setErrStatus();
return NULL;
}
bindWA->setMergeStatement(TRUE);
RelExpr * boundExpr = Delete::bindNode(bindWA);
if (bindWA->errStatus())
return NULL;
if (checkForMergeRestrictions(bindWA))
return NULL;
bindWA->removeCurrentScope();
bindWA->setMergeStatement(TRUE);
return boundExpr;
} // MergeDelete::bindNode()
static const char NEWTable [] = "NEW"; // QSTUFF: corr for embedded d/u
static const char OLDTable [] = "OLD"; // QSTUFF: corr for embedded d/u
// QSTUFF
// this method binds both the set clauses applied to the after
// image and the set clauses applied to the before image;
// the new SET ON ROLLBACK clause allows an application to modify
// the before image, e.g.:
// delete from tab set on rollback x = 1;
// update tab set x = 1 set on rollback x = 2;
#pragma nowarn(770) // warning elimination
void GenericUpdate::bindUpdateExpr(BindWA *bindWA,
ItemExpr *recExpr,
ItemExprList &assignList,
RelExpr *boundView,
Scan *scanNode,
SET(short) &stoiColumnSet,
NABoolean onRollback)
{
RETDesc *origScope = NULL;
ValueIdSet &newRecExpr =
(onRollback == TRUE) ? newRecBeforeExpr() : this->newRecExpr();
ValueIdArray &newRecExprArray =
(onRollback == TRUE) ? newRecBeforeExprArray() : this->newRecExprArray();
if (onRollback &&
((!getTableDesc()->getClusteringIndex()->getNAFileSet()->isAudited()) ||
(getTableDesc()->getNATable()->hasLobColumn()))) {
// SET ON ROLLBACK clause is not allowed on a non-audited table
*CmpCommon::diags() << DgSqlCode(-4214)
<< DgTableName(getTableDesc()->getNATable()->getTableName().
getQualifiedNameAsAnsiString());
bindWA->setErrStatus();
return;
}
CollIndex i, j;
CollIndexList colnoList; // map of col nums (row positions)
CollIndex a = assignList.entries();
const ColumnDescList *viewColumns = NULL;
// if this is a view then get the columns of the view
if (boundView) {
viewColumns = boundView->getRETDesc()->getColumnList();
}
// if the GU has a SET ON ROLLBACK clause this method is called
// twice: once to bind the columns in the SET clause and a second
// time to bind the columns in the SET ON ROLLBACK clause.
// Initially the update column list of the stoi_ is empty.
// If this is the second call, save the update column list
// built by the first call before it is replaced.
short *stoiColumnList = NULL;
CollIndex currColumnCount = 0;
if (currColumnCount = stoi_->getStoi()->getColumnListCount())
{
stoiColumnList = new (bindWA->wHeap()) short[currColumnCount];
for (i = 0; i < currColumnCount; i++)
stoiColumnList[i] = stoi_->getStoi()->getUpdateColumn(i);
}
stoi_->getStoi()->setColumnList(new (bindWA->wHeap()) short[a + currColumnCount]);
for (i = 0; i < a; i++) {
CMPASSERT(assignList[i]->getOperatorType() == ITM_ASSIGN);
assignList[i]->child(0)->bindNode(bindWA); // LHS
if (bindWA->errStatus()) return;
const NAColumn *nacol = assignList[i]->child(0).getNAColumn();
if(getOperatorType() == REL_UNARY_UPDATE)
{
stoi_->getStoi()->setUpdateColumn(i, (short) nacol->getPosition());
stoi_->getStoi()->incColumnListCount();
stoi_->addUpdateColumn(nacol->getPosition());
}
const NAType *colType = nacol->getType();
if (!colType->isSupportedType()) {
*CmpCommon::diags() << DgSqlCode(-4028) // 4028 table not updatable
<< DgTableName(nacol->getNATable()->getTableName().getQualifiedNameAsAnsiString());
bindWA->setErrStatus();
return;
}
// If this is a sequence generator IDENTITY column
// with a default type of GENERATED ALWAYS,
// then post error -3428. GENERATED ALWAYS
// IDENTITY columns may not be updated.
if(getOperatorType() == REL_UNARY_UPDATE &&
CmpCommon::getDefault(COMP_BOOL_210) == DF_ON &&
nacol->isIdentityColumnAlways())
{
*CmpCommon::diags() << DgSqlCode(-3428)
<< DgString0(nacol->getColName());
bindWA->setErrStatus();
return;
}
colnoList.insert(nacol->getPosition()); // save colno for next loop
// in case it's not a view we record the column position of the
// base table, otherwise that of the view
if (NOT boundView)
stoiColumnSet.insert((short) nacol->getPosition());
// if this is a view get the positions of the columns
// within the view that are being updated.
if (boundView) {
ValueId vid = assignList[i]->child(0).getValueId();
NABoolean found = FALSE;
for (CollIndex k=0; k < viewColumns->entries(); k++) {
if ((*viewColumns)[k]->getValueId() == vid) {
stoiColumnSet.insert((short) k);
found = TRUE;
// Updatable views cannot have any underlying basetable column
// appear more than once, so it's safe to break out of the loop.
break;
}
} // loop k
CMPASSERT(found);
} // boundView
} // loop i<a
// If this is the second call to this method, restore the update
// columns bound in the first call
if (currColumnCount)
{
for (i = a; i < (currColumnCount + a); i++)
{
stoi_->getStoi()->setUpdateColumn(i, stoiColumnList[i-a]);
stoi_->addUpdateColumn(stoiColumnList[i-a]);
}
}
// RHS: Bind the right side of the Assigns such that the source expressions
// reference the columns of the source table.
//
//### With a cascade of views, should this be "getRETDesc" as is,
//### or "scanNode->getRETDesc" ? --?
//### Should I set this->setRD to be the target(new)tbl at the beginning,
//### explicitly say "scanNode..." here? --i think not
//
if (GU_DEBUG) GU_DEBUG_Display(bindWA, this, "u");
origScope = bindWA->getCurrentScope()->getRETDesc();
// this sets the scope to the scan table for the before values;
// the previous scope was to the "UPDATE" table.
// we will reset the scope before returning, in order not to introduce
// hidden side effects, but have the generic update explicitly point
// to the scan scope
bindWA->getCurrentScope()->setRETDesc(getRETDesc());
// this has to be done after binding the LHS because of triggers.
// Soln 10-050110-3403: Don't side-effect the SET ON ROLLBACK list
// when we come down to process it the next time over. So process only
// the assignList.
ItemExpr* tempExpr = assignList.convertToItemExpr();
tempExpr->convertToValueIdSet(newRecExpr, bindWA, ITM_ITEM_LIST);
if (bindWA->errStatus()) return;
if (NOT onRollback)
{
for (ValueId v = newRecExpr.init(); newRecExpr.next(v);
newRecExpr.advance(v))
{
CMPASSERT(v.getItemExpr()->getOperatorType() == ITM_ASSIGN);
// remove all the ON ROLLBACK expressions
if (((Assign *)v.getItemExpr())->onRollback())
{
newRecExpr.remove(v);
}
}
}
else
{
for (ValueId v = newRecExpr.init(); newRecExpr.next(v);
newRecExpr.advance(v))
{
CMPASSERT(v.getItemExpr()->getOperatorType() == ITM_ASSIGN);
// remove all the non-ON ROLLBACK expressions
if ((getOperatorType() == REL_UNARY_UPDATE) &&
!(((Assign *)v.getItemExpr())->onRollback()))
{
newRecExpr.remove(v);
}
}
if (getOperatorType() == REL_UNARY_DELETE)
{
recExpr->convertToValueIdSet(this->newRecExpr(), bindWA, ITM_ITEM_LIST);
}
}
// now we built the RHS
// Now we have our colnoList map with which to build a temporary array
// (with holes) and get the update columns ordered (eliminating dups).
// Actually we store the ids of the bound Assign nodes corresponding
// to the columns, of course.
//
CollIndex totalColCount = getTableDesc()->getColumnList().entries();
#pragma nowarn(1506) // warning elimination
ValueIdArray holeyArray(totalColCount);
#pragma warn(1506) // warning elimination
ValueId assignId; // i'th newRecExpr valueid
for (i = 0, assignId = newRecExpr.init(); // bizarre ValueIdSet iter
newRecExpr.next(assignId);
i++, newRecExpr.advance(assignId)) {
j = colnoList[i];
if (holeyArray.used(j)) {
const NAColumn *nacol = holeyArray[j].getItemExpr()->child(0).getNAColumn();
//4022 target column multiply specified
*CmpCommon::diags() << DgSqlCode(-4022) << DgColumnName(nacol->getColName());
bindWA->setErrStatus();
return;
}
holeyArray.insertAt(j, assignId);
}
//
// Now we have the holey array. The next loop ignores unused entries
// and copies the used entries into newRecExprArray(), with no holes.
// It also builds a list of the columns being updated that contain
// a column on the right side of the SET assignment expression.
//
// Entering this loop, i is the number of specified update columns;
// exiting, j is.
//
CMPASSERT(i == a);
// we build a map between identical old and new columns, i.e. columns
// which are not updated and thus identical. We insert the resulting
// equivalence relationships, e.g. old.a = new.a, during transformation
// into the respective VEGs; this allows the optimizer to select an index
// scan for satisfying order requirements specified by an order by clause
// on new columns, e.g.
// select * from (update t set y = y + 1 return new.a) t order by a;
// we cannot get the benefit of this VEG for a merge statement when IM is required:
// allowing a VEG in this case causes corruption of base table key values because
// we use the "old" value of the key column from fetchReturnedExpr, which can be junk
// in case there is no row to update/delete and a brand new row is being inserted
NABoolean mergeWithIndex = isMerge() && getTableDesc()->hasSecondaryIndexes() ;
if ((NOT onRollback) && (NOT mergeWithIndex)){
for (i = 0;i < totalColCount; i++){
if (!(holeyArray.used(i))){
oldToNewMap().addMapEntry(
scanNode->getTableDesc()->
getColumnList()[i].getItemExpr()->getValueId(),
getTableDesc()->
getColumnList()[i].getItemExpr()->getValueId());
}
}
}
// when binding a view which contains an embedded update
// we must map update valueids to scan value ids
// to allow for checking of access rights.
for (i = 0; i < getTableDesc()->getColumnList().entries();i++)
bindWA->getUpdateToScanValueIds().addMapEntry(
getTableDesc()->getColumnList()[i].getItemExpr()->getValueId(),
scanNode->getTableDesc()->getColumnList()[i].getItemExpr()->getValueId());
newRecExprArray.resize(i);
TableDesc *scanDesc = scanNode->getTableDesc();
NABoolean rightContainsColumn = FALSE;
for (i = j = 0; i < totalColCount; i++) {
if (holeyArray.used(i)) {
ValueId assignExpr = holeyArray[i];
newRecExprArray.insertAt(j++, assignExpr);
ItemExpr *right = assignExpr.getItemExpr()->child(1);
// even if a column is set to a constant we mark it
// as updated to prevent indices covering this column from
// being used for access
ItemExpr *left = assignExpr.getItemExpr()->child(0);
scanDesc->addColUpdated(left->getValueId());
if (right->containsColumn())
rightContainsColumn = TRUE;
}
}
// WITH NO ROLLBACK is not supported if the right side of the update
// contains a column expression. This feature is also not
// supported together with the SET ON ROLLBACK feature
if (isNoRollback() ||
(CmpCommon::transMode()->getRollbackMode() == TransMode::NO_ROLLBACK_))
{
if ((rightContainsColumn && CmpCommon::getDefault(ALLOW_RISKY_UPDATE_WITH_NO_ROLLBACK) == DF_OFF) || onRollback)
{
NAString warnMsg = "";
if(rightContainsColumn)
{
warnMsg = "Suggestion: Set ALLOW_RISKY_UPDATE_WITH_NO_ROLLBACK CQD to ON to allow";
if (getOperatorType() == REL_UNARY_DELETE)
warnMsg += " DELETE ";
else
warnMsg += " UPDATE ";
warnMsg += "command with right-hand side SET clause consisting of columns.";
}
if (getOperatorType() == REL_UNARY_DELETE)
*CmpCommon::diags() << DgSqlCode(-3234) << DgString0(warnMsg);
else
*CmpCommon::diags() << DgSqlCode(-3233) << DgString0(warnMsg);
bindWA->setErrStatus();
return ;
}
}
CMPASSERT(j == a);
bindWA->getCurrentScope()->setRETDesc(origScope);
}
#pragma warn(770) // warning elimination
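// Walk down a unary spine of relational operators starting at 'start',
// accumulating each node's selection predicates into 'preds', and stop at
// the first Scan; give up if a node with more than one child is reached.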
void getScanPreds(RelExpr *start, ValueIdSet &preds)
{
RelExpr *result = start;
while (result) {
preds += result->selectionPred();
if (result->getOperatorType() == REL_SCAN) break;
if (result->getArity() > 1) {
return ;
}
result = result->child(0);
}
return;
}
// Note that this is the R2-compatible way to handle the Halloween problem.
// This update (only insert for now) contains a reference to the
// target in the source. This could potentially run into the so-called
// Halloween problem. Determine if this is a case we may be
// able to handle. The cases that we handle are:
//
// -- The reference to the target is in a subquery
// -- There can be any number of references to the target in the source
// -- The subquery cannot be a row subquery
// -- The subquery must contain only one source (the reference to the target)
//
// Return TRUE if this does represent a Halloween problem and the caller will
// then issue the error message.
//
// Return FALSE if this is a case we can handle. Set the
// 'avoidHalloweenR2' flag in the subquery and this generic Update so
// that the optimizer will pick a plan that is Halloween safe.
//
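// Illustrative example (hypothetical names): a statement such as
// "INSERT INTO T SELECT * FROM S WHERE S.a > (SELECT MAX(a) FROM T)"
// references the target T inside a subquery of the source; the code below
// finds and marks such scans so a Halloween-safe plan can be chosen.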
NABoolean GenericUpdate::checkForHalloweenR2(Int32 numScansToFind)
{
// If there are no scans, no problem, return okay (FALSE)
//
if(numScansToFind == 0) {
return FALSE;
}
// Allow any number of scans.
// Not supported for general NEO users.
if (CmpCommon::getDefault(MODE_SPECIAL_1) == DF_OFF)
return TRUE;
// Number of scans of the target table found so far.
//
Int32 numHalloweenScans = 0;
// Get the primary source of the generic update. We are looking for
// the halloween scans in the predicates of this scan node
//
ValueIdSet preds;
getScanPreds(this, preds);
Subquery *subq;
// Search the preds of this scan for subqueries.
//
// ValueIdSet &preds = scanNode->selectionPred();
for(ValueId p = preds.init(); preds.next(p); preds.advance(p)) {
ItemExpr *pred = p.getItemExpr();
// If this pred contains a subquery, find the scans
//
if(pred->containsSubquery()) {
ValueIdSet subqPreds;
subqPreds += pred->getValueId();
// Search all the preds and their children
//
while(subqPreds.entries()) {
ValueIdSet children;
for(ValueId s = subqPreds.init();
subqPreds.next(s);
subqPreds.advance(s)) {
ItemExpr *term = s.getItemExpr();
// Found a subquery, now look for the scan...
//
if(term->isASubquery()) {
subq = (Subquery *)term;
// We don't support row subqueries, keep looking for the scan
// in the next subquery.
if(!subq->isARowSubquery()) {
// Is this the subquery that has the scan of the table
// we are updating?
//
Scan *halloweenScan = subq->getSubquery()->getScanNode(FALSE);
if(halloweenScan) {
// Is this the scan we are looking for?
//
if(halloweenScan->getTableDesc()->getNATable() ==
getTableDesc()->getNATable()) {
subq->setAvoidHalloweenR2(this);
numHalloweenScans++;
}
}
}
}
// Follow all the children as well.
//
for(Int32 i = 0; i < term->getArity(); i++) {
children += term->child(i)->getValueId();
}
}
subqPreds = children;
}
}
}
setAvoidHalloweenR2(numScansToFind);
// If we found and marked all the halloween scans, then return FALSE (allow).
// We have marked the subqueries to avoid the halloween problem. This will
// force the optimizer to pick a plan that will be safe.
//
if(numHalloweenScans == numScansToFind)
return FALSE;
return TRUE;
}
// See ANSI 7.9 SR 12 + 6.3 SR 8 for definition of "updatable" table
// references; in particular, note that one of the requirements for a view's
// being updatable is that ultimately underlying it (passing through a
// whole stack of views) is *exactly one* base table -- i.e., no joins
// allowed.
//
//
RelExpr *GenericUpdate::bindNode(BindWA *bindWA)
{
if (nodeIsBound())
{
bindWA->getCurrentScope()->setRETDesc(getRETDesc());
return this;
}
// QSTUFF
// we indicate that we are in a generic update. If we are
// already in a generic update we know that this time we are
// binding a generic update within a view.
// However, be aware of the following scenario. We currently
// reject embedded updates and streams in the source but
// obviously allow a view with embedded updates as a target.
// Since it's already within a generic update we will only
// return the scan node to the insert:
//
// insert into select ... from (update/delete ....) t;
//
// but not cause the update to be bound in when doing
//
// insert into viewWithDeleteOrUpdate values(...);
//
// in both cases we got an insert->update/delete->scan
NABoolean inGenericUpdate = FALSE;
if (getOperatorType() != REL_UNARY_INSERT)
inGenericUpdate = bindWA->setInGenericUpdate(TRUE);
NABoolean returnScanNode =
(inGenericUpdate && bindWA->inViewExpansion() &&
( getOperatorType() == REL_UNARY_DELETE ||
getOperatorType() == REL_UNARY_UPDATE ));
// those group attributes should be set only by the topmost
// generic update. Once we are invoked while already binding
// another generic update, we reset those group attributes since we
// already know that we will only return a scan node
if ((returnScanNode) && (child(0))) {
child(0)->getGroupAttr()->setStream(FALSE);
child(0)->getGroupAttr()->setSkipInitialScan(FALSE);
child(0)->getGroupAttr()->setEmbeddedIUD(NO_OPERATOR_TYPE);
}
// if we have no user-specified access options then
// get it from nearest enclosing scope that has one (if any)
if (!accessOptions().userSpecified()) {
StmtLevelAccessOptions *axOpts = bindWA->findUserSpecifiedAccessOption();
if (axOpts) {
accessOptions() = *axOpts;
}
}
// The above code is in Scan::bindNode also.
// It would be nice to refactor this common code; someday.
// Make sure we have the appropriate transaction mode & isolation level
// in order to do the update. Genesis 10-970922-3488.
// Keep this logic in sync with Generator::verifyUpdatableTransMode()!
Lng32 sqlcodeA = 0, sqlcodeB = 0;
// fix case 10-040429-7402 by checking our statement level access options
// first before declaring any error 3140/3141.
TransMode::IsolationLevel il;
ActiveSchemaDB()->getDefaults().getIsolationLevel
(il,
CmpCommon::getDefault(ISOLATION_LEVEL_FOR_UPDATES));
verifyUpdatableTrans(&accessOptions(), CmpCommon::transMode(),
il,
sqlcodeA, sqlcodeB);
if (sqlcodeA || sqlcodeB) {
// 3140 The isolation level cannot be READ UNCOMMITTED.
// 3141 The transaction access mode must be READ WRITE.
if (sqlcodeA) *CmpCommon::diags() << DgSqlCode(sqlcodeA);
if (sqlcodeB) *CmpCommon::diags() << DgSqlCode(sqlcodeB);
bindWA->setErrStatus();
return this;
}
Int64 transId=-1;
if ((isNoRollback() &&
(NOT (Get_SqlParser_Flags(INTERNAL_QUERY_FROM_EXEUTIL)))) &&
((CmpCommon::transMode()->getAutoCommit() != TransMode::ON_ ) ||
(NAExecTrans(0, transId)))) {
// do not return an error if this is a showplan query being compiled
// in the second arkcmp.
const NAString * val =
ActiveControlDB()->getControlSessionValue("SHOWPLAN");
if (NOT ((val) && (*val == "ON")))
{
*CmpCommon::diags() << DgSqlCode(-3231); // Autocommit must be ON,
bindWA->setErrStatus(); // if No ROLLBACK is specified in IUD statement syntax
return this;
}
}
if (isNoRollback() ||
(CmpCommon::transMode()->getRollbackMode() == TransMode::NO_ROLLBACK_))
{
if ((child(0)->getGroupAttr()->isStream()) ||
(child(0)->getGroupAttr()->isEmbeddedUpdateOrDelete()) ||
(updateCurrentOf()))
{
if (getOperatorType() == REL_UNARY_DELETE)
*CmpCommon::diags() << DgSqlCode(-3234);
else
*CmpCommon::diags() << DgSqlCode(-3233);
bindWA->setErrStatus();
return this;
}
}
// The SQL standard as defined in ISO/IEC JTC 1/SC 32 date: 2009-01-12
// CD 9075-2:200x(E) published by ISO/IEC JTC 1/SC 32/WG 3
// "Information technology -- Database languages -- SQL --
// Part2: Foundation (SQL/Foundation)", page 920, section 14.14,
// page 918, section 14.13, page 900, section 14.9, page 898, section 14.8
// does allow correlation names in update & delete statements.
// Therefore, we delete this unnecessary restriction as part of the fix
// for genesis solution 10-090921-4747:
// Many places in this method assume the specified target table
// has no correlation name -- indeed, Ansi syntax does not allow one --
// this assert is to catch any future syntax-extensions we may do.
//
// E.g., see code marked
// ##SQLMP-SYNTAX-KLUDGE##
// in SqlParser.y + SqlParserAux.cpp,
// which add a non-Ansi corr name to all table refs
// when they really only should add to SELECTed tables.
// So here, in an INSERT/UPDATE/DELETEd table,
// we UNDO that kludge.
//
//if (!getTableName().getCorrNameAsString().isNull()) {
//CMPASSERT(SqlParser_NAMETYPE == DF_NSK ||
// HasMPLocPrefix(getTableName().getQualifiedNameObj().getCatalogName()));
//getTableName().setCorrName(""); // UNDO that kludge!
//}
// Genesis 10-980831-4973
if (((getTableName().isLocationNameSpecified() ||
getTableName().isPartitionNameSpecified()) &&
(!Get_SqlParser_Flags(ALLOW_SPECIALTABLETYPE))) &&
(getOperatorType() != REL_UNARY_DELETE)) {
*CmpCommon::diags() << DgSqlCode(-4061); // 4061 a partn not ins/upd'able
bindWA->setErrStatus();
return this;
}
// -- Triggers
// If this node is part of the action of a trigger,
// then don't count the rows that are affected.
if (bindWA->findNextScopeWithTriggerInfo() != NULL)
{
rowsAffected_ = DO_NOT_COMPUTE_ROWSAFFECTED;
// Does the table name match the name of one of the transition tables?
if (updatedTableName_.isATriggerTransitionName(bindWA))
{
// 11020 Ambiguous or illegal use of transition name $0~string0.
*CmpCommon::diags() << DgSqlCode(-11020)
<< DgString0(getTableName().getQualifiedNameAsString());
bindWA->setErrStatus();
return this;
}
}
// Get the NATable for this object, and an initial ref count.
// Set up stoi.
//
// We do not suppress mixed-name checking in getNATable for R1
// from here, because prototype names execute through here, and we
// want to check prototype names.
const NATable *naTable = bindWA->getNATable(getTableName());
if (bindWA->errStatus()) return this;
if (naTable && naTable->isHbaseTable())
hbaseOper() = TRUE;
if ((CmpCommon::getDefault(ALLOW_DML_ON_NONAUDITED_TABLE) == DF_OFF) &&
naTable && naTable->getClusteringIndex() &&
(!naTable->getClusteringIndex()->isAudited())
// && !bindWA->isBindingMvRefresh() // uncomment if non-audit MVs are ever supported
)
{
*CmpCommon::diags() << DgSqlCode(-4211)
<< DgTableName(
naTable->getTableName().getQualifiedNameAsAnsiString());
bindWA->setErrStatus();
return NULL;
}
// By setting the CQD OVERRIDE_SYSKEY to 'ON', the users
// are allowed to specify a SYSKEY value on an INSERT.
// We achieve this by treating a system column as a user column.
// This support is only provided for key sequenced files
// for MX and MP tables.
if (getOperatorType() == REL_UNARY_INSERT &&
naTable->hasSystemColumnUsedAsUserColumn() &&
naTable->getClusteringIndex()->isEntrySequenced())
{
*CmpCommon::diags() << DgSqlCode(-3410)
<< DgTableName(naTable->getTableName().getQualifiedNameAsString());
bindWA->setErrStatus();
return this;
}
Int32 beforeRefcount = naTable->getReferenceCount();
OptSqlTableOpenInfo *listedStoi
= setupStoi(stoi_, bindWA, this, naTable, getTableName());
if (getOperatorType() == REL_UNARY_INSERT &&
NOT naTable->isInsertable()) {
*CmpCommon::diags() << DgSqlCode(-4027) // 4027 table not insertable
<< DgTableName(naTable->getTableName().getQualifiedNameAsAnsiString());
bindWA->setErrStatus();
return this;
}
if (NOT naTable->isUpdatable()) {
*CmpCommon::diags() << DgSqlCode(-4028) // 4028 table not updatable
<< DgTableName(naTable->getTableName().getQualifiedNameAsAnsiString());
bindWA->setErrStatus();
return this;
}
if (naTable->isVerticalPartition()) {
// LCOV_EXCL_START - cnu
// On attempt to update an individual VP, say: 4082 table not accessible
*CmpCommon::diags() << DgSqlCode(-4082) <<
DgTableName(naTable->getTableName().getQualifiedNameAsAnsiString());
bindWA->setErrStatus();
return this;
// LCOV_EXCL_STOP
}
if (naTable->isAnMV())
{
// we currently don't allow updating (deleting) MVs in a trigger action
if (bindWA->inDDL() && bindWA->isInTrigger ())
{
*CmpCommon::diags() << DgSqlCode(-11051);
bindWA->setErrStatus();
return this;
}
// This table is a materialized view. Are we allowed to change it?
if ((getTableName().getSpecialType() != ExtendedQualName::MV_TABLE) &&
(getTableName().getSpecialType() != ExtendedQualName::GHOST_MV_TABLE))
{
      // The special syntax flag was not used --
      // only an ON REQUEST MV allows direct DELETE operations by the user.
MVInfoForDML *mvInfo = ((NATable *)naTable)->getMVInfo(bindWA);
if (mvInfo->getRefreshType() == COM_ON_REQUEST &&
getOperatorType() == REL_UNARY_DELETE)
{
// Set NOLOG flag.
setNoLogOperation();
}
else
{
// Direct update is only allowed for User Maintainable MVs.
if (mvInfo->getRefreshType() != COM_BY_USER)
{
// A Materialized View cannot be directly updated.
*CmpCommon::diags() << DgSqlCode(-12074);
bindWA->setErrStatus();
return this;
}
}
}
// If this is not an INTERNAL REFRESH command, make sure the MV is
// initialized and available.
// If this is FastDelete using parallel purgedata, do not enforce
// that MV is initialized.
if (!bindWA->isBindingMvRefresh())
{
if (NOT ((getOperatorType() == REL_UNARY_DELETE) &&
(((Delete*)this)->isFastDelete())))
{
if (naTable->verifyMvIsInitializedAndAvailable(bindWA))
return NULL;
}
}
}
if (naTable->isAnMVMetaData() &&
getTableName().getSpecialType() != ExtendedQualName::MVS_UMD)
{
if (getTableName().getPrototype() == NULL ||
getTableName().getPrototype()->getSpecialType() != ExtendedQualName::MVS_UMD)
{ // ERROR 12075: A Materialized View Metadata Table cannot be directly updated.
*CmpCommon::diags() << DgSqlCode(-12075);
bindWA->setErrStatus();
return this;
}
}
if ((naTable->isSeabaseTable()) &&
(naTable->isSeabaseMDTable() ||
naTable->isSeabasePrivSchemaTable()) &&
(NOT naTable->isUserUpdatableSeabaseMDTable()) &&
(NOT Get_SqlParser_Flags(INTERNAL_QUERY_FROM_EXEUTIL)))
{
// IUD on hbase metadata is only allowed for internal queries.
*CmpCommon::diags() << DgSqlCode(-1391)
<< DgString0(naTable->getTableName().getQualifiedNameAsAnsiString())
<< DgString1("metadata");
bindWA->setErrStatus();
return this;
}
else if ((naTable->isSeabaseTable()) &&
(naTable->getTableName().getSchemaName() == SEABASE_REPOS_SCHEMA) &&
(NOT Get_SqlParser_Flags(INTERNAL_QUERY_FROM_EXEUTIL)))
{
// IUD on hbase metadata is only allowed for internal queries.
*CmpCommon::diags() << DgSqlCode(-1391)
<< DgString0(naTable->getTableName().getQualifiedNameAsAnsiString())
<< DgString1("repository");
bindWA->setErrStatus();
return this;
}
if ((naTable->isHbaseTable()) &&
(naTable->isHbaseCellTable() || naTable->isHbaseRowTable()) &&
(CmpCommon::getDefault(HBASE_NATIVE_IUD) == DF_OFF))
{
*CmpCommon::diags() << DgSqlCode(-4223)
<< DgString0("Insert/Update/Delete on native hbase tables or in CELL/ROW format is");
bindWA->setErrStatus();
return this;
}
if (naTable->isHiveTable() &&
(getOperatorType() != REL_UNARY_INSERT) &&
(getOperatorType() != REL_LEAF_INSERT))
{
*CmpCommon::diags() << DgSqlCode(-4223)
<< DgString0("Update/Delete on Hive table is");
bindWA->setErrStatus();
return this;
}
NABoolean insertFromValuesList =
(getOperatorType() == REL_UNARY_INSERT &&
(child(0)->getOperatorType() == REL_TUPLE || // VALUES(1,'b')
child(0)->getOperatorType() == REL_TUPLE_LIST || // VALUES(1,'b'),(2,'Y')
child(0)->getOperatorType() == REL_UNION)) || // VALUES..(with subquery inside the list)
getOperatorType() == REL_LEAF_INSERT; // index type of inserts
if((!insertFromValuesList) && (getOperatorType() == REL_UNARY_INSERT))
bindWA->setInsertSelectStatement(TRUE);
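  // For example, INSERT INTO t SELECT * FROM s takes the insert-select
  // path flagged above, while INSERT INTO t VALUES (1,'b') takes the
  // values-list path below (table names here are illustrative only).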
  // The parser creates an update/delete node with a scan node as its
  // child. When that is the case, no security checks are needed on the
  // child Scan node.
if ((getOperatorType() == REL_UNARY_UPDATE ||
getOperatorType() == REL_UNARY_DELETE) &&
(child(0) && (child(0)->getOperatorType() == REL_SCAN))) {
Scan * scanNode = (Scan *)(child(0)->castToRelExpr());
scanNode->setNoSecurityCheck(TRUE);
}
  // Set the begin index for TableViewUsageList to zero instead of
  // bindWA->tableViewUsageList().entries(), because entries() sets the
  // index to the current entry in the list, which would exclude previous
  // statements executed in a compound statement (CS).
CollIndex begSrcUsgIx = 0;
if (!insertFromValuesList) {
//
// Create a new table name scope for the source table (child node).
// Bind the source.
// Reset scope context/naming.
//
bindWA->getCurrentScope()->xtnmStack()->createXTNM();
bindChildren(bindWA);
if (bindWA->errStatus()) return this;
bindWA->getCurrentScope()->xtnmStack()->removeXTNM();
// QSTUFF
    // We currently don't support streams and embedded updates
    // for "INSERT INTO ... SELECT ... FROM" statements.
if (getOperatorType() == REL_UNARY_INSERT){
if (child(0)->getGroupAttr()->isStream()){
*CmpCommon::diags() << DgSqlCode(-4170);
bindWA->setErrStatus();
return this;
}
if (child(0)->getGroupAttr()->isEmbeddedUpdateOrDelete() ||
child(0)->getGroupAttr()->isEmbeddedInsert()){
*CmpCommon::diags() << DgSqlCode(-4171)
<< DgString0(getGroupAttr()->getOperationWithinGroup());
bindWA->setErrStatus();
return this;
}
}
    // Binding a generic update within a generic update can only occur
    // when binding an updatable view containing an embedded delete or
    // embedded update. We don't continue binding the generic update but
    // return the bound scan node instead. The scan node may be either a
    // base table scan or a RenameTable node in case we are updating a
    // view.
    // Since an embedded generic update may have referred to the OLD
    // and NEW table, we set a binder flag causing the table name to
    // be changed to the name of the underlying scan table in the
    // RelRoot on top of the generic update. Since we know that the
    // normalizer has checked, before allowing an update on the view,
    // that not both the new and old column values have been referenced,
    // this is a safe operation.
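    // A hypothetical illustration of this case (embedded-delete syntax;
    // names are made up):
    //   CREATE VIEW v AS SELECT a, b FROM (DELETE FROM t) AS x;
    // Binding the DELETE nested inside v stops here and returns the
    // bound scan node instead of a full generic update.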
if (returnScanNode){
// this line is a hack to get through Update::bindNode on the return
setTableDesc(getScanNode()->getTableDesc());
bindWA->setInGenericUpdate(inGenericUpdate);
bindWA->setRenameToScanTable (TRUE);
NATable *nTable = bindWA->getNATable(getTableName());
// decr once for just getting it here
// and again to compensate for the reference taken out
// previously which becomes obsolete since we just return a scan node
nTable->decrReferenceCount();
nTable->decrReferenceCount();
return getViewScanNode();
}
// QSTUFF
}
else {
// else, Insert::bindNode does VALUES(...) in its Assign::bindNode loop
// in particular, it does VALUES(..,DEFAULT,..)
}
#ifndef NDEBUG
GU_DEBUG_Display(bindWA, this, "incoming", NULL, TRUE);
#endif
// QSTUFF
  // In case of an insert operation we don't set it initially, in order
  // to prevent an embedded update or delete from being accidentally
  // removed from a source view. However, we need it for binding the
  // target, because the target may be a view whose embedded updates
  // have to be removed.
if (getOperatorType() == REL_UNARY_INSERT)
inGenericUpdate = bindWA->setInGenericUpdate(TRUE);
CMPASSERT(NOT(updateCurrentOf() &&
getGroupAttr()->isEmbeddedUpdateOrDelete()));
// this is a patch to allow for embedded updates in view definitions
ParNameLocList * pLoc = NULL;
if (getGroupAttr()->isEmbeddedUpdate()) {
pLoc = bindWA->getNameLocListPtr();
bindWA->setNameLocListPtr(NULL);
}
// QSTUFF
// Allocate a TableDesc and attach it to the node.
//
// Note that for Update/Delete, which always have a Scan node attached
// (see below), we cannot reuse the Scan's TableDesc:
// GenMapTable.C doesn't find the proper ValueIds when processing an
// update/delete on a table with an index.
// So we must always create a new (target) TableDesc, always a base table.
//
// Note that bindWA->getCurrentScope()->setRETDesc() is implicitly called:
// 1) by createTableDesc, setting it to this new (target) base table;
// 2) by bindView (if called), resetting it to the view's RenameTable RETDesc
// atop the new (target) table.
//
const NATable *naTableTop = naTable;
NABoolean isView = naTable->getViewText() != NULL;
RelExpr *boundView = NULL; // ## delete when done with it?
Scan *scanNode = NULL;
if (getOperatorType() == REL_UNARY_INSERT ||
getOperatorType() == REL_LEAF_INSERT) {
if (isView) { // INSERT into a VIEW:
//
// Expand the view definition as if it were a Scan child of the Insert
// (like all children, must have its own table name scope).
//
bindWA->getCurrentScope()->xtnmStack()->createXTNM();
boundView = bindWA->bindView(getTableName(),
naTable,
accessOptions(),
removeSelPredTree(),
getGroupAttr());
#ifndef NDEBUG
GU_DEBUG_Display(bindWA, this, "bv1", boundView);
#endif
if (bindWA->errStatus()) return this;
scanNode = boundView->getScanNode();
bindWA->getCurrentScope()->xtnmStack()->removeXTNM();
}
}
else if (getOperatorType() == REL_UNARY_UPDATE ||
getOperatorType() == REL_UNARY_DELETE) {
scanNode = getScanNode();
}
if (updateCurrentOf()) {
CMPASSERT(scanNode);
scanNode->bindUpdateCurrentOf(bindWA,
(getOperatorType() == REL_UNARY_UPDATE));
if (bindWA->errStatus()) return this;
}
// As previous comments indicated, we're creating a TableDesc for the target,
// the underlying base table. Here we go and do it:
NABoolean isScanOnDifferentTable = FALSE;
if (isView) {
// This binding of the view sets up the target RETDesc.
// This is the first bindView for UPDATE and DELETE on a view,
// and the second for INSERT into a view (yes, we *do* need to do it again).
boundView = bindWA->bindView(getTableName(),
naTable,
accessOptions(),
removeSelPredTree(),
getGroupAttr(),
TRUE); // QSTUFF
setTableDesc(boundView->getScanNode()->getTableDesc());
if ((getOperatorType() == REL_INSERT)||
(getOperatorType() == REL_UNARY_INSERT) ||
(getOperatorType() == REL_LEAF_INSERT))
{
((Insert *)this)->setBoundView(boundView);
}
// for triggers
if (scanNode)
{
const NATable *naTableLocal = scanNode->getTableDesc()->getNATable();
if ((naTableLocal != naTable) && (naTableLocal->getSpecialType() == ExtendedQualName::TRIGTEMP_TABLE))
isScanOnDifferentTable = TRUE;
}
} else if (NOT (getUpdateCKorUniqueIndexKey() && (getOperatorType() == REL_UNARY_INSERT))) {
    // An insert that is introduced to implement a phase of an update of
    // the primary key already has the right tableDesc (obtained from the
    // update that it is replacing), so do not create another tableDesc
    // for such an insert.
if (scanNode)
naTable = scanNode->getTableDesc()->getNATable();
CorrName tempName(naTableTop->getTableName(),
bindWA->wHeap(),
"",
getTableName().getLocationName(),
getTableName().getPrototype());
tempName.setUgivenName(getTableName().getUgivenName());
tempName.setSpecialType(getTableName().getSpecialType());
// tempName.setIsVolatile(getTableName().isVolatile());
TableDesc * naTableToptableDesc = bindWA->createTableDesc(
naTableTop,
tempName);
if(naTableToptableDesc)
{
naTableToptableDesc->setSelectivityHint(NULL);
naTableToptableDesc->setCardinalityHint(NULL);
}
setTableDesc(naTableToptableDesc);
// Now naTable has the Scan's table, and naTableTop has the GU's table.
isScanOnDifferentTable = (naTable != naTableTop);
}
if (bindWA->errStatus())
return this;
// QSTUFF
  // In case of a delete or update we may have to bind SET clauses.
  // First we bind the left target column, second we bind the right-hand
  // side. We also have to separate the SET ON ROLLBACK clauses into a
  // separate list: the SET clauses generate a newRecExpr list, while the
  // SET ON ROLLBACK clauses generate a newRecBeforeExpr list.
  // We add the old-to-new valueid map, as it allows us to generate
  // a subset operator in the presence of ORDER BY; the compiler
  // needs to understand that the old and new valueids are identical.
  // An inlined trigger may update and scan different tables.
if ((getOperatorType() == REL_UNARY_DELETE) &&
(!isScanOnDifferentTable && !getUpdateCKorUniqueIndexKey())) {
const ValueIdList &dkeys =
getTableDesc()->getClusteringIndex()->getClusteringKeyCols();
const ValueIdList &skeys =
scanNode->getTableDesc()->getClusteringIndex()->getClusteringKeyCols();
CollIndex j = skeys.entries();
for (CollIndex i = 0; i < j; i++) {
oldToNewMap().addMapEntry(skeys[i].getItemExpr()->getValueId(),
dkeys[i].getItemExpr()->getValueId());
}
}
ItemExpr *recExpr = removeNewRecExprTree();
if (recExpr &&
(getOperatorType() == REL_UNARY_DELETE ||
getOperatorType() == REL_UNARY_UPDATE)) {
ItemExprList recList(recExpr, bindWA->wHeap());
ItemExprList recBeforeList(bindWA->wHeap());
SET(short) stoiColumnSet(bindWA->wHeap());
    // If a delete statement has a recExpr, SET ON ROLLBACK clauses have
    // been defined and need to be bound. As part of binding any SET ON
    // ROLLBACK clause we have to check that no constraints are defined
    // for the affected columns; otherwise the statement is rejected.
    // The target columns are bound to the update table, the source
    // columns are bound to the scan table.
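    // Hypothetical examples of the two clause kinds separated here:
    //   UPDATE t SET a = 1 SET ON ROLLBACK b = 2;  -- newRecExpr + newRecBeforeExpr
    //   DELETE FROM t SET ON ROLLBACK b = 2;       -- newRecBeforeExpr only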
if (getOperatorType() == REL_UNARY_DELETE){
recBeforeList.insert(recList);
bindUpdateExpr(bindWA,recExpr,recBeforeList,boundView,scanNode,stoiColumnSet,TRUE);
if (bindWA->errStatus()) return this;
}
// in case of an update operator we have to separate the set and
// set on rollback clauses
if (getOperatorType() == REL_UNARY_UPDATE) {
CMPASSERT(recList.entries());
NABoolean leftIsList = FALSE;
NABoolean rightIsList = FALSE;
NABoolean legalSubqUdfExpr = FALSE;
for (CollIndex i = 0;i < recList.entries(); i++){
CMPASSERT(recList[i]->getOperatorType() == ITM_ASSIGN);
if (recList[i]->child(0)->getOperatorType() == ITM_ITEM_LIST)
leftIsList = TRUE;
if (recList[i]->child(1)->getOperatorType() == ITM_ITEM_LIST)
rightIsList = TRUE;
if (((Assign *)recList[i])->onRollback()){
          // The ON ROLLBACK clause is currently not allowed with update lists.
if ((leftIsList) || (rightIsList))
{
*CmpCommon::diags() << DgSqlCode(-3242)
<< DgString0(" ON ROLLBACK not supported with SET lists.");
bindWA->setErrStatus();
return this;
}
// CMPASSERT((NOT leftIsList) && (NOT rightIsList))
recBeforeList.insert(recList[i]);
recList.removeAt(i);
i--;
}
}
if ((leftIsList) &&
(NOT rightIsList) &&
(recList.entries() == 1) &&
((recList[0]->child(1)->getOperatorType() == ITM_ROW_SUBQUERY) ||
(recList[0]->child(1)->getOperatorType() == ITM_USER_DEF_FUNCTION)))
{
ItemExpr * expr = NULL;
// Both Subqueries and UDFs are now using the ValueIdProxy
        // to carry each of the valueIds representing the select list
// or UDF outputs. The transformation of the ValueIdProxy will do the
// right thing, and we don't need setSubqInUpdateAssing() anymore.
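        // Illustrative (hypothetical) statement for this path:
        //   UPDATE t SET (a, b) = (SELECT x, y FROM s WHERE s.k = t.k);
        // The single row subquery (or UDF) on the right supplies all
        // target columns, so its select list is flattened below.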
// Bind the subquery
if (recList[0]->child(1)->getOperatorType() == ITM_ROW_SUBQUERY)
{
RowSubquery * rs =
(RowSubquery*)(recList[0]->child(1)->castToItemExpr());
          // Not sure that we ever have a subquery without a REL_ROOT;
          // this additional check is kept from the old code.
if (rs->getSubquery()->getOperatorType() == REL_ROOT)
{
rs = (RowSubquery *) rs->bindNode(bindWA);
if (bindWA->errStatus())
return this;
legalSubqUdfExpr = TRUE;
expr = (ItemExpr *) rs;
}
}
else
{
UDFunction * rudf =
(UDFunction*)(recList[0]->child(1)->castToItemExpr());
// Need to bind the UDFunction to get its outputs.
rudf = (UDFunction *) rudf->bindNode(bindWA);
if (bindWA->errStatus())
return this;
legalSubqUdfExpr = TRUE;
expr = (ItemExpr *) rudf;
}
// Update the recList with the bound itemExpr
recList[0]->child(1) = expr;
// Use the ItemExprList to flatten the Subquery or UDF
ItemExprList *exprList = (ItemExprList *) new(bindWA->wHeap())
ItemExprList(expr,bindWA->wHeap());
// Convert the ItemExprList to a Tree
ItemExpr * ie = exprList->convertToItemExpr();
ie = ie->bindNode(bindWA);
if (bindWA->errStatus())
return this;
Assign * assignNode = (Assign *)recList[0];
assignNode->child(1) = ie;
rightIsList = TRUE;
}
if ((leftIsList) || (rightIsList)) // some elements as lists
{
ItemExprList newRecList(bindWA->wHeap());
for (CollIndex i = 0; i < recList.entries(); i++)
{
Assign * assignNode = (Assign *)recList[i];
          // Need to bind any UDFs or subqueries in the expression
          // so that we know the degree before we expand the list.
assignNode->child(0) =
assignNode->child(0)->bindUDFsOrSubqueries(bindWA);
if (bindWA->errStatus())
return this;
          // Need to bind any UDFs or subqueries in the expression
          // so that we know the degree before we expand the list.
assignNode->child(1) =
assignNode->child(1)->bindUDFsOrSubqueries(bindWA);
if (bindWA->errStatus())
return this;
ItemExprList leftList(assignNode->child(0), bindWA->wHeap());
ItemExprList rightList(assignNode->child(1), bindWA->wHeap());
Lng32 numLeftElements = (Lng32) leftList.entries();
Lng32 numRightElements = (Lng32) rightList.entries();
          // See if ALLOW_SUBQ_IN_SET is enabled: it is enabled if the
          // default is ON or SYSTEM.
NABoolean allowSubqInSet_Enabled = FALSE;
DefaultToken allowSubqTok =
CmpCommon::getDefault(ALLOW_SUBQ_IN_SET);
if ((allowSubqTok == DF_ON) ||
(allowSubqTok == DF_SYSTEM))
allowSubqInSet_Enabled = TRUE;
if (!allowSubqInSet_Enabled)
{
for (CollIndex j = 0; j < rightList.entries(); j++)
{
if (((numLeftElements > 1) ||
(numRightElements > 1)) &&
(((rightList[j]->getOperatorType() == ITM_ROW_SUBQUERY) ||
(rightList[j]->getOperatorType() == ITM_VALUEID_PROXY)) &&
(legalSubqUdfExpr == FALSE)))
{
*CmpCommon::diags() << DgSqlCode(-3242)
<< DgString0(" Multiple elements or multiple subqueries are not allowed in this SET clause.");
bindWA->setErrStatus();
return this;
}
}
}
if (numLeftElements != numRightElements)
{
*CmpCommon::diags() << DgSqlCode(-4023)
<< DgInt0(numRightElements)
<< DgInt1(numLeftElements);
bindWA->setErrStatus();
return this;
}
// create newRecList with one Assign node for each element.
for (CollIndex k = 0; k < leftList.entries(); k++)
{
ItemExpr * leftIE = leftList[k];
ItemExpr * rightIE = rightList[k];
Assign *assign = new (bindWA->wHeap())
Assign(leftIE, rightIE);
// We do not bind the above Assign as it will be done
// in bindUpdateExpr below. (bug #1893)
newRecList.insert(assign);
}
} // for
bindUpdateExpr(bindWA,recExpr,newRecList,boundView,scanNode,stoiColumnSet);
if (bindWA->errStatus())
return this;
} // some elements as lists
else
{ // no elements as lists
if (recList.entries()){
bindUpdateExpr(bindWA,recExpr,recList,boundView,scanNode,stoiColumnSet);
if (bindWA->errStatus()) return this;
}
}
if (recBeforeList.entries()){
bindUpdateExpr(bindWA,recExpr,recBeforeList,boundView,scanNode,stoiColumnSet,TRUE);
if (bindWA->errStatus()) return this;
}
} // UNARY_UPDATE
// now we record the columns updated for the SqlTableOpenInfo
if (listedStoi) {
listedStoi->getStoi()->setColumnListCount((short)stoiColumnSet.entries());
short *stoiColumnList = new (bindWA->wHeap())
short[stoiColumnSet.entries()];
for (CollIndex i = 0; i < stoiColumnSet.entries(); i++)
{
stoiColumnList[i] = stoiColumnSet[i];
listedStoi->addUpdateColumn(stoiColumnSet[i]);
}
listedStoi->getStoi()->setColumnList(stoiColumnList);
}
  // The previous implementation assumed that the scope points
  // to the scan table; we don't want to disturb the code, so we
  // make that happen here --
#ifndef NDEBUG
GU_DEBUG_Display(bindWA, this, "u");
#endif
bindWA->getCurrentScope()->setRETDesc(getRETDesc());
}
  // QSTUFF
CollIndex endSrcUsgIx = bindWA->tableViewUsageList().entries();
if ((!isScanOnDifferentTable) &&
(((getOperatorType() == REL_UNARY_INSERT) &&
!insertFromValuesList && !getGroupAttr()->isEmbeddedInsert()) ||
(getOperatorType() == REL_UNARY_UPDATE) ||
(getOperatorType() == REL_UNARY_DELETE))){
// Special handling of statements that could suffer the
// Halloween problem, e.g., "insert into t select from t"
// or "insert into v select from t", if v references t
DBG( if (getenv("TVUSG_DEBUG")) bindWA->tableViewUsageList().display(); )
const NATable *naTableBase = naTable;
const QualifiedName *viewName = NULL;
if (isView) {
// Currently, per Ansi rules, we can only insert through a view if
// there is a single underlying base table without joins or unions.
// Since we are binding the view twice for INSERTS,
// the variable beforeRefcount for the *single* base table has to be 2.
//
beforeRefcount = beforeRefcount + 1;
naTableBase = getTableDesc()->getNATable();
viewName = &naTable->getTableName();
}
if ((getOperatorType() == REL_UNARY_UPDATE ||
getOperatorType() == REL_UNARY_DELETE) &&
(child(0)->getOperatorType() == REL_SCAN)) {
// The table is referenced twice; once for the update/delete and
// the second time for the scan below it.
beforeRefcount = beforeRefcount + 1;
}
const QualifiedName &tableBaseName = naTableBase->getTableName();
Int32 afterRefcount = naTableBase->getReferenceCount();
NABoolean isSGTableType = getTableName().getSpecialType() == ExtendedQualName::SG_TABLE;
NAString viewFmtdList(bindWA->wHeap());
Int32 baseSeenInSrc = 0;
// The views on the table do not need to be obtained
// if the table type is a SEQUENCE GENERATOR
if (!isSGTableType)
baseSeenInSrc = bindWA->tableViewUsageList().getViewsOnTable(
begSrcUsgIx, endSrcUsgIx,
bindWA->viewCount(),
tableBaseName,
getTableName().getSpecialType(),
viewName,
viewFmtdList);
NABoolean halloween = FALSE;
if (CmpCommon::getDefault(R2_HALLOWEEN_SUPPORT) == DF_ON) {
if (beforeRefcount != afterRefcount) {
// Check to see if we can support this update.
//
if(checkForHalloweenR2(afterRefcount - beforeRefcount)) {
halloween = TRUE;
}
}
else {
Scan *scanSrc = getScanNode(FALSE/*no assert*/);
if ((baseSeenInSrc > beforeRefcount) &&
((scanSrc && scanSrc->getTableName().isLocationNameSpecified())||
(getTableName().isLocationNameSpecified()))) {
halloween = TRUE;
}
if (Get_SqlParser_Flags(ALLOW_SPECIALTABLETYPE)) {
if ((scanSrc && scanSrc->getTableName().isLocationNameSpecified())||
(getTableName().isLocationNameSpecified())){
// Do not enforce Halloween check if it is a
// partition only operation.
// We assume the programmer knows what he's doing
// -- hopefully, by doing insert/update/delete
// operations as part of Partition Management
// (Move Partition Boundary or Split Partition or
// Merge Partition. See TEST057 and TEST058)
halloween = FALSE;
}
}
}
if (halloween) {
CMPASSERT(!(isView && viewFmtdList.isNull()));
*CmpCommon::diags() << DgSqlCode(viewFmtdList.isNull() ? -4026 : -4060)
<< DgTableName(
tableBaseName.getQualifiedNameAsAnsiString())
<< DgString0(viewFmtdList);
bindWA->setErrStatus();
return this;
}
}
else {
// Support for self-referencing updates/Halloween problem.
if (beforeRefcount != afterRefcount) {
setAvoidHalloween(TRUE);
bindWA->getTopRoot()->setAvoidHalloween(TRUE);
// Decide if access mode (default or specified) is compatible
// with the use of DP2 locks. If access mode was specified,
// it is a property of the naTableBase.
NABoolean cannotUseDP2Locks =
naTableBase->getRefsIncompatibleDP2Halloween();
// Now check the transaction isolation level, which can override
// the access mode. Note that il was initialized above for the
// check for an updatable trans, i.e., errors 3140 and 3141.
if((CmpCommon::transMode()->ILtoAT(il) == REPEATABLE_ ) ||
(CmpCommon::transMode()->ILtoAT(il) == STABLE_ ) ||
(CmpCommon::transMode()->ILtoAT(il) == BROWSE_ ))
cannotUseDP2Locks = TRUE;
// Save the result with this GenericUpdate object. It will be
// used when the nextSubstitute methods of TSJFlowRule or TSJRule
// call GenericUpdate::configTSJforHalloween.
if (NOT getHalloweenCannotUseDP2Locks())
setHalloweenCannotUseDP2Locks(cannotUseDP2Locks);
// Keep track of which table in the query is the self-ref table.
// This is a part of the fix for solution 10-071204-9253.
((NATable *)naTableBase)->setIsHalloweenTable();
}
else {
Scan *scanSrc = getScanNode(FALSE/*no assert*/);
if ((baseSeenInSrc > beforeRefcount) &&
((scanSrc && scanSrc->getTableName().isLocationNameSpecified())||
(getTableName().isLocationNameSpecified()))) {
halloween = TRUE;
}
if (Get_SqlParser_Flags(ALLOW_SPECIALTABLETYPE)) {
if ((scanSrc && scanSrc->getTableName().isLocationNameSpecified())||
(getTableName().isLocationNameSpecified())){
// Do not enforce Halloween check if it is a
// partition only operation.
// We assume the programmer knows what he's doing
// -- hopefully, by doing insert/update/delete
// operations as part of Partition Management
// (Move Partition Boundary or Split Partition or
// Merge Partition. See TEST057 and TEST058)
halloween = FALSE;
}
}
if (halloween) {
CMPASSERT(!(isView && viewFmtdList.isNull()));
*CmpCommon::diags() << DgSqlCode(viewFmtdList.isNull() ? -4026 : -4060)
<< DgTableName(
tableBaseName.getQualifiedNameAsAnsiString())
<< DgString0(viewFmtdList);
bindWA->setErrStatus();
return this;
}
}
}
}
// Bind the base class.
// Allocate an empty RETDesc and attach it to this node, *but* leave the
// currently scoped RETDesc (that of naTableTop) as is, for further binding
// in caller Insert::bindNode or LeafInsert/LeafDelete::bindNode.
//
RelExpr *boundExpr = bindSelf(bindWA);
CMPASSERT(boundExpr == this); // assumed by RETDesc/RI/IM code below
if (bindWA->errStatus()) return boundExpr;
setRETDesc(new (bindWA->wHeap()) RETDesc(bindWA));
// Copy the check constraints to the private memory of the GenericUpdate.
//
checkConstraints() = getTableDesc()->getCheckConstraints();
// Create a key expression for the table to be updated.
// The code specific to the Insert node is handled in Insert::bindNode.
//
if (getOperatorType() == REL_UNARY_UPDATE ||
getOperatorType() == REL_UNARY_DELETE) {
// SQL syntax requires (and the parser ensures) that a direct descendant
// (passing thru views) of an update/delete node is a scan node on the
// same table that is being updated (note that normalizer transformations
// may change this at a later time).
// An exception to this rule happens when before triggers are inlined.
// In this case, the update/delete on the subject table is driven by
// a Scan on a temp table. The primary key columns of the subject table are
// a subset of the primary key columns of the temp table, and using the
    // same column names, but not necessarily in the same order.
//
// Update/Delete nodes require expressions in their newRecExpr that can
// be used to form the primary key of the table to update/delete.
//
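    // Sketch of what the loop below produces for a two-column clustering
    // key (column names are illustrative only):
    //   beginKeyPred:      target.k1 = scan.k1, target.k2 = scan.k2
    //   updateToSelectMap: target.k1 -> scan.k1, target.k2 -> scan.k2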
const NAColumnArray &keyColArray =
getTableDesc()->getNATable()->getClusteringIndex()->getIndexKeyColumns();
CollIndex numKeyCols = keyColArray.entries();
const NAColumnArray &scanColArray =
scanNode->getTableDesc()->getNATable()->getNAColumnArray();
for (CollIndex i = 0; i < numKeyCols; i++) {
// The scan node and the update/delete node both use the SAME table,
// so their column names are also the same.
//
Lng32 colPos = keyColArray[i]->getPosition();
ItemExpr *guCol = getTableDesc()->getColumnList()[colPos].getItemExpr();
ItemExpr *scanCol; // - Triggers
if (!isScanOnDifferentTable)
scanCol = scanNode->getTableDesc()->getColumnList()[colPos].getItemExpr();
else
{
// Make sure this is a BaseColumn.
CMPASSERT(guCol->getOperatorType() == ITM_BASECOLUMN);
// Find the column name.
const NAString& colName = ((BaseColumn *)guCol)->getColName();
// Find a column with the same name, in the table from the Scan node.
// SYSKEY is an exception since its name in the temp table is "@SYSKEY"
ExtendedQualName::SpecialTableType tableType =
scanNode->getTableDesc()->getCorrNameObj().getSpecialType();
NAColumn *scanNaCol = NULL;
if (ExtendedQualName::TRIGTEMP_TABLE == tableType && colName == "SYSKEY")
{
scanNaCol = scanColArray.getColumn("@SYSKEY");
}
else
{
scanNaCol = scanColArray.getColumn(colName);
}
        CMPASSERT(scanNaCol != NULL);
// Get the position of this column in the Scan table.
Lng32 scanColPos = scanNaCol->getPosition();
// Get the Scan BaseColumn.
scanCol = scanNode->getTableDesc()->getColumnList()[scanColPos].getItemExpr();
}
ItemExpr *newKeyPred = new (bindWA->wHeap())
BiRelat(ITM_EQUAL, guCol, scanCol);
newKeyPred->bindNode(bindWA);
beginKeyPred().insert(newKeyPred->getValueId());
updateToSelectMap().addMapEntry(
newKeyPred->child(0)->getValueId(),
newKeyPred->child(1)->getValueId());
} // loop over key columns
// All of the indexes also require expressions that can be used to
// form the primary key of the index to update/delete. Create these
// item expressions here.
// (From here to the end of the loop over indexes structurally resembles
// GenericUpdate::imBindAllIndexes(), but has significant differences.)
//
// Remember the value ID's of the scan node index columns for
// code generation time.
//
if ((this->getOperatorType() == REL_UNARY_UPDATE) && isScanOnDifferentTable)
{
setScanIndexDesc(NULL); // for triggers
}
else
{
setScanIndexDesc(scanNode->getTableDesc()->getClusteringIndex());
}
} // REL_UNARY_UPDATE or REL_UNARY_DELETE
// QSTUFF
  // We need to check whether this code is executed as part of a CREATE
  // VIEW DDL operation, using bindWA->inDDL(), and prevent indices,
  // constraints and triggers from being added, as the catalog manager
  // binding functions cannot handle them right now.
// QSTUFF
// QSTUFF hack !
if (getGroupAttr()->isEmbeddedUpdate())
bindWA->setNameLocListPtr(pLoc);
bindWA->setInGenericUpdate(inGenericUpdate);
// QSTUFF
// set flag that we are binding an Insert/Update/Delete operation
// Used to disable Join optimization when necessary
bindWA->setBindingIUD();
return boundExpr;
} // GenericUpdate::bindNode()
NABoolean GenericUpdate::checkForMergeRestrictions(BindWA *bindWA)
{
if (!isMerge())
return FALSE;
ValueIdList tempVIDlist;
getTableDesc()->getIdentityColumn(tempVIDlist);
NAColumn *identityCol = NULL;
if (tempVIDlist.entries() > 0)
{
ValueId valId = tempVIDlist[0];
identityCol = valId.getNAColumn();
}
// MERGE on a table with BLOB columns is not supported
if (getTableDesc()->getNATable()->hasLobColumn())
{
*CmpCommon::diags() << DgSqlCode(-3241)
<< DgString0(" LOB column not allowed.");
bindWA->setErrStatus();
return TRUE;
}
if (getTableDesc()->hasUniqueIndexes())
{
*CmpCommon::diags() << DgSqlCode(-3241)
<< DgString0(" unique indexes not allowed.");
bindWA->setErrStatus();
return TRUE;
}
if ((accessOptions().accessType() == SKIP_CONFLICT_) ||
(getGroupAttr()->isStream()) ||
(newRecBeforeExprArray().entries() > 0)) // set on rollback
{
*CmpCommon::diags() << DgSqlCode(-3241)
<< DgString0(" Stream, skip conflict or SET ON ROLLBACK not allowed.");
bindWA->setErrStatus();
return TRUE;
}
if (getGroupAttr()->isEmbeddedUpdateOrDelete())
{
*CmpCommon::diags() << DgSqlCode(-3241)
<< DgString0(" Embedded update/deletes not allowed.");
bindWA->setErrStatus();
return TRUE;
}
if ((getInliningInfo().hasInlinedActions()) ||
(getInliningInfo().isEffectiveGU()))
{
if ((getInliningInfo().hasTriggers()) ||
(getInliningInfo().hasRI()))
{
*CmpCommon::diags() << DgSqlCode(-3241)
<< DgString0(" RI or Triggers not allowed.");
bindWA->setErrStatus();
return TRUE;
}
}
return FALSE;
}
// This class LeafInsert and its companion LeafDelete
// are currently used only by Index Maintenance,
// but we ought not make any assumptions.
// ##IM: It might be useful to add a flag such as GenericUpdate::isIndexTable_
// ##IM: and set it to TRUE in createIMNode().
//
RelExpr *LeafInsert::bindNode(BindWA *bindWA)
{
if (nodeIsBound())
{
bindWA->getCurrentScope()->setRETDesc(getRETDesc());
return this;
}
#ifndef NDEBUG
if (GU_DEBUG) cerr << "\nLeafInsert " << getUpdTableNameText() << endl;
#endif
setInUpdateOrInsert(bindWA, this, REL_INSERT);
RelExpr *boundExpr = GenericUpdate::bindNode(bindWA);
if (bindWA->errStatus()) return boundExpr;
// Make newRecExprArray_ be an ordered set of assign nodes of the form
// "ixcol1 = basetblcol1, ixcol2 = basecol2, ..." (for Index Maintenance)
// Note: For SQL/MP tables, ixcol0 is the keytag, and will need to be
// handled differently from other columns.
const ValueIdList &tgtcols = getTableDesc()->getColumnList();
CMPASSERT(tgtcols.entries() == baseColRefs().entries());
for (CollIndex i = 0; i < tgtcols.entries(); i++) {
Assign *assign;
assign = new (bindWA->wHeap())
Assign(tgtcols[i].getItemExpr(), baseColRefs()[i], FALSE);
assign->bindNode(bindWA);
if (bindWA->errStatus()) return NULL;
newRecExprArray().insertAt(i, assign->getValueId());
newRecExpr().insert(assign->getValueId());
updateToSelectMap().addMapEntry(assign->getTarget(), assign->getSource());
}
// RelExpr::bindSelf (in GenericUpdate::bindNode) has done this line, but now
// any outer refs discovered in bindNode's in the above loop must be added.
// For Index Maintenance, these must be exactly the set of baseColRefs vids
// (all the target index cols are from the locally-scoped RETDesc left by
// the GenericUpdate::bindNode).
getGroupAttr()->addCharacteristicInputs(bindWA->getCurrentScope()->getOuterRefs());
// The NATable of getTableName() had been set to INDEX_TABLE so that
// getNATable would search the right namespace.
// Now we make the Optimizer treat this as a regular table, not an index
// (in particular, don't have it choose VSBB sidetree-insert).
//
// The TableDesc setting may be redundant/unnecessary, but we do it
// for completeness and safety.
//
// -- Triggers
// If it is NOT an index table (like maybe a TRIGTEMP_TABLE), leave it alone
if (getTableName().getSpecialType() == ExtendedQualName::INDEX_TABLE)
{
getTableName().setSpecialType(ExtendedQualName::NORMAL_TABLE);
getTableDesc()->getCorrNameObj().setSpecialType(ExtendedQualName::NORMAL_TABLE);
}
setInUpdateOrInsert(bindWA);
return boundExpr;
} // LeafInsert::bindNode()
RelExpr *LeafDelete::bindNode(BindWA *bindWA)
{
if (nodeIsBound())
{
bindWA->getCurrentScope()->setRETDesc(getRETDesc());
return this;
}
#ifndef NDEBUG
if (GU_DEBUG) cerr << "\nLeafDelete " << getUpdTableNameText() << endl;
#endif
if (getPreconditionTree()) {
ValueIdSet pc;
getPreconditionTree()->convertToValueIdSet(pc, bindWA, ITM_AND);
if (bindWA->errStatus())
return this;
setPreconditionTree(NULL);
setPrecondition(pc);
}
RelExpr *boundExpr = GenericUpdate::bindNode(bindWA);
if (bindWA->errStatus()) return boundExpr;
  // Set the beginKeyPred.
if (TriggersTempTable *tempTableObj = getTrigTemp())
{
const ValueIdList &keycols = getTableDesc()->getClusteringIndex()->getIndexKey();
ItemExpr *keyExpr;
// Normal case - use the UniqueExecuteId builtin function.
keyExpr = new(bindWA->wHeap()) UniqueExecuteId();
ItemExpr *tempKeyPred = new(bindWA->wHeap()) BiRelat(ITM_EQUAL, keycols[0].getItemExpr(), keyExpr);
tempKeyPred->bindNode(bindWA);
if (bindWA->errStatus()) return NULL;
beginKeyPred().insert(tempKeyPred->getValueId());
// Create the ItemExpr for the constant UniqueIudNum
ItemExpr *col2 = new(bindWA->wHeap())
ColReference(new(bindWA->wHeap()) ColRefName(UNIQUEIUD_COLUMN));
// Compare it to the correct offset.
BindWA::uniqueIudNumOffset offset = BindWA::uniqueIudNumForInsert ;
ItemExpr *iudConst = new(bindWA->wHeap()) ConstValue(bindWA->getUniqueIudNum(offset));
ItemExpr *predIudId = new(bindWA->wHeap()) BiRelat(ITM_EQUAL, keycols[1].getItemExpr(), iudConst);
predIudId->bindNode(bindWA);
if (bindWA->errStatus()) return NULL;
beginKeyPred().insert(predIudId->getValueId());
for (CollIndex i = 2; i<keycols.entries(); i++)
{
ItemExpr *keyPred = NULL;
ItemExpr *keyItemExpr = keycols[i].getItemExpr();
ItemExpr *baseItemExpr = NULL;
Lng32 keyColPos = keycols[i].getNAColumn()->getPosition();
baseItemExpr = baseColRefs()[keyColPos];
keyPred = new (bindWA->wHeap())
BiRelat(ITM_EQUAL, keyItemExpr, baseItemExpr);
keyPred->bindNode(bindWA);
if (bindWA->errStatus()) return NULL;
beginKeyPred().insert(keyPred->getValueId());
}
}
else
{
const ValueIdList &keycols = getTableDesc()->getClusteringIndex()->getIndexKey();
for (CollIndex i = 0; i < keycols.entries() ; i++)
{
ItemExpr *keyPred = 0;
ItemExpr *keyItemExpr = keycols[i].getItemExpr();
Lng32 keyColPos = keycols[i].getNAColumn()->getPosition();
ItemExpr *baseItemExpr = NULL;
      // For a unique index (for undo) we are passing in all the index
      // columns in baseColRefs. So we need to find the index key column's
      // position in the index column list and compare each key column
      // with its corresponding column in the index column list.
if (isUndoUniqueIndex())
baseItemExpr = baseColRefs()[keyColPos];
else
baseItemExpr = baseColRefs()[i];
keyPred = new (bindWA->wHeap())
BiRelat(ITM_EQUAL, keyItemExpr, baseItemExpr);
keyPred->bindNode(bindWA);
if (bindWA->errStatus()) return NULL;
beginKeyPred().insert(keyPred->getValueId());
}
}
if (isUndoUniqueIndex())
{
setUpExecPredForUndoUniqueIndex(bindWA) ;
}
if (getTrigTemp())
{
setUpExecPredForUndoTempTable(bindWA);
}
// See LeafInsert::bindNode for comments on remainder of this method.
getGroupAttr()->addCharacteristicInputs(bindWA->getCurrentScope()->getOuterRefs());
getTableName().setSpecialType(ExtendedQualName::NORMAL_TABLE);
getTableDesc()->getCorrNameObj().setSpecialType(ExtendedQualName::NORMAL_TABLE);
return boundExpr;
} // LeafDelete::bindNode()
void LeafDelete::setUpExecPredForUndoUniqueIndex(BindWA *bindWA)
{
  // Set up the executor predicate. Used in the case of Undo to undo the
  // exact row that caused an error. Note that if we used only the key
  // columns to undo, we might end up undoing existing rows.
  // This is done only for unique indexes.
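  // Sketch: for index columns (c1..cn) and the corresponding saved
  // values (v1..vn) in baseColRefs, the executor predicate built below is
  //   c1 = v1 AND c2 = v2 AND ... AND cn = vn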
ItemExpr *execPred = NULL;
const ValueIdList &indexCols = getTableDesc()->getClusteringIndex()->getIndexColumns();
for ( CollIndex i = 0; i < indexCols.entries(); i++)
{
execPred = new (bindWA->wHeap())
BiRelat(ITM_EQUAL, indexCols[i].getItemExpr(), baseColRefs()[i]);
execPred->bindNode(bindWA);
if (bindWA->errStatus()) return ;
executorPred() += execPred->getValueId();
}
return;
}
void LeafDelete::setUpExecPredForUndoTempTable(BindWA *bindWA)
{
ItemExpr *execPred = NULL;
const ValueIdList &tempCols = getTableDesc()->getClusteringIndex()->getIndexColumns();
for ( CollIndex i = 0; i < tempCols.entries(); i++)
{
NAString colName(tempCols[i].getNAColumn()->getColName());
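      // Skip internal temp-table columns (names starting with '@'),
      // except "@SYSKEY"; compareTo() returns nonzero when the names
      // differ.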
if (colName.data()[0] == '@' && colName.compareTo("@SYSKEY"))
continue;
execPred = new (bindWA->wHeap())
BiRelat(ITM_EQUAL, tempCols[i].getItemExpr(), baseColRefs()[i]);
execPred->bindNode(bindWA);
if (bindWA->errStatus()) return;
executorPred() += execPred->getValueId();
}
return;
}
// -----------------------------------------------------------------------
// RelRoutine
// -----------------------------------------------------------------------
// LCOV_EXCL_START - rfi
RelExpr *RelRoutine::bindNode(BindWA *bindWA)
{
CMPASSERT(0); // For the time being, all classes above implement their own.
//
// Allocate an RETDesc and attach it to this and the BindScope.
// Needs to occur in later classes when we know if we are at table
// type or not..
// XXX setRETDesc(new (bindWA->wHeap()) RETDesc(bindWA, getTableDesc()));
// bindWA->getCurrentScope()->setRETDesc(getRETDesc());
//
// Bind the base class.
//
RelExpr *boundExpr = bindSelf(bindWA);
if (bindWA->errStatus()) return boundExpr;
//
// Assign the set of columns that belong to the virtual table
// as the output values that can be produced by this node.
//
  // XXX done in later classes
// getGroupAttr()->addCharacteristicOutputs(getTableDesc()->getColumnList());
return boundExpr;
} // RelRoutine::bindNode()
// LCOV_EXCL_STOP
// -----------------------------------------------------------------------
// BuiltinTableValuedFunction
// is called by ExplainFunc and StatisticsFunc and
// relies on the function implementation in TableValuedFunction.
// -----------------------------------------------------------------------
// -----------------------------------------------------------------------
// Explain/Statistics/HiveMD Func
// -----------------------------------------------------------------------
RelExpr *BuiltinTableValuedFunction::bindNode(BindWA *bindWA)
{
if (nodeIsBound())
{
bindWA->getCurrentScope()->setRETDesc(getRETDesc());
return this;
}
//
// Bind the child nodes.
//
bindChildren(bindWA);
if (bindWA->errStatus())
return this;
//
// Check if there is already an NATable for the Explain/Statistics table.
//
if (getOperatorType() == REL_EXPLAIN ||
getOperatorType() == REL_STATISTICS ||
getOperatorType() == REL_HIVEMD_ACCESS ||
getOperatorType() == REL_HBASE_ACCESS)
{
NATable *naTable = NULL;
if (getOperatorType() == REL_HBASE_ACCESS)
{
// should not reach here
CMPASSERT(0);
}
else
{
CorrName corrName(getVirtualTableName());
corrName.setSpecialType(ExtendedQualName::VIRTUAL_TABLE);
      // Assign to the naTable declared above instead of shadowing it
      // with a second local declaration.
      naTable = bindWA->getSchemaDB()->getNATableDB()->
        get(&corrName.getExtendedQualNameObj());
if (NOT naTable)
{
desc_struct *tableDesc = createVirtualTableDesc();
if (tableDesc)
naTable = bindWA->getNATable(corrName, FALSE/*catmanUsages*/, tableDesc);
if ( ! tableDesc || bindWA->errStatus() )
return this;
}
// Allocate a TableDesc and attach it to this.
//
TableDesc * td = bindWA->createTableDesc(naTable, corrName);
if (! td || bindWA->errStatus())
return this;
setTableDesc(td);
if (bindWA->errStatus())
return this;
}
if (getProcAllParamsTree())
{
((ItemExpr *)getProcAllParamsTree())->convertToValueIdList(getProcAllParamsVids(), bindWA, ITM_ITEM_LIST);
if (bindWA->errStatus())
return this;
// Clear the Tree since we now have gotten vids for all the parameters.
setProcAllParamsTree(NULL);
Lng32 sqlcode = 0;
if (getProcAllParamsVids().entries() != numParams())
{
sqlcode = -4067;
// 4067 Explain/Statistics requires two operands, of type character.
*CmpCommon::diags() << DgSqlCode(sqlcode) << DgString0(getTextForError());
bindWA->setErrStatus();
return NULL;
}
// type any param arguments to fixed char since runtime explain
// expects arguments to be fixed char.
Lng32 len = (Lng32)CmpCommon::getDefaultNumeric(VARCHAR_PARAM_DEFAULT_SIZE);
SQLChar c(len);
for (Lng32 i = 0; i < numParams(); i++)
{
getProcAllParamsVids()[i].coerceType(c, NA_CHARACTER_TYPE);
if (getProcAllParamsVids()[i].getType().getTypeQualifier() != NA_CHARACTER_TYPE)
{
sqlcode = -4067;
// 4067 Explain/Statistics requires two operands, of type character.
*CmpCommon::diags() << DgSqlCode(sqlcode) << DgString0(getTextForError());
bindWA->setErrStatus();
return NULL;
}
const NAType &typ = getProcAllParamsVids()[i].getType();
CharInfo::CharSet chld_cs = ((const CharType&)typ).getCharSet();
ItemExpr *ie;
if ( chld_cs == CharInfo::UNICODE )
{
ie = new (bindWA->wHeap()) Translate(
getProcAllParamsVids()[i].getItemExpr(),
Translate::UNICODE_TO_ISO88591);
ie = ie->bindNode(bindWA);
getProcAllParamsVids()[i] = ie->getValueId();
}
if (bindWA->errStatus())
return NULL;
// For Explain and Statistics all parameters are inputs
getProcInputParamsVids().insert(getProcAllParamsVids());
} // for
}
} // if
return TableValuedFunction::bindNode(bindWA);
}
// -----------------------------------------------------------------------
// TableValuedFunction
// -----------------------------------------------------------------------
RelExpr *TableValuedFunction::bindNode(BindWA *bindWA)
{
if (nodeIsBound())
{
bindWA->getCurrentScope()->setRETDesc(getRETDesc());
return this;
}
//
// Bind the child nodes.
//
bindChildren(bindWA);
if (bindWA->errStatus())
return this;
//
// Allocate an RETDesc and attach it to this and the BindScope.
//
setRETDesc(new (bindWA->wHeap()) RETDesc(bindWA, getTableDesc()));
bindWA->getCurrentScope()->setRETDesc(getRETDesc());
//
// Bind the base class.
//
RelExpr *boundExpr = bindSelf(bindWA);
if (bindWA->errStatus()) return boundExpr;
//
// Assign the set of columns that belong to the virtual table
// as the output values that can be produced by this node.
//
getGroupAttr()->addCharacteristicOutputs(getTableDesc()->getColumnList());
return boundExpr;
} // TableValuedFunction::bindNode()
// -----------------------------------------------------------------------
// Member functions for classes Control*
// must be written allowing for a NULL BindWA to be passed in!
//
// This happens when called from the SQLC/SQLCO Preprocessor,
// which needs to bind certain "static-only" statements --
// those which evaluate to STATIC_ONLY_WITH_WORK_FOR_PREPROCESSOR --
// see ControlAbstractClass::isAStaticOnlyStatement().
// -----------------------------------------------------------------------
RelExpr * ControlAbstractClass::bindNode(BindWA *bindWA)
{
if (nodeIsBound()) return this;
// Early return if called by SQLC/SQLCO Preprocessor
if (!bindWA) return this;
// Allocate an empty RETDesc and attach it to this node and the BindScope.
setRETDesc(new(bindWA->wHeap()) RETDesc(bindWA));
bindWA->getCurrentScope()->setRETDesc(getRETDesc());
return bindSelf(bindWA);
} // ControlAbstractClass::bindNode()
RelExpr * ControlQueryShape::bindNode(BindWA *bindWA)
{
if (nodeIsBound())
{
bindWA->getCurrentScope()->setRETDesc(getRETDesc());
return this;
}
// remember the required shape in the control table
if (alterArkcmpEnvNow())
{
if (getShape())
ActiveControlDB()->setRequiredShape(this);
else
{
// no shape passed in. Hold or Restore.
if (holdShape())
ActiveControlDB()->saveCurrentCQS();
else
ActiveControlDB()->restoreCurrentCQS();
if (ActiveControlDB()->getRequiredShape())
ActiveControlDB()->getRequiredShape()->holdShape() = holdShape();
}
}
return ControlAbstractClass::bindNode(bindWA);
} // ControlQueryShape::bindNode()
RelExpr * ControlQueryDefault::bindNode(BindWA *bindWA)
{
if (nodeIsBound())
{
bindWA->getCurrentScope()->setRETDesc(getRETDesc());
return this;
}
// Alter the current Defaults settings if this is a static CQD.
//
// "AffectYourself" is coming to you courtesy of the Staple Singers:
// 'Affect yourself, na na na, na na na na, affect yourself, re re re re.'
// It's neat to find such Binder-relevant lyrics, eh?
//
NABoolean affectYourself = alterArkcmpEnvNow();
assert(!bindWA || bindWA->getSchemaDB() == ActiveSchemaDB());
NADefaults &defs = ActiveSchemaDB()->getDefaults();
defs.setState(NADefaults::SET_BY_CQD);
if ( defs.isReadonlyAttribute(token_) == TRUE )
{
Int32 attrNum = defs.lookupAttrName(token_);
if (stricmp(value_, defs.getValue(attrNum)) != 0 )
{
if (CmpCommon::getDefault(DISABLE_READ_ONLY) == DF_OFF)
{
if (bindWA) bindWA->setErrStatus();
*CmpCommon::diags() << DgSqlCode(-4130) << DgString0(token_);
return NULL;
}
}
}
if (holdOrRestoreCQD_ == 0)
{
attrEnum_ = affectYourself ? defs.validateAndInsert(token_, value_, reset_)
: defs.validate (token_, value_, reset_);
if (attrEnum_ < 0)
{
if (bindWA) bindWA->setErrStatus();
return NULL;
}
// remember this control in the control table
if (affectYourself)
ActiveControlDB()->setControlDefault(this);
}
else if ((holdOrRestoreCQD_ > 0) && (affectYourself))
{
attrEnum_ = defs.holdOrRestore(token_, holdOrRestoreCQD_);
if (attrEnum_ < 0)
{
if (bindWA) bindWA->setErrStatus();
return NULL;
}
}
return ControlAbstractClass::bindNode(bindWA);
} // ControlQueryDefault::bindNode()
RelExpr * ControlTable::bindNode(BindWA *bindWA)
{
if (nodeIsBound()) return this;
CMPASSERT(bindWA); // can't handle it yet if called from SQLC Preprocessor
// remember this control in the control table
tableName_->applyDefaults(bindWA, bindWA->getDefaultSchema());
NABoolean ok = alterArkcmpEnvNow() ?
ActiveControlDB()->setControlTableValue(this) :
ActiveControlDB()->validate(this);
if (NOT ok)
{
if (bindWA) bindWA->setErrStatus();
return NULL;
}
return ControlAbstractClass::bindNode(bindWA);
} // ControlTable::bindNode()
RelExpr * ControlSession::bindNode(BindWA *bindWA)
{
if (nodeIsBound()) return this;
// remember this control in the control session
NABoolean ok = alterArkcmpEnvNow() ?
ActiveControlDB()->setControlSessionValue(this) :
ActiveControlDB()->validate(this);
if (NOT ok)
{
if (bindWA) bindWA->setErrStatus();
return NULL;
}
return ControlAbstractClass::bindNode(bindWA);
} // ControlSession::bindNode()
RelExpr * SetSessionDefault::bindNode(BindWA *bindWA)
{
if (nodeIsBound())
{
bindWA->getCurrentScope()->setRETDesc(getRETDesc());
return this;
}
if (getOperatorType() == REL_SET_SESSION_DEFAULT)
{
// trim leading and trailing spaces from token_ and value_
// and upcase token
token_ = token_.strip(NAString::both);
value_ = value_.strip(NAString::both);
token_.toUpper();
// TBD: perhaps add a component privilege that allows others
// to set parserflags
if ((token_ == "SET_PARSERFLAGS") ||
(token_ == "RESET_PARSERFLAGS"))
{
if (!ComUser::isRootUserID())
{
*CmpCommon::diags() << DgSqlCode(-1017);
bindWA->setErrStatus();
return this;
}
}
}
return ControlAbstractClass::bindNode(bindWA);
} // SetSessionDefault::bindNode()
// -----------------------------------------------------------------------
// member function for class RelSetTimeout
// -----------------------------------------------------------------------
RelExpr * RelSetTimeout::bindNode(BindWA *bindWA)
{
if (nodeIsBound()) return this;
// Allocate an empty RETDesc and attach it to this node and the BindScope.
setRETDesc(new(bindWA->wHeap()) RETDesc(bindWA));
bindWA->getCurrentScope()->setRETDesc(getRETDesc());
if (timeoutValueExpr_) { // bind the timeout-value expression
timeoutValueExpr_->bindNode(bindWA);
if (bindWA->errStatus()) return this;
}
if ( ! strcmp("*", userTableName_.getCorrNameAsString()) )
isForAllTables_ = TRUE ;
HostVar *proto = userTableName_.getPrototype() ;
// Check for the not-supported "SET STREAM TIMEOUT" on a specific stream
if ( isStream_ && ! isForAllTables_ ) {
*CmpCommon::diags() << DgSqlCode(-3187);
bindWA->setErrStatus();
return this;
}
if ( isForAllTables_ ) { /* do nothing */ }
else if ( proto ) { // it is a HOSTVAR or DEFINE
userTableName_.applyDefaults(bindWA, bindWA->getDefaultSchema());
CMPASSERT ( proto->isPrototypeValid() ) ;
userTableName_.getPrototype()->bindNode(bindWA);
} else { // i.e., an explicit table name was specified
// Get the NATable for this table.
NATable *naTable = bindWA->getNATable(userTableName_, FALSE);
if (bindWA->errStatus()) return this; // e.g. error: table does not exist
    if ( naTable->getViewText() ) { // cannot set lock timeout on a view
*CmpCommon::diags() << DgSqlCode(-3189);
bindWA->setErrStatus();
return this;
}
// Extract and keep the physical file name
const NAFileSet * clstInd = naTable->getClusteringIndex() ;
setPhysicalFileName( clstInd->getFileSetName().getQualifiedNameAsString().data() );
}
// Bind the base class.
return bindSelf(bindWA);
}
// -----------------------------------------------------------------------
// member functions for class Describe
// (see sqlcomp/CmpDescribe.cpp for execution of the request)
// -----------------------------------------------------------------------
RelExpr *Describe::bindNode(BindWA *bindWA)
{
if (nodeIsBound())
{
bindWA->getCurrentScope()->setRETDesc(getRETDesc());
return this;
}
// SHOWCONTROL DEFAULT "magic string"; -- see ShowSchema.h and ExSqlComp.cpp
if (getFormat() == CONTROL_DEFAULTS_) {
if (getDescribedTableName().getQualifiedNameObj().getObjectName() ==
ShowSchema::ShowControlDefaultSchemaMagic())
{
// Return info in an error message (a warning msg doesn't cut it).
const SchemaName &catsch = bindWA->getDefaultSchema();
NAString cat(catsch.getCatalogNameAsAnsiString(),bindWA->wHeap());
NAString sch(catsch.getUnqualifiedSchemaNameAsAnsiString(),bindWA->wHeap());
//
if (SqlParser_NAMETYPE == DF_NSK) {
// LCOV_EXCL_START - nsk
// The cat & sch from the BindWA are really from MPLOC.
// Get the real ANSI cat & sch, prepending them to the strings
// and put the MPLOC info in parens.
const SchemaName &csAnsi = ActiveSchemaDB()->getDefaultSchema();
NAString cAnsi(csAnsi.getCatalogNameAsAnsiString(),bindWA->wHeap());
NAString sAnsi(csAnsi.getUnqualifiedSchemaNameAsAnsiString(),bindWA->wHeap());
cat.prepend(cAnsi + " (");
cat += ")";
sch.prepend(sAnsi + " (");
sch += ")";
// LCOV_EXCL_STOP
}
*CmpCommon::diags() << DgSqlCode(-ABS(ShowSchema::DiagSqlCode()))
<< DgCatalogName(cat) << DgSchemaName (sch);
bindWA->setErrStatus();
return this;
}
if (getDescribedTableName().getQualifiedNameObj().getObjectName() ==
GetControlDefaults::GetExternalizedDefaultsMagic())
{
// Return info in an error message (a warning msg doesn't cut it).
NAString cqdPairs(bindWA->wHeap());
size_t lenN, lenV;
char lenbufN[10], lenbufV[10];
const char *nam, *val;
NADefaults &defs = bindWA->getSchemaDB()->getDefaults();
for (CollIndex i = 0; i < defs.numDefaultAttributes(); i++ ) {
if (defs.getCurrentDefaultsAttrNameAndValue(i, nam, val, TRUE)) {
lenN = strlen(nam);
lenV = strlen(val);
CMPASSERT(lenN <= 999 && lenV <= 999); // %3d coming up next
sprintf(lenbufN, "%3d", (UInt32)lenN);
sprintf(lenbufV, "%3d", (UInt32)lenV);
cqdPairs += NAString(lenbufN) + nam + lenbufV + val;
}
}
*CmpCommon::diags()
<< DgSqlCode(-ABS(GetControlDefaults::DiagSqlCode()))
<< DgString0(cqdPairs);
bindWA->setErrStatus();
return this;
}
}
// Create a descriptor for a virtual table to look like this:
//
// CREATE TABLE DESCRIBE__ (DESCRIBE__COL VARCHAR(3000) NOT NULL);
// For SeaQuest Unicode:
// CREATE TABLE DESCRIBE__ (DESCRIBE__COL VARCHAR(3000 BYTES) CHARACTER SET UTF8 NOT NULL);
//
#define MAX_DESCRIBE_LEN 3000 // e.g., SQL/MP Views.ViewText column
// readtabledef_allocate_desc requires that HEAP (STMTHEAP) be used for new's!
desc_struct * table_desc = readtabledef_allocate_desc(DESC_TABLE_TYPE);
table_desc->body.table_desc.tablename = new HEAP char[strlen("DESCRIBE__")+1];
strcpy(table_desc->body.table_desc.tablename, "DESCRIBE__");
// see nearly identical code below for indexes file desc
desc_struct * files_desc = readtabledef_allocate_desc(DESC_FILES_TYPE);
table_desc->body.table_desc.files_desc = files_desc;
files_desc->body.files_desc.fileorganization = KEY_SEQUENCED_FILE;
Lng32 colnumber = 0, offset = 0;
desc_struct * column_desc = readtabledef_make_column_desc(
table_desc->body.table_desc.tablename,
"DESCRIBE__COL",
colnumber, // INOUT
REC_BYTE_V_ASCII,
MAX_DESCRIBE_LEN,
offset); // INOUT
column_desc->body.columns_desc.character_set = CharInfo::UTF8;
column_desc->body.columns_desc.encoding_charset = CharInfo::UTF8;
table_desc->body.table_desc.colcount = colnumber;
table_desc->body.table_desc.record_length = offset;
desc_struct * index_desc = readtabledef_allocate_desc(DESC_INDEXES_TYPE);
index_desc->body.indexes_desc.tablename = table_desc->body.table_desc.tablename;
index_desc->body.indexes_desc.indexname = table_desc->body.table_desc.tablename;
index_desc->body.indexes_desc.ext_indexname = table_desc->body.table_desc.tablename;
index_desc->body.indexes_desc.keytag = 0; // primary index
index_desc->body.indexes_desc.record_length = table_desc->body.table_desc.record_length;
index_desc->body.indexes_desc.colcount = table_desc->body.table_desc.colcount;
index_desc->body.indexes_desc.blocksize = 4096; // anything > 0
// Cannot simply point to same files desc as the table one,
// because then ReadTableDef::deleteTree frees same memory twice (error)
desc_struct * i_files_desc = readtabledef_allocate_desc(DESC_FILES_TYPE);
index_desc->body.indexes_desc.files_desc = i_files_desc;
i_files_desc->body.files_desc.fileorganization = KEY_SEQUENCED_FILE;
desc_struct * key_desc = readtabledef_allocate_desc(DESC_KEYS_TYPE);
key_desc->body.keys_desc.indexname = index_desc->body.indexes_desc.indexname;
key_desc->body.keys_desc.keyseqnumber = 1;
key_desc->body.keys_desc.tablecolnumber = 0;
key_desc->body.keys_desc.ordering= 0;
index_desc->body.indexes_desc.keys_desc = key_desc;
table_desc->body.table_desc.columns_desc = column_desc;
table_desc->body.table_desc.indexes_desc = index_desc;
//
// Get the NATable for this object.
//
CorrName corrName("DESCRIBE__");
corrName.setSpecialType(ExtendedQualName::VIRTUAL_TABLE);
NATable *naTable = bindWA->getNATable(corrName, FALSE/*CatBind*/, table_desc);
if (bindWA->errStatus())
return this;
//
// Allocate a TableDesc (which is not the table_desc we just constructed)
// and attach it to the Scan node.
//
setTableDesc(bindWA->createTableDesc(naTable, corrName));
if (bindWA->errStatus())
return this;
//
// Allocate an RETDesc and attach it to the Scan node and the BindScope.
//
setRETDesc(new (bindWA->wHeap()) RETDesc(bindWA, getTableDesc()));
bindWA->getCurrentScope()->setRETDesc(getRETDesc());
//
// Bind the described table CorrName member, the children, and the base class.
//
if (! describedTableName_.getQualifiedNameObj().getObjectName().isNull())
{
if ((getFormat() >= CONTROL_FIRST_) &&
(getFormat() <= CONTROL_LAST_))
{
describedTableName_.applyDefaults(bindWA, bindWA->getDefaultSchema());
}
else
{
// do not override schema for showddl
bindWA->setToOverrideSchema(FALSE);
// if this is a showlabel command on a resource fork,
// but the describedTableName
// is not a fully qualified rfork name, then get the rfork name
// for the specified table.
if ((getFormat() == Describe::LABEL_) &&
(describedTableName_.getSpecialType() == ExtendedQualName::RESOURCE_FORK) &&
(describedTableName_.getLocationName().isNull()))
{
describedTableName_.setSpecialType(ExtendedQualName::NORMAL_TABLE);
NATable *naTable = bindWA->getNATable(describedTableName_);
if (NOT bindWA->errStatus())
{
// replace the describedTableName with its rfork name.
describedTableName_.setSpecialType(ExtendedQualName::RESOURCE_FORK);
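// The resource fork name is derived from the clustering index's fileset
// name by incrementing its final character, as done below.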
NAString rforkName = naTable->getClusteringIndex()->getFileSetName().getQualifiedNameAsString();
char * rforkNameData = (char*)(rforkName.data());
rforkNameData[rforkName.length()-1] += 1;
describedTableName_.setLocationName(rforkName);
}
}
// check if we need to consider public schema before
// describedTableName_ is qualified by getNATable
if (describedTableName_.getQualifiedNameObj().getSchemaName().isNull())
setToTryPublicSchema(TRUE);
bindWA->getNATable(describedTableName_);
if (bindWA->errStatus())
{
// if volatile related error, return it.
// Otherwise, clear diags and let this error be caught
// when describe is executed.
if ((CmpCommon::diags()->mainSQLCODE() == -4190) ||
(CmpCommon::diags()->mainSQLCODE() == -4191) ||
(CmpCommon::diags()->mainSQLCODE() == -4192) ||
(CmpCommon::diags()->mainSQLCODE() == -4193) ||
(CmpCommon::diags()->mainSQLCODE() == -4155) || // define not supported
(CmpCommon::diags()->mainSQLCODE() == -4086) || // catch Define Not Found error
(CmpCommon::diags()->mainSQLCODE() == -30044)) // default schema access error
return this;
CmpCommon::diags()->clear();
bindWA->resetErrStatus();
}
}
if (pUUDFName_ NEQ NULL AND NOT pUUDFName_->getObjectName().isNull())
{
pUUDFName_->applyDefaults(bindWA->getDefaultSchema());
}
}
bindChildren(bindWA);
RelExpr *boundExpr = bindSelf(bindWA);
if (bindWA->errStatus()) return boundExpr;
//
// Assign the set of columns that belong to the table to be scanned
// as the output values that can be produced by this scan.
//
getGroupAttr()->addCharacteristicOutputs(getTableDesc()->getColumnList());
return boundExpr;
} // Describe::bindNode()
// -----------------------------------------------------------------------
// member functions for class RelLock
// -----------------------------------------------------------------------
RelExpr * RelLock::bindNode(BindWA *bindWA)
{
if (nodeIsBound())
{
bindWA->getCurrentScope()->setRETDesc(getRETDesc());
return this;
}
// do not override schema for this
bindWA->setToOverrideSchema(FALSE);
// Get the NATable for this object.
NATable *naTable = bindWA->getNATable(getTableName());
if (bindWA->errStatus())
return this;
NABoolean isView = !!naTable->getViewText();
if (isView && !naTable->isAnMV())
{
*CmpCommon::diags() << DgSqlCode(-4222)
<< DgString0("Views");
bindWA->setErrStatus();
return this;
}
else
{
baseTableNameList_.insert((CorrName *)getPtrToTableName());
}
Int32 locSpec = 0;
NAString tabNames(bindWA->wHeap());
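// locSpec counts base tables that specify an explicit location; tabNames
// accumulates the table names for warning 4124 below.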
for (CollIndex i = 0; i < baseTableNameList_.entries(); i++) {
naTable = bindWA->getNATable(*baseTableNameList_[i]);
if (bindWA->errStatus())
return this;
// Genesis 10-990212-6908:
// Ignore the user-specified correlation name --
// use just the 3-part tblname (and any LOCATION clause, etc).
// Then, insert only unique names into tabIds_ --
// to prevent XTNM duplicates (errmsg 4056)
// when multiple layered views reference the same table or corr-name.
CorrName bt(*baseTableNameList_[i]);
bt.setCorrName("");
NABoolean haveTDforThisBT = FALSE;
for (CollIndex j = 0; j < tabIds_.entries(); j++) {
if (bt == tabIds_[j]->getCorrNameObj()) {
haveTDforThisBT = TRUE;
break;
}
}
if (!haveTDforThisBT) {
if (bt.isLocationNameSpecified()) locSpec++;
tabNames += NAString(", ") +
bt.getQualifiedNameObj().getQualifiedNameAsAnsiString();
tabIds_.insert(bindWA->createTableDesc(naTable, bt));
if (bindWA->errStatus()) return this;
}
}
if (tabIds_.entries() > 1) {
CMPASSERT(locSpec == 0);
tabNames.remove(0, 2); // remove leading ", "
// Warning 4124: More than one table will be locked: $0~String0.
// (warning, so user realizes the effects of this command
// when run on a view which joins tables...).
*CmpCommon::diags() << DgSqlCode(+4124) << DgString0(tabNames);
}
if ((isView) ||
(tabIds_.entries() > 1) ||
(baseTableNameList_.entries() > 1) ||
(CmpCommon::getDefault(ATTEMPT_ESP_PARALLELISM) == DF_OFF))
{
parallelExecution_ = FALSE;
}
// Allocate an empty RETDesc and attach it to this node and the BindScope.
setRETDesc(new(bindWA->wHeap()) RETDesc(bindWA));
bindWA->getCurrentScope()->setRETDesc(getRETDesc());
// Bind the base class.
return bindSelf(bindWA);
} // RelLock::bindNode()
// -----------------------------------------------------------------------
// member functions for class RelTransaction
// -----------------------------------------------------------------------
RelExpr * RelTransaction::bindNode(BindWA *bindWA)
{
if (nodeIsBound())
{
bindWA->getCurrentScope()->setRETDesc(getRETDesc());
return this;
}
// Allocate an empty RETDesc and attach it to this node and the BindScope.
setRETDesc(new(bindWA->wHeap()) RETDesc(bindWA));
bindWA->getCurrentScope()->setRETDesc(getRETDesc());
if (diagAreaSizeExpr_) {
diagAreaSizeExpr_->bindNode(bindWA);
if (bindWA->errStatus()) return this;
}
// "mode_" is NULL if BEGIN/COMMIT/ROLLBACK WORK, nonNULL if SET TRANSACTION.
if (mode_) {
if ((mode_->autoCommit() != TransMode::AC_NOT_SPECIFIED_) ||
(mode_->getAutoBeginOn() != 0) ||
(mode_->getAutoBeginOff() != 0))
{
CMPASSERT(mode_->isolationLevel() == TransMode::IL_NOT_SPECIFIED_ &&
mode_->accessMode() == TransMode::AM_NOT_SPECIFIED_);
}
else
{
// See Ansi 14.1, especially SR 4.
// Similar code must be maintained in
// comexe/ExControlArea::addControl() and NADefaults::validateAndInsert().
// SET TRANSACTION w/o specifying ISOLATION LEVEL reverts TransMode to
// the NADefaults setting of ISOLATION_LEVEL
// (which the user should set to SERIALIZABLE if they want
// SET TRANSACTION to be Ansi conformant).
if (mode_->isolationLevel() == TransMode::IL_NOT_SPECIFIED_)
{
if (CmpCommon::getDefault(ISOLATION_LEVEL_FOR_UPDATES) == DF_NONE)
bindWA->getSchemaDB()->getDefaults().getIsolationLevel(
mode_->isolationLevel()); // short int
else
bindWA->getSchemaDB()->getDefaults().getIsolationLevel(
mode_->isolationLevel(), // short int
CmpCommon::getDefault(ISOLATION_LEVEL_FOR_UPDATES));
}
if (mode_->accessMode() == TransMode::AM_NOT_SPECIFIED_)
mode_->updateAccessModeFromIsolationLevel(
mode_->getIsolationLevel()); // enum
// 3114 Transaction access mode RW is incompatible with isolation level RU
if (mode_->accessMode() == TransMode::READ_WRITE_ &&
mode_->isolationLevel() == TransMode::READ_UNCOMMITTED_) {
*CmpCommon::diags() << DgSqlCode(-3114);
bindWA->setErrStatus();
return this;
}
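// Default the rollback mode to WAITED when none was specified.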
if (mode_->rollbackMode() == TransMode::ROLLBACK_MODE_NOT_SPECIFIED_)
mode_->rollbackMode() = TransMode::ROLLBACK_MODE_WAITED_ ;
// 4352 - invalid MULTI COMMIT compatibility
if (mode_->multiCommit() == TransMode::MC_ON_)
{
if (mode_->invalidMultiCommitCompatibility())
{
*CmpCommon::diags() << DgSqlCode(-4352);
bindWA->setErrStatus();
return this;
}
}
}
} // SET TRANSACTION stmt
// Bind the base class.
return bindSelf(bindWA);
}
// Transpose::bindNode - Bind the transpose node.
// Coming into the node (from the parser) there are two ItemExpr Trees:
//
// keyCol_: The ItemExpr contains a ColReference to the key column which
// is added by the transpose node. This pointer is set to NULL by bindNode.
// If keyCol_ is NULL coming into the bindNode, then no key Column is
// generated for this transpose.
//
// transValsTree_: This ItemExpr tree contains a list of pairs which is
// NULL terminated (for ease of processing). Each pair contains in child(0),
// a list of transpose items for a given transpose set and in child(1), a
// list of ColReferences to the new value columns associated with this
// transpose set. A transpose item is a list of value expressions.
// This pointer is set to NULL by bindNode.
//
// For Example:
//
// SELECT *
// FROM Table
// TRANSPOSE A,B AS C1
// X,Y,Z AS C2
// (1,'hello'),(2,'world') AS (C3, C4)
// KEY BY K1
//
// For the above query, after parsing, the TRANSPOSE node will look like:
//
// TRANSPOSE
// keyCol_ transValsTree_
// | |
// K1 O------O---------O---NULL
// | | |
// O O O--O
// |\ |\ | |\
// O C1 O C2 | C3 C4
// |\ |\ O---------O---NULL
// A O X O | |
// |\ |\ O O
// B NULL Y O |\ |\
// |\ 1 'hello' 2 'world'
// Z NULL
//
// O - represent ITM_LIST nodes.
//
// bindNode binds this structure to form a new structure contained in
// the vector of ValueIdLists, transUnionVector_.
//
// transUnionVector_: This is a vector of ValueIdLists. There is one entry
// for each transpose set, plus one entry for the key values. Each entry
// contains a list of ValueIdUnion Nodes. The first entry contains a list
// with one ValueIdUnion node. This node is for the Const. Values (1 - N)
// representing the Key Values. The other entries contain lists of
// ValueIdUnion nodes for the Transposed Values. Each of these entries of
// the vector represents a transpose set. If the transpose set contains a
// list of values, then there will be only one ValueIdUnion node in the
// list. If the transpose set contains a list of lists of values, then
// there will be as many ValueIdUnion nodes as there are items in the
// sublists. (see example below.)
// transUnionVector_ is generated in bindNode().
//
// transUnionVectorSize_: This is the number of entries in transUnionVector_.
//
// For the above query, after binding, the TRANSPOSE node will look like:
//
// TRANSPOSE
// transUnionVectorSize_: 4
// transUnionVector_:
// ValueIdUnion(1,2,3,4,5,6,7)
// ValueIdUnion(A,B)
// ValueIdUnion(X,Y,Z)
// ValueIdUnion(1,2) , ValueIdUnion('hello','world')
//
//
RelExpr *Transpose::bindNode(BindWA *bindWA)
{
// If this node has already been bound, we are done.
//
if (nodeIsBound())
{
bindWA->getCurrentScope()->setRETDesc(getRETDesc());
return this;
}
BindContext *curContext = bindWA->getCurrentScope()->context();
curContext->inTransposeClause() = TRUE;
// Bind the child nodes.
//
bindChildren(bindWA);
if (bindWA->errStatus()) return this;
// At this point the Transpose relational operator has two or three
// expressions:
// keyCol_ --- A ColReference to the new keyCol. (possibly NULL)
// transValsTree_ --- expressions for the transposed values and their
// ColReferences.
//
// Transpose::bindNode() performs the following steps:
//
// 1 - Construct a list of transpose set expressions
// and a list of ColReferences associated with each transpose set
// expression.
//
// 2 - Allocate a return descriptor and add the columns from the
// child's descriptor to it.
//
// 3 - Allocate the transUnionVector_
//
// 4 - Construct a ValueIdUnion node for the Key Values. Bind this node.
// Add the keyColName to the return descriptor with the valueId of this
// node. Add the valueId of this node as the first entry of
// a ValueIdList in the first entry of transUnionVector_.
//
// 5 - For each transpose set, Construct as many ValueIdUnion nodes as
// there are values in each item of the transpose set. Within a
// given transpose set, the number of values per item must be the
// same. In the example above, the third transpose set contains the
// items (1, 'hello') and (2, 'world'). These both have two values per
// item. The others all have 1 value per item. The ValueIdUnions
// generated will contain the i'th value from each item. Bind each
// of these ValueUnionId nodes. Add the value column name to the
// return descriptor with the valueId of this node. Add the valueId
// of this node the ValueIdList in the proper entry of
// transUnionVector_.
//
// 6 - Set the return descriptor, and bindSelf.
//
CollIndex i, j, k;
CollIndex numTransSets = 0;
// Get a pointer to the head of this list of pairs.
// This is the last time we will have to reference this tree.
//
ItemExpr *transTree = (ItemExpr *)removeTransValsTree();
// Allocate two ItemExpr Lists. One for the list of lists of (lists of)
// expressions. And the other for the list of (lists of) ColReferences.
//
ItemExprList transSetsList(bindWA->wHeap());
ItemExprList newColsList(bindWA->wHeap());
// Populate these lists and
// determine how many transpose sets there are in this tree.
// In the example above, there should be three.
//
while (transTree) {
transSetsList.insert(transTree->child(0)->child(0));
newColsList.insert(transTree->child(0)->child(1));
numTransSets++;
transTree = transTree->child(1);
}
// Must have at least one value expression in the transpose values list.
//
CMPASSERT(numTransSets > 0);
// Using the example above, at this point:
//
// transSetsList newColsList
// | | | | | |
// O O O---------O---NULL C1 C2 O
// |\ |\ | | |\
// A O X O O O C3 C4
// |\ |\ |\ |\
// B NULL Y O 1 'hello' 2 'world'
// |\
// Z NULL
//
// Allocate the return descriptor. This will contain the
// columns of the child node as well as the new columns added
// by the transpose operator. The column order is:
//
// [child's columns][keyCol][valCol1][valCol2] ...
//
// Using the example, this would be:
//
// [child's columns], K1, C1, C2, C3, C4
//
RETDesc *resultTable = new(bindWA->wHeap()) RETDesc(bindWA);
// Add the columns from the child to the RETDesc.
//
const RETDesc &childTable = *child(0)->getRETDesc();
resultTable->addColumns(bindWA, childTable);
transUnionVectorSize_ = numTransSets + 1;
transUnionVector() = new(bindWA->wHeap())
ValueIdList[transUnionVectorSize_];
// Get the key column reference
// This is the last time we need this ItemExpr.
//
ColReference *keyColumn = (ColReference *)removeKeyCol();
// If no key column has been specified, then no key col will be
// generated.
//
if (keyColumn) {
// Get the key column name.
//
NAString keyColName(keyColumn->getColRefNameObj().getColName(), bindWA->wHeap());
// Construct and Bind the ValueIdUnion node as the union of constants
// from 1 to the total number of transpose expressions. In the above
// example this will be from 1 to 7, since the three transpose sets
// contain 2, 3, and 2 expressions respectively.
//
ValueIdList constVals;
ItemExpr *constExpr;
CollIndex keyVal;
// For each expression in each transpose set.
//
for (i = 0, keyVal = 1; i < numTransSets; i++) {
// Determine how many expressions are in each transpose set.
//
CollIndex numTransItems = 0;
ItemExpr *transSet = transSetsList[i];
while (transSet) {
numTransItems++;
transSet = transSet->child(1);
}
for (j = 0; j < numTransItems; j++, keyVal++) {
// Construct the constant value
//
#pragma nowarn(1506) // warning elimination
constExpr = new(bindWA->wHeap()) SystemLiteral(keyVal);
#pragma warn(1506) // warning elimination
// Bind the constant value.
//
constExpr->bindNode(bindWA);
if (bindWA->errStatus()) return this;
// Insert the valueId into the list
//
constVals.insert(constExpr->getValueId());
}
}
// Construct the ValueIdUnion node which will represent the key Col.
//
ValueIdUnion *keyVidu = new(bindWA->wHeap())
ValueIdUnion(constVals, NULL_VALUE_ID);
// Bind the ValueIdUnion node.
//
keyVidu->bindNode(bindWA);
if (bindWA->errStatus()) return this;
// Add the key column to the RETDesc (as the union of all the constants)
//
resultTable->addColumn(bindWA, keyColName, keyVidu->getValueId());
// The ValueIdUnion for the Key Values is the first entry in
// the ValueIdList of the first entry of transUnionVector_.
//
transUnionVector()[0].insert(keyVidu->getValueId());
}
// For each transpose set,
// - bind the list of expressions.
// - Construct a ValueIdUnion node containing the resulting valueIds.
// - Bind this ValueIdUnion node
// - Add the associate column name to the return descriptor with the
// valueId of the ValueIdUnion node.
//
ValueIdList transVals;
for (i = 0; i < numTransSets; i++) {
// The column(s) associated with this transpose set.
// (will be used below, within the inner loop)
//
ItemExprList newCols(newColsList[i], bindWA->wHeap());
// Determine how many expressions are in each transpose set.
//
CollIndex numTransItems = 0;
ItemExpr *transSet = transSetsList[i];
ItemExprList transItemList(bindWA->wHeap());
// Populate this list.
//
while (transSet) {
transItemList.insert(transSet->child(0));
numTransItems++;
transSet = transSet->child(1);
}
ItemExprList transItem(transItemList[0], bindWA->wHeap());
CollIndex numTransVals = transItem.entries();
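// numTransVals is the number of values per item, taken from the first
// item; every other item in this set must match it (checked below).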
// For a given transpose set, the number of new columns declared
// must be the same as the number of values per item. In the example
// above, the third transpose set contains the item (1, 'hello') and
// the columns (C3,C4); both have two entries.
//
if (numTransVals != newCols.entries()) {
*CmpCommon::diags() << DgSqlCode(-4088);
bindWA->setErrStatus();
return this;
}
for (k = 0; k < numTransVals; k++) {
ItemExpr *transValueUnionExpr = NULL;
for (j = 0; j < numTransItems; j++) {
transItem.clear();
transItem.insertTree(transItemList[j], ITM_ITEM_LIST);
// Within a given transpose set, the number of values per item
// must be the same. In the example above, the third transpose
// set contains the items (1, 'hello') and (2, 'world'). These
// both have two values per item. The others all have 1 value
// per item.
//
if (numTransVals != transItem.entries()) {
*CmpCommon::diags() << DgSqlCode(-4088);
bindWA->setErrStatus();
return this;
}
if (transValueUnionExpr == NULL) {
transValueUnionExpr = transItem[k];
}
else
{
transValueUnionExpr = new (bindWA->wHeap())
ItemList(transValueUnionExpr, transItem[k]);
}
}
// Bind the Transpose Values expressions. Get the expression value Id's
//
transVals.clear();
if(transValueUnionExpr != NULL )
transValueUnionExpr->convertToValueIdList(transVals,
bindWA,
ITM_ITEM_LIST);
if (bindWA->errStatus()) return this;
// If there is more than one transpose set, the value columns
// generated by transpose can be NULL. So, make sure the typing is
// done properly. This is done by setting the first entry in the list
// to be nullable, so that the ValueIdUnion, and hence the new column,
// will be nullable as well. This is not done on the ValueIdUnion
// node itself, since that would add a Null Instantiate node, and
// we later assume that this node will always be a ValueIdUnion
// node.
//
if (numTransSets > 1) {
ValueId valId = transVals[0];
transVals[0] = valId.nullInstantiate(bindWA, FALSE);
}
// Construct and Bind the ValueIdUnion node for the transpose vals.
//
ValueIdUnion *valVidu = new(bindWA->wHeap())
ValueIdUnion(transVals, NULL_VALUE_ID);
valVidu->bindNode(bindWA);
if (bindWA->errStatus()) return this;
// Insert this valueIdUnion node into the list of valueIdUnions
// in the proper entry in transUnionVector_
//
transUnionVector()[i + 1].insert(valVidu->getValueId());
// Get the val column reference
//
ColReference *valCol = (ColReference *)newCols[k];
// Must have Column Refs to val column.
//
CMPASSERT(valCol);
// Get the val column name.
//
NAString valColName( valCol->getColRefNameObj().getColName(), bindWA->wHeap());
// Add the transpose column
// (as the union of all of the transposed value columns)
//
resultTable->addColumn(bindWA, valColName, valVidu->getValueId());
}
}
// Set the return descriptor
//
setRETDesc(resultTable);
bindWA->getCurrentScope()->setRETDesc(resultTable);
//
// Bind the base class.
//
return bindSelf(bindWA);
} // Transpose::bindNode()
// -----------------------------------------------------------------------
// The Pack node binds itself by composing its packing expression from
// all the columns available in its child's RETDesc. The packed columns
// produced by the packing expression are then made available in the Pack
// node's own RETDesc.
// -----------------------------------------------------------------------
RelExpr* Pack::bindNode(BindWA* bindWA)
{
if (nodeIsBound())
{
bindWA->getCurrentScope()->setRETDesc(getRETDesc());
return this;
}
bindChildren(bindWA);
if (bindWA->errStatus()) return this;
// ---------------------------------------------------------------------
// The Pack node has a packing expression stored as packingExprTree_
// before binding. If packingExprTree_ is NULL, we are just going to
// pick up all the columns from the output of its child. During binding,
// this tree is converted into a value id list.
// ---------------------------------------------------------------------
// Create and bind the packing factor item expression.
#pragma nowarn(1506) // warning elimination
ItemExpr* pfie = new (bindWA->wHeap()) SystemLiteral(packingFactorLong());
#pragma warn(1506) // warning elimination
pfie->bindNode(bindWA);
if (bindWA->errStatus()) return this;
// Insert vid of bound constant into packingFactor valueIdSet.
packingFactor().clear();
packingFactor().insert(pfie->getValueId());
// Create my RETDesc to hold the packed columns.
RETDesc* resultTable = new (bindWA->wHeap()) RETDesc (bindWA);
// Bind the tree if it's present.
if (packingExprTree_)
{
ItemExpr* packExprTree = removePackingExprTree();
packExprTree->convertToValueIdList(packingExpr(), bindWA, ITM_ITEM_LIST);
if (bindWA->errStatus()) return this;
for (CollIndex i = 0; i < packingExpr().entries(); i++)
{
// Add all columns to result table.
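// Fabricate a unique packed-column name of the form PACKEDCOL_<uniqueName>.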
NAString packedColName( "PACKEDCOL_", bindWA->wHeap());
packedColName += bindWA->fabricateUniqueName();
#pragma nowarn(1506) // warning elimination
Int32 length = packedColName.length();
#pragma warn(1506) // warning elimination
char * colName = new (bindWA->wHeap()) char[length + 1];
colName[length] = 0;
#pragma nowarn(1506) // warning elimination
str_cpy_all(colName, packedColName, packedColName.length());
#pragma warn(1506) // warning elimination
ColRefName colRefName(colName);
resultTable->addColumn(bindWA,
colRefName,
packingExpr().at(i),
USER_COLUMN,
colName);
}
}
else // no packing expr tree, get all the columns from child.
{
// Get RETDesc from the child, which is assumed to be a RelRoot. Too strict?
const RETDesc& childTable = *child(0)->getRETDesc();
ValueIdList childTableVidList;
// These are only the user columns. Are SYS columns important?
childTable.getValueIdList(childTableVidList);
// Initialize packing expression.
packingExpr().clear();
// For each column in child's RETDesc, put a PackFunc() on top of it.
for (CollIndex i = 0; i < childTableVidList.entries(); i++)
{
ItemExpr* childItemExpr = childTableVidList[i].getItemExpr();
PackFunc* packedItemExpr = new (bindWA->wHeap())
PackFunc(childItemExpr,pfie);
// Bind the packed column.
packedItemExpr->bindNode(bindWA);
if (bindWA->errStatus()) return this;
// Insert into both the result table and my packingExpr_.
packingExpr().insert(packedItemExpr->getValueId());
// $$$ Any implications of this? Needs to be investigated.
// Use the original column name as the packed column name. The index
// is on USER columns only. Do SYS columns matter?
ColRefName colRefName = childTable.getColRefNameObj(i);
const char* heading = childTable.getHeading(i);
// Insert into RETDesc for RelRoot above it to pick up as select-list.
resultTable->addColumn(bindWA,
colRefName,
packedItemExpr->getValueId(),
USER_COLUMN,
heading);
// $$$
// OR: start with making a copy of child's RETDesc and change each col
// to point to the vid for the packed column instead of the original.
}
}
// Set the result table, bind self and return.
setRETDesc(resultTable);
bindWA->getCurrentScope()->setRETDesc(resultTable);
bindSelf(bindWA);
// To test packing, add an unpack node on top of this pack node to check.
char* env = getenv("PACKING_FACTOR");
if (env && atol(env) > 0)
{
Lng32 pf = atol(env);
ItemExpr* unPackExpr = NULL;
ItemExpr* rowFilter = NULL;
ItemExpr* unPackItem;
ItemExpr* numRows;
const NAType* typeInt = new(bindWA->wHeap()) SQLInt(TRUE,FALSE);
ValueIdList packedCols;
resultTable->getValueIdList(packedCols);
NAString hostVarName("_sys_UnPackIndex", bindWA->wHeap());
hostVarName += bindWA->fabricateUniqueName();
ItemExpr* indexHostVar = new(bindWA->wHeap())
HostVar(hostVarName,new(bindWA->wHeap()) SQLInt(TRUE,FALSE),TRUE);
indexHostVar->synthTypeAndValueId();
for (CollIndex i=0; i < packedCols.entries(); i++)
{
const NAType* colType =
&(packedCols[i].getItemExpr()->child(0)->getValueId().getType());
Lng32 width = colType->getNominalSize();
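// 'base' accounts for a leading Int32 row count plus, when the column is
// nullable, a null bitmap of pf bits rounded up to whole bytes.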
#pragma nowarn(1506) // warning elimination
Lng32 base = (colType->supportsSQLnullPhysical() ? (pf-1)/CHAR_BIT +1 : 0)
+ sizeof(Int32);
#pragma warn(1506) // warning elimination
// $$$ Some duplicate code to be moved to PackColDesc later.
ColRefName colRefName;
colRefName = resultTable->getColRefNameObj(i);
unPackItem = new(bindWA->wHeap())
UnPackCol(packedCols[i].getItemExpr(),
indexHostVar,
width,
base,
colType->supportsSQLnull(),
colType);
numRows = new(bindWA->wHeap())
UnPackCol(packedCols[i].getItemExpr(),
new(bindWA->wHeap()) SystemLiteral(0),
typeInt->getNominalSize(),
0,
FALSE,
typeInt);
unPackExpr = (unPackExpr ?
new(bindWA->wHeap()) ItemList(unPackExpr,unPackItem) :
unPackItem);
rowFilter = (rowFilter ?
new(bindWA->wHeap()) ItemList(rowFilter,numRows) :
numRows);
}
RelExpr* unpack =
new(bindWA->wHeap()) UnPackRows(pf,unPackExpr,rowFilter,NULL,
this, indexHostVar->getValueId());
return unpack->bindNode(bindWA);
}
return this;
} // Pack::bindNode()
RelExpr * Rowset::bindNode(BindWA* bindWA)
{
// If this node has already been bound, we are done.
if (nodeIsBound())
return this->transformRelexpr_;
if (bindWA->getHostArraysArea()) {
bindWA->getHostArraysArea()->done() = TRUE;
}
//
// Bind the child nodes.
//
bindChildren(bindWA);
if (bindWA->errStatus()) return this;
// Transform current node into a new subtree which performs access to
// RowSet based on the unpacking and tuple node expression operators.
// The formed tuple is composed of all input RowSet host variables:
// Rowset-tuple: array_hv1, array_hv2, ... array_hvN.
// The Unpack expression is used to retrieve the elements of the Rowset
// with an indexed operator. For example, retrieve values for index two
// for each Rowset host variable.
// The transformed subtree has the following structure
//
// UNPACK
// |
// TUPLE
//
// Note that the original Rowset relational expression has a rename node
// on top.
//
// First find the maxRowSetSize and its rowsetSizeExpr. The rowset size is
// the smallest declared dimension of the arrays composing the rowset.
// If a constant rowset size was given in the SQL statement, it must be
// smaller than the computed value.
NABoolean hasDifferentSizes = FALSE;
Lng32 maxRowsetSize = 0; /* Maximum number of Rows in Rowset */
ItemExpr *rowsetSizeExpr;
ItemExpr *hostVarTree;
// We get the list of input host vars, which is stored in the root of the
// parse tree
HostArraysWA *arrayArea = bindWA->getHostArraysArea();
RelRoot *root = bindWA->getTopRoot();
// Do any extra checking at this moment.
for (hostVarTree = inputHostvars_;
hostVarTree;
hostVarTree = hostVarTree->child(1)) {
CMPASSERT(hostVarTree->getOperatorType() == ITM_ITEM_LIST);
HostVar *hostVar = (HostVar *)hostVarTree->getChild(0);
if (hostVar->getOperatorType() != ITM_HOSTVAR ||
hostVar->getType()->getTypeQualifier() != NA_ROWSET_TYPE) {
// 30001 A rowset must be composed of host variable arrays
*CmpCommon::diags() << DgSqlCode(-30001);
bindWA->setErrStatus();
return NULL;
}
// Get the smallest dimension for rowset size
SQLRowset* hostVarType = (SQLRowset *)hostVar->getType();
if (hostVarType->getNumElements() <= 0) {
// 30004 The dimension of the arrays composing the RowSet must be greater
// than zero. A value of $0~Int0 was given
*CmpCommon::diags() << DgSqlCode(-30004)
<< DgInt0((Int32)hostVarType->getNumElements());
bindWA->setErrStatus();
return NULL;
}
if (maxRowsetSize == 0)
maxRowsetSize = hostVarType->getNumElements();
else if (hostVarType->getNumElements() != maxRowsetSize) {
// 30005 The dimensions of the arrays composing the RowSet are
// different. The smallest dimension is assumed.
// This is just a warning
// Give the warning only once
if (hasDifferentSizes == FALSE) {
if (arrayArea->hasDynamicRowsets()) {
// 30015 The dimension of the arrays composing the RowSet must be the same
// in dynamic SQL
*CmpCommon::diags() << DgSqlCode(-30015) ;
bindWA->setErrStatus();
return NULL;
} // for static SQL this is only a warning.
hasDifferentSizes = TRUE;
*CmpCommon::diags() << DgSqlCode(30005);
}
// Pick the smallest one
if (hostVarType->getNumElements() < maxRowsetSize)
maxRowsetSize = hostVarType->getNumElements();
}
// Make sure that the element type's nullability agrees with whether a
// null indicator was supplied for the rowset array; that is, force it.
NAType* hostVarElemType = hostVarType->getElementType();
NABoolean hostVarElemNullInd = !(hostVar->getIndName().isNull());
// If hostVarType is Unknown then this is a dynamic param that has been
// converted into a hostvar. For dynamic params there is no null
// indicator variable/param specified in the query text, so the previous
// check will always return FALSE. We will set all dynamic params to be
// nullable and let type synthesis infer nullability later on.
if (hostVarElemType->getTypeQualifier() == NA_UNKNOWN_TYPE)
hostVarElemNullInd = TRUE;
hostVarElemType->setNullable(hostVarElemNullInd);
}
// If a rowset size expression was produced during parsing, it is used
// to restrict the rowset size during execution. The expression must be
// a numeric literal (known at compile time) or an integer host variable
// (known at execution time). We do not allow other types of expression,
// since the rowset size must be known before the statement is executed
// to avoid excessive copying when the host variable arrays are sent
// down the execution queue.
// If there is no size specification of the form ROWSET <size> ( <list> ) then
// we take the size from ROWSET FOR INPUT SIZE <size>
if (!sizeExpr_ && bindWA->getHostArraysArea()) {
sizeExpr_ = bindWA->getHostArraysArea()->inputSize();
if ((bindWA->getHostArraysArea()->getInputArrayMaxSize() > 0) &&
(!sizeExpr_ )) {
// ODBC process is performing a bulk insert and we need to create
// an input parameter to simulate the functionality of ROWSET FOR INPUT
// SIZE ... syntax.
NAString name = "__arrayinputsize" ;
HostVar *node = new (bindWA->wHeap())
HostVar(name,
new(bindWA->wHeap()) SQLInt(TRUE,FALSE),
TRUE);
node->setHVRowsetForInputSize();
root->addAtTopOfInputVarTree(node);
sizeExpr_ = (ItemExpr *) node ;
}
}
if (sizeExpr_) {
if (sizeExpr_->getOperatorType() == ITM_CONSTANT) {
if (((ConstValue *)sizeExpr_)->getType()->getTypeQualifier()
!= NA_NUMERIC_TYPE) {
// 30003 Rowset size must be an integer host variable or an
// integer constant
*CmpCommon::diags() << DgSqlCode(-30003);
bindWA->setErrStatus();
return NULL;
}
if (((ConstValue *)sizeExpr_)->getExactNumericValue() <= 0) {
// 30004 The dimension of the arrays composing the RowSet must be
// greater than zero. A value of $0~Int0 was given
*CmpCommon::diags() << DgSqlCode(-30004)
<< DgInt0((Int32) (((ConstValue *)sizeExpr_)
->getExactNumericValue()));
bindWA->setErrStatus();
return NULL;
}
if (((ConstValue *)sizeExpr_)->getExactNumericValue() > maxRowsetSize) {
// 30002 The given RowSet size ($0~Int0) must be smaller or
// equal to the smallest dimension ($1Int1) of the
// arrays composing the rowset
*CmpCommon::diags() << DgSqlCode(-30002)
<< DgInt0((Int32)
((ConstValue *)sizeExpr_)
->getExactNumericValue())
<< DgInt1(maxRowsetSize);
bindWA->setErrStatus();
return NULL;
}
else {
maxRowsetSize = (Lng32)((ConstValue *)sizeExpr_)->getExactNumericValue() ;
}
}
else
if (!((sizeExpr_->getOperatorType() == ITM_HOSTVAR &&
((HostVar *)sizeExpr_)->getType()->getTypeQualifier()
== NA_NUMERIC_TYPE) ||
(sizeExpr_->getOperatorType() == ITM_DYN_PARAM ) ||
((sizeExpr_->getOperatorType() == ITM_CAST) &&
(sizeExpr_->child(0)->getOperatorType() == ITM_DYN_PARAM))))
{
// 30003 Rowset size must be an integer host variable or an
// integer constant
*CmpCommon::diags() << DgSqlCode(-30014);
bindWA->setErrStatus();
return NULL;
}
// We return a -1 if the execution time rowset size exceeds the maximum
// declared size
ItemExpr *maxSize = new (bindWA->wHeap()) SystemLiteral(maxRowsetSize);
ItemExpr *neg = new (bindWA->wHeap()) SystemLiteral(-1);
ItemExpr *constrPred = new (bindWA->wHeap())
BiRelat(ITM_GREATER, sizeExpr_, maxSize);
rowsetSizeExpr = new (bindWA->wHeap())
IfThenElse(constrPred, neg, sizeExpr_);
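// The generated expression is equivalent to:
//   CASE WHEN <size> > <maxRowsetSize> THEN -1 ELSE <size> END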
// IfThenElse only works if Case is its parent.
rowsetSizeExpr = new (bindWA->wHeap()) Case (NULL, rowsetSizeExpr);
// At code generation time, it is assumed that the size expression
// is of integer type, so we add this cast. We do not allow null
// values.
rowsetSizeExpr = new (bindWA->wHeap())
Cast(rowsetSizeExpr, new (bindWA->wHeap()) SQLInt(TRUE,FALSE));
// For dynamic rowsets, the parameter specifying rowset for input size
// must be typed as a non-nullable integer.
if (sizeExpr_->getOperatorType() == ITM_DYN_PARAM ) {
sizeExpr_->synthTypeAndValueId();
SQLInt intType(TRUE,FALSE); // TRUE -> allow negative values, FALSE -> not nullable
(sizeExpr_->getValueId()).coerceType(intType, NA_NUMERIC_TYPE);
}
}
else
{
rowsetSizeExpr = new (bindWA->wHeap()) SystemLiteral(maxRowsetSize);
}
// Construct an index host variable to iterate over the elements of the
// rowset. The name of the host variable must be unique (fabricated
// by calling fabricateUniqueName). This host variable must be bound
// here since it is not an input of the parse tree; call
// synthTypeAndValueId(), which does the minimum binding.
NAString indexName(bindWA->wHeap());
if (indexExpr_) {
// Get the name.
indexName = ((ColReference *)indexExpr_)->getColRefNameObj().getColName();
} else {
indexName = "_sys_rowset_index" + bindWA->fabricateUniqueName();
}
const NAType *indexType = new (bindWA->wHeap()) SQLInt(TRUE, FALSE);
ItemExpr *indexHostVar = new (bindWA->wHeap())
HostVar(indexName, indexType,
TRUE // is system-generated
);
indexHostVar->synthTypeAndValueId();
// Generate the RowsetArrayScan expressions which are used to extract
// an element value of the rowset array given an index.
ItemExpr *unPackExpr = NULL;
for (hostVarTree = inputHostvars_;
hostVarTree;
hostVarTree = hostVarTree->child(1)) {
HostVar *hostVar = (HostVar *)hostVarTree->getChild(0);
SQLRowset* hostVarType = (SQLRowset *)hostVar->getType();
NAType* hostVarElemType = hostVarType->getElementType();
Lng32 hostVarElemSize = hostVarElemType->getTotalSize();
NABoolean hostVarElemNullInd = !(hostVar->getIndName().isNull());
// Force all host variables to have the same number of elements, which
// was determined previously.
hostVarType->setNumElements(maxRowsetSize);
// The element size must be aligned
hostVarElemSize = ALIGN(hostVarElemSize,
hostVarElemType->getDataAlignment());
// Assign a valueId for this Host variable. UnPackRows node will need
// this valueId during its binding.
//hostVar->synthTypeAndValueId();
hostVar->bindNode(bindWA);
ItemExpr *unPackCol =
new (bindWA->wHeap())
RowsetArrayScan(hostVar, // Rowset Host Var array
indexHostVar, // Index
maxRowsetSize, // Cannot go over this size
hostVarElemSize, // Element size in bytes
hostVarElemNullInd,
hostVarElemType
);
// Construct a list of expressions to extract the Data value from
// the packed row. During normalization, this list (or a ValueIdList
// representing this list) will be reduced to the minimum required.
// This should be a NULL-terminated list; unfortunately, there are
// many parts of the SQL/MX code that loop over the arity instead
// of checking for a NULL-terminated list... the effect is a
// segmentation violation.
unPackExpr = (unPackExpr
? new (bindWA->wHeap()) ItemList(unPackExpr, unPackCol)
: unPackCol);
}
// Enable rowsetrowcount for rowset updates and deletes
// if the user has not turned the feature OFF.
// If we have rowsets in the where clause and are not in a select,
// then we have either a rowset update or delete, for direct rowsets.
if (arrayArea &&
(!(arrayArea->getHasDerivedRowsets())) &&
arrayArea->hasHostArraysInWhereClause() &&
(arrayArea->hasInputRowsetsInSelectPredicate() != HostArraysWA::YES_) &&
(CmpCommon::getDefault(ROWSET_ROW_COUNT) == DF_ON)) {
arrayArea->setRowsetRowCountArraySize(maxRowsetSize);
}
if (indexExpr_) {
/*
* Create an item expression to obtain the index
*/
ItemExpr *unPackCol =
new (bindWA->wHeap())
RowsetArrayScan(indexHostVar, // Index
indexHostVar, // Index
maxRowsetSize, // Cannot go over this size
indexType->getTotalSize(),
0,
indexType,
ITM_ROWSETARRAY_ROWID
);
unPackExpr = (unPackExpr
? new (bindWA->wHeap()) ItemList(unPackExpr, unPackCol)
: unPackCol);
}
// Now create a Tuple node on which to hang the children and input values
// of the actual Rowset node. Make sure to copy the RelExpr part of Rowset
// to the tuple.
// Kludge up a dummy child for the index
ItemExpr *inputs = ((indexExpr_)
? new (bindWA->wHeap()) ItemList(inputHostvars_,
indexHostVar)
: inputHostvars_);
Tuple *tupleExpr = new (bindWA->wHeap()) Tuple(inputs);
tupleExpr->setBlockStmt(isinBlockStmt());
copyTopNode(tupleExpr);
// Construct the replacement tree for the Rowset operator.
RelExpr *newSubTree = (new (bindWA->wHeap())
UnPackRows(maxRowsetSize,
unPackExpr,
rowsetSizeExpr,
NULL,
tupleExpr,
indexHostVar->getValueId()));
newSubTree->setBlockStmt(isinBlockStmt());
// Do not set this flag for derived rowsets. This flag is used in the generator
// to determine whether the onlj and TF TDBs must set the row number when
// encountering an execution-time rowset error.
if (arrayArea &&
(!(arrayArea->getHasDerivedRowsets())) &&
(arrayArea->hasInputRowsetsInSelectPredicate() != HostArraysWA::YES_))
{
newSubTree->setRowsetIterator(TRUE);
}
// Move any predicate on the packed table to be on the result
// of unpacking.
newSubTree->addSelPredTree(removeSelPredTree());
// Remember the transform tree, just in case someone is trying to bind this
// node again.
transformRelexpr_ = newSubTree;
// Bind the new generated subtree.
return newSubTree->bindNode(bindWA);
} // Rowset::bindNode()
RelExpr * RowsetRowwise::bindNode(BindWA* bindWA)
{
// If this node has already been bound, we are done.
if (nodeIsBound())
return this->transformRelexpr_;
if (bindWA->getHostArraysArea()) {
bindWA->getHostArraysArea()->done() = TRUE;
}
//
// Bind the child nodes.
//
bindChildren(bindWA);
if (bindWA->errStatus())
return this;
// Transform current node into a new subtree which performs access to
// RowSet based on the unpacking.
// UNPACK
// |
// TUPLE
//
// We get the list of input host vars, which is stored in the root of the
// parse tree
HostArraysWA *arrayArea = bindWA->getHostArraysArea();
if ((arrayArea->rwrsMaxSize()->getOperatorType() != ITM_CONSTANT) ||
(((ConstValue *)arrayArea->rwrsMaxSize())->getType()->getTypeQualifier()
!= NA_NUMERIC_TYPE))
{
// 30003 Rowset size must be an integer host variable or an
// integer constant
*CmpCommon::diags() << DgSqlCode(-30003);
bindWA->setErrStatus();
return NULL;
}
// if partition number has been specified, then we don't unpack
// rows. The whole buffer is shipped to the specified partition.
if (arrayArea->partnNum())
return child(0)->castToRelExpr();
Lng32 maxRowsetSize =
(Lng32)((ConstValue *)arrayArea->rwrsMaxSize())->getExactNumericValue() ;
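// SQLInt(FALSE, FALSE): unsigned (no negative values) and non-nullable.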
NAType * typ = new(bindWA->wHeap()) SQLInt(FALSE, FALSE);
ItemExpr * rwrsInputSizeExpr =
new(bindWA->wHeap()) Cast(arrayArea->inputSize(), typ);
if (bindWA->errStatus())
return this;
ItemExpr * rwrsMaxInputRowlenExpr =
new(bindWA->wHeap()) Cast(arrayArea->rwrsMaxInputRowlen(), typ);
if (bindWA->errStatus())
return this;
ItemExpr * rwrsBufferAddrExpr = arrayArea->rwrsBuffer();
if (bindWA->errStatus())
return this;
// Construct the replacement tree for the Rowset operator.
RelExpr *newSubTree = (new (bindWA->wHeap())
UnPackRows(maxRowsetSize,
rwrsInputSizeExpr,
rwrsMaxInputRowlenExpr,
rwrsBufferAddrExpr,
child(0)));
// Remember the transform tree, just in case someone is trying to bind this
// node again.
transformRelexpr_ = newSubTree;
// Bind the new generated subtree.
return newSubTree->bindNode(bindWA);
} // RowsetRowwise::bindNode()
// LCOV_EXCL_START - rfi
RelExpr * RowsetFor::bindNode(BindWA* bindWA)
{
// Binding of this node should not happen. It should have been eliminated
// by now by the pre-binding step. Its content is used to populate the
// RowSet node with options.
CMPASSERT(0);
return NULL;
}
// LCOV_EXCL_STOP
RelExpr * RowsetInto::bindNode(BindWA* bindWA)
{
// If this node has already been bound, we are done.
if (nodeIsBound())
return this->transformRelexpr_;
//
// Bind the child nodes.
//
bindChildren(bindWA);
if (bindWA->errStatus()) return this;
NABoolean hasDifferentSizes = FALSE;
Lng32 maxRowsetSize = 0; /* Maximum number of Rows in Rowset */
ULng32 numOutputHostvars = 0;
ItemExpr *rowsetSizeExpr;
ItemExpr *hostVarTree;
// Do any extra checking at this moment.
for (hostVarTree = outputHostvars_;
hostVarTree;
hostVarTree = hostVarTree->child(1)) {
numOutputHostvars++;
CMPASSERT(hostVarTree->getOperatorType() == ITM_ITEM_LIST);
HostVar *hostVar = (HostVar *)hostVarTree->getChild(0);
if (hostVar->getOperatorType() != ITM_HOSTVAR ||
hostVar->getType()->getTypeQualifier() != NA_ROWSET_TYPE) {
// 30001 A rowset must be composed of host variable arrays
*CmpCommon::diags() << DgSqlCode(-30001);
bindWA->setErrStatus();
return NULL;
}
// Get the smallest dimension for rowset size
SQLRowset* hostVarType = (SQLRowset *)hostVar->getType();
if (hostVarType->getNumElements() <= 0) {
// 30004 The dimension of the arrays composing the RowSet must be greater
// than zero. A value of $0~Int0 was given
*CmpCommon::diags() << DgSqlCode(-30004)
<< DgInt0((Int32)hostVarType->getNumElements());
bindWA->setErrStatus();
return NULL;
}
if (maxRowsetSize == 0)
maxRowsetSize = hostVarType->getNumElements();
else if (hostVarType->getNumElements() != maxRowsetSize) {
// 30005 Warning: the dimensions of the arrays composing the RowSet are
// different. The smallest dimension is assumed.
// This is just a warning
// Give the warning only once
if (hasDifferentSizes == FALSE) {
hasDifferentSizes = TRUE;
*CmpCommon::diags() << DgSqlCode(30005);
}
// Pick the smallest one
if (hostVarType->getNumElements() < maxRowsetSize)
maxRowsetSize = hostVarType->getNumElements();
}
// Make sure that the element type's nullability agrees with whether a
// null indicator was supplied for the rowset array; that is, force it.
NAType* hostVarElemType = hostVarType->getElementType();
NABoolean hostVarElemNullInd = !(hostVar->getIndName().isNull());
hostVarElemType->setNullable(hostVarElemNullInd);
}
// If a rowset size expression was produced during parsing, it is used
// to restrict the rowset size during execution. The expression must be
// a numeric literal (known at compile time) or an integer host variable
// (known at execution time). We do not allow other types of expression,
// since the rowset size must be known before the statement is executed
// to avoid excessive copying when the host variable arrays are sent
// down the execution queue.
if (sizeExpr_) {
if (sizeExpr_->getOperatorType() == ITM_CONSTANT) {
if (((ConstValue *)sizeExpr_)->getType()->getTypeQualifier()
!= NA_NUMERIC_TYPE) {
// 30003 Rowset size must be an integer host variable or an
// integer constant
*CmpCommon::diags() << DgSqlCode(-30003);
bindWA->setErrStatus();
return NULL;
}
if (((ConstValue *)sizeExpr_)->getExactNumericValue() > maxRowsetSize) {
// 30002 The given RowSet size ($0~Int0) must be smaller or
// equal to the smallest dimension ($1Int1) of the
// arrays composing the rowset
*CmpCommon::diags() << DgSqlCode(-30002)
<< DgInt0((Int32)
((ConstValue *)sizeExpr_)
->getExactNumericValue())
<< DgInt1(maxRowsetSize);
bindWA->setErrStatus();
return NULL;
}
}
else
if (!(sizeExpr_->getOperatorType() == ITM_HOSTVAR &&
((HostVar *)sizeExpr_)->getType()->getFSDatatype()
== REC_BIN32_SIGNED)) {
// 30003 Rowset size must be an integer host variable or an
// integer constant
*CmpCommon::diags() << DgSqlCode(-30003);
bindWA->setErrStatus();
return NULL;
}
rowsetSizeExpr = sizeExpr_;
}
else
rowsetSizeExpr = new (bindWA->wHeap()) SystemLiteral(maxRowsetSize);
if (getGroupAttr()->isEmbeddedUpdateOrDelete()){
// 30020 Embedded update/delete cannot be used with SELECT...INTO and rowset.
*CmpCommon::diags() << DgSqlCode(-30020);
bindWA->setErrStatus();
return NULL;
}
// Generate the RowsetArrayInto expressions which are used to append
// an element value to the rowset array.
// Get RETDesc from its only child, which must be of RelRoot type.
const RETDesc& childTable = *child(0)->getRETDesc();
ValueIdList childTableVidList;
childTable.getValueIdList(childTableVidList);
if (numOutputHostvars != childTableVidList.entries()) {
// 4094 The number of output host vars ($0) must equal the number of cols
*CmpCommon::diags() << DgSqlCode(-4094)
#pragma nowarn(1506) // warning elimination
<< DgInt0(numOutputHostvars) << DgInt1(childTableVidList.entries());
#pragma warn(1506) // warning elimination
bindWA->setErrStatus();
return NULL;
}
ItemExpr *packExpr = NULL;
Lng32 i;
for (hostVarTree = outputHostvars_, i = 0;
hostVarTree;
hostVarTree = hostVarTree->child(1), i++) {
HostVar *hostVar = (HostVar *)hostVarTree->getChild(0);
SQLRowset* hostVarType = (SQLRowset *)hostVar->getType();
NAType* hostVarElemType = hostVarType->getElementType();
// hostVarElemType->setNullable(TRUE);
Lng32 hostVarElemSize = hostVarElemType->getTotalSize();
NABoolean hostVarElemNullInd = !(hostVar->getIndName().isNull());
ItemExpr* sourceExpr = childTableVidList[i].getItemExpr();
ValueId sourceId = childTableVidList[i];
const NAType& targetType = *hostVarElemType;
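// coerceType lets an as-yet-untyped source (e.g., a dynamic param)
// adopt the target host variable's type before the compatibility check.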
sourceId.coerceType(targetType);
const NAType& sourceType = sourceId.getType();
NABoolean relaxCharTypeMatchingRule = FALSE;
// We make sure that the types coming from below this node
// match properly with the types this node has
if (NOT targetType.isCompatible(sourceType)) {
// JQ
// Relaxing the character data type matching rule.
if ( targetType.getTypeQualifier() == NA_CHARACTER_TYPE &&
sourceType.getTypeQualifier() == NA_CHARACTER_TYPE &&
((const CharType&)targetType).getCharSet() == CharInfo::UNICODE &&
((const CharType&)sourceType).getCharSet() == CharInfo::ISO88591
)
{
relaxCharTypeMatchingRule = TRUE;
}
if ( !relaxCharTypeMatchingRule ) {
// Incompatible assignment from type $0~String0 to type $1~String1
*CmpCommon::diags() << DgSqlCode(-30007)
<< DgString0(sourceType.getTypeSQLname(TRUE /*terse*/))
<< DgString1(targetType.getTypeSQLname(TRUE /*terse*/));
bindWA->setErrStatus();
return NULL;
}
}
// Force all host variables to have the same number of elements, which
// was determined previously.
hostVarType->setNumElements(maxRowsetSize);
// The element size must be aligned
hostVarElemSize = ALIGN(hostVarElemSize,
hostVarElemType->getDataAlignment());
// Preserve the length that is coming from the node below this one
if (hostVarElemType->getTypeQualifier() == NA_CHARACTER_TYPE &&
sourceType.getTypeQualifier() == NA_CHARACTER_TYPE) {
Int32 sourceSize = ((CharType *) &sourceType)->getDataStorageSize();
Int32 targetSize = ((CharType *) hostVarElemType)->getDataStorageSize();
if (sourceSize > targetSize ) {
// Adjust the layout size instead of changing the element size?
((CharType *) hostVarElemType)->setDataStorageSize(sourceSize);
}
}
if ( relaxCharTypeMatchingRule == TRUE )
sourceExpr = new (bindWA->wHeap())
Translate(sourceExpr, Translate::ISO88591_TO_UNICODE);
// If the type is external (for instance, decimal or varchar), we must first
// convert to our internal equivalent type
if (hostVarElemType->isExternalType()) {
NAType *internalType = hostVarElemType->equivalentType();
sourceExpr = new (bindWA->wHeap()) Cast(sourceExpr, internalType);
}
sourceExpr = new (bindWA->wHeap()) Cast(sourceExpr, hostVarElemType);
ItemExpr *packCol =
new (bindWA->wHeap())
RowsetArrayInto(sourceExpr,
rowsetSizeExpr, // Runtime size
maxRowsetSize, // Cannot go over this size
hostVarElemSize, // Element size in bytes
hostVarElemNullInd,
hostVarType
);
// Construct a list of expressions to append the Data value to the
// RowSet array. This should be a NULL-terminated list;
// unfortunately, there are many parts of the SQL/MX code that
// loop over the arity instead of checking for a NULL-terminated
// list... the effect is a segmentation violation.
packExpr = (packExpr
? new (bindWA->wHeap()) ItemList(packExpr, packCol)
: packCol);
}
// Construct the replacement tree for the RowsetInto operator.
RelExpr *newSubTree = (new (bindWA->wHeap())
Pack(maxRowsetSize,
child(0)->castToRelExpr(),
packExpr));
newSubTree->setFirstNRows(getFirstNRows());
// If we have an ORDER BY when there is an INTO :array, then we
// add the requirement that the tuples that this Pack node will
// receive must be sorted
ValueIdList *ptrReqOrder;
ptrReqOrder = new (bindWA->wHeap())
ValueIdList(((RelRoot *) (RelExpr *) newSubTree->child(0))->reqdOrder());
((Pack *) newSubTree)->setRequiredOrder(*ptrReqOrder);
// Remember the transform tree, just in case someone is trying to bind this
// node again.
transformRelexpr_ = newSubTree;
// Bind the new generated subtree.
return newSubTree->bindNode(bindWA);
} // RowsetInto::bindNode
RelExpr *
IsolatedScalarUDF::bindNode (BindWA *bindWA)
{
if (nodeIsBound())
{
bindWA->getCurrentScope()->setRETDesc(getRETDesc());
return this;
}
// If we have a RoutineDesc, it means we were transformed from a
// UDFunction ItemExpr, and do NOT need to check all the metadata,
// params, etc.
if (getRoutineDesc() == NULL )
{
// If we get here, we created a IsolatedScalarUDF some other way
// than through the transformation of UDFunction. Either that or
// we have someone walking over our memory...
CMPASSERT(0);
bindWA->setErrStatus();
return this;
}
else
{
markAsBound();
}
return this;
} // IsolatedScalarUDF::bindNode ()
/*
* This method performs binder functions for the CALLSP node.
* It performs semantic checks on the called stored procedure,
* creates a Tuple child, and allocates ValueIds for the parameters.
* It also provides support for the CLI layer's processing of OUT
* parameters.
*/
RelExpr *CallSP::bindNode(BindWA *bindWA)
{
if (nodeIsBound())
{
bindWA->getCurrentScope()->setRETDesc(getRETDesc());
return this;
}
if (bindWA->getHoldableType() == SQLCLIDEV_ANSI_HOLDABLE)
{
*CmpCommon::diags() << DgSqlCode(-4382);
bindWA->setErrStatus();
bindWA->setBindingCall (FALSE);
return this;
}
bindWA->setBindingCall (TRUE);
bindWA->setCurrOrdinalPosition (1);
bindWA->setCurrParamMode (COM_UNKNOWN_DIRECTION);
bindWA->clearHVorDPinSPDups ();
bindWA->setDupWarning (FALSE);
bindWA->setMaxResultSets(0);
// try PUBLIC SCHEMA only when no schema was specified
// and CQD PUBLIC_SCHEMA_NAME is specified
NAString pSchema =
ActiveSchemaDB()->getDefaults().getValue(PUBLIC_SCHEMA_NAME);
ComSchemaName pubSchema(pSchema);
NAString pubSchemaIntName = "";
if ( (getRoutineName().getSchemaName().isNull()) &&
(!pubSchema.getSchemaNamePart().isEmpty()) )
{
pubSchemaIntName = pubSchema.getSchemaNamePart().getInternalName();
}
// Invoke GetNARoutine () to retrieve the corresponding NARoutine from
// NARoutineDB_
QualifiedName name = getRoutineName();
const SchemaName &defaultSchema =
bindWA->getSchemaDB ()->getDefaultSchema();
name.applyDefaults(defaultSchema);
setRoutineName(name);
bindWA->setCurrSPName(&name);
// in open source, only the SEABASE catalog is allowed.
// Return an error if some other catalog is being used.
if ((NOT name.isSeabase()) && (NOT name.isHive()))
{
*CmpCommon::diags()
<< DgSqlCode(-1002)
<< DgCatalogName(name.getCatalogName())
<< DgString0("");
bindWA->setErrStatus();
return NULL;
}
CmpSeabaseDDL cmpSBD((NAHeap*)bindWA->wHeap());
desc_struct *catRoutine =
cmpSBD.getSeabaseRoutineDesc(
name.getCatalogName(),
name.getSchemaName(),
name.getObjectName());
// try public schema
if ( !catRoutine &&
!pubSchemaIntName.isNull() )
{
getRoutineName().setSchemaName(pubSchemaIntName);
if ( !pubSchema.getCatalogNamePart().isEmpty() )
{
getRoutineName().setCatalogName(pubSchema.getCatalogNamePart().getInternalName());
}
// in open source, only the SEABASE catalog is allowed.
// Return an error if some other catalog is being used.
if ((NOT getRoutineName().isSeabase()) && (NOT getRoutineName().isHive()))
{
*CmpCommon::diags()
<< DgSqlCode(-1002)
<< DgCatalogName(getRoutineName().getCatalogName())
<< DgString0("");
bindWA->setErrStatus();
return NULL;
}
bindWA->resetErrStatus();
catRoutine =
cmpSBD.getSeabaseRoutineDesc(
getRoutineName().getCatalogName(),
getRoutineName().getSchemaName(),
getRoutineName().getObjectName());
if ( !bindWA->errStatus() && catRoutine )
{ // if found in public schema, do not show previous error
CmpCommon::diags()->clear();
}
}
if (bindWA->violateAccessDefaultSchemaOnly(getRoutineName()))
return NULL;
if ( NULL == catRoutine )
{
// A diagnostic error was set by the routine-descriptor read; we just need
// to make sure the rest of the compiler knows that an error occurred.
bindWA->setBindingCall (FALSE);
bindWA->setErrStatus ();
return this;
}
// Create a new NARoutine object
Int32 error = FALSE;
NARoutine *routine = new (bindWA->wHeap()) NARoutine ( getRoutineName(),
catRoutine,
bindWA,
error );
if ( bindWA->errStatus () )
{
// Error
bindWA->setBindingCall (FALSE);
bindWA->setErrStatus ();
return this;
}
NABoolean createRETDesc=TRUE;
RoutineDesc *rDesc = new (bindWA->wHeap()) RoutineDesc(bindWA, routine);
if (rDesc == NULL || bindWA->errStatus ())
{
// Error
bindWA->setBindingCall (FALSE);
bindWA->setErrStatus ();
return this;
}
if (rDesc->populateRoutineDesc(bindWA, createRETDesc) == FALSE )
{
bindWA->setBindingCall (FALSE);
bindWA->setErrStatus ();
return this;
}
setRoutineDesc(rDesc);
//
// Semantic checks
//
// If in a trigger and during DDL, make sure to fix up the name
// location list so that the name is fully qualified when stored
// in the TEXT metadata table.
if ( bindWA->inDDL() && bindWA->isInTrigger () )
{
ParNameLocList *pNameLocList = bindWA->getNameLocListPtr();
if (pNameLocList)
{
ParNameLoc * pNameLoc
= pNameLocList->getNameLocPtr(getRoutineName().getNamePosition());
CMPASSERT(pNameLoc);
pNameLoc->setExpandedName(getRoutineName().getQualifiedNameAsAnsiString());
}
}
// Cannot support result sets or out params when
// SP is invoked within a trigger
if ( bindWA->isInTrigger () &&
getNARoutine()->hasOutParams ())
{
*CmpCommon::diags() << DgSqlCode(-UDR_BINDER_OUTPARAM_IN_TRIGGER)
<< DgTableName (getRoutineName().getQualifiedNameAsString());
bindWA->setErrStatus ();
bindWA->setBindingCall (FALSE);
return this;
}
if ( bindWA->isInTrigger () &&
getNARoutine()->hasResultSets ())
{
*CmpCommon::diags() << DgSqlCode(-UDR_BINDER_RESULTSETS_IN_TRIGGER)
<< DgTableName (getRoutineName().getQualifiedNameAsString());
bindWA->setErrStatus ();
bindWA->setBindingCall (FALSE);
return this;
}
const NAColumnArray ¶ms = getNARoutine()->getParams ();
CollIndex i = 0;
CollIndex numParams = getNARoutine()->getParamCount ();
CollIndex numSuppliedParams = countSuppliedParams (getRWProcAllParamsTree());
if (numSuppliedParams != numParams)
{
*CmpCommon::diags() << DgSqlCode(-UDR_BINDER_INCORRECT_PARAM_COUNT)
<< DgTableName(getRoutineName().getQualifiedNameAsString())
<< DgInt0((Lng32) numParams)
<< DgInt1((Lng32) numSuppliedParams);
bindWA->setErrStatus ();
bindWA->setBindingCall (FALSE);
return this;
}
short numResultSets = (short) getNARoutine()->getMaxResults();
bindWA->setMaxResultSets(numResultSets);
// On to the binding
// Invoke populateAndBindItemExpr, set up needed data structures
// Set up a RETDesc if we don't already have one.
RETDesc *resultTable = getRETDesc();
if (resultTable == NULL)
{
resultTable = new (bindWA->wHeap()) RETDesc(bindWA);
setRETDesc(resultTable);
}
populateAndBindItemExpr ( getRWProcAllParamsTree(),
bindWA );
if ( bindWA->errStatus ())
{
bindWA->setBindingCall (FALSE);
return this;
}
// Clear the tree since we now have ValueIds for all the parameters.
setProcAllParamsTree(NULL);
// Now fix the param index value of the dynamic params or host vars
LIST (ItemExpr *) &bWA_HVorDPs = bindWA->getSpHVDPs();
CollIndex numHVorDPs = bWA_HVorDPs.entries();
ARRAY(ItemExpr *) local_HVorDPs(numHVorDPs);
CollIndex idx, idx1, idx2;
// Sort the ItemExpr in the order they appeared in the stmt
for (idx = 0; idx < numHVorDPs; idx++)
{
// Copy ItemExpr ptrs to a sorted Array.
local_HVorDPs.insertAt(bWA_HVorDPs[idx]->getHVorDPIndex() - 1,
bWA_HVorDPs[idx]);
}
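// local_HVorDPs is now ordered by original statement position
// (array index = getHVorDPIndex() - 1).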
// The following code goes through the list of Exprs and
// sets index values. The rules are:
// 1. When a DP or HV is repeated, all of them get the same
// index value which is equal to the index of the first occurrence
// 2. Two DPs or HVs are same if their names and the modes are same.
Int32 currParamIndex = 1;
for (idx1 = 0; idx1 < numHVorDPs; idx1++)
{
ItemExpr *src = local_HVorDPs[idx1];
const NAString &name1 = (src->getOperatorType() == ITM_HOSTVAR) ?
((HostVar *)src)->getName() : ((DynamicParam *)src)->getName();
ComColumnDirection mode1 = src->getParamMode();
NABoolean encounteredElement = FALSE;
for (idx2 = idx1; idx2 < numHVorDPs; idx2++)
{
ItemExpr *dest = local_HVorDPs[idx2];
if (!encounteredElement && dest->getHVorDPIndex() >= currParamIndex)
{
// The parameter is encountered the first time
encounteredElement = TRUE;
dest->setPMOrdPosAndIndex(dest->getParamMode(),
dest->getOrdinalPosition(),
currParamIndex);
continue;
}
// The parameter is already corrected
if (dest->getHVorDPIndex() < currParamIndex)
continue;
const NAString &name2 = (dest->getOperatorType() == ITM_HOSTVAR) ?
((HostVar *)dest)->getName() : ((DynamicParam *)dest)->getName();
ComColumnDirection mode2 = dest->getParamMode();
if (name2.compareTo("") == 0)
continue;
if (name1.compareTo(name2) == 0 && mode1 == mode2)
{
dest->setPMOrdPosAndIndex(dest->getParamMode(),
dest->getOrdinalPosition(),
currParamIndex);
}
}
if (encounteredElement)
currParamIndex++;
}
// Restore the bindWA's HVorDP list since it might be needed
// while binding the root node in case of HVs.
bindWA->clearHVorDPinSPDups();
for (idx = 0; idx < numHVorDPs; idx++)
bindWA->addHVorDPToSPDups(local_HVorDPs[idx]);
// Create a tuple child for any subqueries or UDF inputs
// The hasSubquery() / hasUDF() flag gets set in setInOrOutParam if any of our
// passed in parameters is a subquery.
if ((getProcInputParamsVids().entries() != 0) &&
(hasSubquery() || hasUDF()))
{
Tuple *inTuple = new (bindWA->wHeap())
Tuple(getProcInputParamsVids().rebuildExprTree(ITM_ITEM_LIST),
bindWA->wHeap());
if ( inTuple )
{
// Now set and bind the Tuple child
setChild (0, inTuple);
// Bind this Tuple child
inTuple->bindNode (bindWA);
if ( bindWA->errStatus ())
{
bindWA->setBindingCall (FALSE);
return this;
}
// Get each IN entry from the Tuple and put it in
// the super's list
// Need to clear the list to avoid duplicates
getProcInputParamsVids().clear();
// Now reinitialize the inputs based on the Tuple processing.
inTuple->getRETDesc ()->getValueIdList (getProcInputParamsVids());
} // if inTuple
else
{
// Out of memory ...
bindWA->setBindingCall (FALSE);
bindWA->setErrStatus();
return this;
}
} // if getProcInputParamVids().entries()
else
{
// If we don't have a subquery parameter, we don't need to go through
// optimization-time rules and transformations, hence mark this
// as a physical node.
isPhysical_ = TRUE;
}
//
// Not sure whether we need to set the currently scoped RETDesc
// before binding the base class. Tuple::bindNode() does not do it
// so we won't either (for now)
//
//bindWA->getCurrentScope()->setRETDesc(getRETDesc());
// add the routine to the UdrStoiList. The UdrStoi list is used
// to check valid privileges
LIST(OptUdrOpenInfo *) udrList = bindWA->getUdrStoiList ();
ULng32 numUdrs = udrList.entries();
NABoolean udrReferenced = FALSE;
// See if UDR already exists
for (ULng32 stoiIndex = 0; stoiIndex < numUdrs; stoiIndex++)
{
if ( 0 ==
udrList[stoiIndex]->getUdrName().compareTo(
getRoutineName().getQualifiedNameAsAnsiString()
)
)
{
udrReferenced = TRUE;
break;
}
}
// UDR has not been defined, go ahead and add one
if ( FALSE == udrReferenced )
{
SqlTableOpenInfo *udrStoi = new (bindWA->wHeap ())SqlTableOpenInfo ();
udrStoi->setAnsiName ( convertNAString(
getRoutineName().getQualifiedNameAsAnsiString(),
bindWA->wHeap ())
);
OptUdrOpenInfo *udrOpenInfo = new (bindWA->wHeap ())
OptUdrOpenInfo( udrStoi
, getRoutineName().getQualifiedNameAsAnsiString()
, (NARoutine *)getNARoutine()
);
bindWA->getUdrStoiList().insert(udrOpenInfo);
}
//
// Bind the base class
//
RelExpr *boundExpr = bindSelf(bindWA);
if (bindWA->errStatus())
{
bindWA->setBindingCall (FALSE);
return boundExpr;
}
// Our characteristic inputs get set for us, we don't need to do it
// ourselves, however, we need to set our characteristic outputs
getGroupAttr()->addCharacteristicOutputs(getProcOutputParamsVids());
if (getNARoutine()->isProcedure())
bindWA->setHasCallStmts(TRUE);
bindWA->setBindingCall (FALSE);
return boundExpr;
} // CallSP::bindNode()
// This is the main entry point to walking the ItemExpr tree built by the
// parser, separating the IN and OUT parameters, setting appropriate
// characteristics of the IN/OUT parameters and binding them
// Currently only CallSP uses this code. If this routine needs to be shared
void IsolatedNonTableUDR::populateAndBindItemExpr ( ItemExpr *param,
BindWA *bindWA )
{
// This method is called recursively
CollIndex numParams = getEffectiveNARoutine()->getParamCount ();
CollIndex ordinalPosition = bindWA->getCurrOrdinalPosition ();
// No parameters, or we are done with the leaf node
if ( NULL == param )
{
return;
}
ComColumnDirection mode =
getEffectiveNARoutine()->getParams()[ordinalPosition-1]->getColumnMode ();
// This is the structure of the ItemExpr tree
// For 1 param
// ItemExpr
//
// 2 params
// ItemList
// / \
// Param1 Param2
//
// > 2 params
// ItemList
// / \
// Param1 ItemList
// / \
// Param2 ItemList
// ... ...
// ... ...
// / / \
// Param (N-2) / \
// / \
// Param(N-1) Param(N)
if ( ITM_ITEM_LIST == param->getOperatorType ())
{
// Use left child
CMPASSERT ((ItemExpr *) NULL != (*param).child (0));
populateAndBindItemExpr ( (*param).child(0),
bindWA );
if ( bindWA->errStatus ())
return;
// Now for the right child
CMPASSERT ((ItemExpr *) NULL != (*param).child (1));
populateAndBindItemExpr ( (*param).child(1),
bindWA );
return;
} // if ITM_ITEM_LIST == param->getOperatorType ()
// For all leaf nodes we must come here (see the recursive call to
// populateAndBindItemExpr above)
// Set the bindWA's current ordinal position and parameter mode
// Let HV and DynamicParam's bindNode take care of the
// settings. To ensure this, do a bindNode here
bindWA->setCurrParamMode (mode);
param->bindNode (bindWA);
if (bindWA->errStatus ())
return;
// Add the IN or OUT params to their respective lists
// and also create and bind a new ItemExpr for INOUT and OUT
// params.
// Also bump up the ordinalPosition count since we are done with this
// parameter.
setInOrOutParam (param,/* ordinalPosition,*/ mode, bindWA);
if ( bindWA->errStatus ())
return;
bindWA->setCurrOrdinalPosition (bindWA->getCurrOrdinalPosition () + 1);
} // populateAndBindItemExpr
// LCOV_EXCL_START - rfi
void
IsolatedNonTableUDR::setInOrOutParam (ItemExpr *expr,
ComColumnDirection paramMode,
BindWA *bindWA)
{
// Should not get here..
CMPASSERT(FALSE);
}
// LCOV_EXCL_STOP
// This method separates the IN and OUT parameters Each IN/INOUT param
// is cast to the formal type (from NARoutine). This Cast'ed item expr
// is added to an ItemList tree to be passed to the Tuple ()
// constructor. For each OUT/INOUT, we create a NATypeToItem
// ItemExpr, bind it and add it to procOutParams_.
//
// This method is called once for each CALL statement argument. If an
// input argument to a CALL is an expression tree such as "? + ?" or
// "abs(:x)" then this method is called once for the entire tree.
//
// Side Effects: OUT: hasSubquery_
// neededValueIds_
// procAllParamsVids_
// procInputParamsVids_
// procOutputParamsVids_
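//
// Illustrative example (assumed types, not from the original source):
// for CALL p(:x) where :x is declared SMALLINT but the formal IN
// parameter is INTEGER, the bound :x is wrapped in a Cast to INTEGER
// below, and it is that cast's ValueId which lands in
// procAllParamsVids_ / procInputParamsVids_.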
void CallSP::setInOrOutParam ( ItemExpr *expr,
ComColumnDirection paramMode,
BindWA *bindWA)
{
// Depending on whether this is an IN or OUT parameter, we need to
// take different actions.
// For an IN (and INOUT) param, do the following
// Cast the parameter to its formal type and add it to the list of
// IN params. This will be used later to create a Tuple child and
// be bound by the Tuple itself
CollIndex ordinalPosition = bindWA->getCurrOrdinalPosition ();
const NAColumnArray &formalParams = getNARoutine()->getParams();
NAColumn &naColumn = *(formalParams[ordinalPosition-1]);
const NAType ¶mType = *(naColumn.getType());
// Don't really want to bind this, but how else can we
// get the ItemExpr's type
ItemExpr *boundExpr = expr->bindNode (bindWA);
if ( bindWA->errStatus ())
{
return;
}
//10-061031-0188-Begin
//Need to infer charset for string literals part of CALLSP
//parameters
ValueId inputTypeId = boundExpr->getValueId();
if(inputTypeId.getType().getTypeQualifier() == NA_CHARACTER_TYPE)
{
const CharType* stringLiteral = (CharType*)&(inputTypeId.getType());
if(CmpCommon::wantCharSetInference())
{
const CharType* desiredType =
CharType::findPushDownCharType(((CharType&)paramType).getCharSet(), stringLiteral, 0);
if ( desiredType )
inputTypeId.coerceType((NAType&)*desiredType, NA_CHARACTER_TYPE);
}
}
NABoolean throwInTranslateNode = FALSE;
CharInfo::CharSet paramCS = CharInfo::UnknownCharSet;
CharInfo::CharSet inputCS = CharInfo::UnknownCharSet;
const NABoolean isJdbc =
(CmpCommon::getDefault(JDBC_PROCESS) == DF_ON ? TRUE : FALSE);
const NABoolean isOdbc =
(CmpCommon::getDefault(ODBC_PROCESS) == DF_ON ? TRUE : FALSE);
const NAType &inputType = inputTypeId.getType();
//10-061031-0188-End
if ( COM_INPUT_COLUMN == paramMode ||
COM_INOUT_COLUMN == paramMode )
{
// If this input argument to the CALL is a single dynamic param
// then we want to record the formal parameter name. It will later
// be written into the query plan by the code generator and
// eventually if this CALL statement is DESCRIBEd, the formal
// param name gets returned in the SQLDESC_NAME descriptor entry.
if (expr->getOperatorType() == ITM_DYN_PARAM)
{
DynamicParam *dp = (DynamicParam *) expr;
dp->setUdrFormalParamName(naColumn.getColName());
}
// Check to see if we have a Subquery as an input
if ( !hasSubquery() )
hasSubquery() = expr->containsSubquery ();
// Check to see if we have a UDF as an input
if ( !hasUDF() )
hasUDF() = (expr->containsUDF () != NULL);
// Do type checking,
// If it is not a compatible type report an error
if (!( NA_UNKNOWN_TYPE == inputType.getTypeQualifier () ||
paramType.isCompatible(inputType) ||
expr->getOperatorType () == ITM_DYN_PARAM
)
)
{
if ( inputType.getTypeQualifier() == NA_CHARACTER_TYPE )
{
paramCS = ((CharType&)paramType).getCharSet();
inputCS = ((CharType&)inputType).getCharSet();
NABoolean CS_unknown = (paramCS == CharInfo::UnknownCharSet) ||
(inputCS == CharInfo::UnknownCharSet) ;
if ( paramType.NAType::isCompatible(inputType) &&
paramCS != inputCS &&
CS_unknown == FALSE &&
CmpCommon::getDefault(ALLOW_IMPLICIT_CHAR_CASTING) == DF_ON
)
throwInTranslateNode = TRUE;
}
if ( throwInTranslateNode == FALSE )
{
// Error, data types don't match
#pragma nowarn(1506) // warning elimination
*CmpCommon::diags() << DgSqlCode(-UDR_BINDER_PARAM_TYPE_MISMATCH)
<< DgInt0 (ordinalPosition)
<< DgTableName(getRoutineName().getQualifiedNameAsString())
<< DgString0 (inputType.getTypeSQLname (TRUE))
<< DgString1 (paramType.getTypeSQLname (TRUE));
#pragma warn(1506) // warning elimination
bindWA->setErrStatus ();
return;
}
} // if NOT isCompatible
// Create a Cast node if the types are not identical
if (! (inputType == paramType))
{
// First create a Translate node if the character sets are not identical
if ( throwInTranslateNode )
{
Int32 tran_type = find_translate_type( inputCS, paramCS );
ItemExpr * newTranslateChild =
new (bindWA->wHeap()) Translate(boundExpr, tran_type );
boundExpr = newTranslateChild->bindNode(bindWA);
if (bindWA->errStatus())
return;
// NOTE: Leave "expr" at its old value as code below needs to check
// that original ItemExpr rather than the new Translate node.
}
Cast *retExpr = new (bindWA->wHeap())
Cast(boundExpr, ¶mType, ITM_CAST, TRUE);
boundExpr = retExpr->bindNode (bindWA);
if ( bindWA->errStatus ())
{
return;
}
}
// Fill the ValueIdList for all the params
getProcAllParamsVids().insert( boundExpr->getValueId());
// Fill the ValueIdList for Input params
getProcInputParamsVids().insert( boundExpr->getValueId());
} // if INPUT or INOUT
// For OUT (and INOUT) parameters, we create a NATypeToItem object,
// bind it and add it to the list of OUT parameters (procOutParams_)
if ( COM_OUTPUT_COLUMN == paramMode ||
COM_INOUT_COLUMN == paramMode )
{
if (!( ITM_HOSTVAR == expr->getOperatorType () ||
ITM_DYN_PARAM == expr->getOperatorType ()))
{
#pragma nowarn(1506) // warning elimination
*CmpCommon::diags() << DgSqlCode(-UDR_BINDER_OUTVAR_NOT_HV_OR_DP)
<< DgInt0(ordinalPosition)
<< DgTableName(getRoutineName().getQualifiedNameAsString());
#pragma warn(1506) // warning elimination
bindWA->setErrStatus ();
return;
} // if NOT HOSTVAR or DYNAMIC PARAM
NATypeToItem *paramTypeItem = new (bindWA->wHeap())
NATypeToItem (naColumn.mutateType());
ItemExpr *outputExprToBind = NULL;
outputExprToBind = paramTypeItem->bindNode (bindWA);
if ( bindWA->errStatus ())
{
return;
}
// Fill the ValueIdList for all the params
getProcAllParamsVids().insert( outputExprToBind->getValueId());
// Fill the ValueIdList for the output params
addProcOutputParamsVid(outputExprToBind->getValueId ());
//
// Populate our RETDesc
//
// It has already been allocated
RETDesc *resultTable = getRETDesc();
const NAString &formalParamName = naColumn.getColName();
const NAString *colParamName = &formalParamName;
// Set the userParamName
const NAString &userParamName =
// cannot use the boundExpr here as it will be a cast()
// for the HostVar or DynamicParam. Use the original
// ItemExpr pointer instead.
(ITM_HOSTVAR == expr->getOperatorType()) ?
((HostVar *)expr)->getName() :
((DynamicParam *)expr)->getName();
// Typically the name for this output column will be the formal
// parameter name. Exceptions:
// - No formal name was specified in the CREATE PROCEDURE. Use
// the (possibly empty) dynamic parameter or host variable name
// instead.
// - This is a JDBC or ODBC compile and the client is using a
// named host variable or dynamic parameter. JDBC and ODBC want
// us to use the client's name in this case.
if (formalParamName.isNull() ||
(!userParamName.isNull() && (isJdbc || isOdbc)))
{
colParamName = &userParamName;
}
ColRefName *columnName =
new (bindWA->wHeap())
ColRefName(*colParamName, bindWA->wHeap());
resultTable->addColumn(bindWA, *columnName, outputExprToBind->getValueId());
//
// We need the following line for static cursor declaration,
// according to a comment in bindRowValues()
//
cmpCurrentContext->saveRetrievedCols_ = resultTable->getDegree();
} // if OUTPUT or INOUT
} // setInOrOutParam
CollIndex RelRoutine::countSuppliedParams (ItemExpr *tree) const
{
CollIndex numParams=0;
if ( !tree ) return 0;
if (ITM_ITEM_LIST == tree->getOperatorType ())
{
numParams += countSuppliedParams (tree->child (0));
numParams += countSuppliedParams (tree->child (1));
}
else
numParams++;
return numParams;
} // RelRoutine::countSuppliedParams
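// (illustrative: for a supplied argument list (a, b, c) the parser
// builds the right-deep ItemList tree sketched in
// populateAndBindItemExpr(), and this walk counts its 3 leaves)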
void RelRoutine::gatherParamValueIds (const ItemExpr *tree, ValueIdList ¶msList) const
{
if ( !tree ) return;
if (ITM_ITEM_LIST == tree->getOperatorType ())
{
gatherParamValueIds (tree->child (0), paramsList);
gatherParamValueIds (tree->child (1), paramsList);
}
else
paramsList.insert(tree->getValueId());
} // RelRoutine::gatherParamValueIds
void ProxyFunc::createProxyFuncTableDesc(BindWA *bindWA, CorrName &corrName)
{
// Map column definitions into a desc_struct
desc_struct *tableDesc = createVirtualTableDesc();
// Map the desc_struct into an NATable. This will also add an
// NATable entry into the bindWA's NATableDB.
NATable *naTable =
bindWA->getNATable(corrName, FALSE /*catmanUsages*/, tableDesc);
if (bindWA->errStatus())
return;
// Allocate a TableDesc and attach it to this RelExpr instance
setTableDesc(bindWA->createTableDesc(naTable, corrName));
if (bindWA->errStatus())
return;
// Allocate a RETDesc and attach it to this and the BindScope
setRETDesc(new (bindWA->wHeap()) RETDesc(bindWA, getTableDesc()));
bindWA->getCurrentScope()->setRETDesc(getRETDesc());
}
RelExpr *ProxyFunc::bindNode(BindWA *bindWA)
{
// This method now serves as a common bind node for SPProxy and
// ExtractSource classes, where we before had SPProxyFunc::bindNode()
// and ExtractSource::bindNode().
if (nodeIsBound())
{
bindWA->getCurrentScope()->setRETDesc(getRETDesc());
return this;
}
// Bind the child nodes
bindChildren(bindWA);
if (bindWA->errStatus())
return this;
// Declare a correlation name that is unique within this query
switch (getOperatorType())
{
case REL_EXTRACT_SOURCE:
virtualTableName_ = "EXTRACT_SOURCE_";
break;
case REL_SP_PROXY:
virtualTableName_ = "SP_RESULT_SET_";
break;
default:
CMPASSERT(0);
break;
}
virtualTableName_ += bindWA->fabricateUniqueName();
CorrName corrName(getVirtualTableName());
corrName.setSpecialType(ExtendedQualName::VIRTUAL_TABLE);
createProxyFuncTableDesc(bindWA, corrName);
if (bindWA->errStatus())
return this;
// Bind the base class
RelExpr *boundExpr = bindSelf(bindWA);
if (bindWA->errStatus())
return boundExpr;
// Assign the set of columns that belong to the virtual table
// as the output values that can be produced by this node.
getGroupAttr()->addCharacteristicOutputs(getTableDesc()->getColumnList());
return boundExpr;
} // ProxyFunc::bindNode()
RelExpr *TableMappingUDF::bindNode(BindWA *bindWA)
{
if (nodeIsBound())
{
bindWA->getCurrentScope()->setRETDesc(getRETDesc());
return this;
}
// Create NARoutine object (no caching for TMUDF)
NARoutine *tmudfRoutine = NULL;
CorrName& tmfuncName = getUserTableName();
tmfuncName.setSpecialType(ExtendedQualName::VIRTUAL_TABLE);
QualifiedName name = getRoutineName();
const SchemaName &defaultSchema =
bindWA->getSchemaDB ()->getDefaultSchema();
name.applyDefaults(defaultSchema);
setRoutineName(name);
// Return an error if an unsupported catalog is being used.
if ((NOT name.isSeabase()) && (NOT name.isHive()))
{
*CmpCommon::diags()
<< DgSqlCode(-1002)
<< DgCatalogName(name.getCatalogName())
<< DgString0("");
bindWA->setErrStatus();
return NULL;
}
Lng32 diagsMark = CmpCommon::diags()->mark();
NABoolean errStatus = bindWA->errStatus();
tmudfRoutine = getRoutineMetadata(name, tmfuncName, bindWA);
if (tmudfRoutine == NULL)
{
// this could be a predefined TMUDF, which is not
// recorded in the metadata at this time
OperatorTypeEnum opType =
PredefinedTableMappingFunction::nameIsAPredefinedTMF(tmfuncName);
if (opType != REL_TABLE_MAPPING_UDF)
{
// yes, this is a predefined TMUDF
PredefinedTableMappingFunction *result;
// discard the errors from the failed name lookup
CmpCommon::diags()->rewind(diagsMark);
if (!errStatus)
bindWA->resetErrStatus();
// create a new RelExpr
result = new(bindWA->wHeap())
PredefinedTableMappingFunction(
tmfuncName,
const_cast<ItemExpr *>(getProcAllParamsTree()),
opType);
// copy data members of the base classes
TableMappingUDF::copyTopNode(result);
// set children
result->setArity(getArity());
for (int i=0; i<getArity(); i++)
result->child(i) = child(i);
// Abandon the current node and return the bound new node.
// Next time it will reach this method it will call an
// overloaded getRoutineMetadata() that will succeed.
return result->bindNode(bindWA);
}
// getRoutineMetadata has already set the diagnostics area
// and set the error status
CMPASSERT(bindWA->errStatus());
return NULL;
}
// Bind the child nodes.
bindChildren(bindWA);
if (bindWA->errStatus())
return this;
// Use information from child to populate childInfo_
NAHeap *heap = CmpCommon::statementHeap();
for(Int32 i = 0; i < getArity(); i++)
{
NAString childName(heap);
NAColumnArray childColumns(heap) ;
RETDesc *childRetDesc = child(i)->getRETDesc();
// Get Name
LIST(CorrName*) nameList;
childRetDesc->getXTNM().dumpKeys(nameList);
if (nameList.entries() == 1)
{
childName = (nameList[0])->getExposedNameAsString();
}
else
{
childName = "_inputTable" + bindWA->fabricateUniqueName();
}
// ask for histograms of all child outputs, since we don't
// know what the UDF will need and what predicates exist
// on passthru columns of the UDF
bindWA->getCurrentScope()->context()->inWhereClause() = TRUE;
// Get NAColumns
CollIndex numChildCols = childRetDesc->getColumnList()->entries();
for(CollIndex j=0; j < numChildCols; j++)
{
NAColumn * childCol = new (heap) NAColumn(
childRetDesc->getColRefNameObj(j).getColName().data(),
j,
childRetDesc->getType(j).newCopy(heap),
heap);
childColumns.insert(childCol);
bindWA->markAsReferencedColumn(childRetDesc->getValueId(j));
}
bindWA->getCurrentScope()->context()->inWhereClause() = FALSE;
// get child root
CMPASSERT(child(i)->getOperator().match(REL_ROOT) ||
child(i)->getOperator().match(REL_RENAME_TABLE));
RelRoot * myChild;
if (child(i)->getOperator().match(REL_RENAME_TABLE))
myChild = (RelRoot *) (child(i)->child(0).getPtr());
else
myChild = (RelRoot *) child(i).getPtr();
// output vidList from child RetDesc,
// can also get from child Root compExpr
ValueIdList vidList;
childRetDesc->getValueIdList(vidList, USER_COLUMN);
ValueIdSet childPartition(myChild->partitionArrangement());
ValueIdList childOrder(myChild->reqdOrder());
// request multi-column histograms for the PARTITION BY columns
bindWA->getCurrentScope()->context()->inGroupByClause() = TRUE;
// replace 1-based ordinals in the child's partition by / order by with
// actual columns
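// (illustrative: "PARTITION BY 2" in the child denotes the child's
// second output column, i.e. vidList[1])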
for (ValueId cp=childPartition.init();
childPartition.next(cp);
childPartition.advance(cp))
{
NABoolean negate;
ConstValue *cv = cp.getItemExpr()->castToConstValue(negate);
if (cv &&
cv->canGetExactNumericValue())
{
Lng32 scale = 0;
Int64 ordinal = cv->getExactNumericValue(scale);
if (!negate && scale == 0 && ordinal >= 1 && ordinal <= vidList.entries())
{
// remove this ValueId from the set and add the corresponding
// column value. Note that this won't cause problems with the
// iterator through the set, since we don't need to apply
// this conversion on the new element we are inserting
childPartition -= cp;
childPartition += vidList[ordinal-1];
}
else
{
*CmpCommon::diags()
<< DgSqlCode(-11154)
<< DgInt0(ordinal)
<< DgString0("PARTITION BY")
<< DgInt1(vidList.entries());
bindWA->setErrStatus();
return NULL;
}
}
bindWA->markAsReferencedColumn(cp);
}
bindWA->getCurrentScope()->context()->inGroupByClause() = FALSE;
for (CollIndex co=0; co<childOrder.entries(); co++)
{
NABoolean negate;
ItemExpr *ie = childOrder[co].getItemExpr();
ConstValue *cv = NULL;
if (ie->getOperatorType() == ITM_INVERSE)
ie = ie->child(0);
cv = ie->castToConstValue(negate);
if (cv &&
cv->canGetExactNumericValue())
{
Lng32 scale = 0;
Int64 ordinal = cv->getExactNumericValue(scale);
// replace the const value with the actual column
if (!negate && scale == 0 && ordinal >= 1 && ordinal <= vidList.entries())
if (ie == childOrder[co].getItemExpr())
{
// ascending order
childOrder[co] = vidList[ordinal-1];
}
else
{
// desc order, need to add an InverseOrder on top
ItemExpr *inv = new(bindWA->wHeap()) InverseOrder(
vidList[ordinal-1].getItemExpr());
inv->synthTypeAndValueId();
childOrder[co] = inv->getValueId();
}
else
{
*CmpCommon::diags()
<< DgSqlCode(-11154)
<< DgInt0(ordinal)
<< DgString0("ORDER BY")
<< DgInt1(vidList.entries());
bindWA->setErrStatus();
return NULL;
}
}
}
TableMappingUDFChildInfo * cInfo = new (heap) TableMappingUDFChildInfo(
childName,
childColumns,
myChild->getPartReqType(),
childPartition,
childOrder,
vidList);
childInfo_.insert(cInfo);
}
RoutineDesc *tmudfRoutineDesc = new (bindWA->wHeap()) RoutineDesc(bindWA, tmudfRoutine);
if (tmudfRoutineDesc == NULL || bindWA->errStatus ())
{
// Error
bindWA->setBindingCall (FALSE);
bindWA->setErrStatus ();
return this;
}
setRoutineDesc(tmudfRoutineDesc);
// xcnm will be empty because the routineDesc does not contain any
// output columns yet
RETDesc *rDesc = new (bindWA->wHeap()) RETDesc(bindWA, tmudfRoutineDesc);
bindWA->getCurrentScope()->setRETDesc(rDesc);
setRETDesc(rDesc);
dllInteraction_ = new (bindWA->wHeap()) TMUDFDllInteraction();
// ValueIDList of the actual input parameters
// (tmudfRoutine has formal parameters)
if (getProcAllParamsTree() && (getProcAllParamsVids().isEmpty() == TRUE))
{
((ItemExpr *)getProcAllParamsTree())->convertToValueIdList(
getProcAllParamsVids(), bindWA, ITM_ITEM_LIST);
if (bindWA->errStatus()) return NULL;
// Clear the Tree since we now have gotten vids for all the parameters.
setProcAllParamsTree(NULL);
}
getProcInputParamsVids().insert(getProcAllParamsVids());
// invoke the optional UDF compiler interface or a default
// implementation to validate scalar inputs and produce a list of
// output columns
NABoolean status = dllInteraction_->describeParamsAndMaxOutputs(this, bindWA);
if (!status)
{
bindWA->setErrStatus();
return NULL;
}
checkAndCoerceScalarInputParamTypes(bindWA);
if (bindWA->errStatus())
return NULL;
createOutputVids(bindWA);
if (bindWA->errStatus())
return NULL;
// create a ValueIdMap that allows us to translate
// output columns that are passed through back to
// input columns (outputs of the child), this can
// be used to push down predicates, translate
// required order and partitioning, etc.
status = dllInteraction_->createOutputInputColumnMap(
this,
udfOutputToChildInputMap_);
if (!status)
{
bindWA->setErrStatus();
return NULL;
}
// if this is a maintenance-type operation that must run on
// all nodes of the cluster or must run in parallel, regardless
// of the ATTEMPT_ESP_PARALLELISM CQD, then set a flag in the
// root node
if (getOperatorType() == REL_TABLE_MAPPING_BUILTIN_LOG_READER)
bindWA->getTopRoot()->setMustUseESPs(TRUE);
// add the routine to the UdrStoiList. The UdrStoi list is used
// to check valid privileges
LIST(OptUdrOpenInfo *) udrList = bindWA->getUdrStoiList ();
ULng32 numUdrs = udrList.entries();
NABoolean udrReferenced = FALSE;
// See if UDR already exists
for (ULng32 stoiIndex = 0; stoiIndex < numUdrs; stoiIndex++)
{
if ( 0 ==
udrList[stoiIndex]->getUdrName().compareTo(
getRoutineName().getQualifiedNameAsAnsiString()
)
)
{
udrReferenced = TRUE;
break;
}
}
// UDR has not been defined, go ahead and add one
if ( FALSE == udrReferenced )
{
SqlTableOpenInfo *udrStoi = new (bindWA->wHeap ())SqlTableOpenInfo ();
udrStoi->setAnsiName ( convertNAString(
getRoutineName().getQualifiedNameAsAnsiString(),
bindWA->wHeap ())
);
OptUdrOpenInfo *udrOpenInfo = new (bindWA->wHeap ())
OptUdrOpenInfo( udrStoi
, getRoutineName().getQualifiedNameAsAnsiString()
, (NARoutine *)getNARoutine()
);
bindWA->getUdrStoiList().insert(udrOpenInfo);
}
RelExpr *boundExpr = bindSelf(bindWA);
if (bindWA->errStatus())
return NULL;
return boundExpr;
}
RelExpr * FastExtract::bindNode(BindWA *bindWA)
{
if (nodeIsBound())
{
bindWA->getCurrentScope()->setRETDesc(getRETDesc());
return this;
}
// check validity of target location
if (getTargetType() == FILE)
{
char reasonMsg[256];
NABoolean raiseError = FALSE;
if ((unsigned char)(getTargetName().data()[0]) != SLASH_C)
{
raiseError = TRUE;
sprintf(reasonMsg,"Relative path name was used");
}
else if (getTargetName().length() > 512)
{
raiseError = TRUE;
sprintf(reasonMsg,"Length exceeds 512 characters");
}
else
{
char * sqroot = getenv("MY_SQROOT");
if (sqroot && (! CmpCommon::context()->getSqlmxRegress()) &&
(strncmp(sqroot, getTargetName().data(),strlen(sqroot)) == 0))
{
raiseError = TRUE;
sprintf(reasonMsg,"Database system directory was used");
}
}
if (raiseError && strncmp(getTargetName().data(),"hdfs://",7) != 0 )
{
*CmpCommon::diags() << DgSqlCode(-4378) << DgString0(reasonMsg) ;
bindWA->setErrStatus();
return NULL;
}
}
if (getDelimiter().length() == 0)
{
delimiter_ = ActiveSchemaDB()->getDefaults().getValue(TRAF_UNLOAD_DEF_DELIMITER);
}
if (getNullString().length() == 0)
{
nullString_ = ActiveSchemaDB()->getDefaults().getValue(TRAF_UNLOAD_DEF_NULL_STRING);
}
if (getRecordSeparator().length() == 0)
{
recordSeparator_ = ActiveSchemaDB()->getDefaults().getValue(TRAF_UNLOAD_DEF_RECORD_SEPARATOR);
}
if (!isHiveInsert())
{
bindWA->setIsFastExtract();
}
// Bind the child nodes.
bindChildren(bindWA);
if (bindWA->errStatus())
return this;
// Use information from child to populate childInfo_
NAHeap *heap = CmpCommon::statementHeap();
RETDesc *childRETDesc = child(0)->getRETDesc();
// output vidList from child RetDesc,
// can also get from child Root compExpr
ValueIdList vidList;
childRETDesc->getValueIdList(vidList, USER_COLUMN);
setSelectList(vidList);
if (includeHeader())
{
const ColumnDescList &columnsRET = *(childRETDesc->getColumnList());
for (CollIndex i = 0; i < columnsRET.entries(); i++)
{
if (columnsRET[i]->getHeading())
header_ += columnsRET[i]->getHeading();
else if (!(columnsRET[i]->getColRefNameObj().isEmpty()))
header_ += columnsRET[i]->getColRefNameObj().getColName();
else
header_ += "EXPR";
if (i < (columnsRET.entries() -1))
{
header_ += " ";
header_ += delimiter_;
header_ += " ";
}
}
}
else
{
header_ = "NO_HEADER" ;
}
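// (illustrative: with child columns C1 and C2 and delimiter '|', the
// header built above is "C1 | C2")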
// no rows are returned from this operator.
// Allocate an empty RETDesc and attach it to this and the BindScope.
setRETDesc(new (bindWA->wHeap()) RETDesc(bindWA));
RelExpr *boundExpr = bindSelf(bindWA);
if (bindWA->errStatus()) return NULL;
return boundExpr;
}
RelExpr * ControlRunningQuery::bindNode(BindWA *bindWA)
{
if (nodeIsBound()) {
bindWA->getCurrentScope()->setRETDesc(getRETDesc());
return this;
}
//
// Check to see if user is authorized to control this query.
//
if (!isUserAuthorized(bindWA))
return NULL;
//
// Bind the child nodes.
//
bindChildren(bindWA);
if (bindWA->errStatus())
return this;
// no rows are returned from this operator.
// Allocate an empty RETDesc and attach it to this and the BindScope.
//
setRETDesc(new (bindWA->wHeap()) RETDesc(bindWA));
//
// Bind the base class.
//
RelExpr *boundExpr = bindSelf(bindWA);
if (bindWA->errStatus())
return boundExpr;
ValueIdSet ov;
getPotentialOutputValues(ov);
getGroupAttr()->addCharacteristicOutputs(ov);
return boundExpr;
} // ControlRunningQuery::bindNode()
bool ControlRunningQuery::isUserAuthorized(BindWA *bindWA)
{
bool userHasPriv = false;
Int32 sessionID = ComUser::getSessionUser();
// Check to see if the current user owns the query id.
// This only has to be done for the Cancel query request.
// This option to check privilege is not available unless
// the query Id was supplied.
if ((action_ == Cancel) &&
(qs_ == ControlQid))
{
// The user ID associated with the query is stored in the QID.
// To be safe, copy the QID to a character string.
Int32 qidLen = queryId_.length();
char *pQid = new (bindWA->wHeap()) char[qidLen+1];
str_cpy_all(pQid, queryId_.data(), qidLen);
pQid[qidLen] = '\0';
// Set up the returned parameters
// Max username can be (128 * 2) + 2 (delimiters) + 1 (null indicator)
char username[2 * MAX_USERNAME_LEN + 2 + 1];
Int64 usernameLen = sizeof(username) - 1;
// Call function to extract the username from the QID
Int32 retcode = ComSqlId::getSqlQueryIdAttr(ComSqlId::SQLQUERYID_USERNAME,
pQid,
qidLen,
usernameLen,
&username[0]);
if (retcode == 0)
{
// The username stored in the QID is actually the userID preceded by
// a "U". Check for a U and convert the succeeding characters
// to an integer. This integer value is compared against the current userID.
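// (illustrative: a QID username field of "U33333" yields userID 33333)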
username[usernameLen] = '\0';
if (username[0] == 'U')
{
Int64 userID = str_atoi(&username[1],usernameLen - 1);
if (sessionID == userID || sessionID == ComUser::getRootUserID())
userHasPriv = true;
}
// If userName does not begin with a 'U', ignore and continue
}
// If retcode != 0, continue, an invalid QID could be specified which
// is checked later in the code
}
// The current user does not own the query, see if the current user has
// the correct QUERY privilege. Code above only supports cancel, but other
// checks could be added. Component checks for all query operations.
if (!userHasPriv)
{
SQLOperation operation;
switch (ControlRunningQuery::action_)
{
case ControlRunningQuery::Suspend:
operation = SQLOperation::QUERY_SUSPEND;
break;
case ControlRunningQuery::Activate:
operation = SQLOperation::QUERY_ACTIVATE;
break;
case ControlRunningQuery::Cancel:
operation = SQLOperation::QUERY_CANCEL;
break;
default:
operation = SQLOperation::UNKNOWN;
}
NAString privMDLoc = CmpSeabaseDDL::getSystemCatalogStatic();
privMDLoc += ".\"";
privMDLoc += SEABASE_PRIVMGR_SCHEMA;
privMDLoc += "\"";
PrivMgrComponentPrivileges componentPriv(
privMDLoc.data(),CmpCommon::diags());
userHasPriv = componentPriv.hasSQLPriv(sessionID,operation,true);
if (!userHasPriv)
{
// ANSI requests a special SqlState for cancel requests
if (ControlRunningQuery::action_ == ControlRunningQuery::Cancel)
*CmpCommon::diags() << DgSqlCode(-8029);
else
*CmpCommon::diags() << DgSqlCode(-1017);
bindWA->setErrStatus();
}
if (bindWA->errStatus())
return false;
}
return true;
}// ControlRunningQuery::isUserAuthorized()
RelExpr * OSIMControl::bindNode(BindWA *bindWA)
{
if (nodeIsBound())
{
bindWA->getCurrentScope()->setRETDesc(getRETDesc());
return this;
}
//Create OptimizerSimulator if this is called the first time.
if(!CURRCONTEXT_OPTSIMULATOR)
CURRCONTEXT_OPTSIMULATOR = new(CTXTHEAP) OptimizerSimulator(CTXTHEAP);
//In response to the force option of osim load,
//e.g. osim load from '/xxx/xxx/osim-dir', force
//if true, when loading osim tables/views/indexes,
//existing objects with the same qualified name
//will be dropped first
CURRCONTEXT_OPTSIMULATOR->setForceLoad(isForceLoad());
//Set OSIM mode
if(!CURRCONTEXT_OPTSIMULATOR->setOsimModeAndLogDir(targetMode_, osimLocalDir_.data()))
{
bindWA->setErrStatus();
return this;
}
return ControlAbstractClass::bindNode(bindWA);
}
| 1 | 7,446 | I have forgotten why we thought this restriction is necessary. A user registers a Hive/HBase table with us but is not able to use the table with the registered name in DML. Will UPDATE STATs will be on the original name too (and update stats code will find out registered name and use it for Table_uid?). This was the registration concept is almost like an internal implementation idea, at some point could even be done by some utility program for all existing Hive/hBase tables, and the user need not be aware of it? I am struggling with a user would want to register a table, if they cannot use it by that name. | apache-trafodion | cpp |
@@ -40,6 +40,18 @@
struct ase_cfg_t *cfg;
+int app2sim_alloc_rx; // app2sim message queue in RX mode
+int sim2app_alloc_tx; // sim2app message queue in TX mode
+int app2sim_mmioreq_rx; // MMIO Request path
+int sim2app_mmiorsp_tx; // MMIO Response path
+int app2sim_umsg_rx; // UMSG message queue in RX mode
+int app2sim_portctrl_req_rx; // Port Control messages in Rx mode
+int app2sim_dealloc_rx;
+int sim2app_dealloc_tx;
+int sim2app_portctrl_rsp_tx;
+int sim2app_intr_request_tx;
+int intr_event_fds[MAX_USR_INTRS];
+
int glbl_test_cmplt_cnt; // Keeps the number of session_deinits received
volatile int sockserver_kill; | 1 | // Copyright(c) 2014-2017, Intel Corporation
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of Intel Corporation nor the names of its contributors
// may be used to endorse or promote products derived from this software
// without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
// **************************************************************************
/*
* Module Info:
* - Protocol backend for keeping IPCs alive
* - Interfacing with DPI-C, messaging
* - Interface to page table
*
* Language : C/C++
* Owner : Rahul R Sharma
* rahul.r.sharma@intel.com
* Intel Corporation
*
*/
#include "ase_common.h"
struct ase_cfg_t *cfg;
int glbl_test_cmplt_cnt; // Keeps the number of session_deinits received
volatile int sockserver_kill;
pthread_t socket_srv_tid;
// MMIO Response lock
// pthread_mutex_t mmio_resp_lock;
// Variable declarations
char tstamp_filepath[ASE_FILEPATH_LEN];
char ccip_sniffer_file_statpath[ASE_FILEPATH_LEN];
// CONFIG,SCRIPT parameter paths received from SV (initial)
char sv2c_config_filepath[ASE_FILEPATH_LEN];
char sv2c_script_filepath[ASE_FILEPATH_LEN];
// ASE PID
int ase_pid;
// Workspace information log (information dump of allocated buffers)
static FILE *fp_workspace_log;
// Memory access debug log
#ifdef ASE_DEBUG
FILE *fp_memaccess_log;
FILE *fp_pagetable_log;
#endif
uint64_t PHYS_ADDR_PREFIX_MASK;
int self_destruct_in_progress;
// work Directory location
char *ase_workdir_path;
// Incoming UMSG packet (allocated in ase_init, deallocated in start_simkill_countdown)
static struct umsgcmd_t *incoming_umsg_pkt;
// Incoming MMIO packet (allocated in ase_init, deallocated in start_simkill_countdown)
static struct mmio_t *incoming_mmio_pkt;
/*
* ASE capability register
* Purpose: This is the response for portctrl_cmd requests (as an ACK)
*/
struct ase_capability_t ase_capability = {
ASE_UNIQUE_ID,
/* UMsg feature */
#ifdef ASE_ENABLE_UMSG_FEATURE
1,
#else
0,
#endif
/* Interrupt feature interrupt */
#ifdef ASE_ENABLE_INTR_FEATURE
1,
#else
0,
#endif
/* 512-bit MMIO support */
#ifdef ASE_ENABLE_MMIO512
1
#else
0
#endif
};
const char *completed_str_msg = (char *)&ase_capability;
/*
* Generate scope data
*/
svScope scope;
void scope_function(void)
{
scope = svGetScope();
}
/*
* ASE instance already running
* - If instance is found, return its process ID, else return 0
*/
int ase_instance_running(void)
{
FUNC_CALL_ENTRY;
int ase_simv_pid;
// If Ready file does not exist
if (access(ASE_READY_FILENAME, F_OK) == -1) {
ase_simv_pid = 0;
}
// If ready file exists
else {
char *pwd_str;
pwd_str = ase_malloc(ASE_FILEPATH_LEN);
ase_simv_pid =
ase_read_lock_file(getcwd(pwd_str, ASE_FILEPATH_LEN));
free(pwd_str);
}
FUNC_CALL_EXIT;
return ase_simv_pid;
}
/*
* DPI: CONFIG path data exchange
*/
void sv2c_config_dex(const char *str)
{
// Allocate memory
memset(sv2c_config_filepath, 0, ASE_FILEPATH_LEN);
// Check that input string is not NULL
if (str == NULL) {
ASE_MSG("sv2c_config_dex => Input string is unusable\n");
} else {
// Defensive check (the buffer is a static array, so this is always true)
if (sv2c_config_filepath != NULL) {
// Attempt string copy and keep safe
ase_string_copy(sv2c_config_filepath, str,
ASE_FILEPATH_LEN);
#ifdef ASE_DEBUG
ASE_DBG("sv2c_config_filepath = %s\n",
sv2c_config_filepath);
#endif
// Check if file exists
if (access(sv2c_config_filepath, F_OK) == 0) {
ASE_MSG("+CONFIG %s file found !\n",
sv2c_config_filepath);
} else {
ASE_ERR
("** WARNING ** +CONFIG file was not found, will revert to DEFAULTS\n");
memset(sv2c_config_filepath, 0,
ASE_FILEPATH_LEN);
}
}
}
}
/*
* DPI: SCRIPT path data exchange
*/
void sv2c_script_dex(const char *str)
{
if (str == NULL) {
ASE_MSG("sv2c_script_dex => Input string is unusable\n");
} else {
memset(sv2c_script_filepath, 0, ASE_FILEPATH_LEN);
if (sv2c_script_filepath != NULL) {
ase_string_copy(sv2c_script_filepath, str,
ASE_FILEPATH_LEN);
#ifdef ASE_DEBUG
ASE_DBG("sv2c_script_filepath = %s\n",
sv2c_script_filepath);
#endif
// Check for existence of file
if (access(sv2c_script_filepath, F_OK) == 0) {
ASE_MSG("+SCRIPT %s file found !\n",
sv2c_script_filepath);
} else {
ASE_MSG
("** WARNING ** +SCRIPT file was not found, will revert to DEFAULTS\n");
memset(sv2c_script_filepath, 0,
ASE_FILEPATH_LEN);
}
}
}
}
/*
* DPI: Return ASE seed
*/
uint32_t get_ase_seed(void)
{
// return ase_seed;
return 0xFF;
}
/*
* DPI: WriteLine Data exchange
*/
void wr_memline_dex(cci_pkt *pkt)
{
FUNC_CALL_ENTRY;
uint64_t phys_addr;
uint64_t *wr_target_vaddr = (uint64_t *) NULL;
int intr_id;
//int ret_fd;
/* #ifndef DEFEATURE_ATOMICS */
/* uint64_t *rd_target_vaddr = (uint64_t*)NULL; */
/* long long cmp_qword; // Data to be compared */
/* long long new_qword; // Data to be written if compare passes */
/* #endif */
if (pkt->mode == CCIPKT_WRITE_MODE) {
/*
* Normal write operation
* Takes Write request and performs verbatim
*/
// Get cl_addr, deduce wr_target_vaddr
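// (cl_addr is a cache-line index; with 64-byte lines, << 6 yields
// the byte address)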
phys_addr = (uint64_t) pkt->cl_addr << 6;
wr_target_vaddr =
ase_fakeaddr_to_vaddr((uint64_t) phys_addr);
// Write to memory
ase_memcpy(wr_target_vaddr, (char *) pkt->qword,
CL_BYTE_WIDTH);
// Success
pkt->success = 1;
} else if (pkt->mode == CCIPKT_INTR_MODE) {
/*
* Interrupt operation
*/
// Trigger interrupt action
intr_id = pkt->intr_id;
ase_interrupt_generator(intr_id);
// Success
pkt->success = 1;
}
/* #ifndef DEFEATURE_ATOMICS */
/* else if (pkt->mode == CCIPKT_ATOMIC_MODE) */
/* { */
/* /\* */
/* * This is a special mode in which read response goes back */
/* * WRITE request is responded with a READ response */
/* *\/ */
/* // Specifics of the requested compare operation */
/* cmp_qword = pkt->qword[0]; */
/* new_qword = pkt->qword[4]; */
/* // Get cl_addr, deduce rd_target_vaddr */
/* phys_addr = (uint64_t)pkt->cl_addr << 6; */
/* rd_target_vaddr = ase_fakeaddr_to_vaddr((uint64_t)phys_addr); */
/* // Perform read first and set response packet accordingly */
/* ase_memcpy((char*)pkt->qword, rd_target_vaddr, CL_BYTE_WIDTH); */
/* // Get cl_addr, deduct wr_target, use qw_start to determine exact qword */
/* wr_target_vaddr = (uint64_t*)( (uint64_t)rd_target_vaddr + pkt->qw_start*8 ); */
/* // CmpXchg output */
/* pkt->success = (int)__sync_bool_compare_and_swap (wr_target_vaddr, cmp_qword, new_qword); */
/* // Debug output */
/* #ifdef ASE_DEBUG */
/* */
/* ASE_DBG("CmpXchg_op=%d\n", pkt->success); */
/* */
/* #endif */
/* } */
/* #endif */
FUNC_CALL_EXIT;
}
/*
* DPI: ReadLine Data exchange
*/
void rd_memline_dex(cci_pkt *pkt)
{
FUNC_CALL_ENTRY;
uint64_t phys_addr;
uint64_t *rd_target_vaddr = (uint64_t *) NULL;
// Get cl_addr, deduce rd_target_vaddr
phys_addr = (uint64_t) pkt->cl_addr << 6;
rd_target_vaddr = ase_fakeaddr_to_vaddr((uint64_t) phys_addr);
// Read from memory
ase_memcpy((char *) pkt->qword, rd_target_vaddr, CL_BYTE_WIDTH);
FUNC_CALL_EXIT;
}
/*
* DPI: MMIO response
*/
void mmio_response(struct mmio_t *mmio_pkt)
{
FUNC_CALL_ENTRY;
// Lock channel
// pthread_mutex_lock (&mmio_resp_lock);
#ifdef ASE_DEBUG
print_mmiopkt(fp_memaccess_log, "MMIO Got ", mmio_pkt);
#endif
// Send MMIO Response
mqueue_send(sim2app_mmiorsp_tx, (char *) mmio_pkt, sizeof(mmio_t));
// Unlock channel
// pthread_mutex_unlock (&mmio_resp_lock);
FUNC_CALL_EXIT;
}
/*
* ASE Interrupt generator handle
*/
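// Interrupts are delivered by writing a nonzero 8-byte value to the
// eventfd registered for the vector (see read_fd() below, which
// receives those fds from the application over a UNIX domain socket).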
void ase_interrupt_generator(int id)
{
int cnt;
if (id >= MAX_USR_INTRS) {
ASE_ERR("SIM-C : Interrupt #%d > avail. interrupts (%d)!\n",
id, MAX_USR_INTRS);
return;
}
if (intr_event_fds[id] < 0) {
ASE_ERR("SIM-C : No valid event for AFU interrupt %d!\n", id);
} else {
uint64_t val = 1;
cnt = write(intr_event_fds[id], &val, sizeof(uint64_t));
if (cnt < 0) {
ASE_ERR("SIM-C : Error writing fd %d errno = %s\n",
intr_event_fds[id], strerror(errno));
} else {
ASE_MSG("SIM-C : AFU Interrupt event %d\n", id);
}
}
}
/*
* DPI: Reset response
*/
void sw_reset_response(void)
{
FUNC_CALL_ENTRY;
// Send portctrl_rsp message
mqueue_send(sim2app_portctrl_rsp_tx, completed_str_msg,
ASE_MQ_MSGSIZE);
FUNC_CALL_EXIT;
}
/*
* Count error flag ping/pong
*/
volatile int count_error_flag;
void count_error_flag_pong(int flag)
{
count_error_flag = flag;
}
/*
* Update global disable/enable
*/
int glbl_dealloc_allowed;
void update_glbl_dealloc(int flag)
{
glbl_dealloc_allowed = flag;
}
/*
* Populating required DFH in BBS
*/
// UMSG CSRs
uint64_t *csr_umsg_base_address;
/*
* Initialize: Populate FME DFH block
* When initialized, this is called
* update*function is called when UMSG is to be set up
*/
void initialize_fme_dfh(struct buffer_t *buf)
{
FUNC_CALL_ENTRY;
// Capability CSRs
uint64_t *csr_port_capability;
uint64_t *csr_port_umsg;
uint64_t *csr_umsg_capability;
uint64_t *csr_umsg_mode;
uint8_t *port_vbase = (uint8_t *) (uintptr_t) buf->pbase;
/*
* PORT CSRs
*/
// PORT_CAPABILITY
csr_port_capability = (uint64_t *) (port_vbase + 0x0030);
*csr_port_capability = (0x100 << 23) + (0x0 << 0);
// PORT_UMSG DFH
csr_port_umsg = (uint64_t *) (port_vbase + 0x2000);
*csr_port_umsg =
((uint64_t) 0x3 << 60) | ((uint64_t) 0x1000 << 39) | (0x11 <<
0);
/*
* UMSG settings
*/
// UMSG_CAPABILITY
csr_umsg_capability = (uint64_t *) (port_vbase + 0x2008);
*csr_umsg_capability = (0x0 << 9) + (0x0 << 8) + (0x8 << 0);
// UMSG_BASE_ADDRESS (only initialize address, update function will update CSR)
csr_umsg_base_address = (uint64_t *) (port_vbase + 0x2010);
// UMSG_MODE
csr_umsg_mode = (uint64_t *) (port_vbase + 0x2018);
*csr_umsg_mode = 0x0;
FUNC_CALL_EXIT;
}
// Update FME DFH after UMAS becomes known
void update_fme_dfh(struct buffer_t *umas)
{
// Write UMAS address
*csr_umsg_base_address = (uint64_t) umas->pbase;
}
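/*
 * read_fd: receive an event file descriptor from the application over
 * the UNIX domain socket as SCM_RIGHTS ancillary data, then register
 * it in (or remove it from) intr_event_fds[] according to the
 * REGISTER_EVENT / UNREGISTER_EVENT request type.
 */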
int read_fd(int sock_fd)
{
struct msghdr msg = {0};
char buf[CMSG_SPACE(sizeof(int))];
struct event_request req = { .type = 0, .flags = 0 };
struct iovec io = { .iov_base = &req, .iov_len = sizeof(req) };
struct cmsghdr *cmsg;
int *fdptr;
memset(buf, '\0', sizeof(buf));
msg.msg_iov = &io;
msg.msg_iovlen = 1;
msg.msg_control = buf;
msg.msg_controllen = sizeof(buf);
cmsg = (struct cmsghdr *)buf;
cmsg->cmsg_len = CMSG_LEN(sizeof(int));
cmsg->cmsg_level = SOL_SOCKET;
cmsg->cmsg_type = SCM_RIGHTS;
msg.msg_name = NULL;
msg.msg_namelen = 0;
msg.msg_iov = &io;
msg.msg_iovlen = 1;
msg.msg_control = cmsg;
msg.msg_controllen = CMSG_LEN(sizeof(int));
msg.msg_flags = 0;
if (recvmsg(sock_fd, &msg, 0) < 0) {
ASE_ERR("SIM-C : Unable to rcvmsg from socket\n");
return 1;
}
cmsg = CMSG_FIRSTHDR(&msg);
int vector_id = 0;
fdptr = (int *)CMSG_DATA((struct cmsghdr *)buf);
if (req.type == REGISTER_EVENT) {
vector_id = req.flags;
intr_event_fds[vector_id] = *fdptr;
}
if (req.type == UNREGISTER_EVENT) {
int i;
// locate the interrupt vector to unregister
// from the event handle
for (i = 0; i < MAX_USR_INTRS; i++) {
if (intr_event_fds[i] == *fdptr)
intr_event_fds[i] = -1;
}
}
return 0;
}
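/*
 * start_socket_srv: event socket server thread. Binds a UNIX domain
 * socket, listens, then loops on select()/accept(), handing each
 * connection to read_fd() until sockserver_kill is set.
 */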
static void *start_socket_srv(void *args)
{
int res = 0;
int err_cnt = 0;
int sock_msg = 0;
errno_t err;
int sock_fd;
struct sockaddr_un saddr;
socklen_t addrlen;
struct timeval tv;
fd_set readfds;
pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, NULL);
sock_fd = socket(AF_UNIX, SOCK_STREAM, 0);
if (sock_fd == -1) {
ASE_ERR("SIM-C : Error opening event socket: %s",
strerror(errno));
err_cnt++;
return args;
}
// set the socket to non-blocking, asynchronous mode in a single
// F_SETFL call (separate calls would overwrite each other's flags)
fcntl(sock_fd, F_SETFL, O_NONBLOCK | O_ASYNC);
saddr.sun_family = AF_UNIX;
err = generate_sockname(saddr.sun_path);
if (err != EOK) {
ASE_ERR("%s: Error strncpy_s\n", __func__);
err_cnt++;
goto err;
}
// unlink previous addresses in use (if any)
unlink(saddr.sun_path);
addrlen = sizeof(struct sockaddr_un);
if (bind(sock_fd, (struct sockaddr *)&saddr, addrlen) < 0) {
ASE_ERR("SIM-C : Error binding event socket: %s\n",
strerror(errno));
err_cnt++;
goto err;
}
ASE_MSG("SIM-C : Creating Socket Server@%s...\n", saddr.sun_path);
if (listen(sock_fd, 5) < 0) {
ASE_ERR("SIM-C : Socket server listen failed with error:%s\n",
strerror(errno));
err_cnt++;
goto err;
}
ASE_MSG("SIM-C : Started listening on server %s\n", saddr.sun_path);
tv.tv_sec = 0;
tv.tv_usec = 0;
FD_ZERO(&readfds);
do {
FD_SET(sock_fd, &readfds);
res = select(sock_fd+1, &readfds, NULL, NULL, &tv);
if (res < 0) {
ASE_ERR("SIM-C : select error=%s\n", strerror(errno));
err_cnt++;
break;
}
if (FD_ISSET(sock_fd, &readfds)) {
sock_msg = accept(sock_fd, (struct sockaddr *)&saddr,
&addrlen);
if (sock_msg == -1) {
ASE_ERR("SIM-C : accept error=%s\n",
strerror(errno));
err_cnt++;
break;
}
if (read_fd(sock_msg) != 0) {
err_cnt++;
break;
}
}
if (sockserver_kill)
break;
} while (res >= 0);
ASE_MSG("SIM-C : Exiting event socket server@%s...\n", saddr.sun_path);
err:
close(sock_msg);
close(sock_fd);
unlink(saddr.sun_path);
sockserver_kill = 0;
return args;
}
/* ********************************************************************
* ASE Listener thread
* --------------------------------------------------------------------
* vbase/pbase exchange THREAD
* when an allocate request is received, the buffer is copied into a
* linked list. The reply consists of the pbase, fakeaddr and fd_ase.
* When a deallocate message is received, the buffer is invalidated.
*
* MMIO Request
* Calls MMIO Dispatch task in ccip_emulator
*
* *******************************************************************/
int ase_listener(void)
{
// Buffer management variables
static struct buffer_t ase_buffer;
char incoming_alloc_msgstr[ASE_MQ_MSGSIZE];
char incoming_dealloc_msgstr[ASE_MQ_MSGSIZE];
int rx_portctrl_cmd;
int portctrl_value;
// Portctrl variables
char portctrl_msgstr[ASE_MQ_MSGSIZE];
char logger_str[ASE_LOGGER_LEN];
char umsg_mapstr[ASE_MQ_MSGSIZE];
// Session status
static int session_empty;
static char *glbl_session_id;
//umsg, lookup before issuing UMSG
static int glbl_umsgmode;
char umsg_mode_msg[ASE_LOGGER_LEN];
// FUNC_CALL_ENTRY;
// ---------------------------------------------------------------------- //
/*
* Port Control message
* Format: <cmd> <value>
* -----------------------------------------------------------------
* Supported commands |
* ASE_INIT <APP_PID> | Session control - sends the app PID to
* | the simulator
* AFU_RESET <0,1> | AFU reset handle
* UMSG_MODE <8-bit mask> | UMSG mode control
*
* ASE responds with "COMPLETED" as a string, there is no
* expectation of a string check
*
*/
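// Illustrative example (the numeric command encodings live in the
// headers and are an assumption here): a message string "4 10250"
// would be parsed by the sscanf below into rx_portctrl_cmd = 4 and
// portctrl_value = 10250 -- e.g. an ASE_INIT from an app with
// PID 10250, if ASE_INIT happens to encode as 4.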
// Simulator is not in lockdown mode (simkill not in progress)
if (self_destruct_in_progress == 0) {
if (mqueue_recv(app2sim_portctrl_req_rx, (char *)portctrl_msgstr, ASE_MQ_MSGSIZE) == ASE_MSG_PRESENT) {
sscanf(portctrl_msgstr, "%d %d", &rx_portctrl_cmd, &portctrl_value);
if (rx_portctrl_cmd == AFU_RESET) {
// AFU Reset control
portctrl_value = (portctrl_value != 0) ? 1 : 0 ;
// Wait until transactions clear
// AFU Reset trigger function will wait until channels clear up
afu_softreset_trig (0, portctrl_value);
// Reset response is returned from simulator once queues are cleared
// Simulator cannot be held up here.
} else if (rx_portctrl_cmd == UMSG_MODE) {
// Umsg mode setting here
glbl_umsgmode = portctrl_value & 0xFFFFFFFF;
snprintf(umsg_mode_msg, ASE_LOGGER_LEN, "UMSG Mode mask set to 0x%x", glbl_umsgmode);
buffer_msg_inject(1, umsg_mode_msg);
// Send portctrl_rsp message
mqueue_send(sim2app_portctrl_rsp_tx, completed_str_msg, ASE_MQ_MSGSIZE);
} else if (rx_portctrl_cmd == ASE_INIT) {
ASE_INFO("Session requested by PID = %d\n", portctrl_value);
// Generate new timestamp
put_timestamp();
// Generate session ID path
snprintf(tstamp_filepath, ASE_FILEPATH_LEN,
"%s/%s", ase_workdir_path,
TSTAMP_FILENAME);
// Print timestamp
glbl_session_id = ase_malloc(20);
get_timestamp(glbl_session_id);
ASE_MSG("Session ID => %s\n",
glbl_session_id);
session_empty = 0;
// Send portctrl_rsp message
mqueue_send(sim2app_portctrl_rsp_tx, completed_str_msg, ASE_MQ_MSGSIZE);
int thr_err = pthread_create(&socket_srv_tid,
NULL, &start_socket_srv, NULL);
if (thr_err != 0) {
ASE_ERR("FAILED Event server \
failed to start\n");
exit(1);
}
ASE_MSG("Event socket server started\n");
} else if (rx_portctrl_cmd == ASE_SIMKILL) {
#ifdef ASE_DEBUG
ASE_MSG("ASE_SIMKILL requested, processing options... \n");
#endif
sockserver_kill = 1;
// ------------------------------------------------------------- //
// Update regression counter
glbl_test_cmplt_cnt = glbl_test_cmplt_cnt + 1;
// Mode specific exit behaviour
if ((cfg->ase_mode == ASE_MODE_DAEMON_NO_SIMKILL) && (session_empty == 0)) {
ASE_MSG("ASE running in daemon mode (see ase.cfg)\n");
ASE_MSG("Reseting buffers ... Simulator RUNNING\n");
ase_reset_trig();
ase_destroy();
ASE_INFO("Ready to run next test\n");
session_empty = 1;
buffer_msg_inject(0, TEST_SEPARATOR);
} else if (cfg->ase_mode == ASE_MODE_DAEMON_SIMKILL) {
ASE_INFO("ASE Timeout SIMKILL will happen soon\n");
} else if (cfg->ase_mode == ASE_MODE_DAEMON_SW_SIMKILL) {
ASE_INFO("ASE recognized a SW simkill (see ase.cfg)... Simulator will EXIT\n");
run_clocks (500);
ase_perror_teardown();
start_simkill_countdown();
} else if (cfg->ase_mode == ASE_MODE_REGRESSION) {
if (cfg->ase_num_tests == glbl_test_cmplt_cnt) {
ASE_INFO("ASE completed %d tests (see supplied ASE config file)... Simulator will EXIT\n", cfg->ase_num_tests);
run_clocks (500);
ase_perror_teardown();
start_simkill_countdown();
} else {
ase_reset_trig();
}
}
// wait for server shutdown
pthread_join(socket_srv_tid, NULL);
// Check for simulator sanity -- if transaction counts don't match,
// kill the simulation ASAP -- DEBUG feature only
#ifdef ASE_DEBUG
count_error_flag_ping();
if (count_error_flag != 0) {
ASE_ERR
("** ERROR ** Transaction counts do not match, something got lost\n");
run_clocks(500);
ase_perror_teardown();
start_simkill_countdown();
}
#endif
// Send portctrl_rsp message
mqueue_send(sim2app_portctrl_rsp_tx,
completed_str_msg,
ASE_MQ_MSGSIZE);
// Clean up session ID
ase_free_buffer(glbl_session_id);
} else {
ASE_ERR
("Undefined Port Control function ... IGNORING\n");
// Send portctrl_rsp message
mqueue_send(sim2app_portctrl_rsp_tx,
completed_str_msg,
ASE_MQ_MSGSIZE);
}
}
// ------------------------------------------------------------------------------- //
/*
* Buffer Allocation Replicator
*/
// Receive a DPI message and get information from replicated buffer
ase_empty_buffer(&ase_buffer);
if (mqueue_recv
(app2sim_alloc_rx, (char *) incoming_alloc_msgstr,
ASE_MQ_MSGSIZE) == ASE_MSG_PRESENT) {
// Typecast string to buffer_t
ase_memcpy((char *) &ase_buffer,
incoming_alloc_msgstr,
sizeof(struct buffer_t));
// Allocate action
ase_alloc_action(&ase_buffer);
ase_buffer.is_privmem = 0;
if (ase_buffer.index == 0) {
ase_buffer.is_mmiomap = 1;
} else {
ase_buffer.is_mmiomap = 0;
}
// Format workspace info string
memset(logger_str, 0, ASE_LOGGER_LEN);
if (ase_buffer.is_mmiomap) {
snprintf(logger_str + strlen(logger_str),
ASE_LOGGER_LEN,
"MMIO map Allocated ");
initialize_fme_dfh(&ase_buffer);
} else if (ase_buffer.is_umas) {
snprintf(logger_str + strlen(logger_str),
ASE_LOGGER_LEN,
"UMAS Allocated ");
update_fme_dfh(&ase_buffer);
} else {
snprintf(logger_str + strlen(logger_str),
ASE_LOGGER_LEN,
"Buffer %d Allocated ",
ase_buffer.index);
}
snprintf(logger_str + strlen(logger_str),
ASE_LOGGER_LEN,
" (located /dev/shm/%s) =>\n",
ase_buffer.memname);
snprintf(logger_str + strlen(logger_str),
ASE_LOGGER_LEN,
"\t\tHost App Virtual Addr = 0x%" PRIx64
"\n", ase_buffer.vbase);
snprintf(logger_str + strlen(logger_str),
ASE_LOGGER_LEN,
"\t\tHW Physical Addr = 0x%" PRIx64
"\n", ase_buffer.fake_paddr);
snprintf(logger_str + strlen(logger_str),
ASE_LOGGER_LEN,
"\t\tHW CacheAligned Addr = 0x%" PRIx64
"\n", ase_buffer.fake_paddr >> 6);
snprintf(logger_str + strlen(logger_str),
ASE_LOGGER_LEN,
"\t\tWorkspace Size (bytes) = %" PRId32
"\n", ase_buffer.memsize);
snprintf(logger_str + strlen(logger_str),
ASE_LOGGER_LEN, "\n");
// Inject buffer message
buffer_msg_inject(1, logger_str);
// Standard oneline message ---> Hides internal info
ase_buffer_oneline(&ase_buffer);
// Write buffer information to file
if ((ase_buffer.is_mmiomap == 0)
|| (ase_buffer.is_privmem == 0)) {
// Flush info to file
if (fp_workspace_log != NULL) {
fprintf(fp_workspace_log, "%s",
logger_str);
fflush(fp_workspace_log);
}
}
// Debug only
#ifdef ASE_DEBUG
ase_buffer_info(&ase_buffer);
#endif
}
// ------------------------------------------------------------------------------- //
ase_empty_buffer(&ase_buffer);
if (mqueue_recv
(app2sim_dealloc_rx, (char *) incoming_dealloc_msgstr,
ASE_MQ_MSGSIZE) == ASE_MSG_PRESENT) {
// Typecast string to buffer_t
ase_memcpy((char *) &ase_buffer,
incoming_dealloc_msgstr,
sizeof(struct buffer_t));
// Format workspace info string
memset(logger_str, 0, ASE_LOGGER_LEN);
snprintf(logger_str + strlen(logger_str),
ASE_LOGGER_LEN,
"\nBuffer %d Deallocated =>\n",
ase_buffer.index);
snprintf(logger_str + strlen(logger_str),
ASE_LOGGER_LEN, "\n");
// Deallocate action
ase_dealloc_action(&ase_buffer, 1);
// Inject buffer message
buffer_msg_inject(1, logger_str);
// Standard oneline message ---> Hides internal info
ase_buffer.valid = ASE_BUFFER_INVALID;
ase_buffer_oneline(&ase_buffer);
// Debug only
#ifdef ASE_DEBUG
ase_buffer_info(&ase_buffer);
#endif
}
// ------------------------------------------------------------------------------- //
/*
* MMIO request listener
*/
// Receive csr_write packet
if (mqueue_recv
(app2sim_mmioreq_rx, (char *) incoming_mmio_pkt,
sizeof(struct mmio_t)) == ASE_MSG_PRESENT) {
// ase_memcpy(incoming_mmio_pkt, (mmio_t *)mmio_mapstr, sizeof(struct mmio_t));
#ifdef ASE_DEBUG
print_mmiopkt(fp_memaccess_log, "MMIO Sent",
incoming_mmio_pkt);
#endif
mmio_dispatch(0, incoming_mmio_pkt);
}
// ------------------------------------------------------------------------------- //
/*
* UMSG engine
*/
// cleanse string before reading
if (mqueue_recv
(app2sim_umsg_rx, (char *) umsg_mapstr,
sizeof(struct umsgcmd_t)) == ASE_MSG_PRESENT) {
ase_memcpy(incoming_umsg_pkt,
(umsgcmd_t *) umsg_mapstr,
sizeof(struct umsgcmd_t));
// Hint trigger
incoming_umsg_pkt->hint =
(glbl_umsgmode >> (4 * incoming_umsg_pkt->id))
& 0xF;
// dispatch to event processing
#ifdef ASE_ENABLE_UMSG_FEATURE
umsg_dispatch(0, incoming_umsg_pkt);
#else
ASE_ERR
("UMsg is only supported in the integrated configuration!\n");
ASE_ERR
(" Simulator will shut down now.\n");
start_simkill_countdown();
#endif
}
// ------------------------------------------------------------------------------- //
} else {
#ifdef ASE_DEBUG
ASE_DBG
("Simulator is in Lockdown mode, Simkill in progress\n");
sleep(1);
#endif
}
// FUNC_CALL_EXIT;
return 0;
}
// -----------------------------------------------------------------------
// DPI Initialize routine
// - Setup message queues
// - Start buffer replicator, csr_write listener thread
// -----------------------------------------------------------------------
int ase_init(void)
{
FUNC_CALL_ENTRY;
// Set loglevel
set_loglevel(ase_calc_loglevel());
// Set stdout bufsize to empty immediately
// setvbuf(stdout, NULL, _IONBF, 0);
setbuf(stdout, NULL);
// Set self_destruct flag = 0, SIMulator is not in lockdown
self_destruct_in_progress = 0;
// Graceful kill handlers
register_signal(SIGTERM, start_simkill_countdown);
register_signal(SIGINT, start_simkill_countdown);
register_signal(SIGQUIT, start_simkill_countdown);
register_signal(SIGHUP, start_simkill_countdown);
// Runtime error handler (print backtrace)
register_signal(SIGSEGV, backtrace_handler);
register_signal(SIGBUS, backtrace_handler);
register_signal(SIGABRT, backtrace_handler);
// Ignore SIGPIPE
signal(SIGPIPE, SIG_IGN);
// Get PID
ase_pid = getpid();
ASE_MSG("PID of simulator is %d\n", ase_pid);
// Allocate incoming_mmio_pkt
incoming_mmio_pkt = (struct mmio_t *) ase_malloc(sizeof(mmio_t));
// Allocate incoming_umsg_pkt
incoming_umsg_pkt =
(struct umsgcmd_t *) ase_malloc(sizeof(struct umsgcmd_t));
// ASE configuration management
// ase_config_parse(ASE_CONFIG_FILE);
ase_config_parse(sv2c_config_filepath);
// Evaluate IPCs
ipc_init();
ASE_MSG("Current Directory located at =>\n");
ASE_MSG("%s\n", ase_workdir_path);
// Create IPC cleanup setup
create_ipc_listfile();
// Sniffer file stat path
memset(ccip_sniffer_file_statpath, 0, ASE_FILEPATH_LEN);
snprintf(ccip_sniffer_file_statpath, ASE_FILEPATH_LEN,
"%s/ccip_warning_and_errors.txt", ase_workdir_path);
// Remove existing error log files from previous run
if (access(ccip_sniffer_file_statpath, F_OK) == 0) {
if (unlink(ccip_sniffer_file_statpath) == 0) {
ASE_MSG
("Removed sniffer log file from previous run\n");
}
}
/*
* Debug logs
*/
#ifdef ASE_DEBUG
// Create a memory access log
fp_memaccess_log = fopen("aseafu_access.log", "w");
if (fp_memaccess_log == NULL) {
ASE_ERR
(" [DEBUG] Memory access debug logger initialization failed !\n");
} else {
ASE_DBG("Memory access debug logger initialized\n");
}
// Page table tracker
fp_pagetable_log = fopen("ase_pagetable.log", "w");
if (fp_pagetable_log == NULL) {
ASE_ERR
(" [DEBUG] ASE pagetable logger initialization failed !\n");
} else {
ASE_DBG("ASE pagetable logger initialized\n");
}
#endif
// Set up message queues
ASE_MSG("Creating Messaging IPCs...\n");
int ipc_iter;
for (ipc_iter = 0; ipc_iter < ASE_MQ_INSTANCES; ipc_iter++)
mqueue_create(mq_array[ipc_iter].name);
// Open message queues
app2sim_alloc_rx =
mqueue_open(mq_array[0].name, mq_array[0].perm_flag);
app2sim_mmioreq_rx =
mqueue_open(mq_array[1].name, mq_array[1].perm_flag);
app2sim_umsg_rx =
mqueue_open(mq_array[2].name, mq_array[2].perm_flag);
sim2app_alloc_tx =
mqueue_open(mq_array[3].name, mq_array[3].perm_flag);
sim2app_mmiorsp_tx =
mqueue_open(mq_array[4].name, mq_array[4].perm_flag);
app2sim_portctrl_req_rx =
mqueue_open(mq_array[5].name, mq_array[5].perm_flag);
app2sim_dealloc_rx =
mqueue_open(mq_array[6].name, mq_array[6].perm_flag);
sim2app_dealloc_tx =
mqueue_open(mq_array[7].name, mq_array[7].perm_flag);
sim2app_portctrl_rsp_tx =
mqueue_open(mq_array[8].name, mq_array[8].perm_flag);
sim2app_intr_request_tx =
mqueue_open(mq_array[9].name, mq_array[9].perm_flag);
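	// Queue direction follows the naming convention: "app2sim_*" queues
	// receive messages from the application, "sim2app_*" queues carry
	// responses back to it.  The mq_array slot order used here is
	// assumed to match the ordering set up on the application side.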
int i;
for (i = 0; i < MAX_USR_INTRS; i++)
intr_event_fds[i] = -1;
sockserver_kill = 0;
// Generate Completed message for portctrl
/* completed_str_msg = (char*)ase_malloc(ASE_MQ_MSGSIZE); */
/* snprintf(completed_str_msg, 10, "COMPLETED"); */
// Calculate memory map regions
ASE_MSG("Calculating memory map...\n");
calc_phys_memory_ranges();
// Random number for csr_pinned_addr
/* if (cfg->enable_reuse_seed) */
/* { */
/* ase_seed = ase_read_seed (); */
/* } */
/* else */
/* { */
/* ase_seed = generate_ase_seed(); */
/* ase_write_seed ( ase_seed ); */
/* } */
ase_write_seed (cfg->ase_seed);
srand(cfg->ase_seed);
// Open Buffer info log
fp_workspace_log = fopen("workspace_info.log", "wb");
if (fp_workspace_log == (FILE *) NULL) {
ase_error_report("fopen", errno, ASE_OS_FOPEN_ERR);
} else {
ASE_INFO_2
("Information about allocated buffers => workspace_info.log \n");
}
fflush(stdout);
FUNC_CALL_EXIT;
return 0;
}
// -----------------------------------------------------------------------
// ASE ready indicator: Print a message that ASE is ready to go.
// Controls run-modes
// -----------------------------------------------------------------------
int ase_ready(void)
{
FUNC_CALL_ENTRY;
// ASE-APP run command
char app_run_cmd[ASE_FILEPATH_LEN];
// Set test_cnt to 0
glbl_test_cmplt_cnt = 0;
// Write lock file
ase_write_lock_file();
// Display "Ready for simulation"
ASE_INFO
("** ATTENTION : BEFORE running the software application **\n");
ASE_INFO
("Set env(ASE_WORKDIR) in terminal where application will run (copy-and-paste) =>\n");
ASE_INFO("$SHELL | Run:\n");
ASE_INFO
("---------+---------------------------------------------------\n");
ASE_INFO("bash/zsh | export ASE_WORKDIR=%s\n", ase_workdir_path);
ASE_INFO("tcsh/csh | setenv ASE_WORKDIR %s\n", ase_workdir_path);
ASE_INFO
("For any other $SHELL, consult your Linux administrator\n");
ASE_INFO("\n");
// Run ase_regress.sh here
if (cfg->ase_mode == ASE_MODE_REGRESSION) {
ASE_INFO("Starting ase_regress.sh script...\n");
if ((sv2c_script_filepath != NULL)
&& (strlen(sv2c_script_filepath) != 0)) {
snprintf(app_run_cmd, ASE_FILEPATH_LEN, "%s &",
sv2c_script_filepath);
} else {
ase_string_copy(app_run_cmd, "./ase_regress.sh &",
ASE_FILEPATH_LEN);
}
// Run the regress application
if (system(app_run_cmd) == -1) {
ASE_INFO_2
("ASE had some problem starting script pointed by ASE_SCRIPT\n");
ASE_INFO_2("Tests may be run manually instead\n");
}
} else {
ASE_INFO("Ready for simulation...\n");
ASE_INFO("Press CTRL-C to close simulator...\n");
}
fflush(stdout);
FUNC_CALL_EXIT;
return 0;
}
/*
* DPI simulation timeout counter
* - When CTRL-C is pressed, start teardown sequence
* - TEARDOWN SEQUENCE:
* - Close and unlink message queues
* - Close and unlink shared memories
* - Destroy linked list
* - Delete .ase_ready
* - Send $finish to VCS
*/
void start_simkill_countdown(void)
{
FUNC_CALL_ENTRY;
#ifdef ASE_DEBUG
ASE_DBG("Caught a SIG\n");
#endif
// Close and unlink message queue
ASE_MSG("Closing message queue and unlinking...\n");
// Close message queues
mqueue_close(app2sim_alloc_rx);
mqueue_close(sim2app_alloc_tx);
mqueue_close(app2sim_mmioreq_rx);
mqueue_close(sim2app_mmiorsp_tx);
mqueue_close(app2sim_umsg_rx);
mqueue_close(app2sim_portctrl_req_rx);
mqueue_close(app2sim_dealloc_rx);
mqueue_close(sim2app_dealloc_tx);
mqueue_close(sim2app_portctrl_rsp_tx);
mqueue_close(sim2app_intr_request_tx);
int ipc_iter;
for (ipc_iter = 0; ipc_iter < ASE_MQ_INSTANCES; ipc_iter++)
mqueue_destroy(mq_array[ipc_iter].name);
// Destroy all open shared memory regions
ASE_MSG("Unlinking Shared memory regions.... \n");
// ase_destroy();
if (unlink(tstamp_filepath) == -1) {
ASE_MSG
("$ASE_WORKDIR/.ase_ready could not be deleted, please delete manually... \n");
} else {
ASE_MSG("Session code file removed\n");
}
// Final clean of IPC
final_ipc_cleanup();
// Close workspace log
if (fp_workspace_log != NULL) {
fclose(fp_workspace_log);
}
#ifdef ASE_DEBUG
if (fp_memaccess_log != NULL) {
fclose(fp_memaccess_log);
}
if (fp_pagetable_log != NULL) {
fclose(fp_pagetable_log);
}
#endif
// Remove session files
ASE_MSG("Cleaning session files...\n");
if (unlink(ase_ready_filepath) == -1) {
ASE_ERR
("Session file %s could not be removed, please remove manually !!\n",
ASE_READY_FILENAME);
}
// Print location of log files
ASE_INFO("Simulation generated log files\n");
ASE_INFO
(" Transactions file | $ASE_WORKDIR/ccip_transactions.tsv\n");
ASE_INFO
(" Workspaces info | $ASE_WORKDIR/workspace_info.log\n");
if (access(ccip_sniffer_file_statpath, F_OK) != -1) {
ASE_INFO
(" Protocol warning/errors | $ASE_WORKDIR/ccip_warning_and_errors.txt\n");
}
ASE_INFO
(" ASE seed | $ASE_WORKDIR/ase_seed.txt\n");
// Display test count
ASE_INFO("\n");
ASE_INFO("Tests run => %d\n", glbl_test_cmplt_cnt);
ASE_INFO("\n");
// Send a simulation kill command
ASE_INFO_2("Sending kill command...\n");
usleep(1000);
// Set scope
svSetScope(scope);
// Free memories
free(cfg);
free(ase_ready_filepath);
ase_free_buffer((char *) incoming_mmio_pkt);
ase_free_buffer((char *) incoming_umsg_pkt);
// ase_free_buffer (ase_workdir_path);
// Issue Simulation kill
simkill();
FUNC_CALL_EXIT;
}
/*
* ASE config parsing
* - Set default values for ASE configuration
* - See if a ase.cfg is available for overriding global values
* - If YES, parse and configure the cfg (ase_cfg_t) structure
*/
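/*
 * Illustrative ase.cfg (keys match the parser below; the values shown
 * are examples only, not recommended settings -- lines starting with
 * '#' are ignored):
 *
 *   # ASE configuration
 *   ASE_MODE = 1
 *   ASE_TIMEOUT = 50000
 *   ASE_NUM_TESTS = 1
 *   ENABLE_REUSE_SEED = 0
 *   ASE_SEED = 9876
 *   ENABLE_CL_VIEW = 1
 *   USR_CLK_MHZ = 300.0
 *   PHYS_MEMORY_AVAILABLE_GB = 256
 */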
void ase_config_parse(char *filename)
{
FUNC_CALL_ENTRY;
FILE *fp = (FILE *) NULL;
char *line;
size_t len = 0;
char *parameter;
int value;
char *pch;
char *saveptr;
// User clock frequency
float f_usrclk;
// Allocate space to store ASE config
cfg = (struct ase_cfg_t *) ase_malloc(sizeof(struct ase_cfg_t));
// Allocate memory to store a line
line = ase_malloc(sizeof(char) * 80);
// Default values
cfg->ase_mode = ASE_MODE_DAEMON_NO_SIMKILL;
cfg->ase_timeout = 50000;
cfg->ase_num_tests = 1;
cfg->enable_reuse_seed = 0;
cfg->ase_seed = 9876;
cfg->enable_cl_view = 1;
cfg->usr_tps = DEFAULT_USR_CLK_TPS;
cfg->phys_memory_available_gb = 256;
// Fclk Mhz
f_usrclk = DEFAULT_USR_CLK_MHZ;
// Find ase.cfg OR not
if (access(filename, F_OK) != -1) {
// FILE exists, overwrite
fp = fopen(filename, "r");
if (fp == NULL) {
ASE_ERR
("%s supplied by +CONFIG could not be opened, IGNORED\n",
filename);
} else {
ASE_INFO_2("Reading %s configuration file \n",
filename);
// Parse file line by line
while (getline(&line, &len, fp) != -1) {
// Remove all invalid characters
remove_spaces(line);
remove_tabs(line);
remove_newline(line);
				// Ignore strings beginning with '#' OR NULL (compound NOR)
if ((line[0] != '#') && (line[0] != '\0')) {
					parameter = strtok_r(line, "=\n", &saveptr);
					if (parameter != NULL) {
						if (ase_strncmp(parameter, "ASE_MODE", 8) == 0) {
							pch = strtok_r(NULL, "", &saveptr);
							if (pch != NULL)
								cfg->ase_mode = atoi(pch);
						} else if (ase_strncmp(parameter, "ASE_TIMEOUT", 11) == 0) {
							pch = strtok_r(NULL, "", &saveptr);
							if (pch != NULL)
								cfg->ase_timeout = atoi(pch);
						} else if (ase_strncmp(parameter, "ASE_NUM_TESTS", 13) == 0) {
							pch = strtok_r(NULL, "", &saveptr);
							if (pch != NULL)
								cfg->ase_num_tests = atoi(pch);
						} else if (ase_strncmp(parameter, "ENABLE_REUSE_SEED", 17) == 0) {
							pch = strtok_r(NULL, "", &saveptr);
							if (pch != NULL)
								cfg->enable_reuse_seed = atoi(pch);
						} else if (ase_strncmp(parameter, "ASE_SEED", 8) == 0) {
							pch = strtok_r(NULL, "", &saveptr);
							if (pch != NULL)
								cfg->ase_seed = atoi(pch);
						} else if (ase_strncmp(parameter, "ENABLE_CL_VIEW", 14) == 0) {
							pch = strtok_r(NULL, "", &saveptr);
							if (pch != NULL)
								cfg->enable_cl_view = atoi(pch);
						} else if (ase_strncmp(parameter, "USR_CLK_MHZ", 11) == 0) {
							pch = strtok_r(NULL, "", &saveptr);
							if (pch != NULL) {
								f_usrclk = atof(pch);
								if (f_usrclk == 0.000000) {
									ASE_ERR("User Clock Frequency cannot be 0.000 MHz\n");
									ASE_ERR(" Reverting to %f MHz\n",
										DEFAULT_USR_CLK_MHZ);
									f_usrclk = DEFAULT_USR_CLK_MHZ;
									cfg->usr_tps = DEFAULT_USR_CLK_TPS;
								} else if (f_usrclk == DEFAULT_USR_CLK_MHZ) {
									cfg->usr_tps = DEFAULT_USR_CLK_TPS;
								} else {
									cfg->usr_tps = (int) (1E+12 / (f_usrclk * pow(1000, 2)));
#ifdef ASE_DEBUG
									ASE_DBG("usr_tps = %d\n", cfg->usr_tps);
#endif
									if (f_usrclk != DEFAULT_USR_CLK_MHZ) {
										ASE_INFO_2("User clock Frequency was modified from %f to %f MHz\n",
											   DEFAULT_USR_CLK_MHZ, f_usrclk);
									}
								}
							}
						} else if (ase_strncmp(parameter, "PHYS_MEMORY_AVAILABLE_GB", 24) == 0) {
							pch = strtok_r(NULL, "", &saveptr);
							if (pch != NULL) {
								value = atoi(pch);
								if (value < 0) {
									ASE_ERR("Physical memory size is negative in %s\n",
										filename);
									ASE_ERR(" Reverting to default 256 GB\n");
								} else {
									cfg->phys_memory_available_gb = value;
								}
							}
						} else {
							ASE_INFO_2("In config file %s, Parameter type %s is unidentified \n",
								   filename, parameter);
						}
					}
}
}
}
/*
* ASE mode control
*/
switch (cfg->ase_mode) {
// Classic Server client mode
case ASE_MODE_DAEMON_NO_SIMKILL:
ASE_INFO_2
("ASE was started in Mode 1 (Server-Client without SIMKILL)\n");
cfg->ase_timeout = 0;
cfg->ase_num_tests = 0;
break;
// Server Client mode with SIMKILL
case ASE_MODE_DAEMON_SIMKILL:
ASE_INFO_2
("ASE was started in Mode 2 (Server-Client with SIMKILL)\n");
cfg->ase_num_tests = 0;
break;
// Long runtime mode (SW kills SIM)
case ASE_MODE_DAEMON_SW_SIMKILL:
ASE_INFO_2
("ASE was started in Mode 3 (Server-Client with Sw SIMKILL (long runs)\n");
cfg->ase_timeout = 0;
cfg->ase_num_tests = 0;
break;
	// Regression mode (lets a shell script drive the tests)
case ASE_MODE_REGRESSION:
ASE_INFO_2
("ASE was started in Mode 4 (Regression mode)\n");
cfg->ase_timeout = 0;
break;
// Illegal modes
default:
ASE_INFO_2
("ASE mode could not be identified, will revert to ASE_MODE = 1 (Server client w/o SIMKILL)\n");
cfg->ase_mode = ASE_MODE_DAEMON_NO_SIMKILL;
cfg->ase_timeout = 0;
cfg->ase_num_tests = 0;
}
// Close file
if (fp != NULL) {
fclose(fp);
}
} else {
// FILE does not exist
ASE_INFO_2("%s not found, using default values\n",
filename);
}
// Mode configuration
switch (cfg->ase_mode) {
case ASE_MODE_DAEMON_NO_SIMKILL:
ASE_INFO_2
("ASE Mode: Server-Client mode without SIMKILL\n");
break;
case ASE_MODE_DAEMON_SIMKILL:
ASE_INFO_2("ASE Mode: Server-Client mode with SIMKILL\n");
break;
case ASE_MODE_DAEMON_SW_SIMKILL:
ASE_INFO_2
("ASE Mode: Server-Client mode with SW SIMKILL (long runs)\n");
break;
case ASE_MODE_REGRESSION:
ASE_INFO_2("ASE Mode: ASE Regression mode\n");
break;
}
// Inactivity
if (cfg->ase_mode == ASE_MODE_DAEMON_SIMKILL)
ASE_INFO_2
("Inactivity kill-switch ... ENABLED after %d clocks \n",
cfg->ase_timeout);
else
ASE_INFO_2("Inactivity kill-switch ... DISABLED \n");
// Reuse seed
if (cfg->enable_reuse_seed != 0)
ASE_INFO_2("Reuse simulation seed ... ENABLED \n");
else {
ASE_INFO_2
("Reuse simulation seed ... DISABLED (will create one at $ASE_WORKDIR/ase_seed.txt) \n");
cfg->ase_seed = generate_ase_seed();
}
// ASE will be run with this seed
ASE_INFO_2("ASE Seed ... %d \n", cfg->ase_seed);
// CL view
if (cfg->enable_cl_view != 0)
ASE_INFO_2("ASE Transaction view ... ENABLED\n");
else
ASE_INFO_2("ASE Transaction view ... DISABLED\n");
// User clock frequency
ASE_INFO_2
("User Clock Frequency ... %.6f MHz, T_uclk = %d ps \n",
f_usrclk, cfg->usr_tps);
if (f_usrclk != DEFAULT_USR_CLK_MHZ) {
ASE_INFO_2
("** NOTE **: User Clock Frequency was changed from default %f MHz !\n",
DEFAULT_USR_CLK_MHZ);
}
// GBs of physical memory available
ASE_INFO_2("Amount of physical memory ... %d GB\n",
cfg->phys_memory_available_gb);
// Transfer data to hardware (for simulation only)
ase_config_dex(cfg);
// free memory
free(line);
FUNC_CALL_EXIT;
}
| 1 | 14,635 | Should most of these non-globals be static? | OPAE-opae-sdk | c |
@@ -14,6 +14,14 @@ class TestFakerName < Test::Unit::TestCase
assert @tester.name_with_middle.match(/(\w+\.? ?){3,4}/)
end
+ def test_first_name
+ assert @tester.first_name.match(/(\w+\.? ?){3,4}/)
+ end
+
+ def test_last_name
+ assert @tester.last_name.match(/(\w+\.? ?){3,4}/)
+ end
+
def test_prefix
assert @tester.prefix.match(/[A-Z][a-z]+\.?/)
end | 1 | require File.expand_path(File.dirname(__FILE__) + '/test_helper.rb')
class TestFakerName < Test::Unit::TestCase
def setup
@tester = Faker::Name
end
def test_name
assert @tester.name.match(/(\w+\.? ?){2,3}/)
end
def test_name_with_middle
assert @tester.name_with_middle.match(/(\w+\.? ?){3,4}/)
end
def test_prefix
assert @tester.prefix.match(/[A-Z][a-z]+\.?/)
end
def test_suffix
assert @tester.suffix.match(/[A-Z][a-z]*\.?/)
end
def test_job_titles
@job_titles = Faker::Name.job_titles
@job_titles.each do |title|
refute title.to_s.empty?
end
end
def test_initials
assert @tester.initials.match(/[A-Z]{3}/)
assert @tester.initials(2).match(/[A-Z]{2}/)
end
end
| 1 | 8,395 | Please do not approve PRs without tests!!!! | faker-ruby-faker | rb |
@@ -48,7 +48,7 @@ class WebEngineView(QWebEngineView):
else:
profile = webenginesettings.default_profile
page = WebEnginePage(theme_color=theme_color, profile=profile,
- parent=self)
+ parent=self, win_id=win_id)
self.setPage(page)
def shutdown(self): | 1 | # vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# Copyright 2014-2018 Florian Bruhin (The Compiler) <mail@qutebrowser.org>
#
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
"""The main browser widget for QtWebEngine."""
import functools
from PyQt5.QtCore import pyqtSignal, pyqtSlot, QUrl, PYQT_VERSION
from PyQt5.QtGui import QPalette
from PyQt5.QtWebEngineWidgets import (QWebEngineView, QWebEnginePage,
QWebEngineScript)
from qutebrowser.browser import shared
from qutebrowser.browser.webengine import certificateerror, webenginesettings
from qutebrowser.config import config
from qutebrowser.utils import log, debug, usertypes, jinja, objreg, qtutils
class WebEngineView(QWebEngineView):
"""Custom QWebEngineView subclass with qutebrowser-specific features."""
def __init__(self, *, tabdata, win_id, private, parent=None):
super().__init__(parent)
self._win_id = win_id
self._tabdata = tabdata
theme_color = self.style().standardPalette().color(QPalette.Base)
if private:
profile = webenginesettings.private_profile
assert profile.isOffTheRecord()
else:
profile = webenginesettings.default_profile
page = WebEnginePage(theme_color=theme_color, profile=profile,
parent=self)
self.setPage(page)
def shutdown(self):
self.page().shutdown()
def createWindow(self, wintype):
"""Called by Qt when a page wants to create a new window.
This function is called from the createWindow() method of the
associated QWebEnginePage, each time the page wants to create a new
window of the given type. This might be the result, for example, of a
JavaScript request to open a document in a new window.
Args:
wintype: This enum describes the types of window that can be
created by the createWindow() function.
QWebEnginePage::WebBrowserWindow:
A complete web browser window.
QWebEnginePage::WebBrowserTab:
A web browser tab.
QWebEnginePage::WebDialog:
A window without decoration.
QWebEnginePage::WebBrowserBackgroundTab:
A web browser tab without hiding the current visible
WebEngineView.
Return:
The new QWebEngineView object.
"""
debug_type = debug.qenum_key(QWebEnginePage, wintype)
background = config.val.tabs.background
log.webview.debug("createWindow with type {}, background {}".format(
debug_type, background))
if wintype == QWebEnginePage.WebBrowserWindow:
# Shift-Alt-Click
target = usertypes.ClickTarget.window
elif wintype == QWebEnginePage.WebDialog:
log.webview.warning("{} requested, but we don't support "
"that!".format(debug_type))
target = usertypes.ClickTarget.tab
elif wintype == QWebEnginePage.WebBrowserTab:
# Middle-click / Ctrl-Click with Shift
# FIXME:qtwebengine this also affects target=_blank links...
if background:
target = usertypes.ClickTarget.tab
else:
target = usertypes.ClickTarget.tab_bg
elif wintype == QWebEnginePage.WebBrowserBackgroundTab:
# Middle-click / Ctrl-Click
if background:
target = usertypes.ClickTarget.tab_bg
else:
target = usertypes.ClickTarget.tab
else:
raise ValueError("Invalid wintype {}".format(debug_type))
tab = shared.get_tab(self._win_id, target)
return tab._widget # pylint: disable=protected-access
class WebEnginePage(QWebEnginePage):
"""Custom QWebEnginePage subclass with qutebrowser-specific features.
Attributes:
_is_shutting_down: Whether the page is currently shutting down.
_theme_color: The theme background color.
Signals:
certificate_error: Emitted on certificate errors.
shutting_down: Emitted when the page is shutting down.
navigation_request: Emitted on acceptNavigationRequest.
"""
certificate_error = pyqtSignal()
shutting_down = pyqtSignal()
navigation_request = pyqtSignal(usertypes.NavigationRequest)
def __init__(self, *, theme_color, profile, parent=None):
super().__init__(profile, parent)
self._is_shutting_down = False
self.featurePermissionRequested.connect(
self._on_feature_permission_requested)
self._theme_color = theme_color
self._set_bg_color()
config.instance.changed.connect(self._set_bg_color)
self.urlChanged.connect(self._inject_userjs)
@config.change_filter('colors.webpage.bg')
def _set_bg_color(self):
col = config.val.colors.webpage.bg
if col is None:
col = self._theme_color
self.setBackgroundColor(col)
@pyqtSlot(QUrl, 'QWebEnginePage::Feature')
def _on_feature_permission_requested(self, url, feature):
"""Ask the user for approval for geolocation/media/etc.."""
options = {
QWebEnginePage.Geolocation: 'content.geolocation',
QWebEnginePage.MediaAudioCapture: 'content.media_capture',
QWebEnginePage.MediaVideoCapture: 'content.media_capture',
QWebEnginePage.MediaAudioVideoCapture: 'content.media_capture',
}
messages = {
QWebEnginePage.Geolocation: 'access your location',
QWebEnginePage.MediaAudioCapture: 'record audio',
QWebEnginePage.MediaVideoCapture: 'record video',
QWebEnginePage.MediaAudioVideoCapture: 'record audio/video',
}
assert options.keys() == messages.keys()
if feature not in options:
log.webview.error("Unhandled feature permission {}".format(
debug.qenum_key(QWebEnginePage, feature)))
self.setFeaturePermission(url, feature,
QWebEnginePage.PermissionDeniedByUser)
return
yes_action = functools.partial(
self.setFeaturePermission, url, feature,
QWebEnginePage.PermissionGrantedByUser)
no_action = functools.partial(
self.setFeaturePermission, url, feature,
QWebEnginePage.PermissionDeniedByUser)
question = shared.feature_permission(
url=url, option=options[feature], msg=messages[feature],
yes_action=yes_action, no_action=no_action,
abort_on=[self.shutting_down, self.loadStarted])
if question is not None:
self.featurePermissionRequestCanceled.connect(
functools.partial(self._on_feature_permission_cancelled,
question, url, feature))
def _on_feature_permission_cancelled(self, question, url, feature,
cancelled_url, cancelled_feature):
"""Slot invoked when a feature permission request was cancelled.
To be used with functools.partial.
"""
if url == cancelled_url and feature == cancelled_feature:
try:
question.abort()
except RuntimeError:
# The question could already be deleted, e.g. because it was
# aborted after a loadStarted signal.
pass
def shutdown(self):
self._is_shutting_down = True
self.shutting_down.emit()
def certificateError(self, error):
"""Handle certificate errors coming from Qt."""
self.certificate_error.emit()
url = error.url()
error = certificateerror.CertificateErrorWrapper(error)
log.webview.debug("Certificate error: {}".format(error))
url_string = url.toDisplayString()
error_page = jinja.render(
'error.html', title="Error loading page: {}".format(url_string),
url=url_string, error=str(error))
if error.is_overridable():
ignore = shared.ignore_certificate_errors(
url, [error], abort_on=[self.loadStarted, self.shutting_down])
else:
log.webview.error("Non-overridable certificate error: "
"{}".format(error))
ignore = False
# We can't really know when to show an error page, as the error might
# have happened when loading some resource.
# However, self.url() is not available yet and self.requestedUrl()
# might not match the URL we get from the error - so we just apply a
# heuristic here.
# See https://bugreports.qt.io/browse/QTBUG-56207
log.webview.debug("ignore {}, URL {}, requested {}".format(
ignore, url, self.requestedUrl()))
if not ignore and url.matches(self.requestedUrl(), QUrl.RemoveScheme):
self.setHtml(error_page)
return ignore
def javaScriptConfirm(self, url, js_msg):
"""Override javaScriptConfirm to use qutebrowser prompts."""
if self._is_shutting_down:
return False
escape_msg = qtutils.version_check('5.11', compiled=False)
try:
return shared.javascript_confirm(url, js_msg,
abort_on=[self.loadStarted,
self.shutting_down],
escape_msg=escape_msg)
except shared.CallSuper:
return super().javaScriptConfirm(url, js_msg)
if PYQT_VERSION > 0x050700:
# WORKAROUND
# Can't override javaScriptPrompt with older PyQt versions
# https://www.riverbankcomputing.com/pipermail/pyqt/2016-November/038293.html
def javaScriptPrompt(self, url, js_msg, default):
"""Override javaScriptPrompt to use qutebrowser prompts."""
escape_msg = qtutils.version_check('5.11', compiled=False)
if self._is_shutting_down:
return (False, "")
try:
return shared.javascript_prompt(url, js_msg, default,
abort_on=[self.loadStarted,
self.shutting_down],
escape_msg=escape_msg)
except shared.CallSuper:
return super().javaScriptPrompt(url, js_msg, default)
def javaScriptAlert(self, url, js_msg):
"""Override javaScriptAlert to use qutebrowser prompts."""
if self._is_shutting_down:
return
escape_msg = qtutils.version_check('5.11', compiled=False)
try:
shared.javascript_alert(url, js_msg,
abort_on=[self.loadStarted,
self.shutting_down],
escape_msg=escape_msg)
except shared.CallSuper:
super().javaScriptAlert(url, js_msg)
def javaScriptConsoleMessage(self, level, msg, line, source):
"""Log javascript messages to qutebrowser's log."""
level_map = {
QWebEnginePage.InfoMessageLevel: usertypes.JsLogLevel.info,
QWebEnginePage.WarningMessageLevel: usertypes.JsLogLevel.warning,
QWebEnginePage.ErrorMessageLevel: usertypes.JsLogLevel.error,
}
shared.javascript_log_message(level_map[level], source, line, msg)
def acceptNavigationRequest(self,
url: QUrl,
typ: QWebEnginePage.NavigationType,
is_main_frame: bool):
"""Override acceptNavigationRequest to forward it to the tab API."""
type_map = {
QWebEnginePage.NavigationTypeLinkClicked:
usertypes.NavigationRequest.Type.link_clicked,
QWebEnginePage.NavigationTypeTyped:
usertypes.NavigationRequest.Type.typed,
QWebEnginePage.NavigationTypeFormSubmitted:
usertypes.NavigationRequest.Type.form_submitted,
QWebEnginePage.NavigationTypeBackForward:
usertypes.NavigationRequest.Type.back_forward,
QWebEnginePage.NavigationTypeReload:
usertypes.NavigationRequest.Type.reloaded,
QWebEnginePage.NavigationTypeOther:
usertypes.NavigationRequest.Type.other,
}
navigation = usertypes.NavigationRequest(url=url,
navigation_type=type_map[typ],
is_main_frame=is_main_frame)
self.navigation_request.emit(navigation)
return navigation.accepted
@pyqtSlot('QUrl')
def _inject_userjs(self, url):
"""Inject userscripts registered for `url` into the current page."""
if qtutils.version_check('5.8'):
# Handled in webenginetab with the builtin Greasemonkey
# support.
return
# Using QWebEnginePage.scripts() to hold the user scripts means
# we don't have to worry ourselves about where to inject the
# page but also means scripts hang around for the tab lifecycle.
# So clear them here.
scripts = self.scripts()
for script in scripts.toList():
if script.name().startswith("GM-"):
log.greasemonkey.debug("Removing script: {}"
.format(script.name()))
removed = scripts.remove(script)
assert removed, script.name()
def _add_script(script, injection_point):
new_script = QWebEngineScript()
new_script.setInjectionPoint(injection_point)
new_script.setWorldId(QWebEngineScript.MainWorld)
new_script.setSourceCode(script.code())
new_script.setName("GM-{}".format(script.name))
new_script.setRunsOnSubFrames(script.runs_on_sub_frames)
log.greasemonkey.debug("Adding script: {}"
.format(new_script.name()))
scripts.insert(new_script)
greasemonkey = objreg.get('greasemonkey')
matching_scripts = greasemonkey.scripts_for(url)
for script in matching_scripts.start:
_add_script(script, QWebEngineScript.DocumentCreation)
for script in matching_scripts.end:
_add_script(script, QWebEngineScript.DocumentReady)
for script in matching_scripts.idle:
_add_script(script, QWebEngineScript.Deferred)
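        # The loops above map Greasemonkey run-at stages onto Qt injection
        # points: document-start -> DocumentCreation,
        # document-end -> DocumentReady, document-idle -> Deferred.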
| 1 | 21,368 | Now that you handle this in `webenginetab.py` you can undo all the changes in this file. | qutebrowser-qutebrowser | py |
@@ -30,7 +30,11 @@ import org.springframework.context.annotation.AnnotationConfigApplicationContext
public class JavaFXApplication extends Application {
public static void main(String[] args) {
- Application.launch(JavaFXApplication.class);
+ try {
+ Application.launch(JavaFXApplication.class);
+ } catch (Exception e) {
+ e.printStackTrace();
+ }
}
@Override | 1 | /*
* Copyright (C) 2015-2017 PÂRIS Quentin
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
package org.phoenicis.javafx;
import javafx.application.Application;
import javafx.scene.image.Image;
import javafx.scene.text.Font;
import javafx.stage.Stage;
import org.phoenicis.javafx.controller.MainController;
import org.phoenicis.multithreading.ControlledThreadPoolExecutorServiceCloser;
import org.springframework.context.ConfigurableApplicationContext;
import org.springframework.context.annotation.AnnotationConfigApplicationContext;
public class JavaFXApplication extends Application {
public static void main(String[] args) {
Application.launch(JavaFXApplication.class);
}
@Override
public void start(Stage primaryStage) {
primaryStage.getIcons().add(new Image(getClass().getResourceAsStream("views/common/phoenicis.png")));
primaryStage.setTitle("Phoenicis");
loadFonts();
ConfigurableApplicationContext applicationContext = new AnnotationConfigApplicationContext(
AppConfiguration.class);
final MainController mainController = applicationContext.getBean(MainController.class);
mainController.show();
mainController.setOnClose(() -> {
applicationContext.getBean(ControlledThreadPoolExecutorServiceCloser.class).setCloseImmediately(true);
applicationContext.close();
});
}
private void loadFonts() {
Font.loadFont(getClass().getResource("views/common/mavenpro/MavenPro-Medium.ttf").toExternalForm(), 12);
Font.loadFont(getClass().getResource("views/common/roboto/Roboto-Medium.ttf").toExternalForm(), 12);
Font.loadFont(getClass().getResource("views/common/roboto/Roboto-Light.ttf").toExternalForm(), 12);
Font.loadFont(getClass().getResource("views/common/roboto/Roboto-Bold.ttf").toExternalForm(), 12);
}
}
| 1 | 11,909 | I think we should log the exception to a log file instead of printing it. The current behavior (not catching the exception) should lead to an automatic print of the exception to the terminal/console. | PhoenicisOrg-phoenicis | java |
@@ -57,7 +57,7 @@ func (r *Helper) Apply(obj []byte) (ApplyResult, error) {
if err != nil {
r.logger.WithError(err).
WithField("stdout", ioStreams.Out.(*bytes.Buffer).String()).
- WithField("stderr", ioStreams.ErrOut.(*bytes.Buffer).String()).Error("running the apply command failed")
+ WithField("stderr", ioStreams.ErrOut.(*bytes.Buffer).String()).Warn("running the apply command failed")
return "", err
}
return changeTracker.GetResult(), nil | 1 | package resource
import (
"bytes"
"io"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/cli-runtime/pkg/genericclioptions"
"k8s.io/cli-runtime/pkg/printers"
kcmdapply "k8s.io/kubernetes/pkg/kubectl/cmd/apply"
cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util"
)
// ApplyResult indicates what type of change was performed
// by calling the Apply function
type ApplyResult string
var (
// ConfiguredApplyResult is returned when a patch was submitted
ConfiguredApplyResult ApplyResult = "configured"
// UnchangedApplyResult is returned when no change occurred
UnchangedApplyResult ApplyResult = "unchanged"
// CreatedApplyResult is returned when a resource was created
CreatedApplyResult ApplyResult = "created"
// UnknownApplyResult is returned when the resulting action could not be determined
UnknownApplyResult ApplyResult = "unknown"
)
// Apply applies the given resource bytes to the target cluster specified by kubeconfig
func (r *Helper) Apply(obj []byte) (ApplyResult, error) {
fileName, err := r.createTempFile("apply-", obj)
if err != nil {
r.logger.WithError(err).Error("failed to create temp file for apply")
return "", err
}
defer r.deleteTempFile(fileName)
factory, err := r.getFactory("")
if err != nil {
r.logger.WithError(err).Error("failed to obtain factory for apply")
return "", err
}
ioStreams := genericclioptions.IOStreams{
In: &bytes.Buffer{},
Out: &bytes.Buffer{},
ErrOut: &bytes.Buffer{},
}
applyOptions, changeTracker, err := r.setupApplyCommand(factory, fileName, ioStreams)
if err != nil {
r.logger.WithError(err).Error("failed to setup apply command")
return "", err
}
err = applyOptions.Run()
if err != nil {
r.logger.WithError(err).
WithField("stdout", ioStreams.Out.(*bytes.Buffer).String()).
WithField("stderr", ioStreams.ErrOut.(*bytes.Buffer).String()).Error("running the apply command failed")
return "", err
}
return changeTracker.GetResult(), nil
}
// ApplyRuntimeObject serializes an object and applies it to the target cluster specified by the kubeconfig.
func (r *Helper) ApplyRuntimeObject(obj runtime.Object, scheme *runtime.Scheme) (ApplyResult, error) {
data, err := r.Serialize(obj, scheme)
if err != nil {
r.logger.WithError(err).Error("cannot serialize runtime object")
return "", err
}
return r.Apply(data)
}
func (r *Helper) setupApplyCommand(f cmdutil.Factory, fileName string, ioStreams genericclioptions.IOStreams) (*kcmdapply.ApplyOptions, *changeTracker, error) {
r.logger.Debug("setting up apply command")
o := kcmdapply.NewApplyOptions(ioStreams)
dynamicClient, err := f.DynamicClient()
if err != nil {
r.logger.WithError(err).Error("cannot obtain dynamic client from factory")
return nil, nil, err
}
o.DeleteOptions = o.DeleteFlags.ToOptions(dynamicClient, o.IOStreams)
o.OpenAPISchema, _ = f.OpenAPISchema()
o.Validator, err = f.Validator(false)
if err != nil {
r.logger.WithError(err).Error("cannot obtain schema to validate objects from factory")
return nil, nil, err
}
o.Builder = f.NewBuilder()
o.Mapper, err = f.ToRESTMapper()
if err != nil {
r.logger.WithError(err).Error("cannot obtain RESTMapper from factory")
return nil, nil, err
}
o.DynamicClient = dynamicClient
o.Namespace, o.EnforceNamespace, err = f.ToRawKubeConfigLoader().Namespace()
if err != nil {
r.logger.WithError(err).Error("cannot obtain namespace from factory")
return nil, nil, err
}
tracker := &changeTracker{
internalToPrinter: func(string) (printers.ResourcePrinter, error) { return o.PrintFlags.ToPrinter() },
}
o.ToPrinter = tracker.ToPrinter
o.DeleteOptions.FilenameOptions.Filenames = []string{fileName}
return o, tracker, nil
}
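// trackerPrinter wraps a kubectl ResourcePrinter and, before delegating to
// it, invokes setResult so the enclosing changeTracker can record which
// apply outcome ("created", "configured" or "unchanged") was printed.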
type trackerPrinter struct {
setResult func()
internalPrinter printers.ResourcePrinter
}
func (p *trackerPrinter) PrintObj(o runtime.Object, w io.Writer) error {
if p.setResult != nil {
p.setResult()
}
return p.internalPrinter.PrintObj(o, w)
}
type changeTracker struct {
result []ApplyResult
internalToPrinter func(string) (printers.ResourcePrinter, error)
}
func (t *changeTracker) GetResult() ApplyResult {
if len(t.result) == 1 {
return t.result[0]
}
return UnknownApplyResult
}
func (t *changeTracker) ToPrinter(name string) (printers.ResourcePrinter, error) {
var f func()
switch name {
case "created":
f = func() { t.result = append(t.result, CreatedApplyResult) }
case "configured":
f = func() { t.result = append(t.result, ConfiguredApplyResult) }
case "unchanged":
f = func() { t.result = append(t.result, UnchangedApplyResult) }
}
p, err := t.internalToPrinter(name)
if err != nil {
return nil, err
}
return &trackerPrinter{
internalPrinter: p,
setResult: f,
}, nil
}
| 1 | 11,339 | These changes to the resource helpers have some more far-reaching implications as they also affect calls made in hive-operator, I believe. It's probably OK still, though. Ideally, the resource helper would not be the one doing the logging, since it cannot know the severity, but that is well beyond something that we should tackle for this PR. I would rather have the hive-operator log warnings than have the syncsetinstance controller log errors. | openshift-hive | go |
@@ -167,7 +167,7 @@ class ExportCategoryTableMap extends TableMap
*/
public function buildRelations()
{
- $this->addRelation('Export', '\\Thelia\\Model\\Export', RelationMap::ONE_TO_MANY, array('id' => 'export_category_id', ), 'CASCADE', 'RESTRICT', 'Exports');
+ $this->addRelation('Export', '\\Thelia\\Model\\Export', RelationMap::ONE_TO_MANY, array('id' => 'export_category_id', ), null, null, 'Exports');
$this->addRelation('ExportCategoryI18n', '\\Thelia\\Model\\ExportCategoryI18n', RelationMap::ONE_TO_MANY, array('id' => 'id', ), 'CASCADE', null, 'ExportCategoryI18ns');
} // buildRelations()
| 1 | <?php
namespace Thelia\Model\Map;
use Propel\Runtime\Propel;
use Propel\Runtime\ActiveQuery\Criteria;
use Propel\Runtime\ActiveQuery\InstancePoolTrait;
use Propel\Runtime\Connection\ConnectionInterface;
use Propel\Runtime\DataFetcher\DataFetcherInterface;
use Propel\Runtime\Exception\PropelException;
use Propel\Runtime\Map\RelationMap;
use Propel\Runtime\Map\TableMap;
use Propel\Runtime\Map\TableMapTrait;
use Thelia\Model\ExportCategory;
use Thelia\Model\ExportCategoryQuery;
/**
* This class defines the structure of the 'export_category' table.
*
*
*
* This map class is used by Propel to do runtime db structure discovery.
* For example, the createSelectSql() method checks the type of a given column used in an
* ORDER BY clause to know whether it needs to apply SQL to make the ORDER BY case-insensitive
* (i.e. if it's a text column type).
*
*/
class ExportCategoryTableMap extends TableMap
{
use InstancePoolTrait;
use TableMapTrait;
/**
* The (dot-path) name of this class
*/
const CLASS_NAME = 'Thelia.Model.Map.ExportCategoryTableMap';
/**
* The default database name for this class
*/
const DATABASE_NAME = 'thelia';
/**
* The table name for this class
*/
const TABLE_NAME = 'export_category';
/**
* The related Propel class for this table
*/
const OM_CLASS = '\\Thelia\\Model\\ExportCategory';
/**
* A class that can be returned by this tableMap
*/
const CLASS_DEFAULT = 'Thelia.Model.ExportCategory';
/**
* The total number of columns
*/
const NUM_COLUMNS = 5;
/**
* The number of lazy-loaded columns
*/
const NUM_LAZY_LOAD_COLUMNS = 0;
/**
* The number of columns to hydrate (NUM_COLUMNS - NUM_LAZY_LOAD_COLUMNS)
*/
const NUM_HYDRATE_COLUMNS = 5;
/**
* the column name for the ID field
*/
const ID = 'export_category.ID';
/**
* the column name for the REF field
*/
const REF = 'export_category.REF';
/**
* the column name for the POSITION field
*/
const POSITION = 'export_category.POSITION';
/**
* the column name for the CREATED_AT field
*/
const CREATED_AT = 'export_category.CREATED_AT';
/**
* the column name for the UPDATED_AT field
*/
const UPDATED_AT = 'export_category.UPDATED_AT';
/**
* The default string format for model objects of the related table
*/
const DEFAULT_STRING_FORMAT = 'YAML';
// i18n behavior
/**
* The default locale to use for translations.
*
* @var string
*/
const DEFAULT_LOCALE = 'en_US';
/**
* holds an array of fieldnames
*
* first dimension keys are the type constants
* e.g. self::$fieldNames[self::TYPE_PHPNAME][0] = 'Id'
*/
protected static $fieldNames = array (
self::TYPE_PHPNAME => array('Id', 'Ref', 'Position', 'CreatedAt', 'UpdatedAt', ),
self::TYPE_STUDLYPHPNAME => array('id', 'ref', 'position', 'createdAt', 'updatedAt', ),
self::TYPE_COLNAME => array(ExportCategoryTableMap::ID, ExportCategoryTableMap::REF, ExportCategoryTableMap::POSITION, ExportCategoryTableMap::CREATED_AT, ExportCategoryTableMap::UPDATED_AT, ),
self::TYPE_RAW_COLNAME => array('ID', 'REF', 'POSITION', 'CREATED_AT', 'UPDATED_AT', ),
self::TYPE_FIELDNAME => array('id', 'ref', 'position', 'created_at', 'updated_at', ),
self::TYPE_NUM => array(0, 1, 2, 3, 4, )
);
/**
* holds an array of keys for quick access to the fieldnames array
*
* first dimension keys are the type constants
* e.g. self::$fieldKeys[self::TYPE_PHPNAME]['Id'] = 0
*/
protected static $fieldKeys = array (
self::TYPE_PHPNAME => array('Id' => 0, 'Ref' => 1, 'Position' => 2, 'CreatedAt' => 3, 'UpdatedAt' => 4, ),
self::TYPE_STUDLYPHPNAME => array('id' => 0, 'ref' => 1, 'position' => 2, 'createdAt' => 3, 'updatedAt' => 4, ),
self::TYPE_COLNAME => array(ExportCategoryTableMap::ID => 0, ExportCategoryTableMap::REF => 1, ExportCategoryTableMap::POSITION => 2, ExportCategoryTableMap::CREATED_AT => 3, ExportCategoryTableMap::UPDATED_AT => 4, ),
self::TYPE_RAW_COLNAME => array('ID' => 0, 'REF' => 1, 'POSITION' => 2, 'CREATED_AT' => 3, 'UPDATED_AT' => 4, ),
self::TYPE_FIELDNAME => array('id' => 0, 'ref' => 1, 'position' => 2, 'created_at' => 3, 'updated_at' => 4, ),
self::TYPE_NUM => array(0, 1, 2, 3, 4, )
);
/**
* Initialize the table attributes and columns
* Relations are not initialized by this method since they are lazy loaded
*
* @return void
* @throws PropelException
*/
public function initialize()
{
// attributes
$this->setName('export_category');
$this->setPhpName('ExportCategory');
$this->setClassName('\\Thelia\\Model\\ExportCategory');
$this->setPackage('Thelia.Model');
$this->setUseIdGenerator(true);
// columns
$this->addPrimaryKey('ID', 'Id', 'INTEGER', true, null, null);
$this->addColumn('REF', 'Ref', 'VARCHAR', true, 255, null);
$this->addColumn('POSITION', 'Position', 'INTEGER', true, null, null);
$this->addColumn('CREATED_AT', 'CreatedAt', 'TIMESTAMP', false, null, null);
$this->addColumn('UPDATED_AT', 'UpdatedAt', 'TIMESTAMP', false, null, null);
} // initialize()
/**
* Build the RelationMap objects for this table relationships
*/
public function buildRelations()
{
$this->addRelation('Export', '\\Thelia\\Model\\Export', RelationMap::ONE_TO_MANY, array('id' => 'export_category_id', ), 'CASCADE', 'RESTRICT', 'Exports');
$this->addRelation('ExportCategoryI18n', '\\Thelia\\Model\\ExportCategoryI18n', RelationMap::ONE_TO_MANY, array('id' => 'id', ), 'CASCADE', null, 'ExportCategoryI18ns');
} // buildRelations()
/**
*
* Gets the list of behaviors registered for this table
*
* @return array Associative array (name => parameters) of behaviors
*/
public function getBehaviors()
{
return array(
'i18n' => array('i18n_table' => '%TABLE%_i18n', 'i18n_phpname' => '%PHPNAME%I18n', 'i18n_columns' => 'title', 'locale_column' => 'locale', 'locale_length' => '5', 'default_locale' => '', 'locale_alias' => '', ),
'timestampable' => array('create_column' => 'created_at', 'update_column' => 'updated_at', ),
);
} // getBehaviors()
/**
* Method to invalidate the instance pool of all tables related to export_category * by a foreign key with ON DELETE CASCADE
*/
public static function clearRelatedInstancePool()
{
// Invalidate objects in ".$this->getClassNameFromBuilder($joinedTableTableMapBuilder)." instance pool,
// since one or more of them may be deleted by ON DELETE CASCADE/SETNULL rule.
ExportTableMap::clearInstancePool();
ExportCategoryI18nTableMap::clearInstancePool();
}
/**
* Retrieves a string version of the primary key from the DB resultset row that can be used to uniquely identify a row in this table.
*
* For tables with a single-column primary key, that simple pkey value will be returned. For tables with
* a multi-column primary key, a serialize()d version of the primary key will be returned.
*
* @param array $row resultset row.
* @param int $offset The 0-based offset for reading from the resultset row.
* @param string $indexType One of the class type constants TableMap::TYPE_PHPNAME, TableMap::TYPE_STUDLYPHPNAME
* TableMap::TYPE_COLNAME, TableMap::TYPE_FIELDNAME, TableMap::TYPE_NUM
*/
public static function getPrimaryKeyHashFromRow($row, $offset = 0, $indexType = TableMap::TYPE_NUM)
{
// If the PK cannot be derived from the row, return NULL.
if ($row[TableMap::TYPE_NUM == $indexType ? 0 + $offset : static::translateFieldName('Id', TableMap::TYPE_PHPNAME, $indexType)] === null) {
return null;
}
return (string) $row[TableMap::TYPE_NUM == $indexType ? 0 + $offset : static::translateFieldName('Id', TableMap::TYPE_PHPNAME, $indexType)];
}
/**
* Retrieves the primary key from the DB resultset row
* For tables with a single-column primary key, that simple pkey value will be returned. For tables with
* a multi-column primary key, an array of the primary key columns will be returned.
*
* @param array $row resultset row.
* @param int $offset The 0-based offset for reading from the resultset row.
* @param string $indexType One of the class type constants TableMap::TYPE_PHPNAME, TableMap::TYPE_STUDLYPHPNAME
* TableMap::TYPE_COLNAME, TableMap::TYPE_FIELDNAME, TableMap::TYPE_NUM
*
* @return mixed The primary key of the row
*/
public static function getPrimaryKeyFromRow($row, $offset = 0, $indexType = TableMap::TYPE_NUM)
{
return (int) $row[
$indexType == TableMap::TYPE_NUM
? 0 + $offset
: self::translateFieldName('Id', TableMap::TYPE_PHPNAME, $indexType)
];
}
/**
* The class that the tableMap will make instances of.
*
* If $withPrefix is true, the returned path
* uses a dot-path notation which is translated into a path
* relative to a location on the PHP include_path.
* (e.g. path.to.MyClass -> 'path/to/MyClass.php')
*
* @param boolean $withPrefix Whether or not to return the path with the class name
* @return string path.to.ClassName
*/
public static function getOMClass($withPrefix = true)
{
return $withPrefix ? ExportCategoryTableMap::CLASS_DEFAULT : ExportCategoryTableMap::OM_CLASS;
}
/**
* Populates an object of the default type or an object that inherit from the default.
*
* @param array $row row returned by DataFetcher->fetch().
* @param int $offset The 0-based offset for reading from the resultset row.
* @param string $indexType The index type of $row. Mostly DataFetcher->getIndexType().
One of the class type constants TableMap::TYPE_PHPNAME, TableMap::TYPE_STUDLYPHPNAME
* TableMap::TYPE_COLNAME, TableMap::TYPE_FIELDNAME, TableMap::TYPE_NUM.
*
* @throws PropelException Any exceptions caught during processing will be
* rethrown wrapped into a PropelException.
* @return array (ExportCategory object, last column rank)
*/
public static function populateObject($row, $offset = 0, $indexType = TableMap::TYPE_NUM)
{
$key = ExportCategoryTableMap::getPrimaryKeyHashFromRow($row, $offset, $indexType);
if (null !== ($obj = ExportCategoryTableMap::getInstanceFromPool($key))) {
// We no longer rehydrate the object, since this can cause data loss.
// See http://www.propelorm.org/ticket/509
// $obj->hydrate($row, $offset, true); // rehydrate
$col = $offset + ExportCategoryTableMap::NUM_HYDRATE_COLUMNS;
} else {
$cls = ExportCategoryTableMap::OM_CLASS;
$obj = new $cls();
$col = $obj->hydrate($row, $offset, false, $indexType);
ExportCategoryTableMap::addInstanceToPool($obj, $key);
}
return array($obj, $col);
}
/**
* The returned array will contain objects of the default type or
* objects that inherit from the default.
*
* @param DataFetcherInterface $dataFetcher
* @return array
* @throws PropelException Any exceptions caught during processing will be
* rethrown wrapped into a PropelException.
*/
public static function populateObjects(DataFetcherInterface $dataFetcher)
{
$results = array();
// set the class once to avoid overhead in the loop
$cls = static::getOMClass(false);
// populate the object(s)
while ($row = $dataFetcher->fetch()) {
$key = ExportCategoryTableMap::getPrimaryKeyHashFromRow($row, 0, $dataFetcher->getIndexType());
if (null !== ($obj = ExportCategoryTableMap::getInstanceFromPool($key))) {
// We no longer rehydrate the object, since this can cause data loss.
// See http://www.propelorm.org/ticket/509
// $obj->hydrate($row, 0, true); // rehydrate
$results[] = $obj;
} else {
$obj = new $cls();
$obj->hydrate($row);
$results[] = $obj;
ExportCategoryTableMap::addInstanceToPool($obj, $key);
} // if key exists
}
return $results;
}
/**
* Add all the columns needed to create a new object.
*
* Note: any columns that were marked with lazyLoad="true" in the
* XML schema will not be added to the select list and only loaded
* on demand.
*
* @param Criteria $criteria object containing the columns to add.
* @param string $alias optional table alias
* @throws PropelException Any exceptions caught during processing will be
* rethrown wrapped into a PropelException.
*/
public static function addSelectColumns(Criteria $criteria, $alias = null)
{
if (null === $alias) {
$criteria->addSelectColumn(ExportCategoryTableMap::ID);
$criteria->addSelectColumn(ExportCategoryTableMap::REF);
$criteria->addSelectColumn(ExportCategoryTableMap::POSITION);
$criteria->addSelectColumn(ExportCategoryTableMap::CREATED_AT);
$criteria->addSelectColumn(ExportCategoryTableMap::UPDATED_AT);
} else {
$criteria->addSelectColumn($alias . '.ID');
$criteria->addSelectColumn($alias . '.REF');
$criteria->addSelectColumn($alias . '.POSITION');
$criteria->addSelectColumn($alias . '.CREATED_AT');
$criteria->addSelectColumn($alias . '.UPDATED_AT');
}
}
/**
* Returns the TableMap related to this object.
* This method is not needed for general use but a specific application could have a need.
* @return TableMap
* @throws PropelException Any exceptions caught during processing will be
* rethrown wrapped into a PropelException.
*/
public static function getTableMap()
{
return Propel::getServiceContainer()->getDatabaseMap(ExportCategoryTableMap::DATABASE_NAME)->getTable(ExportCategoryTableMap::TABLE_NAME);
}
/**
* Add a TableMap instance to the database for this tableMap class.
*/
public static function buildTableMap()
{
$dbMap = Propel::getServiceContainer()->getDatabaseMap(ExportCategoryTableMap::DATABASE_NAME);
if (!$dbMap->hasTable(ExportCategoryTableMap::TABLE_NAME)) {
$dbMap->addTableObject(new ExportCategoryTableMap());
}
}
/**
* Performs a DELETE on the database, given a ExportCategory or Criteria object OR a primary key value.
*
* @param mixed $values Criteria or ExportCategory object or primary key or array of primary keys
* which is used to create the DELETE statement
* @param ConnectionInterface $con the connection to use
* @return int The number of affected rows (if supported by underlying database driver). This includes CASCADE-related rows
* if supported by native driver or if emulated using Propel.
* @throws PropelException Any exceptions caught during processing will be
* rethrown wrapped into a PropelException.
*/
public static function doDelete($values, ConnectionInterface $con = null)
{
if (null === $con) {
$con = Propel::getServiceContainer()->getWriteConnection(ExportCategoryTableMap::DATABASE_NAME);
}
if ($values instanceof Criteria) {
// rename for clarity
$criteria = $values;
} elseif ($values instanceof \Thelia\Model\ExportCategory) { // it's a model object
// create criteria based on pk values
$criteria = $values->buildPkeyCriteria();
} else { // it's a primary key, or an array of pks
$criteria = new Criteria(ExportCategoryTableMap::DATABASE_NAME);
$criteria->add(ExportCategoryTableMap::ID, (array) $values, Criteria::IN);
}
$query = ExportCategoryQuery::create()->mergeWith($criteria);
if ($values instanceof Criteria) { ExportCategoryTableMap::clearInstancePool();
} elseif (!is_object($values)) { // it's a primary key, or an array of pks
foreach ((array) $values as $singleval) { ExportCategoryTableMap::removeInstanceFromPool($singleval);
}
}
return $query->delete($con);
}
/**
* Deletes all rows from the export_category table.
*
* @param ConnectionInterface $con the connection to use
* @return int The number of affected rows (if supported by underlying database driver).
*/
public static function doDeleteAll(ConnectionInterface $con = null)
{
return ExportCategoryQuery::create()->doDeleteAll($con);
}
/**
* Performs an INSERT on the database, given a ExportCategory or Criteria object.
*
* @param mixed $criteria Criteria or ExportCategory object containing data that is used to create the INSERT statement.
* @param ConnectionInterface $con the ConnectionInterface connection to use
* @return mixed The new primary key.
* @throws PropelException Any exceptions caught during processing will be
* rethrown wrapped into a PropelException.
*/
public static function doInsert($criteria, ConnectionInterface $con = null)
{
if (null === $con) {
$con = Propel::getServiceContainer()->getWriteConnection(ExportCategoryTableMap::DATABASE_NAME);
}
if ($criteria instanceof Criteria) {
$criteria = clone $criteria; // rename for clarity
} else {
$criteria = $criteria->buildCriteria(); // build Criteria from ExportCategory object
}
if ($criteria->containsKey(ExportCategoryTableMap::ID) && $criteria->keyContainsValue(ExportCategoryTableMap::ID) ) {
throw new PropelException('Cannot insert a value for auto-increment primary key ('.ExportCategoryTableMap::ID.')');
}
// Set the correct dbName
$query = ExportCategoryQuery::create()->mergeWith($criteria);
try {
// use transaction because $criteria could contain info
// for more than one table (I guess, conceivably)
$con->beginTransaction();
$pk = $query->doInsert($con);
$con->commit();
} catch (PropelException $e) {
$con->rollBack();
throw $e;
}
return $pk;
}
} // ExportCategoryTableMap
// This is the static code needed to register the TableMap for this table with the main Propel class.
//
ExportCategoryTableMap::buildTableMap();
| 1 | 10,314 | do you really want to change the behavior on foreign key ? | thelia-thelia | php |
@@ -183,7 +183,8 @@ void event_batch_destroy (struct event_batch *batch)
if (batch->f)
(void)flux_future_wait_for (batch->f, -1);
if (batch->state_trans) {
- event_publish_state (batch->event, batch->state_trans);
+ if (json_array_size (batch->state_trans) > 0)
+ event_publish_state (batch->event, batch->state_trans);
json_decref (batch->state_trans);
}
if (batch->responses) { | 1 | /************************************************************\
* Copyright 2019 Lawrence Livermore National Security, LLC
* (c.f. AUTHORS, NOTICE.LLNS, COPYING)
*
* This file is part of the Flux resource manager framework.
* For details, see https://github.com/flux-framework.
*
* SPDX-License-Identifier: LGPL-3.0
\************************************************************/
/* event.c - job state machine and eventlog commit batching
*
* event_job_update() implements the job state machine described
* in RFC 21. This function is called when an event occurs for a job,
* to drive changes to job state and flags. For example, an "alloc"
* event transitions a job from SCHED to RUN state.
*
* event_job_action() is called after event_job_update(). It takes actions
* appropriate for job state and flags. For example, in RUN state,
* job shells are started.
*
* Events are logged in the job eventlog in the KVS. For performance,
* multiple updates may be combined into one commit. The location of
* the job eventlog and its contents are described in RFC 16 and RFC 18.
*
* The function event_job_post_pack() posts an event to a job, running
* event_job_update(), event_job_action(), and committing the event to
* the job eventlog, in a delayed batch.
*
* Notes:
* - A KVS commit failure is handled as fatal to the job-manager
* - event_job_action() is idempotent
* - event_ctx_destroy() flushes batched eventlog updates before returning
*/
#if HAVE_CONFIG_H
#include "config.h"
#endif
#include <czmq.h>
#include <jansson.h>
#include <flux/core.h>
#include "alloc.h"
#include "start.h"
#include "drain.h"
#include "wait.h"
#include "event.h"
#include "src/common/libeventlog/eventlog.h"
const double batch_timeout = 0.01;
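/* Batch window in seconds: eventlog updates arriving within this interval
 * are presumably coalesced into a single KVS commit via the batch timer
 * (see timer_cb and event_batch_commit below).
 */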
struct event {
struct job_manager *ctx;
struct event_batch *batch;
flux_watcher_t *timer;
zlist_t *pending;
zlist_t *pub_futures;
};
struct event_batch {
struct event *event;
flux_kvs_txn_t *txn;
flux_future_t *f;
json_t *state_trans;
zlist_t *responses; // responses deferred until batch complete
};
struct event_batch *event_batch_create (struct event *event);
void event_batch_destroy (struct event_batch *batch);
/* Batch commit has completed.
* If there was a commit error, log it and stop the reactor.
* Destroy 'batch'.
*/
void commit_continuation (flux_future_t *f, void *arg)
{
struct event_batch *batch = arg;
struct event *event = batch->event;
struct job_manager *ctx = event->ctx;
if (flux_future_get (batch->f, NULL) < 0) {
flux_log_error (ctx->h, "%s: eventlog update failed", __FUNCTION__);
flux_reactor_stop_error (flux_get_reactor (ctx->h));
}
zlist_remove (event->pending, batch);
event_batch_destroy (batch);
}
/* job-state event publish has completed.
* If there was a publish error, log it and stop the reactor.
* Destroy 'f'.
*/
void publish_continuation (flux_future_t *f, void *arg)
{
struct event *event = arg;
struct job_manager *ctx = event->ctx;
if (flux_future_get (f, NULL) < 0) {
flux_log_error (ctx->h, "%s: event publish failed", __FUNCTION__);
flux_reactor_stop_error (flux_get_reactor (ctx->h));
}
zlist_remove (event->pub_futures, f);
flux_future_destroy (f);
}
/* Close the current batch, if any, and commit it.
*/
void event_batch_commit (struct event *event)
{
struct event_batch *batch = event->batch;
struct job_manager *ctx = event->ctx;
if (batch) {
event->batch = NULL;
if (batch->txn) {
if (!(batch->f = flux_kvs_commit (ctx->h, NULL, 0, batch->txn)))
goto error;
if (flux_future_then (batch->f, -1., commit_continuation, batch) < 0)
goto error;
if (zlist_append (event->pending, batch) < 0)
goto nomem;
}
else { // just publish events and be done
event_batch_destroy (batch);
}
}
return;
nomem:
errno = ENOMEM;
error: // unlikely (e.g. ENOMEM)
flux_log_error (ctx->h, "%s: aborting reactor", __FUNCTION__);
flux_reactor_stop_error (flux_get_reactor (ctx->h));
event_batch_destroy (batch);
}
void timer_cb (flux_reactor_t *r, flux_watcher_t *w, int revents, void *arg)
{
struct job_manager *ctx = arg;
event_batch_commit (ctx->event);
}
void event_publish_state (struct event *event, json_t *state_trans)
{
struct job_manager *ctx = event->ctx;
flux_future_t *f;
if (!(f = flux_event_publish_pack (ctx->h,
"job-state",
0,
"{s:O}",
"transitions",
state_trans))) {
flux_log_error (ctx->h, "%s: flux_event_publish_pack", __FUNCTION__);
goto error;
}
if (flux_future_then (f, -1., publish_continuation, event) < 0) {
flux_future_destroy (f);
flux_log_error (ctx->h, "%s: flux_future_then", __FUNCTION__);
goto error;
}
if (zlist_append (event->pub_futures, f) < 0) {
flux_future_destroy (f);
flux_log_error (ctx->h, "%s: zlist_append", __FUNCTION__);
goto error;
}
return;
error:
flux_reactor_stop_error (flux_get_reactor (ctx->h));
}
/* Besides cleaning up, this function has the following side effects:
* - publish state transition event (if any)
* - respond to deferred responses (if any)
*/
void event_batch_destroy (struct event_batch *batch)
{
if (batch) {
int saved_errno = errno;
flux_kvs_txn_destroy (batch->txn);
if (batch->f)
(void)flux_future_wait_for (batch->f, -1);
if (batch->state_trans) {
event_publish_state (batch->event, batch->state_trans);
json_decref (batch->state_trans);
}
if (batch->responses) {
flux_msg_t *msg;
flux_t *h = batch->event->ctx->h;
while ((msg = zlist_pop (batch->responses))) {
if (flux_send (h, msg, 0) < 0)
flux_log_error (h, "error sending batch response");
flux_msg_decref (msg);
}
zlist_destroy (&batch->responses);
}
flux_future_destroy (batch->f);
free (batch);
errno = saved_errno;
}
}
struct event_batch *event_batch_create (struct event *event)
{
struct event_batch *batch;
if (!(batch = calloc (1, sizeof (*batch))))
return NULL;
if (!(batch->state_trans = json_array ()))
goto nomem;
batch->event = event;
return batch;
nomem:
errno = ENOMEM;
event_batch_destroy (batch);
return NULL;
}
/* Create a new "batch" if there is none.
* No-op if batch already started.
*/
int event_batch_start (struct event *event)
{
if (!event->batch) {
if (!(event->batch = event_batch_create (event)))
return -1;
flux_timer_watcher_reset (event->timer, batch_timeout, 0.);
flux_watcher_start (event->timer);
}
return 0;
}
static int event_batch_commit_event (struct event *event,
struct job *job,
json_t *entry)
{
char key[64];
char *entrystr = NULL;
if (event_batch_start (event) < 0)
return -1;
if (flux_job_kvs_key (key, sizeof (key), job->id, "eventlog") < 0)
return -1;
if (!event->batch->txn && !(event->batch->txn = flux_kvs_txn_create ()))
return -1;
if (!(entrystr = eventlog_entry_encode (entry)))
return -1;
if (flux_kvs_txn_put (event->batch->txn,
FLUX_KVS_APPEND,
key,
entrystr) < 0) {
free (entrystr);
return -1;
}
free (entrystr);
return 0;
}
int event_batch_pub_state (struct event *event, struct job *job,
double timestamp)
{
json_t *o;
if (event_batch_start (event) < 0)
goto error;
if (!(o = json_pack ("[I,s,f]",
job->id,
flux_job_statetostr (job->state, false),
timestamp)))
goto nomem;
if (json_array_append_new (event->batch->state_trans, o)) {
json_decref (o);
goto nomem;
}
return 0;
nomem:
errno = ENOMEM;
error:
return -1;
}
int event_batch_respond (struct event *event, const flux_msg_t *msg)
{
if (event_batch_start (event) < 0)
return -1;
if (!event->batch->responses) {
if (!(event->batch->responses = zlist_new ()))
goto nomem;
}
if (zlist_append (event->batch->responses,
(void *)flux_msg_incref (msg)) < 0) {
flux_msg_decref (msg);
goto nomem;
}
return 0;
nomem:
errno = ENOMEM;
return -1;
}
int event_job_action (struct event *event, struct job *job)
{
struct job_manager *ctx = event->ctx;
switch (job->state) {
case FLUX_JOB_NEW:
break;
case FLUX_JOB_DEPEND:
if (event_job_post_pack (event, job, "depend", NULL) < 0)
return -1;
break;
case FLUX_JOB_SCHED:
if (alloc_enqueue_alloc_request (ctx->alloc, job) < 0)
return -1;
break;
case FLUX_JOB_RUN:
if (start_send_request (ctx->start, job) < 0)
return -1;
break;
case FLUX_JOB_CLEANUP:
if (job->alloc_pending)
alloc_cancel_alloc_request (ctx->alloc, job);
if (job->alloc_queued)
alloc_dequeue_alloc_request (ctx->alloc, job);
/* N.B. start_pending indicates that the start request is still
* expecting responses. The final response is the 'release'
* response with final=true. Thus once the flag is clear,
* it is safe to release all resources to the scheduler.
*/
if (job->has_resources && !job->start_pending
&& !job->free_pending) {
if (alloc_send_free_request (ctx->alloc, job) < 0)
return -1;
}
/* Post cleanup event when cleanup is complete.
*/
if (!job->alloc_queued && !job->alloc_pending
&& !job->free_pending
&& !job->start_pending
&& !job->has_resources) {
if (event_job_post_pack (event, job, "clean", NULL) < 0)
return -1;
}
break;
case FLUX_JOB_INACTIVE:
if ((job->flags & FLUX_JOB_WAITABLE))
wait_notify_inactive (ctx->wait, job);
zhashx_delete (ctx->active_jobs, &job->id);
drain_check (ctx->drain);
break;
}
return 0;
}
int event_submit_context_decode (json_t *context,
int *priority,
uint32_t *userid,
int *flags)
{
if (json_unpack (context, "{ s:i s:i s:i }",
"priority", priority,
"userid", userid,
"flags", flags) < 0) {
errno = EPROTO;
return -1;
}
return 0;
}
int event_priority_context_decode (json_t *context,
int *priority)
{
if (json_unpack (context, "{ s:i }", "priority", priority) < 0) {
errno = EPROTO;
return -1;
}
return 0;
}
int event_exception_context_decode (json_t *context,
int *severity)
{
if (json_unpack (context, "{ s:i }", "severity", severity) < 0) {
errno = EPROTO;
return -1;
}
return 0;
}
int event_release_context_decode (json_t *context,
int *final)
{
*final = 0;
    if (json_unpack (context, "{ s:b }", "final", final) < 0) {
errno = EPROTO;
return -1;
}
return 0;
}
/* This function implements state transitions per RFC 21.
* If FLUX_JOB_WAITABLE flag is set, then on a fatal exception or
* cleanup event, capture the event in job->end_event for flux_job_wait().
*/
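/* For reference, an eventlog entry parsed below is JSON of the form (sketch):
 *
 *   {"timestamp":1234.5, "name":"submit",
 *    "context":{"priority":16, "userid":1000, "flags":0}}
 *
 * with context keys matching the event_*_context_decode() helpers above.
 */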
int event_job_update (struct job *job, json_t *event)
{
double timestamp;
const char *name;
json_t *context;
    if (eventlog_entry_parse (event, &timestamp, &name, &context) < 0)
goto error;
if (!strcmp (name, "submit")) {
if (job->state != FLUX_JOB_NEW)
goto inval;
job->t_submit = timestamp;
if (event_submit_context_decode (context,
&job->priority,
&job->userid,
&job->flags) < 0)
goto error;
job->state = FLUX_JOB_DEPEND;
}
    else if (!strcmp (name, "depend")) {
if (job->state != FLUX_JOB_DEPEND)
goto inval;
job->state = FLUX_JOB_SCHED;
}
else if (!strcmp (name, "priority")) {
if (event_priority_context_decode (context, &job->priority) < 0)
goto error;
}
else if (!strcmp (name, "exception")) {
int severity;
if (job->state == FLUX_JOB_NEW || job->state == FLUX_JOB_INACTIVE)
goto inval;
if (event_exception_context_decode (context, &severity) < 0)
goto error;
if (severity == 0) {
if ((job->flags & FLUX_JOB_WAITABLE) && !job->end_event)
job->end_event = json_incref (event);
job->state = FLUX_JOB_CLEANUP;
}
}
else if (!strcmp (name, "alloc")) {
if (job->state != FLUX_JOB_SCHED && job->state != FLUX_JOB_CLEANUP)
goto inval;
job->has_resources = 1;
if (job->state == FLUX_JOB_SCHED)
job->state = FLUX_JOB_RUN;
}
else if (!strcmp (name, "free")) {
if (job->state != FLUX_JOB_CLEANUP)
goto inval;
job->has_resources = 0;
}
else if (!strcmp (name, "finish")) {
if (job->state != FLUX_JOB_RUN && job->state != FLUX_JOB_CLEANUP)
goto inval;
if (job->state == FLUX_JOB_RUN) {
if ((job->flags & FLUX_JOB_WAITABLE) && !job->end_event)
job->end_event = json_incref (event);
job->state = FLUX_JOB_CLEANUP;
}
}
else if (!strcmp (name, "release")) {
int final;
if (job->state != FLUX_JOB_RUN && job->state != FLUX_JOB_CLEANUP)
goto inval;
if (event_release_context_decode (context, &final) < 0)
goto error;
if (final && job->state == FLUX_JOB_RUN)
goto inval;
}
else if (!strcmp (name, "clean")) {
if (job->state != FLUX_JOB_CLEANUP)
goto inval;
job->state = FLUX_JOB_INACTIVE;
}
return 0;
inval:
errno = EINVAL;
error:
return -1;
}
static int get_timestamp_now (double *timestamp)
{
struct timespec ts;
if (clock_gettime (CLOCK_REALTIME, &ts) < 0)
return -1;
*timestamp = (1E-9 * ts.tv_nsec) + ts.tv_sec;
return 0;
}
int event_job_post_pack (struct event *event,
struct job *job,
const char *name,
const char *context_fmt,
...)
{
va_list ap;
json_t *entry = NULL;
int saved_errno;
double timestamp;
flux_job_state_t old_state = job->state;
va_start (ap, context_fmt);
    if (get_timestamp_now (&timestamp) < 0)
goto error;
if (!(entry = eventlog_entry_vpack (timestamp, name, context_fmt, ap)))
return -1;
if (event_job_update (job, entry) < 0) // modifies job->state
goto error;
if (event_batch_commit_event (event, job, entry) < 0)
goto error;
if (job->state != old_state) {
if (event_batch_pub_state (event, job, timestamp) < 0)
goto error;
}
/* Keep track of running job count.
* If queue reaches idle state, event_job_action() triggers any waiters.
*/
if ((job->state & FLUX_JOB_RUNNING) && !(old_state & FLUX_JOB_RUNNING))
event->ctx->running_jobs++;
else if (!(job->state & FLUX_JOB_RUNNING) && (old_state & FLUX_JOB_RUNNING))
event->ctx->running_jobs--;
if (event_job_action (event, job) < 0)
goto error;
json_decref (entry);
va_end (ap);
return 0;
error:
saved_errno = errno;
json_decref (entry);
va_end (ap);
errno = saved_errno;
return -1;
}
/* Finalizes in-flight batch KVS commits and event pubs (synchronously).
*/
void event_ctx_destroy (struct event *event)
{
if (event) {
int saved_errno = errno;
flux_watcher_destroy (event->timer);
event_batch_commit (event);
if (event->pending) {
struct event_batch *batch;
while ((batch = zlist_pop (event->pending)))
event_batch_destroy (batch); // N.B. can append to pub_futures
}
zlist_destroy (&event->pending);
if (event->pub_futures) {
flux_future_t *f;
while ((f = zlist_pop (event->pub_futures))) {
if (flux_future_get (f, NULL) < 0)
flux_log_error (event->ctx->h,
"error publishing job-state event");
flux_future_destroy (f);
}
}
zlist_destroy (&event->pub_futures);
free (event);
errno = saved_errno;
}
}
struct event *event_ctx_create (struct job_manager *ctx)
{
struct event *event;
if (!(event = calloc (1, sizeof (*event))))
return NULL;
event->ctx = ctx;
if (!(event->timer = flux_timer_watcher_create (flux_get_reactor (ctx->h),
0.,
0.,
timer_cb,
ctx)))
goto error;
if (!(event->pending = zlist_new ()))
goto nomem;
if (!(event->pub_futures = zlist_new ()))
goto nomem;
return event;
nomem:
errno = ENOMEM;
error:
event_ctx_destroy (event);
return NULL;
}
/*
* vi:tabstop=4 shiftwidth=4 expandtab
*/
| 1 | 28,264 | Looks like my bad. Thanks for fixing! | flux-framework-flux-core | c |
@@ -82,6 +82,7 @@ class Command:
no_cmd_split: If true, ';;' to split sub-commands is ignored.
backend: Which backend the command works with (or None if it works with
both)
+ no_replace_variables: Whether or not to replace variables like {url}
_qute_args: The saved data from @cmdutils.argument
_needs_js: Whether the command needs javascript enabled
_modes: The modes the command can be executed in. | 1 | # vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# Copyright 2014-2016 Florian Bruhin (The Compiler) <mail@qutebrowser.org>
#
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
"""Contains the Command class, a skeleton for a command."""
import inspect
import collections
import traceback
from PyQt5.QtWebKit import QWebSettings
from qutebrowser.commands import cmdexc, argparser
from qutebrowser.utils import (log, utils, message, docutils, objreg,
usertypes, typing)
from qutebrowser.utils import debug as debug_utils
class ArgInfo:
"""Information about an argument."""
def __init__(self, win_id=False, count=False, flag=None, hide=False,
metavar=None, completion=None, choices=None):
if win_id and count:
raise TypeError("Argument marked as both count/win_id!")
self.win_id = win_id
self.count = count
self.flag = flag
self.hide = hide
self.metavar = metavar
self.completion = completion
self.choices = choices
def __eq__(self, other):
return (self.win_id == other.win_id and
self.count == other.count and
self.flag == other.flag and
self.hide == other.hide and
self.metavar == other.metavar and
self.completion == other.completion and
self.choices == other.choices)
def __repr__(self):
return utils.get_repr(self, win_id=self.win_id, count=self.count,
flag=self.flag, hide=self.hide,
metavar=self.metavar, completion=self.completion,
choices=self.choices, constructor=True)
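# Illustrative (a sketch): @cmdutils.argument stores one ArgInfo per parameter
# on the handler, which Command later reads back via handler.qute_args, e.g.
#
#   @cmdutils.argument('count', count=True)
#   def scroll(self, count=1): ...
#
# leaves handler.qute_args == {'count': ArgInfo(count=True)}.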
class Command:
"""Base skeleton for a command.
Attributes:
name: The main name of the command.
maxsplit: The maximum amount of splits to do for the commandline, or
None.
hide: Whether to hide the arguments or not.
deprecated: False, or a string to describe why a command is deprecated.
desc: The description of the command.
handler: The handler function to call.
completion: Completions to use for arguments, as a list of strings.
debug: Whether this is a debugging command (only shown with --debug).
parser: The ArgumentParser to use to parse this command.
flags_with_args: A list of flags which take an argument.
no_cmd_split: If true, ';;' to split sub-commands is ignored.
backend: Which backend the command works with (or None if it works with
both)
_qute_args: The saved data from @cmdutils.argument
_needs_js: Whether the command needs javascript enabled
_modes: The modes the command can be executed in.
_not_modes: The modes the command can not be executed in.
_count: The count set for the command.
_instance: The object to bind 'self' to.
_scope: The scope to get _instance for in the object registry.
"""
def __init__(self, *, handler, name, instance=None, maxsplit=None,
hide=False, modes=None, not_modes=None, needs_js=False,
debug=False, ignore_args=False, deprecated=False,
no_cmd_split=False, star_args_optional=False, scope='global',
backend=None):
# I really don't know how to solve this in a better way, I tried.
# pylint: disable=too-many-locals
if modes is not None and not_modes is not None:
raise ValueError("Only modes or not_modes can be given!")
if modes is not None:
for m in modes:
if not isinstance(m, usertypes.KeyMode):
raise TypeError("Mode {} is no KeyMode member!".format(m))
if not_modes is not None:
for m in not_modes:
if not isinstance(m, usertypes.KeyMode):
raise TypeError("Mode {} is no KeyMode member!".format(m))
if scope != 'global' and instance is None:
raise ValueError("Setting scope without setting instance makes "
"no sense!")
self.name = name
self.maxsplit = maxsplit
self.hide = hide
self.deprecated = deprecated
self._instance = instance
self._modes = modes
self._not_modes = not_modes
self._scope = scope
self._needs_js = needs_js
self._star_args_optional = star_args_optional
self.debug = debug
self.ignore_args = ignore_args
self.handler = handler
self.no_cmd_split = no_cmd_split
self.backend = backend
self.docparser = docutils.DocstringParser(handler)
self.parser = argparser.ArgumentParser(
name, description=self.docparser.short_desc,
epilog=self.docparser.long_desc)
self.parser.add_argument('-h', '--help', action=argparser.HelpAction,
default=argparser.SUPPRESS, nargs=0,
help=argparser.SUPPRESS)
self._check_func()
self.opt_args = collections.OrderedDict()
self.namespace = None
self._count = None
self.pos_args = []
self.desc = None
self.flags_with_args = []
# This is checked by future @cmdutils.argument calls so they fail
# (as they'd be silently ignored otherwise)
self._qute_args = getattr(self.handler, 'qute_args', {})
self.handler.qute_args = None
args = self._inspect_func()
self.completion = []
for arg in args:
arg_completion = self.get_arg_info(arg).completion
if arg_completion is not None:
self.completion.append(arg_completion)
def _check_prerequisites(self, win_id):
"""Check if the command is permitted to run currently.
Args:
win_id: The window ID the command is run in.
"""
mode_manager = objreg.get('mode-manager', scope='window',
window=win_id)
curmode = mode_manager.mode
if self._modes is not None and curmode not in self._modes:
mode_names = '/'.join(mode.name for mode in self._modes)
raise cmdexc.PrerequisitesError(
"{}: This command is only allowed in {} mode.".format(
self.name, mode_names))
elif self._not_modes is not None and curmode in self._not_modes:
mode_names = '/'.join(mode.name for mode in self._not_modes)
raise cmdexc.PrerequisitesError(
"{}: This command is not allowed in {} mode.".format(
self.name, mode_names))
if self._needs_js and not QWebSettings.globalSettings().testAttribute(
QWebSettings.JavascriptEnabled):
raise cmdexc.PrerequisitesError(
"{}: This command needs javascript enabled.".format(self.name))
used_backend = usertypes.arg2backend[objreg.get('args').backend]
if self.backend is not None and used_backend != self.backend:
raise cmdexc.PrerequisitesError(
"{}: Only available with {} "
"backend.".format(self.name, self.backend.name))
if self.deprecated:
message.warning(win_id, '{} is deprecated - {}'.format(
self.name, self.deprecated))
def _check_func(self):
"""Make sure the function parameters don't violate any rules."""
signature = inspect.signature(self.handler)
if 'self' in signature.parameters and self._instance is None:
raise TypeError("{} is a class method, but instance was not "
"given!".format(self.name[0]))
elif 'self' not in signature.parameters and self._instance is not None:
raise TypeError("{} is not a class method, but instance was "
"given!".format(self.name[0]))
elif any(param.kind == inspect.Parameter.VAR_KEYWORD
for param in signature.parameters.values()):
raise TypeError("{}: functions with varkw arguments are not "
"supported!".format(self.name[0]))
def get_arg_info(self, param):
"""Get an ArgInfo tuple for the given inspect.Parameter."""
return self._qute_args.get(param.name, ArgInfo())
def _inspect_special_param(self, param):
"""Check if the given parameter is a special one.
Args:
param: The inspect.Parameter to handle.
Return:
True if the parameter is special, False otherwise.
"""
arg_info = self.get_arg_info(param)
if arg_info.count:
if param.default is inspect.Parameter.empty:
raise TypeError("{}: handler has count parameter "
"without default!".format(self.name))
return True
        elif arg_info.win_id:
            return True
        return False
def _inspect_func(self):
"""Inspect the function to get useful informations from it.
Sets instance attributes (desc, type_conv, name_conv) based on the
informations.
Return:
How many user-visible arguments the command has.
"""
signature = inspect.signature(self.handler)
doc = inspect.getdoc(self.handler)
if doc is not None:
self.desc = doc.splitlines()[0].strip()
else:
self.desc = ""
if not self.ignore_args:
for param in signature.parameters.values():
if param.name == 'self':
continue
if self._inspect_special_param(param):
continue
typ = self._get_type(param)
is_bool = typ is bool
kwargs = self._param_to_argparse_kwargs(param, is_bool)
args = self._param_to_argparse_args(param, is_bool)
callsig = debug_utils.format_call(
self.parser.add_argument, args, kwargs,
full=False)
log.commands.vdebug('Adding arg {} of type {} -> {}'.format(
param.name, typ, callsig))
self.parser.add_argument(*args, **kwargs)
return signature.parameters.values()
def _param_to_argparse_kwargs(self, param, is_bool):
"""Get argparse keyword arguments for a parameter.
Args:
param: The inspect.Parameter object to get the args for.
is_bool: Whether the parameter is a boolean.
Return:
A kwargs dict.
"""
kwargs = {}
try:
kwargs['help'] = self.docparser.arg_descs[param.name]
except KeyError:
pass
kwargs['dest'] = param.name
arg_info = self.get_arg_info(param)
if is_bool:
kwargs['action'] = 'store_true'
else:
if arg_info.metavar is not None:
kwargs['metavar'] = arg_info.metavar
else:
kwargs['metavar'] = argparser.arg_name(param.name)
if param.kind == inspect.Parameter.VAR_POSITIONAL:
kwargs['nargs'] = '*' if self._star_args_optional else '+'
elif param.kind == inspect.Parameter.KEYWORD_ONLY:
kwargs['default'] = param.default
elif not is_bool and param.default is not inspect.Parameter.empty:
kwargs['default'] = param.default
kwargs['nargs'] = '?'
return kwargs
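    # Example (illustrative): for a handler ``def open(self, url=None)`` this
    # yields {'dest': 'url', 'metavar': 'url', 'default': None, 'nargs': '?'},
    # plus 'help' if the docstring documents the url argument.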
def _param_to_argparse_args(self, param, is_bool):
"""Get argparse positional arguments for a parameter.
Args:
param: The inspect.Parameter object to get the args for.
is_bool: Whether the parameter is a boolean.
Return:
A list of args.
"""
args = []
name = argparser.arg_name(param.name)
arg_info = self.get_arg_info(param)
if arg_info.flag is not None:
shortname = arg_info.flag
else:
shortname = name[0]
if len(shortname) != 1:
raise ValueError("Flag '{}' of parameter {} (command {}) must be "
"exactly 1 char!".format(shortname, name,
self.name))
if is_bool or param.kind == inspect.Parameter.KEYWORD_ONLY:
long_flag = '--{}'.format(name)
short_flag = '-{}'.format(shortname)
args.append(long_flag)
args.append(short_flag)
self.opt_args[param.name] = long_flag, short_flag
if not is_bool:
self.flags_with_args += [short_flag, long_flag]
else:
if not arg_info.hide:
self.pos_args.append((param.name, name))
return args
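    # Example (illustrative): a keyword-only boolean parameter named ``force``
    # yields the flags ['--force', '-f'] and registers them in self.opt_args.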
def _get_type(self, param):
"""Get the type of an argument from its default value or annotation.
Args:
param: The inspect.Parameter to look at.
"""
if param.annotation is not inspect.Parameter.empty:
return param.annotation
elif param.default is None or param.default is inspect.Parameter.empty:
return None
else:
return type(param.default)
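    # Example (illustrative): for ``def zoom(self, level: int = 100)`` the
    # annotation wins and int is returned; without an annotation the type of
    # the default is used, while a None or missing default yields None.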
def _get_self_arg(self, win_id, param, args):
"""Get the self argument for a function call.
Arguments:
win_id: The window id this command should be executed in.
param: The count parameter.
args: The positional argument list. Gets modified directly.
"""
assert param.kind == inspect.Parameter.POSITIONAL_OR_KEYWORD
if self._scope == 'global':
tab_id = None
win_id = None
elif self._scope == 'tab':
tab_id = 'current'
elif self._scope == 'window':
tab_id = None
else:
raise ValueError("Invalid scope {}!".format(self._scope))
obj = objreg.get(self._instance, scope=self._scope, window=win_id,
tab=tab_id)
args.append(obj)
def _get_count_arg(self, param, args, kwargs):
"""Add the count argument to a function call.
Arguments:
param: The count parameter.
args: The positional argument list. Gets modified directly.
kwargs: The keyword argument dict. Gets modified directly.
"""
if param.kind == inspect.Parameter.POSITIONAL_OR_KEYWORD:
if self._count is not None:
args.append(self._count)
else:
args.append(param.default)
elif param.kind == inspect.Parameter.KEYWORD_ONLY:
if self._count is not None:
kwargs[param.name] = self._count
else:
raise TypeError("{}: invalid parameter type {} for argument "
"{!r}!".format(self.name, param.kind, param.name))
def _get_win_id_arg(self, win_id, param, args, kwargs):
"""Add the win_id argument to a function call.
Arguments:
win_id: The window ID to add.
param: The count parameter.
args: The positional argument list. Gets modified directly.
kwargs: The keyword argument dict. Gets modified directly.
"""
if param.kind == inspect.Parameter.POSITIONAL_OR_KEYWORD:
args.append(win_id)
elif param.kind == inspect.Parameter.KEYWORD_ONLY:
kwargs[param.name] = win_id
else:
raise TypeError("{}: invalid parameter type {} for argument "
"{!r}!".format(self.name, param.kind, param.name))
def _get_param_value(self, param):
"""Get the converted value for an inspect.Parameter."""
value = getattr(self.namespace, param.name)
typ = self._get_type(param)
if isinstance(typ, tuple):
raise TypeError("{}: Legacy tuple type annotation!".format(
self.name))
elif issubclass(typ, typing.Union):
# this is... slightly evil, I know
types = list(typ.__union_params__)
if param.default is not inspect.Parameter.empty:
types.append(type(param.default))
choices = self.get_arg_info(param).choices
value = argparser.multitype_conv(param, types, value,
str_choices=choices)
elif typ is str:
choices = self.get_arg_info(param).choices
value = argparser.type_conv(param, typ, value, str_choices=choices)
elif typ is None:
pass
elif typ is bool: # no type conversion for flags
assert isinstance(value, bool)
else:
value = argparser.type_conv(param, typ, value)
return value
def _get_call_args(self, win_id):
"""Get arguments for a function call.
Args:
win_id: The window id this command should be executed in.
Return:
An (args, kwargs) tuple.
"""
args = []
kwargs = {}
signature = inspect.signature(self.handler)
if self.ignore_args:
if self._instance is not None:
param = list(signature.parameters.values())[0]
self._get_self_arg(win_id, param, args)
return args, kwargs
for i, param in enumerate(signature.parameters.values()):
arg_info = self.get_arg_info(param)
if i == 0 and self._instance is not None:
# Special case for 'self'.
self._get_self_arg(win_id, param, args)
continue
elif arg_info.count:
# Special case for count parameter.
self._get_count_arg(param, args, kwargs)
continue
elif arg_info.win_id:
# Special case for win_id parameter.
self._get_win_id_arg(win_id, param, args, kwargs)
continue
value = self._get_param_value(param)
if param.kind == inspect.Parameter.POSITIONAL_OR_KEYWORD:
args.append(value)
elif param.kind == inspect.Parameter.VAR_POSITIONAL:
if value is not None:
args += value
elif param.kind == inspect.Parameter.KEYWORD_ONLY:
kwargs[param.name] = value
else:
raise TypeError("{}: Invalid parameter type {} for argument "
"'{}'!".format(
self.name, param.kind, param.name))
return args, kwargs
def run(self, win_id, args=None, count=None):
"""Run the command.
Note we don't catch CommandError here as it might happen async.
Args:
win_id: The window ID the command is run in.
args: Arguments to the command.
count: Command repetition count.
"""
dbgout = ["command called:", self.name]
if args:
dbgout.append(str(args))
elif args is None:
args = []
if count is not None:
dbgout.append("(count={})".format(count))
log.commands.debug(' '.join(dbgout))
try:
self.namespace = self.parser.parse_args(args)
except argparser.ArgumentParserError as e:
message.error(win_id, '{}: {}'.format(self.name, e),
stack=traceback.format_exc())
return
except argparser.ArgumentParserExit as e:
log.commands.debug("argparser exited with status {}: {}".format(
e.status, e))
return
self._count = count
self._check_prerequisites(win_id)
posargs, kwargs = self._get_call_args(win_id)
log.commands.debug('Calling {}'.format(
debug_utils.format_call(self.handler, posargs, kwargs)))
self.handler(*posargs, **kwargs)
| 1 | 15,585 | I think "Don't replace variables ..." would be cleaner. | qutebrowser-qutebrowser | py |
@@ -82,8 +82,7 @@ class MPLPlot(DimensionedPlot):
sublabel_size = param.Number(default=18, doc="""
Size of optional subfigure label.""")
- projection = param.ObjectSelector(default=None,
- objects=['3d', 'polar', None], doc="""
+ projection = param.Parameter(default=None, doc="""
The projection of the plot axis, default of None is equivalent to
2D plot, '3d' and 'polar' are also supported.""")
| 1 | from __future__ import division
import numpy as np
import matplotlib as mpl
from mpl_toolkits.mplot3d import Axes3D # noqa (For 3D plots)
from matplotlib import pyplot as plt
from matplotlib import gridspec, animation
import param
from ...core import (OrderedDict, HoloMap, AdjointLayout, NdLayout,
GridSpace, Element, CompositeOverlay, Element3D,
Empty, Collator)
from ...core.options import Store, Compositor
from ...core.util import int_to_roman, int_to_alpha, basestring
from ...core import traversal
from ..plot import DimensionedPlot, GenericLayoutPlot, GenericCompositePlot
from ..util import get_dynamic_mode, initialize_sampled
from .renderer import MPLRenderer
from .util import compute_ratios
class MPLPlot(DimensionedPlot):
"""
An MPLPlot object draws a matplotlib figure object when called or
indexed but can also return a matplotlib animation object as
appropriate. MPLPlots take element objects such as Image, Contours
or Points as inputs and plots them in the appropriate format using
matplotlib. As HoloMaps are supported, all plots support animation
via the anim() method.
"""
renderer = MPLRenderer
sideplots = {}
fig_alpha = param.Number(default=1.0, bounds=(0, 1), doc="""
Alpha of the overall figure background.""")
fig_bounds = param.NumericTuple(default=(0.15, 0.15, 0.85, 0.85),
doc="""
The bounds of the overall figure as a 4-tuple of the form
(left, bottom, right, top), defining the size of the border
around the subplots.""")
fig_inches = param.Parameter(default=4, doc="""
The overall matplotlib figure size in inches. May be set as
an integer in which case it will be used to autocompute a
size. Alternatively may be set with an explicit tuple or list,
in which case it will be applied directly after being scaled
by fig_size. If either the width or height is set to None,
it will be computed automatically.""")
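    # Illustrative settings (a sketch): fig_inches=4 auto-computes a size;
    # fig_inches=(8, None) fixes the width at 8 inches (scaled by fig_size)
    # and fills in the missing height automatically.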
fig_latex = param.Boolean(default=False, doc="""
Whether to use LaTeX text in the overall figure.""")
fig_rcparams = param.Dict(default={}, doc="""
matplotlib rc parameters to apply to the overall figure.""")
fig_size = param.Integer(default=100, bounds=(1, None), doc="""
Size relative to the supplied overall fig_inches in percent.""")
initial_hooks = param.HookList(default=[], doc="""
Optional list of hooks called before plotting the data onto
the axis. The hook is passed the plot object and the displayed
object, other plotting handles can be accessed via plot.handles.""")
final_hooks = param.HookList(default=[], doc="""
Optional list of hooks called when finalizing an axis.
The hook is passed the plot object and the displayed
object, other plotting handles can be accessed via plot.handles.""")
    finalize_hooks = param.HookList(default=[], doc="""
        Deprecated; use final_hooks instead. Optional list of hooks
        called when finalizing an axis. The hook is passed the plot object
        and the displayed object; other handles are in plot.handles.""")
sublabel_format = param.String(default=None, allow_None=True, doc="""
Allows labeling the subaxes in each plot with various formatters
        including {Alpha}, {alpha}, {numeric}, {Roman} and {roman}.""")
sublabel_position = param.NumericTuple(default=(-0.35, 0.85), doc="""
Position relative to the plot for placing the optional subfigure label.""")
sublabel_size = param.Number(default=18, doc="""
Size of optional subfigure label.""")
projection = param.ObjectSelector(default=None,
objects=['3d', 'polar', None], doc="""
The projection of the plot axis, default of None is equivalent to
2D plot, '3d' and 'polar' are also supported.""")
show_frame = param.Boolean(default=True, doc="""
Whether or not to show a complete frame around the plot.""")
_close_figures = True
def __init__(self, fig=None, axis=None, **params):
self._create_fig = True
super(MPLPlot, self).__init__(**params)
# List of handles to matplotlib objects for animation update
scale = self.fig_size/100.
if isinstance(self.fig_inches, (tuple, list)):
self.fig_inches = [None if i is None else i*scale
for i in self.fig_inches]
else:
self.fig_inches *= scale
fig, axis = self._init_axis(fig, axis)
self.handles['fig'] = fig
self.handles['axis'] = axis
if self.final_hooks and self.finalize_hooks:
self.warning('Set either final_hooks or deprecated '
'finalize_hooks, not both.')
        if self.final_hooks:
            self.finalize_hooks = self.final_hooks
def _init_axis(self, fig, axis):
"""
Return an axis which may need to be initialized from
a new figure.
"""
if not fig and self._create_fig:
rc_params = self.fig_rcparams
if self.fig_latex:
rc_params['text.usetex'] = True
with mpl.rc_context(rc=rc_params):
fig = plt.figure()
l, b, r, t = self.fig_bounds
inches = self.fig_inches
fig.subplots_adjust(left=l, bottom=b, right=r, top=t)
fig.patch.set_alpha(self.fig_alpha)
if isinstance(inches, (tuple, list)):
inches = list(inches)
if inches[0] is None:
inches[0] = inches[1]
elif inches[1] is None:
inches[1] = inches[0]
fig.set_size_inches(list(inches))
else:
fig.set_size_inches([inches, inches])
axis = fig.add_subplot(111, projection=self.projection)
axis.set_aspect('auto')
return fig, axis
def _subplot_label(self, axis):
layout_num = self.layout_num if self.subplot else 1
if self.sublabel_format and not self.adjoined and layout_num > 0:
from mpl_toolkits.axes_grid1.anchored_artists import AnchoredText
labels = {}
if '{Alpha}' in self.sublabel_format:
labels['Alpha'] = int_to_alpha(layout_num-1)
elif '{alpha}' in self.sublabel_format:
labels['alpha'] = int_to_alpha(layout_num-1, upper=False)
elif '{numeric}' in self.sublabel_format:
labels['numeric'] = self.layout_num
elif '{Roman}' in self.sublabel_format:
labels['Roman'] = int_to_roman(layout_num)
elif '{roman}' in self.sublabel_format:
labels['roman'] = int_to_roman(layout_num).lower()
at = AnchoredText(self.sublabel_format.format(**labels), loc=3,
bbox_to_anchor=self.sublabel_position, frameon=False,
prop=dict(size=self.sublabel_size, weight='bold'),
bbox_transform=axis.transAxes)
at.patch.set_visible(False)
axis.add_artist(at)
def _finalize_axis(self, key):
"""
General method to finalize the axis and plot.
"""
if 'title' in self.handles:
self.handles['title'].set_visible(self.show_title)
self.drawn = True
if self.subplot:
return self.handles['axis']
else:
fig = self.handles['fig']
if not getattr(self, 'overlaid', False) and self._close_figures:
plt.close(fig)
return fig
@property
def state(self):
return self.handles['fig']
def anim(self, start=0, stop=None, fps=30):
"""
Method to return a matplotlib animation. The start and stop
frames may be specified as well as the fps.
"""
figure = self.initialize_plot()
anim = animation.FuncAnimation(figure, self.update_frame,
frames=self.keys,
interval = 1000.0/fps)
# Close the figure handle
if self._close_figures: plt.close(figure)
return anim
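    # Illustrative usage (a sketch; the writer depends on the local
    # matplotlib/ffmpeg install):
    #   anim = plot.anim(fps=20)
    #   anim.save('output.mp4', writer='ffmpeg')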
def update(self, key):
rc_params = self.fig_rcparams
if self.fig_latex:
rc_params['text.usetex'] = True
mpl.rcParams.update(rc_params)
if len(self) == 1 and key == 0 and not self.drawn:
return self.initialize_plot()
return self.__getitem__(key)
class CompositePlot(GenericCompositePlot, MPLPlot):
"""
    CompositePlot provides a baseclass for plots which coordinate
    multiple subplots to form a Layout.
"""
def update_frame(self, key, ranges=None):
ranges = self.compute_ranges(self.layout, key, ranges)
for subplot in self.subplots.values():
subplot.update_frame(key, ranges=ranges)
title = self._format_title(key) if self.show_title else ''
if 'title' in self.handles:
self.handles['title'].set_text(title)
else:
            title = self.handles['axis'].set_title(
                title, **self._fontsize('title'))
self.handles['title'] = title
class GridPlot(CompositePlot):
"""
Plot a group of elements in a grid layout based on a GridSpace element
object.
"""
aspect = param.Parameter(default='equal', doc="""
Aspect ratios on GridPlot should be automatically determined.""")
padding = param.Number(default=0.1, doc="""
The amount of padding as a fraction of the total Grid size""")
shared_xaxis = param.Boolean(default=False, doc="""
If enabled the x-axes of the GridSpace will be drawn from the
objects inside the Grid rather than the GridSpace dimensions.""")
shared_yaxis = param.Boolean(default=False, doc="""
        If enabled the y-axes of the GridSpace will be drawn from the
objects inside the Grid rather than the GridSpace dimensions.""")
show_frame = param.Boolean(default=False, doc="""
Whether to draw a frame around the Grid.""")
show_legend = param.Boolean(default=False, doc="""
        Legends add too much clutter in a grid and are disabled by default.""")
tick_format = param.String(default="%.2f", doc="""
Formatting string for the GridPlot ticklabels.""")
xaxis = param.ObjectSelector(default='bottom',
objects=['bottom', 'top', None], doc="""
Whether and where to display the xaxis, supported options are
'bottom', 'top' and None.""")
yaxis = param.ObjectSelector(default='left',
objects=['left', 'right', None], doc="""
Whether and where to display the yaxis, supported options are
'left', 'right' and None.""")
xrotation = param.Integer(default=0, bounds=(0, 360), doc="""
Rotation angle of the xticks.""")
yrotation = param.Integer(default=0, bounds=(0, 360), doc="""
Rotation angle of the yticks.""")
def __init__(self, layout, axis=None, create_axes=True, ranges=None,
keys=None, dimensions=None, layout_num=1, **params):
if not isinstance(layout, GridSpace):
raise Exception("GridPlot only accepts GridSpace.")
self.layout = layout
self.cols, self.rows = layout.shape
self.layout_num = layout_num
extra_opts = self.lookup_options(layout, 'plot').options
if not keys or not dimensions:
dimensions, keys = traversal.unique_dimkeys(layout)
if 'uniform' not in params:
params['uniform'] = traversal.uniform(layout)
dynamic, sampled = get_dynamic_mode(layout)
if sampled:
initialize_sampled(layout, dimensions, keys[0])
super(GridPlot, self).__init__(keys=keys, dimensions=dimensions,
dynamic=dynamic,
**dict(extra_opts, **params))
# Compute ranges layoutwise
grid_kwargs = {}
if axis is not None:
bbox = axis.get_position()
l, b, w, h = bbox.x0, bbox.y0, bbox.width, bbox.height
grid_kwargs = {'left': l, 'right': l+w, 'bottom': b, 'top': b+h}
self.position = (l, b, w, h)
self.fig_inches = self._get_size()
self._layoutspec = gridspec.GridSpec(self.rows, self.cols, **grid_kwargs)
self.subplots, self.subaxes, self.layout = self._create_subplots(layout, axis, ranges, create_axes)
def _get_size(self):
max_dim = max(self.layout.shape)
# Reduce plot size as GridSpace gets larger
shape_factor = 1. / max_dim
# Expand small grids to a sensible viewing size
expand_factor = 1 + (max_dim - 1) * 0.1
scale_factor = expand_factor * shape_factor
cols, rows = self.layout.shape
if isinstance(self.fig_inches, (tuple, list)):
fig_inches = list(self.fig_inches)
if fig_inches[0] is None:
fig_inches[0] = fig_inches[1] * (cols/rows)
if fig_inches[1] is None:
fig_inches[1] = fig_inches[0] * (rows/cols)
return fig_inches
else:
fig_inches = (self.fig_inches,)*2
return (scale_factor * cols * fig_inches[0],
scale_factor * rows * fig_inches[1])
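    # Worked example: a 3x3 GridSpace with scalar fig_inches=4 gives
    # shape_factor=1/3 and expand_factor=1.2, so scale_factor=0.4 and the
    # figure size is (0.4*3*4, 0.4*3*4) = (4.8, 4.8) inches.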
def _create_subplots(self, layout, axis, ranges, create_axes):
layout = layout.map(Compositor.collapse_element, [CompositeOverlay],
clone=False)
norm_opts = self._deep_options(layout, 'norm', ['axiswise'], [Element])
axiswise = all(v.get('axiswise', False) for v in norm_opts.values())
if not ranges:
self.handles['fig'].set_size_inches(self.fig_inches)
subplots, subaxes = OrderedDict(), OrderedDict()
frame_ranges = self.compute_ranges(layout, None, ranges)
frame_ranges = OrderedDict([(key, self.compute_ranges(layout, key, frame_ranges))
for key in self.keys])
collapsed_layout = layout.clone(shared_data=False, id=layout.id)
r, c = (0, 0)
for coord in layout.keys(full_grid=True):
if not isinstance(coord, tuple): coord = (coord,)
view = layout.data.get(coord, None)
# Create subplot
if view is not None:
vtype = view.type if isinstance(view, HoloMap) else view.__class__
opts = self.lookup_options(view, 'plot').options
else:
continue
# Create axes
kwargs = {}
if create_axes:
threed = issubclass(vtype, Element3D)
subax = plt.subplot(self._layoutspec[r, c],
projection='3d' if threed else None)
if not axiswise and self.shared_xaxis and self.xaxis is not None:
self.xaxis = 'top'
if not axiswise and self.shared_yaxis and self.yaxis is not None:
self.yaxis = 'right'
# Disable subplot axes depending on shared axis options
# and the position in the grid
if (self.shared_xaxis or self.shared_yaxis) and not axiswise:
if c == 0 and r != 0:
subax.xaxis.set_ticks_position('none')
kwargs['xaxis'] = 'bottom-bare'
if c != 0 and r == 0 and not layout.ndims == 1:
subax.yaxis.set_ticks_position('none')
kwargs['yaxis'] = 'left-bare'
if r != 0 and c != 0:
kwargs['xaxis'] = 'bottom-bare'
kwargs['yaxis'] = 'left-bare'
if not self.shared_xaxis:
kwargs['xaxis'] = 'bottom-bare'
if not self.shared_yaxis:
kwargs['yaxis'] = 'left-bare'
else:
kwargs['xaxis'] = 'bottom-bare'
kwargs['yaxis'] = 'left-bare'
subaxes[(r, c)] = subax
else:
subax = None
if issubclass(vtype, CompositeOverlay) and (c == self.cols - 1 and
r == self.rows//2):
kwargs['show_legend'] = self.show_legend
kwargs['legend_position'] = 'right'
# Create subplot
if view is not None:
params = dict(fig=self.handles['fig'], axis=subax,
dimensions=self.dimensions, show_title=False,
subplot=not create_axes, ranges=frame_ranges,
uniform=self.uniform, keys=self.keys,
show_legend=False)
plotting_class = Store.registry['matplotlib'][vtype]
subplot = plotting_class(view, **dict(opts, **dict(params, **kwargs)))
collapsed_layout[coord] = subplot.layout if isinstance(subplot, CompositePlot) else subplot.hmap
subplots[(r, c)] = subplot
else:
subax.set_visible(False)
if r != self.rows-1:
r += 1
else:
r = 0
c += 1
if create_axes:
self.handles['axis'] = self._layout_axis(layout, axis)
self._adjust_subplots(self.handles['axis'], subaxes)
return subplots, subaxes, collapsed_layout
def initialize_plot(self, ranges=None):
# Get the extent of the layout elements (not the whole layout)
key = self.keys[-1]
axis = self.handles['axis']
subplot_kwargs = dict()
ranges = self.compute_ranges(self.layout, key, ranges)
for subplot in self.subplots.values():
subplot.initialize_plot(ranges=ranges, **subplot_kwargs)
if self.show_title:
title = axis.set_title(self._format_title(key),
**self._fontsize('title'))
self.handles['title'] = title
self._readjust_axes(axis)
self.drawn = True
if self.subplot: return self.handles['axis']
if self._close_figures: plt.close(self.handles['fig'])
return self.handles['fig']
def _readjust_axes(self, axis):
if self.subplot:
axis.set_position(self.position)
if self.aspect == 'equal':
axis.set_aspect(float(self.rows)/self.cols)
self.handles['fig'].canvas.draw()
self._adjust_subplots(self.handles['axis'], self.subaxes)
def _layout_axis(self, layout, axis):
fig = self.handles['fig']
axkwargs = {'gid': str(self.position)} if axis else {}
layout_axis = fig.add_subplot(1,1,1, **axkwargs)
if axis:
axis.set_visible(False)
layout_axis.set_position(self.position)
layout_axis.patch.set_visible(False)
tick_fontsize = self._fontsize('ticks','labelsize',common=False)
if tick_fontsize: layout_axis.tick_params(**tick_fontsize)
# Set labels
layout_axis.set_xlabel(str(layout.kdims[0]),
**self._fontsize('xlabel'))
if layout.ndims == 2:
layout_axis.set_ylabel(str(layout.kdims[1]),
**self._fontsize('ylabel'))
# Compute and set x- and y-ticks
dims = layout.kdims
keys = layout.keys()
if layout.ndims == 1:
dim1_keys = keys
dim2_keys = [0]
layout_axis.get_yaxis().set_visible(False)
else:
dim1_keys, dim2_keys = zip(*keys)
layout_axis.set_ylabel(str(dims[1]))
layout_axis.set_aspect(float(self.rows)/self.cols)
# Process ticks
plot_width = (1.0 - self.padding) / self.cols
border_width = self.padding / (self.cols-1)
xticks = [(plot_width/2)+(r*(plot_width+border_width)) for r in range(self.cols)]
plot_height = (1.0 - self.padding) / self.rows
border_height = self.padding / (self.rows-1) if layout.ndims > 1 else 0
yticks = [(plot_height/2)+(r*(plot_height+border_height)) for r in range(self.rows)]
layout_axis.set_xticks(xticks)
layout_axis.set_xticklabels(self._process_ticklabels(sorted(set(dim1_keys)), dims[0]))
for tick in layout_axis.get_xticklabels():
tick.set_rotation(self.xrotation)
ydim = dims[1] if layout.ndims > 1 else None
layout_axis.set_yticks(yticks)
layout_axis.set_yticklabels(self._process_ticklabels(sorted(set(dim2_keys)), ydim))
for tick in layout_axis.get_yticklabels():
tick.set_rotation(self.yrotation)
if not self.show_frame:
layout_axis.spines['right' if self.yaxis == 'left' else 'left'].set_visible(False)
layout_axis.spines['bottom' if self.xaxis == 'top' else 'top'].set_visible(False)
axis = layout_axis
if self.xaxis is not None:
axis.xaxis.set_ticks_position(self.xaxis)
axis.xaxis.set_label_position(self.xaxis)
else:
axis.xaxis.set_visible(False)
if self.yaxis is not None:
axis.yaxis.set_ticks_position(self.yaxis)
axis.yaxis.set_label_position(self.yaxis)
else:
axis.yaxis.set_visible(False)
for pos in ['left', 'right', 'top', 'bottom']:
axis.spines[pos].set_visible(False)
return layout_axis
def _process_ticklabels(self, labels, dim):
formatted_labels = []
for k in labels:
if dim and dim.value_format:
k = dim.value_format(k)
elif not isinstance(k, (str, type(None))):
k = self.tick_format % k
elif k is None:
k = ''
formatted_labels.append(k)
return formatted_labels
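    # Example: with the default tick_format "%.2f", a numeric key 0.5 formats
    # as '0.50'; a dimension's value_format takes precedence, and None keys
    # render as empty strings.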
def _adjust_subplots(self, axis, subaxes):
bbox = axis.get_position()
l, b, w, h = bbox.x0, bbox.y0, bbox.width, bbox.height
if self.padding:
width_padding = w/(1./self.padding)
height_padding = h/(1./self.padding)
else:
width_padding, height_padding = 0, 0
if self.cols == 1:
b_w = 0
else:
b_w = width_padding / (self.cols - 1)
if self.rows == 1:
b_h = 0
else:
b_h = height_padding / (self.rows - 1)
ax_w = (w - (width_padding if self.cols > 1 else 0)) / self.cols
ax_h = (h - (height_padding if self.rows > 1 else 0)) / self.rows
r, c = (0, 0)
for ax in subaxes.values():
xpos = l + (c*ax_w) + (c * b_w)
ypos = b + (r*ax_h) + (r * b_h)
if r != self.rows-1:
r += 1
else:
r = 0
c += 1
            if ax is not None:
ax.set_position([xpos, ypos, ax_w, ax_h])
class AdjointLayoutPlot(CompositePlot):
"""
    AdjointLayoutPlot allows placing up to three Views in a number of
predefined and fixed layouts, which are defined by the layout_dict
class attribute. This allows placing subviews next to a main plot
in either a 'top' or 'right' position.
    Initially, an AdjointLayoutPlot computes an appropriate layout based
    on the number of Views in the AdjointLayout object it has been given, but
    when embedded in an NdLayout, it can recompute the layout to
match the number of rows and columns as part of a larger grid.
"""
layout_dict = {'Single': ['main'],
'Dual': ['main', 'right'],
'Triple': ['top', None, 'main', 'right'],
'Embedded Dual': [None, 'main']}
def __init__(self, layout, layout_type, subaxes, subplots, **params):
# The AdjointLayout ViewableElement object
self.layout = layout
        # Type may be set to 'Embedded Dual' by a call to grid_situate
self.layout_type = layout_type
self.view_positions = self.layout_dict[self.layout_type]
# The supplied (axes, view) objects as indexed by position
self.subaxes = {pos: ax for ax, pos in zip(subaxes, self.view_positions)}
super(AdjointLayoutPlot, self).__init__(subplots=subplots, **params)
def initialize_plot(self, ranges=None):
"""
Plot all the views contained in the AdjointLayout Object using axes
appropriate to the layout configuration. All the axes are
supplied by LayoutPlot - the purpose of the call is to
invoke subplots with correct options and styles and hide any
empty axes as necessary.
"""
for pos in self.view_positions:
# Pos will be one of 'main', 'top' or 'right' or None
view = self.layout.get(pos, None)
subplot = self.subplots.get(pos, None)
ax = self.subaxes.get(pos, None)
# If no view object or empty position, disable the axis
if None in [view, pos, subplot]:
ax.set_axis_off()
continue
subplot.initialize_plot(ranges=ranges)
self.adjust_positions()
self.drawn = True
def adjust_positions(self):
"""
Make adjustments to the positions of subplots (if available)
relative to the main plot axes as required.
This method is called by LayoutPlot after an initial pass
used to position all the Layouts together. This method allows
LayoutPlots to make final adjustments to the axis positions.
"""
checks = [self.view_positions, self.subaxes, self.subplots]
right = all('right' in check for check in checks)
top = all('top' in check for check in checks)
        if 'main' not in self.subplots or not (top or right):
return
self.handles['fig'].canvas.draw()
main_ax = self.subplots['main'].handles['axis']
bbox = main_ax.get_position()
if right:
ax = self.subaxes['right']
subplot = self.subplots['right']
ax.set_position([bbox.x1 + bbox.width * subplot.border_size,
bbox.y0,
bbox.width * subplot.subplot_size, bbox.height])
if isinstance(subplot, GridPlot):
ax.set_aspect('equal')
if top:
ax = self.subaxes['top']
subplot = self.subplots['top']
ax.set_position([bbox.x0,
bbox.y1 + bbox.height * subplot.border_size,
bbox.width, bbox.height * subplot.subplot_size])
if isinstance(subplot, GridPlot):
ax.set_aspect('equal')
def update_frame(self, key, ranges=None):
for pos in self.view_positions:
subplot = self.subplots.get(pos)
if subplot is not None:
subplot.update_frame(key, ranges)
def __len__(self):
return max([1 if self.keys is None else len(self.keys), 1])
class LayoutPlot(GenericLayoutPlot, CompositePlot):
"""
A LayoutPlot accepts either a Layout or a NdLayout and
displays the elements in a cartesian grid in scanline order.
"""
    absolute_scaling = param.Boolean(default=False, doc="""
If aspect_weight is enabled absolute_scaling determines whether
axes are scaled relative to the widest plot or whether the
aspect scales the axes in absolute terms.""")
aspect_weight = param.Number(default=0, doc="""
Weighting of the individual aspects when computing the Layout
grid aspects and overall figure size.""")
fig_bounds = param.NumericTuple(default=(0.05, 0.05, 0.95, 0.95), doc="""
The bounds of the figure as a 4-tuple of the form
(left, bottom, right, top), defining the size of the border
around the subplots.""")
tight = param.Boolean(default=False, doc="""
Tightly fit the axes in the layout within the fig_bounds
and tight_padding.""")
tight_padding = param.Parameter(default=3, doc="""
Integer or tuple specifying the padding in inches in a tight layout.""")
hspace = param.Number(default=0.5, doc="""
Specifies the space between horizontally adjacent elements in the grid.
Default value is set conservatively to avoid overlap of subplots.""")
vspace = param.Number(default=0.1, doc="""
Specifies the space between vertically adjacent elements in the grid.
Default value is set conservatively to avoid overlap of subplots.""")
fontsize = param.Parameter(default={'title':16}, allow_None=True)
def __init__(self, layout, **params):
super(LayoutPlot, self).__init__(layout=layout, **params)
self.subplots, self.subaxes, self.layout = self._compute_gridspec(layout)
def _compute_gridspec(self, layout):
"""
Computes the tallest and widest cell for each row and column
by examining the Layouts in the GridSpace. The GridSpec is then
instantiated and the LayoutPlots are configured with the
appropriate embedded layout_types. The first element of the
returned tuple is a dictionary of all the LayoutPlots indexed
by row and column. The second dictionary in the tuple supplies
        the grid indices needed to instantiate the axes for each
LayoutPlot.
"""
layout_items = layout.grid_items()
layout_dimensions = layout.kdims if isinstance(layout, NdLayout) else None
layouts = {}
col_widthratios, row_heightratios = {}, {}
for (r, c) in self.coords:
# Get view at layout position and wrap in AdjointLayout
_, view = layout_items.get((r, c), (None, None))
layout_view = view if isinstance(view, AdjointLayout) else AdjointLayout([view])
layouts[(r, c)] = layout_view
# Compute shape of AdjointLayout element
layout_lens = {1:'Single', 2:'Dual', 3:'Triple'}
layout_type = layout_lens[len(layout_view)]
# Get aspects
main = layout_view.main
main = main.last if isinstance(main, HoloMap) else main
main_options = self.lookup_options(main, 'plot').options if main else {}
if main and not isinstance(main_options.get('aspect', 1), basestring):
main_aspect = np.nan if isinstance(main, Empty) else main_options.get('aspect', 1)
main_aspect = self.aspect_weight*main_aspect + 1-self.aspect_weight
else:
main_aspect = np.nan
if layout_type in ['Dual', 'Triple']:
el = layout_view.get('right', None)
eltype = type(el)
if el and eltype in MPLPlot.sideplots:
plot_type = MPLPlot.sideplots[type(el)]
ratio = 0.6*(plot_type.subplot_size+plot_type.border_size)
width_ratios = [4, 4*ratio]
else:
width_ratios = [4, 1]
else:
width_ratios = [4]
inv_aspect = 1./main_aspect if main_aspect else np.NaN
if layout_type in ['Embedded Dual', 'Triple']:
el = layout_view.get('top', None)
eltype = type(el)
if el and eltype in MPLPlot.sideplots:
plot_type = MPLPlot.sideplots[type(el)]
ratio = 0.6*(plot_type.subplot_size+plot_type.border_size)
height_ratios = [4*ratio, 4]
else:
height_ratios = [1, 4]
else:
height_ratios = [4]
if not isinstance(main_aspect, (basestring, type(None))):
width_ratios = [wratio * main_aspect for wratio in width_ratios]
height_ratios = [hratio * inv_aspect for hratio in height_ratios]
layout_shape = (len(width_ratios), len(height_ratios))
# For each row and column record the width and height ratios
# of the LayoutPlot with the most horizontal or vertical splits
# and largest aspect
prev_heights = row_heightratios.get(r, (0, []))
if layout_shape[1] > prev_heights[0]:
row_heightratios[r] = [layout_shape[1], prev_heights[1]]
row_heightratios[r][1].append(height_ratios)
prev_widths = col_widthratios.get(c, (0, []))
if layout_shape[0] > prev_widths[0]:
col_widthratios[c] = (layout_shape[0], prev_widths[1])
col_widthratios[c][1].append(width_ratios)
col_splits = [v[0] for _c, v in sorted(col_widthratios.items())]
row_splits = [v[0] for _r, v in sorted(row_heightratios.items())]
widths = np.array([r for col in col_widthratios.values()
for ratios in col[1] for r in ratios])/4
wr_unnormalized = compute_ratios(col_widthratios, False)
hr_list = compute_ratios(row_heightratios)
wr_list = compute_ratios(col_widthratios)
# Compute the number of rows and cols
cols, rows = len(wr_list), len(hr_list)
wr_list = [r if np.isfinite(r) else 1 for r in wr_list]
hr_list = [r if np.isfinite(r) else 1 for r in hr_list]
width = sum([r if np.isfinite(r) else 1 for r in wr_list])
yscale = width/sum([(1/v)*4 if np.isfinite(v) else 4 for v in wr_unnormalized])
if self.absolute_scaling:
width = width*np.nanmax(widths)
xinches, yinches = None, None
if not isinstance(self.fig_inches, (tuple, list)):
xinches = self.fig_inches * width
yinches = xinches/yscale
elif self.fig_inches[0] is None:
xinches = self.fig_inches[1] * yscale
yinches = self.fig_inches[1]
elif self.fig_inches[1] is None:
xinches = self.fig_inches[0]
yinches = self.fig_inches[0] / yscale
if xinches and yinches:
self.handles['fig'].set_size_inches([xinches, yinches])
self.gs = gridspec.GridSpec(rows, cols,
width_ratios=wr_list,
height_ratios=hr_list,
wspace=self.hspace,
hspace=self.vspace)
# Situate all the Layouts in the grid and compute the gridspec
# indices for all the axes required by each LayoutPlot.
gidx = 0
layout_count = 0
tight = self.tight
collapsed_layout = layout.clone(shared_data=False, id=layout.id)
frame_ranges = self.compute_ranges(layout, None, None)
frame_ranges = OrderedDict([(key, self.compute_ranges(layout, key, frame_ranges))
for key in self.keys])
layout_subplots, layout_axes = {}, {}
for r, c in self.coords:
# Compute the layout type from shape
wsplits = col_splits[c]
hsplits = row_splits[r]
if (wsplits, hsplits) == (1,1):
layout_type = 'Single'
elif (wsplits, hsplits) == (2,1):
layout_type = 'Dual'
elif (wsplits, hsplits) == (1,2):
layout_type = 'Embedded Dual'
elif (wsplits, hsplits) == (2,2):
layout_type = 'Triple'
# Get the AdjoinLayout at the specified coordinate
view = layouts[(r, c)]
positions = AdjointLayoutPlot.layout_dict[layout_type]
# Create temporary subplots to get projections types
# to create the correct subaxes for all plots in the layout
_, _, projs = self._create_subplots(layouts[(r, c)], positions,
None, frame_ranges, create=False)
gidx, gsinds = self.grid_situate(gidx, layout_type, cols)
layout_key, _ = layout_items.get((r, c), (None, None))
if isinstance(layout, NdLayout) and layout_key:
layout_dimensions = OrderedDict(zip(layout_dimensions, layout_key))
# Generate the axes and create the subplots with the appropriate
# axis objects, handling any Empty objects.
obj = layouts[(r, c)]
empty = isinstance(obj.main, Empty)
if empty:
obj = AdjointLayout([])
else:
layout_count += 1
subaxes = [plt.subplot(self.gs[ind], projection=proj)
for ind, proj in zip(gsinds, projs)]
subplot_data = self._create_subplots(obj, positions,
layout_dimensions, frame_ranges,
dict(zip(positions, subaxes)),
num=0 if empty else layout_count)
subplots, adjoint_layout, _ = subplot_data
layout_axes[(r, c)] = subaxes
# Generate the AdjointLayoutsPlot which will coordinate
# plotting of AdjointLayouts in the larger grid
plotopts = self.lookup_options(view, 'plot').options
layout_plot = AdjointLayoutPlot(adjoint_layout, layout_type, subaxes, subplots,
fig=self.handles['fig'], **plotopts)
layout_subplots[(r, c)] = layout_plot
tight = not any(type(p) is GridPlot for p in layout_plot.subplots.values()) and tight
if layout_key:
collapsed_layout[layout_key] = adjoint_layout
# Apply tight layout if enabled and incompatible
# GridPlot isn't present.
if tight:
if isinstance(self.tight_padding, (tuple, list)):
wpad, hpad = self.tight_padding
padding = dict(w_pad=wpad, h_pad=hpad)
else:
padding = dict(w_pad=self.tight_padding, h_pad=self.tight_padding)
self.gs.tight_layout(self.handles['fig'], rect=self.fig_bounds, **padding)
return layout_subplots, layout_axes, collapsed_layout
def grid_situate(self, current_idx, layout_type, subgrid_width):
"""
Situate the current AdjointLayoutPlot in a LayoutPlot. The
LayoutPlot specifies a layout_type into which the AdjointLayoutPlot
must be embedded. This enclosing layout is guaranteed to have
enough cells to display all the views.
Based on this enforced layout format, a starting index
supplied by LayoutPlot (indexing into a large gridspec
arrangement) is updated to the appropriate embedded value. It
will also return a list of gridspec indices associated with
        all the required layout axes.
"""
# Set the layout configuration as situated in a NdLayout
if layout_type == 'Single':
start, inds = current_idx+1, [current_idx]
elif layout_type == 'Dual':
start, inds = current_idx+2, [current_idx, current_idx+1]
bottom_idx = current_idx + subgrid_width
if layout_type == 'Embedded Dual':
bottom = ((current_idx+1) % subgrid_width) == 0
grid_idx = (bottom_idx if bottom else current_idx)+1
start, inds = grid_idx, [current_idx, bottom_idx]
elif layout_type == 'Triple':
bottom = ((current_idx+2) % subgrid_width) == 0
grid_idx = (bottom_idx if bottom else current_idx) + 2
start, inds = grid_idx, [current_idx, current_idx+1,
bottom_idx, bottom_idx+1]
return start, inds
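    # Worked example of the index arithmetic above (not part of the original
    # source), assuming a gridspec with subgrid_width=4 columns, i.e. flat
    # index = row*4 + col:
    #
    #   grid_situate(0, 'Triple', 4)        -> (2, [0, 1, 4, 5])
    #       a 2x2 block covering cells (0,0), (0,1), (1,0), (1,1);
    #       the next layout starts at flat index 2.
    #   grid_situate(2, 'Embedded Dual', 4) -> (3, [2, 6])
    #       a main cell at (0,2) with its adjoined plot directly below.
    #
    # The `bottom` checks detect a layout ending exactly at the row boundary,
    # in which case the returned start index skips to the following row.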
def _create_subplots(self, layout, positions, layout_dimensions, ranges, axes={}, num=1, create=True):
"""
Plot all the views contained in the AdjointLayout Object using axes
appropriate to the layout configuration. All the axes are
supplied by LayoutPlot - the purpose of the call is to
invoke subplots with correct options and styles and hide any
empty axes as necessary.
"""
subplots = {}
projections = []
adjoint_clone = layout.clone(shared_data=False, id=layout.id)
subplot_opts = dict(show_title=False, adjoined=layout)
for pos in positions:
# Pos will be one of 'main', 'top' or 'right' or None
view = layout.get(pos, None)
ax = axes.get(pos, None)
if view is None:
projections.append(None)
continue
# Determine projection type for plot
components = view.traverse(lambda x: x)
projs = ['3d' if isinstance(c, Element3D) else
self.lookup_options(c, 'plot').options.get('projection', None)
for c in components]
projs = [p for p in projs if p is not None]
if len(set(projs)) > 1:
raise Exception("A single axis may only be assigned one projection type")
elif projs:
projections.append(projs[0])
else:
projections.append(None)
if not create:
continue
# Customize plotopts depending on position.
plotopts = self.lookup_options(view, 'plot').options
# Options common for any subplot
override_opts = {}
sublabel_opts = {}
if pos == 'main':
own_params = self.get_param_values(onlychanged=True)
sublabel_opts = {k: v for k, v in own_params
if 'sublabel_' in k}
if not isinstance(view, GridSpace):
override_opts = dict(aspect='square')
elif pos == 'right':
right_opts = dict(invert_axes=True,
xaxis=None)
override_opts = dict(subplot_opts, **right_opts)
elif pos == 'top':
top_opts = dict(yaxis=None)
override_opts = dict(subplot_opts, **top_opts)
# Override the plotopts as required
plotopts = dict(sublabel_opts, **plotopts)
plotopts.update(override_opts, fig=self.handles['fig'])
vtype = view.type if isinstance(view, HoloMap) else view.__class__
if isinstance(view, GridSpace):
plotopts['create_axes'] = ax is not None
if pos == 'main':
plot_type = Store.registry['matplotlib'][vtype]
else:
plot_type = MPLPlot.sideplots[vtype]
num = num if len(self.coords) > 1 else 0
subplots[pos] = plot_type(view, axis=ax, keys=self.keys,
dimensions=self.dimensions,
layout_dimensions=layout_dimensions,
ranges=ranges, subplot=True,
uniform=self.uniform, layout_num=num,
**plotopts)
if isinstance(view, (Element, HoloMap, Collator, CompositeOverlay)):
adjoint_clone[pos] = subplots[pos].hmap
else:
adjoint_clone[pos] = subplots[pos].layout
return subplots, adjoint_clone, projections
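    # Example of the projection resolution in _create_subplots (illustrative,
    # not from the source): a view containing an Element3D yields '3d', while
    # 2d components contribute None and are filtered out, leaving a single
    # '3d' projection. Two distinct non-None projections (e.g. '3d' and
    # 'polar') raise instead, since one matplotlib axis supports only one
    # projection.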
def initialize_plot(self):
axis = self.handles['axis']
key = self.keys[-1]
ranges = self.compute_ranges(self.layout, key, None)
for subplot in self.subplots.values():
subplot.initialize_plot(ranges=ranges)
# Create title handle
if self.show_title and len(self.coords) > 1:
title = self._format_title(key)
title = self.handles['fig'].suptitle(title, **self._fontsize('title'))
self.handles['title'] = title
return self._finalize_axis(None)
class AdjoinedPlot(DimensionedPlot):
aspect = param.Parameter(default='auto', doc="""
Aspect ratios on SideHistogramPlot should be determined by the
AdjointLayoutPlot.""")
bgcolor = param.Parameter(default=(1, 1, 1, 0), doc="""
Make plot background invisible.""")
border_size = param.Number(default=0.25, doc="""
The size of the border expressed as a fraction of the main plot.""")
show_frame = param.Boolean(default=False)
show_title = param.Boolean(default=False, doc="""
Titles should be disabled on all SidePlots to avoid clutter.""")
subplot_size = param.Number(default=0.25, doc="""
        The size of subplots expressed as a fraction of the main plot.""")
show_xlabel = param.Boolean(default=False, doc="""
Whether to show the x-label of the plot. Disabled by default
        because plots are often too cramped to fit the label correctly.""")
| 1 | 14,484 | Do you validate the possible strings? I've not read the code below but we should make sure if a string is supplied it is validate... | holoviz-holoviews | py |
@@ -40,13 +40,11 @@ func AddDiskImportSteps(w *daisy.Workflow, dataDiskInfos []ovfutils.DiskInfo) {
for i, dataDiskInfo := range dataDiskInfos {
dataDiskIndex := i + 1
dataDiskFilePath := dataDiskInfo.FilePath
- diskNames = append(
- diskNames,
- fmt.Sprintf("%v-data-disk-%v", w.Vars["instance_name"].Value, dataDiskIndex))
+ diskNames = append(diskNames, generateDataDiskName(w.Vars["instance_name"].Value, dataDiskIndex))
setupDataDiskStepName := fmt.Sprintf("setup-data-disk-%v", dataDiskIndex)
- diskImporterDiskName := fmt.Sprintf("disk-importer-%v", dataDiskIndex)
- scratchDiskDiskName := fmt.Sprintf("disk-importer-scratch-%v-%v", dataDiskIndex, w.Vars["instance_name"].Value)
+ diskImporterDiskName := fmt.Sprintf("disk-importer-%v-%v", dataDiskIndex, w.ID())
+ scratchDiskDiskName := fmt.Sprintf("disk-importer-scratch-%v-%v", dataDiskIndex, w.ID())
setupDataDiskStep := daisy.NewStepDefaultTimeout(setupDataDiskStepName, w)
setupDataDiskStep.CreateDisks = &daisy.CreateDisks{ | 1 | // Copyright 2019 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License
package daisyovfutils
import (
"fmt"
"github.com/GoogleCloudPlatform/compute-image-tools/cli_tools/gce_ovf_import/ovf_utils"
"github.com/GoogleCloudPlatform/compute-image-tools/daisy"
"google.golang.org/api/compute/v1"
)
const (
createInstanceStepName = "create-instance"
importerDiskSize = "10"
)
// AddDiskImportSteps adds Daisy steps to OVF import workflow to import disks defined in
// dataDiskInfos.
func AddDiskImportSteps(w *daisy.Workflow, dataDiskInfos []ovfutils.DiskInfo) {
	if len(dataDiskInfos) == 0 {
return
}
var diskNames []string
w.Sources["import_image_data.sh"] = "../image_import/import_image.sh"
for i, dataDiskInfo := range dataDiskInfos {
dataDiskIndex := i + 1
dataDiskFilePath := dataDiskInfo.FilePath
diskNames = append(
diskNames,
fmt.Sprintf("%v-data-disk-%v", w.Vars["instance_name"].Value, dataDiskIndex))
setupDataDiskStepName := fmt.Sprintf("setup-data-disk-%v", dataDiskIndex)
diskImporterDiskName := fmt.Sprintf("disk-importer-%v", dataDiskIndex)
scratchDiskDiskName := fmt.Sprintf("disk-importer-scratch-%v-%v", dataDiskIndex, w.Vars["instance_name"].Value)
setupDataDiskStep := daisy.NewStepDefaultTimeout(setupDataDiskStepName, w)
setupDataDiskStep.CreateDisks = &daisy.CreateDisks{
{
Disk: compute.Disk{
Name: diskImporterDiskName,
SourceImage: "projects/compute-image-tools/global/images/family/debian-9-worker",
Type: "pd-ssd",
},
SizeGb: importerDiskSize,
},
{
Disk: compute.Disk{
Name: diskNames[i],
Type: "pd-ssd",
},
SizeGb: "10",
Resource: daisy.Resource{
ExactName: true,
NoCleanup: true,
},
},
{
Disk: compute.Disk{
Name: scratchDiskDiskName,
Type: "pd-ssd",
},
SizeGb: "10",
Resource: daisy.Resource{
ExactName: true,
},
},
}
w.Steps[setupDataDiskStepName] = setupDataDiskStep
createDiskImporterInstanceStepName := fmt.Sprintf("create-data-disk-import-instance-%v", dataDiskIndex)
createDiskImporterInstanceStep := daisy.NewStepDefaultTimeout(createDiskImporterInstanceStepName, w)
sTrue := "true"
dataDiskImporterInstanceName := fmt.Sprintf("data-disk-importer-%v", dataDiskIndex)
createDiskImporterInstanceStep.CreateInstances = &daisy.CreateInstances{
{
Instance: compute.Instance{
Name: dataDiskImporterInstanceName,
Disks: []*compute.AttachedDisk{
{Source: diskImporterDiskName},
{Source: scratchDiskDiskName},
{Source: diskNames[i]}},
MachineType: "n1-standard-4",
Metadata: &compute.Metadata{
Items: []*compute.MetadataItems{
{Key: "block-project-ssh-keys", Value: &sTrue},
{Key: "disk_name", Value: &diskNames[i]},
{Key: "scratch_disk_name", Value: &scratchDiskDiskName},
{Key: "source_disk_file", Value: &dataDiskFilePath},
},
},
NetworkInterfaces: []*compute.NetworkInterface{
{
Network: w.Vars["network"].Value,
Subnetwork: w.Vars["subnet"].Value,
},
},
},
Scopes: []string{
"https://www.googleapis.com/auth/devstorage.read_write",
"https://www.googleapis.com/auth/compute",
},
StartupScript: "import_image_data.sh",
},
}
w.Steps[createDiskImporterInstanceStepName] = createDiskImporterInstanceStep
waitForDataDiskImportInstanceSignalStepName := fmt.Sprintf("wait-for-data-disk-%v-signal", dataDiskIndex)
waitForDataDiskImportInstanceSignalStep := daisy.NewStepDefaultTimeout(waitForDataDiskImportInstanceSignalStepName, w)
waitForDataDiskImportInstanceSignalStep.WaitForInstancesSignal = &daisy.WaitForInstancesSignal{
{
Name: dataDiskImporterInstanceName,
SerialOutput: &daisy.SerialOutput{
Port: 1,
SuccessMatch: "ImportSuccess:",
FailureMatch: []string{"ImportFailed:", "WARNING Failed to download metadata script"},
StatusMatch: "Import:",
},
},
}
w.Steps[waitForDataDiskImportInstanceSignalStepName] = waitForDataDiskImportInstanceSignalStep
deleteDataDiskImportInstanceSignalStepName := fmt.Sprintf("delete-data-disk-%v-import-instance", dataDiskIndex)
deleteDataDiskImportInstanceSignalStep := daisy.NewStepDefaultTimeout(deleteDataDiskImportInstanceSignalStepName, w)
deleteDataDiskImportInstanceSignalStep.DeleteResources = &daisy.DeleteResources{
Instances: []string{dataDiskImporterInstanceName},
}
w.Steps[deleteDataDiskImportInstanceSignalStepName] = deleteDataDiskImportInstanceSignalStep
w.Dependencies[createDiskImporterInstanceStepName] = []string{setupDataDiskStepName}
w.Dependencies[waitForDataDiskImportInstanceSignalStepName] = []string{createDiskImporterInstanceStepName}
w.Dependencies[deleteDataDiskImportInstanceSignalStepName] = []string{waitForDataDiskImportInstanceSignalStepName}
w.Dependencies[createInstanceStepName] = append(
w.Dependencies[createInstanceStepName], deleteDataDiskImportInstanceSignalStepName)
}
// attach newly created disks to the instance
for _, diskName := range diskNames {
(*w.Steps[createInstanceStepName].CreateInstances)[0].Disks =
append(
(*w.Steps[createInstanceStepName].CreateInstances)[0].Disks,
&compute.AttachedDisk{Source: diskName, AutoDelete: true})
}
}
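// generateDataDiskName is referenced by the patch above but is not part of
// this (pre-patch) file. Below is a minimal sketch of what such a helper
// might look like, assuming it simply centralizes the naming scheme that was
// previously inlined in AddDiskImportSteps; the real helper in the PR may do
// more (e.g. truncate names to GCE length limits).
func generateDataDiskName(instanceName string, dataDiskIndex int) string {
	return fmt.Sprintf("%v-data-disk-%v", instanceName, dataDiskIndex)
}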
| 1 | 9,867 | I'd recommend using generateDataDiskName here as well -- might as well benefit from the safety that it gives to protect yourself from future changes to w.ID(). | GoogleCloudPlatform-compute-image-tools | go |
@@ -29,6 +29,11 @@ import java.util.Map;
* @param <F> the concrete Java class of a ContentFile instance.
*/
public interface ContentFile<F> {
+ /**
+ * Returns the ordinal position of the file in a manifest, or null if it was not read from a manifest.
+ */
+ Long pos();
+
/**
* Returns id of the partition spec used for partition metadata.
*/ | 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.iceberg;
import java.nio.ByteBuffer;
import java.util.List;
import java.util.Map;
/**
* Superinterface of {@link DataFile} and {@link DeleteFile} that exposes common methods.
*
* @param <F> the concrete Java class of a ContentFile instance.
*/
public interface ContentFile<F> {
/**
* Returns id of the partition spec used for partition metadata.
*/
int specId();
/**
* Returns type of content stored in the file; one of DATA, POSITION_DELETES, or EQUALITY_DELETES.
*/
FileContent content();
/**
* Returns fully qualified path to the file, suitable for constructing a Hadoop Path.
*/
CharSequence path();
/**
* Returns format of the file.
*/
FileFormat format();
/**
* Returns partition for this file as a {@link StructLike}.
*/
StructLike partition();
/**
* Returns the number of top-level records in the file.
*/
long recordCount();
/**
* Returns the file size in bytes.
*/
long fileSizeInBytes();
/**
* Returns if collected, map from column ID to the size of the column in bytes, null otherwise.
*/
Map<Integer, Long> columnSizes();
/**
* Returns if collected, map from column ID to the count of its non-null values, null otherwise.
*/
Map<Integer, Long> valueCounts();
/**
* Returns if collected, map from column ID to its null value count, null otherwise.
*/
Map<Integer, Long> nullValueCounts();
/**
* Returns if collected, map from column ID to value lower bounds, null otherwise.
*/
Map<Integer, ByteBuffer> lowerBounds();
/**
* Returns if collected, map from column ID to value upper bounds, null otherwise.
*/
Map<Integer, ByteBuffer> upperBounds();
/**
* Returns metadata about how this file is encrypted, or null if the file is stored in plain text.
*/
ByteBuffer keyMetadata();
/**
* Returns list of recommended split locations, if applicable, null otherwise.
* <p>
* When available, this information is used for planning scan tasks whose boundaries
* are determined by these offsets. The returned list must be sorted in ascending order.
*/
List<Long> splitOffsets();
/**
* Returns the set of field IDs used for equality comparison, in equality delete files.
* <p>
* An equality delete file may contain additional data fields that are not used by equality
* comparison. The subset of columns in a delete file to be used in equality comparison are
* tracked by ID. Extra columns can be used to reconstruct changes and metrics from extra
* columns are used during job planning.
*
* @return IDs of the fields used in equality comparison with the records in this delete file
*/
List<Integer> equalityFieldIds();
/**
* Copies this file. Manifest readers can reuse file instances; use
* this method to copy data when collecting files from tasks.
*
* @return a copy of this data file
*/
F copy();
/**
* Copies this file without file stats. Manifest readers can reuse file instances; use
* this method to copy data without stats when collecting files.
*
* @return a copy of this data file, without lower bounds, upper bounds, value counts, or null value counts
*/
F copyWithoutStats();
}
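// Illustrative usage of the copy contract documented above (not from the
// source): manifest readers may reuse a single file instance while
// iterating, so callers that collect entries must copy them first.
//
//   List<DataFile> collected = new ArrayList<>();
//   for (DataFile file : reader) {   // `file` may be reused by the reader
//     collected.add(file.copy());    // snapshot the current state
//   }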
| 1 | 27,366 | qq: do we want to include anything in the name to indicate that it is a position in the manifest? | apache-iceberg | java |
@@ -969,8 +969,11 @@ public class VRBrowserActivity extends PlatformActivity implements WidgetManager
GleanMetricsService.stopImmersive();
Handler handler = new Handler(Looper.getMainLooper());
handler.postDelayed(() -> {
- mWindows.resumeCompositor();
- Log.d(LOGTAG, "Compositor Resumed");
+ if (!mWindows.isPaused()) {
+ Log.d(LOGTAG, "Compositor resume begin");
+ mWindows.resumeCompositor();
+ Log.d(LOGTAG, "Compositor resume end");
+ }
}, 20);
}
| 1 | /* -*- Mode: Java; c-basic-offset: 4; tab-width: 4; indent-tabs-mode: nil; -*-
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
package org.mozilla.vrbrowser;
import android.content.BroadcastReceiver;
import android.content.ComponentCallbacks2;
import android.content.Context;
import android.content.Intent;
import android.content.IntentFilter;
import android.content.res.Configuration;
import android.graphics.Canvas;
import android.graphics.Color;
import android.graphics.Paint;
import android.graphics.PorterDuff;
import android.graphics.SurfaceTexture;
import android.media.AudioManager;
import android.net.Uri;
import android.opengl.GLES11Ext;
import android.opengl.GLES20;
import android.os.Bundle;
import android.os.Handler;
import android.os.Looper;
import android.os.Process;
import android.util.Log;
import android.util.Pair;
import android.view.KeyEvent;
import android.view.Surface;
import android.view.View;
import android.view.ViewTreeObserver;
import android.widget.FrameLayout;
import androidx.annotation.Keep;
import androidx.annotation.NonNull;
import androidx.annotation.Nullable;
import androidx.lifecycle.Lifecycle;
import androidx.lifecycle.LifecycleOwner;
import androidx.lifecycle.LifecycleRegistry;
import androidx.lifecycle.ViewModelStore;
import androidx.lifecycle.ViewModelStoreOwner;
import org.json.JSONObject;
import org.mozilla.geckoview.GeckoRuntime;
import org.mozilla.geckoview.GeckoSession;
import org.mozilla.geckoview.GeckoVRManager;
import org.mozilla.vrbrowser.audio.AudioEngine;
import org.mozilla.vrbrowser.browser.Accounts;
import org.mozilla.vrbrowser.browser.PermissionDelegate;
import org.mozilla.vrbrowser.browser.SettingsStore;
import org.mozilla.vrbrowser.browser.engine.Session;
import org.mozilla.vrbrowser.browser.engine.SessionStore;
import org.mozilla.vrbrowser.crashreporting.CrashReporterService;
import org.mozilla.vrbrowser.crashreporting.GlobalExceptionHandler;
import org.mozilla.vrbrowser.geolocation.GeolocationWrapper;
import org.mozilla.vrbrowser.input.MotionEventGenerator;
import org.mozilla.vrbrowser.search.SearchEngineWrapper;
import org.mozilla.vrbrowser.telemetry.GleanMetricsService;
import org.mozilla.vrbrowser.telemetry.TelemetryWrapper;
import org.mozilla.vrbrowser.ui.OffscreenDisplay;
import org.mozilla.vrbrowser.ui.adapters.Language;
import org.mozilla.vrbrowser.ui.widgets.KeyboardWidget;
import org.mozilla.vrbrowser.ui.widgets.NavigationBarWidget;
import org.mozilla.vrbrowser.ui.widgets.RootWidget;
import org.mozilla.vrbrowser.ui.widgets.TrayWidget;
import org.mozilla.vrbrowser.ui.widgets.UISurfaceTextureRenderer;
import org.mozilla.vrbrowser.ui.widgets.UIWidget;
import org.mozilla.vrbrowser.ui.widgets.Widget;
import org.mozilla.vrbrowser.ui.widgets.WidgetManagerDelegate;
import org.mozilla.vrbrowser.ui.widgets.WidgetPlacement;
import org.mozilla.vrbrowser.ui.widgets.WindowWidget;
import org.mozilla.vrbrowser.ui.widgets.Windows;
import org.mozilla.vrbrowser.ui.widgets.dialogs.CrashDialogWidget;
import org.mozilla.vrbrowser.ui.widgets.dialogs.PromptDialogWidget;
import org.mozilla.vrbrowser.ui.widgets.dialogs.WhatsNewWidget;
import org.mozilla.vrbrowser.ui.widgets.menus.VideoProjectionMenuWidget;
import org.mozilla.vrbrowser.utils.BitmapCache;
import org.mozilla.vrbrowser.utils.ConnectivityReceiver;
import org.mozilla.vrbrowser.utils.ConnectivityReceiver.Delegate;
import org.mozilla.vrbrowser.utils.DeviceType;
import org.mozilla.vrbrowser.utils.LocaleUtils;
import org.mozilla.vrbrowser.utils.ServoUtils;
import org.mozilla.vrbrowser.utils.StringUtils;
import org.mozilla.vrbrowser.utils.SystemUtils;
import java.io.File;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.HashSet;
import java.util.LinkedList;
import java.util.Set;
import java.util.concurrent.CopyOnWriteArrayList;
import java.util.function.Consumer;
public class VRBrowserActivity extends PlatformActivity implements WidgetManagerDelegate, ComponentCallbacks2, LifecycleOwner, ViewModelStoreOwner {
private BroadcastReceiver mCrashReceiver = new BroadcastReceiver() {
@Override
public void onReceive(Context context, Intent intent) {
if((intent.getAction() != null) && intent.getAction().equals(CrashReporterService.CRASH_ACTION)) {
Intent crashIntent = intent.getParcelableExtra(CrashReporterService.DATA_TAG);
handleContentCrashIntent(crashIntent);
}
}
};
private final LifecycleRegistry mLifeCycle;
@NonNull
@Override
public Lifecycle getLifecycle() {
return mLifeCycle;
}
private final ViewModelStore mViewModelStore;
@NonNull
@Override
public ViewModelStore getViewModelStore() {
return mViewModelStore;
}
public VRBrowserActivity() {
mLifeCycle = new LifecycleRegistry(this);
mLifeCycle.setCurrentState(Lifecycle.State.INITIALIZED);
mViewModelStore = new ViewModelStore();
}
class SwipeRunnable implements Runnable {
boolean mCanceled = false;
@Override
public void run() {
if (!mCanceled) {
mLastGesture = NoGesture;
}
}
}
// Used to load the 'native-lib' library on application startup.
static {
System.loadLibrary("native-lib");
}
static final int NoGesture = -1;
static final int GestureSwipeLeft = 0;
static final int GestureSwipeRight = 1;
static final int SwipeDelay = 1000; // milliseconds
static final long RESET_CRASH_COUNT_DELAY = 5000;
static final String LOGTAG = SystemUtils.createLogtag(VRBrowserActivity.class);
HashMap<Integer, Widget> mWidgets;
private int mWidgetHandleIndex = 1;
AudioEngine mAudioEngine;
OffscreenDisplay mOffscreenDisplay;
FrameLayout mWidgetContainer;
int mLastGesture;
SwipeRunnable mLastRunnable;
Handler mHandler = new Handler();
Runnable mAudioUpdateRunnable;
Windows mWindows;
RootWidget mRootWidget;
KeyboardWidget mKeyboard;
NavigationBarWidget mNavigationBar;
CrashDialogWidget mCrashDialog;
TrayWidget mTray;
PermissionDelegate mPermissionDelegate;
LinkedList<UpdateListener> mWidgetUpdateListeners;
LinkedList<PermissionListener> mPermissionListeners;
LinkedList<FocusChangeListener> mFocusChangeListeners;
LinkedList<WorldClickListener> mWorldClickListeners;
CopyOnWriteArrayList<Delegate> mConnectivityListeners;
LinkedList<Runnable> mBackHandlers;
private boolean mIsPresentingImmersive = false;
private Thread mUiThread;
private LinkedList<Pair<Object, Float>> mBrightnessQueue;
private Pair<Object, Float> mCurrentBrightness;
private SearchEngineWrapper mSearchEngineWrapper;
private SettingsStore mSettings;
private ConnectivityReceiver mConnectivityReceiver;
private boolean mConnectionAvailable = true;
private AudioManager mAudioManager;
private Widget mActiveDialog;
private Set<String> mPoorPerformanceWhiteList;
private float mCurrentCylinderDensity = 0;
private boolean callOnAudioManager(Consumer<AudioManager> fn) {
if (mAudioManager == null) {
mAudioManager = (AudioManager)getSystemService(Context.AUDIO_SERVICE);
}
if (mAudioManager != null) {
try {
fn.accept(mAudioManager);
return true;
} catch (Exception e) {
Log.e(LOGTAG, "Caught exception calling AudioManager: " + e.toString());
}
}
return false;
}
private ViewTreeObserver.OnGlobalFocusChangeListener globalFocusListener = new ViewTreeObserver.OnGlobalFocusChangeListener() {
@Override
public void onGlobalFocusChanged(View oldFocus, View newFocus) {
Log.d(LOGTAG, "======> OnGlobalFocusChangeListener: old(" + oldFocus + ") new(" + newFocus + ")");
for (FocusChangeListener listener: mFocusChangeListeners) {
listener.onGlobalFocusChanged(oldFocus, newFocus);
}
}
};
@Override
protected void attachBaseContext(Context base) {
Context newContext = LocaleUtils.init(base);
super.attachBaseContext(newContext);
}
@Override
protected void onCreate(Bundle savedInstanceState) {
((VRBrowserApplication)getApplication()).onActivityCreate();
SettingsStore.getInstance(getBaseContext()).setPid(Process.myPid());
// Fix for infinite restart on startup crashes.
long count = SettingsStore.getInstance(getBaseContext()).getCrashRestartCount();
boolean cancelRestart = count > CrashReporterService.MAX_RESTART_COUNT;
if (cancelRestart) {
super.onCreate(savedInstanceState);
Log.e(LOGTAG, "Cancel Restart");
finish();
return;
}
SettingsStore.getInstance(getBaseContext()).incrementCrashRestartCount();
mHandler.postDelayed(() -> SettingsStore.getInstance(getBaseContext()).resetCrashRestartCount(), RESET_CRASH_COUNT_DELAY);
// Set a global exception handler as soon as possible
GlobalExceptionHandler.register(this.getApplicationContext());
if (DeviceType.isOculusBuild()) {
workaroundGeckoSigAction();
}
mUiThread = Thread.currentThread();
BitmapCache.getInstance(this).onCreate();
Bundle extras = getIntent() != null ? getIntent().getExtras() : null;
SessionStore.get().setContext(this, extras);
SessionStore.get().initializeServices();
SessionStore.get().initializeStores(this);
SessionStore.get().setLocales(LocaleUtils.getPreferredLanguageTags(this));
// Create broadcast receiver for getting crash messages from crash process
IntentFilter intentFilter = new IntentFilter();
intentFilter.addAction(CrashReporterService.CRASH_ACTION);
registerReceiver(mCrashReceiver, intentFilter, BuildConfig.APPLICATION_ID + "." + getString(R.string.app_permission_name), null);
mLastGesture = NoGesture;
super.onCreate(savedInstanceState);
mWidgetUpdateListeners = new LinkedList<>();
mPermissionListeners = new LinkedList<>();
mFocusChangeListeners = new LinkedList<>();
mWorldClickListeners = new LinkedList<>();
mBackHandlers = new LinkedList<>();
mBrightnessQueue = new LinkedList<>();
mConnectivityListeners = new CopyOnWriteArrayList<>();
mCurrentBrightness = Pair.create(null, 1.0f);
mWidgets = new HashMap<>();
mWidgetContainer = new FrameLayout(this);
mPermissionDelegate = new PermissionDelegate(this, this);
mAudioEngine = new AudioEngine(this, null);
mAudioEngine.setEnabled(SettingsStore.getInstance(this).isAudioEnabled());
mAudioEngine.preloadAsync(() -> {
Log.i(LOGTAG, "AudioEngine sounds preloaded!");
// mAudioEngine.playSound(AudioEngine.Sound.AMBIENT, true);
});
mAudioUpdateRunnable = () -> mAudioEngine.update();
mSettings = SettingsStore.getInstance(this);
queueRunnable(() -> {
createOffscreenDisplay();
createCaptureSurface();
});
final String tempPath = getCacheDir().getAbsolutePath();
queueRunnable(() -> setTemporaryFilePath(tempPath));
initializeWidgets();
loadFromIntent(getIntent());
// Setup the search engine
mSearchEngineWrapper = SearchEngineWrapper.get(this);
mSearchEngineWrapper.registerForUpdates();
GeolocationWrapper.INSTANCE.update(this);
mConnectivityReceiver = new ConnectivityReceiver();
mPoorPerformanceWhiteList = new HashSet<>();
checkForCrash();
mLifeCycle.setCurrentState(Lifecycle.State.CREATED);
}
protected void initializeWidgets() {
UISurfaceTextureRenderer.setUseHardwareAcceleration(SettingsStore.getInstance(getBaseContext()).isUIHardwareAccelerationEnabled());
UISurfaceTextureRenderer.setRenderActive(true);
// Empty widget just for handling focus on empty space
mRootWidget = new RootWidget(this);
mRootWidget.setClickCallback(() -> {
for (WorldClickListener listener: mWorldClickListeners) {
listener.onWorldClick();
}
});
// Create Browser navigation widget
mNavigationBar = new NavigationBarWidget(this);
// Create keyboard widget
mKeyboard = new KeyboardWidget(this);
// Windows
mWindows = new Windows(this);
mWindows.setDelegate(new Windows.Delegate() {
@Override
public void onFocusedWindowChanged(@NonNull WindowWidget aFocusedWindow, @Nullable WindowWidget aPrevFocusedWindow) {
attachToWindow(aFocusedWindow, aPrevFocusedWindow);
mTray.setAddWindowVisible(mWindows.canOpenNewWindow());
mNavigationBar.hideAllNotifications();
}
@Override
public void onWindowBorderChanged(@NonNull WindowWidget aChangeWindow) {
mKeyboard.proxifyLayerIfNeeded(mWindows.getCurrentWindows());
}
@Override
public void onWindowsMoved() {
mNavigationBar.hideAllNotifications();
updateWidget(mTray);
}
@Override
public void onWindowClosed() {
mTray.setAddWindowVisible(mWindows.canOpenNewWindow());
mNavigationBar.hideAllNotifications();
updateWidget(mTray);
}
@Override
public void onWindowVideoAvailabilityChanged(@NonNull WindowWidget aWindow) {
@CPULevelFlags int cpuLevel = mWindows.isVideoAvailable() ? WidgetManagerDelegate.CPU_LEVEL_HIGH :
WidgetManagerDelegate.CPU_LEVEL_NORMAL;
queueRunnable(() -> setCPULevelNative(cpuLevel));
}
});
// Create the tray
mTray = new TrayWidget(this);
mTray.addListeners(mWindows);
mTray.setAddWindowVisible(mWindows.canOpenNewWindow());
attachToWindow(mWindows.getFocusedWindow(), null);
addWidgets(Arrays.asList(mRootWidget, mNavigationBar, mKeyboard, mTray));
        // Show the What's New dialog if we haven't shown it yet and this is v6.
if (!SettingsStore.getInstance(this).isWhatsNewDisplayed()) {
final WhatsNewWidget whatsNew = new WhatsNewWidget(this);
whatsNew.setLoginOrigin(Accounts.LoginOrigin.NONE);
whatsNew.getPlacement().parentHandle = mWindows.getFocusedWindow().getHandle();
whatsNew.show(UIWidget.REQUEST_FOCUS);
}
}
private void attachToWindow(@NonNull WindowWidget aWindow, @Nullable WindowWidget aPrevWindow) {
mPermissionDelegate.setParentWidgetHandle(aWindow.getHandle());
mNavigationBar.attachToWindow(aWindow);
mKeyboard.attachToWindow(aWindow);
mTray.attachToWindow(aWindow);
if (aPrevWindow != null) {
updateWidget(mNavigationBar);
updateWidget(mKeyboard);
updateWidget(mTray);
}
}
@Override
protected void onStart() {
SettingsStore.getInstance(getBaseContext()).setPid(Process.myPid());
super.onStart();
TelemetryWrapper.start();
mLifeCycle.setCurrentState(Lifecycle.State.STARTED);
}
@Override
protected void onStop() {
SettingsStore.getInstance(getBaseContext()).setPid(0);
super.onStop();
TelemetryWrapper.stop();
GleanMetricsService.sessionStop();
}
@Override
protected void onPause() {
if (mIsPresentingImmersive) {
// This needs to be sync to ensure that WebVR is correctly paused.
// Also prevents a deadlock in onDestroy when the BrowserWidget is released.
exitImmersiveSync();
}
mAudioEngine.pauseEngine();
mWindows.onPause();
for (Widget widget: mWidgets.values()) {
widget.onPause();
}
mConnectivityReceiver.unregister(this);
// Reset so the dialog will show again on resume.
mConnectionAvailable = true;
if (mOffscreenDisplay != null) {
mOffscreenDisplay.onPause();
}
mWidgetContainer.getViewTreeObserver().removeOnGlobalFocusChangeListener(globalFocusListener);
super.onPause();
UISurfaceTextureRenderer.setRenderActive(false);
}
@Override
protected void onResume() {
UISurfaceTextureRenderer.setRenderActive(true);
MotionEventGenerator.clearDevices();
mWidgetContainer.getViewTreeObserver().addOnGlobalFocusChangeListener(globalFocusListener);
if (mOffscreenDisplay != null) {
mOffscreenDisplay.onResume();
}
mWindows.onResume();
mAudioEngine.resumeEngine();
for (Widget widget: mWidgets.values()) {
widget.onResume();
}
mConnectivityListeners.forEach((listener) -> listener.OnConnectivityChanged(ConnectivityReceiver.isNetworkAvailable(this)));
mConnectivityReceiver.register(this, mConnectivityDelegate);
// If we're signed-in, poll for any new device events (e.g. received tabs) on activity resume.
// There's no push support right now, so this helps with the perception of speedy tab delivery.
((VRBrowserApplication)getApplicationContext()).getAccounts().refreshDevicesAsync();
((VRBrowserApplication)getApplicationContext()).getAccounts().pollForEventsAsync();
super.onResume();
mLifeCycle.setCurrentState(Lifecycle.State.RESUMED);
}
@Override
protected void onDestroy() {
SettingsStore.getInstance(getBaseContext()).setPid(0);
// Unregister the crash service broadcast receiver
unregisterReceiver(mCrashReceiver);
mSearchEngineWrapper.unregisterForUpdates();
for (Widget widget: mWidgets.values()) {
widget.releaseWidget();
}
if (mOffscreenDisplay != null) {
mOffscreenDisplay.release();
}
if (mAudioEngine != null) {
mAudioEngine.release();
}
if (mPermissionDelegate != null) {
mPermissionDelegate.release();
}
// Remove all widget listeners
mWindows.onDestroy();
BitmapCache.getInstance(this).onDestroy();
SessionStore.get().onDestroy();
super.onDestroy();
mLifeCycle.setCurrentState(Lifecycle.State.DESTROYED);
mViewModelStore.clear();
}
@Override
protected void onNewIntent(final Intent intent) {
Log.d(LOGTAG,"VRBrowserActivity onNewIntent");
super.onNewIntent(intent);
setIntent(intent);
final String action = intent.getAction();
if (Intent.ACTION_VIEW.equals(action)) {
loadFromIntent(intent);
} else if (GeckoRuntime.ACTION_CRASHED.equals(intent.getAction())) {
Log.e(LOGTAG, "Restarted after a crash");
}
}
@Override
public void onConfigurationChanged(Configuration newConfig) {
Language language = LocaleUtils.getDisplayLanguage(this);
newConfig.setLocale(language.getLocale());
getBaseContext().getResources().updateConfiguration(newConfig, getBaseContext().getResources().getDisplayMetrics());
LocaleUtils.update(this, language);
SessionStore.get().onConfigurationChanged(newConfig);
mWidgets.forEach((i, widget) -> widget.onConfigurationChanged(newConfig));
super.onConfigurationChanged(newConfig);
}
void loadFromIntent(final Intent intent) {
if (GeckoRuntime.ACTION_CRASHED.equals(intent.getAction())) {
Log.e(LOGTAG,"Loading from crash Intent");
}
Uri uri = intent.getData();
boolean openInWindow = false;
boolean openInTab = false;
boolean openInBackground = false;
Bundle extras = intent.getExtras();
if (extras != null) {
            // If there is no data URI but there is a "url" extra, use that instead
if (uri == null && extras.containsKey("url")) {
uri = Uri.parse(intent.getExtras().getString("url"));
}
// Overwrite the stored homepage
if (extras.containsKey("homepage")) {
Uri homepageUri = Uri.parse(extras.getString("homepage"));
SettingsStore.getInstance(this).setHomepage(homepageUri.toString());
}
            // Open the provided URL in a new tab; if no URL is provided we just open the homepage
if (extras.containsKey("create_new_tab")) {
openInTab = extras.getBoolean("create_new_tab", false);
if (uri == null) {
uri = Uri.parse(SettingsStore.getInstance(this).getHomepage());
}
}
            // Open the tab in the background or foreground; if no URL is provided we just open the homepage
if (extras.containsKey("background")) {
openInBackground = extras.getBoolean("background", false);
if (uri == null) {
uri = Uri.parse(SettingsStore.getInstance(this).getHomepage());
}
}
            // Open the provided URL in a new window; if no URL is provided we just open the homepage
if (extras.containsKey("create_new_window")) {
openInWindow = extras.getBoolean("create_new_window", false);
if (uri == null) {
uri = Uri.parse(SettingsStore.getInstance(this).getHomepage());
}
}
}
// If there is a URI we open it
if (uri != null) {
Log.d(LOGTAG, "Loading URI from intent: " + uri.toString());
if (openInWindow) {
openNewWindow(uri.toString());
} else if (openInTab) {
if (openInBackground) {
openNewTab(uri.toString());
} else {
openNewTabForeground(uri.toString());
}
} else {
SessionStore.get().getActiveSession().loadUri(uri.toString());
}
} else {
mWindows.getFocusedWindow().loadHomeIfNotRestored();
}
}
private ConnectivityReceiver.Delegate mConnectivityDelegate = connected -> {
mConnectivityListeners.forEach((listener) -> listener.OnConnectivityChanged(connected));
mConnectionAvailable = connected;
};
private void checkForCrash() {
final ArrayList<String> files = CrashReporterService.findCrashFiles(getBaseContext());
if (files.isEmpty()) {
Log.d(LOGTAG, "No crash files found.");
return;
}
boolean isCrashReportingEnabled = SettingsStore.getInstance(this).isCrashReportingEnabled();
if (isCrashReportingEnabled) {
SystemUtils.postCrashFiles(this, files);
} else {
if (mCrashDialog == null) {
mCrashDialog = new CrashDialogWidget(this, files);
}
mCrashDialog.show(UIWidget.REQUEST_FOCUS);
}
}
private void handleContentCrashIntent(@NonNull final Intent intent) {
Log.e(LOGTAG, "Got content crashed intent");
final String dumpFile = intent.getStringExtra(GeckoRuntime.EXTRA_MINIDUMP_PATH);
final String extraFile = intent.getStringExtra(GeckoRuntime.EXTRA_EXTRAS_PATH);
Log.d(LOGTAG, "Dump File: " + dumpFile);
Log.d(LOGTAG, "Extras File: " + extraFile);
Log.d(LOGTAG, "Fatal: " + intent.getBooleanExtra(GeckoRuntime.EXTRA_CRASH_FATAL, false));
boolean isCrashReportingEnabled = SettingsStore.getInstance(this).isCrashReportingEnabled();
if (isCrashReportingEnabled) {
SystemUtils.postCrashFiles(this, dumpFile, extraFile);
} else {
if (mCrashDialog == null) {
mCrashDialog = new CrashDialogWidget(this, dumpFile, extraFile);
}
mCrashDialog.show(UIWidget.REQUEST_FOCUS);
}
}
@Override
public void onTrimMemory(int level) {
// Determine which lifecycle or system event was raised.
switch (level) {
case ComponentCallbacks2.TRIM_MEMORY_UI_HIDDEN:
case ComponentCallbacks2.TRIM_MEMORY_BACKGROUND:
case ComponentCallbacks2.TRIM_MEMORY_MODERATE:
case ComponentCallbacks2.TRIM_MEMORY_COMPLETE:
                // Currently ignore these levels. They are handled elsewhere.
break;
case ComponentCallbacks2.TRIM_MEMORY_RUNNING_MODERATE:
case ComponentCallbacks2.TRIM_MEMORY_RUNNING_LOW:
case ComponentCallbacks2.TRIM_MEMORY_RUNNING_CRITICAL:
                // These seem to arrive all at once, so just always suspend inactive sessions.
Log.d(LOGTAG, "Memory pressure, suspending inactive sessions.");
SessionStore.get().suspendAllInactiveSessions();
break;
default:
Log.e(LOGTAG, "onTrimMemory unknown level: " + level);
break;
}
}
@Override
public void onBackPressed() {
if (mIsPresentingImmersive) {
queueRunnable(this::exitImmersiveNative);
return;
}
if (mBackHandlers.size() > 0) {
mBackHandlers.getLast().run();
return;
}
if (!mWindows.handleBack()) {
super.onBackPressed();
}
}
@Override
public boolean dispatchKeyEvent(KeyEvent event) {
if (mKeyboard.dispatchKeyEvent(event)) {
return true;
}
final int keyCode = event.getKeyCode();
if (DeviceType.isOculusBuild()) {
if (event.getKeyCode() == KeyEvent.KEYCODE_SEARCH) {
// Eat search key, otherwise it causes a crash on Oculus
return true;
}
int action = event.getAction();
if (action != KeyEvent.ACTION_DOWN) {
return super.dispatchKeyEvent(event);
}
boolean result;
switch (keyCode) {
case KeyEvent.KEYCODE_VOLUME_UP:
result = callOnAudioManager((AudioManager aManager) -> aManager.adjustStreamVolume(AudioManager.STREAM_MUSIC, AudioManager.ADJUST_RAISE, AudioManager.FLAG_SHOW_UI));
break;
case KeyEvent.KEYCODE_VOLUME_DOWN:
result = callOnAudioManager((AudioManager aManager) -> aManager.adjustStreamVolume(AudioManager.STREAM_MUSIC, AudioManager.ADJUST_LOWER, AudioManager.FLAG_SHOW_UI));
break;
case KeyEvent.KEYCODE_VOLUME_MUTE:
result = callOnAudioManager((AudioManager aManager) -> aManager.adjustStreamVolume(AudioManager.STREAM_MUSIC, AudioManager.ADJUST_MUTE, AudioManager.FLAG_SHOW_UI));
break;
default:
return super.dispatchKeyEvent(event);
}
return result || super.dispatchKeyEvent(event);
}
return super.dispatchKeyEvent(event);
}
final Runnable mExitImmersive = new Runnable() {
@Override
public void run() {
exitImmersiveNative();
synchronized(this) {
this.notifyAll();
}
}
};
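    // exitImmersiveSync() below implements a blocking handshake with the
    // render thread: the runnable is queued while the UI thread still holds
    // the monitor, so the runnable's notifyAll() cannot fire until wait()
    // has released it, which rules out a missed notification.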
private void exitImmersiveSync() {
synchronized (mExitImmersive) {
queueRunnable(mExitImmersive);
try {
mExitImmersive.wait();
} catch (InterruptedException e) {
Log.e(LOGTAG, "Waiting for exit immersive onPause interrupted");
}
}
}
@Keep
@SuppressWarnings("unused")
void dispatchCreateWidget(final int aHandle, final SurfaceTexture aTexture, final int aWidth, final int aHeight) {
runOnUiThread(() -> {
final Widget widget = mWidgets.get(aHandle);
if (widget == null) {
Log.e(LOGTAG, "Widget " + aHandle + " not found");
return;
}
if (aTexture == null) {
Log.d(LOGTAG, "Widget: " + aHandle + " (" + aWidth + "x" + aHeight + ") received a null surface texture.");
} else {
Runnable aFirstDrawCallback = () -> {
if (!widget.isFirstPaintReady()) {
widget.setFirstPaintReady(true);
updateWidget(widget);
}
};
widget.setSurfaceTexture(aTexture, aWidth, aHeight, aFirstDrawCallback);
}
// Add widget to a virtual display for invalidation
View view = (View) widget;
if (view.getParent() == null) {
mWidgetContainer.addView(view, new FrameLayout.LayoutParams(widget.getPlacement().viewWidth(), widget.getPlacement().viewHeight()));
}
});
}
@Keep
@SuppressWarnings("unused")
void dispatchCreateWidgetLayer(final int aHandle, final Surface aSurface, final int aWidth, final int aHeight, final long aNativeCallback) {
runOnUiThread(() -> {
final Widget widget = mWidgets.get(aHandle);
if (widget == null) {
Log.e(LOGTAG, "Widget " + aHandle + " not found");
return;
}
Runnable aFirstDrawCallback = () -> {
if (aNativeCallback != 0) {
queueRunnable(() -> runCallbackNative(aNativeCallback));
}
if (aSurface != null && !widget.isFirstPaintReady()) {
widget.setFirstPaintReady(true);
updateWidget(widget);
}
};
widget.setSurface(aSurface, aWidth, aHeight, aFirstDrawCallback);
UIWidget view = (UIWidget) widget;
// Add widget to a virtual display for invalidation
if (aSurface != null && view.getParent() == null) {
mWidgetContainer.addView(view, new FrameLayout.LayoutParams(widget.getPlacement().viewWidth(), widget.getPlacement().viewHeight()));
} else if (aSurface == null && view.getParent() != null) {
mWidgetContainer.removeView(view);
}
view.setResizing(false);
view.postInvalidate();
});
}
@Keep
@SuppressWarnings("unused")
void handleMotionEvent(final int aHandle, final int aDevice, final boolean aFocused, final boolean aPressed, final float aX, final float aY) {
runOnUiThread(() -> {
Widget widget = mWidgets.get(aHandle);
if (!isWidgetInputEnabled(widget)) {
widget = null; // Fallback to mRootWidget in order to allow world clicks to dismiss UI.
}
float scale = widget != null ? widget.getPlacement().textureScale : 1.0f;
final float x = aX / scale;
final float y = aY / scale;
if (widget == null) {
MotionEventGenerator.dispatch(mRootWidget, aDevice, aFocused, aPressed, x, y);
} else if (widget.getBorderWidth() > 0) {
final int border = widget.getBorderWidth();
MotionEventGenerator.dispatch(widget, aDevice, aFocused, aPressed, x - border, y - border);
} else {
MotionEventGenerator.dispatch(widget, aDevice, aFocused, aPressed, x, y);
}
});
}
@Keep
@SuppressWarnings("unused")
void handleScrollEvent(final int aHandle, final int aDevice, final float aX, final float aY) {
runOnUiThread(() -> {
Widget widget = mWidgets.get(aHandle);
if (!isWidgetInputEnabled(widget)) {
return;
}
if (widget != null) {
float scrollDirection = mSettings.getScrollDirection() == 0 ? 1.0f : -1.0f;
                MotionEventGenerator.dispatchScroll(widget, aDevice, true, aX * scrollDirection, aY * scrollDirection);
} else {
Log.e(LOGTAG, "Failed to find widget for scroll event: " + aHandle);
}
});
}
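    // handleGesture() below is a simple double-swipe recognizer: the first
    // swipe is remembered and expires after SwipeDelay (1s) via SwipeRunnable;
    // a second swipe in the same direction within that window triggers
    // session back/forward navigation.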
@Keep
@SuppressWarnings("unused")
void handleGesture(final int aType) {
runOnUiThread(() -> {
boolean consumed = false;
if ((aType == GestureSwipeLeft) && (mLastGesture == GestureSwipeLeft)) {
Log.d(LOGTAG, "Go back!");
SessionStore.get().getActiveSession().goBack();
consumed = true;
} else if ((aType == GestureSwipeRight) && (mLastGesture == GestureSwipeRight)) {
Log.d(LOGTAG, "Go forward!");
SessionStore.get().getActiveSession().goForward();
consumed = true;
}
if (mLastRunnable != null) {
mLastRunnable.mCanceled = true;
mLastRunnable = null;
}
if (consumed) {
mLastGesture = NoGesture;
} else {
mLastGesture = aType;
mLastRunnable = new SwipeRunnable();
mHandler.postDelayed(mLastRunnable, SwipeDelay);
}
});
}
@SuppressWarnings({"UnusedDeclaration"})
@Keep
void handleBack() {
runOnUiThread(() -> {
// On WAVE VR, the back button no longer seems to work.
if (DeviceType.isWaveBuild()) {
onBackPressed();
return;
}
dispatchKeyEvent(new KeyEvent(KeyEvent.ACTION_DOWN, KeyEvent.KEYCODE_BACK));
            dispatchKeyEvent(new KeyEvent(KeyEvent.ACTION_UP, KeyEvent.KEYCODE_BACK));
});
}
@Keep
@SuppressWarnings({"UnusedDeclaration"})
void handleAudioPose(float qx, float qy, float qz, float qw, float px, float py, float pz) {
mAudioEngine.setPose(qx, qy, qz, qw, px, py, pz);
// https://developers.google.com/vr/reference/android/com/google/vr/sdk/audio/GvrAudioEngine.html#resume()
        // The update method must be called from the main thread at a regular rate.
runOnUiThread(mAudioUpdateRunnable);
}
@Keep
@SuppressWarnings("unused")
void handleResize(final int aHandle, final float aWorldWidth, final float aWorldHeight) {
runOnUiThread(() -> mWindows.getFocusedWindow().handleResizeEvent(aWorldWidth, aWorldHeight));
}
@Keep
@SuppressWarnings("unused")
void handleMoveEnd(final int aHandle, final float aDeltaX, final float aDeltaY, final float aDeltaZ, final float aRotation) {
runOnUiThread(() -> {
Widget widget = mWidgets.get(aHandle);
if (widget != null) {
widget.handleMoveEvent(aDeltaX, aDeltaY, aDeltaZ, aRotation);
}
});
}
@Keep
@SuppressWarnings("unused")
void registerExternalContext(long aContext) {
ServoUtils.setExternalContext(aContext);
GeckoVRManager.setExternalContext(aContext);
}
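    // Handshake used when a page enters immersive presentation: the render
    // thread asks the UI thread to pause the GeckoView compositor and blocks
    // until `done` is set; the flag plus the while-loop in
    // pauseGeckoViewCompositor() guards against spurious wakeups.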
class PauseCompositorRunnable implements Runnable {
public boolean done;
@Override
public void run() {
synchronized (VRBrowserActivity.this) {
Log.d(LOGTAG, "About to pause Compositor");
mWindows.pauseCompositor();
Log.d(LOGTAG, "Compositor Paused");
done = true;
VRBrowserActivity.this.notify();
}
}
}
@Keep
@SuppressWarnings("unused")
void pauseGeckoViewCompositor() {
if (Thread.currentThread() == mUiThread) {
return;
}
mIsPresentingImmersive = true;
mWindows.enterImmersiveMode();
TelemetryWrapper.startImmersive();
GleanMetricsService.startImmersive();
PauseCompositorRunnable runnable = new PauseCompositorRunnable();
synchronized (this) {
runOnUiThread(runnable);
while (!runnable.done) {
try {
this.wait();
} catch (InterruptedException e) {
Log.e(LOGTAG, "Waiting for compositor pause interrupted");
}
}
}
}
@Keep
@SuppressWarnings("unused")
void resumeGeckoViewCompositor() {
if (Thread.currentThread() == mUiThread) {
return;
}
mIsPresentingImmersive = false;
mWindows.exitImmersiveMode();
// Show the window in front of you when you exit immersive mode.
resetUIYaw();
TelemetryWrapper.uploadImmersiveToHistogram();
GleanMetricsService.stopImmersive();
Handler handler = new Handler(Looper.getMainLooper());
handler.postDelayed(() -> {
mWindows.resumeCompositor();
Log.d(LOGTAG, "Compositor Resumed");
}, 20);
}
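    // Note: the patch at the top of this record guards the delayed
    // resumeCompositor() call above with !mWindows.isPaused(), so the
    // compositor is not resumed if the activity was paused during the
    // 20 ms delay.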
@Keep
@SuppressWarnings("unused")
void renderPointerLayer(final Surface aSurface, final long aNativeCallback) {
runOnUiThread(() -> {
try {
Canvas canvas = aSurface.lockHardwareCanvas();
canvas.drawColor(Color.TRANSPARENT, PorterDuff.Mode.CLEAR);
Paint paint = new Paint();
paint.setAntiAlias(true);
paint.setDither(true);
paint.setColor(Color.WHITE);
paint.setStyle(Paint.Style.FILL);
final float x = canvas.getWidth() * 0.5f;
final float y = canvas.getHeight() * 0.5f;
final float radius = canvas.getWidth() * 0.4f;
canvas.drawCircle(x, y, radius, paint);
paint.setColor(Color.BLACK);
paint.setStrokeWidth(4);
paint.setStyle(Paint.Style.STROKE);
canvas.drawCircle(x, y, radius, paint);
aSurface.unlockCanvasAndPost(canvas);
}
catch (Exception ex) {
ex.printStackTrace();
}
if (aNativeCallback != 0) {
queueRunnable(() -> runCallbackNative(aNativeCallback));
}
});
}
@Keep
@SuppressWarnings("unused")
String getStorageAbsolutePath() {
final File path = getExternalFilesDir(null);
if (path == null) {
return "";
}
return path.getAbsolutePath();
}
@Keep
@SuppressWarnings("unused")
public boolean isOverrideEnvPathEnabled() {
return SettingsStore.getInstance(this).isEnvironmentOverrideEnabled();
}
@Keep
@SuppressWarnings("unused")
public boolean areLayersEnabled() {
return SettingsStore.getInstance(this).getLayersEnabled();
}
@Keep
@SuppressWarnings("unused")
public String getActiveEnvironment() {
return SettingsStore.getInstance(this).getEnvironment();
}
@Keep
@SuppressWarnings("unused")
public int getPointerColor() {
return SettingsStore.getInstance(this).getPointerColor();
}
@Keep
@SuppressWarnings("unused")
private void setDeviceType(int aType) {
runOnUiThread(() -> DeviceType.setType(aType));
}
@Keep
@SuppressWarnings("unused")
private void haltActivity(final int aReason) {
runOnUiThread(() -> {
if (mConnectionAvailable && mWindows.getFocusedWindow() != null) {
mWindows.getFocusedWindow().showAlert(
getString(R.string.not_entitled_title),
getString(R.string.not_entitled_message, getString(R.string.app_name)),
index -> finish());
}
});
}
@Keep
@SuppressWarnings("unused")
private void handlePoorPerformance() {
runOnUiThread(() -> {
if (!mSettings.isPerformanceMonitorEnabled()) {
return;
}
// Don't block poorly performing immersive pages.
if (mIsPresentingImmersive) {
return;
}
WindowWidget window = mWindows.getFocusedWindow();
if (window == null || window.getSession() == null) {
return;
}
final String originalUri = window.getSession().getCurrentUri();
if (mPoorPerformanceWhiteList.contains(originalUri)) {
return;
}
window.getSession().loadHomePage();
final String[] buttons = {getString(R.string.ok_button), getString(R.string.performance_unblock_page)};
window.showConfirmPrompt(getString(R.string.performance_title), getString(R.string.performance_message), buttons, index -> {
if (index == PromptDialogWidget.NEGATIVE) {
mPoorPerformanceWhiteList.add(originalUri);
window.getSession().loadUri(originalUri);
}
});
});
}
@Keep
@SuppressWarnings("unused")
private void onAppLink(String aJSON) {
runOnUiThread(() -> {
try {
JSONObject object = new JSONObject(aJSON);
String uri = object.optString("url");
Session session = SessionStore.get().getActiveSession();
if (!StringUtils.isEmpty(uri) && session != null) {
session.loadUri(uri);
}
} catch (Exception ex) {
Log.e(LOGTAG, "Error parsing app link JSON: " + ex.toString());
}
});
}
private SurfaceTexture createSurfaceTexture() {
int[] ids = new int[1];
GLES20.glGenTextures(1, ids, 0);
GLES20.glBindTexture(GLES11Ext.GL_TEXTURE_EXTERNAL_OES, ids[0]);
GLES20.glTexParameterf(GLES11Ext.GL_TEXTURE_EXTERNAL_OES, GLES20.GL_TEXTURE_MIN_FILTER, GLES20.GL_LINEAR);
GLES20.glTexParameterf(GLES11Ext.GL_TEXTURE_EXTERNAL_OES, GLES20.GL_TEXTURE_MAG_FILTER, GLES20.GL_LINEAR);
GLES20.glTexParameteri(GLES11Ext.GL_TEXTURE_EXTERNAL_OES, GLES20.GL_TEXTURE_WRAP_S, GLES20.GL_CLAMP_TO_EDGE);
GLES20.glTexParameteri(GLES11Ext.GL_TEXTURE_EXTERNAL_OES, GLES20.GL_TEXTURE_WRAP_T, GLES20.GL_CLAMP_TO_EDGE);
int error = GLES20.glGetError();
if (error != GLES20.GL_NO_ERROR) {
Log.e(LOGTAG, "OpenGL Error creating SurfaceTexture: " + error);
}
return new SurfaceTexture(ids[0]);
}
void createOffscreenDisplay() {
final SurfaceTexture texture = createSurfaceTexture();
runOnUiThread(() -> {
mOffscreenDisplay = new OffscreenDisplay(VRBrowserActivity.this, texture, 16, 16);
mOffscreenDisplay.setContentView(mWidgetContainer);
});
}
void createCaptureSurface() {
final SurfaceTexture texture = createSurfaceTexture();
runOnUiThread(() -> {
SettingsStore settings = SettingsStore.getInstance(this);
texture.setDefaultBufferSize(settings.getWindowWidth(), settings.getWindowHeight());
BitmapCache.getInstance(this).setCaptureSurface(texture);
});
}
@Override
public int newWidgetHandle() {
return mWidgetHandleIndex++;
}
public void addWidgets(final Iterable<? extends Widget> aWidgets) {
for (Widget widget : aWidgets) {
addWidget(widget);
}
}
private void updateActiveDialog(final Widget aWidget) {
if (!aWidget.isDialog()) {
return;
}
if (aWidget.isVisible()) {
mActiveDialog = aWidget;
} else if (aWidget == mActiveDialog && !aWidget.isVisible()) {
mActiveDialog = null;
}
}
@SuppressWarnings("BooleanMethodIsAlwaysInverted")
private boolean isWidgetInputEnabled(Widget aWidget) {
return mActiveDialog == null || aWidget == null || mActiveDialog == aWidget || aWidget instanceof KeyboardWidget;
}
// WidgetManagerDelegate
@Override
public void addWidget(Widget aWidget) {
if (aWidget == null) {
return;
}
mWidgets.put(aWidget.getHandle(), aWidget);
((View)aWidget).setVisibility(aWidget.getPlacement().visible ? View.VISIBLE : View.GONE);
final int handle = aWidget.getHandle();
final WidgetPlacement clone = aWidget.getPlacement().clone();
queueRunnable(() -> addWidgetNative(handle, clone));
updateActiveDialog(aWidget);
}
@Override
public void updateWidget(final Widget aWidget) {
if (aWidget == null) {
return;
}
final int handle = aWidget.getHandle();
final WidgetPlacement clone = aWidget.getPlacement().clone();
queueRunnable(() -> updateWidgetNative(handle, clone));
final int textureWidth = aWidget.getPlacement().textureWidth();
final int textureHeight = aWidget.getPlacement().textureHeight();
final int viewWidth = aWidget.getPlacement().viewWidth();
final int viewHeight = aWidget.getPlacement().viewHeight();
FrameLayout.LayoutParams params = (FrameLayout.LayoutParams)((View)aWidget).getLayoutParams();
if (params == null) {
// Widget not added yet
return;
}
UIWidget view = (UIWidget)aWidget;
if (params.width != viewWidth || params.height != viewHeight) {
params.width = viewWidth;
params.height = viewHeight;
if (view.isLayer()) {
// Reuse last frame and do not render while resizing surface with Layers enabled.
// Fixes resizing glitches.
view.setResizing(true);
}
((View)aWidget).setLayoutParams(params);
aWidget.resizeSurface(textureWidth, textureHeight);
}
boolean visible = aWidget.getPlacement().visible;
if (visible != (view.getVisibility() == View.VISIBLE)) {
view.setVisibility(visible ? View.VISIBLE : View.GONE);
}
for (UpdateListener listener: mWidgetUpdateListeners) {
listener.onWidgetUpdate(aWidget);
}
updateActiveDialog(aWidget);
}
@Override
public void removeWidget(final Widget aWidget) {
if (aWidget == null) {
return;
}
mWidgets.remove(aWidget.getHandle());
mWidgetContainer.removeView((View) aWidget);
aWidget.setFirstPaintReady(false);
queueRunnable(() -> removeWidgetNative(aWidget.getHandle()));
if (aWidget == mActiveDialog) {
mActiveDialog = null;
}
}
@Override
public void updateVisibleWidgets() {
queueRunnable(this::updateVisibleWidgetsNative);
}
@Override
public void startWidgetResize(final Widget aWidget, float aMaxWidth, float aMaxHeight, float minWidth, float minHeight) {
if (aWidget == null) {
return;
}
mWindows.enterResizeMode();
queueRunnable(() -> startWidgetResizeNative(aWidget.getHandle(), aMaxWidth, aMaxHeight, minWidth, minHeight));
}
@Override
public void finishWidgetResize(final Widget aWidget) {
if (aWidget == null) {
return;
}
mWindows.exitResizeMode();
queueRunnable(() -> finishWidgetResizeNative(aWidget.getHandle()));
}
@Override
public void startWidgetMove(final Widget aWidget, @WidgetMoveBehaviourFlags int aMoveBehaviour) {
if (aWidget == null) {
return;
}
queueRunnable(() -> startWidgetMoveNative(aWidget.getHandle(), aMoveBehaviour));
}
@Override
public void finishWidgetMove() {
queueRunnable(this::finishWidgetMoveNative);
}
@Override
public void addUpdateListener(@NonNull UpdateListener aUpdateListener) {
if (!mWidgetUpdateListeners.contains(aUpdateListener)) {
mWidgetUpdateListeners.add(aUpdateListener);
}
}
@Override
public void removeUpdateListener(@NonNull UpdateListener aUpdateListener) {
mWidgetUpdateListeners.remove(aUpdateListener);
}
@Override
public void addPermissionListener(PermissionListener aListener) {
if (!mPermissionListeners.contains(aListener)) {
mPermissionListeners.add(aListener);
}
}
@Override
public void removePermissionListener(PermissionListener aListener) {
mPermissionListeners.remove(aListener);
}
@Override
public void addFocusChangeListener(@NonNull FocusChangeListener aListener) {
if (!mFocusChangeListeners.contains(aListener)) {
mFocusChangeListeners.add(aListener);
}
}
@Override
public void removeFocusChangeListener(@NonNull FocusChangeListener aListener) {
mFocusChangeListeners.remove(aListener);
}
@Override
public void addWorldClickListener(WorldClickListener aListener) {
if (!mWorldClickListeners.contains(aListener)) {
mWorldClickListeners.add(aListener);
}
}
@Override
public void removeWorldClickListener(WorldClickListener aListener) {
mWorldClickListeners.remove(aListener);
}
@Override
public void addConnectivityListener(Delegate aListener) {
if (!mConnectivityListeners.contains(aListener)) {
mConnectivityListeners.add(aListener);
}
}
@Override
public void removeConnectivityListener(Delegate aListener) {
mConnectivityListeners.remove(aListener);
}
@Override
public void pushBackHandler(@NonNull Runnable aRunnable) {
mBackHandlers.addLast(aRunnable);
}
@Override
public void popBackHandler(@NonNull Runnable aRunnable) {
mBackHandlers.removeLastOccurrence(aRunnable);
}
@Override
public void setIsServoSession(boolean aIsServo) {
queueRunnable(() -> setIsServo(aIsServo));
}
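    // World brightness is managed as a stack of (key, brightness) pairs:
    // push dims the scene for a given owner, pop restores whatever value
    // was active underneath.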
@Override
public void pushWorldBrightness(Object aKey, float aBrightness) {
if (mCurrentBrightness.second != aBrightness) {
queueRunnable(() -> setWorldBrightnessNative(aBrightness));
}
mBrightnessQueue.add(mCurrentBrightness);
mCurrentBrightness = Pair.create(aKey, aBrightness);
}
@Override
public void setWorldBrightness(Object aKey, final float aBrightness) {
if (mCurrentBrightness.first == aKey) {
if (mCurrentBrightness.second != aBrightness) {
mCurrentBrightness = Pair.create(aKey, aBrightness);
queueRunnable(() -> setWorldBrightnessNative(aBrightness));
}
} else {
for (int i = mBrightnessQueue.size() - 1; i >= 0; --i) {
if (mBrightnessQueue.get(i).first == aKey) {
mBrightnessQueue.set(i, Pair.create(aKey, aBrightness));
break;
}
}
}
}
@Override
public void popWorldBrightness(Object aKey) {
if (mBrightnessQueue.size() == 0) {
return;
}
if (mCurrentBrightness.first == aKey) {
float brightness = mCurrentBrightness.second;
mCurrentBrightness = mBrightnessQueue.removeLast();
if (mCurrentBrightness.second != brightness) {
queueRunnable(() -> setWorldBrightnessNative(mCurrentBrightness.second));
}
return;
}
for (int i = mBrightnessQueue.size() - 1; i >= 0; --i) {
if (mBrightnessQueue.get(i).first == aKey) {
mBrightnessQueue.remove(i);
break;
}
}
}
@Override
public void setControllersVisible(final boolean aVisible) {
queueRunnable(() -> setControllersVisibleNative(aVisible));
}
@Override
public void setWindowSize(float targetWidth, float targetHeight) {
mWindows.getFocusedWindow().resizeByMultiplier(targetWidth / targetHeight, 1.0f);
}
@Override
public void keyboardDismissed() {
mNavigationBar.showVoiceSearch();
}
@Override
public void updateEnvironment() {
queueRunnable(this::updateEnvironmentNative);
}
@Override
public void updatePointerColor() {
queueRunnable(this::updatePointerColorNative);
}
@Override
public boolean isPermissionGranted(@NonNull String permission) {
return mPermissionDelegate.isPermissionGranted(permission);
}
@Override
public void requestPermission(String uri, @NonNull String permission, GeckoSession.PermissionDelegate.Callback aCallback) {
Session session = SessionStore.get().getActiveSession();
if (uri != null && !uri.isEmpty()) {
mPermissionDelegate.onAppPermissionRequest(session.getGeckoSession(), uri, permission, aCallback);
} else {
mPermissionDelegate.onAndroidPermissionsRequest(session.getGeckoSession(), new String[]{permission}, aCallback);
}
}
@Override
public void onRequestPermissionsResult(int requestCode, @NonNull String[] permissions, @NonNull int[] grantResults) {
super.onRequestPermissionsResult(requestCode, permissions, grantResults);
runOnUiThread(() -> {
for (PermissionListener listener : mPermissionListeners) {
listener.onRequestPermissionsResult(requestCode, permissions, grantResults);
}
});
}
@Override
public void showVRVideo(final int aWindowHandle, final @VideoProjectionMenuWidget.VideoProjectionFlags int aVideoProjection) {
queueRunnable(() -> showVRVideoNative(aWindowHandle, aVideoProjection));
}
@Override
public void hideVRVideo() {
queueRunnable(this::hideVRVideoNative);
}
@Override
public void resetUIYaw() {
queueRunnable(this::resetUIYawNative);
}
@Override
public void setCylinderDensity(final float aDensity) {
if (mWindows != null && aDensity == 0.0f && mWindows.getWindowsCount() > 1) {
return;
}
mCurrentCylinderDensity = aDensity;
queueRunnable(() -> setCylinderDensityNative(aDensity));
if (mWindows != null) {
mWindows.updateCurvedMode(false);
}
}
@Override
public float getCylinderDensity() {
return mCurrentCylinderDensity;
}
@Override
public boolean canOpenNewWindow() {
return mWindows.canOpenNewWindow();
}
@Override
public void openNewWindow(String uri) {
WindowWidget newWindow = mWindows.addWindow();
if ((newWindow != null) && (newWindow.getSession() != null)) {
newWindow.getSession().loadUri(uri);
}
}
@Override
public void openNewTab(@NonNull String uri) {
mWindows.addBackgroundTab(mWindows.getFocusedWindow(), uri);
}
@Override
public void openNewTabForeground(@NonNull String uri) {
mWindows.addTab(mWindows.getFocusedWindow(), uri);
}
@Override
public WindowWidget getFocusedWindow() {
return mWindows.getFocusedWindow();
}
@Override
public TrayWidget getTray() {
return mTray;
}
@Override
public NavigationBarWidget getNavigationBar() {
return mNavigationBar;
}
@Override
public Windows getWindows() {
return mWindows;
}
@Override
public void saveState() {
mWindows.saveState();
}
@Override
public void updateLocale(@NonNull Context context) {
onConfigurationChanged(context.getResources().getConfiguration());
}
private native void addWidgetNative(int aHandle, WidgetPlacement aPlacement);
private native void updateWidgetNative(int aHandle, WidgetPlacement aPlacement);
private native void updateVisibleWidgetsNative();
private native void removeWidgetNative(int aHandle);
private native void startWidgetResizeNative(int aHandle, float maxWidth, float maxHeight, float minWidth, float minHeight);
private native void finishWidgetResizeNative(int aHandle);
private native void startWidgetMoveNative(int aHandle, int aMoveBehaviour);
private native void finishWidgetMoveNative();
private native void setWorldBrightnessNative(float aBrightness);
private native void setTemporaryFilePath(String aPath);
private native void exitImmersiveNative();
private native void workaroundGeckoSigAction();
private native void updateEnvironmentNative();
private native void updatePointerColorNative();
    private native void showVRVideoNative(int aWindowHandle, int aVideoProjection);
private native void hideVRVideoNative();
private native void resetUIYawNative();
private native void setControllersVisibleNative(boolean aVisible);
private native void runCallbackNative(long aCallback);
private native void setCylinderDensityNative(float aDensity);
private native void setCPULevelNative(@CPULevelFlags int aCPULevel);
private native void setIsServo(boolean aIsServo);
}
| 1 | 9,156 | These logs should probably use the `LOGTAG` | MozillaReality-FirefoxReality | java |
@@ -28,10 +28,10 @@ import (
"github.com/mysteriumnetwork/node/core/connection"
"github.com/mysteriumnetwork/node/services/wireguard"
"github.com/mysteriumnetwork/node/services/wireguard/key"
- "github.com/mysteriumnetwork/wireguard-go/device"
- "github.com/mysteriumnetwork/wireguard-go/tun"
"github.com/pkg/errors"
"github.com/rs/zerolog/log"
+ "golang.zx2c4.com/wireguard/device"
+ "golang.zx2c4.com/wireguard/tun"
)
const ( | 1 | /*
* Copyright (C) 2018 The "MysteriumNetwork/node" Authors.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package mysterium
import (
"encoding/base64"
"encoding/json"
"net"
"sync"
"time"
"github.com/mysteriumnetwork/node/consumer"
"github.com/mysteriumnetwork/node/core/connection"
"github.com/mysteriumnetwork/node/services/wireguard"
"github.com/mysteriumnetwork/node/services/wireguard/key"
"github.com/mysteriumnetwork/wireguard-go/device"
"github.com/mysteriumnetwork/wireguard-go/tun"
"github.com/pkg/errors"
"github.com/rs/zerolog/log"
)
const (
//taken from android-wireguard project
androidTunMtu = 1280
)
// WireguardTunnelSetup exposes api for caller to implement external tunnel setup
type WireguardTunnelSetup interface {
NewTunnel()
AddTunnelAddress(ip string, prefixLen int)
AddRoute(route string, prefixLen int)
AddDNS(ip string)
SetBlocking(blocking bool)
Establish() (int, error)
SetMTU(mtu int)
Protect(socket int) error
SetSessionName(session string)
}
// WireguardConnectionFactory is the connection factory for wireguard
type WireguardConnectionFactory struct {
tunnelSetup WireguardTunnelSetup
}
// Create creates a new wireguard connection
func (wcf *WireguardConnectionFactory) Create(stateChannel connection.StateChannel, statisticsChannel connection.StatisticsChannel) (connection.Connection, error) {
privateKey, err := key.GeneratePrivateKey()
if err != nil {
return nil, err
}
deviceFactory := func(options connection.ConnectOptions) (*device.DeviceApi, error) {
var config wireguard.ServiceConfig
err := json.Unmarshal(options.SessionConfig, &config)
if err != nil {
return nil, err
}
config.Consumer.PrivateKey = privateKey
wcf.tunnelSetup.NewTunnel()
wcf.tunnelSetup.SetSessionName("wg-tun-session")
//TODO fetch from user connection options
wcf.tunnelSetup.AddDNS("8.8.8.8")
        // TODO this heavy lifting might go to doInit
tun, err := newTunnDevice(wcf.tunnelSetup, &config)
if err != nil {
return nil, err
}
devApi := device.UserspaceDeviceApi(tun)
err = setupWireguardDevice(devApi, &config)
if err != nil {
devApi.Close()
return nil, err
}
devApi.Boot()
socket, err := devApi.GetNetworkSocket()
if err != nil {
devApi.Close()
return nil, err
}
err = wcf.tunnelSetup.Protect(socket)
if err != nil {
devApi.Close()
return nil, err
}
return devApi, nil
}
return &wireguardConnection{
deviceFactory: deviceFactory,
privKey: privateKey,
stopChannel: make(chan struct{}),
stateChannel: stateChannel,
statisticsChannel: statisticsChannel,
stopCompleted: &sync.WaitGroup{},
}, nil
}
// OverrideWireguardConnection overrides default wireguard connection implementation to more mobile adapted one
func (mobNode *MobileNode) OverrideWireguardConnection(wgTunnelSetup WireguardTunnelSetup) {
wireguard.Bootstrap()
factory := &WireguardConnectionFactory{
tunnelSetup: wgTunnelSetup,
}
mobNode.di.ConnectionRegistry.Register(wireguard.ServiceType, factory)
}
type deviceFactory func(options connection.ConnectOptions) (*device.DeviceApi, error)
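// setupWireguardDevice applies the session config to the userspace device:
// a random listening port, the consumer's private key, and a single provider
// peer through which all IPv4 traffic is routed.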
func setupWireguardDevice(devApi *device.DeviceApi, config *wireguard.ServiceConfig) error {
err := devApi.SetListeningPort(0) //random port
if err != nil {
return err
}
privKeyArr, err := base64stringTo32ByteArray(config.Consumer.PrivateKey)
if err != nil {
return err
}
err = devApi.SetPrivateKey(device.NoisePrivateKey(privKeyArr))
if err != nil {
return err
}
peerPubKeyArr, err := base64stringTo32ByteArray(config.Provider.PublicKey)
if err != nil {
return err
}
ep := config.Provider.Endpoint.String()
endpoint, err := device.CreateEndpoint(ep)
if err != nil {
return err
}
err = devApi.AddPeer(device.ExternalPeer{
PublicKey: device.NoisePublicKey(peerPubKeyArr),
RemoteEndpoint: endpoint,
KeepAlivePeriod: 20,
//all traffic through this peer (unfortunately 0.0.0.0/0 didn't work as it was treated as ipv6)
AllowedIPs: []string{"0.0.0.0/1", "128.0.0.0/1"},
})
return err
}
func base64stringTo32ByteArray(s string) (res [32]byte, err error) {
    decoded, err := base64.StdEncoding.DecodeString(s)
    if err != nil {
        return
    }
    if len(decoded) != 32 {
        err = errors.New("unexpected key size")
        return
    }
    copy(res[:], decoded)
    return
}
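// newTunnDevice delegates TUN interface creation (address, MTU, routes) to
// the platform side via WireguardTunnelSetup and wraps the resulting file
// descriptor into a tun.TUNDevice usable by wireguard-go.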
func newTunnDevice(wgTunnSetup WireguardTunnelSetup, config *wireguard.ServiceConfig) (tun.TUNDevice, error) {
consumerIP := config.Consumer.IPAddress
prefixLen, _ := consumerIP.Mask.Size()
wgTunnSetup.AddTunnelAddress(consumerIP.IP.String(), prefixLen)
wgTunnSetup.SetMTU(androidTunMtu)
wgTunnSetup.SetBlocking(true)
//route all traffic through tunnel
wgTunnSetup.AddRoute("0.0.0.0", 1)
wgTunnSetup.AddRoute("128.0.0.0", 1)
    // The provider may request delaying the consumer connection while it is still setting up NAT traversal for this consumer.
if config.Consumer.ConnectDelay > 0 {
log.Info().Msgf("Delaying tunnel creation for %v milliseconds", config.Consumer.ConnectDelay)
time.Sleep(time.Duration(config.Consumer.ConnectDelay) * time.Millisecond)
}
fd, err := wgTunnSetup.Establish()
if err != nil {
return nil, err
}
log.Info().Msgf("Tun value is: %d", fd)
tun, err := newDeviceFromFd(fd)
if err == nil {
        // Non-fatal: just log the interface name when it is available.
name, nameErr := tun.Name()
log.Info().Err(nameErr).Msg("Name value: " + name)
}
return tun, err
}
type wireguardConnection struct {
privKey string
deviceFactory deviceFactory
device *device.DeviceApi
stopChannel chan struct{}
stateChannel connection.StateChannel
statisticsChannel connection.StatisticsChannel
stopCompleted *sync.WaitGroup
}
func (wg *wireguardConnection) Start(options connection.ConnectOptions) error {
log.Debug().Msg("Creating device")
device, err := wg.deviceFactory(options)
if err != nil {
return errors.Wrap(err, "failed to start wireguard connection")
}
wg.device = device
wg.stateChannel <- connection.Connecting
if err := wg.doInit(); err != nil {
return errors.Wrap(err, "failed to start wireguard connection")
}
log.Debug().Msg("Emitting connected event")
wg.stateChannel <- connection.Connected
return nil
}
func (wg *wireguardConnection) doInit() error {
log.Debug().Msg("Starting doInit()")
wg.stopCompleted.Add(1)
go wg.runPeriodically(time.Second)
return wg.waitHandshake()
}
func (wg *wireguardConnection) Wait() error {
wg.stopCompleted.Wait()
return nil
}
func (wg *wireguardConnection) Stop() {
wg.stateChannel <- connection.Disconnecting
wg.updateStatistics()
close(wg.stopChannel)
}
func (wg *wireguardConnection) GetConfig() (connection.ConsumerConfig, error) {
publicKey, err := key.PrivateKeyToPublicKey(wg.privKey)
if err != nil {
return nil, err
}
return wireguard.ConsumerConfig{
PublicKey: publicKey,
}, nil
}
var _ connection.Connection = &wireguardConnection{}
func (wg *wireguardConnection) updateStatistics() {
var err error
defer func() {
if err != nil {
log.Error().Err(err).Msg("Error updating statistics")
}
}()
peers, err := wg.device.Peers()
if err != nil {
return
}
if len(peers) != 1 {
err = errors.New("exactly 1 peer expected")
return
}
peerStatistics := peers[0].Stats
wg.statisticsChannel <- consumer.SessionStatistics{
BytesSent: peerStatistics.Sent,
BytesReceived: peerStatistics.Received,
}
}
func (wg *wireguardConnection) doCleanup() {
wg.device.Close()
wg.device.Wait()
wg.stateChannel <- connection.NotConnected
close(wg.stateChannel)
wg.stopCompleted.Done()
}
func (wg *wireguardConnection) runPeriodically(duration time.Duration) {
for {
select {
case <-time.After(duration):
wg.updateStatistics()
case <-wg.stopChannel:
wg.doCleanup()
return
}
}
}
func (wg *wireguardConnection) waitHandshake() error {
    // We need to send some packet to initiate the handshake; a short-lived
    // TCP dial through the tunnel is enough to generate traffic.
    if conn, dialErr := net.DialTimeout("tcp", "8.8.8.8:53", 100*time.Millisecond); dialErr == nil {
        conn.Close()
    }
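    // Poll the peer until its first handshake timestamp appears, giving up
    // only if a stop is requested.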
for {
select {
case <-time.After(20 * time.Millisecond):
peers, err := wg.device.Peers()
if err != nil {
return errors.Wrap(err, "failed while waiting for a peer handshake")
}
if len(peers) != 1 {
return errors.Wrap(errors.New("exactly 1 peer expected"), "failed while waiting for a peer handshake")
}
if peers[0].LastHanshake != 0 {
return nil
}
case <-wg.stopChannel:
wg.doCleanup()
return errors.New("stop received")
}
}
}
| 1 | 15,162 | From this, it was clear, that we are using our version of wireguard-go. And now it looks like we are using original packages which is confusing. | mysteriumnetwork-node | go |
@@ -43,7 +43,7 @@ namespace Nethermind.JsonRpc
public static ResultWrapper<T> Fail(Exception e)
{
- return new() { Result = Result.Fail(e.ToString()), ErrorCode = ErrorCodes.InternalError};
+ return new() { Result = Result.Fail(e.Message), ErrorCode = ErrorCodes.InternalError};
}
public static ResultWrapper<T> Fail(string error, int errorCode, T outputData) | 1 | // Copyright (c) 2021 Demerzel Solutions Limited
// This file is part of the Nethermind library.
//
// The Nethermind library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The Nethermind library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the Nethermind. If not, see <http://www.gnu.org/licenses/>.
using System;
using Nethermind.Core;
using Nethermind.Facade.Proxy;
using Nethermind.JsonRpc.Modules;
namespace Nethermind.JsonRpc
{
public class ResultWrapper<T> : IResultWrapper
{
public T Data { get; set; }
public Result Result { get; set; }
public int ErrorCode { get; set; }
private ResultWrapper()
{
}
public static ResultWrapper<T> Fail<TSearch>(SearchResult<TSearch> searchResult) where TSearch : class
{
return new() { Result = Result.Fail(searchResult.Error), ErrorCode = searchResult.ErrorCode};
}
public static ResultWrapper<T> Fail(string error)
{
return new() { Result = Result.Fail(error), ErrorCode = ErrorCodes.InternalError};
}
public static ResultWrapper<T> Fail(Exception e)
{
return new() { Result = Result.Fail(e.ToString()), ErrorCode = ErrorCodes.InternalError};
}
public static ResultWrapper<T> Fail(string error, int errorCode, T outputData)
{
return new() { Result = Result.Fail(error), ErrorCode = errorCode, Data = outputData};
}
public static ResultWrapper<T> Fail(string error, int errorCode)
{
return new() { Result = Result.Fail(error), ErrorCode = errorCode};
}
public static ResultWrapper<T> Fail(string error, T data)
{
return new() { Data = data, Result = Result.Fail(error) };
}
public static ResultWrapper<T> Success(T data)
{
return new() { Data = data, Result = Result.Success };
}
public Result GetResult()
{
return Result;
}
public object GetData()
{
return Data;
}
public int GetErrorCode()
{
return ErrorCode;
}
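        // Maps a proxied RPC result onto the wrapper: a missing or invalid
        // proxy response becomes a failure, otherwise the payload is passed
        // through as a success.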
public static ResultWrapper<T> From(RpcResult<T> rpcResult)
{
if (rpcResult is null)
{
return Fail("Missing result.");
}
return rpcResult.IsValid ? Success(rpcResult.Result) : Fail(rpcResult.Error.Message);
}
}
}
| 1 | 26,192 | Any particular reason for this? This potentially will make harder to investigate users issues | NethermindEth-nethermind | .cs |
@@ -999,7 +999,8 @@ Blockly.BlockSvg.prototype.updatePreviews = function(closestConnection,
// grayed-out blocks instead of highlighting the connection; for compatibility
// with Web Blockly the name "highlightedConnection" will still be used.
if (Blockly.highlightedConnection_ &&
- Blockly.highlightedConnection_ != closestConnection) {
+ (Blockly.highlightedConnection_ != closestConnection ||
+ Blockly.localConnection_ != localConnection)) {
if (Blockly.insertionMarker_ && Blockly.insertionMarkerConnection_) {
Blockly.BlockSvg.disconnectInsertionMarker();
} | 1 | /**
* @license
* Visual Blocks Editor
*
* Copyright 2012 Google Inc.
* https://developers.google.com/blockly/
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* @fileoverview Methods for graphically rendering a block as SVG.
* @author fraser@google.com (Neil Fraser)
*/
'use strict';
goog.provide('Blockly.BlockSvg');
goog.require('Blockly.Block');
goog.require('Blockly.ContextMenu');
goog.require('goog.Timer');
goog.require('goog.asserts');
goog.require('goog.dom');
goog.require('goog.math.Coordinate');
goog.require('goog.userAgent');
/**
* Class for a block's SVG representation.
* Not normally called directly, workspace.newBlock() is preferred.
* @param {!Blockly.Workspace} workspace The block's workspace.
* @param {?string} prototypeName Name of the language object containing
* type-specific functions for this block.
 * @param {string=} opt_id Optional ID. Use this ID if provided, otherwise
* create a new id.
* @extends {Blockly.Block}
* @constructor
*/
Blockly.BlockSvg = function(workspace, prototypeName, opt_id) {
// Create core elements for the block.
/** @type {SVGElement} */
this.svgGroup_ = Blockly.createSvgElement('g', {}, null);
/** @type {SVGElement} */
this.svgPath_ = Blockly.createSvgElement('path', {'class': 'blocklyPath'},
this.svgGroup_);
this.svgPath_.tooltip = this;
/** @type {boolean} */
this.rendered = false;
Blockly.Tooltip.bindMouseEvents(this.svgPath_);
Blockly.BlockSvg.superClass_.constructor.call(this,
workspace, prototypeName, opt_id);
};
goog.inherits(Blockly.BlockSvg, Blockly.Block);
/**
* Height of this block, not including any statement blocks above or below.
* @type {number}
*/
Blockly.BlockSvg.prototype.height = 0;
/**
* Width of this block, including any connected value blocks.
* @type {number}
*/
Blockly.BlockSvg.prototype.width = 0;
/**
* Opacity of this block between 0 and 1.
* @type {number}
* @private
*/
Blockly.BlockSvg.prototype.opacity_ = 1;
/**
* Original location of block being dragged.
* @type {goog.math.Coordinate}
* @private
*/
Blockly.BlockSvg.prototype.dragStartXY_ = null;
/**
* Whether the block glows as if running.
* @type {boolean}
* @private
*/
Blockly.BlockSvg.prototype.isGlowing_ = false;
/**
* Constant for identifying rows that are to be rendered inline.
* Don't collide with Blockly.INPUT_VALUE and friends.
* @const
*/
Blockly.BlockSvg.INLINE = -1;
/**
* Create and initialize the SVG representation of the block.
* May be called more than once.
*/
Blockly.BlockSvg.prototype.initSvg = function() {
goog.asserts.assert(this.workspace.rendered, 'Workspace is headless.');
if (!this.isInsertionMarker()) { // Insertion markers not allowed to have inputs or icons
for (var i = 0, input; input = this.inputList[i]; i++) {
input.init();
}
var icons = this.getIcons();
for (i = 0; i < icons.length; i++) {
icons[i].createIcon();
}
}
this.updateColour();
this.updateMovable();
if (!this.workspace.options.readOnly && !this.eventsInit_) {
Blockly.bindEvent_(this.getSvgRoot(), 'mousedown', this,
this.onMouseDown_);
var thisBlock = this;
Blockly.bindEvent_(this.getSvgRoot(), 'touchstart', null,
function(e) {Blockly.longStart_(e, thisBlock);});
}
this.eventsInit_ = true;
if (!this.getSvgRoot().parentNode) {
this.workspace.getCanvas().appendChild(this.getSvgRoot());
}
};
/**
* Select this block. Highlight it visually.
*/
Blockly.BlockSvg.prototype.select = function() {
if (Blockly.selected == this) {
return;
}
var oldId = null;
if (Blockly.selected) {
oldId = Blockly.selected.id;
// Unselect any previously selected block.
Blockly.Events.disable();
Blockly.selected.unselect();
Blockly.Events.enable();
}
var event = new Blockly.Events.Ui(null, 'selected', oldId, this.id);
event.workspaceId = this.workspace.id;
Blockly.Events.fire(event);
Blockly.selected = this;
this.addSelect();
Blockly.fireUiEvent(this.workspace.getCanvas(), 'blocklySelectChange');
};
/**
* Unselect this block. Remove its highlighting.
*/
Blockly.BlockSvg.prototype.unselect = function() {
if (Blockly.selected != this) {
return;
}
var event = new Blockly.Events.Ui(null, 'selected', this.id, null);
event.workspaceId = this.workspace.id;
Blockly.Events.fire(event);
Blockly.selected = null;
this.removeSelect();
Blockly.fireUiEvent(this.workspace.getCanvas(), 'blocklySelectChange');
};
/**
* Glow this block. Highlight it visually as if it's running.
* @param {boolean} isGlowing Whether the block should glow.
*/
Blockly.BlockSvg.prototype.setGlow = function(isGlowing) {
this.isGlowing_ = isGlowing;
this.updateColour();
};
/**
* Block's mutator icon (if any).
* @type {Blockly.Mutator}
*/
Blockly.BlockSvg.prototype.mutator = null;
/**
* Block's comment icon (if any).
* @type {Blockly.Comment}
*/
Blockly.BlockSvg.prototype.comment = null;
/**
* Block's warning icon (if any).
* @type {Blockly.Warning}
*/
Blockly.BlockSvg.prototype.warning = null;
/**
* Returns a list of mutator, comment, and warning icons.
* @return {!Array} List of icons.
*/
Blockly.BlockSvg.prototype.getIcons = function() {
var icons = [];
if (this.mutator) {
icons.push(this.mutator);
}
if (this.comment) {
icons.push(this.comment);
}
if (this.warning) {
icons.push(this.warning);
}
return icons;
};
/**
* Wrapper function called when a mouseUp occurs during a drag operation.
* @type {Array.<!Array>}
* @private
*/
Blockly.BlockSvg.onMouseUpWrapper_ = null;
/**
* Wrapper function called when a mouseMove occurs during a drag operation.
* @type {Array.<!Array>}
* @private
*/
Blockly.BlockSvg.onMouseMoveWrapper_ = null;
/**
* Stop binding to the global mouseup and mousemove events.
* @private
*/
Blockly.BlockSvg.terminateDrag_ = function() {
if (Blockly.BlockSvg.onMouseUpWrapper_) {
Blockly.unbindEvent_(Blockly.BlockSvg.onMouseUpWrapper_);
Blockly.BlockSvg.onMouseUpWrapper_ = null;
}
if (Blockly.BlockSvg.onMouseMoveWrapper_) {
Blockly.unbindEvent_(Blockly.BlockSvg.onMouseMoveWrapper_);
Blockly.BlockSvg.onMouseMoveWrapper_ = null;
}
var selected = Blockly.selected;
if (Blockly.dragMode_ == Blockly.DRAG_FREE) {
// Terminate a drag operation.
if (selected) {
if (Blockly.insertionMarker_) {
Blockly.Events.disable();
if (Blockly.insertionMarkerConnection_) {
Blockly.BlockSvg.disconnectInsertionMarker();
}
Blockly.insertionMarker_.dispose();
Blockly.insertionMarker_ = null;
Blockly.Events.enable();
}
// Update the connection locations.
var xy = selected.getRelativeToSurfaceXY();
var dxy = goog.math.Coordinate.difference(xy, selected.dragStartXY_);
var event = new Blockly.Events.Move(selected);
event.oldCoordinate = selected.dragStartXY_;
event.recordNew();
Blockly.Events.fire(event);
selected.moveConnections_(dxy.x, dxy.y);
delete selected.draggedBubbles_;
selected.setDragging_(false);
selected.moveOffDragSurface_();
selected.render();
      // Ensure that any snap and bump are part of this move's event group.
var group = Blockly.Events.getGroup();
setTimeout(function() {
Blockly.Events.setGroup(group);
selected.snapToGrid();
Blockly.Events.setGroup(false);
}, Blockly.BUMP_DELAY / 2);
setTimeout(function() {
Blockly.Events.setGroup(group);
selected.bumpNeighbours_();
Blockly.Events.setGroup(false);
}, Blockly.BUMP_DELAY);
// Fire an event to allow scrollbars to resize.
Blockly.fireUiEvent(window, 'resize');
}
}
Blockly.dragMode_ = Blockly.DRAG_NONE;
Blockly.Css.setCursor(Blockly.Css.Cursor.OPEN);
};
/**
* Set parent of this block to be a new block or null.
* @param {Blockly.BlockSvg} newParent New parent block.
*/
Blockly.BlockSvg.prototype.setParent = function(newParent) {
if (newParent == this.parentBlock_) {
return;
}
var svgRoot = this.getSvgRoot();
if (this.parentBlock_ && svgRoot) {
// Move this block up the DOM. Keep track of x/y translations.
var xy = this.getRelativeToSurfaceXY();
// Avoid moving a block up the DOM if it's currently selected/dragging,
// so as to avoid taking things off the drag surface.
if (Blockly.selected != this) {
this.workspace.getCanvas().appendChild(svgRoot);
this.translate(xy.x, xy.y);
}
}
Blockly.Field.startCache();
Blockly.BlockSvg.superClass_.setParent.call(this, newParent);
Blockly.Field.stopCache();
if (newParent) {
var oldXY = this.getRelativeToSurfaceXY();
newParent.getSvgRoot().appendChild(svgRoot);
var newXY = this.getRelativeToSurfaceXY();
// Move the connections to match the child's new position.
this.moveConnections_(newXY.x - oldXY.x, newXY.y - oldXY.y);
// If we are a shadow block, inherit tertiary colour.
if (this.isShadow()) {
this.setColour(this.getColour(), this.getColourSecondary(),
newParent.getColourTertiary());
}
}
};
/**
* Return the coordinates of the top-left corner of this block relative to the
* drawing surface's origin (0,0).
* @return {!goog.math.Coordinate} Object with .x and .y properties.
*/
Blockly.BlockSvg.prototype.getRelativeToSurfaceXY = function() {
// The drawing surface is relative to either the workspace canvas
// or to the drag surface group.
var x = 0;
var y = 0;
var dragSurfaceGroup = (this.workspace.dragSurface) ?
this.workspace.dragSurface.getGroup() : null;
var element = this.getSvgRoot();
if (element) {
do {
// Loop through this block and every parent.
var xy = Blockly.getRelativeXY_(element);
x += xy.x;
y += xy.y;
// If this element is the current element on the drag surface, include
// the translation of the drag surface itself.
if (this.workspace.dragSurface &&
this.workspace.dragSurface.getCurrentBlock() == element) {
var surfaceTranslation = this.workspace.dragSurface.getSurfaceTranslation();
x += surfaceTranslation.x;
y += surfaceTranslation.y;
}
element = element.parentNode;
} while (element && element != this.workspace.getCanvas() &&
element != dragSurfaceGroup);
}
return new goog.math.Coordinate(x, y);
};
/**
* Move a block by a relative offset.
* @param {number} dx Horizontal offset.
* @param {number} dy Vertical offset.
*/
Blockly.BlockSvg.prototype.moveBy = function(dx, dy) {
goog.asserts.assert(!this.parentBlock_, 'Block has parent.');
var event = new Blockly.Events.Move(this);
var xy = this.getRelativeToSurfaceXY();
this.translate(xy.x + dx, xy.y + dy);
this.moveConnections_(dx, dy);
event.recordNew();
Blockly.Events.fire(event);
Blockly.WidgetDiv.hide(true);
};
/**
* Set this block to an absolute translation.
* @param {number} x Horizontal translation.
* @param {number} y Vertical translation.
* @param {boolean=} opt_use3d If set, use 3d translation.
*/
Blockly.BlockSvg.prototype.translate = function(x, y, opt_use3d) {
if (opt_use3d) {
this.getSvgRoot().setAttribute('style', 'transform: translate3d(' + x + 'px,' + y + 'px, 0px)');
} else {
this.getSvgRoot().setAttribute('transform', 'translate(' + x + ',' + y + ')');
}
};
/**
* Snap this block to the nearest grid point.
*/
Blockly.BlockSvg.prototype.snapToGrid = function() {
if (!this.workspace) {
return; // Deleted block.
}
if (Blockly.dragMode_ != Blockly.DRAG_NONE) {
return; // Don't bump blocks during a drag.
}
if (this.getParent()) {
return; // Only snap top-level blocks.
}
if (this.isInFlyout) {
return; // Don't move blocks around in a flyout.
}
if (!this.workspace.options.gridOptions ||
!this.workspace.options.gridOptions['snap']) {
return; // Config says no snapping.
}
var spacing = this.workspace.options.gridOptions['spacing'];
var half = spacing / 2;
var xy = this.getRelativeToSurfaceXY();
var dx = Math.round((xy.x - half) / spacing) * spacing + half - xy.x;
var dy = Math.round((xy.y - half) / spacing) * spacing + half - xy.y;
dx = Math.round(dx);
dy = Math.round(dy);
if (dx != 0 || dy != 0) {
this.moveBy(dx, dy);
}
};
/**
* Returns a bounding box describing the dimensions of this block
* and any blocks stacked below it.
* @return {!{height: number, width: number}} Object with height and width
* properties.
*/
Blockly.BlockSvg.prototype.getHeightWidth = function() {
var height = this.height;
var width = this.width;
// Recursively add size of subsequent blocks.
var nextBlock = this.getNextBlock();
if (nextBlock) {
var nextHeightWidth = nextBlock.getHeightWidth();
height += nextHeightWidth.height - 4; // Height of tab.
width = Math.max(width, nextHeightWidth.width);
} else if (!this.nextConnection && !this.outputConnection) {
// Add a bit of margin under blocks with no bottom tab.
height += 2;
}
return {height: height, width: width};
};
/**
* Returns the coordinates of a bounding box describing the dimensions of this
* block and any blocks stacked below it.
* @return {!{topLeft: goog.math.Coordinate, bottomRight: goog.math.Coordinate}}
* Object with top left and bottom right coordinates of the bounding box.
*/
Blockly.BlockSvg.prototype.getBoundingRectangle = function() {
var blockXY = this.getRelativeToSurfaceXY(this);
var tab = this.outputConnection ? Blockly.BlockSvg.TAB_WIDTH : 0;
var blockBounds = this.getHeightWidth();
var topLeft;
var bottomRight;
if (this.RTL) {
// Width has the tab built into it already so subtract it here.
topLeft = new goog.math.Coordinate(blockXY.x - (blockBounds.width - tab),
blockXY.y);
// Add the width of the tab/puzzle piece knob to the x coordinate
// since X is the corner of the rectangle, not the whole puzzle piece.
bottomRight = new goog.math.Coordinate(blockXY.x + tab,
blockXY.y + blockBounds.height);
} else {
// Subtract the width of the tab/puzzle piece knob to the x coordinate
// since X is the corner of the rectangle, not the whole puzzle piece.
topLeft = new goog.math.Coordinate(blockXY.x - tab, blockXY.y);
// Width has the tab built into it already so subtract it here.
bottomRight = new goog.math.Coordinate(blockXY.x + blockBounds.width - tab,
blockXY.y + blockBounds.height);
}
return {topLeft: topLeft, bottomRight: bottomRight};
};
/**
* Set block opacity for SVG rendering.
* @param {number} opacity Intended opacity, betweeen 0 and 1
*/
Blockly.BlockSvg.prototype.setOpacity = function(opacity) {
this.opacity_ = opacity;
if (this.rendered) {
this.updateColour();
}
};
/**
* Get block opacity for SVG rendering.
 * @return {number} Intended opacity, between 0 and 1
*/
Blockly.BlockSvg.prototype.getOpacity = function() {
return this.opacity_;
};
/**
* Set whether the block is collapsed or not.
* @param {boolean} collapsed True if collapsed.
*/
Blockly.BlockSvg.prototype.setCollapsed = function(collapsed) {
if (this.collapsed_ == collapsed) {
return;
}
var renderList = [];
// Show/hide the inputs.
for (var i = 0, input; input = this.inputList[i]; i++) {
renderList.push.apply(renderList, input.setVisible(!collapsed));
}
var COLLAPSED_INPUT_NAME = '_TEMP_COLLAPSED_INPUT';
if (collapsed) {
var icons = this.getIcons();
for (i = 0; i < icons.length; i++) {
icons[i].setVisible(false);
}
var text = this.toString(Blockly.COLLAPSE_CHARS);
this.appendDummyInput(COLLAPSED_INPUT_NAME).appendField(text).init();
} else {
this.removeInput(COLLAPSED_INPUT_NAME);
// Clear any warnings inherited from enclosed blocks.
this.setWarningText(null);
}
Blockly.BlockSvg.superClass_.setCollapsed.call(this, collapsed);
if (!renderList.length) {
// No child blocks, just render this block.
renderList[0] = this;
}
if (this.rendered) {
for (var i = 0, block; block = renderList[i]; i++) {
block.render();
}
// Don't bump neighbours.
// Although bumping neighbours would make sense, users often collapse
// all their functions and store them next to each other. Expanding and
// bumping causes all their definitions to go out of alignment.
}
};
/**
* Open the next (or previous) FieldTextInput.
* @param {Blockly.Field|Blockly.Block} start Current location.
* @param {boolean} forward If true go forward, otherwise backward.
*/
Blockly.BlockSvg.prototype.tab = function(start, forward) {
// This function need not be efficient since it runs once on a keypress.
// Create an ordered list of all text fields and connected inputs.
var list = [];
for (var i = 0, input; input = this.inputList[i]; i++) {
for (var j = 0, field; field = input.fieldRow[j]; j++) {
if (field instanceof Blockly.FieldTextInput) {
// TODO: Also support dropdown fields.
list.push(field);
}
}
if (input.connection) {
var block = input.connection.targetBlock();
if (block) {
list.push(block);
}
}
}
i = list.indexOf(start);
if (i == -1) {
// No start location, start at the beginning or end.
i = forward ? -1 : list.length;
}
var target = list[forward ? i + 1 : i - 1];
if (!target) {
// Ran off of list.
var parent = this.getParent();
if (parent) {
parent.tab(this, forward);
}
} else if (target instanceof Blockly.Field) {
target.showEditor_();
} else {
target.tab(null, forward);
}
};
/**
* Handle a mouse-down on an SVG block.
* @param {!Event} e Mouse down event.
* @private
*/
Blockly.BlockSvg.prototype.onMouseDown_ = function(e) {
if (this.workspace.options.readOnly) {
return;
}
if (this.isInFlyout) {
e.stopPropagation();
return;
}
Blockly.setPageSelectable(false);
this.workspace.markFocused();
// Update Blockly's knowledge of its own location.
Blockly.svgResize(this.workspace);
Blockly.terminateDrag_();
this.select();
Blockly.hideChaff();
this.workspace.recordDeleteAreas();
if (Blockly.isRightButton(e)) {
// Right-click.
this.showContextMenu_(e);
} else if (!this.isMovable()) {
// Allow immovable blocks to be selected and context menued, but not
// dragged. Let this event bubble up to document, so the workspace may be
// dragged instead.
return;
} else {
if (!Blockly.Events.getGroup()) {
Blockly.Events.setGroup(true);
}
// Left-click (or middle click)
Blockly.Css.setCursor(Blockly.Css.Cursor.CLOSED);
this.dragStartXY_ = this.getRelativeToSurfaceXY();
this.workspace.startDrag(e, this.dragStartXY_.x, this.dragStartXY_.y);
Blockly.dragMode_ = Blockly.DRAG_STICKY;
Blockly.BlockSvg.onMouseUpWrapper_ = Blockly.bindEvent_(document,
'mouseup', this, this.onMouseUp_);
Blockly.BlockSvg.onMouseMoveWrapper_ = Blockly.bindEvent_(document,
'mousemove', this, this.onMouseMove_);
// Build a list of bubbles that need to be moved and where they started.
this.draggedBubbles_ = [];
var descendants = this.getDescendants();
for (var i = 0, descendant; descendant = descendants[i]; i++) {
var icons = descendant.getIcons();
for (var j = 0; j < icons.length; j++) {
var data = icons[j].getIconLocation();
data.bubble = icons[j];
this.draggedBubbles_.push(data);
}
}
}
// This event has been handled. No need to bubble up to the document.
e.stopPropagation();
};
/**
* Handle a mouse-up anywhere in the SVG pane. Is only registered when a
* block is clicked. We can't use mouseUp on the block since a fast-moving
* cursor can briefly escape the block before it catches up.
* @param {!Event} e Mouse up event.
* @private
*/
Blockly.BlockSvg.prototype.onMouseUp_ = function(e) {
if (Blockly.dragMode_ != Blockly.DRAG_FREE) {
Blockly.Events.fire(
new Blockly.Events.Ui(this, 'click', undefined, undefined));
}
Blockly.setPageSelectable(true);
Blockly.terminateDrag_();
if (Blockly.selected && Blockly.highlightedConnection_) {
if (Blockly.localConnection_ ==
Blockly.selected.getFirstStatementConnection()) {
// Snap to match the position of the pre-existing stack. Since this is a
// C-block, shift to take into account how the block will stretch as it
// surrounds the internal blocks.
Blockly.selected.moveBy(
Blockly.highlightedConnection_.x_ - Blockly.localConnection_.x_,
Blockly.highlightedConnection_.y_ - Blockly.localConnection_.y_ -
(Blockly.highlightedConnection_.sourceBlock_.getHeightWidth().height -
Blockly.BlockSvg.MIN_BLOCK_Y));
} else if (Blockly.localConnection_.type == Blockly.NEXT_STATEMENT) {
// Snap to match the position of the pre-existing stack.
Blockly.selected.moveBy(
Blockly.highlightedConnection_.x_ - Blockly.localConnection_.x_,
Blockly.highlightedConnection_.y_ - Blockly.localConnection_.y_);
}
// Connect two blocks together.
Blockly.localConnection_.connect(Blockly.highlightedConnection_);
if (this.rendered) {
// Trigger a connection animation.
// Determine which connection is inferior (lower in the source stack).
var inferiorConnection = Blockly.localConnection_.isSuperior() ?
Blockly.highlightedConnection_ : Blockly.localConnection_;
inferiorConnection.getSourceBlock().connectionUiEffect();
}
if (this.workspace.trashcan) {
// Don't throw an object in the trash can if it just got connected.
this.workspace.trashcan.close();
}
} else if (!this.getParent() && Blockly.selected.isDeletable() &&
this.workspace.isDeleteArea(e)) {
var trashcan = this.workspace.trashcan;
if (trashcan) {
goog.Timer.callOnce(trashcan.close, 100, trashcan);
}
Blockly.selected.dispose(false, true);
// Dropping a block on the trash can will usually cause the workspace to
// resize to contain the newly positioned block. Force a second resize
// now that the block has been deleted.
Blockly.fireUiEvent(window, 'resize');
}
if (Blockly.highlightedConnection_) {
Blockly.highlightedConnection_ = null;
}
Blockly.Css.setCursor(Blockly.Css.Cursor.OPEN);
if (!Blockly.WidgetDiv.isVisible()) {
Blockly.Events.setGroup(false);
}
};
/**
* Load the block's help page in a new window.
* @private
*/
Blockly.BlockSvg.prototype.showHelp_ = function() {
var url = goog.isFunction(this.helpUrl) ? this.helpUrl() : this.helpUrl;
if (url) {
// @todo rewrite
alert(url);
}
};
/**
* Show the context menu for this block.
* @param {!Event} e Mouse event.
* @private
*/
Blockly.BlockSvg.prototype.showContextMenu_ = function(e) {
if (this.workspace.options.readOnly || !this.contextMenu) {
return;
}
// Save the current block in a variable for use in closures.
var block = this;
var menuOptions = [];
if (this.isDeletable() && this.isMovable() && !block.isInFlyout) {
// Option to duplicate this block.
var duplicateOption = {
text: Blockly.Msg.DUPLICATE_BLOCK,
enabled: true,
callback: function() {
Blockly.duplicate_(block);
}
};
if (this.getDescendants().length > this.workspace.remainingCapacity()) {
duplicateOption.enabled = false;
}
menuOptions.push(duplicateOption);
if (this.isEditable() && this.workspace.options.comments) {
// Option to add/remove a comment.
var commentOption = {enabled: !goog.userAgent.IE};
if (this.comment) {
commentOption.text = Blockly.Msg.REMOVE_COMMENT;
commentOption.callback = function() {
block.setCommentText(null);
};
} else {
commentOption.text = Blockly.Msg.ADD_COMMENT;
commentOption.callback = function() {
block.setCommentText('');
};
}
menuOptions.push(commentOption);
}
// Option to delete this block.
// Count the number of blocks that are nested in this block.
var descendantCount = this.getDescendants().length;
var nextBlock = this.getNextBlock();
if (nextBlock) {
// Blocks in the current stack would survive this block's deletion.
descendantCount -= nextBlock.getDescendants().length;
}
var deleteOption = {
text: descendantCount == 1 ? Blockly.Msg.DELETE_BLOCK :
Blockly.Msg.DELETE_X_BLOCKS.replace('%1', String(descendantCount)),
enabled: true,
callback: function() {
block.dispose(true, true);
}
};
menuOptions.push(deleteOption);
}
// Option to get help.
var url = goog.isFunction(this.helpUrl) ? this.helpUrl() : this.helpUrl;
var helpOption = {enabled: !!url};
helpOption.text = Blockly.Msg.HELP;
helpOption.callback = function() {
block.showHelp_();
};
menuOptions.push(helpOption);
// Allow the block to add or modify menuOptions.
if (this.customContextMenu && !block.isInFlyout) {
this.customContextMenu(menuOptions);
}
Blockly.ContextMenu.show(e, menuOptions, this.RTL);
Blockly.ContextMenu.currentBlock = this;
};
/**
* Move the connections for this block and all blocks attached under it.
* Also update any attached bubbles.
* @param {number} dx Horizontal offset from current location.
* @param {number} dy Vertical offset from current location.
* @private
*/
Blockly.BlockSvg.prototype.moveConnections_ = function(dx, dy) {
if (!this.rendered) {
// Rendering is required to lay out the blocks.
// This is probably an invisible block attached to a collapsed block.
return;
}
var myConnections = this.getConnections_(false);
for (var i = 0; i < myConnections.length; i++) {
myConnections[i].moveBy(dx, dy);
}
var icons = this.getIcons();
for (i = 0; i < icons.length; i++) {
icons[i].computeIconLocation();
}
// Recurse through all blocks attached under this one.
for (i = 0; i < this.childBlocks_.length; i++) {
this.childBlocks_[i].moveConnections_(dx, dy);
}
};
/**
* Recursively adds or removes the dragging class to this node and its children.
* @param {boolean} adding True if adding, false if removing.
* @private
*/
Blockly.BlockSvg.prototype.setDragging_ = function(adding) {
if (adding) {
this.addDragging();
Blockly.draggingConnections_ =
Blockly.draggingConnections_.concat(this.getConnections_(true));
} else {
this.removeDragging();
Blockly.draggingConnections_ = [];
}
// Recurse through all blocks attached under this one.
for (var i = 0; i < this.childBlocks_.length; i++) {
this.childBlocks_[i].setDragging_(adding);
}
};
/**
* Move this block to its workspace's drag surface, accounting for positioning.
* Generally should be called at the same time as setDragging_(true).
* @private
*/
Blockly.BlockSvg.prototype.moveToDragSurface_ = function() {
// The translation for drag surface blocks,
// is equal to the current relative-to-surface position,
// to keep the position in sync as it move on/off the surface.
var xy = this.getRelativeToSurfaceXY();
this.clearTransformAttributes_();
this.workspace.dragSurface.translateSurface(xy.x, xy.y);
// Execute the move on the top-level SVG component
this.workspace.dragSurface.setBlocksAndShow(this.getSvgRoot());
};
/**
* Move this block back to the workspace block canvas.
* Generally should be called at the same time as setDragging_(false).
* @private
*/
Blockly.BlockSvg.prototype.moveOffDragSurface_ = function() {
// Translate to current position, turning off 3d.
var xy = this.getRelativeToSurfaceXY();
this.clearTransformAttributes_();
this.translate(xy.x, xy.y, false);
this.workspace.dragSurface.clearAndHide(this.workspace.getCanvas());
};
/**
* Clear the block of style="..." and transform="..." attributes.
* Used when the block is switching from 3d to 2d transform or vice versa.
* @private
*/
Blockly.BlockSvg.prototype.clearTransformAttributes_ = function() {
if (this.getSvgRoot().hasAttribute('transform')) {
this.getSvgRoot().removeAttribute('transform');
}
if (this.getSvgRoot().hasAttribute('style')) {
this.getSvgRoot().removeAttribute('style');
}
};
/**
* Drag this block to follow the mouse.
* @param {!Event} e Mouse move event.
* @private
*/
Blockly.BlockSvg.prototype.onMouseMove_ = function(e) {
if (e.type == 'mousemove' && e.clientX <= 1 && e.clientY == 0 &&
e.button == 0) {
/* HACK:
Safari Mobile 6.0 and Chrome for Android 18.0 fire rogue mousemove
events on certain touch actions. Ignore events with these signatures.
This may result in a one-pixel blind spot in other browsers,
but this shouldn't be noticeable. */
e.stopPropagation();
return;
}
var oldXY = this.getRelativeToSurfaceXY();
var newXY = this.workspace.moveDrag(e);
if (Blockly.dragMode_ == Blockly.DRAG_STICKY) {
// Still dragging within the sticky DRAG_RADIUS.
var dr = goog.math.Coordinate.distance(oldXY, newXY) * this.workspace.scale;
if (dr > Blockly.DRAG_RADIUS) {
// Switch to unrestricted dragging.
Blockly.dragMode_ = Blockly.DRAG_FREE;
Blockly.longStop_();
// Must move to drag surface before unplug(),
// or else connections will calculate the wrong relative to surface XY
// in tighten_(). Then blocks connected to this block move around on the
// drag surface. By moving to the drag surface before unplug, connection
// positions will be calculated correctly.
this.moveToDragSurface_();
// Clear all WidgetDivs without animating, in case blocks are moved around
Blockly.WidgetDiv.hide(true);
if (this.parentBlock_) {
// Push this block to the very top of the stack.
this.unplug();
this.disconnectUiEffect();
}
this.setDragging_(true);
}
}
if (Blockly.dragMode_ == Blockly.DRAG_FREE) {
var dx = oldXY.x - this.dragStartXY_.x;
var dy = oldXY.y - this.dragStartXY_.y;
this.workspace.dragSurface.translateSurface(newXY.x, newXY.y);
// Drag all the nested bubbles.
for (var i = 0; i < this.draggedBubbles_.length; i++) {
var commentData = this.draggedBubbles_[i];
commentData.bubble.setIconLocation(commentData.x + dx,
commentData.y + dy);
}
// Check to see if any of this block's connections are within range of
// another block's connection.
var myConnections = this.getConnections_(false);
// Also check the last connection on this stack
var lastOnStack = this.lastConnectionInStack_();
if (lastOnStack && lastOnStack != this.nextConnection) {
myConnections.push(lastOnStack);
}
var closestConnection = null;
var localConnection = null;
var radiusConnection = Blockly.SNAP_RADIUS;
for (i = 0; i < myConnections.length; i++) {
var myConnection = myConnections[i];
var neighbour = myConnection.closest(radiusConnection, dx, dy);
if (neighbour.connection) {
closestConnection = neighbour.connection;
localConnection = myConnection;
radiusConnection = neighbour.radius;
}
}
var candidateIsLast = (localConnection == lastOnStack);
this.updatePreviews(closestConnection, localConnection, radiusConnection,
e, newXY.x - this.dragStartXY_.x, newXY.y - this.dragStartXY_.y,
candidateIsLast);
}
// This event has been handled. No need to bubble up to the document.
e.stopPropagation();
};
/**
* Preview the results of the drag if the mouse is released immediately.
* @param {Blockly.Connection} closestConnection The closest connection found
* during the search
* @param {Blockly.Connection} localConnection The connection on the moving
* block.
* @param {number} radiusConnection The distance between closestConnection and
* localConnection.
* @param {!Event} e Mouse move event.
* @param {number} dx The x distance the block has moved onscreen up to this
* point in the drag.
* @param {number} dy The y distance the block has moved onscreen up to this
* point in the drag.
* @param {boolean} candidateIsLast True if the dragging stack is more than one
* block long and localConnection is the last connection on the stack.
*/
Blockly.BlockSvg.prototype.updatePreviews = function(closestConnection,
localConnection, radiusConnection, e, dx, dy, candidateIsLast) {
// Don't fire events for insertion marker creation or movement.
Blockly.Events.disable();
// Remove an insertion marker if needed. For Scratch-Blockly we are using
// grayed-out blocks instead of highlighting the connection; for compatibility
// with Web Blockly the name "highlightedConnection" will still be used.
if (Blockly.highlightedConnection_ &&
Blockly.highlightedConnection_ != closestConnection) {
if (Blockly.insertionMarker_ && Blockly.insertionMarkerConnection_) {
Blockly.BlockSvg.disconnectInsertionMarker();
}
// If there's already an insertion marker but it's representing the wrong
// block, delete it so we can create the correct one.
if (Blockly.insertionMarker_ &&
(candidateIsLast && Blockly.localConnection_.sourceBlock_ == this) ||
(!candidateIsLast && Blockly.localConnection_.sourceBlock_ != this)) {
Blockly.insertionMarker_.dispose();
Blockly.insertionMarker_ = null;
}
Blockly.highlightedConnection_ = null;
Blockly.localConnection_ = null;
}
// Add an insertion marker if needed.
if (closestConnection &&
closestConnection != Blockly.highlightedConnection_ &&
!closestConnection.sourceBlock_.isInsertionMarker()) {
Blockly.highlightedConnection_ = closestConnection;
Blockly.localConnection_ = localConnection;
if (!Blockly.insertionMarker_) {
Blockly.insertionMarker_ =
this.workspace.newBlock(Blockly.localConnection_.sourceBlock_.type);
Blockly.insertionMarker_.setInsertionMarker(true);
Blockly.insertionMarker_.initSvg();
}
var insertionMarker = Blockly.insertionMarker_;
var insertionMarkerConnection = insertionMarker.getMatchingConnection(
localConnection.sourceBlock_, localConnection);
if (insertionMarkerConnection != Blockly.insertionMarkerConnection_) {
insertionMarker.rendered = true;
// Render disconnected from everything else so that we have a valid
// connection location.
insertionMarker.render();
insertionMarker.getSvgRoot().setAttribute('visibility', 'visible');
// Move the preview to the correct location before the existing block.
if (insertionMarkerConnection.type == Blockly.NEXT_STATEMENT) {
var newX = closestConnection.x_ - insertionMarkerConnection.x_;
var newY = closestConnection.y_ - insertionMarkerConnection.y_;
// If it's the first statement connection of a c-block, this block is
// going to get taller as soon as render() is called below.
if (insertionMarkerConnection != insertionMarker.nextConnection) {
newY -= closestConnection.sourceBlock_.getHeightWidth().height -
Blockly.BlockSvg.MIN_BLOCK_Y;
}
insertionMarker.moveBy(newX, newY);
}
if (insertionMarkerConnection.type == Blockly.PREVIOUS_STATEMENT &&
!insertionMarker.nextConnection) {
Blockly.bumpedConnection_ = closestConnection.targetConnection;
}
      // Renders the insertion marker.
insertionMarkerConnection.connect(closestConnection);
// Render dragging block so it appears on top.
Blockly.insertionMarkerConnection_ = insertionMarkerConnection;
}
}
// Reenable events.
Blockly.Events.enable();
// Provide visual indication of whether the block will be deleted if
// dropped here.
if (this.isDeletable()) {
this.workspace.isDeleteArea(e);
}
};
/**
* Disconnect the current insertion marker from the stack, and heal the stack to
* its previous state.
*/
Blockly.BlockSvg.disconnectInsertionMarker = function() {
// The insertion marker is the first block in a stack, either because it
// doesn't have a previous connection or because the previous connection is
// not connected. Unplug won't do anything in that case. Instead, unplug the
// following block.
if (Blockly.insertionMarkerConnection_ ==
Blockly.insertionMarker_.nextConnection &&
(!Blockly.insertionMarker_.previousConnection ||
!Blockly.insertionMarker_.previousConnection.targetConnection)) {
Blockly.insertionMarkerConnection_.targetBlock().unplug(false);
}
// Inside of a C-block, first statement connection.
else if (Blockly.insertionMarkerConnection_.type == Blockly.NEXT_STATEMENT &&
Blockly.insertionMarkerConnection_ !=
Blockly.insertionMarker_.nextConnection) {
var innerConnection = Blockly.insertionMarkerConnection_.targetConnection;
innerConnection.sourceBlock_.unplug(false);
var previousBlockNextConnection =
Blockly.insertionMarker_.previousConnection.targetConnection;
Blockly.insertionMarker_.unplug(true);
if (previousBlockNextConnection) {
previousBlockNextConnection.connect(innerConnection);
}
}
else {
Blockly.insertionMarker_.unplug(true /* healStack */);
}
if (Blockly.insertionMarkerConnection_.targetConnection) {
throw 'insertionMarkerConnection still connected at the end of disconnectInsertionMarker';
}
Blockly.insertionMarkerConnection_ = null;
Blockly.insertionMarker_.getSvgRoot().setAttribute('visibility', 'hidden');
};
/**
* Add or remove the UI indicating if this block is movable or not.
*/
Blockly.BlockSvg.prototype.updateMovable = function() {
if (this.isMovable()) {
Blockly.addClass_(/** @type {!Element} */ (this.svgGroup_),
'blocklyDraggable');
} else {
Blockly.removeClass_(/** @type {!Element} */ (this.svgGroup_),
'blocklyDraggable');
}
};
/**
* Set whether this block is movable or not.
* @param {boolean} movable True if movable.
*/
Blockly.BlockSvg.prototype.setMovable = function(movable) {
Blockly.BlockSvg.superClass_.setMovable.call(this, movable);
this.updateMovable();
};
/**
* Set whether this block is editable or not.
* @param {boolean} editable True if editable.
*/
Blockly.BlockSvg.prototype.setEditable = function(editable) {
Blockly.BlockSvg.superClass_.setEditable.call(this, editable);
if (this.rendered) {
for (var i = 0; i < this.icons_.length; i++) {
this.icons_[i].updateEditable();
}
}
};
/**
* Set whether this block is a shadow block or not.
* @param {boolean} shadow True if a shadow.
*/
Blockly.BlockSvg.prototype.setShadow = function(shadow) {
Blockly.BlockSvg.superClass_.setShadow.call(this, shadow);
this.updateColour();
};
/**
* Set whether this block is an insertion marker block or not.
* @param {boolean} insertionMarker True if an insertion marker.
*/
Blockly.BlockSvg.prototype.setInsertionMarker = function(insertionMarker) {
Blockly.BlockSvg.superClass_.setInsertionMarker.call(this, insertionMarker);
this.updateColour();
};
/**
* Return the root node of the SVG or null if none exists.
* @return {Element} The root SVG node (probably a group).
*/
Blockly.BlockSvg.prototype.getSvgRoot = function() {
return this.svgGroup_;
};
/**
* Dispose of this block.
* @param {boolean} healStack If true, then try to heal any gap by connecting
* the next statement with the previous statement. Otherwise, dispose of
* all children of this block.
* @param {boolean} animate If true, show a disposal animation and sound.
*/
Blockly.BlockSvg.prototype.dispose = function(healStack, animate) {
Blockly.Field.startCache();
// If this block is being dragged, unlink the mouse events.
if (Blockly.selected == this) {
this.unselect();
Blockly.terminateDrag_();
}
// If this block has a context menu open, close it.
if (Blockly.ContextMenu.currentBlock == this) {
Blockly.ContextMenu.hide();
}
if (animate && this.rendered) {
this.unplug(healStack);
this.disposeUiEffect();
}
// Stop rerendering.
this.rendered = false;
Blockly.Events.disable();
var icons = this.getIcons();
for (var i = 0; i < icons.length; i++) {
icons[i].dispose();
}
Blockly.Events.enable();
Blockly.BlockSvg.superClass_.dispose.call(this, healStack);
goog.dom.removeNode(this.svgGroup_);
// Sever JavaScript to DOM connections.
this.svgGroup_ = null;
this.svgPath_ = null;
Blockly.Field.stopCache();
};
/**
* Play some UI effects (sound, animation) when disposing of a block.
*/
Blockly.BlockSvg.prototype.disposeUiEffect = function() {
this.workspace.playAudio('delete');
var xy = Blockly.getSvgXY_(/** @type {!Element} */ (this.svgGroup_),
this.workspace);
// Deeply clone the current block.
var clone = this.svgGroup_.cloneNode(true);
clone.translateX_ = xy.x;
clone.translateY_ = xy.y;
clone.setAttribute('transform',
'translate(' + clone.translateX_ + ',' + clone.translateY_ + ')');
this.workspace.getParentSvg().appendChild(clone);
clone.bBox_ = clone.getBBox();
// Start the animation.
Blockly.BlockSvg.disposeUiStep_(clone, this.RTL, new Date(),
this.workspace.scale);
};
/**
* Animate a cloned block and eventually dispose of it.
* This is a class method, not an instance method, since the original block has
* been destroyed and is no longer accessible.
* @param {!Element} clone SVG element to animate and dispose of.
* @param {boolean} rtl True if RTL, false if LTR.
* @param {!Date} start Date of animation's start.
* @param {number} workspaceScale Scale of workspace.
* @private
*/
Blockly.BlockSvg.disposeUiStep_ = function(clone, rtl, start, workspaceScale) {
var ms = (new Date()) - start;
var percent = ms / 150;
if (percent > 1) {
goog.dom.removeNode(clone);
} else {
var x = clone.translateX_ +
(rtl ? -1 : 1) * clone.bBox_.width * workspaceScale / 2 * percent;
var y = clone.translateY_ + clone.bBox_.height * workspaceScale * percent;
var scale = (1 - percent) * workspaceScale;
clone.setAttribute('transform', 'translate(' + x + ',' + y + ')' +
' scale(' + scale + ')');
var closure = function() {
Blockly.BlockSvg.disposeUiStep_(clone, rtl, start, workspaceScale);
};
setTimeout(closure, 10);
}
};
/**
* Play some UI effects (sound) when disconnecting a block.
*/
Blockly.BlockSvg.prototype.disconnectUiEffect = function() {
this.workspace.playAudio('disconnect');
};
/**
* Enable or disable a block.
*/
Blockly.BlockSvg.prototype.updateDisabled = function() {
// not supported
};
/**
* Returns the comment on this block (or '' if none).
* @return {string} Block's comment.
*/
Blockly.BlockSvg.prototype.getCommentText = function() {
if (this.comment) {
var comment = this.comment.getText();
// Trim off trailing whitespace.
return comment.replace(/\s+$/, '').replace(/ +\n/g, '\n');
}
return '';
};
/**
* Set this block's comment text.
* @param {?string} text The text, or null to delete.
*/
Blockly.BlockSvg.prototype.setCommentText = function(text) {
var changedState = false;
if (goog.isString(text)) {
if (!this.comment) {
this.comment = new Blockly.Comment(this);
changedState = true;
}
this.comment.setText(/** @type {string} */ (text));
} else {
if (this.comment) {
this.comment.dispose();
changedState = true;
}
}
if (changedState && this.rendered) {
this.render();
// Adding or removing a comment icon will cause the block to change shape.
this.bumpNeighbours_();
}
};
/**
* Set this block's warning text.
* @param {?string} text The text, or null to delete.
* @param {string=} opt_id An optional ID for the warning text to be able to
* maintain multiple warnings.
*/
Blockly.BlockSvg.prototype.setWarningText = function(text, opt_id) {
if (!this.setWarningText.pid_) {
// Create a database of warning PIDs.
// Only runs once per block (and only those with warnings).
this.setWarningText.pid_ = Object.create(null);
}
var id = opt_id || '';
if (!id) {
// Kill all previous pending processes; this edit supersedes them all.
for (var n in this.setWarningText.pid_) {
clearTimeout(this.setWarningText.pid_[n]);
delete this.setWarningText.pid_[n];
}
} else if (this.setWarningText.pid_[id]) {
// Only queue up the latest change. Kill any earlier pending process.
clearTimeout(this.setWarningText.pid_[id]);
delete this.setWarningText.pid_[id];
}
if (Blockly.dragMode_ == Blockly.DRAG_FREE) {
// Don't change the warning text during a drag.
// Wait until the drag finishes.
var thisBlock = this;
this.setWarningText.pid_[id] = setTimeout(function() {
if (thisBlock.workspace) { // Check block wasn't deleted.
delete thisBlock.setWarningText.pid_[id];
thisBlock.setWarningText(text, id);
}
}, 100);
return;
}
if (this.isInFlyout) {
text = null;
}
var changedState = false;
if (goog.isString(text)) {
if (!this.warning) {
this.warning = new Blockly.Warning(this);
changedState = true;
}
this.warning.setText(/** @type {string} */ (text), id);
} else {
// Dispose all warnings if no id is given.
if (this.warning && !id) {
this.warning.dispose();
changedState = true;
} else if (this.warning) {
var oldText = this.warning.getText();
this.warning.setText('', id);
var newText = this.warning.getText();
if (!newText) {
this.warning.dispose();
}
changedState = oldText != newText;
}
}
if (changedState && this.rendered) {
this.render();
// Adding or removing a warning icon will cause the block to change shape.
this.bumpNeighbours_();
}
};
/**
* Give this block a mutator dialog.
* @param {Blockly.Mutator} mutator A mutator dialog instance or null to remove.
*/
Blockly.BlockSvg.prototype.setMutator = function(mutator) {
if (this.mutator && this.mutator !== mutator) {
this.mutator.dispose();
}
if (mutator) {
mutator.block_ = this;
this.mutator = mutator;
mutator.createIcon();
}
};
/**
* Select this block. Highlight it visually.
*/
Blockly.BlockSvg.prototype.addSelect = function() {
Blockly.addClass_(/** @type {!Element} */ (this.svgGroup_),
'blocklySelected');
// Move the selected block to the top of the stack.
this.svgGroup_.parentNode.appendChild(this.svgGroup_);
};
/**
* Unselect this block. Remove its highlighting.
*/
Blockly.BlockSvg.prototype.removeSelect = function() {
Blockly.removeClass_(/** @type {!Element} */ (this.svgGroup_),
'blocklySelected');
};
/**
* Adds the dragging class to this block.
*/
Blockly.BlockSvg.prototype.addDragging = function() {
Blockly.addClass_(/** @type {!Element} */ (this.svgGroup_),
'blocklyDragging');
};
/**
* Removes the dragging class from this block.
*/
Blockly.BlockSvg.prototype.removeDragging = function() {
Blockly.removeClass_(/** @type {!Element} */ (this.svgGroup_),
'blocklyDragging');
};
// Overrides of functions on Blockly.Block that take into account whether the
// block has been rendered.
/**
* Change the colour of a block.
* @param {number|string} colour HSV hue value, or #RRGGBB string.
* @param {number|string} colourSecondary Secondary HSV hue value, or #RRGGBB
* string.
* @param {number|string} colourTertiary Tertiary HSV hue value, or #RRGGBB
* string.
*/
Blockly.BlockSvg.prototype.setColour = function(colour, colourSecondary,
colourTertiary) {
Blockly.BlockSvg.superClass_.setColour.call(this, colour, colourSecondary,
colourTertiary);
if (this.rendered) {
this.updateColour();
}
};
/**
* Set whether this block can chain onto the bottom of another block.
* @param {boolean} newBoolean True if there can be a previous statement.
* @param {string|Array.<string>|null|undefined} opt_check Statement type or
* list of statement types. Null/undefined if any type could be connected.
*/
Blockly.BlockSvg.prototype.setPreviousStatement =
function(newBoolean, opt_check) {
Blockly.BlockSvg.superClass_.setPreviousStatement.call(this, newBoolean,
opt_check);
if (this.rendered) {
this.render();
this.bumpNeighbours_();
}
};
/**
* Set whether another block can chain onto the bottom of this block.
* @param {boolean} newBoolean True if there can be a next statement.
* @param {string|Array.<string>|null|undefined} opt_check Statement type or
* list of statement types. Null/undefined if any type could be connected.
*/
Blockly.BlockSvg.prototype.setNextStatement = function(newBoolean, opt_check) {
Blockly.BlockSvg.superClass_.setNextStatement.call(this, newBoolean,
opt_check);
if (this.rendered) {
this.render();
this.bumpNeighbours_();
}
};
/**
* Set whether this block returns a value.
* @param {boolean} newBoolean True if there is an output.
* @param {string|Array.<string>|null|undefined} opt_check Returned type or list
* of returned types. Null or undefined if any type could be returned
* (e.g. variable get).
*/
Blockly.BlockSvg.prototype.setOutput = function(newBoolean, opt_check) {
Blockly.BlockSvg.superClass_.setOutput.call(this, newBoolean, opt_check);
if (this.rendered) {
this.render();
this.bumpNeighbours_();
}
};
/**
* Set whether value inputs are arranged horizontally or vertically.
* @param {boolean} newBoolean True if inputs are horizontal.
*/
Blockly.BlockSvg.prototype.setInputsInline = function(newBoolean) {
Blockly.BlockSvg.superClass_.setInputsInline.call(this, newBoolean);
if (this.rendered) {
this.render();
this.bumpNeighbours_();
}
};
/**
* Remove an input from this block.
* @param {string} name The name of the input.
* @param {boolean=} opt_quiet True to prevent error if input is not present.
* @throws {goog.asserts.AssertionError} if the input is not present and
* opt_quiet is not true.
*/
Blockly.BlockSvg.prototype.removeInput = function(name, opt_quiet) {
Blockly.BlockSvg.superClass_.removeInput.call(this, name, opt_quiet);
if (this.rendered) {
this.render();
// Removing an input will cause the block to change shape.
this.bumpNeighbours_();
}
};
/**
* Move a numbered input to a different location on this block.
* @param {number} inputIndex Index of the input to move.
* @param {number} refIndex Index of input that should be after the moved input.
*/
Blockly.BlockSvg.prototype.moveNumberedInputBefore = function(
inputIndex, refIndex) {
Blockly.BlockSvg.superClass_.moveNumberedInputBefore.call(this, inputIndex,
refIndex);
if (this.rendered) {
this.render();
// Moving an input will cause the block to change shape.
this.bumpNeighbours_();
}
};
/**
* Add a value input, statement input or local variable to this block.
* @param {number} type Either Blockly.INPUT_VALUE or Blockly.NEXT_STATEMENT or
* Blockly.DUMMY_INPUT.
* @param {string} name Language-neutral identifier which may be used to find this
* input again. Should be unique to this block.
* @return {!Blockly.Input} The input object created.
* @private
*/
Blockly.BlockSvg.prototype.appendInput_ = function(type, name) {
var input = Blockly.BlockSvg.superClass_.appendInput_.call(this, type, name);
if (this.rendered) {
this.render();
// Adding an input will cause the block to change shape.
this.bumpNeighbours_();
}
return input;
};
/**
* Returns connections originating from this block.
* @param {boolean} all If true, return all connections even hidden ones.
* Otherwise, for a non-rendered block return an empty list, and for a
* collapsed block don't return input connections.
* @return {!Array.<!Blockly.Connection>} Array of connections.
* @private
*/
Blockly.BlockSvg.prototype.getConnections_ = function(all) {
var myConnections = [];
if (all || this.rendered) {
if (this.outputConnection) {
myConnections.push(this.outputConnection);
}
if (this.previousConnection) {
myConnections.push(this.previousConnection);
}
if (this.nextConnection) {
myConnections.push(this.nextConnection);
}
if (all || !this.collapsed_) {
for (var i = 0, input; input = this.inputList[i]; i++) {
if (input.connection) {
myConnections.push(input.connection);
}
}
}
}
return myConnections;
};
| 1 | 7,697 | Do you also need to check if Blockly.localConnection_ is non-null? | LLK-scratch-blocks | js |
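A minimal sketch of the guard the reviewer is asking about — illustrative only, assuming Blockly.localConnection_ holds the dragged block's connection during a drag; the surrounding names simply mirror the snippet above rather than an actual fix:
// Hypothetical null guard before recording the bumped connection.
if (Blockly.localConnection_ &&
    insertionMarkerConnection.type == Blockly.PREVIOUS_STATEMENT &&
    !insertionMarker.nextConnection) {
  Blockly.bumpedConnection_ = closestConnection.targetConnection;
}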
@@ -1,5 +1,6 @@
module RSpec
module Core
+ # Internal container for global non-configuration data
class World
include RSpec::Core::Hooks | 1 | module RSpec
module Core
class World
include RSpec::Core::Hooks
attr_reader :example_groups, :filtered_examples
attr_accessor :wants_to_quit
def initialize(configuration=RSpec.configuration)
@configuration = configuration
@example_groups = []
@filtered_examples = Hash.new { |hash,group|
hash[group] = begin
examples = group.examples.dup
examples = filter_manager.prune(examples)
examples.uniq!
examples
end
}
end
def ordered_example_groups
ordering_strategy = @configuration.ordering_registry.fetch(:global)
ordering_strategy.order(@example_groups)
end
def reset
example_groups.clear
SharedExampleGroup.registry.clear
end
def filter_manager
@configuration.filter_manager
end
def register(example_group)
example_groups << example_group
example_group
end
def inclusion_filter
@configuration.inclusion_filter
end
def exclusion_filter
@configuration.exclusion_filter
end
def configure_group(group)
@configuration.configure_group(group)
end
def example_count
FlatMap.flat_map(example_groups) {|g| g.descendants}.
inject(0) {|sum, g| sum + g.filtered_examples.size}
end
def preceding_declaration_line(filter_line)
declaration_line_numbers.sort.inject(nil) do |highest_prior_declaration_line, line|
line <= filter_line ? line : highest_prior_declaration_line
end
end
def reporter
@configuration.reporter
end
def announce_filters
filter_announcements = []
announce_inclusion_filter filter_announcements
announce_exclusion_filter filter_announcements
unless filter_manager.empty?
if filter_announcements.length == 1
reporter.message("Run options: #{filter_announcements[0]}")
else
reporter.message("Run options:\n #{filter_announcements.join("\n ")}")
end
end
if @configuration.run_all_when_everything_filtered? && example_count.zero?
reporter.message("#{everything_filtered_message}; ignoring #{inclusion_filter.description}")
filtered_examples.clear
inclusion_filter.clear
end
if example_count.zero?
example_groups.clear
if filter_manager.empty?
reporter.message("No examples found.")
elsif exclusion_filter.empty_without_conditional_filters?
message = everything_filtered_message
if @configuration.run_all_when_everything_filtered?
message << "; ignoring #{inclusion_filter.description}"
end
reporter.message(message)
elsif inclusion_filter.empty?
reporter.message(everything_filtered_message)
end
end
end
def everything_filtered_message
"\nAll examples were filtered out"
end
def announce_inclusion_filter(announcements)
unless inclusion_filter.empty?
announcements << "include #{inclusion_filter.description}"
end
end
def announce_exclusion_filter(announcements)
unless exclusion_filter.empty_without_conditional_filters?
announcements << "exclude #{exclusion_filter.description}"
end
end
private
def declaration_line_numbers
@line_numbers ||= example_groups.inject([]) do |lines, g|
lines + g.declaration_line_numbers
end
end
end
end
end
| 1 | 11,784 | Not really a container, and it's not just about non-config data, not sure of a better description, @myronmarston ? | rspec-rspec-core | rb |
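One possible rewording, sketched purely to illustrate the direction of the discussion (not an agreed docstring):
# @private
# Internal bookkeeping for a spec run: registered example groups,
# filtering state, and run-wide flags. Not part of the public API.
class World
end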
@@ -3,6 +3,8 @@ const { basename } = require('./path-utils');
const shim = require('./shim').default;
const JoplinError = require('./JoplinError').default;
const { Buffer } = require('buffer');
+const { Readable } = require('stream').Readable;
+const { GetObjectCommand, ListObjectsV2Command, HeadObjectCommand, PutObjectCommand, DeleteObjectCommand, DeleteObjectsCommand } = require("@aws-sdk/client-s3");
const S3_MAX_DELETES = 1000;
| 1 | const { basicDelta } = require('./file-api');
const { basename } = require('./path-utils');
const shim = require('./shim').default;
const JoplinError = require('./JoplinError').default;
const { Buffer } = require('buffer');
const S3_MAX_DELETES = 1000;
class FileApiDriverAmazonS3 {
constructor(api, s3_bucket) {
this.s3_bucket_ = s3_bucket;
this.api_ = api;
}
api() {
return this.api_;
}
requestRepeatCount() {
return 3;
}
makePath_(path) {
if (!path) return '';
return path;
}
hasErrorCode_(error, errorCode) {
if (!error || typeof error.code !== 'string') return false;
return error.code.indexOf(errorCode) >= 0;
}
// Need to make a custom promise, built-in promise is broken: https://github.com/aws/aws-sdk-js/issues/1436
async s3GetObject(key) {
return new Promise((resolve, reject) => {
this.api().getObject({
Bucket: this.s3_bucket_,
Key: key,
}, (err, response) => {
if (err) reject(err);
else resolve(response);
});
});
}
async s3ListObjects(key, cursor) {
return new Promise((resolve, reject) => {
this.api().listObjectsV2({
Bucket: this.s3_bucket_,
Prefix: key,
Delimiter: '/',
ContinuationToken: cursor,
}, (err, response) => {
if (err) reject(err);
else resolve(response);
});
});
}
async s3HeadObject(key) {
return new Promise((resolve, reject) => {
this.api().headObject({
Bucket: this.s3_bucket_,
Key: key,
}, (err, response) => {
if (err) reject(err);
else resolve(response);
});
});
}
async s3PutObject(key, body) {
return new Promise((resolve, reject) => {
this.api().putObject({
Bucket: this.s3_bucket_,
Key: key,
Body: body,
}, (err, response) => {
if (err) reject(err);
else resolve(response);
});
});
}
async s3UploadFileFrom(path, key) {
if (!(await shim.fsDriver().exists(path))) throw new Error('s3UploadFileFrom: file does not exist');
const body = await shim.fsDriver().readFile(path, 'base64');
const fileStat = await shim.fsDriver().stat(path);
return new Promise((resolve, reject) => {
this.api().putObject({
Bucket: this.s3_bucket_,
Key: key,
Body: Buffer.from(body, 'base64'),
ContentLength: `${fileStat.size}`,
}, (err, response) => {
if (err) reject(err);
else resolve(response);
});
});
}
async s3DeleteObject(key) {
return new Promise((resolve, reject) => {
this.api().deleteObject({
Bucket: this.s3_bucket_,
Key: key,
},
(err, response) => {
if (err) {
console.log(err.code);
console.log(err.message);
reject(err);
} else { resolve(response); }
});
});
}
// Assumes key is formatted, like `{Key: 's3 path'}`
async s3DeleteObjects(keys) {
return new Promise((resolve, reject) => {
this.api().deleteObjects({
Bucket: this.s3_bucket_,
Delete: { Objects: keys },
},
(err, response) => {
if (err) {
console.log(err.code);
console.log(err.message);
reject(err);
} else { resolve(response); }
});
});
}
async stat(path) {
try {
const metadata = await this.s3HeadObject(this.makePath_(path));
return this.metadataToStat_(metadata, path);
} catch (error) {
if (this.hasErrorCode_(error, 'NotFound')) {
// ignore
} else {
throw error;
}
}
}
metadataToStat_(md, path) {
const relativePath = basename(path);
const lastModifiedDate = md['LastModified'] ? new Date(md['LastModified']) : new Date();
const output = {
path: relativePath,
updated_time: lastModifiedDate.getTime(),
isDeleted: !!md['DeleteMarker'],
isDir: false,
};
return output;
}
metadataToStats_(mds) {
const output = [];
for (let i = 0; i < mds.length; i++) {
output.push(this.metadataToStat_(mds[i], mds[i].Key));
}
return output;
}
async setTimestamp() {
throw new Error('Not implemented'); // Not needed anymore
}
async delta(path, options) {
const getDirStats = async path => {
const result = await this.list(path);
return result.items;
};
return await basicDelta(path, getDirStats, options);
}
async list(path) {
let prefixPath = this.makePath_(path);
const pathLen = prefixPath.length;
if (pathLen > 0 && prefixPath[pathLen - 1] !== '/') {
prefixPath = `${prefixPath}/`;
}
let response = await this.s3ListObjects(prefixPath);
let output = this.metadataToStats_(response.Contents, prefixPath);
while (response.IsTruncated) {
response = await this.s3ListObjects(prefixPath, response.NextContinuationToken);
output = output.concat(this.metadataToStats_(response.Contents, prefixPath));
}
return {
items: output,
hasMore: false,
context: { cursor: response.NextContinuationToken },
};
}
async get(path, options) {
const remotePath = this.makePath_(path);
if (!options) options = {};
const responseFormat = options.responseFormat || 'text';
try {
let output = null;
const response = await this.s3GetObject(remotePath);
output = response.Body;
if (options.target === 'file') {
const filePath = options.path;
if (!filePath) throw new Error('get: target options.path is missing');
// TODO: check if this ever hits on RN
await shim.fsDriver().writeBinaryFile(filePath, output);
return {
ok: true,
path: filePath,
text: () => {
return response.statusMessage;
},
json: () => {
return { message: `${response.statusCode}: ${response.statusMessage}` };
},
status: response.statusCode,
headers: response.headers,
};
}
if (responseFormat === 'text') {
output = output.toString();
}
return output;
} catch (error) {
if (this.hasErrorCode_(error, 'NoSuchKey')) {
return null;
} else if (this.hasErrorCode_(error, 'AccessDenied')) {
throw new JoplinError('Do not have proper permissions to Bucket', 'rejectedByTarget');
} else {
throw error;
}
}
}
// Don't need to make directories, S3 is key based storage.
async mkdir() {
return true;
}
async put(path, content, options = null) {
const remotePath = this.makePath_(path);
if (!options) options = {};
// See https://github.com/facebook/react-native/issues/14445#issuecomment-352965210
if (typeof content === 'string') content = shim.Buffer.from(content, 'utf8');
try {
if (options.source === 'file') {
await this.s3UploadFileFrom(options.path, remotePath);
return;
}
await this.s3PutObject(remotePath, content);
} catch (error) {
if (this.hasErrorCode_(error, 'AccessDenied')) {
throw new JoplinError('Do not have proper permissions to Bucket', 'rejectedByTarget');
} else {
throw error;
}
}
}
async delete(path) {
try {
await this.s3DeleteObject(this.makePath_(path));
} catch (error) {
if (this.hasErrorCode_(error, 'NoSuchKey')) {
// ignore
} else {
throw error;
}
}
}
async batchDeletes(paths) {
const keys = paths.map(path => { return { Key: path }; });
while (keys.length > 0) {
const toDelete = keys.splice(0, S3_MAX_DELETES);
try {
await this.s3DeleteObjects(toDelete);
} catch (error) {
if (this.hasErrorCode_(error, 'NoSuchKey')) {
// ignore
} else {
throw error;
}
}
}
}
async move(oldPath, newPath) {
const req = new Promise((resolve, reject) => {
this.api().copyObject({
Bucket: this.s3_bucket_,
CopySource: this.makePath_(oldPath),
Key: newPath,
},(err, response) => {
if (err) reject(err);
else resolve(response);
});
});
try {
await req;
await this.delete(oldPath);
} catch (error) {
if (this.hasErrorCode_(error, 'NoSuchKey')) {
// ignore
} else {
throw error;
}
}
}
format() {
throw new Error('Not supported');
}
async clearRoot() {
const listRecursive = async (cursor) => {
return new Promise((resolve, reject) => {
return this.api().listObjectsV2({
Bucket: this.s3_bucket_,
ContinuationToken: cursor,
}, (err, response) => {
if (err) reject(err);
else resolve(response);
});
});
};
let response = await listRecursive();
let keys = response.Contents.map((content) => content.Key);
while (response.IsTruncated) {
response = await listRecursive(response.NextContinuationToken);
keys = keys.concat(response.Contents.map((content) => content.Key));
}
await this.batchDeletes(keys);
}
}
module.exports = { FileApiDriverAmazonS3 };
| 1 | 17,434 | The desktop app will load this fine. on iOS I get `TypeError: undefined is not an object (evaluating '_$$_REQUIRE(_dependencyMap[8], "stream").Readable.Readable')` if I change it to `const Readable = require('stream').Readable;` or `const { Readable } = require('stream');` I get undefined errors from the stream on iOS: `[TypeError: stream.on is not a function. (In 'stream.on("data", function (chunk) { return chunks.push(chunk); })', 'stream.on' is undefined)]` What am I missing? | laurent22-joplin | js |
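The `stream.on is not a function` failure suggests that on React Native the GetObject response Body is not a Node-style Readable (outside Node, the v3 SDK typically hands back a Blob or web ReadableStream instead). A rough sketch of a drain helper that branches on what it receives — illustrative only; the availability of arrayBuffer() on the React Native object is an assumption:
// Drain a GetObject Body into a Buffer, whether it is a Node Readable
// (desktop) or a Blob-like object (assumed for React Native).
async function bodyToBuffer(body) {
  if (body && typeof body.on === 'function') {
    // Node-style stream: collect chunks as they arrive.
    return new Promise((resolve, reject) => {
      const chunks = [];
      body.on('data', chunk => chunks.push(chunk));
      body.on('error', reject);
      body.on('end', () => resolve(Buffer.concat(chunks)));
    });
  }
  if (body && typeof body.arrayBuffer === 'function') {
    // Blob-like body: assumes arrayBuffer() exists on this platform.
    return Buffer.from(await body.arrayBuffer());
  }
  throw new Error('Unsupported GetObject Body type');
}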
@@ -93,7 +93,9 @@ func (m *MockStorer) Put(ctx context.Context, mode storage.ModePut, chs ...swarm
po := swarm.Proximity(ch.Address().Bytes(), m.baseAddress)
m.bins[po]++
}
- m.store[ch.Address().String()] = ch.Data()
+ b := make([]byte, len(ch.Data()))
+ copy(b, ch.Data())
+ m.store[ch.Address().String()] = b
m.modePut[ch.Address().String()] = mode
// pin chunks if needed | 1 | // Copyright 2020 The Swarm Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package mock
import (
"context"
"errors"
"sync"
"github.com/ethersphere/bee/pkg/storage"
"github.com/ethersphere/bee/pkg/swarm"
)
var _ storage.Storer = (*MockStorer)(nil)
type MockStorer struct {
store map[string][]byte
modePut map[string]storage.ModePut
modeSet map[string]storage.ModeSet
pinnedAddress []swarm.Address // Stores the pinned address
pinnedCounter []uint64 // and its respective counter. These are stored as slices to preserve the order.
subpull []storage.Descriptor
partialInterval bool
morePull chan struct{}
mtx sync.Mutex
quit chan struct{}
baseAddress []byte
bins []uint64
}
func WithSubscribePullChunks(chs ...storage.Descriptor) Option {
return optionFunc(func(m *MockStorer) {
m.subpull = make([]storage.Descriptor, len(chs))
for i, v := range chs {
m.subpull[i] = v
}
})
}
func WithBaseAddress(a swarm.Address) Option {
return optionFunc(func(m *MockStorer) {
m.baseAddress = a.Bytes()
})
}
func WithPartialInterval(v bool) Option {
return optionFunc(func(m *MockStorer) {
m.partialInterval = v
})
}
func NewStorer(opts ...Option) *MockStorer {
s := &MockStorer{
store: make(map[string][]byte),
modePut: make(map[string]storage.ModePut),
modeSet: make(map[string]storage.ModeSet),
morePull: make(chan struct{}),
quit: make(chan struct{}),
bins: make([]uint64, swarm.MaxBins),
}
for _, v := range opts {
v.apply(s)
}
return s
}
func (m *MockStorer) Get(_ context.Context, _ storage.ModeGet, addr swarm.Address) (ch swarm.Chunk, err error) {
m.mtx.Lock()
defer m.mtx.Unlock()
v, has := m.store[addr.String()]
if !has {
return nil, storage.ErrNotFound
}
return swarm.NewChunk(addr, v), nil
}
func (m *MockStorer) Put(ctx context.Context, mode storage.ModePut, chs ...swarm.Chunk) (exist []bool, err error) {
m.mtx.Lock()
defer m.mtx.Unlock()
exist = make([]bool, len(chs))
for i, ch := range chs {
exist[i], err = m.has(ctx, ch.Address())
if err != nil {
return exist, err
}
if !exist[i] {
po := swarm.Proximity(ch.Address().Bytes(), m.baseAddress)
m.bins[po]++
}
m.store[ch.Address().String()] = ch.Data()
m.modePut[ch.Address().String()] = mode
// pin chunks if needed
switch mode {
case storage.ModePutUploadPin:
// if mode is set pin, increment the pin counter
var found bool
addr := ch.Address()
for i, ad := range m.pinnedAddress {
if addr.String() == ad.String() {
m.pinnedCounter[i] = m.pinnedCounter[i] + 1
found = true
}
}
if !found {
m.pinnedAddress = append(m.pinnedAddress, addr)
m.pinnedCounter = append(m.pinnedCounter, uint64(1))
}
default:
}
}
return exist, nil
}
func (m *MockStorer) GetMulti(ctx context.Context, mode storage.ModeGet, addrs ...swarm.Address) (ch []swarm.Chunk, err error) {
panic("not implemented") // TODO: Implement
}
func (m *MockStorer) has(ctx context.Context, addr swarm.Address) (yes bool, err error) {
_, has := m.store[addr.String()]
return has, nil
}
func (m *MockStorer) Has(ctx context.Context, addr swarm.Address) (yes bool, err error) {
m.mtx.Lock()
defer m.mtx.Unlock()
return m.has(ctx, addr)
}
func (m *MockStorer) HasMulti(ctx context.Context, addrs ...swarm.Address) (yes []bool, err error) {
panic("not implemented") // TODO: Implement
}
func (m *MockStorer) Set(ctx context.Context, mode storage.ModeSet, addrs ...swarm.Address) (err error) {
m.mtx.Lock()
defer m.mtx.Unlock()
for _, addr := range addrs {
m.modeSet[addr.String()] = mode
switch mode {
case storage.ModeSetPin:
// check if chunk exists
has, err := m.has(ctx, addr)
if err != nil {
return err
}
if !has {
return storage.ErrNotFound
}
// if mode is set pin, increment the pin counter
var found bool
for i, ad := range m.pinnedAddress {
if addr.String() == ad.String() {
m.pinnedCounter[i] = m.pinnedCounter[i] + 1
found = true
}
}
if !found {
m.pinnedAddress = append(m.pinnedAddress, addr)
m.pinnedCounter = append(m.pinnedCounter, uint64(1))
}
case storage.ModeSetUnpin:
// if mode is set unpin, decrement the pin counter and remove the address
// once it reaches zero
for i, ad := range m.pinnedAddress {
if addr.String() == ad.String() {
m.pinnedCounter[i] = m.pinnedCounter[i] - 1
if m.pinnedCounter[i] == 0 {
copy(m.pinnedAddress[i:], m.pinnedAddress[i+1:])
m.pinnedAddress[len(m.pinnedAddress)-1] = swarm.NewAddress([]byte{0})
m.pinnedAddress = m.pinnedAddress[:len(m.pinnedAddress)-1]
copy(m.pinnedCounter[i:], m.pinnedCounter[i+1:])
m.pinnedCounter[len(m.pinnedCounter)-1] = uint64(0)
m.pinnedCounter = m.pinnedCounter[:len(m.pinnedCounter)-1]
}
}
}
case storage.ModeSetRemove:
delete(m.store, addr.String())
default:
}
}
return nil
}
func (m *MockStorer) GetModePut(addr swarm.Address) (mode storage.ModePut) {
m.mtx.Lock()
defer m.mtx.Unlock()
if mode, ok := m.modePut[addr.String()]; ok {
return mode
}
return mode
}
func (m *MockStorer) GetModeSet(addr swarm.Address) (mode storage.ModeSet) {
m.mtx.Lock()
defer m.mtx.Unlock()
if mode, ok := m.modeSet[addr.String()]; ok {
return mode
}
return mode
}
func (m *MockStorer) LastPullSubscriptionBinID(bin uint8) (id uint64, err error) {
return m.bins[bin], nil
}
func (m *MockStorer) SubscribePull(ctx context.Context, bin uint8, since, until uint64) (<-chan storage.Descriptor, <-chan struct{}, func()) {
c := make(chan storage.Descriptor)
done := make(chan struct{})
stop := func() {
close(done)
}
go func() {
defer close(c)
m.mtx.Lock()
for _, ch := range m.subpull {
select {
case c <- ch:
case <-done:
m.mtx.Unlock()
return
case <-ctx.Done():
m.mtx.Unlock()
return
case <-m.quit:
m.mtx.Unlock()
return
}
}
m.mtx.Unlock()
if m.partialInterval {
// block since we're at the top of the bin and waiting for new chunks
select {
case <-done:
return
case <-m.quit:
return
case <-ctx.Done():
return
case <-m.morePull:
}
}
m.mtx.Lock()
defer m.mtx.Unlock()
// iterate on what we have in the iterator
for _, ch := range m.subpull {
select {
case c <- ch:
case <-done:
return
case <-ctx.Done():
return
case <-m.quit:
return
}
}
}()
return c, m.quit, stop
}
func (m *MockStorer) MorePull(d ...storage.Descriptor) {
// clear out what we already have in subpull
m.mtx.Lock()
defer m.mtx.Unlock()
m.subpull = make([]storage.Descriptor, len(d))
for i, v := range d {
m.subpull[i] = v
}
close(m.morePull)
}
func (m *MockStorer) SubscribePush(ctx context.Context) (c <-chan swarm.Chunk, stop func()) {
panic("not implemented") // TODO: Implement
}
func (m *MockStorer) PinnedChunks(ctx context.Context, offset, cursor int) (pinnedChunks []*storage.Pinner, err error) {
m.mtx.Lock()
defer m.mtx.Unlock()
if len(m.pinnedAddress) == 0 {
return pinnedChunks, nil
}
for i, addr := range m.pinnedAddress {
pi := &storage.Pinner{
Address: swarm.NewAddress(addr.Bytes()),
PinCounter: m.pinnedCounter[i],
}
pinnedChunks = append(pinnedChunks, pi)
}
if pinnedChunks == nil {
return pinnedChunks, errors.New("pin chunks: leveldb: not found")
}
return pinnedChunks, nil
}
func (m *MockStorer) PinCounter(address swarm.Address) (uint64, error) {
m.mtx.Lock()
defer m.mtx.Unlock()
for i, addr := range m.pinnedAddress {
if addr.String() == address.String() {
return m.pinnedCounter[i], nil
}
}
return 0, storage.ErrNotFound
}
func (m *MockStorer) Close() error {
close(m.quit)
return nil
}
type Option interface {
apply(*MockStorer)
}
type optionFunc func(*MockStorer)
func (f optionFunc) apply(r *MockStorer) { f(r) }
| 1 | 13,966 | this is needed since the chunk feeder shares memory across calls to the pipeline. this is in order to avoid multiple allocations. this change mimics the behavior of shed and localstore, and copies the data from the call into the in-memory store | ethersphere-bee | go |
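A standalone illustration of the aliasing hazard described above — when the caller reuses its chunk buffer between calls, a store that retains the slice (instead of copying, as the diff now does) sees its stored value change underneath it. Sketch only, not code from the PR:
package main

import "fmt"

func main() {
	store := map[string][]byte{}

	buf := []byte("chunk-1")
	store["a"] = buf // retains a reference to the caller's buffer

	copy(buf, []byte("chunk-2"))   // caller reuses the buffer for the next chunk
	fmt.Printf("%s\n", store["a"]) // prints "chunk-2": the stored value mutated

	b := make([]byte, len(buf)) // the defensive copy from the diff avoids this
	copy(b, buf)
	store["b"] = b
}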
@@ -42,6 +42,10 @@ import (
"go.uber.org/yarpc/yarpcerrors"
)
+func init() {
+ opentracing.SetGlobalTracer(nil)
+}
+
func TestHandlerSuccess(t *testing.T) {
mockCtrl := gomock.NewController(t)
defer mockCtrl.Finish() | 1 | // Copyright (c) 2017 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package http
import (
"bytes"
"context"
"fmt"
"io/ioutil"
"net/http"
"net/http/httptest"
"testing"
"time"
"github.com/golang/mock/gomock"
"github.com/opentracing/opentracing-go"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
yarpc "go.uber.org/yarpc"
"go.uber.org/yarpc/api/transport"
"go.uber.org/yarpc/api/transport/transporttest"
"go.uber.org/yarpc/encoding/raw"
"go.uber.org/yarpc/internal/routertest"
"go.uber.org/yarpc/yarpcerrors"
)
func TestHandlerSuccess(t *testing.T) {
mockCtrl := gomock.NewController(t)
defer mockCtrl.Finish()
headers := make(http.Header)
headers.Set(CallerHeader, "moe")
headers.Set(EncodingHeader, "raw")
headers.Set(TTLMSHeader, "1000")
headers.Set(ProcedureHeader, "nyuck")
headers.Set(ServiceHeader, "curly")
headers.Set(ShardKeyHeader, "shard")
headers.Set(RoutingKeyHeader, "routekey")
headers.Set(RoutingDelegateHeader, "routedelegate")
router := transporttest.NewMockRouter(mockCtrl)
rpcHandler := transporttest.NewMockUnaryHandler(mockCtrl)
spec := transport.NewUnaryHandlerSpec(rpcHandler)
router.EXPECT().Choose(gomock.Any(), routertest.NewMatcher().
WithService("curly").
WithProcedure("nyuck"),
).Return(spec, nil)
rpcHandler.EXPECT().Handle(
transporttest.NewContextMatcher(t,
transporttest.ContextTTL(time.Second),
),
transporttest.NewRequestMatcher(
t, &transport.Request{
Caller: "moe",
Service: "curly",
Encoding: raw.Encoding,
Procedure: "nyuck",
ShardKey: "shard",
RoutingKey: "routekey",
RoutingDelegate: "routedelegate",
Body: bytes.NewReader([]byte("Nyuck Nyuck")),
},
),
gomock.Any(),
).Return(nil)
httpHandler := handler{router: router, tracer: &opentracing.NoopTracer{}}
req := &http.Request{
Method: "POST",
Header: headers,
Body: ioutil.NopCloser(bytes.NewReader([]byte("Nyuck Nyuck"))),
}
rw := httptest.NewRecorder()
httpHandler.ServeHTTP(rw, req)
code := rw.Code
assert.Equal(t, code, 200, "expected 200 code")
assert.Equal(t, rw.Body.String(), "")
}
func TestHandlerHeaders(t *testing.T) {
mockCtrl := gomock.NewController(t)
defer mockCtrl.Finish()
tests := []struct {
giveEncoding string
giveHeaders http.Header
wantTTL time.Duration
wantHeaders map[string]string
}{
{
giveEncoding: "json",
giveHeaders: http.Header{
TTLMSHeader: {"1000"},
"Rpc-Header-Foo": {"bar"},
},
wantTTL: time.Second,
wantHeaders: map[string]string{
"foo": "bar",
},
},
{
giveEncoding: "raw",
giveHeaders: http.Header{
TTLMSHeader: {"100"},
"Rpc-Foo": {"ignored"},
},
wantTTL: 100 * time.Millisecond,
wantHeaders: map[string]string{},
},
{
giveEncoding: "thrift",
giveHeaders: http.Header{
TTLMSHeader: {"1000"},
},
wantTTL: time.Second,
wantHeaders: map[string]string{},
},
{
giveEncoding: "proto",
giveHeaders: http.Header{
TTLMSHeader: {"1000"},
},
wantTTL: time.Second,
wantHeaders: map[string]string{},
},
}
for _, tt := range tests {
router := transporttest.NewMockRouter(mockCtrl)
rpcHandler := transporttest.NewMockUnaryHandler(mockCtrl)
spec := transport.NewUnaryHandlerSpec(rpcHandler)
router.EXPECT().Choose(gomock.Any(), routertest.NewMatcher().
WithService("service").
WithProcedure("hello"),
).Return(spec, nil)
httpHandler := handler{router: router, tracer: &opentracing.NoopTracer{}}
rpcHandler.EXPECT().Handle(
transporttest.NewContextMatcher(t,
transporttest.ContextTTL(tt.wantTTL),
),
transporttest.NewRequestMatcher(t,
&transport.Request{
Caller: "caller",
Service: "service",
Encoding: transport.Encoding(tt.giveEncoding),
Procedure: "hello",
Headers: transport.HeadersFromMap(tt.wantHeaders),
Body: bytes.NewReader([]byte("world")),
}),
gomock.Any(),
).Return(nil)
headers := http.Header{}
for k, vs := range tt.giveHeaders {
for _, v := range vs {
headers.Add(k, v)
}
}
headers.Set(CallerHeader, "caller")
headers.Set(ServiceHeader, "service")
headers.Set(EncodingHeader, tt.giveEncoding)
headers.Set(ProcedureHeader, "hello")
req := &http.Request{
Method: "POST",
Header: headers,
Body: ioutil.NopCloser(bytes.NewReader([]byte("world"))),
}
rw := httptest.NewRecorder()
httpHandler.ServeHTTP(rw, req)
assert.Equal(t, 200, rw.Code, "expected 200 status code")
assert.Equal(t, getContentType(transport.Encoding(tt.giveEncoding)), rw.HeaderMap.Get("Content-Type"))
}
}
func TestHandlerFailures(t *testing.T) {
mockCtrl := gomock.NewController(t)
defer mockCtrl.Finish()
service, procedure := "fake", "hello"
baseHeaders := make(http.Header)
baseHeaders.Set(CallerHeader, "somecaller")
baseHeaders.Set(EncodingHeader, "raw")
baseHeaders.Set(TTLMSHeader, "1000")
baseHeaders.Set(ProcedureHeader, procedure)
baseHeaders.Set(ServiceHeader, service)
headersWithBadTTL := headerCopyWithout(baseHeaders, TTLMSHeader)
headersWithBadTTL.Set(TTLMSHeader, "not a number")
tests := []struct {
req *http.Request
// if we expect an error as a result of the TTL
errTTL bool
wantCode yarpcerrors.Code
}{
{
req: &http.Request{Method: "GET"},
wantCode: yarpcerrors.CodeNotFound,
},
{
req: &http.Request{
Method: "POST",
Header: headerCopyWithout(baseHeaders, CallerHeader),
},
wantCode: yarpcerrors.CodeInvalidArgument,
},
{
req: &http.Request{
Method: "POST",
Header: headerCopyWithout(baseHeaders, ServiceHeader),
},
wantCode: yarpcerrors.CodeInvalidArgument,
},
{
req: &http.Request{
Method: "POST",
Header: headerCopyWithout(baseHeaders, ProcedureHeader),
},
wantCode: yarpcerrors.CodeInvalidArgument,
},
{
req: &http.Request{
Method: "POST",
Header: headerCopyWithout(baseHeaders, TTLMSHeader),
},
wantCode: yarpcerrors.CodeInvalidArgument,
errTTL: true,
},
{
req: &http.Request{
Method: "POST",
},
wantCode: yarpcerrors.CodeInvalidArgument,
},
{
req: &http.Request{
Method: "POST",
Header: headersWithBadTTL,
},
wantCode: yarpcerrors.CodeInvalidArgument,
errTTL: true,
},
}
for _, tt := range tests {
req := tt.req
if req.Body == nil {
req.Body = ioutil.NopCloser(bytes.NewReader([]byte{}))
}
reg := transporttest.NewMockRouter(mockCtrl)
if tt.errTTL {
// since TTL is checked after we've determined the transport type, if we have an
// error with TTL it will be discovered after we read from the router
spec := transport.NewUnaryHandlerSpec(panickedHandler{})
reg.EXPECT().Choose(gomock.Any(), routertest.NewMatcher().
WithService(service).
WithProcedure(procedure),
).Return(spec, nil)
}
h := handler{router: reg, tracer: &opentracing.NoopTracer{}}
rw := httptest.NewRecorder()
h.ServeHTTP(rw, tt.req)
httpStatusCode := rw.Code
assert.True(t, httpStatusCode >= 400 && httpStatusCode < 500, "expected 400 level code")
code := statusCodeToBestCode(httpStatusCode)
assert.Equal(t, tt.wantCode, code)
assert.Equal(t, "text/plain; charset=utf8", rw.HeaderMap.Get("Content-Type"))
}
}
func TestHandlerInternalFailure(t *testing.T) {
mockCtrl := gomock.NewController(t)
defer mockCtrl.Finish()
headers := make(http.Header)
headers.Set(CallerHeader, "somecaller")
headers.Set(EncodingHeader, "raw")
headers.Set(TTLMSHeader, "1000")
headers.Set(ProcedureHeader, "hello")
headers.Set(ServiceHeader, "fake")
request := http.Request{
Method: "POST",
Header: headers,
Body: ioutil.NopCloser(bytes.NewReader([]byte{})),
}
rpcHandler := transporttest.NewMockUnaryHandler(mockCtrl)
rpcHandler.EXPECT().Handle(
transporttest.NewContextMatcher(t, transporttest.ContextTTL(time.Second)),
transporttest.NewRequestMatcher(
t, &transport.Request{
Caller: "somecaller",
Service: "fake",
Encoding: raw.Encoding,
Procedure: "hello",
Body: bytes.NewReader([]byte{}),
},
),
gomock.Any(),
).Return(fmt.Errorf("great sadness"))
router := transporttest.NewMockRouter(mockCtrl)
spec := transport.NewUnaryHandlerSpec(rpcHandler)
router.EXPECT().Choose(gomock.Any(), routertest.NewMatcher().
WithService("fake").
WithProcedure("hello"),
).Return(spec, nil)
httpHandler := handler{router: router, tracer: &opentracing.NoopTracer{}}
httpResponse := httptest.NewRecorder()
httpHandler.ServeHTTP(httpResponse, &request)
code := httpResponse.Code
assert.True(t, code >= 500 && code < 600, "expected 500 level response")
assert.Equal(t,
`error for service "fake" and procedure "hello": great sadness`+"\n",
httpResponse.Body.String())
}
type panickedHandler struct{}
func (th panickedHandler) Handle(context.Context, *transport.Request, transport.ResponseWriter) error {
panic("oops I panicked!")
}
func TestHandlerPanic(t *testing.T) {
httpTransport := NewTransport()
inbound := httpTransport.NewInbound("localhost:0")
serverDispatcher := yarpc.NewDispatcher(yarpc.Config{
Name: "yarpc-test",
Inbounds: yarpc.Inbounds{inbound},
})
serverDispatcher.Register([]transport.Procedure{
{
Name: "panic",
HandlerSpec: transport.NewUnaryHandlerSpec(panickedHandler{}),
},
})
require.NoError(t, serverDispatcher.Start())
defer serverDispatcher.Stop()
clientDispatcher := yarpc.NewDispatcher(yarpc.Config{
Name: "yarpc-test-client",
Outbounds: yarpc.Outbounds{
"yarpc-test": {
Unary: httpTransport.NewSingleOutbound(fmt.Sprintf("http://%s", inbound.Addr().String())),
},
},
})
require.NoError(t, clientDispatcher.Start())
defer clientDispatcher.Stop()
client := raw.New(clientDispatcher.ClientConfig("yarpc-test"))
ctx, cancel := context.WithTimeout(context.Background(), time.Second)
defer cancel()
_, err := client.Call(ctx, "panic", []byte{})
assert.Equal(t, yarpcerrors.CodeUnknown, yarpcerrors.ErrorCode(err))
}
func headerCopyWithout(headers http.Header, names ...string) http.Header {
newHeaders := make(http.Header)
for k, vs := range headers {
for _, v := range vs {
newHeaders.Add(k, v)
}
}
for _, k := range names {
newHeaders.Del(k)
}
return newHeaders
}
func TestResponseWriter(t *testing.T) {
recorder := httptest.NewRecorder()
writer := newResponseWriter(recorder)
headers := transport.HeadersFromMap(map[string]string{
"foo": "bar",
"shard-key": "123",
})
writer.AddHeaders(headers)
_, err := writer.Write([]byte("hello"))
require.NoError(t, err)
writer.Close(http.StatusOK)
assert.Equal(t, "bar", recorder.Header().Get("rpc-header-foo"))
assert.Equal(t, "123", recorder.Header().Get("rpc-header-shard-key"))
assert.Equal(t, "hello", recorder.Body.String())
}
| 1 | 15,400 | wut. We can do this at the beginning of tests if we want right? Why are we depending on init? | yarpc-yarpc-go | go |
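A sketch of the per-test alternative the reviewer is suggesting — configuring the global tracer inside the tests that care, rather than in a package init. The test name is hypothetical, and a NoopTracer is used instead of nil purely to keep the sketch obviously safe:
func TestHandlerWithLocalTracerSetup(t *testing.T) {
	// Hypothetical: reset the global tracer at the start of this test
	// instead of relying on a package-level init.
	opentracing.SetGlobalTracer(opentracing.NoopTracer{})

	// ... body of the test ...
}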
@@ -1403,9 +1403,12 @@ func (c *client) processConnect(arg []byte) error {
c.mu.Lock()
acc := c.acc
c.mu.Unlock()
+ srv.mu.Lock()
if acc != nil && acc != srv.gacc {
+ srv.mu.Unlock()
return ErrTooManyAccountConnections
}
+ srv.mu.Unlock()
}
c.authViolation()
return ErrAuthentication | 1 | // Copyright 2012-2020 The NATS Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package server
import (
"bytes"
"crypto/tls"
"encoding/json"
"fmt"
"io"
"math/rand"
"net"
"regexp"
"runtime"
"strconv"
"strings"
"sync"
"sync/atomic"
"time"
"github.com/nats-io/jwt"
)
// Type of client connection.
const (
// CLIENT is an end user.
CLIENT = iota
// ROUTER represents another server in the cluster.
ROUTER
// GATEWAY is a link between 2 clusters.
GATEWAY
// SYSTEM is an internal system client.
SYSTEM
// LEAF is for leaf node connections.
LEAF
)
const (
// ClientProtoZero is the original Client protocol from 2009.
// http://nats.io/documentation/internals/nats-protocol/
ClientProtoZero = iota
// ClientProtoInfo signals a client can receive more than the original INFO block.
// This can be used to update clients on other cluster members, etc.
ClientProtoInfo
)
const (
pingProto = "PING" + _CRLF_
pongProto = "PONG" + _CRLF_
errProto = "-ERR '%s'" + _CRLF_
okProto = "+OK" + _CRLF_
)
func init() {
rand.Seed(time.Now().UnixNano())
}
const (
// Scratch buffer size for the processMsg() calls.
msgScratchSize = 1024
msgHeadProto = "RMSG "
msgHeadProtoLen = len(msgHeadProto)
// For controlling dynamic buffer sizes.
startBufSize = 512 // For INFO/CONNECT block
minBufSize = 64 // Smallest to shrink to for PING/PONG
maxBufSize = 65536 // 64k
shortsToShrink = 2 // Trigger to shrink dynamic buffers
maxFlushPending = 10 // Max fsps to have in order to wait for writeLoop
readLoopReport = 2 * time.Second
// Server should not send a PING (for RTT) before the first PONG has
// been sent to the client. However, in case some client libs don't
// send CONNECT+PING, cap the maximum time before server can send
// the RTT PING.
maxNoRTTPingBeforeFirstPong = 2 * time.Second
// For stalling fast producers
stallClientMinDuration = 100 * time.Millisecond
stallClientMaxDuration = time.Second
)
var readLoopReportThreshold = readLoopReport
// Represent client booleans with a bitmask
type clientFlag uint16
// Some client state represented as flags
const (
connectReceived clientFlag = 1 << iota // The CONNECT proto has been received
infoReceived // The INFO protocol has been received
firstPongSent // The first PONG has been sent
handshakeComplete // For TLS clients, indicate that the handshake is complete
flushOutbound // Marks client as having a flushOutbound call in progress.
noReconnect // Indicate that on close, this connection should not attempt a reconnect
closeConnection // Marks that closeConnection has already been called.
writeLoopStarted // Marks that the writeLoop has been started.
skipFlushOnClose // Marks that flushOutbound() should not be called on connection close.
expectConnect // Marks if this connection is expected to send a CONNECT
)
// set the flag (would be equivalent to set the boolean to true)
func (cf *clientFlag) set(c clientFlag) {
*cf |= c
}
// clear the flag (would be equivalent to set the boolean to false)
func (cf *clientFlag) clear(c clientFlag) {
*cf &= ^c
}
// isSet returns true if the flag is set, false otherwise
func (cf clientFlag) isSet(c clientFlag) bool {
return cf&c != 0
}
// setIfNotSet will set the flag `c` only if that flag was not already
// set and return true to indicate that the flag has been set. Returns
// false otherwise.
func (cf *clientFlag) setIfNotSet(c clientFlag) bool {
if *cf&c == 0 {
*cf |= c
return true
}
return false
}
// ClosedState is the reason client was closed. This will
// be passed into calls to clearConnection, but will only
// be stored in ConnInfo for monitoring.
type ClosedState int
const (
ClientClosed = ClosedState(iota + 1)
AuthenticationTimeout
AuthenticationViolation
TLSHandshakeError
SlowConsumerPendingBytes
SlowConsumerWriteDeadline
WriteError
ReadError
ParseError
StaleConnection
ProtocolViolation
BadClientProtocolVersion
WrongPort
MaxAccountConnectionsExceeded
MaxConnectionsExceeded
MaxPayloadExceeded
MaxControlLineExceeded
MaxSubscriptionsExceeded
DuplicateRoute
RouteRemoved
ServerShutdown
AuthenticationExpired
WrongGateway
MissingAccount
Revocation
)
// Some flags passed to processMsgResultsEx
const pmrNoFlag int = 0
const (
pmrCollectQueueNames int = 1 << iota
pmrIgnoreEmptyQueueFilter
pmrAllowSendFromRouteToRoute
)
type client struct {
// Here first because of use of atomics, and memory alignment.
stats
// Indicate if we should check gwrm or not. Since checking gwrm is done
// when processing inbound messages and requires the lock we want to
// check only when needed. This is set/get using atomic, so needs to
// be memory aligned.
cgwrt int32
mpay int32
msubs int32
mcl int32
mu sync.Mutex
kind int
cid uint64
opts clientOpts
start time.Time
nonce []byte
nc net.Conn
ncs string
out outbound
srv *Server
acc *Account
user *NkeyUser
host string
port uint16
subs map[string]*subscription
perms *permissions
replies map[string]*resp
mperms *msgDeny
darray []string
in readCache
pcd map[*client]struct{}
atmr *time.Timer
ping pinfo
msgb [msgScratchSize]byte
last time.Time
parseState
rtt time.Duration
rttStart time.Time
rrTracking map[string]*remoteLatency
rrMax int
route *route
gw *gateway
leaf *leaf
// To keep track of gateway replies mapping
gwrm map[string]*gwReplyMap
flags clientFlag // Compact booleans into a single field. Size will be increased when needed.
trace bool
echo bool
}
// Struct for PING initiation from the server.
type pinfo struct {
tmr *time.Timer
last time.Time
out int
}
// outbound holds pending data for a socket.
type outbound struct {
p []byte // Primary write buffer
s []byte // Secondary for use post flush
nb net.Buffers // net.Buffers for writev IO
sz int32 // limit size per []byte, uses variable BufSize constants, start, min, max.
sws int32 // Number of short writes, used for dynamic resizing.
pb int64 // Total pending/queued bytes.
pm int32 // Total pending/queued messages.
fsp int32 // Flush signals that are pending per producer from readLoop's pcd.
sch chan struct{} // To signal writeLoop that there is data to flush.
wdl time.Duration // Snapshot of write deadline.
mp int64 // Snapshot of max pending for client.
lft time.Duration // Last flush time for Write.
stc chan struct{} // Stall chan we create to slow down producers on overrun, e.g. fan-in.
lwb int32 // Last byte size of Write.
}
type perm struct {
allow *Sublist
deny *Sublist
}
type permissions struct {
sub perm
pub perm
resp *ResponsePermission
pcache map[string]bool
}
// This is used to dynamically track responses and reply subjects
// for dynamic permissioning.
type resp struct {
t time.Time
n int
}
// msgDeny is used when a user permission for subscriptions has a deny
// clause but a subscription could be made that is of broader scope.
// e.g. deny = "foo", but user subscribes to "*". That subscription should
// succeed but no message sent on foo should be delivered.
type msgDeny struct {
deny *Sublist
dcache map[string]bool
}
// routeTarget collects information regarding routes and queue groups for
// sending information to a remote.
type routeTarget struct {
sub *subscription
qs []byte
_qs [32]byte
}
const (
maxResultCacheSize = 512
maxDenyPermCacheSize = 256
maxPermCacheSize = 128
pruneSize = 32
routeTargetInit = 8
replyPermLimit = 4096
)
// Used in readloop to cache hot subject lookups and group statistics.
type readCache struct {
// These are for clients who are bound to a single account.
genid uint64
results map[string]*SublistResult
// This is for routes and gateways to have their own L1 as well that is account aware.
pacache map[string]*perAccountCache
// This is for when we deliver messages across a route. We use this structure
// to make sure to only send one message and properly scope to queues as needed.
rts []routeTarget
prand *rand.Rand
// These are all temporary totals for an invocation of a read in readloop.
msgs int32
bytes int32
subs int32
rsz int32 // Read buffer size
srs int32 // Short reads, used for dynamic buffer resizing.
}
const (
defaultMaxPerAccountCacheSize = 4096
defaultPrunePerAccountCacheSize = 256
defaultClosedSubsCheckInterval = 5 * time.Minute
)
var (
maxPerAccountCacheSize = defaultMaxPerAccountCacheSize
prunePerAccountCacheSize = defaultPrunePerAccountCacheSize
closedSubsCheckInterval = defaultClosedSubsCheckInterval
)
// perAccountCache is for L1 semantics for inbound messages from a route or gateway to mimic the performance of clients.
type perAccountCache struct {
acc *Account
results *SublistResult
genid uint64
}
func (c *client) String() (id string) {
return c.ncs
}
// GetName returns the application supplied name for the connection.
func (c *client) GetName() string {
c.mu.Lock()
name := c.opts.Name
c.mu.Unlock()
return name
}
// GetOpts returns the client options provided by the application.
func (c *client) GetOpts() *clientOpts {
return &c.opts
}
// GetTLSConnectionState returns the TLS ConnectionState if TLS is enabled, nil
// otherwise. Implements the ClientAuth interface.
func (c *client) GetTLSConnectionState() *tls.ConnectionState {
tc, ok := c.nc.(*tls.Conn)
if !ok {
return nil
}
state := tc.ConnectionState()
return &state
}
// This is the main subscription struct that indicates
// interest in published messages.
// FIXME(dlc) - This is getting bloated for normal subs, need
// to optionally have an opts section for non-normal stuff.
type subscription struct {
client *client
im *streamImport // This is for import stream support.
shadow []*subscription // This is to track shadowed accounts.
subject []byte
queue []byte
sid []byte
nm int64
max int64
qw int32
closed int32
}
// Indicate that this subscription is closed.
// This is used in pruning of route and gateway cache items.
func (s *subscription) close() {
atomic.StoreInt32(&s.closed, 1)
}
// Return true if this subscription was unsubscribed
// or its connection has been closed.
func (s *subscription) isClosed() bool {
return atomic.LoadInt32(&s.closed) == 1
}
type clientOpts struct {
Echo bool `json:"echo"`
Verbose bool `json:"verbose"`
Pedantic bool `json:"pedantic"`
TLSRequired bool `json:"tls_required"`
Nkey string `json:"nkey,omitempty"`
JWT string `json:"jwt,omitempty"`
Sig string `json:"sig,omitempty"`
Authorization string `json:"auth_token,omitempty"`
Username string `json:"user,omitempty"`
Password string `json:"pass,omitempty"`
Name string `json:"name"`
Lang string `json:"lang"`
Version string `json:"version"`
Protocol int `json:"protocol"`
Account string `json:"account,omitempty"`
AccountNew bool `json:"new_account,omitempty"`
// Routes only
Import *SubjectPermission `json:"import,omitempty"`
Export *SubjectPermission `json:"export,omitempty"`
}
var defaultOpts = clientOpts{Verbose: true, Pedantic: true, Echo: true}
var internalOpts = clientOpts{Verbose: false, Pedantic: false, Echo: false}
func (c *client) setTraceLevel() {
if c.kind == SYSTEM && !(atomic.LoadInt32(&c.srv.logging.traceSysAcc) != 0) {
c.trace = false
} else {
c.trace = (atomic.LoadInt32(&c.srv.logging.trace) != 0)
}
}
// Lock should be held
func (c *client) initClient() {
s := c.srv
c.cid = atomic.AddUint64(&s.gcid, 1)
// Outbound data structure setup
c.out.sz = startBufSize
c.out.sch = make(chan struct{}, 1)
opts := s.getOpts()
// Snapshots to avoid mutex access in fast paths.
c.out.wdl = opts.WriteDeadline
c.out.mp = opts.MaxPending
c.subs = make(map[string]*subscription)
c.echo = true
c.setTraceLevel()
// This is a scratch buffer used for processMsg()
// The msg header starts with "RMSG ", which can be used
// for both local and routes.
// in bytes that is [82 77 83 71 32].
c.msgb = [msgScratchSize]byte{82, 77, 83, 71, 32}
// This is to track pending clients that have data to be flushed
// after we process inbound msgs from our own connection.
c.pcd = make(map[*client]struct{})
// snapshot the string version of the connection
var conn string
if ip, ok := c.nc.(*net.TCPConn); ok {
conn = ip.RemoteAddr().String()
host, port, _ := net.SplitHostPort(conn)
iPort, _ := strconv.Atoi(port)
c.host, c.port = host, uint16(iPort)
}
switch c.kind {
case CLIENT:
c.ncs = fmt.Sprintf("%s - cid:%d", conn, c.cid)
case ROUTER:
c.ncs = fmt.Sprintf("%s - rid:%d", conn, c.cid)
case GATEWAY:
c.ncs = fmt.Sprintf("%s - gid:%d", conn, c.cid)
case LEAF:
c.ncs = fmt.Sprintf("%s - lid:%d", conn, c.cid)
case SYSTEM:
c.ncs = "SYSTEM"
}
}
// RemoteAddress expose the Address of the client connection,
// nil when not connected or unknown
func (c *client) RemoteAddress() net.Addr {
c.mu.Lock()
defer c.mu.Unlock()
if c.nc == nil {
return nil
}
return c.nc.RemoteAddr()
}
// Helper function to report errors.
func (c *client) reportErrRegisterAccount(acc *Account, err error) {
if err == ErrTooManyAccountConnections {
c.maxAccountConnExceeded()
return
}
c.Errorf("Problem registering with account [%s]", acc.Name)
c.sendErr("Failed Account Registration")
}
// registerWithAccount will register the given user with a specific
// account. This will change the subject namespace.
func (c *client) registerWithAccount(acc *Account) error {
if acc == nil || acc.sl == nil {
return ErrBadAccount
}
// If we were previously registered, usually to $G, do accounting here to remove.
if c.acc != nil {
if prev := c.acc.removeClient(c); prev == 1 && c.srv != nil {
c.srv.decActiveAccounts()
}
}
c.mu.Lock()
kind := c.kind
srv := c.srv
c.acc = acc
c.applyAccountLimits()
c.mu.Unlock()
// Check if we have a max connections violation
if kind == CLIENT && acc.MaxTotalConnectionsReached() {
return ErrTooManyAccountConnections
} else if kind == LEAF && acc.MaxTotalLeafNodesReached() {
return ErrTooManyAccountConnections
}
// Add in new one.
if prev := acc.addClient(c); prev == 0 && srv != nil {
srv.incActiveAccounts()
}
return nil
}
// Helper to determine if we have met or exceeded max subs.
func (c *client) subsAtLimit() bool {
return c.msubs != jwt.NoLimit && len(c.subs) >= int(c.msubs)
}
// Apply account limits
// Lock is held on entry.
// FIXME(dlc) - Should server be able to override here?
func (c *client) applyAccountLimits() {
if c.acc == nil || (c.kind != CLIENT && c.kind != LEAF) {
return
}
// Set here; we will need to do checks for NoLimit.
if c.acc.msubs != jwt.NoLimit {
c.msubs = c.acc.msubs
}
if c.acc.mpay != jwt.NoLimit {
c.mpay = c.acc.mpay
}
s := c.srv
opts := s.getOpts()
// We check here if the server has an option set that is lower than the account limit.
if c.mpay != jwt.NoLimit && opts.MaxPayload != 0 && int32(opts.MaxPayload) < c.acc.mpay {
c.Errorf("Max Payload set to %d from server config which overrides %d from account claims", opts.MaxPayload, c.acc.mpay)
c.mpay = int32(opts.MaxPayload)
}
// We check here if the server has an option set that is lower than the account limit.
if c.msubs != jwt.NoLimit && opts.MaxSubs != 0 && opts.MaxSubs < int(c.acc.msubs) {
c.Errorf("Max Subscriptions set to %d from server config which overrides %d from account claims", opts.MaxSubs, c.acc.msubs)
c.msubs = int32(opts.MaxSubs)
}
if c.subsAtLimit() {
go func() {
c.maxSubsExceeded()
time.Sleep(20 * time.Millisecond)
c.closeConnection(MaxSubscriptionsExceeded)
}()
}
}
// RegisterUser allows auth to call back into a new client
// with the authenticated user. This is used to map
// any permissions into the client and setup accounts.
func (c *client) RegisterUser(user *User) {
// Register with proper account and sublist.
if user.Account != nil {
if err := c.registerWithAccount(user.Account); err != nil {
c.reportErrRegisterAccount(user.Account, err)
return
}
}
c.mu.Lock()
// Assign permissions.
if user.Permissions == nil {
// Reset perms to nil in case client previously had them.
c.perms = nil
c.mperms = nil
} else {
c.setPermissions(user.Permissions)
}
c.mu.Unlock()
}
// RegisterNkeyUser allows auth to call back into a new nkey
// client with the authenticated user. This is used to map
// any permissions into the client and setup accounts.
func (c *client) RegisterNkeyUser(user *NkeyUser) error {
// Register with proper account and sublist.
if user.Account != nil {
if err := c.registerWithAccount(user.Account); err != nil {
c.reportErrRegisterAccount(user.Account, err)
return err
}
}
c.mu.Lock()
c.user = user
// Assign permissions.
if user.Permissions == nil {
// Reset perms to nil in case client previously had them.
c.perms = nil
c.mperms = nil
} else {
c.setPermissions(user.Permissions)
}
c.mu.Unlock()
return nil
}
func splitSubjectQueue(sq string) ([]byte, []byte, error) {
vals := strings.Fields(strings.TrimSpace(sq))
s := []byte(vals[0])
var q []byte
if len(vals) == 2 {
q = []byte(vals[1])
} else if len(vals) > 2 {
return nil, nil, fmt.Errorf("invalid subject-queue %q", sq)
}
return s, q, nil
}
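// Illustrative usage of splitSubjectQueue (example, not part of the
// original source); a single token yields a nil queue:
//
//	s, q, _ := splitSubjectQueue("updates.eu workers")
//	// s == []byte("updates.eu"), q == []byte("workers")
//	s, q, _ = splitSubjectQueue("updates.eu")
//	// s == []byte("updates.eu"), q == nil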
// Initializes client.perms structure.
// Lock is held on entry.
func (c *client) setPermissions(perms *Permissions) {
if perms == nil {
return
}
c.perms = &permissions{}
c.perms.pcache = make(map[string]bool)
// Loop over publish permissions
if perms.Publish != nil {
if perms.Publish.Allow != nil {
c.perms.pub.allow = NewSublistWithCache()
}
for _, pubSubject := range perms.Publish.Allow {
sub := &subscription{subject: []byte(pubSubject)}
c.perms.pub.allow.Insert(sub)
}
if len(perms.Publish.Deny) > 0 {
c.perms.pub.deny = NewSublistWithCache()
}
for _, pubSubject := range perms.Publish.Deny {
sub := &subscription{subject: []byte(pubSubject)}
c.perms.pub.deny.Insert(sub)
}
}
// Check if we are allowed to send responses.
if perms.Response != nil {
rp := *perms.Response
c.perms.resp = &rp
c.replies = make(map[string]*resp)
}
// Loop over subscribe permissions
if perms.Subscribe != nil {
var err error
if len(perms.Subscribe.Allow) > 0 {
c.perms.sub.allow = NewSublistWithCache()
}
for _, subSubject := range perms.Subscribe.Allow {
sub := &subscription{}
sub.subject, sub.queue, err = splitSubjectQueue(subSubject)
if err != nil {
c.Errorf("%s", err.Error())
continue
}
c.perms.sub.allow.Insert(sub)
}
if len(perms.Subscribe.Deny) > 0 {
c.perms.sub.deny = NewSublistWithCache()
// Also hold onto this array for later.
c.darray = perms.Subscribe.Deny
}
for _, subSubject := range perms.Subscribe.Deny {
sub := &subscription{}
sub.subject, sub.queue, err = splitSubjectQueue(subSubject)
if err != nil {
c.Errorf("%s", err.Error())
continue
}
c.perms.sub.deny.Insert(sub)
}
}
}
// Check to see if we have an expiration for the user JWT via base claims.
// FIXME(dlc) - Clear on connect with new JWT.
func (c *client) checkExpiration(claims *jwt.ClaimsData) {
if claims.Expires == 0 {
return
}
tn := time.Now().Unix()
if claims.Expires < tn {
return
}
expiresAt := time.Duration(claims.Expires - tn)
c.setExpirationTimer(expiresAt * time.Second)
}
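// Worked example (illustrative): claims.Expires is a Unix timestamp in
// seconds, so for a claim expiring 90 seconds from now, expiresAt is 90
// and the expiration timer is set to 90 * time.Second.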
// This will load up the deny structure used for filtering delivered
// messages based on a deny clause for subscriptions.
// Lock should be held.
func (c *client) loadMsgDenyFilter() {
c.mperms = &msgDeny{NewSublistWithCache(), make(map[string]bool)}
for _, sub := range c.darray {
c.mperms.deny.Insert(&subscription{subject: []byte(sub)})
}
}
// writeLoop is the main socket write functionality.
// Runs in its own Go routine.
func (c *client) writeLoop() {
defer c.srv.grWG.Done()
c.mu.Lock()
if c.isClosed() {
c.mu.Unlock()
return
}
c.flags.set(writeLoopStarted)
ch := c.out.sch
c.mu.Unlock()
// This will clear connection state and remove it from the server.
defer c.teardownConn()
// Used to check that we did flush from last wake up.
waitOk := true
// Used to limit the wait for a signal
const maxWait = time.Second
t := time.NewTimer(maxWait)
var close bool
// Main loop. Will wait to be signaled and then will use
// buffered outbound structure for efficient writev to the underlying socket.
for {
c.mu.Lock()
if close = c.flags.isSet(closeConnection); !close {
owtf := c.out.fsp > 0 && c.out.pb < maxBufSize && c.out.fsp < maxFlushPending
if waitOk && (c.out.pb == 0 || owtf) {
c.mu.Unlock()
// Reset our timer
t.Reset(maxWait)
// Wait on pending data.
select {
case <-ch:
case <-t.C:
}
c.mu.Lock()
close = c.flags.isSet(closeConnection)
}
}
if close {
c.flushAndClose(false)
c.mu.Unlock()
return
}
// Flush data
waitOk = c.flushOutbound()
c.mu.Unlock()
}
}
// flushClients will make sure to flush any clients we may have
// sent to during processing. We pass in a budget as a time.Duration
// for how much time to spend in place flushing for this client. This
// will normally be called in the readLoop of the client who sent the
// message that now is being delivered.
func (c *client) flushClients(budget time.Duration) time.Time {
last := time.Now()
// Check pending clients for flush.
for cp := range c.pcd {
// TODO(dlc) - Wonder if it makes more sense to create a new map?
delete(c.pcd, cp)
// Queue up a flush for those in the set
cp.mu.Lock()
// Update last activity for message delivery
cp.last = last
// Remove ourselves from the pending list.
cp.out.fsp--
// Just ignore if this was closed.
if cp.flags.isSet(closeConnection) {
cp.mu.Unlock()
continue
}
if budget > 0 && cp.flushOutbound() {
budget -= cp.out.lft
} else {
cp.flushSignal()
}
cp.mu.Unlock()
}
return last
}
// readLoop is the main socket read functionality.
// Runs in its own Go routine.
func (c *client) readLoop() {
// Grab the connection off the client, it will be cleared on a close.
// We check for that after the loop, but want to avoid a nil dereference
c.mu.Lock()
s := c.srv
defer s.grWG.Done()
if c.isClosed() {
c.mu.Unlock()
return
}
nc := c.nc
c.in.rsz = startBufSize
// Snapshot max control line since currently can not be changed on reload and we
// were checking it on each call to parse. If this changes and we allow MaxControlLine
// to be reloaded without restart, this code will need to change.
c.mcl = MAX_CONTROL_LINE_SIZE
if s != nil {
if opts := s.getOpts(); opts != nil {
c.mcl = int32(opts.MaxControlLine)
}
}
// Check the per-account-cache for closed subscriptions
cpacc := c.kind == ROUTER || c.kind == GATEWAY
// Last per-account-cache check for closed subscriptions
lpacc := time.Now()
c.mu.Unlock()
defer func() {
// These are used only in the readloop, so we can set them to nil
// on exit of the readLoop.
c.in.results, c.in.pacache = nil, nil
}()
// Start read buffer.
b := make([]byte, c.in.rsz)
for {
n, err := nc.Read(b)
// If we have any data we will try to parse and exit at the end.
if n == 0 && err != nil {
c.closeConnection(closedStateForErr(err))
return
}
start := time.Now()
// Clear inbound stats cache
c.in.msgs = 0
c.in.bytes = 0
c.in.subs = 0
// Main call into parser for inbound data. This will generate callouts
// to process messages, etc.
if err := c.parse(b[:n]); err != nil {
if dur := time.Since(start); dur >= readLoopReportThreshold {
c.Warnf("Readloop processing time: %v", dur)
}
// Need to call flushClients because some of the clients have been
// assigned messages and their "fsp" incremented, and need now to be
// decremented and their writeLoop signaled.
c.flushClients(0)
// ErrMaxPayload and ErrAuthentication have already been handled inline.
if err != ErrMaxPayload && err != ErrAuthentication {
c.Error(err)
c.closeConnection(ProtocolViolation)
}
return
}
// Updates stats for client and server that were collected
// from parsing through the buffer.
if c.in.msgs > 0 {
atomic.AddInt64(&c.inMsgs, int64(c.in.msgs))
atomic.AddInt64(&c.inBytes, int64(c.in.bytes))
atomic.AddInt64(&s.inMsgs, int64(c.in.msgs))
atomic.AddInt64(&s.inBytes, int64(c.in.bytes))
}
// Budget to spend in place flushing outbound data.
// Client will be checked on several fronts to see
// if applicable. Routes and Gateways will never
// spend time flushing outbound in place.
var budget time.Duration
if c.kind == CLIENT {
budget = time.Millisecond
}
// Flush, or signal to writeLoop to flush to socket.
last := c.flushClients(budget)
// Update activity, check read buffer size.
c.mu.Lock()
closed := c.isClosed()
// Activity based on interest changes or data/msgs.
if c.in.msgs > 0 || c.in.subs > 0 {
c.last = last
}
if n >= cap(b) {
c.in.srs = 0
} else if n < cap(b)/2 { // divide by 2 b/c we want less than what we would shrink to.
c.in.srs++
}
// Update read buffer size as/if needed.
if n >= cap(b) && cap(b) < maxBufSize {
// Grow
c.in.rsz = int32(cap(b) * 2)
b = make([]byte, c.in.rsz)
} else if n < cap(b) && cap(b) > minBufSize && c.in.srs > shortsToShrink {
// Shrink, for now don't accelerate, ping/pong will eventually sort it out.
c.in.rsz = int32(cap(b) / 2)
b = make([]byte, c.in.rsz)
}
c.mu.Unlock()
if dur := time.Since(start); dur >= readLoopReportThreshold {
c.Warnf("Readloop processing time: %v", dur)
}
// Check to see if we got closed, e.g. slow consumer
if closed {
return
}
// We could have had a read error from above but still read some data.
// If so do the close here unconditionally.
if err != nil {
c.closeConnection(closedStateForErr(err))
return
}
if cpacc && start.Sub(lpacc) >= closedSubsCheckInterval {
c.pruneClosedSubFromPerAccountCache()
lpacc = time.Now()
}
}
}
// Returns the appropriate closed state for a given read error.
func closedStateForErr(err error) ClosedState {
if err == io.EOF {
return ClientClosed
}
return ReadError
}
// collapsePtoNB will place primary onto nb buffer as needed in prep for WriteTo.
// This will return a copy on purpose.
func (c *client) collapsePtoNB() net.Buffers {
if c.out.p != nil {
p := c.out.p
c.out.p = nil
return append(c.out.nb, p)
}
return c.out.nb
}
// This will handle the fixup needed on a partial write.
// Assume pending has been already calculated correctly.
func (c *client) handlePartialWrite(pnb net.Buffers) {
nb := c.collapsePtoNB()
// The partial needs to be first, so append nb to pnb
c.out.nb = append(pnb, nb...)
}
// flushOutbound will flush outbound buffer to a client.
// Will return true if data was attempted to be written.
// Lock must be held
func (c *client) flushOutbound() bool {
if c.flags.isSet(flushOutbound) {
// For CLIENT connections, it is possible that the readLoop calls
// flushOutbound(). If writeLoop and readLoop compete and we are
// here we should release the lock to reduce the risk of spinning.
c.mu.Unlock()
runtime.Gosched()
c.mu.Lock()
return false
}
c.flags.set(flushOutbound)
defer c.flags.clear(flushOutbound)
// Check for nothing to do.
if c.nc == nil || c.srv == nil || c.out.pb == 0 {
return true // true because no need to queue a signal.
}
// Place primary on nb, assign primary to secondary, nil out nb and secondary.
nb := c.collapsePtoNB()
c.out.p, c.out.nb, c.out.s = c.out.s, nil, nil
// For selecting primary replacement.
cnb := nb
var lfs int
if len(cnb) > 0 {
lfs = len(cnb[0])
}
// In case it goes away after releasing the lock.
nc := c.nc
attempted := c.out.pb
apm := c.out.pm
// Capture this (we change the value in some tests)
wdl := c.out.wdl
// Do NOT hold lock during actual IO.
c.mu.Unlock()
// flush here
now := time.Now()
// FIXME(dlc) - writev will do multiple IOs past 1024 on
// most platforms, need to account for that with deadline?
nc.SetWriteDeadline(now.Add(wdl))
// Actual write to the socket.
n, err := nb.WriteTo(nc)
nc.SetWriteDeadline(time.Time{})
lft := time.Since(now)
// Re-acquire client lock.
c.mu.Lock()
if err != nil {
// Handle timeout error (slow consumer) differently
if ne, ok := err.(net.Error); ok && ne.Timeout() {
if closed := c.handleWriteTimeout(n, attempted, len(cnb)); closed {
return true
}
} else {
// Other errors will cause connection to be closed.
// For clients, report as debug but for others report as error.
report := c.Debugf
if c.kind != CLIENT {
report = c.Errorf
}
report("Error flushing: %v", err)
c.markConnAsClosed(WriteError, true)
return true
}
}
// Update flush time statistics.
c.out.lft = lft
c.out.lwb = int32(n)
// Subtract from pending bytes and messages.
c.out.pb -= int64(c.out.lwb)
c.out.pm -= apm // FIXME(dlc) - this will not be totally accurate on partials.
// Check for partial writes
// TODO(dlc) - zero write with no error will cause lost message and the writeloop to spin.
if int64(c.out.lwb) != attempted && n > 0 {
c.handlePartialWrite(nb)
} else if c.out.lwb >= c.out.sz {
c.out.sws = 0
}
// Adjust based on what we wrote plus any pending.
pt := int64(c.out.lwb) + c.out.pb
// Adjust sz as needed downward, keeping power of 2.
// We do this at a slower rate.
if pt < int64(c.out.sz) && c.out.sz > minBufSize {
c.out.sws++
if c.out.sws > shortsToShrink {
c.out.sz >>= 1
}
}
// Adjust sz as needed upward, keeping power of 2.
if pt > int64(c.out.sz) && c.out.sz < maxBufSize {
c.out.sz <<= 1
}
// Check to see if we can reuse buffers.
if lfs != 0 && n >= int64(lfs) {
oldp := cnb[0][:0]
if cap(oldp) >= int(c.out.sz) {
// Replace primary or secondary if they are nil, reusing same buffer.
if c.out.p == nil {
c.out.p = oldp
} else if c.out.s == nil || cap(c.out.s) < int(c.out.sz) {
c.out.s = oldp
}
}
}
// Check that if there is still data to send and writeLoop is in wait,
// then we need to signal.
if c.out.pb > 0 {
c.flushSignal()
}
// Check if we have a stalled gate and if so and we are recovering release
// any stalled producers. Only kind==CLIENT will stall.
if c.out.stc != nil && (int64(c.out.lwb) == attempted || c.out.pb < c.out.mp/2) {
close(c.out.stc)
c.out.stc = nil
}
return true
}
// This is invoked from flushOutbound() for io/timeout error (slow consumer).
// Returns a boolean to indicate if the connection has been closed or not.
// Lock is held on entry.
func (c *client) handleWriteTimeout(written, attempted int64, numChunks int) bool {
if tlsConn, ok := c.nc.(*tls.Conn); ok {
if !tlsConn.ConnectionState().HandshakeComplete {
// Likely a TLSTimeout error instead...
c.markConnAsClosed(TLSHandshakeError, true)
// Would need to coordinate with tlstimeout()
// to avoid double logging, so skip logging
// here, and don't report a slow consumer error.
return true
}
} else if c.flags.isSet(expectConnect) && !c.flags.isSet(connectReceived) {
// Under some conditions, a connection may hit a slow consumer write deadline
// before the authorization timeout. If that is the case, then we handle
// as slow consumer though we do not increase the counter as that can be
// misleading.
c.markConnAsClosed(SlowConsumerWriteDeadline, true)
return true
}
// Slow consumer here..
atomic.AddInt64(&c.srv.slowConsumers, 1)
c.Noticef("Slow Consumer Detected: WriteDeadline of %v exceeded with %d chunks of %d total bytes.",
c.out.wdl, numChunks, attempted)
// We always close CLIENT connections, or when nothing was written at all...
if c.kind == CLIENT || written == 0 {
c.markConnAsClosed(SlowConsumerWriteDeadline, true)
return true
}
return false
}
// Marks this connection has closed with the given reason.
// Sets the closeConnection flag and skipFlushOnClose flag if asked.
// Depending on the kind of connection, the connection will be saved.
// If a writeLoop has been started, the final flush/close/teardown will
// be done there, otherwise flush and close of TCP connection is done here in place.
// Returns true if closed in place, false otherwise.
// Lock is held on entry.
func (c *client) markConnAsClosed(reason ClosedState, skipFlush bool) bool {
if c.flags.isSet(closeConnection) {
return false
}
c.flags.set(closeConnection)
if skipFlush {
c.flags.set(skipFlushOnClose)
}
// Save off the connection if its a client or leafnode.
if c.kind == CLIENT || c.kind == LEAF {
if nc := c.nc; nc != nil && c.srv != nil {
// TODO: May want to send events to single go routine instead
// of creating a new go routine for each save.
go c.srv.saveClosedClient(c, nc, reason)
}
}
// If writeLoop exists, let it do the final flush, close and teardown.
if c.flags.isSet(writeLoopStarted) {
c.flushSignal()
return false
}
// Flush (if skipFlushOnClose is not set) and close in place. If flushing,
// use a small WriteDeadline.
c.flushAndClose(true)
return true
}
// flushSignal will use server to queue the flush IO operation to a pool of flushers.
// Lock must be held.
func (c *client) flushSignal() bool {
select {
case c.out.sch <- struct{}{}:
return true
default:
}
return false
}
// Traces a message.
// Will NOT check if tracing is enabled, does NOT need the client lock.
func (c *client) traceMsg(msg []byte) {
maxTrace := c.srv.getOpts().MaxTracedMsgLen
if maxTrace > 0 && (len(msg)-LEN_CR_LF) > maxTrace {
c.Tracef("<<- MSG_PAYLOAD: [\"%s...\"]", msg[:maxTrace])
} else {
c.Tracef("<<- MSG_PAYLOAD: [%q]", msg[:len(msg)-LEN_CR_LF])
}
}
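// e.g. (illustrative): with MaxTracedMsgLen set to 10, a payload of
// "0123456789ABCDEF\r\n" is traced truncated as
//
//	<<- MSG_PAYLOAD: ["0123456789..."]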
// Traces an incoming operation.
// Will NOT check if tracing is enabled, does NOT need the client lock.
func (c *client) traceInOp(op string, arg []byte) {
c.traceOp("<<- %s", op, arg)
}
// Traces an outgoing operation.
// Will NOT check if tracing is enabled, does NOT need the client lock.
func (c *client) traceOutOp(op string, arg []byte) {
c.traceOp("->> %s", op, arg)
}
func (c *client) traceOp(format, op string, arg []byte) {
opa := []interface{}{}
if op != "" {
opa = append(opa, op)
}
if arg != nil {
opa = append(opa, string(arg))
}
c.Tracef(format, opa)
}
// Process the information messages from Clients and other Routes.
func (c *client) processInfo(arg []byte) error {
info := Info{}
if err := json.Unmarshal(arg, &info); err != nil {
return err
}
switch c.kind {
case ROUTER:
c.processRouteInfo(&info)
case GATEWAY:
c.processGatewayInfo(&info)
case LEAF:
return c.processLeafnodeInfo(&info)
}
return nil
}
func (c *client) processErr(errStr string) {
switch c.kind {
case CLIENT:
c.Errorf("Client Error %s", errStr)
case ROUTER:
c.Errorf("Route Error %s", errStr)
case GATEWAY:
c.Errorf("Gateway Error %s", errStr)
case LEAF:
c.Errorf("Leafnode Error %s", errStr)
}
c.closeConnection(ParseError)
}
// Password pattern matcher.
var passPat = regexp.MustCompile(`"?\s*pass\S*?"?\s*[:=]\s*"?(([^",\r\n}])*)`)
// removePassFromTrace removes any notion of passwords from trace
// messages for logging.
func removePassFromTrace(arg []byte) []byte {
if !bytes.Contains(arg, []byte(`pass`)) {
return arg
}
// Take a copy of the connect proto just for the trace message.
var _arg [4096]byte
buf := append(_arg[:0], arg...)
m := passPat.FindAllSubmatchIndex(buf, -1)
if len(m) == 0 {
return arg
}
redactedPass := []byte("[REDACTED]")
for _, i := range m {
if len(i) < 4 {
continue
}
start := i[2]
end := i[3]
// Replace password substring.
buf = append(buf[:start], append(redactedPass, buf[end:]...)...)
break
}
return buf
}
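// Example (illustrative): a traced CONNECT proto such as
//
//	{"user":"derek","pass":"s3cr3t"}
//
// is rewritten to {"user":"derek","pass":"[REDACTED]"} before logging;
// protos without a "pass" token are returned untouched.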
// Returns the RTT by computing the elapsed time since now and `start`.
// On Windows VM where I (IK) run tests, time.Since() will return 0
// (I suspect some time granularity issues). So return at minimum 1ns.
func computeRTT(start time.Time) time.Duration {
rtt := time.Since(start)
if rtt <= 0 {
rtt = time.Nanosecond
}
return rtt
}
func (c *client) processConnect(arg []byte) error {
c.mu.Lock()
// If we can't stop the timer because the callback is in progress...
if !c.clearAuthTimer() {
// wait for it to finish and handle sending the failure back to
// the client.
for !c.isClosed() {
c.mu.Unlock()
time.Sleep(25 * time.Millisecond)
c.mu.Lock()
}
c.mu.Unlock()
return nil
}
c.last = time.Now()
// Estimate RTT to start.
if c.kind == CLIENT {
c.rtt = computeRTT(c.start)
if c.srv != nil {
c.clearPingTimer()
c.srv.setFirstPingTimer(c)
}
}
kind := c.kind
srv := c.srv
// Moved unmarshalling of clients' Options under the lock.
// The client has already been added to the server map, so it is possible
// that other routines lookup the client, and access its options under
// the client's lock, so unmarshalling the options outside of the lock
// would cause data RACEs.
if err := json.Unmarshal(arg, &c.opts); err != nil {
c.mu.Unlock()
return err
}
// Indicate that the CONNECT protocol has been received, and that the
// server now knows which protocol this client supports.
c.flags.set(connectReceived)
// Capture these under lock
c.echo = c.opts.Echo
proto := c.opts.Protocol
verbose := c.opts.Verbose
lang := c.opts.Lang
account := c.opts.Account
accountNew := c.opts.AccountNew
ujwt := c.opts.JWT
c.mu.Unlock()
if srv != nil {
// Applicable to clients only:
// As soon as c.opts is unmarshalled and if the proto is at
// least ClientProtoInfo, we need to increment the following counter.
// This is decremented when client is removed from the server's
// clients map.
if kind == CLIENT && proto >= ClientProtoInfo {
srv.mu.Lock()
srv.cproto++
srv.mu.Unlock()
}
// Check for Auth
if ok := srv.checkAuthentication(c); !ok {
// We may fail here because we reached max limits on an account.
if ujwt != "" {
c.mu.Lock()
acc := c.acc
c.mu.Unlock()
if acc != nil && acc != srv.gacc {
return ErrTooManyAccountConnections
}
}
c.authViolation()
return ErrAuthentication
}
// Check for Account designation; this section should only be used when there is not a JWT.
if account != "" {
var acc *Account
var wasNew bool
var err error
if !srv.NewAccountsAllowed() {
acc, err = srv.LookupAccount(account)
if err != nil {
c.Errorf(err.Error())
c.sendErr(ErrMissingAccount.Error())
return err
} else if accountNew && acc != nil {
c.sendErrAndErr(ErrAccountExists.Error())
return ErrAccountExists
}
} else {
// We can create this one on the fly.
acc, wasNew = srv.LookupOrRegisterAccount(account)
if accountNew && !wasNew {
c.sendErrAndErr(ErrAccountExists.Error())
return ErrAccountExists
}
}
// If we are here we can register ourselves with the new account.
if err := c.registerWithAccount(acc); err != nil {
c.reportErrRegisterAccount(acc, err)
return ErrBadAccount
}
} else if c.acc == nil {
// By default register with the global account.
c.registerWithAccount(srv.gacc)
}
}
switch kind {
case CLIENT:
// Check client protocol request if it exists.
if proto < ClientProtoZero || proto > ClientProtoInfo {
c.sendErr(ErrBadClientProtocol.Error())
c.closeConnection(BadClientProtocolVersion)
return ErrBadClientProtocol
}
if verbose {
c.sendOK()
}
case ROUTER:
// Delegate the rest of processing to the route
return c.processRouteConnect(srv, arg, lang)
case GATEWAY:
// Delegate the rest of processing to the gateway
return c.processGatewayConnect(arg)
case LEAF:
// Delegate the rest of processing to the leaf node
return c.processLeafNodeConnect(srv, arg, lang)
}
return nil
}
func (c *client) sendErrAndErr(err string) {
c.sendErr(err)
c.Errorf(err)
}
func (c *client) sendErrAndDebug(err string) {
c.sendErr(err)
c.Debugf(err)
}
func (c *client) authTimeout() {
c.sendErrAndDebug("Authentication Timeout")
c.closeConnection(AuthenticationTimeout)
}
func (c *client) authExpired() {
c.sendErrAndDebug("User Authentication Expired")
c.closeConnection(AuthenticationExpired)
}
func (c *client) accountAuthExpired() {
c.sendErrAndDebug("Account Authentication Expired")
c.closeConnection(AuthenticationExpired)
}
func (c *client) authViolation() {
var s *Server
var hasTrustedNkeys, hasNkeys, hasUsers bool
if s = c.srv; s != nil {
s.mu.Lock()
hasTrustedNkeys = len(s.trustedKeys) > 0
hasNkeys = s.nkeys != nil
hasUsers = s.users != nil
s.mu.Unlock()
defer s.sendAuthErrorEvent(c)
}
if hasTrustedNkeys {
c.Errorf("%v", ErrAuthentication)
} else if hasNkeys {
c.Errorf("%s - Nkey %q",
ErrAuthentication.Error(),
c.opts.Nkey)
} else if hasUsers {
c.Errorf("%s - User %q",
ErrAuthentication.Error(),
c.opts.Username)
} else {
c.Errorf(ErrAuthentication.Error())
}
c.sendErr("Authorization Violation")
c.closeConnection(AuthenticationViolation)
}
func (c *client) maxAccountConnExceeded() {
c.sendErrAndErr(ErrTooManyAccountConnections.Error())
c.closeConnection(MaxAccountConnectionsExceeded)
}
func (c *client) maxConnExceeded() {
c.sendErrAndErr(ErrTooManyConnections.Error())
c.closeConnection(MaxConnectionsExceeded)
}
func (c *client) maxSubsExceeded() {
c.sendErrAndErr(ErrTooManySubs.Error())
}
func (c *client) maxPayloadViolation(sz int, max int32) {
c.Errorf("%s: %d vs %d", ErrMaxPayload.Error(), sz, max)
c.sendErr("Maximum Payload Violation")
c.closeConnection(MaxPayloadExceeded)
}
// queueOutbound queues data for a client connection.
// Returns whether the data is referenced or not. If referenced,
// the caller should not reuse the `data` array.
// Lock should be held.
func (c *client) queueOutbound(data []byte) bool {
// Do not keep going if closed
if c.flags.isSet(closeConnection) {
return false
}
// Assume data will not be referenced
referenced := false
// Add to pending bytes total.
c.out.pb += int64(len(data))
// Check for slow consumer via pending bytes limit.
// ok to return here, client is going away.
if c.kind == CLIENT && c.out.pb > c.out.mp {
// Perf wise, it looks like it is faster to optimistically add than
// checking current pb+len(data) and then add to pb.
c.out.pb -= int64(len(data))
atomic.AddInt64(&c.srv.slowConsumers, 1)
c.Noticef("Slow Consumer Detected: MaxPending of %d Exceeded", c.out.mp)
c.markConnAsClosed(SlowConsumerPendingBytes, true)
return referenced
}
if c.out.p == nil && len(data) < maxBufSize {
if c.out.sz == 0 {
c.out.sz = startBufSize
}
if c.out.s != nil && cap(c.out.s) >= int(c.out.sz) {
c.out.p = c.out.s
c.out.s = nil
} else {
// FIXME(dlc) - make power of 2 if less than maxBufSize?
c.out.p = make([]byte, 0, c.out.sz)
}
}
// Determine if we copy or reference
available := cap(c.out.p) - len(c.out.p)
if len(data) > available {
// We can't fit everything into existing primary, but message will
// fit in next one we allocate or utilize from the secondary.
// So copy what we can.
if available > 0 && len(data) < int(c.out.sz) {
c.out.p = append(c.out.p, data[:available]...)
data = data[available:]
}
// Put the primary on the nb if it has a payload
if len(c.out.p) > 0 {
c.out.nb = append(c.out.nb, c.out.p)
c.out.p = nil
}
// Check for a big message, and if found place directly on nb
// FIXME(dlc) - do we need signaling of ownership here if we want len(data) < maxBufSize
if len(data) > maxBufSize {
c.out.nb = append(c.out.nb, data)
referenced = true
} else {
// We will copy to primary.
if c.out.p == nil {
// Grow here
if (c.out.sz << 1) <= maxBufSize {
c.out.sz <<= 1
}
if len(data) > int(c.out.sz) {
c.out.p = make([]byte, 0, len(data))
} else {
if c.out.s != nil && cap(c.out.s) >= int(c.out.sz) { // TODO(dlc) - Size mismatch?
c.out.p = c.out.s
c.out.s = nil
} else {
c.out.p = make([]byte, 0, c.out.sz)
}
}
}
c.out.p = append(c.out.p, data...)
}
} else {
c.out.p = append(c.out.p, data...)
}
// Check here if we should create a stall channel if we are falling behind.
// We do this here since if we wait for consumer's writeLoop it could be
// too late with large number of fan in producers.
if c.out.pb > c.out.mp/2 && c.out.stc == nil {
c.out.stc = make(chan struct{})
}
return referenced
}
// Assume the lock is held upon entry.
func (c *client) enqueueProtoAndFlush(proto []byte, doFlush bool) {
if c.isClosed() {
return
}
c.queueOutbound(proto)
if !(doFlush && c.flushOutbound()) {
c.flushSignal()
}
}
// Queues and then flushes the connection. This should only be called when
// the writeLoop cannot be started yet. Use enqueueProto() otherwise.
// Lock is held on entry.
func (c *client) sendProtoNow(proto []byte) {
c.enqueueProtoAndFlush(proto, true)
}
// Enqueues the given protocol and signal the writeLoop if necessary.
// Lock is held on entry.
func (c *client) enqueueProto(proto []byte) {
c.enqueueProtoAndFlush(proto, false)
}
// Assume the lock is held upon entry.
func (c *client) sendPong() {
if c.trace {
c.traceOutOp("PONG", nil)
}
c.enqueueProto([]byte(pongProto))
}
// Used to kick off a RTT measurement for latency tracking.
func (c *client) sendRTTPing() bool {
c.mu.Lock()
sent := c.sendRTTPingLocked()
c.mu.Unlock()
return sent
}
// Used to kick off a RTT measurement for latency tracking.
// This is normally called only when the caller has checked that
// the c.rtt is 0 and wants to force an update by sending a PING.
// Client lock held on entry.
func (c *client) sendRTTPingLocked() bool {
// Most client libs send a CONNECT+PING and wait for a PONG from the
// server. So if firstPongSent flag is set, it is ok for server to
// send the PING. But in case we have client libs that don't do that,
// allow the send of the PING if more than 2 secs have elapsed since
// the client TCP connection was accepted.
if !c.flags.isSet(closeConnection) &&
(c.flags.isSet(firstPongSent) || time.Since(c.start) > maxNoRTTPingBeforeFirstPong) {
c.sendPing()
return true
}
return false
}
// Assume the lock is held upon entry.
func (c *client) sendPing() {
c.rttStart = time.Now()
c.ping.out++
if c.trace {
c.traceOutOp("PING", nil)
}
c.enqueueProto([]byte(pingProto))
}
// Generates the INFO to be sent to the client with the client ID included.
// info arg will be copied since passed by value.
// Assume lock is held.
func (c *client) generateClientInfoJSON(info Info) []byte {
info.CID = c.cid
info.ClientIP = c.host
info.MaxPayload = c.mpay
// Generate the info json
b, _ := json.Marshal(info)
pcs := [][]byte{[]byte("INFO"), b, []byte(CR_LF)}
return bytes.Join(pcs, []byte(" "))
}
func (c *client) sendErr(err string) {
c.mu.Lock()
if c.trace {
c.traceOutOp("-ERR", []byte(err))
}
c.enqueueProto([]byte(fmt.Sprintf(errProto, err)))
c.mu.Unlock()
}
func (c *client) sendOK() {
c.mu.Lock()
if c.trace {
c.traceOutOp("OK", nil)
}
c.enqueueProto([]byte(okProto))
c.pcd[c] = needFlush
c.mu.Unlock()
}
func (c *client) processPing() {
c.mu.Lock()
if c.isClosed() {
c.mu.Unlock()
return
}
c.sendPong()
// Record this to suppress sending our own PING if this
// one falls within a given time interval of activity.
c.ping.last = time.Now()
// If not a CLIENT, we are done. Also the CONNECT should
// have been received, but make sure it is so before proceeding
if c.kind != CLIENT || !c.flags.isSet(connectReceived) {
c.mu.Unlock()
return
}
// If we are here, the CONNECT has been received so we know
// if this client supports async INFO or not.
var (
checkInfoChange bool
srv = c.srv
)
// For older clients, just flip the firstPongSent flag if not already
// set and we are done.
if c.opts.Protocol < ClientProtoInfo || srv == nil {
c.flags.setIfNotSet(firstPongSent)
} else {
// This is a client that supports async INFO protocols.
// If this is the first PING (so firstPongSent is not set yet),
// we will need to check if there was a change in cluster topology
// or we have a different max payload. We will send this first before
// pong since most clients do flush after connect call.
checkInfoChange = !c.flags.isSet(firstPongSent)
}
c.mu.Unlock()
if checkInfoChange {
opts := srv.getOpts()
srv.mu.Lock()
c.mu.Lock()
// Now that we are under both locks, we can flip the flag.
// This prevents sendAsyncInfoToClients() and code here to
// send a double INFO protocol.
c.flags.set(firstPongSent)
// If there was a cluster update since this client was created,
// send an updated INFO protocol now.
if srv.lastCURLsUpdate >= c.start.UnixNano() || c.mpay != int32(opts.MaxPayload) {
c.enqueueProto(c.generateClientInfoJSON(srv.copyInfo()))
}
c.mu.Unlock()
srv.mu.Unlock()
}
}
func (c *client) processPong() {
c.mu.Lock()
c.ping.out = 0
c.rtt = computeRTT(c.rttStart)
srv := c.srv
reorderGWs := c.kind == GATEWAY && c.gw.outbound
c.mu.Unlock()
if reorderGWs {
srv.gateway.orderOutboundConnections()
}
}
func (c *client) processPub(arg []byte) error {
// Unroll splitArgs to avoid runtime/heap issues
a := [MAX_PUB_ARGS][]byte{}
args := a[:0]
start := -1
for i, b := range arg {
switch b {
case ' ', '\t':
if start >= 0 {
args = append(args, arg[start:i])
start = -1
}
default:
if start < 0 {
start = i
}
}
}
if start >= 0 {
args = append(args, arg[start:])
}
c.pa.arg = arg
switch len(args) {
case 2:
c.pa.subject = args[0]
c.pa.reply = nil
c.pa.size = parseSize(args[1])
c.pa.szb = args[1]
case 3:
c.pa.subject = args[0]
c.pa.reply = args[1]
c.pa.size = parseSize(args[2])
c.pa.szb = args[2]
default:
return fmt.Errorf("processPub Parse Error: '%s'", arg)
}
// If number overruns an int64, parseSize() will have returned a negative value
if c.pa.size < 0 {
return fmt.Errorf("processPub Bad or Missing Size: '%s'", arg)
}
maxPayload := atomic.LoadInt32(&c.mpay)
// Use int64() to avoid int32 overrun...
if maxPayload != jwt.NoLimit && int64(c.pa.size) > int64(maxPayload) {
c.maxPayloadViolation(c.pa.size, maxPayload)
return ErrMaxPayload
}
if c.opts.Pedantic && !IsValidLiteralSubject(string(c.pa.subject)) {
c.sendErr("Invalid Publish Subject")
}
return nil
}
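// Example (illustrative): for the protocol line `PUB foo bar 5`, arg is
// "foo bar 5", so the subject is "foo", the reply is "bar" and the size
// is 5; the two-arg form `PUB foo 5` carries no reply subject.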
func splitArg(arg []byte) [][]byte {
a := [MAX_MSG_ARGS][]byte{}
args := a[:0]
start := -1
for i, b := range arg {
switch b {
case ' ', '\t', '\r', '\n':
if start >= 0 {
args = append(args, arg[start:i])
start = -1
}
default:
if start < 0 {
start = i
}
}
}
if start >= 0 {
args = append(args, arg[start:])
}
return args
}
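// e.g. (illustrative): splitArg([]byte("foo.* workers 7")) returns the
// three fields ["foo.*" "workers" "7"], as consumed by processSub below
// for `SUB <subject> [queue] <sid>`.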
func (c *client) processSub(argo []byte, noForward bool) (*subscription, error) {
// Indicate activity.
c.in.subs++
// Copy so we do not reference a potentially large buffer
// FIXME(dlc) - make more efficient.
arg := make([]byte, len(argo))
copy(arg, argo)
args := splitArg(arg)
sub := &subscription{client: c}
switch len(args) {
case 2:
sub.subject = args[0]
sub.queue = nil
sub.sid = args[1]
case 3:
sub.subject = args[0]
sub.queue = args[1]
sub.sid = args[2]
default:
return nil, fmt.Errorf("processSub Parse Error: '%s'", arg)
}
c.mu.Lock()
// Grab connection type, account and server info.
kind := c.kind
acc := c.acc
srv := c.srv
sid := string(sub.sid)
// This check does not apply to SYSTEM clients (because they don't have a `nc`...)
if kind != SYSTEM && c.isClosed() {
c.mu.Unlock()
return sub, nil
}
// Check permissions if applicable.
if kind == CLIENT {
// First do a pass whether queue subscription is valid. This does not necessarily
// mean that it will not be able to plain subscribe.
//
// allow = ["foo"] -> can subscribe or queue subscribe to foo using any queue
// allow = ["foo v1"] -> can only queue subscribe to 'foo v1', no plain subs allowed.
// allow = ["foo", "foo v1"] -> can subscribe to 'foo' but can only queue subscribe to 'foo v1'
//
if sub.queue != nil {
if !c.canQueueSubscribe(string(sub.subject), string(sub.queue)) {
c.mu.Unlock()
c.subPermissionViolation(sub)
return nil, nil
}
} else if !c.canSubscribe(string(sub.subject)) {
c.mu.Unlock()
c.subPermissionViolation(sub)
return nil, nil
}
}
// Check if we have a maximum on the number of subscriptions.
if c.subsAtLimit() {
c.mu.Unlock()
c.maxSubsExceeded()
return nil, nil
}
var updateGWs bool
var err error
// Subscribe here.
if c.subs[sid] == nil {
c.subs[sid] = sub
if acc != nil && acc.sl != nil {
err = acc.sl.Insert(sub)
if err != nil {
delete(c.subs, sid)
} else {
updateGWs = c.srv.gateway.enabled
}
}
}
// Unlocked from here onward
c.mu.Unlock()
if err != nil {
c.sendErr("Invalid Subject")
return nil, nil
} else if c.opts.Verbose && kind != SYSTEM {
c.sendOK()
}
// No account just return.
if acc == nil {
return sub, nil
}
if err := c.addShadowSubscriptions(acc, sub); err != nil {
c.Errorf(err.Error())
}
if noForward {
return sub, nil
}
// If we are routing and this is a local sub, add to the route map for the associated account.
if kind == CLIENT || kind == SYSTEM {
srv.updateRouteSubscriptionMap(acc, sub, 1)
if updateGWs {
srv.gatewayUpdateSubInterest(acc.Name, sub, 1)
}
}
// Now check on leafnode updates.
srv.updateLeafNodes(acc, sub, 1)
return sub, nil
}
// If the client's account has stream imports and there are matches for
// this subscription's subject, then add shadow subscriptions in the
// other accounts that export this subject.
func (c *client) addShadowSubscriptions(acc *Account, sub *subscription) error {
if acc == nil {
return ErrMissingAccount
}
var (
rims [32]*streamImport
ims = rims[:0]
rfroms [32]*streamImport
froms = rfroms[:0]
tokens []string
tsa [32]string
hasWC bool
)
acc.mu.RLock()
// Loop over the import subjects. We have 3 scenarios. If we exact
// match or we know the proposed subject is a strict subset of the
// import we can subscribe to the subscription's subject directly.
// The third scenario is where the proposed subject has a wildcard
// and may not be an exact subset, but is a match. Therefore we have to
// subscribe to the import subject, not the subscription's subject.
for _, im := range acc.imports.streams {
if im.invalid {
continue
}
subj := string(sub.subject)
if subj == im.prefix+im.from {
ims = append(ims, im)
continue
}
if tokens == nil {
tokens = tsa[:0]
start := 0
for i := 0; i < len(subj); i++ {
// This is not perfect, but the test below will
// be more exact, this is just to trigger the
// additional test.
if subj[i] == pwc || subj[i] == fwc {
hasWC = true
} else if subj[i] == btsep {
tokens = append(tokens, subj[start:i])
start = i + 1
}
}
tokens = append(tokens, subj[start:])
}
if isSubsetMatch(tokens, im.prefix+im.from) {
ims = append(ims, im)
} else if hasWC {
if subjectIsSubsetMatch(im.prefix+im.from, subj) {
froms = append(froms, im)
}
}
}
acc.mu.RUnlock()
var shadow []*subscription
if len(ims) > 0 || len(froms) > 0 {
shadow = make([]*subscription, 0, len(ims)+len(froms))
}
// Now walk through collected importMaps
for _, im := range ims {
// We will create a shadow subscription.
nsub, err := c.addShadowSub(sub, im, false)
if err != nil {
return err
}
shadow = append(shadow, nsub)
}
// Now walk through importMaps that we need to subscribe
// exactly to the "from" property.
for _, im := range froms {
// We will create a shadow subscription.
nsub, err := c.addShadowSub(sub, im, true)
if err != nil {
return err
}
shadow = append(shadow, nsub)
}
if shadow != nil {
c.mu.Lock()
sub.shadow = shadow
c.mu.Unlock()
}
return nil
}
// Add in the shadow subscription.
func (c *client) addShadowSub(sub *subscription, im *streamImport, useFrom bool) (*subscription, error) {
nsub := *sub // copy
nsub.im = im
if useFrom {
nsub.subject = []byte(im.from)
} else if im.prefix != "" {
// redo subject here to match subject in the publisher account space.
// Just remove prefix from what they gave us. That maps into other space.
nsub.subject = sub.subject[len(im.prefix):]
}
c.Debugf("Creating import subscription on %q from account %q", nsub.subject, im.acc.Name)
if err := im.acc.sl.Insert(&nsub); err != nil {
errs := fmt.Sprintf("Could not add shadow import subscription for account %q", im.acc.Name)
c.Debugf(errs)
return nil, fmt.Errorf(errs)
}
// Update our route map here.
c.srv.updateRouteSubscriptionMap(im.acc, &nsub, 1)
return &nsub, nil
}
// canSubscribe determines if the client is authorized to subscribe to the
// given subject. Assumes caller is holding lock.
func (c *client) canSubscribe(subject string) bool {
if c.perms == nil {
return true
}
allowed := true
// Check allow list. If no allow list that means all are allowed. Deny can overrule.
if c.perms.sub.allow != nil {
r := c.perms.sub.allow.Match(subject)
allowed = len(r.psubs) != 0
}
// If we have a deny list and we think we are allowed, check that as well.
if allowed && c.perms.sub.deny != nil {
r := c.perms.sub.deny.Match(subject)
allowed = len(r.psubs) == 0
// We use the actual subscription to signal us to spin up the deny mperms
// and cache. We check if the subject is a wildcard that contains any of
// the deny clauses.
// FIXME(dlc) - We could be smarter and track when these go away and remove.
if allowed && c.mperms == nil && subjectHasWildcard(subject) {
// Whip through the deny array and check if this wildcard subject is within scope.
for _, sub := range c.darray {
tokens := strings.Split(sub, tsep)
if isSubsetMatch(tokens, subject) {
c.loadMsgDenyFilter()
break
}
}
}
}
return allowed
}
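// Illustrative examples (not from the original source), given
// allow = ["foo.>"] and deny = ["foo.private.>"]:
//
//	c.canSubscribe("foo.bar")       // true: allowed and not denied
//	c.canSubscribe("foo.private.x") // false: deny overrules allow
//	c.canSubscribe("bar")           // false: not in the allow list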
func queueMatches(queue string, qsubs [][]*subscription) bool {
if len(qsubs) == 0 {
return true
}
for _, qsub := range qsubs {
qs := qsub[0]
qname := string(qs.queue)
// NOTE: '*' and '>' tokens can also be valid
// queue names so we first check against the
// literal name. e.g. v1.* == v1.*
if queue == qname || (subjectHasWildcard(qname) && subjectIsSubsetMatch(queue, qname)) {
return true
}
}
return false
}
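// For example (illustrative): a permission entry with queue name "v1.*"
// matches the literal queue "v1.*" as well as any queue it subsumes, such
// as "v1.prod"; an empty qsubs result matches any queue.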
func (c *client) canQueueSubscribe(subject, queue string) bool {
if c.perms == nil {
return true
}
allowed := true
if c.perms.sub.allow != nil {
r := c.perms.sub.allow.Match(subject)
// If perms DO NOT have queue name, then psubs will be greater than
// zero. If perms DO have queue name, then qsubs will be greater than
// zero.
allowed = len(r.psubs) > 0
if len(r.qsubs) > 0 {
// If the queue appears in the allow list, then DO allow.
allowed = queueMatches(queue, r.qsubs)
}
}
if allowed && c.perms.sub.deny != nil {
r := c.perms.sub.deny.Match(subject)
// If perms DO NOT have queue name, then psubs will be greater than
// zero. If perms DO have queue name, then qsubs will be greater than
// zero.
allowed = len(r.psubs) == 0
if len(r.qsubs) > 0 {
// If the queue appears in the deny list, then DO NOT allow.
allowed = !queueMatches(queue, r.qsubs)
}
}
return allowed
}
// Low level unsubscribe for a given client.
func (c *client) unsubscribe(acc *Account, sub *subscription, force, remove bool) {
c.mu.Lock()
if !force && sub.max > 0 && sub.nm < sub.max {
c.Debugf(
"Deferring actual UNSUB(%s): %d max, %d received",
string(sub.subject), sub.max, sub.nm)
c.mu.Unlock()
return
}
if c.trace {
c.traceOp("<-> %s", "DELSUB", sub.sid)
}
if c.kind != CLIENT && c.kind != SYSTEM {
c.removeReplySubTimeout(sub)
}
// Remove accounting if requested. This will be false when we close a connection
// with open subscriptions.
if remove {
delete(c.subs, string(sub.sid))
if acc != nil {
acc.sl.Remove(sub)
}
}
// Check to see if we have shadow subscriptions.
var updateRoute bool
shadowSubs := sub.shadow
sub.shadow = nil
if len(shadowSubs) > 0 {
updateRoute = (c.kind == CLIENT || c.kind == SYSTEM || c.kind == LEAF) && c.srv != nil
}
sub.close()
c.mu.Unlock()
// Process shadow subs if we have them.
for _, nsub := range shadowSubs {
if err := nsub.im.acc.sl.Remove(nsub); err != nil {
c.Debugf("Could not remove shadow import subscription for account %q", nsub.im.acc.Name)
} else if updateRoute {
c.srv.updateRouteSubscriptionMap(nsub.im.acc, nsub, -1)
}
// Now check on leafnode updates.
c.srv.updateLeafNodes(nsub.im.acc, nsub, -1)
}
// Now check to see if this was part of a respMap entry for service imports.
if acc != nil {
acc.checkForRespEntry(string(sub.subject))
}
}
func (c *client) processUnsub(arg []byte) error {
args := splitArg(arg)
var sid []byte
max := -1
switch len(args) {
case 1:
sid = args[0]
case 2:
sid = args[0]
max = parseSize(args[1])
default:
return fmt.Errorf("processUnsub Parse Error: '%s'", arg)
}
// Indicate activity.
c.in.subs++
var sub *subscription
var ok, unsub bool
c.mu.Lock()
// Grab connection type.
kind := c.kind
srv := c.srv
var acc *Account
updateGWs := false
if sub, ok = c.subs[string(sid)]; ok {
acc = c.acc
if max > 0 {
sub.max = int64(max)
} else {
// Clear it here to override
sub.max = 0
unsub = true
}
updateGWs = srv.gateway.enabled
}
c.mu.Unlock()
if c.opts.Verbose {
c.sendOK()
}
if unsub {
c.unsubscribe(acc, sub, false, true)
if (acc != nil && kind == CLIENT) || kind == SYSTEM {
srv.updateRouteSubscriptionMap(acc, sub, -1)
if updateGWs {
srv.gatewayUpdateSubInterest(acc.Name, sub, -1)
}
}
// Now check on leafnode updates.
srv.updateLeafNodes(acc, sub, -1)
}
return nil
}
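// Example (illustrative): `UNSUB 7` removes sid 7 immediately, while
// `UNSUB 7 10` only records max = 10; the actual removal is then deferred
// to the auto-unsubscribe check in deliverMsg()/unsubscribe().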
// checkDenySub will check if we are allowed to deliver this message in the
// presence of deny clauses for subscriptions. Deny clauses will not prevent
// larger scoped wildcard subscriptions, so we need to check at delivery time.
// Lock should be held.
func (c *client) checkDenySub(subject string) bool {
if denied, ok := c.mperms.dcache[subject]; ok {
return denied
} else if r := c.mperms.deny.Match(subject); len(r.psubs) != 0 {
c.mperms.dcache[subject] = true
return true
} else {
c.mperms.dcache[subject] = false
}
if len(c.mperms.dcache) > maxDenyPermCacheSize {
c.pruneDenyCache()
}
return false
}
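// e.g. (illustrative): with a deny clause "foo.private.>" loaded into
// mperms, a delivery on "foo.private.x" returns true (denied) and the
// verdict is cached in dcache for subsequent messages.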
func (c *client) msgHeader(mh []byte, sub *subscription, reply []byte) []byte {
if len(sub.sid) > 0 {
mh = append(mh, sub.sid...)
mh = append(mh, ' ')
}
if reply != nil {
mh = append(mh, reply...)
mh = append(mh, ' ')
}
mh = append(mh, c.pa.szb...)
mh = append(mh, _CRLF_...)
return mh
}
func (c *client) stalledWait(producer *client) {
stall := c.out.stc
ttl := stallDuration(c.out.pb, c.out.mp)
c.mu.Unlock()
defer c.mu.Lock()
select {
case <-stall:
case <-time.After(ttl):
producer.Debugf("Timed out of fast producer stall (%v)", ttl)
}
}
func stallDuration(pb, mp int64) time.Duration {
ttl := stallClientMinDuration
if pb >= mp {
ttl = stallClientMaxDuration
} else if hmp := mp / 2; pb > hmp {
bsz := hmp / 10
additional := int64(ttl) * ((pb - hmp) / bsz)
ttl += time.Duration(additional)
}
return ttl
}
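// Worked example (illustrative, assuming stallClientMinDuration is 100ms):
// with mp = 1000 and pb = 700, hmp = 500 and bsz = 50, so the additional
// delay is ttl * ((700-500)/50) = 4x the minimum, i.e. 500ms total; once
// pb >= mp the stall is capped at stallClientMaxDuration.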
// Used to treat maps as efficient set
var needFlush = struct{}{}
// deliverMsg will deliver a message to a matching subscription and its underlying client.
// We process all connection/client types. mh is the part that will be protocol/client specific.
func (c *client) deliverMsg(sub *subscription, subject, mh, msg []byte, gwrply bool) bool {
if sub.client == nil {
return false
}
client := sub.client
client.mu.Lock()
// Check echo
if c == client && !client.echo {
client.mu.Unlock()
return false
}
// Check if we have a subscribe deny clause. This will trigger us to check the subject
// for a match against the denied subjects.
if client.mperms != nil && client.checkDenySub(string(subject)) {
client.mu.Unlock()
return false
}
// This is set under the client lock using atomic because it can be
// checked with atomic without the client lock. Here, we don't need
// the atomic operation since we are under the lock.
if sub.closed == 1 {
client.mu.Unlock()
return false
}
srv := client.srv
sub.nm++
// Check if we should auto-unsubscribe.
if sub.max > 0 {
if client.kind == ROUTER && sub.nm >= sub.max {
// The only router based messages that we will see here are remoteReplies.
// We handle these slightly differently.
defer client.removeReplySub(sub)
} else {
// For routing..
shouldForward := client.kind == CLIENT || (client.kind == SYSTEM && client.srv != nil)
// If we are at the exact number, unsubscribe but
// still process the message in hand, otherwise
// unsubscribe and drop message on the floor.
if sub.nm == sub.max {
client.Debugf("Auto-unsubscribe limit of %d reached for sid '%s'", sub.max, string(sub.sid))
// Due to defer, reverse the code order so that execution
// is consistent with other cases where we unsubscribe.
if shouldForward {
if srv.gateway.enabled {
defer srv.gatewayUpdateSubInterest(client.acc.Name, sub, -1)
}
defer srv.updateRouteSubscriptionMap(client.acc, sub, -1)
}
defer client.unsubscribe(client.acc, sub, true, true)
} else if sub.nm > sub.max {
client.Debugf("Auto-unsubscribe limit [%d] exceeded", sub.max)
client.mu.Unlock()
client.unsubscribe(client.acc, sub, true, true)
if shouldForward {
srv.updateRouteSubscriptionMap(client.acc, sub, -1)
if srv.gateway.enabled {
srv.gatewayUpdateSubInterest(client.acc.Name, sub, -1)
}
}
return false
}
}
}
// Update statistics
// The msg includes the CR_LF, so pull back out for accounting.
msgSize := int64(len(msg) - LEN_CR_LF)
// No atomic needed since accessed under client lock.
// Monitor is reading those also under client's lock.
client.outMsgs++
client.outBytes += msgSize
atomic.AddInt64(&srv.outMsgs, 1)
atomic.AddInt64(&srv.outBytes, msgSize)
// Check for internal subscription.
if client.kind == SYSTEM {
s := client.srv
client.mu.Unlock()
s.deliverInternalMsg(sub, c, subject, c.pa.reply, msg[:msgSize])
return true
}
// If we are a client and we detect that the consumer we are
// sending to is in a stalled state, go ahead and wait here
// with a limit.
if c.kind == CLIENT && client.out.stc != nil {
client.stalledWait(c)
}
// Check for closed connection
if client.isClosed() {
client.mu.Unlock()
return false
}
// Do a fast check here to see if we should be tracking this from a latency
// perspective. This will be for a request being received for an exported service.
// This needs to be from a non-client (otherwise tracking happens at requestor).
//
// Also this check captures if the original reply (c.pa.reply) is a GW routed
// reply (since it is known to be > minReplyLen). If that is the case, we need to
// track the binding between the routed reply and the reply set in the message
// header (which is c.pa.reply without the GNR routing prefix).
if client.kind == CLIENT && len(c.pa.reply) > minReplyLen {
if gwrply {
// Note we keep track "in" the destination client (`client`) but the
// routed reply subject is in `c.pa.reply`. Should that change, we
// would have to pass the "reply" in deliverMsg().
srv.trackGWReply(client, c.pa.reply)
}
// If we do not have a registered RTT, queue that up now.
if client.rtt == 0 {
client.sendRTTPingLocked()
}
// FIXME(dlc) - We may need to optimize this.
// We will have tagged this with a suffix ('.T') if we are tracking. This is
// needed from sampling. Not all will be tracked.
if c.kind != CLIENT && client.acc.IsExportServiceTracking(string(subject)) && isTrackedReply(c.pa.reply) {
client.trackRemoteReply(string(c.pa.reply))
}
}
// Queue to outbound buffer
client.queueOutbound(mh)
client.queueOutbound(msg)
client.out.pm++
// If we are tracking dynamic publish permissions that track reply subjects,
// do that accounting here. We only look at client.replies which will be non-nil.
if client.replies != nil && len(c.pa.reply) > 0 {
client.replies[string(c.pa.reply)] = &resp{time.Now(), 0}
if len(client.replies) > replyPermLimit {
client.pruneReplyPerms()
}
}
// Check outbound threshold and queue IO flush if needed.
// This is specifically looking at situations where we are getting behind and may want
// to intervene before this producer goes back to top of readloop. We are in the producer's
// readloop go routine at this point.
// FIXME(dlc) - We may call this alot, maybe suppress after first call?
if client.out.pm > 1 && client.out.pb > maxBufSize*2 {
client.flushSignal()
}
// Add the data size we are responsible for here. This will be processed when we
// return to the top of the readLoop.
if _, ok := c.pcd[client]; !ok {
client.out.fsp++
c.pcd[client] = needFlush
}
if client.trace {
client.traceOutOp(string(mh[:len(mh)-LEN_CR_LF]), nil)
}
client.mu.Unlock()
return true
}
// This will track a remote reply for an exported service that has requested
// latency tracking.
// Lock assumed to be held.
func (c *client) trackRemoteReply(reply string) {
if c.rrTracking == nil {
c.rrTracking = make(map[string]*remoteLatency)
c.rrMax = c.acc.MaxAutoExpireResponseMaps()
}
rl := remoteLatency{
Account: c.acc.Name,
ReqId: reply,
}
rl.M2.RequestStart = time.Now()
c.rrTracking[reply] = &rl
if len(c.rrTracking) >= c.rrMax {
c.pruneRemoteTracking()
}
}
// pruneReplyPerms will remove any stale or expired entries
// in our reply cache. We make sure to not check too often.
func (c *client) pruneReplyPerms() {
// Nothing to do if we are not tracking response permissions.
if c.perms.resp == nil {
return
}
mm := c.perms.resp.MaxMsgs
ttl := c.perms.resp.Expires
now := time.Now()
for k, resp := range c.replies {
if mm > 0 && resp.n >= mm {
delete(c.replies, k)
} else if ttl > 0 && now.Sub(resp.t) > ttl {
delete(c.replies, k)
}
}
}
// pruneDenyCache will prune the deny cache via randomly
// deleting items. Doing so pruneSize items at a time.
// Lock must be held for this one since it is shared under
// deliverMsg.
func (c *client) pruneDenyCache() {
r := 0
for subject := range c.mperms.dcache {
delete(c.mperms.dcache, subject)
if r++; r > pruneSize {
break
}
}
}
// prunePubPermsCache will prune the cache via randomly
// deleting items. Doing so pruneSize items at a time.
func (c *client) prunePubPermsCache() {
r := 0
for subject := range c.perms.pcache {
delete(c.perms.pcache, subject)
if r++; r > pruneSize {
break
}
}
}
// pruneRemoteTracking will prune any remote tracking objects
// that are too old. These are orphaned when a service is not
// sending responses, etc.
// Lock should be held upon entry.
func (c *client) pruneRemoteTracking() {
ttl := c.acc.AutoExpireTTL()
now := time.Now()
for reply, rl := range c.rrTracking {
if now.Sub(rl.M2.RequestStart) > ttl {
delete(c.rrTracking, reply)
}
}
}
// pubAllowed checks on publish permissioning.
// Lock should not be held.
func (c *client) pubAllowed(subject string) bool {
return c.pubAllowedFullCheck(subject, true)
}
// pubAllowedFullCheck checks on all publish permissioning depending
// on the flag for dynamic reply permissions.
func (c *client) pubAllowedFullCheck(subject string, fullCheck bool) bool {
if c.perms == nil || (c.perms.pub.allow == nil && c.perms.pub.deny == nil) {
return true
}
// Check if published subject is allowed if we have permissions in place.
allowed, ok := c.perms.pcache[subject]
if ok {
return allowed
}
// Cache miss, check allow then deny as needed.
if c.perms.pub.allow != nil {
r := c.perms.pub.allow.Match(subject)
allowed = len(r.psubs) != 0
} else {
// No entries means all are allowed. Deny will overrule as needed.
allowed = true
}
// If we have a deny list and are currently allowed, check that as well.
if allowed && c.perms.pub.deny != nil {
r := c.perms.pub.deny.Match(subject)
allowed = len(r.psubs) == 0
}
// If we are currently not allowed but we are tracking reply subjects
// dynamically, check to see if we are allowed here but avoid pcache.
// We need to acquire the lock though.
if !allowed && fullCheck && c.perms.resp != nil {
c.mu.Lock()
if resp := c.replies[subject]; resp != nil {
resp.n++
// Check if we have sent too many responses.
if c.perms.resp.MaxMsgs > 0 && resp.n > c.perms.resp.MaxMsgs {
delete(c.replies, subject)
} else if c.perms.resp.Expires > 0 && time.Since(resp.t) > c.perms.resp.Expires {
delete(c.replies, subject)
} else {
allowed = true
}
}
c.mu.Unlock()
} else {
// Update our cache here.
c.perms.pcache[string(subject)] = allowed
// Prune if needed.
if len(c.perms.pcache) > maxPermCacheSize {
c.prunePubPermsCache()
}
}
return allowed
}
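// e.g. (illustrative): with publish allow = ["req.>"], publishing to
// "req.a" is allowed via the sublist and cached in pcache, while a reply
// subject recorded in c.replies is allowed dynamically (and deliberately
// kept out of pcache so its expiration is honored).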
// Test whether a reply subject is a service import reply.
func isServiceReply(reply []byte) bool {
// This function is inlined and checking this way is actually faster
// than byte-by-byte comparison.
return len(reply) > 3 && string(reply[:4]) == replyPrefix
}
// Test whether a reply subject is a service import or a gateway routed reply.
func isReservedReply(reply []byte) bool {
if isServiceReply(reply) {
return true
}
// Faster to check with string([:]) than byte-by-byte
if len(reply) > gwReplyPrefixLen && string(reply[:gwReplyPrefixLen]) == gwReplyPrefix {
return true
}
return false
}
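// A minimal illustration of the idiom above, kept entirely in comments,
// assuming a hypothetical 4-byte prefix constant (not the real
// replyPrefix value):
//
//	const somePrefix = "_XX_"
//	fast := len(reply) > 3 && string(reply[:4]) == somePrefix
//
// Go compiles string(b[:n]) == constant into an allocation-free
// comparison, which is why this form beats a byte-by-byte loop here.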
// This will decide to call the client code or router code.
func (c *client) processInboundMsg(msg []byte) {
switch c.kind {
case CLIENT:
c.processInboundClientMsg(msg)
case ROUTER:
c.processInboundRoutedMsg(msg)
case GATEWAY:
c.processInboundGatewayMsg(msg)
case LEAF:
c.processInboundLeafMsg(msg)
}
}
// processInboundClientMsg is called to process an inbound msg from a client.
func (c *client) processInboundClientMsg(msg []byte) {
// Update statistics
// The msg includes the CR_LF, so pull back out for accounting.
c.in.msgs++
c.in.bytes += int32(len(msg) - LEN_CR_LF)
// Check that the client (could be here with SYSTEM) is not publishing on the reserved "$GNR" prefix.
if c.kind == CLIENT && hasGWRoutedReplyPrefix(c.pa.subject) {
c.pubPermissionViolation(c.pa.subject)
return
}
// Check pub permissions
if c.perms != nil && (c.perms.pub.allow != nil || c.perms.pub.deny != nil) && !c.pubAllowed(string(c.pa.subject)) {
c.pubPermissionViolation(c.pa.subject)
return
}
// Now check for reserved replies. These are used for service imports.
if len(c.pa.reply) > 0 && isReservedReply(c.pa.reply) {
c.replySubjectViolation(c.pa.reply)
return
}
if c.opts.Verbose {
c.sendOK()
}
// Mostly under testing scenarios.
if c.srv == nil || c.acc == nil {
return
}
// Check if this client's gateway replies map is not empty
if atomic.LoadInt32(&c.cgwrt) > 0 && c.handleGWReplyMap(msg) {
return
}
// Check to see if we need to map/route to another account.
if c.acc.imports.services != nil {
c.checkForImportServices(c.acc, msg)
}
// If we have an exported service and we are doing remote tracking, check this subject
// to see if we need to report the latency.
if c.rrTracking != nil {
c.mu.Lock()
rl := c.rrTracking[string(c.pa.subject)]
if rl != nil {
delete(c.rrTracking, string(c.pa.subject))
}
rtt := c.rtt
c.mu.Unlock()
if rl != nil {
sl := &rl.M2
// Fill this in and send it off to the other side.
sl.AppName = c.opts.Name
sl.ServiceLatency = time.Since(sl.RequestStart) - rtt
sl.NATSLatency.Responder = rtt
sl.TotalLatency = sl.ServiceLatency + rtt
sanitizeLatencyMetric(sl)
lsub := remoteLatencySubjectForResponse(c.pa.subject)
c.srv.sendInternalAccountMsg(nil, lsub, &rl) // Send to SYS account
}
}
// Match the subscriptions. We will use our own L1 map if
// it's still valid, avoiding contention on the shared sublist.
var r *SublistResult
var ok bool
genid := atomic.LoadUint64(&c.acc.sl.genid)
if genid == c.in.genid && c.in.results != nil {
r, ok = c.in.results[string(c.pa.subject)]
} else {
// Reset our L1 completely.
c.in.results = make(map[string]*SublistResult)
c.in.genid = genid
}
// Go back to the sublist data structure.
if !ok {
r = c.acc.sl.Match(string(c.pa.subject))
c.in.results[string(c.pa.subject)] = r
// Prune the results cache. Keeps us from unbounded growth. Random delete.
if len(c.in.results) > maxResultCacheSize {
n := 0
for subject := range c.in.results {
delete(c.in.results, subject)
if n++; n > pruneSize {
break
}
}
}
}
var qnames [][]byte
// Check for no interest, short circuit if so.
// This is the fanout scale.
if len(r.psubs)+len(r.qsubs) > 0 {
flag := pmrNoFlag
// If there are matching queue subs and we are in gateway mode,
// we need to keep track of the queue names the messages are
// delivered to. When sending to the GWs, the RMSG will include
// those names so that the remote clusters do not deliver messages
// to their queue subs of the same names.
if len(r.qsubs) > 0 && c.srv.gateway.enabled &&
atomic.LoadInt64(&c.srv.gateway.totalQSubs) > 0 {
flag |= pmrCollectQueueNames
}
qnames = c.processMsgResults(c.acc, r, msg, c.pa.subject, c.pa.reply, flag)
}
// Now deal with gateways
if c.srv.gateway.enabled {
c.sendMsgToGateways(c.acc, msg, c.pa.subject, c.pa.reply, qnames)
}
}
// This is invoked knowing that this client has some GW replies
// in its map. It will check if one is found for the c.pa.subject
// and if so will process it directly (send to GWs and LEAF) and
// return true to notify the caller that the message was handled.
// If there is no mapping for the subject, false is returned.
func (c *client) handleGWReplyMap(msg []byte) bool {
c.mu.Lock()
rm, ok := c.gwrm[string(c.pa.subject)]
if !ok {
c.mu.Unlock()
return false
}
// Set subject to the mapped reply subject
c.pa.subject = []byte(rm.ms)
var rl *remoteLatency
var rtt time.Duration
if c.rrTracking != nil {
rl = c.rrTracking[string(c.pa.subject)]
if rl != nil {
delete(c.rrTracking, string(c.pa.subject))
}
rtt = c.rtt
}
c.mu.Unlock()
if rl != nil {
sl := &rl.M2
// Fill this in and send it off to the other side.
sl.AppName = c.opts.Name
sl.ServiceLatency = time.Since(sl.RequestStart) - rtt
sl.NATSLatency.Responder = rtt
sl.TotalLatency = sl.ServiceLatency + rtt
sanitizeLatencyMetric(sl)
lsub := remoteLatencySubjectForResponse(c.pa.subject)
c.srv.sendInternalAccountMsg(nil, lsub, &rl) // Send to SYS account
}
// Check for leaf nodes
if c.srv.gwLeafSubs.Count() > 0 {
if r := c.srv.gwLeafSubs.Match(string(c.pa.subject)); len(r.psubs) > 0 {
c.processMsgResults(c.acc, r, msg, c.pa.subject, c.pa.reply, pmrNoFlag)
}
}
if c.srv.gateway.enabled {
c.sendMsgToGateways(c.acc, msg, c.pa.subject, c.pa.reply, nil)
}
return true
}
// This checks and processes import services by doing the mapping and sending the
// message onward if applicable.
func (c *client) checkForImportServices(acc *Account, msg []byte) {
if acc == nil || acc.imports.services == nil {
return
}
acc.mu.RLock()
si := acc.imports.services[string(c.pa.subject)]
invalid := si != nil && si.invalid
acc.mu.RUnlock()
// Get the results from the other account for the mapped "to" subject.
// If we have been marked invalid simply return here.
if si != nil && !invalid && si.acc != nil && si.acc.sl != nil {
var nrr []byte
if c.pa.reply != nil {
var latency *serviceLatency
var tracking bool
if tracking = shouldSample(si.latency); tracking {
latency = si.latency
}
// We want to remap this to provide anonymity.
nrr = si.acc.newServiceReply(tracking)
si.acc.addRespServiceImport(acc, string(nrr), string(c.pa.reply), si.rt, latency)
// Track our responses for cleanup if not auto-expire.
if si.rt != Singleton {
acc.addRespMapEntry(si.acc, string(c.pa.reply), string(nrr))
} else if si.latency != nil && c.rtt == 0 {
// We have a service import that we are tracking but have not established RTT.
c.sendRTTPing()
}
}
// FIXME(dlc) - Do L1 cache trick from above.
rr := si.acc.sl.Match(si.to)
// Check to see if we have no results and this is an internal serviceImport. If so we
// need to clean that up.
if len(rr.psubs)+len(rr.qsubs) == 0 && si.internal {
// We may also have a response entry, so go through that way.
si.acc.checkForRespEntry(si.to)
}
flags := pmrNoFlag
// If we are a route, gateway or leafnode and this message is directed to a queue
// subscriber, we need to handle that here since processMsgResults will expect a queue filter.
if c.kind == GATEWAY || c.kind == ROUTER || c.kind == LEAF {
flags |= pmrIgnoreEmptyQueueFilter
}
if c.srv.gateway.enabled {
flags |= pmrCollectQueueNames
queues := c.processMsgResults(si.acc, rr, msg, []byte(si.to), nrr, flags)
c.sendMsgToGateways(si.acc, msg, []byte(si.to), nrr, queues)
} else {
c.processMsgResults(si.acc, rr, msg, []byte(si.to), nrr, flags)
}
shouldRemove := si.ae
// Calculate tracking info here if we are tracking this request/response.
if si.tracking {
if requesting := firstSubFromResult(rr); requesting != nil {
shouldRemove = acc.sendTrackingLatency(si, requesting.client, c)
}
}
if shouldRemove {
acc.removeServiceImport(si.from)
}
}
}
func (c *client) addSubToRouteTargets(sub *subscription) {
if c.in.rts == nil {
c.in.rts = make([]routeTarget, 0, routeTargetInit)
}
for i := range c.in.rts {
rt := &c.in.rts[i]
if rt.sub.client == sub.client {
if sub.queue != nil {
rt.qs = append(rt.qs, sub.queue...)
rt.qs = append(rt.qs, ' ')
}
return
}
}
var rt *routeTarget
lrts := len(c.in.rts)
// If we are here we do not have the sub yet in our list.
// If we have to grow, do so here.
if lrts == cap(c.in.rts) {
c.in.rts = append(c.in.rts, routeTarget{})
}
c.in.rts = c.in.rts[:lrts+1]
rt = &c.in.rts[lrts]
rt.sub = sub
rt.qs = rt._qs[:0]
if sub.queue != nil {
rt.qs = append(rt.qs, sub.queue...)
rt.qs = append(rt.qs, ' ')
}
}
// This processes the sublist results for a given message.
func (c *client) processMsgResults(acc *Account, r *SublistResult, msg, subject, reply []byte, flags int) [][]byte {
var queues [][]byte
// msg header for clients.
msgh := c.msgb[1:msgHeadProtoLen]
msgh = append(msgh, subject...)
msgh = append(msgh, ' ')
si := len(msgh)
// For sending messages across routes and leafnodes.
// Reset if we have one since we reuse this data structure.
if c.in.rts != nil {
c.in.rts = c.in.rts[:0]
}
var rplyHasGWPrefix bool
var creply = reply
// If the reply subject is a GW routed reply, we will perform some
// tracking in deliverMsg(). We also want to send to the user the
// reply without the prefix. `creply` will be set to that and be
// used to create the message header for client connections.
if rplyHasGWPrefix = isGWRoutedReply(reply); rplyHasGWPrefix {
creply = reply[gwSubjectOffset:]
}
// Loop over all normal subscriptions that match.
for _, sub := range r.psubs {
// Check if this is a send to a ROUTER. We now process
// these after everything else.
switch sub.client.kind {
case ROUTER:
if (c.kind != ROUTER && !c.isSolicitedLeafNode()) || (flags&pmrAllowSendFromRouteToRoute != 0) {
c.addSubToRouteTargets(sub)
}
continue
case GATEWAY:
// Never send to gateway from here.
continue
case LEAF:
// We handle similarly to routes and use the same data structures.
// Leaf node delivery audience is different however.
// Also leaf nodes are always no echo, so we make sure we are not
// going to send back to ourselves here.
if c != sub.client && (c.kind != ROUTER || !c.isSolicitedLeafNode()) {
c.addSubToRouteTargets(sub)
}
continue
}
// Check for stream import mapped subs. These apply to local subs only.
if sub.im != nil && sub.im.prefix != "" {
// Redo the subject here on the fly.
msgh = c.msgb[1:msgHeadProtoLen]
msgh = append(msgh, sub.im.prefix...)
msgh = append(msgh, subject...)
msgh = append(msgh, ' ')
si = len(msgh)
}
// Normal delivery
mh := c.msgHeader(msgh[:si], sub, creply)
c.deliverMsg(sub, subject, mh, msg, rplyHasGWPrefix)
}
// Set these up to optionally filter based on the queue lists.
// This is for messages received from routes which will have directed
// guidance on which queue groups we should deliver to.
qf := c.pa.queues
// For all non-client connections, we may still want to send messages to
// leaf nodes or routes even if there are no queue filters since we collect
// them above and do not process inline like normal clients.
// However, do select queue subs if asked to ignore empty queue filter.
if c.kind != CLIENT && qf == nil && flags&pmrIgnoreEmptyQueueFilter == 0 {
goto sendToRoutesOrLeafs
}
// Check to see if we have our own rand yet. Global rand
// has contention with lots of clients, etc.
if c.in.prand == nil {
c.in.prand = rand.New(rand.NewSource(time.Now().UnixNano()))
}
// Process queue subs
for i := 0; i < len(r.qsubs); i++ {
qsubs := r.qsubs[i]
// If we have a filter check that here. We could make this a map or something more
// complex, but we use a linear search since we expect queues to be small. It should be faster
// and more cache friendly.
if qf != nil && len(qsubs) > 0 {
tqn := qsubs[0].queue
for _, qn := range qf {
if bytes.Equal(qn, tqn) {
goto selectQSub
}
}
continue
}
selectQSub:
// We will hold onto remote or leaf qsubs when we are coming from
// a route or a leaf node just in case we can no longer do local delivery.
var rsub, sub *subscription
var _ql [32]*subscription
src := c.kind
// If we just came from a route we want to prefer local subs.
// So only select from local subs but remember the first rsub
// in case all else fails.
if src == ROUTER {
ql := _ql[:0]
for i := 0; i < len(qsubs); i++ {
sub = qsubs[i]
if sub.client.kind == CLIENT {
ql = append(ql, sub)
} else if rsub == nil {
rsub = sub
}
}
qsubs = ql
}
sindex := 0
lqs := len(qsubs)
if lqs > 1 {
sindex = c.in.prand.Int() % lqs
}
// Find a subscription that is able to deliver this message starting at a random index.
for i := 0; i < lqs; i++ {
if sindex+i < lqs {
sub = qsubs[sindex+i]
} else {
sub = qsubs[(sindex+i)%lqs]
}
if sub == nil {
continue
}
// We have taken care of preferring local subs for a message from a route above.
// Here we just care about messages from a client or leaf: skip leaf destinations to prefer locals.
if dst := sub.client.kind; dst == ROUTER || dst == LEAF {
if (src == LEAF || src == CLIENT) && dst == LEAF {
if rsub == nil {
rsub = sub
}
continue
} else {
c.addSubToRouteTargets(sub)
if flags&pmrCollectQueueNames != 0 {
queues = append(queues, sub.queue)
}
}
break
}
// Check for mapped subs
if sub.im != nil && sub.im.prefix != "" {
// Redo the subject here on the fly.
msgh = c.msgb[1:msgHeadProtoLen]
msgh = append(msgh, sub.im.prefix...)
msgh = append(msgh, subject...)
msgh = append(msgh, ' ')
si = len(msgh)
}
var rreply = reply
if rplyHasGWPrefix && sub.client.kind == CLIENT {
rreply = creply
}
// "rreply" will be stripped of the $GNR prefix (if present)
// for client connections only.
mh := c.msgHeader(msgh[:si], sub, rreply)
if c.deliverMsg(sub, subject, mh, msg, rplyHasGWPrefix) {
// Clear rsub
rsub = nil
if flags&pmrCollectQueueNames != 0 {
queues = append(queues, sub.queue)
}
break
}
}
if rsub != nil {
// If we are here we tried to deliver to a local qsub
// but failed. So we will send it to a remote or leaf node.
c.addSubToRouteTargets(rsub)
if flags&pmrCollectQueueNames != 0 {
queues = append(queues, rsub.queue)
}
}
}
sendToRoutesOrLeafs:
// If no messages for routes or leafnodes return here.
if len(c.in.rts) == 0 {
return queues
}
// We address by index to avoid struct copy.
// We have inline structs for memory layout and cache coherency.
for i := range c.in.rts {
rt := &c.in.rts[i]
kind := rt.sub.client.kind
mh := c.msgb[:msgHeadProtoLen]
if kind == ROUTER {
// Router (and Gateway) nodes are RMSG. Set here since leafnodes may rewrite.
mh[0] = 'R'
mh = append(mh, acc.Name...)
mh = append(mh, ' ')
} else {
// Leaf nodes are LMSG
mh[0] = 'L'
// Remap subject if it's a shadow subscription, treat like a normal client.
if rt.sub.im != nil && rt.sub.im.prefix != "" {
mh = append(mh, rt.sub.im.prefix...)
}
}
mh = append(mh, subject...)
mh = append(mh, ' ')
if len(rt.qs) > 0 {
if reply != nil {
mh = append(mh, "+ "...) // Signal that there is a reply.
mh = append(mh, reply...)
mh = append(mh, ' ')
} else {
mh = append(mh, "| "...) // Only queues
}
mh = append(mh, rt.qs...)
} else if reply != nil {
mh = append(mh, reply...)
mh = append(mh, ' ')
}
mh = append(mh, c.pa.szb...)
mh = append(mh, _CRLF_...)
c.deliverMsg(rt.sub, subject, mh, msg, false)
}
return queues
}
func (c *client) pubPermissionViolation(subject []byte) {
c.sendErr(fmt.Sprintf("Permissions Violation for Publish to %q", subject))
c.Errorf("Publish Violation - %s, Subject %q", c.getAuthUser(), subject)
}
func (c *client) subPermissionViolation(sub *subscription) {
errTxt := fmt.Sprintf("Permissions Violation for Subscription to %q", sub.subject)
logTxt := fmt.Sprintf("Subscription Violation - %s, Subject %q, SID %s",
c.getAuthUser(), sub.subject, sub.sid)
if sub.queue != nil {
errTxt = fmt.Sprintf("Permissions Violation for Subscription to %q using queue %q", sub.subject, sub.queue)
logTxt = fmt.Sprintf("Subscription Violation - %s, Subject %q, Queue: %q, SID %s",
c.getAuthUser(), sub.subject, sub.queue, sub.sid)
}
c.sendErr(errTxt)
c.Errorf(logTxt)
}
func (c *client) replySubjectViolation(reply []byte) {
c.sendErr(fmt.Sprintf("Permissions Violation for Publish with Reply of %q", reply))
c.Errorf("Publish Violation - %s, Reply %q", c.getAuthUser(), reply)
}
func (c *client) processPingTimer() {
c.mu.Lock()
c.ping.tmr = nil
// Check if connection is still opened
if c.isClosed() {
c.mu.Unlock()
return
}
c.Debugf("%s Ping Timer", c.typeString())
// If we have had activity within the PingInterval then
// there is no need to send a ping. This can be client data
// or if we received a ping from the other side.
pingInterval := c.srv.getOpts().PingInterval
now := time.Now()
needRTT := c.rtt == 0 || now.Sub(c.rttStart) > DEFAULT_RTT_MEASUREMENT_INTERVAL
if delta := now.Sub(c.last); delta < pingInterval && !needRTT {
c.Debugf("Delaying PING due to client activity %v ago", delta.Round(time.Second))
} else if delta := now.Sub(c.ping.last); delta < pingInterval && !needRTT {
c.Debugf("Delaying PING due to remote ping %v ago", delta.Round(time.Second))
} else {
// Check for violation
if c.ping.out+1 > c.srv.getOpts().MaxPingsOut {
c.Debugf("Stale Client Connection - Closing")
c.enqueueProto([]byte(fmt.Sprintf(errProto, "Stale Connection")))
c.mu.Unlock()
c.closeConnection(StaleConnection)
return
}
// Send PING
c.sendPing()
}
// Reset to fire again.
c.setPingTimer()
c.mu.Unlock()
}
// Lock should be held
func (c *client) setPingTimer() {
if c.srv == nil {
return
}
d := c.srv.getOpts().PingInterval
c.ping.tmr = time.AfterFunc(d, c.processPingTimer)
}
// Lock should be held
func (c *client) clearPingTimer() {
if c.ping.tmr == nil {
return
}
c.ping.tmr.Stop()
c.ping.tmr = nil
}
// Lock should be held
func (c *client) setAuthTimer(d time.Duration) {
c.atmr = time.AfterFunc(d, c.authTimeout)
}
// Lock should be held
func (c *client) clearAuthTimer() bool {
if c.atmr == nil {
return true
}
stopped := c.atmr.Stop()
c.atmr = nil
return stopped
}
// We may reuse atmr for expiring user JWTs,
// so check connectReceived.
// Lock is assumed held on entry.
func (c *client) awaitingAuth() bool {
return !c.flags.isSet(connectReceived) && c.atmr != nil
}
// This will set the atmr for the JWT expiration time.
// We will lock on entry.
func (c *client) setExpirationTimer(d time.Duration) {
c.mu.Lock()
c.atmr = time.AfterFunc(d, c.authExpired)
c.mu.Unlock()
}
// Possibly flush the connection and then close the low level connection.
// The boolean `minimalFlush` indicates if the flush operation should have a
// minimal write deadline.
// Lock is held on entry.
func (c *client) flushAndClose(minimalFlush bool) {
if !c.flags.isSet(skipFlushOnClose) && c.out.pb > 0 {
if minimalFlush {
const lowWriteDeadline = 100 * time.Millisecond
// Reduce the write deadline if needed.
if c.out.wdl > lowWriteDeadline {
c.out.wdl = lowWriteDeadline
}
}
c.flushOutbound()
}
c.out.p, c.out.s = nil, nil
// Close the low level connection. WriteDeadline needs to be set
// in case this is a TLS connection.
if c.nc != nil {
c.nc.SetWriteDeadline(time.Now().Add(100 * time.Millisecond))
c.nc.Close()
}
}
func (c *client) typeString() string {
switch c.kind {
case CLIENT:
return "Client"
case ROUTER:
return "Router"
case GATEWAY:
return "Gateway"
case LEAF:
return "LeafNode"
}
return "Unknown Type"
}
// processSubsOnConfigReload removes any subscriptions the client has that are no
// longer authorized, and checks for imports (accounts) due to a config reload.
func (c *client) processSubsOnConfigReload(awcsti map[string]struct{}) {
c.mu.Lock()
var (
checkPerms = c.perms != nil
checkAcc = c.acc != nil
acc = c.acc
)
if !checkPerms && !checkAcc {
c.mu.Unlock()
return
}
var (
_subs [32]*subscription
subs = _subs[:0]
_removed [32]*subscription
removed = _removed[:0]
srv = c.srv
)
if checkAcc {
// We actually only want to check if stream imports have changed.
if _, ok := awcsti[acc.Name]; !ok {
checkAcc = false
}
}
// We will clear any mperms we have here. It will rebuild on the fly with canSubscribe,
// so we do that here as we collect them. We will check result down below.
c.mperms = nil
// Collect client's subs under the lock
for _, sub := range c.subs {
// We check canSubscribe here to rebuild mperms under the lock, collecting removed subs as we go.
// Subs are only collected into the subs array when canSubscribe and checkAcc are true.
canSub := c.canSubscribe(string(sub.subject))
canQSub := sub.queue != nil && c.canQueueSubscribe(string(sub.subject), string(sub.queue))
if !canSub && !canQSub {
removed = append(removed, sub)
} else if checkAcc {
subs = append(subs, sub)
}
}
c.mu.Unlock()
// This list is all subs who are allowed and we need to check accounts.
for _, sub := range subs {
c.mu.Lock()
oldShadows := sub.shadow
sub.shadow = nil
c.mu.Unlock()
c.addShadowSubscriptions(acc, sub)
for _, nsub := range oldShadows {
nsub.im.acc.sl.Remove(nsub)
}
}
// Unsubscribe all that need to be removed and report back to client and logs.
for _, sub := range removed {
c.unsubscribe(acc, sub, true, true)
c.sendErr(fmt.Sprintf("Permissions Violation for Subscription to %q (sid %q)",
sub.subject, sub.sid))
srv.Noticef("Removed sub %q (sid %q) for %s - not authorized",
sub.subject, sub.sid, c.getAuthUser())
}
}
// Allows us to count up all the queue subscribers during close.
type qsub struct {
sub *subscription
n int32
}
func (c *client) closeConnection(reason ClosedState) {
c.mu.Lock()
if c.nc == nil || c.flags.isSet(closeConnection) {
c.mu.Unlock()
return
}
// This will set the closeConnection flag and save the connection, etc..
// Will return true if no writeLoop was started and TCP connection was
// closed in place, in which case we need to do the teardown.
teardownNow := c.markConnAsClosed(reason, false)
c.mu.Unlock()
if teardownNow {
c.teardownConn()
}
}
// Clear the state of this connection and remove it from the server.
// If the connection was initiated (such as ROUTE, GATEWAY, etc..) this may trigger
// a reconnect. This function MUST be called only once per connection. It normally
// happens when the writeLoop returns, or in closeConnection() if no writeLoop has
// been started.
func (c *client) teardownConn() {
c.mu.Lock()
// Be consistent with the creation: for routes and gateways,
// we use Noticef on create, so use that too for delete.
if c.kind == ROUTER || c.kind == GATEWAY {
c.Noticef("%s connection closed", c.typeString())
} else { // Client and Leaf Node connections.
c.Debugf("%s connection closed", c.typeString())
}
c.clearAuthTimer()
c.clearPingTimer()
// Unblock anyone who is potentially stalled waiting on us.
if c.out.stc != nil {
close(c.out.stc)
c.out.stc = nil
}
c.nc = nil
var (
retryImplicit bool
connectURLs []string
gwName string
gwIsOutbound bool
gwCfg *gatewayCfg
kind = c.kind
srv = c.srv
noReconnect = c.flags.isSet(noReconnect)
acc = c.acc
)
// Snapshot for use if we are a client connection.
// FIXME(dlc) - we can just stub in a new one for client
// and reference existing one.
var subs []*subscription
if kind == CLIENT || kind == LEAF {
var _subs [32]*subscription
subs = _subs[:0]
for _, sub := range c.subs {
// Auto-unsubscribe subscriptions must be unsubscribed forcibly.
sub.max = 0
sub.close()
subs = append(subs, sub)
}
}
if c.route != nil {
if !noReconnect {
retryImplicit = c.route.retry
}
connectURLs = c.route.connectURLs
}
if kind == GATEWAY {
gwName = c.gw.name
gwIsOutbound = c.gw.outbound
gwCfg = c.gw.cfg
}
c.mu.Unlock()
// Remove client's or leaf node subscriptions.
if (kind == CLIENT || kind == LEAF) && acc != nil {
acc.sl.RemoveBatch(subs)
} else if kind == ROUTER {
go c.removeRemoteSubs()
}
if srv != nil {
// This is a route that disconnected, but we are not in lame duck mode...
if len(connectURLs) > 0 && !srv.isLameDuckMode() {
// Unless disabled, possibly update the server's INFO protocol
// and send to clients that know how to handle async INFOs.
if !srv.getOpts().Cluster.NoAdvertise {
srv.removeClientConnectURLsAndSendINFOToClients(connectURLs)
}
}
// Unregister
srv.removeClient(c)
// Update remote subscriptions.
if acc != nil && (kind == CLIENT || kind == LEAF) {
qsubs := map[string]*qsub{}
for _, sub := range subs {
// Call unsubscribe here to cleanup shadow subscriptions and such.
c.unsubscribe(acc, sub, true, false)
// Update route as normal for a normal subscriber.
if sub.queue == nil {
srv.updateRouteSubscriptionMap(acc, sub, -1)
} else {
// We handle queue subscribers specially: in case we
// have a bunch, we can just send one update to the
// connected routes.
key := string(sub.subject) + " " + string(sub.queue)
if esub, ok := qsubs[key]; ok {
esub.n++
} else {
qsubs[key] = &qsub{sub, 1}
}
}
if srv.gateway.enabled {
srv.gatewayUpdateSubInterest(acc.Name, sub, -1)
}
// Now check on leafnode updates.
srv.updateLeafNodes(acc, sub, -1)
}
// Process any qsubs here.
for _, esub := range qsubs {
srv.updateRouteSubscriptionMap(acc, esub.sub, -(esub.n))
srv.updateLeafNodes(acc, esub.sub, -(esub.n))
}
if prev := acc.removeClient(c); prev == 1 && srv != nil {
srv.decActiveAccounts()
}
}
}
// Don't reconnect connections that have been marked with
// the no reconnect flag.
if noReconnect {
return
}
// Check for a solicited route. If it was, start up a reconnect unless
// we are already connected to the other end.
if c.isSolicitedRoute() || retryImplicit {
// Capture these under lock
c.mu.Lock()
rid := c.route.remoteID
rtype := c.route.routeType
rurl := c.route.url
c.mu.Unlock()
srv.mu.Lock()
defer srv.mu.Unlock()
// It is possible that the server is being shut down.
// If so, don't try to reconnect.
if !srv.running {
return
}
if rid != "" && srv.remotes[rid] != nil {
srv.Debugf("Not attempting reconnect for solicited route, already connected to \"%s\"", rid)
return
} else if rid == srv.info.ID {
srv.Debugf("Detected route to self, ignoring \"%s\"", rurl)
return
} else if rtype != Implicit || retryImplicit {
srv.Debugf("Attempting reconnect for solicited route \"%s\"", rurl)
// Keep track of this go-routine so we can wait for it on
// server shutdown.
srv.startGoRoutine(func() { srv.reConnectToRoute(rurl, rtype) })
}
} else if srv != nil && kind == GATEWAY && gwIsOutbound {
if gwCfg != nil {
srv.Debugf("Attempting reconnect for gateway %q", gwName)
// Run this as a go routine since we may be called within
// the solicitGateway itself if there was an error during
// the creation of the gateway connection.
srv.startGoRoutine(func() { srv.reconnectGateway(gwCfg) })
} else {
srv.Debugf("Gateway %q not in configuration, not attempting reconnect", gwName)
}
} else if c.isSolicitedLeafNode() {
// Check if this is a solicited leaf node. Start up a reconnect.
srv.startGoRoutine(func() { srv.reConnectToRemoteLeafNode(c.leaf.remote) })
}
}
// Set the noReconnect flag. This is used before a call to closeConnection()
// to prevent the connection to reconnect (routes, gateways).
func (c *client) setNoReconnect() {
c.mu.Lock()
c.flags.set(noReconnect)
c.mu.Unlock()
}
// Returns the client's RTT value with the protection of the client's lock.
func (c *client) getRTTValue() time.Duration {
c.mu.Lock()
rtt := c.rtt
c.mu.Unlock()
return rtt
}
// This function is used by ROUTER and GATEWAY connections to
// look for a subject on a given account (since these types of
// connections are not bound to a specific account).
// If c.pa.subject is found in the cache, the cached result
// is returned; otherwise, we match the account's sublist and update
// the cache. The cache is pruned if it reaches a certain size.
func (c *client) getAccAndResultFromCache() (*Account, *SublistResult) {
var (
acc *Account
pac *perAccountCache
r *SublistResult
ok bool
)
// Check our cache.
if pac, ok = c.in.pacache[string(c.pa.pacache)]; ok {
// Check the genid to see if it's still valid.
if genid := atomic.LoadUint64(&pac.acc.sl.genid); genid != pac.genid {
ok = false
delete(c.in.pacache, string(c.pa.pacache))
} else {
acc = pac.acc
r = pac.results
}
}
if !ok {
// Match correct account and sublist.
if acc, _ = c.srv.LookupAccount(string(c.pa.account)); acc == nil {
return nil, nil
}
// Match against the account sublist.
r = acc.sl.Match(string(c.pa.subject))
// Store in our cache
c.in.pacache[string(c.pa.pacache)] = &perAccountCache{acc, r, atomic.LoadUint64(&acc.sl.genid)}
// Check if we need to prune.
if len(c.in.pacache) > maxPerAccountCacheSize {
c.prunePerAccountCache()
}
}
return acc, r
}
// Account will return the associated account for this client.
func (c *client) Account() *Account {
if c == nil {
return nil
}
c.mu.Lock()
defer c.mu.Unlock()
return c.acc
}
// prunePerAccountCache will prune off a random number of cache entries.
func (c *client) prunePerAccountCache() {
n := 0
for cacheKey := range c.in.pacache {
delete(c.in.pacache, cacheKey)
if n++; n > prunePerAccountCacheSize {
break
}
}
}
// pruneClosedSubFromPerAccountCache removes entries that contain subscriptions
// that have been closed.
func (c *client) pruneClosedSubFromPerAccountCache() {
for cacheKey, pac := range c.in.pacache {
for _, sub := range pac.results.psubs {
if sub.isClosed() {
goto REMOVE
}
}
for _, qsub := range pac.results.qsubs {
for _, sub := range qsub {
if sub.isClosed() {
goto REMOVE
}
}
}
continue
REMOVE:
delete(c.in.pacache, cacheKey)
}
}
// getAuthUser returns the auth user for the client.
func (c *client) getAuthUser() string {
switch {
case c.opts.Nkey != "":
return fmt.Sprintf("Nkey %q", c.opts.Nkey)
case c.opts.Username != "":
return fmt.Sprintf("User %q", c.opts.Username)
default:
return `User "N/A"`
}
}
// isClosed returns true if either the closeConnection or clearConnection
// flag has been set, or if `nc` is nil, which may happen in tests.
func (c *client) isClosed() bool {
return c.flags.isSet(closeConnection) || c.nc == nil
}
// Logging functionality scoped to a client or route.
func (c *client) Error(err error) {
c.srv.Errors(c, err)
}
func (c *client) Errorf(format string, v ...interface{}) {
format = fmt.Sprintf("%s - %s", c, format)
c.srv.Errorf(format, v...)
}
func (c *client) Debugf(format string, v ...interface{}) {
format = fmt.Sprintf("%s - %s", c, format)
c.srv.Debugf(format, v...)
}
func (c *client) Noticef(format string, v ...interface{}) {
format = fmt.Sprintf("%s - %s", c, format)
c.srv.Noticef(format, v...)
}
func (c *client) Tracef(format string, v ...interface{}) {
format = fmt.Sprintf("%s - %s", c, format)
c.srv.Tracef(format, v...)
}
func (c *client) Warnf(format string, v ...interface{}) {
format = fmt.Sprintf("%s - %s", c, format)
c.srv.Warnf(format, v...)
}
| 1 | 10,130 | Same trick here IMO. | nats-io-nats-server | go |
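The pruning helpers in the file above (pruneDenyCache, prunePubPermsCache, prunePerAccountCache, and the inline results-cache prune in processInboundClientMsg) all share one idiom: delete a bounded batch of map entries per pass, relying on Go's randomized map iteration order to make the pick approximately random. A minimal, self-contained sketch of that idiom (all names here are illustrative, not from the source):

package main

import "fmt"

const pruneSize = 32 // illustrative batch bound, mirroring the source's constant of the same name

// pruneCache deletes a bounded batch of entries from cache. Go's map
// iteration order is randomized, so the victims are roughly random.
func pruneCache(cache map[string]bool) {
	n := 0
	for k := range cache {
		delete(cache, k)
		if n++; n > pruneSize {
			break
		}
	}
}

func main() {
	cache := make(map[string]bool, 100)
	for i := 0; i < 100; i++ {
		cache[fmt.Sprintf("subj.%d", i)] = true
	}
	pruneCache(cache)
	fmt.Println(len(cache)) // 100 minus the pruned batch
}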
@@ -108,10 +108,11 @@ module Selenium
def initialize(source, duration, x, y, element: nil, origin: nil)
super(source)
+
@duration = duration * 1000
@x_offset = x
@y_offset = y
- @origin = element || origin
+ @origin = origin || POINTER
end
def type | 1 | # frozen_string_literal: true
# Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
module Selenium
module WebDriver
module Interactions
class PointerInput < InputDevice
KIND = {mouse: :mouse, pen: :pen, touch: :touch}.freeze
attr_reader :kind
def initialize(kind, name: nil)
super(name)
@kind = assert_kind(kind)
end
def type
Interactions::POINTER
end
def encode
return nil if no_actions?
output = {type: type, id: name, actions: @actions.map(&:encode)}
output[:parameters] = {pointerType: kind}
output
end
def assert_kind(pointer)
raise TypeError, "#{pointer.inspect} is not a valid pointer type" unless KIND.key? pointer
KIND[pointer]
end
def create_pointer_move(duration: 0, x: 0, y: 0, element: nil, origin: nil)
add_action(PointerMove.new(self, duration, x, y, element: element, origin: origin))
end
def create_pointer_down(button)
add_action(PointerPress.new(self, :down, button))
end
def create_pointer_up(button)
add_action(PointerPress.new(self, :up, button))
end
def create_pointer_cancel
add_action(PointerCancel.new(self))
end
end # PointerInput
class PointerPress < Interaction
BUTTONS = {left: 0, middle: 1, right: 2}.freeze
DIRECTIONS = {down: :pointerDown, up: :pointerUp}.freeze
def initialize(source, direction, button)
super(source)
@direction = assert_direction(direction)
@button = assert_button(button)
end
def type
@direction
end
def assert_button(button)
if button.is_a? Symbol
raise TypeError, "#{button.inspect} is not a valid button!" unless BUTTONS.key? button
button = BUTTONS[button]
end
raise ArgumentError, 'Button number cannot be negative!' unless button >= 0
button
end
def assert_direction(direction)
raise TypeError, "#{direction.inspect} is not a valid button direction" unless DIRECTIONS.key? direction
DIRECTIONS[direction]
end
def encode
{type: type, button: @button}
end
end # PointerPress
class PointerMove < Interaction
VIEWPORT = :viewport
POINTER = :pointer
ORIGINS = [VIEWPORT, POINTER].freeze
def initialize(source, duration, x, y, element: nil, origin: nil)
super(source)
@duration = duration * 1000
@x_offset = x
@y_offset = y
@origin = element || origin
end
def type
:pointerMove
end
def encode
output = {type: type, duration: @duration.to_i, x: @x_offset, y: @y_offset}
output[:origin] = @origin
output
end
end # Move
class PointerCancel < Interaction
def type
:pointerCancel
end
def encode
{type: type}
end
end # Cancel
end # Interactions
end # WebDriver
end # Selenium
| 1 | 17,344 | Should be `@origin = element || origin || POINTER` | SeleniumHQ-selenium | rb |
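The review message above spells out the intended fix: the patch as written drops the element argument entirely, while the suggestion keeps it as the first fallback before origin and the POINTER default. A sketch of the constructor with that suggestion applied (this is the reviewer's proposal, not necessarily the merged code):

def initialize(source, duration, x, y, element: nil, origin: nil)
  super(source)

  @duration = duration * 1000
  @x_offset = x
  @y_offset = y
  # Prefer an explicit element, then an explicit origin, then default
  # to the pointer's own position, per the review suggestion.
  @origin = element || origin || POINTER
end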
@@ -50,11 +50,14 @@ func NewUpgradeCStorSPCJob() *cobra.Command {
Long: cstorSPCUpgradeCmdHelpText,
Example: `upgrade cstor-spc --spc-name <spc-name>`,
Run: func(cmd *cobra.Command, args []string) {
+ util.CheckErr(options.RunCStorSPCUpgradeChecks(cmd, args), util.Fatal)
options.resourceKind = "storagePoolClaim"
- util.CheckErr(options.RunPreFlightChecks(cmd), util.Fatal)
- util.CheckErr(options.RunCStorSPCUpgradeChecks(cmd), util.Fatal)
- util.CheckErr(options.InitializeDefaults(cmd), util.Fatal)
- util.CheckErr(options.RunCStorSPCUpgrade(cmd), util.Fatal)
+ if options.cstorSPC.spcName != "" {
+ singleCstorSPCUpgrade(cmd)
+ }
+ if len(args) != 0 {
+ bulkCstorSPCUpgrade(cmd, args)
+ }
},
}
| 1 | /*
Copyright 2019 The OpenEBS Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package executor
import (
"strings"
"github.com/openebs/maya/pkg/util"
"github.com/spf13/cobra"
"k8s.io/klog"
apis "github.com/openebs/maya/pkg/apis/openebs.io/v1alpha1"
upgrader "github.com/openebs/maya/pkg/upgrade/upgrader"
errors "github.com/pkg/errors"
)
// CStorSPCOptions stores information required for cstor SPC upgrade
type CStorSPCOptions struct {
spcName string
}
var (
cstorSPCUpgradeCmdHelpText = `
This command upgrades the cStor SPC
Usage: upgrade cstor-spc --spc-name <spc-name> --options...
`
)
// NewUpgradeCStorSPCJob upgrades all the cStor Pools associated with
// a given Storage Pool Claim
func NewUpgradeCStorSPCJob() *cobra.Command {
cmd := &cobra.Command{
Use: "cstor-spc",
Short: "Upgrade cStor SPC",
Long: cstorSPCUpgradeCmdHelpText,
Example: `upgrade cstor-spc --spc-name <spc-name>`,
Run: func(cmd *cobra.Command, args []string) {
options.resourceKind = "storagePoolClaim"
util.CheckErr(options.RunPreFlightChecks(cmd), util.Fatal)
util.CheckErr(options.RunCStorSPCUpgradeChecks(cmd), util.Fatal)
util.CheckErr(options.InitializeDefaults(cmd), util.Fatal)
util.CheckErr(options.RunCStorSPCUpgrade(cmd), util.Fatal)
},
}
cmd.Flags().StringVarP(&options.cstorSPC.spcName,
"spc-name", "",
options.cstorSPC.spcName,
"cstor SPC name to be upgraded. Run \"kubectl get spc\", to get spc-name")
return cmd
}
// RunCStorSPCUpgradeChecks will ensure the sanity of the cstor SPC upgrade options
func (u *UpgradeOptions) RunCStorSPCUpgradeChecks(cmd *cobra.Command) error {
if len(strings.TrimSpace(u.cstorSPC.spcName)) == 0 {
return errors.Errorf("Cannot execute upgrade job: cstor spc name is missing")
}
return nil
}
// RunCStorSPCUpgrade upgrades the given cStor SPC.
func (u *UpgradeOptions) RunCStorSPCUpgrade(cmd *cobra.Command) error {
if apis.IsCurrentVersionValid(u.fromVersion) && apis.IsDesiredVersionValid(u.toVersion) {
klog.Infof("Upgrading to %s", u.toVersion)
err := upgrader.Exec(u.fromVersion, u.toVersion,
u.resourceKind,
u.cstorSPC.spcName,
u.openebsNamespace,
u.imageURLPrefix,
u.toVersionImageTag)
if err != nil {
klog.Error(err)
return errors.Errorf("Failed to upgrade cStor SPC %v:", u.cstorSPC.spcName)
}
} else {
return errors.Errorf("Invalid from version %s or to version %s", u.fromVersion, u.toVersion)
}
return nil
}
| 1 | 18,386 | this example needs a fix? | openebs-maya | go |
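The question above ("this example needs a fix?") appears to point at the cobra Example string: the patch adds a bulk path driven by positional arguments, but Example still documents only the --spc-name form. A sketch of what an updated command definition might look like (the exact wording is illustrative; it was not part of this patch):

package executor

import "github.com/spf13/cobra"

// newUpgradeCStorSPCJobExample shows an Example string covering both
// invocation forms introduced by the patch above.
func newUpgradeCStorSPCJobExample() *cobra.Command {
	return &cobra.Command{
		Use:   "cstor-spc",
		Short: "Upgrade cStor SPC",
		Example: `upgrade cstor-spc --spc-name <spc-name>
upgrade cstor-spc <spc-name-1> <spc-name-2> ... <spc-name-n>`,
		Run: func(cmd *cobra.Command, args []string) {},
	}
}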
@@ -248,9 +248,9 @@ static void close(struct roots_view *view) {
struct wlr_xdg_surface *surface = view->xdg_surface;
struct wlr_xdg_popup *popup = NULL;
wl_list_for_each(popup, &surface->popups, link) {
- wlr_xdg_surface_send_close(popup->base);
+ wlr_xdg_popup_destroy(popup->base);
}
- wlr_xdg_surface_send_close(surface);
+ wlr_xdg_toplevel_send_close(surface);
}
static void destroy(struct roots_view *view) { | 1 | #include <assert.h>
#include <stdbool.h>
#include <stdlib.h>
#include <wayland-server.h>
#include <wlr/types/wlr_box.h>
#include <wlr/types/wlr_surface.h>
#include <wlr/types/wlr_xdg_shell.h>
#include <wlr/util/log.h>
#include "rootston/cursor.h"
#include "rootston/desktop.h"
#include "rootston/input.h"
#include "rootston/server.h"
static void popup_destroy(struct roots_view_child *child) {
assert(child->destroy == popup_destroy);
struct roots_xdg_popup *popup = (struct roots_xdg_popup *)child;
if (popup == NULL) {
return;
}
wl_list_remove(&popup->destroy.link);
wl_list_remove(&popup->new_popup.link);
wl_list_remove(&popup->map.link);
wl_list_remove(&popup->unmap.link);
view_child_finish(&popup->view_child);
free(popup);
}
static void popup_handle_destroy(struct wl_listener *listener, void *data) {
struct roots_xdg_popup *popup =
wl_container_of(listener, popup, destroy);
popup_destroy((struct roots_view_child *)popup);
}
static void popup_handle_map(struct wl_listener *listener, void *data) {
struct roots_xdg_popup *popup = wl_container_of(listener, popup, map);
view_damage_whole(popup->view_child.view);
input_update_cursor_focus(popup->view_child.view->desktop->server->input);
}
static void popup_handle_unmap(struct wl_listener *listener, void *data) {
struct roots_xdg_popup *popup = wl_container_of(listener, popup, unmap);
view_damage_whole(popup->view_child.view);
}
static struct roots_xdg_popup *popup_create(struct roots_view *view,
struct wlr_xdg_popup *wlr_popup);
static void popup_handle_new_popup(struct wl_listener *listener, void *data) {
struct roots_xdg_popup *popup =
wl_container_of(listener, popup, new_popup);
struct wlr_xdg_popup *wlr_popup = data;
popup_create(popup->view_child.view, wlr_popup);
}
static void popup_unconstrain(struct roots_xdg_popup *popup) {
// get the output at the popup's positioner anchor point, convert it to
// the toplevel parent's coordinate system, and then pass it to
// wlr_xdg_popup_unconstrain_from_box
// TODO: unconstrain popups for rotated windows
if (popup->view_child.view->rotation != 0.0) {
return;
}
struct roots_view *view = popup->view_child.view;
struct wlr_output_layout *layout = view->desktop->layout;
struct wlr_xdg_popup *wlr_popup = popup->wlr_popup;
int anchor_lx, anchor_ly;
wlr_xdg_popup_get_anchor_point(wlr_popup, &anchor_lx, &anchor_ly);
int popup_lx, popup_ly;
wlr_xdg_popup_get_toplevel_coords(wlr_popup, wlr_popup->geometry.x,
wlr_popup->geometry.y, &popup_lx, &popup_ly);
popup_lx += view->box.x;
popup_ly += view->box.y;
anchor_lx += popup_lx;
anchor_ly += popup_ly;
double dest_x = 0, dest_y = 0;
wlr_output_layout_closest_point(layout, NULL, anchor_lx, anchor_ly,
&dest_x, &dest_y);
struct wlr_output *output =
wlr_output_layout_output_at(layout, dest_x, dest_y);
if (output == NULL) {
return;
}
int width = 0, height = 0;
wlr_output_effective_resolution(output, &width, &height);
// the output box expressed in the coordinate system of the toplevel parent
// of the popup
struct wlr_box output_toplevel_sx_box = {
.x = output->lx - view->box.x,
.y = output->ly - view->box.y,
.width = width,
.height = height
};
wlr_xdg_popup_unconstrain_from_box(
popup->wlr_popup, &output_toplevel_sx_box);
}
static struct roots_xdg_popup *popup_create(struct roots_view *view,
struct wlr_xdg_popup *wlr_popup) {
struct roots_xdg_popup *popup =
calloc(1, sizeof(struct roots_xdg_popup));
if (popup == NULL) {
return NULL;
}
popup->wlr_popup = wlr_popup;
popup->view_child.destroy = popup_destroy;
view_child_init(&popup->view_child, view, wlr_popup->base->surface);
popup->destroy.notify = popup_handle_destroy;
wl_signal_add(&wlr_popup->base->events.destroy, &popup->destroy);
popup->map.notify = popup_handle_map;
wl_signal_add(&wlr_popup->base->events.map, &popup->map);
popup->unmap.notify = popup_handle_unmap;
wl_signal_add(&wlr_popup->base->events.unmap, &popup->unmap);
popup->new_popup.notify = popup_handle_new_popup;
wl_signal_add(&wlr_popup->base->events.new_popup, &popup->new_popup);
popup_unconstrain(popup);
return popup;
}
static void get_size(const struct roots_view *view, struct wlr_box *box) {
assert(view->type == ROOTS_XDG_SHELL_VIEW);
struct wlr_xdg_surface *surface = view->xdg_surface;
struct wlr_box geo_box;
wlr_xdg_surface_get_geometry(surface, &geo_box);
box->width = geo_box.width;
box->height = geo_box.height;
}
static void activate(struct roots_view *view, bool active) {
assert(view->type == ROOTS_XDG_SHELL_VIEW);
struct wlr_xdg_surface *surface = view->xdg_surface;
if (surface->role == WLR_XDG_SURFACE_ROLE_TOPLEVEL) {
wlr_xdg_toplevel_set_activated(surface, active);
}
}
static void apply_size_constraints(struct wlr_xdg_surface *surface,
uint32_t width, uint32_t height, uint32_t *dest_width,
uint32_t *dest_height) {
*dest_width = width;
*dest_height = height;
struct wlr_xdg_toplevel_state *state = &surface->toplevel->current;
if (width < state->min_width) {
*dest_width = state->min_width;
} else if (state->max_width > 0 &&
width > state->max_width) {
*dest_width = state->max_width;
}
if (height < state->min_height) {
*dest_height = state->min_height;
} else if (state->max_height > 0 &&
height > state->max_height) {
*dest_height = state->max_height;
}
}
static void resize(struct roots_view *view, uint32_t width, uint32_t height) {
assert(view->type == ROOTS_XDG_SHELL_VIEW);
struct wlr_xdg_surface *surface = view->xdg_surface;
if (surface->role != WLR_XDG_SURFACE_ROLE_TOPLEVEL) {
return;
}
uint32_t constrained_width, constrained_height;
apply_size_constraints(surface, width, height, &constrained_width,
&constrained_height);
wlr_xdg_toplevel_set_size(surface, constrained_width,
constrained_height);
}
static void move_resize(struct roots_view *view, double x, double y,
uint32_t width, uint32_t height) {
assert(view->type == ROOTS_XDG_SHELL_VIEW);
struct roots_xdg_surface *roots_surface = view->roots_xdg_surface;
struct wlr_xdg_surface *surface = view->xdg_surface;
if (surface->role != WLR_XDG_SURFACE_ROLE_TOPLEVEL) {
return;
}
bool update_x = x != view->box.x;
bool update_y = y != view->box.y;
uint32_t constrained_width, constrained_height;
apply_size_constraints(surface, width, height, &constrained_width,
&constrained_height);
if (update_x) {
x = x + width - constrained_width;
}
if (update_y) {
y = y + height - constrained_height;
}
view->pending_move_resize.update_x = update_x;
view->pending_move_resize.update_y = update_y;
view->pending_move_resize.x = x;
view->pending_move_resize.y = y;
view->pending_move_resize.width = constrained_width;
view->pending_move_resize.height = constrained_height;
uint32_t serial = wlr_xdg_toplevel_set_size(surface, constrained_width,
constrained_height);
if (serial > 0) {
roots_surface->pending_move_resize_configure_serial = serial;
} else if (roots_surface->pending_move_resize_configure_serial == 0) {
view_update_position(view, x, y);
}
}
static void maximize(struct roots_view *view, bool maximized) {
assert(view->type == ROOTS_XDG_SHELL_VIEW);
struct wlr_xdg_surface *surface = view->xdg_surface;
if (surface->role != WLR_XDG_SURFACE_ROLE_TOPLEVEL) {
return;
}
wlr_xdg_toplevel_set_maximized(surface, maximized);
}
static void set_fullscreen(struct roots_view *view, bool fullscreen) {
assert(view->type == ROOTS_XDG_SHELL_VIEW);
struct wlr_xdg_surface *surface = view->xdg_surface;
if (surface->role != WLR_XDG_SURFACE_ROLE_TOPLEVEL) {
return;
}
wlr_xdg_toplevel_set_fullscreen(surface, fullscreen);
}
static void close(struct roots_view *view) {
assert(view->type == ROOTS_XDG_SHELL_VIEW);
struct wlr_xdg_surface *surface = view->xdg_surface;
struct wlr_xdg_popup *popup = NULL;
wl_list_for_each(popup, &surface->popups, link) {
wlr_xdg_surface_send_close(popup->base);
}
wlr_xdg_surface_send_close(surface);
}
static void destroy(struct roots_view *view) {
assert(view->type == ROOTS_XDG_SHELL_VIEW);
struct roots_xdg_surface *roots_xdg_surface = view->roots_xdg_surface;
wl_list_remove(&roots_xdg_surface->surface_commit.link);
wl_list_remove(&roots_xdg_surface->destroy.link);
wl_list_remove(&roots_xdg_surface->new_popup.link);
wl_list_remove(&roots_xdg_surface->map.link);
wl_list_remove(&roots_xdg_surface->unmap.link);
wl_list_remove(&roots_xdg_surface->request_move.link);
wl_list_remove(&roots_xdg_surface->request_resize.link);
wl_list_remove(&roots_xdg_surface->request_maximize.link);
wl_list_remove(&roots_xdg_surface->request_fullscreen.link);
wl_list_remove(&roots_xdg_surface->set_title.link);
wl_list_remove(&roots_xdg_surface->set_app_id.link);
roots_xdg_surface->view->xdg_surface->data = NULL;
free(roots_xdg_surface);
}
static void handle_request_move(struct wl_listener *listener, void *data) {
struct roots_xdg_surface *roots_xdg_surface =
wl_container_of(listener, roots_xdg_surface, request_move);
struct roots_view *view = roots_xdg_surface->view;
struct roots_input *input = view->desktop->server->input;
struct wlr_xdg_toplevel_move_event *e = data;
struct roots_seat *seat = input_seat_from_wlr_seat(input, e->seat->seat);
// TODO verify event serial
if (!seat || seat->cursor->mode != ROOTS_CURSOR_PASSTHROUGH) {
return;
}
roots_seat_begin_move(seat, view);
}
static void handle_request_resize(struct wl_listener *listener, void *data) {
struct roots_xdg_surface *roots_xdg_surface =
wl_container_of(listener, roots_xdg_surface, request_resize);
struct roots_view *view = roots_xdg_surface->view;
struct roots_input *input = view->desktop->server->input;
struct wlr_xdg_toplevel_resize_event *e = data;
// TODO verify event serial
struct roots_seat *seat = input_seat_from_wlr_seat(input, e->seat->seat);
assert(seat);
if (!seat || seat->cursor->mode != ROOTS_CURSOR_PASSTHROUGH) {
return;
}
roots_seat_begin_resize(seat, view, e->edges);
}
static void handle_request_maximize(struct wl_listener *listener, void *data) {
struct roots_xdg_surface *roots_xdg_surface =
wl_container_of(listener, roots_xdg_surface, request_maximize);
struct roots_view *view = roots_xdg_surface->view;
struct wlr_xdg_surface *surface = view->xdg_surface;
if (surface->role != WLR_XDG_SURFACE_ROLE_TOPLEVEL) {
return;
}
view_maximize(view, surface->toplevel->client_pending.maximized);
}
static void handle_request_fullscreen(struct wl_listener *listener,
void *data) {
struct roots_xdg_surface *roots_xdg_surface =
wl_container_of(listener, roots_xdg_surface, request_fullscreen);
struct roots_view *view = roots_xdg_surface->view;
struct wlr_xdg_surface *surface = view->xdg_surface;
struct wlr_xdg_toplevel_set_fullscreen_event *e = data;
if (surface->role != WLR_XDG_SURFACE_ROLE_TOPLEVEL) {
return;
}
view_set_fullscreen(view, e->fullscreen, e->output);
}
static void handle_set_title(struct wl_listener *listener, void *data) {
struct roots_xdg_surface *roots_xdg_surface =
wl_container_of(listener, roots_xdg_surface, set_title);
view_set_title(roots_xdg_surface->view,
roots_xdg_surface->view->xdg_surface->toplevel->title);
}
static void handle_set_app_id(struct wl_listener *listener, void *data) {
struct roots_xdg_surface *roots_xdg_surface =
wl_container_of(listener, roots_xdg_surface, set_app_id);
view_set_app_id(roots_xdg_surface->view,
roots_xdg_surface->view->xdg_surface->toplevel->app_id);
}
static void handle_surface_commit(struct wl_listener *listener, void *data) {
struct roots_xdg_surface *roots_surface =
wl_container_of(listener, roots_surface, surface_commit);
struct roots_view *view = roots_surface->view;
struct wlr_xdg_surface *surface = view->xdg_surface;
if (!surface->mapped) {
return;
}
view_apply_damage(view);
struct wlr_box size;
get_size(view, &size);
view_update_size(view, size.width, size.height);
uint32_t pending_serial =
roots_surface->pending_move_resize_configure_serial;
if (pending_serial > 0 && pending_serial >= surface->configure_serial) {
double x = view->box.x;
double y = view->box.y;
if (view->pending_move_resize.update_x) {
x = view->pending_move_resize.x + view->pending_move_resize.width -
size.width;
}
if (view->pending_move_resize.update_y) {
y = view->pending_move_resize.y + view->pending_move_resize.height -
size.height;
}
view_update_position(view, x, y);
if (pending_serial == surface->configure_serial) {
roots_surface->pending_move_resize_configure_serial = 0;
}
}
}
static void handle_new_popup(struct wl_listener *listener, void *data) {
struct roots_xdg_surface *roots_xdg_surface =
wl_container_of(listener, roots_xdg_surface, new_popup);
struct wlr_xdg_popup *wlr_popup = data;
popup_create(roots_xdg_surface->view, wlr_popup);
}
static void handle_map(struct wl_listener *listener, void *data) {
struct roots_xdg_surface *roots_xdg_surface =
wl_container_of(listener, roots_xdg_surface, map);
struct roots_view *view = roots_xdg_surface->view;
struct wlr_box box;
get_size(view, &box);
view->box.width = box.width;
view->box.height = box.height;
view_map(view, view->xdg_surface->surface);
view_setup(view);
wlr_foreign_toplevel_handle_v1_set_title(view->toplevel_handle,
view->xdg_surface->toplevel->title ?: "none");
wlr_foreign_toplevel_handle_v1_set_app_id(view->toplevel_handle,
view->xdg_surface->toplevel->app_id ?: "none");
}
static void handle_unmap(struct wl_listener *listener, void *data) {
struct roots_xdg_surface *roots_xdg_surface =
wl_container_of(listener, roots_xdg_surface, unmap);
view_unmap(roots_xdg_surface->view);
}
static void handle_destroy(struct wl_listener *listener, void *data) {
struct roots_xdg_surface *roots_xdg_surface =
wl_container_of(listener, roots_xdg_surface, destroy);
view_destroy(roots_xdg_surface->view);
}
void handle_xdg_shell_surface(struct wl_listener *listener, void *data) {
struct wlr_xdg_surface *surface = data;
assert(surface->role != WLR_XDG_SURFACE_ROLE_NONE);
if (surface->role == WLR_XDG_SURFACE_ROLE_POPUP) {
wlr_log(WLR_DEBUG, "new xdg popup");
return;
}
struct roots_desktop *desktop =
wl_container_of(listener, desktop, xdg_shell_surface);
wlr_log(WLR_DEBUG, "new xdg toplevel: title=%s, app_id=%s",
surface->toplevel->title, surface->toplevel->app_id);
wlr_xdg_surface_ping(surface);
struct roots_xdg_surface *roots_surface =
calloc(1, sizeof(struct roots_xdg_surface));
if (!roots_surface) {
return;
}
roots_surface->surface_commit.notify = handle_surface_commit;
wl_signal_add(&surface->surface->events.commit,
&roots_surface->surface_commit);
roots_surface->destroy.notify = handle_destroy;
wl_signal_add(&surface->events.destroy, &roots_surface->destroy);
roots_surface->map.notify = handle_map;
wl_signal_add(&surface->events.map, &roots_surface->map);
roots_surface->unmap.notify = handle_unmap;
wl_signal_add(&surface->events.unmap, &roots_surface->unmap);
roots_surface->request_move.notify = handle_request_move;
wl_signal_add(&surface->toplevel->events.request_move,
&roots_surface->request_move);
roots_surface->request_resize.notify = handle_request_resize;
wl_signal_add(&surface->toplevel->events.request_resize,
&roots_surface->request_resize);
roots_surface->request_maximize.notify = handle_request_maximize;
wl_signal_add(&surface->toplevel->events.request_maximize,
&roots_surface->request_maximize);
roots_surface->request_fullscreen.notify = handle_request_fullscreen;
wl_signal_add(&surface->toplevel->events.request_fullscreen,
&roots_surface->request_fullscreen);
roots_surface->set_title.notify = handle_set_title;
wl_signal_add(&surface->toplevel->events.set_title, &roots_surface->set_title);
roots_surface->set_app_id.notify = handle_set_app_id;
wl_signal_add(&surface->toplevel->events.set_app_id,
&roots_surface->set_app_id);
roots_surface->new_popup.notify = handle_new_popup;
wl_signal_add(&surface->events.new_popup, &roots_surface->new_popup);
surface->data = roots_surface;
struct roots_view *view = view_create(desktop);
if (!view) {
free(roots_surface);
return;
}
view->type = ROOTS_XDG_SHELL_VIEW;
view->xdg_surface = surface;
view->roots_xdg_surface = roots_surface;
view->activate = activate;
view->resize = resize;
view->move_resize = move_resize;
view->maximize = maximize;
view->set_fullscreen = set_fullscreen;
view->close = close;
view->destroy = destroy;
roots_surface->view = view;
if (surface->toplevel->client_pending.maximized) {
view_maximize(view, true);
}
if (surface->toplevel->client_pending.fullscreen) {
view_set_fullscreen(view, true, NULL);
}
}
static void decoration_handle_destroy(struct wl_listener *listener,
void *data) {
struct roots_xdg_toplevel_decoration *decoration =
wl_container_of(listener, decoration, destroy);
decoration->surface->xdg_toplevel_decoration = NULL;
view_update_decorated(decoration->surface->view, false);
wl_list_remove(&decoration->destroy.link);
wl_list_remove(&decoration->request_mode.link);
wl_list_remove(&decoration->surface_commit.link);
free(decoration);
}
static void decoration_handle_request_mode(struct wl_listener *listener,
void *data) {
struct roots_xdg_toplevel_decoration *decoration =
wl_container_of(listener, decoration, request_mode);
enum wlr_xdg_toplevel_decoration_v1_mode mode =
decoration->wlr_decoration->client_pending_mode;
if (mode == WLR_XDG_TOPLEVEL_DECORATION_V1_MODE_NONE) {
mode = WLR_XDG_TOPLEVEL_DECORATION_V1_MODE_CLIENT_SIDE;
}
wlr_xdg_toplevel_decoration_v1_set_mode(decoration->wlr_decoration, mode);
}
static void decoration_handle_surface_commit(struct wl_listener *listener,
void *data) {
struct roots_xdg_toplevel_decoration *decoration =
wl_container_of(listener, decoration, surface_commit);
bool decorated = decoration->wlr_decoration->current_mode ==
WLR_XDG_TOPLEVEL_DECORATION_V1_MODE_SERVER_SIDE;
view_update_decorated(decoration->surface->view, decorated);
}
void handle_xdg_toplevel_decoration(struct wl_listener *listener, void *data) {
struct wlr_xdg_toplevel_decoration_v1 *wlr_decoration = data;
wlr_log(WLR_DEBUG, "new xdg toplevel decoration");
struct roots_xdg_surface *xdg_surface = wlr_decoration->surface->data;
assert(xdg_surface != NULL);
struct wlr_xdg_surface *wlr_xdg_surface = xdg_surface->view->xdg_surface;
struct roots_xdg_toplevel_decoration *decoration =
calloc(1, sizeof(struct roots_xdg_toplevel_decoration));
if (decoration == NULL) {
return;
}
decoration->wlr_decoration = wlr_decoration;
decoration->surface = xdg_surface;
xdg_surface->xdg_toplevel_decoration = decoration;
decoration->destroy.notify = decoration_handle_destroy;
wl_signal_add(&wlr_decoration->events.destroy, &decoration->destroy);
decoration->request_mode.notify = decoration_handle_request_mode;
wl_signal_add(&wlr_decoration->events.request_mode,
&decoration->request_mode);
decoration->surface_commit.notify = decoration_handle_surface_commit;
wl_signal_add(&wlr_xdg_surface->surface->events.commit,
&decoration->surface_commit);
decoration_handle_request_mode(&decoration->request_mode, wlr_decoration);
}
| 1 | 13,188 | Is it safe to assume this surface is a toplevel? | swaywm-wlroots | c |
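The question above concerns the unguarded wlr_xdg_toplevel_send_close() call in the patch: unlike activate(), resize(), maximize() and the other handlers in this file, close() never checks surface->role before treating the surface as a toplevel. A sketch of the guarded variant the reviewer seems to be asking about, meant as a drop-in for the file above and following the role-check pattern it already uses:

static void close(struct roots_view *view) {
	assert(view->type == ROOTS_XDG_SHELL_VIEW);
	struct wlr_xdg_surface *surface = view->xdg_surface;
	// Only toplevels understand a close request; bail out for any
	// other role, mirroring activate()/resize() above.
	if (surface->role != WLR_XDG_SURFACE_ROLE_TOPLEVEL) {
		return;
	}
	struct wlr_xdg_popup *popup = NULL;
	wl_list_for_each(popup, &surface->popups, link) {
		wlr_xdg_popup_destroy(popup->base);
	}
	wlr_xdg_toplevel_send_close(surface);
}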
@@ -225,6 +225,16 @@ projCtx pj_ctx_alloc()
return new (std::nothrow) projCtx_t(*pj_get_default_ctx());
}
+/************************************************************************/
+/* pj_ctx_clone() */
+/************************************************************************/
+
+projCtx pj_ctx_clone( projCtx ctx )
+
+{
+ return new (std::nothrow) projCtx_t(*ctx);
+}
+
/************************************************************************/
/* pj_ctx_free() */
/************************************************************************/ | 1 | /******************************************************************************
* Project: PROJ.4
* Purpose: Implementation of the projCtx thread context object.
* Author: Frank Warmerdam, warmerdam@pobox.com
*
******************************************************************************
* Copyright (c) 2010, Frank Warmerdam
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included
* in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*****************************************************************************/
#ifndef FROM_PROJ_CPP
#define FROM_PROJ_CPP
#endif
#include <errno.h>
#include <stdlib.h>
#include <string.h>
#include <new>
#include "proj_experimental.h"
#include "proj_internal.h"
#include "filemanager.hpp"
#include "proj/internal/io_internal.hpp"
/************************************************************************/
/* pj_get_ctx() */
/************************************************************************/
projCtx pj_get_ctx( projPJ pj )
{
if (nullptr==pj)
return pj_get_default_ctx ();
if (nullptr==pj->ctx)
return pj_get_default_ctx ();
return pj->ctx;
}
/************************************************************************/
/* pj_set_ctx() */
/* */
/* Note we do not deallocate the old context! */
/************************************************************************/
void pj_set_ctx( projPJ pj, projCtx ctx )
{
if (pj==nullptr)
return;
pj->ctx = ctx;
if( pj->reassign_context )
{
pj->reassign_context(pj, ctx);
}
for( const auto &alt: pj->alternativeCoordinateOperations )
{
pj_set_ctx(alt.pj, ctx);
}
}
/************************************************************************/
/* proj_assign_context() */
/************************************************************************/
/** \brief Re-assign a context to a PJ* object.
*
* This may be useful if the PJ* has been created with a context that is
* thread-specific, and is later used in another thread. In that case,
* the user may want to assign another thread-specific context to the
* object.
*/
void proj_assign_context( PJ* pj, PJ_CONTEXT* ctx )
{
pj_set_ctx( pj, ctx );
}
/************************************************************************/
/* createDefault() */
/************************************************************************/
projCtx_t projCtx_t::createDefault()
{
projCtx_t ctx;
ctx.debug_level = PJ_LOG_NONE;
ctx.logger = pj_stderr_logger;
ctx.fileapi_legacy = pj_get_default_fileapi();
NS_PROJ::FileManager::fillDefaultNetworkInterface(&ctx);
if( getenv("PROJ_DEBUG") != nullptr )
{
if( atoi(getenv("PROJ_DEBUG")) >= -PJ_LOG_DEBUG_MINOR )
ctx.debug_level = atoi(getenv("PROJ_DEBUG"));
else
ctx.debug_level = PJ_LOG_DEBUG_MINOR;
}
return ctx;
}
/**************************************************************************/
/* get_cpp_context() */
/**************************************************************************/
projCppContext* projCtx_t::get_cpp_context()
{
if (cpp_context == nullptr) {
cpp_context = new projCppContext(this);
}
return cpp_context;
}
/**************************************************************************/
/* safeAutoCloseDbIfNeeded() */
/**************************************************************************/
void projCtx_t::safeAutoCloseDbIfNeeded()
{
if (cpp_context) {
cpp_context->autoCloseDbIfNeeded();
}
}
/************************************************************************/
/* set_search_paths() */
/************************************************************************/
void projCtx_t::set_search_paths(const std::vector<std::string>& search_paths_in )
{
search_paths = search_paths_in;
delete[] c_compat_paths;
c_compat_paths = nullptr;
if( !search_paths.empty() ) {
c_compat_paths = new const char*[search_paths.size()];
for( size_t i = 0; i < search_paths.size(); ++i ) {
c_compat_paths[i] = search_paths[i].c_str();
}
}
}
/**************************************************************************/
/* set_ca_bundle_path() */
/**************************************************************************/
void projCtx_t::set_ca_bundle_path(const std::string& ca_bundle_path_in)
{
ca_bundle_path = ca_bundle_path_in;
}
/************************************************************************/
/* projCtx_t(const projCtx_t& other) */
/************************************************************************/
projCtx_t::projCtx_t(const projCtx_t& other) :
debug_level(other.debug_level),
logger(other.logger),
logger_app_data(other.logger_app_data),
fileapi_legacy(other.fileapi_legacy),
cpp_context(other.cpp_context ? other.cpp_context->clone(this) : nullptr),
use_proj4_init_rules(other.use_proj4_init_rules),
epsg_file_exists(other.epsg_file_exists),
ca_bundle_path(other.ca_bundle_path),
env_var_proj_lib(other.env_var_proj_lib),
file_finder_legacy(other.file_finder_legacy),
file_finder(other.file_finder),
file_finder_user_data(other.file_finder_user_data),
custom_sqlite3_vfs_name(other.custom_sqlite3_vfs_name),
user_writable_directory(other.user_writable_directory),
// BEGIN ini file settings
iniFileLoaded(other.iniFileLoaded),
endpoint(other.endpoint),
networking(other.networking),
gridChunkCache(other.gridChunkCache),
defaultTmercAlgo(other.defaultTmercAlgo)
// END ini file settings
{
set_search_paths(other.search_paths);
}
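Given this copy constructor, cloning an arbitrary context is a one-liner, which is presumably why the reviewer considers the pj_ctx_clone() wrapper from the diff above unnecessary. A sketch under that assumption, for some existing context ctx:
// Sketch only: clone an arbitrary context directly via the copy
// constructor, exactly as pj_ctx_alloc() does for the default context.
projCtx cloned = new (std::nothrow) projCtx_t(*ctx);
if (cloned != nullptr) {
    // ... use the clone, e.g. from another thread ...
    pj_ctx_free(cloned);
}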
/************************************************************************/
/* pj_get_default_ctx() */
/************************************************************************/
projCtx pj_get_default_ctx()
{
// C++11 rules guarantee a thread-safe instantiation.
static projCtx_t default_context(projCtx_t::createDefault());
return &default_context;
}
/************************************************************************/
/* ~projCtx_t() */
/************************************************************************/
projCtx_t::~projCtx_t()
{
delete[] c_compat_paths;
proj_context_delete_cpp_context(cpp_context);
}
/************************************************************************/
/* pj_ctx_alloc() */
/************************************************************************/
projCtx pj_ctx_alloc()
{
return new (std::nothrow) projCtx_t(*pj_get_default_ctx());
}
/************************************************************************/
/* pj_ctx_free() */
/************************************************************************/
void pj_ctx_free( projCtx ctx )
{
delete ctx;
}
/************************************************************************/
/* pj_ctx_get_errno() */
/************************************************************************/
int pj_ctx_get_errno( projCtx ctx )
{
if (nullptr==ctx)
return pj_get_default_ctx ()->last_errno;
return ctx->last_errno;
}
/************************************************************************/
/* pj_ctx_set_errno() */
/* */
/* Also sets the global errno */
/************************************************************************/
void pj_ctx_set_errno( projCtx ctx, int new_errno )
{
ctx->last_errno = new_errno;
if( new_errno == 0 )
return;
errno = new_errno;
pj_errno = new_errno;
}
/************************************************************************/
/* pj_ctx_set_debug() */
/************************************************************************/
void pj_ctx_set_debug( projCtx ctx, int new_debug )
{
if (nullptr==ctx)
return;
ctx->debug_level = new_debug;
}
/************************************************************************/
/* pj_ctx_set_logger() */
/************************************************************************/
void pj_ctx_set_logger( projCtx ctx, void (*new_logger)(void*,int,const char*) )
{
if (nullptr==ctx)
return;
ctx->logger = new_logger;
}
/************************************************************************/
/* pj_ctx_set_app_data() */
/************************************************************************/
void pj_ctx_set_app_data( projCtx ctx, void *new_app_data )
{
if (nullptr==ctx)
return;
ctx->logger_app_data = new_app_data;
}
/************************************************************************/
/* pj_ctx_get_app_data() */
/************************************************************************/
void *pj_ctx_get_app_data( projCtx ctx )
{
if (nullptr==ctx)
return nullptr;
return ctx->logger_app_data;
}
| 1 | 11,987 | we don't need that function | OSGeo-PROJ | cpp |
@@ -72,6 +72,13 @@ public enum Platform {
}
},
+ WIN10("windows 10", "win10") {
+ @Override
+ public Platform family() {
+ return WINDOWS;
+ }
+ },
+
MAC("mac", "darwin", "os x") {},
SNOW_LEOPARD("snow leopard", "os x 10.6") { | 1 | // Licensed to the Software Freedom Conservancy (SFC) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The SFC licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.openqa.selenium;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
/**
* Represents the known and supported Platforms that WebDriver runs on. This is pretty close to the
* Operating System, but differs slightly, because this class is used to extract information such as
* program locations and line endings.
*/
// Useful URLs:
// http://hg.openjdk.java.net/jdk7/modules/jdk/file/a37326fa7f95/src/windows/native/java/lang/java_props_md.c
public enum Platform {
/**
* Never returned, but can be used to request a browser running on any version of Windows.
*/
WINDOWS("") {},
/**
* For versions of Windows that "feel like" Windows XP. These are ones that store files in
* "\Program Files\" and documents under "\\documents and settings\\username"
*/
XP("Windows Server 2003", "xp", "windows", "winnt") {
@Override
public Platform family() {
return WINDOWS;
}
},
/**
* For versions of Windows that "feel like" Windows Vista.
*/
VISTA("windows vista", "Windows Server 2008", "windows 7", "win7") {
@Override
public Platform family() {
return WINDOWS;
}
},
/**
* For versions of Windows that "feel like" Windows 8.
*/
WIN8("Windows Server 2012", "windows 8", "win8") {
@Override
public Platform family() {
return WINDOWS;
}
},
WIN8_1("windows 8.1", "win8.1") {
@Override
public Platform family() {
return WINDOWS;
}
},
MAC("mac", "darwin", "os x") {},
SNOW_LEOPARD("snow leopard", "os x 10.6") {
@Override
public Platform family() {
return MAC;
}
@Override
public String toString() {
return "OS X 10.6";
}
},
MOUNTAIN_LION("mountain lion", "os x 10.8") {
@Override
public Platform family() {
return MAC;
}
@Override
public String toString() {
return "OS X 10.8";
}
},
MAVERICKS("mavericks", "os x 10.9") {
@Override
public Platform family() {
return MAC;
}
@Override
public String toString() {
return "OS X 10.9";
}
},
YOSEMITE("yosemite", "os x 10.10") {
@Override
public Platform family() {
return MAC;
}
@Override
public String toString() {
return "OS X 10.10";
}
},
/**
* Many platforms have UNIX traits, amongst them LINUX, Solaris and BSD.
*/
UNIX("solaris", "bsd") {},
LINUX("linux") {
@Override
public Platform family() {
return UNIX;
}
},
ANDROID("android", "dalvik") {
public String getLineEnding() {
return "\n";
}
@Override
public Platform family() {
return LINUX;
}
},
/**
* Never returned, but can be used to request a browser running on any operating system.
*/
ANY("") {
@Override
public boolean is(Platform compareWith) {
return this == compareWith;
}
};
private final String[] partOfOsName;
private final int minorVersion;
private final int majorVersion;
private Platform(String... partOfOsName) {
this.partOfOsName = partOfOsName;
String version = System.getProperty("os.version", "0.0.0");
int major = 0;
int min = 0;
Pattern pattern = Pattern.compile("^(\\d+)\\.(\\d+).*");
Matcher matcher = pattern.matcher(version);
if (matcher.matches()) {
try {
major = Integer.parseInt(matcher.group(1));
min = Integer.parseInt(matcher.group(2));
} catch (NumberFormatException e) {
// These things happen
}
}
majorVersion = major;
minorVersion = min;
}
public String[] getPartOfOsName() {
return partOfOsName;
}
/**
* Get current platform (not necessarily the same as operating system).
*
* @return current platform
*/
public static Platform getCurrent() {
return extractFromSysProperty(System.getProperty("os.name"));
}
/**
* Extracts platforms based on system properties in Java and uses a heuristic to determine the
* most likely operating system. If unable to determine the operating system, it will default to
* UNIX.
*
* @param osName the operating system name to determine the platform of
* @return the most likely platform based on given operating system name
*/
public static Platform extractFromSysProperty(String osName) {
return extractFromSysProperty(osName, System.getProperty("os.version"));
}
/**
* Extracts platforms based on system properties in Java and uses a heuristic to determine the
* most likely operating system. If unable to determine the operating system, it will default to
* UNIX.
*
* @param osName the operating system name to determine the platform of
* @param osVersion the operating system version to determine the platform of
* @return the most likely platform based on given operating system name and version
*/
public static Platform extractFromSysProperty(String osName, String osVersion) {
osName = osName.toLowerCase();
// os.name for android is linux
if ("dalvik".equalsIgnoreCase(System.getProperty("java.vm.name"))) {
return Platform.ANDROID;
}
// Windows 8 can't be detected by osName alone
if (osVersion.equals("6.2") && osName.startsWith("windows nt")) {
return WIN8;
}
// Windows 8.1 can't be detected by osName alone
if (osVersion.equals("6.3") && osName.startsWith("windows nt")) {
return WIN8_1;
}
Platform mostLikely = UNIX;
String previousMatch = null;
for (Platform os : Platform.values()) {
for (String matcher : os.partOfOsName) {
if ("".equals(matcher)) {
continue;
}
matcher = matcher.toLowerCase();
if (os.isExactMatch(osName, matcher)) {
return os;
}
if (os.isCurrentPlatform(osName, matcher) && isBetterMatch(previousMatch, matcher)) {
previousMatch = matcher;
mostLikely = os;
}
}
}
// Default to assuming we're on a UNIX variant (including LINUX)
return mostLikely;
}
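Since extractFromSysProperty special-cases Windows releases whose os.name alone is ambiguous, the WIN10 constant from the diff above likely needs a matching branch next to the 6.2/6.3 checks inside this method; on some JDKs Windows 10 reports os.version "10.0" with an unrecognized "windows nt" os.name. A hedged sketch of such a branch (not in the original file):
// Hypothetical branch, analogous to the WIN8/WIN8_1 checks above: older
// JDKs may report os.name "windows nt (unknown)" with os.version "10.0".
if (osVersion.equals("10.0") && osName.startsWith("windows nt")) {
  return WIN10;
}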
/**
* Gets a platform with the name matching the parameter.
*
* @param name the platform name
* @return the Platform enum value matching the parameter
*/
public static Platform fromString(String name) {
try {
return Platform.valueOf(name);
} catch (IllegalArgumentException ex) {
for (Platform os : Platform.values()) {
for (String matcher : os.partOfOsName) {
if (name.toLowerCase().equals(matcher.toLowerCase())) {
return os;
}
}
}
throw new WebDriverException("Unrecognized platform: " + name);
}
}
/**
* Decides whether the newer match is better than the previous one. If the previous match
* is null, the newer match is always better.
*
* @param previous the previous match
* @param matcher the newer match
* @return true if newer match is better, false otherwise
*/
private static boolean isBetterMatch(String previous, String matcher) {
return previous == null || matcher.length() >= previous.length();
}
/**
* Heuristic for comparing two platforms. If platforms (which is not the same thing as operating
* systems) are found to be approximately similar in nature, this will return true. For instance
* the LINUX platform is similar to UNIX, and will give a positive result if compared.
*
* @param compareWith the platform to compare with
* @return true if platforms are approximately similar, false otherwise
*/
public boolean is(Platform compareWith) {
return this == compareWith || this.family().is(compareWith);
}
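As a usage note for the diff above: because is() falls back to the family() chain, giving WIN10 a WINDOWS family keeps generic capability matching intact, for example:
// Illustrative only, assuming the WIN10 constant from the diff:
Platform.WIN10.is(Platform.WINDOWS); // true, via family() returning WINDOWS
Platform.WIN10.is(Platform.WIN10);   // true, direct match
Platform.WIN10.is(Platform.MAC);     // false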
/**
* Returns a platform that represents a family for the current platform. For instance
* the LINUX if a part of the UNIX family, the XP is a part of the WINDOWS family.
*
* @return the family platform for the current one
*/
public Platform family() {
return ANY;
}
private boolean isCurrentPlatform(String osName, String matchAgainst) {
return osName.contains(matchAgainst);
}
private boolean isExactMatch(String osName, String matchAgainst) {
return matchAgainst.equals(osName);
}
/**
* Returns the major version of this platform.
*
* @return the major version of specified platform
*/
public int getMajorVersion() {
return majorVersion;
}
/**
* Returns the minor version of this platform.
*
* @return the minor version of specified platform
*/
public int getMinorVersion() {
return minorVersion;
}
}
| 1 | 12,210 | we'll also have to think about adding "Windows Server 2014" or whatever they come up with later. | SeleniumHQ-selenium | rb |
@@ -35,6 +35,9 @@ public interface ExecutorLoader {
Map<Integer, Pair<ExecutionReference, ExecutableFlow>> fetchActiveFlows()
throws ExecutorManagerException;
+ Pair<ExecutionReference, ExecutableFlow> fetchActiveFlowByExecId(int execId)
+ throws ExecutorManagerException;
+
List<ExecutableFlow> fetchFlowHistory(int skip, int num)
throws ExecutorManagerException;
| 1 | /*
* Copyright 2012 LinkedIn Corp.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package azkaban.executor;
import java.io.File;
import java.util.List;
import java.util.Map;
import azkaban.executor.ExecutorLogEvent.EventType;
import azkaban.utils.FileIOUtils.LogData;
import azkaban.utils.Pair;
import azkaban.utils.Props;
public interface ExecutorLoader {
void uploadExecutableFlow(ExecutableFlow flow)
throws ExecutorManagerException;
ExecutableFlow fetchExecutableFlow(int execId)
throws ExecutorManagerException;
Map<Integer, Pair<ExecutionReference, ExecutableFlow>> fetchActiveFlows()
throws ExecutorManagerException;
List<ExecutableFlow> fetchFlowHistory(int skip, int num)
throws ExecutorManagerException;
List<ExecutableFlow> fetchFlowHistory(int projectId, String flowId,
int skip, int num) throws ExecutorManagerException;
List<ExecutableFlow> fetchFlowHistory(int projectId, String flowId,
int skip, int num, Status status) throws ExecutorManagerException;
List<ExecutableFlow> fetchFlowHistory(String projContain,
String flowContains, String userNameContains, int status, long startData,
long endData, int skip, int num) throws ExecutorManagerException;
/**
* <pre>
* Fetch all executors from executors table
* Note:-
* 1 throws an Exception in case of a SQL issue
* 2 returns an empty list in case of no executor
* </pre>
*
* @return List<Executor>
* @throws ExecutorManagerException
*/
List<Executor> fetchAllExecutors() throws ExecutorManagerException;
/**
* <pre>
* Fetch all executors from executors table with active = true
* Note:-
* 1. throws an Exception in case of a SQL issue
* 2. returns an empty list in case of no active executor
* </pre>
*
* @return List<Executor>
* @throws ExecutorManagerException
*/
List<Executor> fetchActiveExecutors() throws ExecutorManagerException;
/**
* <pre>
* Fetch executor from executors with a given (host, port)
* Note:
* 1. throws an Exception in case of a SQL issue
* 2. return null when no executor is found
* with the given (host,port)
* </pre>
*
* @return Executor
* @throws ExecutorManagerException
*/
Executor fetchExecutor(String host, int port)
throws ExecutorManagerException;
/**
* <pre>
* Fetch executor from executors with a given executorId
* Note:
* 1. throws an Exception in case of a SQL issue
* 2. return null when no executor is found with the given executorId
* </pre>
*
* @return Executor
* @throws ExecutorManagerException
*/
Executor fetchExecutor(int executorId) throws ExecutorManagerException;
/**
* <pre>
* Create an executor and insert it in the executors table.
* Note:-
* 1. throws an Exception in case of a SQL issue
* 2. throws an Exception if an executor with (host, port) already exists
* </pre>
*
* @return Executor
* @throws ExecutorManagerException
*/
Executor addExecutor(String host, int port)
throws ExecutorManagerException;
/**
* <pre>
* Update an existing executor in the executors table.
* Note:-
* 1. throws an Exception in case of a SQL issue
* 2. throws an Exception if there is no executor with the given id
* </pre>
*
* @param executor
* @throws ExecutorManagerException
*/
void updateExecutor(Executor executor) throws ExecutorManagerException;
/**
* <pre>
* Remove the executor from executors table.
* Note:-
* 1. throws an Exception in case of a SQL issue
* 2. throws an Exception if there is no executor in the table
* </pre>
* @param host
* @param port
* @throws ExecutorManagerException
*/
void removeExecutor(String host, int port) throws ExecutorManagerException;
/**
* <pre>
* Log an event in the executor_event audit table.
* Note: throws an Exception in case of a SQL issue
* </pre>
*
* @param executor
* @param type
* @param user
* @param message
*/
void postExecutorEvent(Executor executor, EventType type, String user,
String message) throws ExecutorManagerException;
/**
* <pre>
* This method is to fetch events recorded in executor audit table, inserted
* by postExecutorEvent for a given executor, starting from the given offset
* Note:-
* 1. throws an Exception in case of a SQL issue
* 2. Returns an empty list in case of no events
* </pre>
*
* @param executor
* @param num
* @param offset
* @return List<ExecutorLogEvent>
* @throws ExecutorManagerException
*/
List<ExecutorLogEvent> getExecutorEvents(Executor executor, int num,
int offset) throws ExecutorManagerException;
void addActiveExecutableReference(ExecutionReference ref)
throws ExecutorManagerException;
void removeActiveExecutableReference(int execId)
throws ExecutorManagerException;
/**
* <pre>
* Unset executor Id for an execution
* Note:-
* throws an Exception in case of a SQL issue
* </pre>
*
* @param executionId
* @throws ExecutorManagerException
*/
void unassignExecutor(int executionId) throws ExecutorManagerException;
/**
* <pre>
* Set an executor Id to an execution
* Note:-
* 1. throws an Exception in case of a SQL issue
* 2. throws an Exception in case executionId or executorId do not exist
* </pre>
*
* @param executorId
* @param execId
* @throws ExecutorManagerException
*/
void assignExecutor(int executorId, int execId)
throws ExecutorManagerException;
/**
* <pre>
* Fetches an executor corresponding to a given execution
* Note:-
* 1. throws an Exception in case of a SQL issue
* 2. return null when no executor is found with the given executionId
* </pre>
*
* @param executionId
* @return fetched Executor
* @throws ExecutorManagerException
*/
Executor fetchExecutorByExecutionId(int executionId)
throws ExecutorManagerException;
/**
* <pre>
* Fetch queued flows which have not yet dispatched
* Note:
* 1. throws an Exception in case of a SQL issue
* 2. return empty list when no queued execution is found
* </pre>
*
* @return List of queued flows and corresponding execution reference
* @throws ExecutorManagerException
*/
List<Pair<ExecutionReference, ExecutableFlow>> fetchQueuedFlows()
throws ExecutorManagerException;
boolean updateExecutableReference(int execId, long updateTime)
throws ExecutorManagerException;
LogData fetchLogs(int execId, String name, int attempt, int startByte,
int endByte) throws ExecutorManagerException;
List<Object> fetchAttachments(int execId, String name, int attempt)
throws ExecutorManagerException;
void uploadLogFile(int execId, String name, int attempt, File... files)
throws ExecutorManagerException;
void uploadAttachmentFile(ExecutableNode node, File file)
throws ExecutorManagerException;
void updateExecutableFlow(ExecutableFlow flow)
throws ExecutorManagerException;
void uploadExecutableNode(ExecutableNode node, Props inputParams)
throws ExecutorManagerException;
List<ExecutableJobInfo> fetchJobInfoAttempts(int execId, String jobId)
throws ExecutorManagerException;
ExecutableJobInfo fetchJobInfo(int execId, String jobId, int attempt)
throws ExecutorManagerException;
List<ExecutableJobInfo> fetchJobHistory(int projectId, String jobId,
int skip, int size) throws ExecutorManagerException;
void updateExecutableNode(ExecutableNode node)
throws ExecutorManagerException;
int fetchNumExecutableFlows(int projectId, String flowId)
throws ExecutorManagerException;
int fetchNumExecutableFlows() throws ExecutorManagerException;
int fetchNumExecutableNodes(int projectId, String jobId)
throws ExecutorManagerException;
Props fetchExecutionJobInputProps(int execId, String jobId)
throws ExecutorManagerException;
Props fetchExecutionJobOutputProps(int execId, String jobId)
throws ExecutorManagerException;
Pair<Props, Props> fetchExecutionJobProps(int execId, String jobId)
throws ExecutorManagerException;
int removeExecutionLogsByTime(long millis)
throws ExecutorManagerException;
}
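On the review question about fetching an inactive flow: fetchExecutableFlow(int execId), declared near the top of this interface, loads an execution by id and (assumed here) does so regardless of whether it is still active, so a state-agnostic lookup can be composed from the two methods. An illustrative helper, not part of the original interface:
// Illustrative helper: the helper name and the fallback behaviour are
// assumptions, not part of the original interface.
static ExecutableFlow fetchFlowAnyState(ExecutorLoader loader, int execId)
    throws ExecutorManagerException {
  // Prefer the active lookup added in the diff above.
  Pair<ExecutionReference, ExecutableFlow> active =
      loader.fetchActiveFlowByExecId(execId);
  if (active != null) {
    return active.getSecond();
  }
  // Fall back to the unconditional fetch, which also returns finished
  // (inactive) executions.
  return loader.fetchExecutableFlow(execId);
}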
| 1 | 13,023 | curious do we have an API to fetch an inactive flow? | azkaban-azkaban | java |