Dataset columns:

hunk              dict
file              string   (lengths 0 – 11.8M)
file_path         string   (lengths 2 – 234)
label             int64    (values 0 – 1)
commit_url        string   (lengths 74 – 103)
dependency_score  sequence (length 5, fixed)
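Each row pairs one edit hunk with a candidate file from the same commit. A minimal sketch for iterating over such rows, assuming they are serialized as JSON Lines with the field names above ("rows.jsonl" is a placeholder path, not part of the dataset):

```python
import json

# Sketch: read dataset rows, assuming JSON Lines serialization.
with open("rows.jsonl", encoding="utf-8") as f:
    for line in f:
        row = json.loads(line)
        hunk = row["hunk"]  # dict with code_window, labels, after_edit, ...
        print(row["file_path"], row["label"], row["commit_url"])
        print(len(hunk["code_window"]), "window lines,",
              len(row["dependency_score"]), "score components")
```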
{ "id": 0, "code_window": [ " ParentID 0, ParentSchemaID 0: database \"defaultdb\" (50): processed\n", " ParentID 0, ParentSchemaID 0: database \"postgres\" (51): processed\n", " ParentID 52, ParentSchemaID 29: relation \"users\" (53): referenced database ID 52: referenced descriptor not found\n", " ParentID 52, ParentSchemaID 29: relation \"users\" (53): processed\n", " ParentID 52, ParentSchemaID 29: relation \"vehicles\" (54): constraint id was missing for constraint: FOREIGN KEY with name \"fk_city_ref_users\"\n", " ParentID 52, ParentSchemaID 29: relation \"vehicles\" (54): processed\n" ], "labels": [ "keep", "keep", "keep", "keep", "replace", "keep" ], "after_edit": [ " ParentID 52, ParentSchemaID 29: relation \"vehicles\" (54): referenced database ID 52: referenced descriptor not found\n" ], "file_path": "pkg/cli/testdata/doctor/test_examine_zipdir_verbose", "type": "replace", "edit_start_line_idx": 41 }
debug doctor zipdir --verbose ---- debug doctor zipdir testdata/doctor/debugzip 21.11-52 --verbose reading testdata/doctor/debugzip/system.descriptor.txt reading testdata/doctor/debugzip/system.namespace.txt WARNING: errors occurred during the production of system.jobs.txt, contents may be missing or incomplete. reading testdata/doctor/debugzip/system.jobs.txt Examining 37 descriptors and 42 namespace entries... ParentID 0, ParentSchemaID 0: database "system" (1): processed ParentID 1, ParentSchemaID 29: relation "descriptor" (3): processed ParentID 1, ParentSchemaID 29: relation "users" (4): processed ParentID 1, ParentSchemaID 29: relation "zones" (5): processed ParentID 1, ParentSchemaID 29: relation "settings" (6): processed ParentID 1, ParentSchemaID 29: relation "tenants" (8): processed ParentID 1, ParentSchemaID 29: relation "lease" (11): processed ParentID 1, ParentSchemaID 29: relation "eventlog" (12): processed ParentID 1, ParentSchemaID 29: relation "rangelog" (13): processed ParentID 1, ParentSchemaID 29: relation "ui" (14): processed ParentID 1, ParentSchemaID 29: relation "jobs" (15): processed ParentID 1, ParentSchemaID 29: relation "web_sessions" (19): processed ParentID 1, ParentSchemaID 29: relation "table_statistics" (20): processed ParentID 1, ParentSchemaID 29: relation "locations" (21): processed ParentID 1, ParentSchemaID 29: relation "role_members" (23): processed ParentID 1, ParentSchemaID 29: relation "comments" (24): processed ParentID 1, ParentSchemaID 29: relation "replication_constraint_stats" (25): processed ParentID 1, ParentSchemaID 29: relation "replication_critical_localities" (26): processed ParentID 1, ParentSchemaID 29: relation "replication_stats" (27): processed ParentID 1, ParentSchemaID 29: relation "reports_meta" (28): processed ParentID 1, ParentSchemaID 29: relation "namespace" (30): processed ParentID 1, ParentSchemaID 29: relation "protected_ts_meta" (31): processed ParentID 1, ParentSchemaID 29: relation "protected_ts_records" (32): processed ParentID 1, ParentSchemaID 29: relation "role_options" (33): processed ParentID 1, ParentSchemaID 29: relation "statement_bundle_chunks" (34): processed ParentID 1, ParentSchemaID 29: relation "statement_diagnostics_requests" (35): processed ParentID 1, ParentSchemaID 29: relation "statement_diagnostics" (36): processed ParentID 1, ParentSchemaID 29: relation "scheduled_jobs" (37): processed ParentID 1, ParentSchemaID 29: relation "sqlliveness" (39): processed ParentID 0, ParentSchemaID 0: database "defaultdb" (50): processed ParentID 0, ParentSchemaID 0: database "postgres" (51): processed ParentID 52, ParentSchemaID 29: relation "users" (53): referenced database ID 52: referenced descriptor not found ParentID 52, ParentSchemaID 29: relation "users" (53): processed ParentID 52, ParentSchemaID 29: relation "vehicles" (54): constraint id was missing for constraint: FOREIGN KEY with name "fk_city_ref_users" ParentID 52, ParentSchemaID 29: relation "vehicles" (54): processed ParentID 52, ParentSchemaID 29: relation "rides" (55): constraint id was missing for constraint: FOREIGN KEY with name "fk_city_ref_users" ParentID 52, ParentSchemaID 29: relation "rides" (55): constraint id was missing for constraint: FOREIGN KEY with name "fk_vehicle_city_ref_vehicles" ParentID 52, ParentSchemaID 29: relation "rides" (55): processed ParentID 52, ParentSchemaID 29: relation "vehicle_location_histories" (56): constraint id was missing for constraint: FOREIGN KEY with name "fk_city_ref_rides" ParentID 52, ParentSchemaID 
29: relation "vehicle_location_histories" (56): processed ParentID 52, ParentSchemaID 29: relation "promo_codes" (57): referenced database ID 52: referenced descriptor not found ParentID 52, ParentSchemaID 29: relation "promo_codes" (57): processed ParentID 52, ParentSchemaID 29: relation "user_promo_codes" (58): constraint id was missing for constraint: FOREIGN KEY with name "fk_city_ref_users" ParentID 52, ParentSchemaID 29: relation "user_promo_codes" (58): processed ParentID 0, ParentSchemaID 0: namespace entry "defaultdb" (50): processed ParentID 0, ParentSchemaID 0: namespace entry "movr" (52): descriptor not found ParentID 0, ParentSchemaID 0: namespace entry "postgres" (51): processed ParentID 0, ParentSchemaID 0: namespace entry "system" (1): processed ParentID 1, ParentSchemaID 0: namespace entry "public" (29): processed ParentID 1, ParentSchemaID 29: namespace entry "comments" (24): processed ParentID 1, ParentSchemaID 29: namespace entry "descriptor" (3): processed ParentID 1, ParentSchemaID 29: namespace entry "eventlog" (12): processed ParentID 1, ParentSchemaID 29: namespace entry "jobs" (15): processed ParentID 1, ParentSchemaID 29: namespace entry "lease" (11): processed ParentID 1, ParentSchemaID 29: namespace entry "locations" (21): processed ParentID 1, ParentSchemaID 29: namespace entry "namespace" (30): processed ParentID 1, ParentSchemaID 29: namespace entry "protected_ts_meta" (31): processed ParentID 1, ParentSchemaID 29: namespace entry "protected_ts_records" (32): processed ParentID 1, ParentSchemaID 29: namespace entry "rangelog" (13): processed ParentID 1, ParentSchemaID 29: namespace entry "replication_constraint_stats" (25): processed ParentID 1, ParentSchemaID 29: namespace entry "replication_critical_localities" (26): processed ParentID 1, ParentSchemaID 29: namespace entry "replication_stats" (27): processed ParentID 1, ParentSchemaID 29: namespace entry "reports_meta" (28): processed ParentID 1, ParentSchemaID 29: namespace entry "role_members" (23): processed ParentID 1, ParentSchemaID 29: namespace entry "role_options" (33): processed ParentID 1, ParentSchemaID 29: namespace entry "scheduled_jobs" (37): processed ParentID 1, ParentSchemaID 29: namespace entry "settings" (6): processed ParentID 1, ParentSchemaID 29: namespace entry "sqlliveness" (39): processed ParentID 1, ParentSchemaID 29: namespace entry "statement_bundle_chunks" (34): processed ParentID 1, ParentSchemaID 29: namespace entry "statement_diagnostics" (36): processed ParentID 1, ParentSchemaID 29: namespace entry "statement_diagnostics_requests" (35): processed ParentID 1, ParentSchemaID 29: namespace entry "table_statistics" (20): processed ParentID 1, ParentSchemaID 29: namespace entry "tenants" (8): processed ParentID 1, ParentSchemaID 29: namespace entry "ui" (14): processed ParentID 1, ParentSchemaID 29: namespace entry "users" (4): processed ParentID 1, ParentSchemaID 29: namespace entry "web_sessions" (19): processed ParentID 1, ParentSchemaID 29: namespace entry "zones" (5): processed ParentID 50, ParentSchemaID 0: namespace entry "public" (29): processed ParentID 51, ParentSchemaID 0: namespace entry "public" (29): processed ParentID 52, ParentSchemaID 0: namespace entry "public" (29): processed ParentID 52, ParentSchemaID 29: namespace entry "promo_codes" (57): processed ParentID 52, ParentSchemaID 29: namespace entry "rides" (55): processed ParentID 52, ParentSchemaID 29: namespace entry "user_promo_codes" (58): processed ParentID 52, ParentSchemaID 29: namespace entry "users" 
(53): processed ParentID 52, ParentSchemaID 29: namespace entry "vehicle_location_histories" (56): processed ParentID 52, ParentSchemaID 29: namespace entry "vehicles" (54): processed Examining 2 jobs... Processing job 587337426939772929 Processing job 587337426984566785 job 587337426984566785: running schema change GC refers to missing table descriptor(s) [59]; existing descriptors that still need to be dropped []; job safe to delete: true. ERROR: validation failed
pkg/cli/testdata/doctor/test_examine_zipdir_verbose
1
https://github.com/cockroachdb/cockroach/commit/0d8e15c112246a11db9d4e370b5e9bcfeac82aa8
[ 0.9443483948707581, 0.11422600597143173, 0.0001909757120301947, 0.0006782904965803027, 0.2802562117576599 ]
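In the hunk above, "labels" runs parallel to "code_window" ("keep"/"replace"), and "after_edit" holds the lines that substitute for the contiguous "replace" run. A short sketch reconstructing the post-edit window under exactly that reading (the function name apply_hunk is illustrative, not from the dataset):

```python
def apply_hunk(hunk):
    # Sketch: assumes labels[i] applies to code_window[i] and that a
    # contiguous "replace" run is substituted, as a block, by after_edit.
    out, emitted = [], False
    for line, label in zip(hunk["code_window"], hunk["labels"]):
        if label == "keep":
            out.append(line)
        elif not emitted:  # first line of the "replace" run
            out.extend(hunk["after_edit"])
            emitted = True
        # remaining "replace" lines are dropped
    return "".join(out)  # window lines already end in "\n"
```

For the id-0 hunk above, this swaps the single 'constraint id was missing' line for the 'referenced descriptor not found' line while the five "keep" lines pass through unchanged.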
{ "id": 0, "code_window": [ " ParentID 0, ParentSchemaID 0: database \"defaultdb\" (50): processed\n", " ParentID 0, ParentSchemaID 0: database \"postgres\" (51): processed\n", " ParentID 52, ParentSchemaID 29: relation \"users\" (53): referenced database ID 52: referenced descriptor not found\n", " ParentID 52, ParentSchemaID 29: relation \"users\" (53): processed\n", " ParentID 52, ParentSchemaID 29: relation \"vehicles\" (54): constraint id was missing for constraint: FOREIGN KEY with name \"fk_city_ref_users\"\n", " ParentID 52, ParentSchemaID 29: relation \"vehicles\" (54): processed\n" ], "labels": [ "keep", "keep", "keep", "keep", "replace", "keep" ], "after_edit": [ " ParentID 52, ParentSchemaID 29: relation \"vehicles\" (54): referenced database ID 52: referenced descriptor not found\n" ], "file_path": "pkg/cli/testdata/doctor/test_examine_zipdir_verbose", "type": "replace", "edit_start_line_idx": 41 }
// Copyright 2020 The Cockroach Authors. // // Licensed as a CockroachDB Enterprise file under the Cockroach Community // License (the "License"); you may not use this file except in compliance with // the License. You may obtain a copy of the License at // // https://github.com/cockroachdb/cockroach/blob/master/licenses/CCL.txt package sqlproxyccl import ( "crypto/tls" "net" "github.com/jackc/pgproto3/v2" ) // FrontendAdmit is the default implementation of a frontend admitter. It can // upgrade to an optional SSL connection, and will handle and verify the startup // message received from the PG SQL client. The connection returned should never // be nil in case of error. Depending on whether the error happened before the // connection was upgraded to TLS or not it will either be the original or the // TLS connection. var FrontendAdmit = func( conn net.Conn, incomingTLSConfig *tls.Config, ) (net.Conn, *pgproto3.StartupMessage, error) { // `conn` could be replaced by `conn` embedded in a `tls.Conn` connection, // hence it's important to close `conn` rather than `proxyConn` since closing // the latter will not call `Close` method of `tls.Conn`. var sniServerName string // Read first message from client. m, err := pgproto3.NewBackend(pgproto3.NewChunkReader(conn), conn).ReceiveStartupMessage() if err != nil { return conn, nil, newErrorf(codeClientReadFailed, "while receiving startup message") } // CancelRequest is unencrypted and unauthenticated, regardless of whether // the server requires TLS connections. For now, ignore the request to cancel, // and send back a nil StartupMessage, which will cause the proxy to just // close the connection in response. if _, ok := m.(*pgproto3.CancelRequest); ok { return conn, nil, nil } // If we have an incoming TLS Config, require that the client initiates with // an SSLRequest message. if incomingTLSConfig != nil { if _, ok := m.(*pgproto3.SSLRequest); !ok { code := codeUnexpectedInsecureStartupMessage return conn, nil, newErrorf(code, "unsupported startup message: %T", m) } _, err = conn.Write([]byte{pgAcceptSSLRequest}) if err != nil { return conn, nil, newErrorf(codeClientWriteFailed, "acking SSLRequest: %v", err) } cfg := incomingTLSConfig.Clone() cfg.GetConfigForClient = func(h *tls.ClientHelloInfo) (*tls.Config, error) { sniServerName = h.ServerName return nil, nil } conn = tls.Server(conn, cfg) // Now that SSL is established, read the encrypted startup message. m, err = pgproto3.NewBackend(pgproto3.NewChunkReader(conn), conn).ReceiveStartupMessage() if err != nil { return conn, nil, newErrorf(codeClientReadFailed, "receiving post-TLS startup message: %v", err) } } if startup, ok := m.(*pgproto3.StartupMessage); ok { // Add the sniServerName (if used) as parameter if sniServerName != "" { startup.Parameters["sni-server"] = sniServerName } return conn, startup, nil } code := codeUnexpectedStartupMessage return conn, nil, newErrorf(code, "unsupported post-TLS startup message: %T", m) }
pkg/ccl/sqlproxyccl/frontend_admitter.go
0
https://github.com/cockroachdb/cockroach/commit/0d8e15c112246a11db9d4e370b5e9bcfeac82aa8
[ 0.00017748989921528846, 0.00016985720139928162, 0.0001599833049112931, 0.000170803556102328, 0.00000491416358272545 ]
{ "id": 0, "code_window": [ " ParentID 0, ParentSchemaID 0: database \"defaultdb\" (50): processed\n", " ParentID 0, ParentSchemaID 0: database \"postgres\" (51): processed\n", " ParentID 52, ParentSchemaID 29: relation \"users\" (53): referenced database ID 52: referenced descriptor not found\n", " ParentID 52, ParentSchemaID 29: relation \"users\" (53): processed\n", " ParentID 52, ParentSchemaID 29: relation \"vehicles\" (54): constraint id was missing for constraint: FOREIGN KEY with name \"fk_city_ref_users\"\n", " ParentID 52, ParentSchemaID 29: relation \"vehicles\" (54): processed\n" ], "labels": [ "keep", "keep", "keep", "keep", "replace", "keep" ], "after_edit": [ " ParentID 52, ParentSchemaID 29: relation \"vehicles\" (54): referenced database ID 52: referenced descriptor not found\n" ], "file_path": "pkg/cli/testdata/doctor/test_examine_zipdir_verbose", "type": "replace", "edit_start_line_idx": 41 }
post ---- ---- searchIssue repo:"cockroach" user:"cockroachdb" is:issue is:open in:title label:"C-test-failure" sort:created-desc "cmd/roachtest: some-roachtest failed" label:branch-release-0.1: [github.Issue{Number:30, Title:"boom", Labels:[github.Label{URL:"fake", Name:"C-test-failure"} github.Label{URL:"fake", Name:"O-robot"} github.Label{URL:"fake", Name:"release-0.1"}]}] searchIssue repo:"cockroach" user:"cockroachdb" is:issue is:open in:title label:"C-test-failure" sort:created-desc "cmd/roachtest: some-roachtest failed" -label:branch-release-0.1: [github.Issue{Number:31, Title:"boom related", Labels:[github.Label{URL:"fake", Name:"C-test-failure"} github.Label{URL:"fake", Name:"O-robot"} github.Label{URL:"fake", Name:"release-0.2"}]}] createComment owner=cockroachdb repo=cockroach issue=30: cmd/roachtest.some-roachtest [failed](https://teamcity.example.com/viewLog.html?buildId=8008135&tab=buildLog) on release-0.1 @ [abcd123](https://github.com/cockroachdb/cockroach/commits/abcd123): ``` boom ``` <details><summary>Help</summary> <p> See: [FooBar README](https://github.com/cockroachdb/cockroach) Parameters in this failure: - TAGS=deadlock - GOFLAGS=race </p> </details> <details><summary>Same failure on other branches</summary> <p> - #31 boom related [C-test-failure O-robot release-0.2] </p> </details> <sub> [This test on roachdash](https://roachdash.crdb.dev/?filter=status:open%20t:.*some-roachtest.*&sort=title+created&display=lastcommented+project) | [Improve this report!](https://github.com/cockroachdb/cockroach/tree/master/pkg/cmd/internal/issues) </sub> Rendered: https://github.com/cockroachdb/cockroach/issues/new?body=cmd%2Froachtest.some-roachtest+%5Bfailed%5D%28https%3A%2F%2Fteamcity.example.com%2FviewLog.html%3FbuildId%3D8008135%26tab%3DbuildLog%29+on+release-0.1+%40+%5Babcd123%5D%28https%3A%2F%2Fgithub.com%2Fcockroachdb%2Fcockroach%2Fcommits%2Fabcd123%29%3A%0A%0A%0A%60%60%60%0Aboom%0A%60%60%60%0A%3Cdetails%3E%3Csummary%3EHelp%3C%2Fsummary%3E%0A%3Cp%3E%0A%0A%0ASee%3A+%5BFooBar+README%5D%28https%3A%2F%2Fgithub.com%2Fcockroachdb%2Fcockroach%29%0A%0AParameters+in+this+failure%3A%0A%0A-+TAGS%3Ddeadlock%0A%0A-+GOFLAGS%3Drace%0A%3C%2Fp%3E%0A%3C%2Fdetails%3E%0A%3Cdetails%3E%3Csummary%3ESame+failure+on+other+branches%3C%2Fsummary%3E%0A%3Cp%3E%0A%0A-+%2331+boom+related+%5BC-test-failure+O-robot+release-0.2%5D%0A%3C%2Fp%3E%0A%3C%2Fdetails%3E%0A%3Csub%3E%0A%0A%5BThis+test+on+roachdash%5D%28https%3A%2F%2Froachdash.crdb.dev%2F%3Ffilter%3Dstatus%3Aopen%2520t%3A.%2Asome-roachtest.%2A%26sort%3Dtitle%2Bcreated%26display%3Dlastcommented%2Bproject%29+%7C+%5BImprove+this+report%21%5D%28https%3A%2F%2Fgithub.com%2Fcockroachdb%2Fcockroach%2Ftree%2Fmaster%2Fpkg%2Fcmd%2Finternal%2Fissues%29%0A%3C%2Fsub%3E%0A&title=%3Ccomment%3E ---- ----
pkg/cmd/internal/issues/testdata/post/failure-with-url-matching-and-related-issue.txt
0
https://github.com/cockroachdb/cockroach/commit/0d8e15c112246a11db9d4e370b5e9bcfeac82aa8
[ 0.0001754599215928465, 0.00017166875477414578, 0.00016664880968164653, 0.00017128614126704633, 0.00000312298902827024 ]
{ "id": 0, "code_window": [ " ParentID 0, ParentSchemaID 0: database \"defaultdb\" (50): processed\n", " ParentID 0, ParentSchemaID 0: database \"postgres\" (51): processed\n", " ParentID 52, ParentSchemaID 29: relation \"users\" (53): referenced database ID 52: referenced descriptor not found\n", " ParentID 52, ParentSchemaID 29: relation \"users\" (53): processed\n", " ParentID 52, ParentSchemaID 29: relation \"vehicles\" (54): constraint id was missing for constraint: FOREIGN KEY with name \"fk_city_ref_users\"\n", " ParentID 52, ParentSchemaID 29: relation \"vehicles\" (54): processed\n" ], "labels": [ "keep", "keep", "keep", "keep", "replace", "keep" ], "after_edit": [ " ParentID 52, ParentSchemaID 29: relation \"vehicles\" (54): referenced database ID 52: referenced descriptor not found\n" ], "file_path": "pkg/cli/testdata/doctor/test_examine_zipdir_verbose", "type": "replace", "edit_start_line_idx": 41 }
statement ok SET experimental_enable_unique_without_index_constraints = true statement ok CREATE TABLE test_kv(k INT PRIMARY KEY, v INT, w DECIMAL); CREATE UNIQUE INDEX test_v_idx ON test_kv(v); CREATE INDEX test_v_idx2 ON test_kv(v DESC) STORING(w); CREATE INDEX test_v_idx3 ON test_kv(w) STORING(v); CREATE TABLE test_kvr1(k INT PRIMARY KEY REFERENCES test_kv(k)); CREATE TABLE test_kvr2(k INT, v INT UNIQUE REFERENCES test_kv(k)); CREATE TABLE test_kvr3(k INT, v INT UNIQUE REFERENCES test_kv(v)); CREATE TABLE test_kvi1(k INT PRIMARY KEY); CREATE TABLE test_kvi2(k INT PRIMARY KEY, v INT); CREATE UNIQUE INDEX test_kvi2_idx ON test_kvi2(v); CREATE VIEW test_v1 AS SELECT v FROM test_kv; CREATE VIEW test_v2 AS SELECT v FROM test_v1; CREATE TABLE test_uwi_parent(a INT UNIQUE WITHOUT INDEX); CREATE TABLE test_uwi_child(a INT REFERENCES test_uwi_parent(a)); query ITITTBTB colnames SELECT * FROM crdb_internal.table_columns WHERE descriptor_name LIKE 'test_%' ORDER BY descriptor_id, column_id ---- descriptor_id descriptor_name column_id column_name column_type nullable default_expr hidden 106 test_kv 1 k family:IntFamily width:64 precision:0 locale:"" visible_type:0 oid:20 time_precision_is_set:false false NULL false 106 test_kv 2 v family:IntFamily width:64 precision:0 locale:"" visible_type:0 oid:20 time_precision_is_set:false true NULL false 106 test_kv 3 w family:DecimalFamily width:0 precision:0 locale:"" visible_type:0 oid:1700 time_precision_is_set:false true NULL false 107 test_kvr1 1 k family:IntFamily width:64 precision:0 locale:"" visible_type:0 oid:20 time_precision_is_set:false false NULL false 108 test_kvr2 1 k family:IntFamily width:64 precision:0 locale:"" visible_type:0 oid:20 time_precision_is_set:false true NULL false 108 test_kvr2 2 v family:IntFamily width:64 precision:0 locale:"" visible_type:0 oid:20 time_precision_is_set:false true NULL false 108 test_kvr2 3 rowid family:IntFamily width:64 precision:0 locale:"" visible_type:0 oid:20 time_precision_is_set:false false unique_rowid() true 109 test_kvr3 1 k family:IntFamily width:64 precision:0 locale:"" visible_type:0 oid:20 time_precision_is_set:false true NULL false 109 test_kvr3 2 v family:IntFamily width:64 precision:0 locale:"" visible_type:0 oid:20 time_precision_is_set:false true NULL false 109 test_kvr3 3 rowid family:IntFamily width:64 precision:0 locale:"" visible_type:0 oid:20 time_precision_is_set:false false unique_rowid() true 110 test_kvi1 1 k family:IntFamily width:64 precision:0 locale:"" visible_type:0 oid:20 time_precision_is_set:false false NULL false 111 test_kvi2 1 k family:IntFamily width:64 precision:0 locale:"" visible_type:0 oid:20 time_precision_is_set:false false NULL false 111 test_kvi2 2 v family:IntFamily width:64 precision:0 locale:"" visible_type:0 oid:20 time_precision_is_set:false true NULL false 112 test_v1 1 v family:IntFamily width:64 precision:0 locale:"" visible_type:0 oid:20 time_precision_is_set:false true NULL false 113 test_v2 1 v family:IntFamily width:64 precision:0 locale:"" visible_type:0 oid:20 time_precision_is_set:false true NULL false 114 test_uwi_parent 1 a family:IntFamily width:64 precision:0 locale:"" visible_type:0 oid:20 time_precision_is_set:false true NULL false 114 test_uwi_parent 2 rowid family:IntFamily width:64 precision:0 locale:"" visible_type:0 oid:20 time_precision_is_set:false false unique_rowid() true 115 test_uwi_child 1 a family:IntFamily width:64 precision:0 locale:"" visible_type:0 oid:20 time_precision_is_set:false true NULL false 115 test_uwi_child 2 
rowid family:IntFamily width:64 precision:0 locale:"" visible_type:0 oid:20 time_precision_is_set:false false unique_rowid() true query ITITTBBB colnames SELECT descriptor_id, descriptor_name, index_id, index_name, index_type, is_unique, is_inverted, is_sharded FROM crdb_internal.table_indexes WHERE descriptor_name LIKE 'test_%' ORDER BY descriptor_id, index_id ---- descriptor_id descriptor_name index_id index_name index_type is_unique is_inverted is_sharded 106 test_kv 1 test_kv_pkey primary true false false 106 test_kv 2 test_v_idx secondary true false false 106 test_kv 3 test_v_idx2 secondary false false false 106 test_kv 4 test_v_idx3 secondary false false false 107 test_kvr1 1 test_kvr1_pkey primary true false false 108 test_kvr2 1 test_kvr2_pkey primary true false false 108 test_kvr2 2 test_kvr2_v_key secondary true false false 109 test_kvr3 1 test_kvr3_pkey primary true false false 109 test_kvr3 2 test_kvr3_v_key secondary true false false 110 test_kvi1 1 test_kvi1_pkey primary true false false 111 test_kvi2 1 test_kvi2_pkey primary true false false 111 test_kvi2 2 test_kvi2_idx secondary true false false 112 test_v1 0 · primary false false false 113 test_v2 0 · primary false false false 114 test_uwi_parent 1 test_uwi_parent_pkey primary true false false 115 test_uwi_child 1 test_uwi_child_pkey primary true false false query ITITTITTB colnames SELECT * FROM crdb_internal.index_columns WHERE descriptor_name LIKE 'test_%' ORDER BY descriptor_id, index_id, column_type, column_id ---- descriptor_id descriptor_name index_id index_name column_type column_id column_name column_direction implicit 106 test_kv 1 test_kv_pkey key 1 k ASC false 106 test_kv 2 test_v_idx extra 1 NULL NULL false 106 test_kv 2 test_v_idx key 2 v ASC false 106 test_kv 3 test_v_idx2 extra 1 NULL NULL false 106 test_kv 3 test_v_idx2 key 2 v DESC false 106 test_kv 3 test_v_idx2 storing 3 NULL NULL false 106 test_kv 4 test_v_idx3 composite 3 NULL NULL false 106 test_kv 4 test_v_idx3 extra 1 NULL NULL false 106 test_kv 4 test_v_idx3 key 3 w ASC false 106 test_kv 4 test_v_idx3 storing 2 NULL NULL false 107 test_kvr1 1 test_kvr1_pkey key 1 k ASC false 108 test_kvr2 1 test_kvr2_pkey key 3 rowid ASC false 108 test_kvr2 2 test_kvr2_v_key extra 3 NULL NULL false 108 test_kvr2 2 test_kvr2_v_key key 2 v ASC false 109 test_kvr3 1 test_kvr3_pkey key 3 rowid ASC false 109 test_kvr3 2 test_kvr3_v_key extra 3 NULL NULL false 109 test_kvr3 2 test_kvr3_v_key key 2 v ASC false 110 test_kvi1 1 test_kvi1_pkey key 1 k ASC false 111 test_kvi2 1 test_kvi2_pkey key 1 k ASC false 111 test_kvi2 2 test_kvi2_idx extra 1 NULL NULL false 111 test_kvi2 2 test_kvi2_idx key 2 v ASC false 114 test_uwi_parent 1 test_uwi_parent_pkey key 2 rowid ASC false 115 test_uwi_child 1 test_uwi_child_pkey key 2 rowid ASC false query ITIIITITT colnames SELECT * FROM crdb_internal.backward_dependencies WHERE descriptor_name LIKE 'test_%' ORDER BY descriptor_id, index_id, dependson_type, dependson_id, dependson_index_id ---- descriptor_id descriptor_name index_id column_id dependson_id dependson_type dependson_index_id dependson_name dependson_details 107 test_kvr1 NULL NULL 106 fk 1 test_kvr1_k_fkey NULL 108 test_kvr2 NULL NULL 106 fk 1 test_kvr2_v_fkey NULL 109 test_kvr3 NULL NULL 106 fk 2 test_kvr3_v_fkey NULL 112 test_v1 NULL NULL 106 view NULL NULL NULL 113 test_v2 NULL NULL 112 view NULL NULL NULL 115 test_uwi_child NULL NULL 114 fk 0 test_uwi_child_a_fkey NULL query ITIITITT colnames SELECT * FROM crdb_internal.forward_dependencies WHERE descriptor_name LIKE 
'test_%' ORDER BY descriptor_id, index_id, dependedonby_type, dependedonby_id, dependedonby_index_id ---- descriptor_id descriptor_name index_id dependedonby_id dependedonby_type dependedonby_index_id dependedonby_name dependedonby_details 106 test_kv NULL 107 fk NULL NULL NULL 106 test_kv NULL 108 fk NULL NULL NULL 106 test_kv NULL 109 fk NULL NULL NULL 106 test_kv NULL 112 view 0 NULL Columns: [2] 112 test_v1 NULL 113 view 0 NULL Columns: [1] 114 test_uwi_parent NULL 115 fk NULL NULL NULL # Checks view dependencies (#17306) statement ok CREATE TABLE moretest_t(k INT, v INT); CREATE VIEW moretest_v AS SELECT v FROM moretest_t WHERE FALSE query ITIIITITT colnames SELECT * FROM crdb_internal.backward_dependencies WHERE descriptor_name LIKE 'moretest_%' ORDER BY descriptor_id, index_id, dependson_type, dependson_id, dependson_index_id ---- descriptor_id descriptor_name index_id column_id dependson_id dependson_type dependson_index_id dependson_name dependson_details 117 moretest_v NULL NULL 116 view NULL NULL NULL query ITIITITT colnames SELECT * FROM crdb_internal.forward_dependencies WHERE descriptor_name LIKE 'moretest_%' ORDER BY descriptor_id, index_id, dependedonby_type, dependedonby_id, dependedonby_index_id ---- descriptor_id descriptor_name index_id dependedonby_id dependedonby_type dependedonby_index_id dependedonby_name dependedonby_details 116 moretest_t NULL 117 view 0 NULL Columns: [2] # Check sequence dependencies. statement ok CREATE SEQUENCE blog_posts_id_seq statement ok CREATE TABLE blog_posts (id INT PRIMARY KEY DEFAULT nextval('blog_posts_id_seq'), title text) query ITIIITITT colnames SELECT * FROM crdb_internal.backward_dependencies WHERE descriptor_name LIKE 'blog_posts' ---- descriptor_id descriptor_name index_id column_id dependson_id dependson_type dependson_index_id dependson_name dependson_details 119 blog_posts NULL 1 118 sequence NULL NULL NULL query ITIITITT colnames SELECT * FROM crdb_internal.forward_dependencies WHERE descriptor_name LIKE 'blog_posts%' ---- descriptor_id descriptor_name index_id dependedonby_id dependedonby_type dependedonby_index_id dependedonby_name dependedonby_details 118 blog_posts_id_seq NULL 119 sequence 0 NULL Columns: [1]
pkg/sql/logictest/testdata/logic_test/dependencies
0
https://github.com/cockroachdb/cockroach/commit/0d8e15c112246a11db9d4e370b5e9bcfeac82aa8
[ 0.0001751040981616825, 0.00016951175348367542, 0.00016317021800205112, 0.0001704189198790118, 0.0000033402616281819064 ]
{ "id": 1, "code_window": [ " ParentID 52, ParentSchemaID 29: relation \"vehicles\" (54): processed\n", " ParentID 52, ParentSchemaID 29: relation \"rides\" (55): constraint id was missing for constraint: FOREIGN KEY with name \"fk_city_ref_users\"\n", " ParentID 52, ParentSchemaID 29: relation \"rides\" (55): constraint id was missing for constraint: FOREIGN KEY with name \"fk_vehicle_city_ref_vehicles\"\n", " ParentID 52, ParentSchemaID 29: relation \"rides\" (55): processed\n" ], "labels": [ "keep", "replace", "replace", "keep" ], "after_edit": [ " ParentID 52, ParentSchemaID 29: relation \"rides\" (55): referenced database ID 52: referenced descriptor not found\n" ], "file_path": "pkg/cli/testdata/doctor/test_examine_zipdir_verbose", "type": "replace", "edit_start_line_idx": 43 }
debug doctor zipdir --verbose ---- debug doctor zipdir testdata/doctor/debugzip 21.11-52 --verbose reading testdata/doctor/debugzip/system.descriptor.txt reading testdata/doctor/debugzip/system.namespace.txt WARNING: errors occurred during the production of system.jobs.txt, contents may be missing or incomplete. reading testdata/doctor/debugzip/system.jobs.txt Examining 37 descriptors and 42 namespace entries... ParentID 0, ParentSchemaID 0: database "system" (1): processed ParentID 1, ParentSchemaID 29: relation "descriptor" (3): processed ParentID 1, ParentSchemaID 29: relation "users" (4): processed ParentID 1, ParentSchemaID 29: relation "zones" (5): processed ParentID 1, ParentSchemaID 29: relation "settings" (6): processed ParentID 1, ParentSchemaID 29: relation "tenants" (8): processed ParentID 1, ParentSchemaID 29: relation "lease" (11): processed ParentID 1, ParentSchemaID 29: relation "eventlog" (12): processed ParentID 1, ParentSchemaID 29: relation "rangelog" (13): processed ParentID 1, ParentSchemaID 29: relation "ui" (14): processed ParentID 1, ParentSchemaID 29: relation "jobs" (15): processed ParentID 1, ParentSchemaID 29: relation "web_sessions" (19): processed ParentID 1, ParentSchemaID 29: relation "table_statistics" (20): processed ParentID 1, ParentSchemaID 29: relation "locations" (21): processed ParentID 1, ParentSchemaID 29: relation "role_members" (23): processed ParentID 1, ParentSchemaID 29: relation "comments" (24): processed ParentID 1, ParentSchemaID 29: relation "replication_constraint_stats" (25): processed ParentID 1, ParentSchemaID 29: relation "replication_critical_localities" (26): processed ParentID 1, ParentSchemaID 29: relation "replication_stats" (27): processed ParentID 1, ParentSchemaID 29: relation "reports_meta" (28): processed ParentID 1, ParentSchemaID 29: relation "namespace" (30): processed ParentID 1, ParentSchemaID 29: relation "protected_ts_meta" (31): processed ParentID 1, ParentSchemaID 29: relation "protected_ts_records" (32): processed ParentID 1, ParentSchemaID 29: relation "role_options" (33): processed ParentID 1, ParentSchemaID 29: relation "statement_bundle_chunks" (34): processed ParentID 1, ParentSchemaID 29: relation "statement_diagnostics_requests" (35): processed ParentID 1, ParentSchemaID 29: relation "statement_diagnostics" (36): processed ParentID 1, ParentSchemaID 29: relation "scheduled_jobs" (37): processed ParentID 1, ParentSchemaID 29: relation "sqlliveness" (39): processed ParentID 0, ParentSchemaID 0: database "defaultdb" (50): processed ParentID 0, ParentSchemaID 0: database "postgres" (51): processed ParentID 52, ParentSchemaID 29: relation "users" (53): referenced database ID 52: referenced descriptor not found ParentID 52, ParentSchemaID 29: relation "users" (53): processed ParentID 52, ParentSchemaID 29: relation "vehicles" (54): constraint id was missing for constraint: FOREIGN KEY with name "fk_city_ref_users" ParentID 52, ParentSchemaID 29: relation "vehicles" (54): processed ParentID 52, ParentSchemaID 29: relation "rides" (55): constraint id was missing for constraint: FOREIGN KEY with name "fk_city_ref_users" ParentID 52, ParentSchemaID 29: relation "rides" (55): constraint id was missing for constraint: FOREIGN KEY with name "fk_vehicle_city_ref_vehicles" ParentID 52, ParentSchemaID 29: relation "rides" (55): processed ParentID 52, ParentSchemaID 29: relation "vehicle_location_histories" (56): constraint id was missing for constraint: FOREIGN KEY with name "fk_city_ref_rides" ParentID 52, ParentSchemaID 
29: relation "vehicle_location_histories" (56): processed ParentID 52, ParentSchemaID 29: relation "promo_codes" (57): referenced database ID 52: referenced descriptor not found ParentID 52, ParentSchemaID 29: relation "promo_codes" (57): processed ParentID 52, ParentSchemaID 29: relation "user_promo_codes" (58): constraint id was missing for constraint: FOREIGN KEY with name "fk_city_ref_users" ParentID 52, ParentSchemaID 29: relation "user_promo_codes" (58): processed ParentID 0, ParentSchemaID 0: namespace entry "defaultdb" (50): processed ParentID 0, ParentSchemaID 0: namespace entry "movr" (52): descriptor not found ParentID 0, ParentSchemaID 0: namespace entry "postgres" (51): processed ParentID 0, ParentSchemaID 0: namespace entry "system" (1): processed ParentID 1, ParentSchemaID 0: namespace entry "public" (29): processed ParentID 1, ParentSchemaID 29: namespace entry "comments" (24): processed ParentID 1, ParentSchemaID 29: namespace entry "descriptor" (3): processed ParentID 1, ParentSchemaID 29: namespace entry "eventlog" (12): processed ParentID 1, ParentSchemaID 29: namespace entry "jobs" (15): processed ParentID 1, ParentSchemaID 29: namespace entry "lease" (11): processed ParentID 1, ParentSchemaID 29: namespace entry "locations" (21): processed ParentID 1, ParentSchemaID 29: namespace entry "namespace" (30): processed ParentID 1, ParentSchemaID 29: namespace entry "protected_ts_meta" (31): processed ParentID 1, ParentSchemaID 29: namespace entry "protected_ts_records" (32): processed ParentID 1, ParentSchemaID 29: namespace entry "rangelog" (13): processed ParentID 1, ParentSchemaID 29: namespace entry "replication_constraint_stats" (25): processed ParentID 1, ParentSchemaID 29: namespace entry "replication_critical_localities" (26): processed ParentID 1, ParentSchemaID 29: namespace entry "replication_stats" (27): processed ParentID 1, ParentSchemaID 29: namespace entry "reports_meta" (28): processed ParentID 1, ParentSchemaID 29: namespace entry "role_members" (23): processed ParentID 1, ParentSchemaID 29: namespace entry "role_options" (33): processed ParentID 1, ParentSchemaID 29: namespace entry "scheduled_jobs" (37): processed ParentID 1, ParentSchemaID 29: namespace entry "settings" (6): processed ParentID 1, ParentSchemaID 29: namespace entry "sqlliveness" (39): processed ParentID 1, ParentSchemaID 29: namespace entry "statement_bundle_chunks" (34): processed ParentID 1, ParentSchemaID 29: namespace entry "statement_diagnostics" (36): processed ParentID 1, ParentSchemaID 29: namespace entry "statement_diagnostics_requests" (35): processed ParentID 1, ParentSchemaID 29: namespace entry "table_statistics" (20): processed ParentID 1, ParentSchemaID 29: namespace entry "tenants" (8): processed ParentID 1, ParentSchemaID 29: namespace entry "ui" (14): processed ParentID 1, ParentSchemaID 29: namespace entry "users" (4): processed ParentID 1, ParentSchemaID 29: namespace entry "web_sessions" (19): processed ParentID 1, ParentSchemaID 29: namespace entry "zones" (5): processed ParentID 50, ParentSchemaID 0: namespace entry "public" (29): processed ParentID 51, ParentSchemaID 0: namespace entry "public" (29): processed ParentID 52, ParentSchemaID 0: namespace entry "public" (29): processed ParentID 52, ParentSchemaID 29: namespace entry "promo_codes" (57): processed ParentID 52, ParentSchemaID 29: namespace entry "rides" (55): processed ParentID 52, ParentSchemaID 29: namespace entry "user_promo_codes" (58): processed ParentID 52, ParentSchemaID 29: namespace entry "users" 
(53): processed ParentID 52, ParentSchemaID 29: namespace entry "vehicle_location_histories" (56): processed ParentID 52, ParentSchemaID 29: namespace entry "vehicles" (54): processed Examining 2 jobs... Processing job 587337426939772929 Processing job 587337426984566785 job 587337426984566785: running schema change GC refers to missing table descriptor(s) [59]; existing descriptors that still need to be dropped []; job safe to delete: true. ERROR: validation failed
pkg/cli/testdata/doctor/test_examine_zipdir_verbose
1
https://github.com/cockroachdb/cockroach/commit/0d8e15c112246a11db9d4e370b5e9bcfeac82aa8
[ 0.9944576025009155, 0.10318175703287125, 0.00020600168500095606, 0.0011755257146432996, 0.2971707880496979 ]
{ "id": 1, "code_window": [ " ParentID 52, ParentSchemaID 29: relation \"vehicles\" (54): processed\n", " ParentID 52, ParentSchemaID 29: relation \"rides\" (55): constraint id was missing for constraint: FOREIGN KEY with name \"fk_city_ref_users\"\n", " ParentID 52, ParentSchemaID 29: relation \"rides\" (55): constraint id was missing for constraint: FOREIGN KEY with name \"fk_vehicle_city_ref_vehicles\"\n", " ParentID 52, ParentSchemaID 29: relation \"rides\" (55): processed\n" ], "labels": [ "keep", "replace", "replace", "keep" ], "after_edit": [ " ParentID 52, ParentSchemaID 29: relation \"rides\" (55): referenced database ID 52: referenced descriptor not found\n" ], "file_path": "pkg/cli/testdata/doctor/test_examine_zipdir_verbose", "type": "replace", "edit_start_line_idx": 43 }
// Copyright 2020 The Cockroach Authors. // // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt. // // As of the Change Date specified in that file, in accordance with // the Business Source License, use of this software will be governed // by the Apache License, Version 2.0, included in the file // licenses/APL.txt. import { analytics } from "src/redux/analytics"; export function trackTerminateSession() { const boundTrack = analytics.track.bind(analytics); (() => { boundTrack({ event: "Terminate Session", }); })(); } export function trackTerminateQuery() { const boundTrack = analytics.track.bind(analytics); (() => { boundTrack({ event: "Terminate Query", }); })(); }
pkg/ui/workspaces/db-console/src/util/analytics/trackTerminate.tsx
0
https://github.com/cockroachdb/cockroach/commit/0d8e15c112246a11db9d4e370b5e9bcfeac82aa8
[ 0.00017951583140529692, 0.00017653295071795583, 0.00017389531421940774, 0.00017618767742533237, 0.000002307517888766597 ]
{ "id": 1, "code_window": [ " ParentID 52, ParentSchemaID 29: relation \"vehicles\" (54): processed\n", " ParentID 52, ParentSchemaID 29: relation \"rides\" (55): constraint id was missing for constraint: FOREIGN KEY with name \"fk_city_ref_users\"\n", " ParentID 52, ParentSchemaID 29: relation \"rides\" (55): constraint id was missing for constraint: FOREIGN KEY with name \"fk_vehicle_city_ref_vehicles\"\n", " ParentID 52, ParentSchemaID 29: relation \"rides\" (55): processed\n" ], "labels": [ "keep", "replace", "replace", "keep" ], "after_edit": [ " ParentID 52, ParentSchemaID 29: relation \"rides\" (55): referenced database ID 52: referenced descriptor not found\n" ], "file_path": "pkg/cli/testdata/doctor/test_examine_zipdir_verbose", "type": "replace", "edit_start_line_idx": 43 }
pretty # # Generate code for interesting rule. # [Relational] define Select { # Input comment. Input RelExpr Filters FiltersExpr } [Relational, Join, JoinNonApply] define InnerJoin { Left RelExpr Right RelExpr On FiltersExpr } [Scalar, Bool, List] define Filters { } [Scalar, Bool, ListItem, ScalarProps] define FiltersItem { Condition ScalarExpr } # --- # Unattached comment. # --- [PushSelectIntoJoinLeft, Normalize] (Select | Other $input:(InnerJoin | InnerJoinApply | SomethingElse | Many | Things | LooooooooongLine | Much | More | Than | EightyChars $left:3 $right:* $on:* ) $filters:[ ... $item:* & (IsBoundBy $item $leftCols:(OutputCols $left)) ... ] ) => (Select ((OpName $input) (Select $left (ExtractBoundConditions $filters $leftCols) ) $right $on ) (ExtractUnboundConditions $filters $leftCols) ) [TestSingleLine] (Select $input:(InnerJoin | InnerJoinApply $left:* $right:* $on:*)) => (False) [TestNestedAnd] (Select $right:* & (HasOuterCols $right) & ^(GroupBy | DistinctOn $input:* $aggregations:* $groupingPrivate:*) & (IsUnorderedGrouping $groupingPrivate) $left: * & (Blah) ) => (False) [TestLet] (Select $input:* $filters:* & (Let ($a):(Foo $input) $a) & (Let ($a $b $c $ok):(SplitFilters $input $filters) $ok) & (Let ($filtersA $filtersB $filtersC $filtersD $filtersE $ok):(SplitFilters $input $filters) $ok) & (Let ($a $b $c $ok):(SplitFilters $input $filters $long $arg $list) $ok) & (Let ($filtersA $filtersB $filtersC $ok):(SplitFilters $input $filters $long $arg $list $longer) $ok) & (IsValid (Let ($filtersX $filtersY):(SplitFilters $input $filters) $filtersX)) & (OuterFunc (InnerFunc (Let ($foo $bar):(SplitFilters $input $filters) $foo))) & (Let ($foo $bar $baz):(Split (Let ($foo $bar $baz):(SplitAgain $a $b $c) $a)) $foo) ) => (False) ---- ---- # # Generate code for interesting rule. # [Relational] define Select { # Input comment. Input RelExpr Filters FiltersExpr } [Relational, Join, JoinNonApply] define InnerJoin { Left RelExpr Right RelExpr On FiltersExpr } [Scalar, Bool, List] define Filters { } [Scalar, Bool, ListItem, ScalarProps] define FiltersItem { Condition ScalarExpr } # --- # Unattached comment. # --- [PushSelectIntoJoinLeft, Normalize] (Select | Other $input:(InnerJoin | InnerJoinApply | SomethingElse | Many | Things | LooooooooongLine | Much | More | Than | EightyChars $left:3 $right:* $on:* ) $filters:[ ... $item:* & (IsBoundBy $item $leftCols:(OutputCols $left)) ... 
] ) => (Select ((OpName $input) (Select $left (ExtractBoundConditions $filters $leftCols) ) $right $on ) (ExtractUnboundConditions $filters $leftCols) ) [TestSingleLine] (Select $input:(InnerJoin | InnerJoinApply $left:* $right:* $on:*) ) => (False) [TestNestedAnd] (Select $right:* & (HasOuterCols $right) & ^(GroupBy | DistinctOn $input:* $aggregations:* $groupingPrivate:* ) & (IsUnorderedGrouping $groupingPrivate) $left:* & (Blah) ) => (False) [TestLet] (Select $input:* $filters:* & (Let ($a):(Foo $input) $a) & (Let ($a $b $c $ok):(SplitFilters $input $filters) $ok) & (Let ( $filtersA $filtersB $filtersC $filtersD $filtersE $ok ):(SplitFilters $input $filters) $ok ) & (Let ($a $b $c $ok):(SplitFilters $input $filters $long $arg $list ) $ok ) & (Let ($filtersA $filtersB $filtersC $ok):(SplitFilters $input $filters $long $arg $list $longer ) $ok ) & (IsValid (Let ($filtersX $filtersY):(SplitFilters $input $filters ) $filtersX ) ) & (OuterFunc (InnerFunc (Let ($foo $bar):(SplitFilters $input $filters) $foo ) ) ) & (Let ($foo $bar $baz):(Split (Let ($foo $bar $baz):(SplitAgain $a $b $c) $a) ) $foo ) ) => (False) ---- ---- pretty [Short] (R) => (O) ---- [Short] (R) => (O) # The closing ")" should not be printed on it's own line if the result, "$ok", # is not printed on it's own line. pretty [FoldBinary, Normalize] (Binary $left:* & (IsConstValueOrGroupOfConstValues $left) $right:* & (IsConstValueOrGroupOfConstValues $right) & (Let ($result $ok):(FoldBinary (OpName) $left $right) $ok ) ) => $result ---- [FoldBinary, Normalize] (Binary $left:* & (IsConstValueOrGroupOfConstValues $left) $right:* & (IsConstValueOrGroupOfConstValues $right) & (Let ($result $ok):(FoldBinary (OpName) $left $right) $ok ) ) => $result
pkg/sql/opt/optgen/cmd/optfmt/testdata/test
0
https://github.com/cockroachdb/cockroach/commit/0d8e15c112246a11db9d4e370b5e9bcfeac82aa8
[ 0.00017786974785849452, 0.00017261842731386423, 0.00016348386998288333, 0.00017237701104022563, 0.000002977836857098737 ]
{ "id": 1, "code_window": [ " ParentID 52, ParentSchemaID 29: relation \"vehicles\" (54): processed\n", " ParentID 52, ParentSchemaID 29: relation \"rides\" (55): constraint id was missing for constraint: FOREIGN KEY with name \"fk_city_ref_users\"\n", " ParentID 52, ParentSchemaID 29: relation \"rides\" (55): constraint id was missing for constraint: FOREIGN KEY with name \"fk_vehicle_city_ref_vehicles\"\n", " ParentID 52, ParentSchemaID 29: relation \"rides\" (55): processed\n" ], "labels": [ "keep", "replace", "replace", "keep" ], "after_edit": [ " ParentID 52, ParentSchemaID 29: relation \"rides\" (55): referenced database ID 52: referenced descriptor not found\n" ], "file_path": "pkg/cli/testdata/doctor/test_examine_zipdir_verbose", "type": "replace", "edit_start_line_idx": 43 }
// Copyright 2017 The Cockroach Authors. // // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt. // // As of the Change Date specified in that file, in accordance with // the Business Source License, use of this software will be governed // by the Apache License, Version 2.0, included in the file // licenses/APL.txt. /* Package fsm provides an interface for defining and working with finite-state machines. The package is split into two main types: Transitions and Machine. Transitions is an immutable State graph with Events acting as the directed edges between different States. The graph is built by calling Compile on a Pattern, which is meant to be done at init time. This pattern is a mapping from current States to Events that may be applied on those states to resulting Transitions. The pattern supports pattern matching on States and Events using wildcards and variable bindings. To add new transitions to the graph, simply adjust the Pattern provided to Compile. Transitions are not used directly after creation, instead, they're used by Machine instances. Machine is an instantiation of a finite-state machine. It is given a Transitions graph when it is created to specify its State graph. Since the Transition graph is itself state-less, multiple Machines can be powered by the same graph simultaneously. The Machine has an Apply(Event) method, which applies the provided event to its current state. This does two things: 1. It may move the current State to a new State, according to the Transitions graph. 2. It may apply an Action function on the Machine's ExtendedState, which is extra state in a Machine that does not contribute to state transition decisions, but that can be affected by a state transition. See example_test.go for a full working example of a state machine with an associated set of states and events. This package encourages the Pattern to be declared as a map literal. When declaring this literal, be careful to not declare two equal keys: they'll result in the second overwriting the first with no warning because of how Go deals with map literals. Note that keys that are not technically equal, but where one is a superset of the other, will work as intended. E.g. the following is permitted: Compile(Pattern{ stateOpen{retryIntent: Any} { eventTxnFinish{}: {...} } stateOpen{retryIntent: True} { eventRetriableErr{}: {...} } Members of this package are accessed frequently when implementing a state machine. For that reason, it is encouraged to dot-import this package in the file with the transitions Pattern. The respective file should be kept small and named <name>_fsm.go; our linter doesn't complain about dot-imports in such files. */ package fsm
pkg/util/fsm/doc.go
0
https://github.com/cockroachdb/cockroach/commit/0d8e15c112246a11db9d4e370b5e9bcfeac82aa8
[ 0.0001794389681890607, 0.00017095690418500453, 0.00016543194942642003, 0.00017029163427650928, 0.000004505441665969556 ]
{ "id": 2, "code_window": [ " ParentID 52, ParentSchemaID 29: relation \"rides\" (55): processed\n", " ParentID 52, ParentSchemaID 29: relation \"vehicle_location_histories\" (56): constraint id was missing for constraint: FOREIGN KEY with name \"fk_city_ref_rides\"\n", " ParentID 52, ParentSchemaID 29: relation \"vehicle_location_histories\" (56): processed\n", " ParentID 52, ParentSchemaID 29: relation \"promo_codes\" (57): referenced database ID 52: referenced descriptor not found\n", " ParentID 52, ParentSchemaID 29: relation \"promo_codes\" (57): processed\n" ], "labels": [ "keep", "replace", "keep", "keep", "keep" ], "after_edit": [ " ParentID 52, ParentSchemaID 29: relation \"vehicle_location_histories\" (56): referenced database ID 52: referenced descriptor not found\n" ], "file_path": "pkg/cli/testdata/doctor/test_examine_zipdir_verbose", "type": "replace", "edit_start_line_idx": 46 }
// Copyright 2021 The Cockroach Authors. // // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt. // // As of the Change Date specified in that file, in accordance with // the Business Source License, use of this software will be governed // by the Apache License, Version 2.0, included in the file // licenses/APL.txt. package migrations_test import ( "context" "fmt" "testing" "github.com/cockroachdb/cockroach/pkg/base" "github.com/cockroachdb/cockroach/pkg/clusterversion" "github.com/cockroachdb/cockroach/pkg/keys" "github.com/cockroachdb/cockroach/pkg/server" "github.com/cockroachdb/cockroach/pkg/sql/catalog/catalogkeys" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" "github.com/cockroachdb/cockroach/pkg/sql/catalog/desctestutils" "github.com/cockroachdb/cockroach/pkg/sql/catalog/tabledesc" "github.com/cockroachdb/cockroach/pkg/testutils/sqlutils" "github.com/cockroachdb/cockroach/pkg/testutils/testcluster" "github.com/cockroachdb/cockroach/pkg/util/leaktest" "github.com/cockroachdb/cockroach/pkg/util/log" "github.com/stretchr/testify/require" ) // TestEnsureConstraintIDs tests that constraint IDs are added as expected. func TestEnsureConstraintIDs(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) // Start off with the version that did not support // constraint IDs. clusterArgs := base.TestClusterArgs{ ServerArgs: base.TestServerArgs{ Knobs: base.TestingKnobs{ Server: &server.TestingKnobs{ DisableAutomaticVersionUpgrade: 1, BinaryVersionOverride: clusterversion.ByKey( tabledesc.ConstraintIDsAddedToTableDescsVersion - 1), }, }, }, } c := keys.SystemSQLCodec ctx := context.Background() tc := testcluster.StartTestCluster(t, 1, clusterArgs) s := tc.Server(0) defer tc.Stopper().Stop(ctx) sqlDB := tc.ServerConn(0) tdb := sqlutils.MakeSQLRunner(sqlDB) // Create table with a primary key constraint. tdb.Exec(t, "CREATE TABLE t(name int primary key)") // Validate the comments on constraints are blocked. tdb.ExpectErr(t, "pq: cannot comment on constraint", "COMMENT ON CONSTRAINT \"t_pkey\" ON t IS 'primary_comment'") // Validate that we have a constraint ID due to post deserialization logic desc := desctestutils.TestingGetMutableExistingTableDescriptor(s.DB(), c, "defaultdb", "t") desc.PrimaryIndex.ConstraintID = 0 require.NoError(t, s.DB().Put( context.Background(), catalogkeys.MakeDescMetadataKey(keys.SystemSQLCodec, desc.GetID()), desc.DescriptorProto(), )) // Validate that the post serialization will recompute the constraint IDs // if they are missing. desc = desctestutils.TestingGetMutableExistingTableDescriptor(s.DB(), c, "defaultdb", "t") require.Equal(t, desc.PrimaryIndex.ConstraintID, descpb.ConstraintID(2)) // If we set both the constraint ID / next value to 0, then we will have // it assigned form scratch. desc.PrimaryIndex.ConstraintID = 0 desc.NextConstraintID = 0 require.NoError(t, s.DB().Put( context.Background(), catalogkeys.MakeDescMetadataKey(keys.SystemSQLCodec, desc.GetID()), desc.DescriptorProto(), )) // Validate that the descriptor is invalid, since the constraint IDs // are missing. tdb.CheckQueryResults(t, `SELECT strpos(desc_json, 'constraintId') > 0, strpos(desc_json, 'nextConstraintId') > 0 FROM ( SELECT jsonb_pretty( crdb_internal.pb_to_json( 'cockroach.sql.sqlbase.Descriptor', descriptor, false ) ) AS desc_json FROM system.descriptor WHERE id = `+ fmt.Sprintf("%d", desc.GetID())+ `);`, [][]string{{"false", "false"}}, ) // Migrate to the new cluster version. 
tdb.Exec(t, `SET CLUSTER SETTING version = $1`, clusterversion.ByKey(tabledesc.ConstraintIDsAddedToTableDescsVersion).String()) tdb.CheckQueryResultsRetry(t, "SHOW CLUSTER SETTING version", [][]string{{clusterversion.ByKey(tabledesc.ConstraintIDsAddedToTableDescsVersion).String()}}) // Validate the constraint IDs are populated. // Validate that the descriptor is invalid, since the constraint IDs // are missing. tdb.CheckQueryResults(t, `SELECT strpos(desc_json, 'constraintId') > 0, strpos(desc_json, 'nextConstraintId') > 0 FROM ( SELECT jsonb_pretty( crdb_internal.pb_to_json( 'cockroach.sql.sqlbase.Descriptor', descriptor, false ) ) AS desc_json FROM system.descriptor WHERE id = `+ fmt.Sprintf("%d", desc.GetID())+ `);`, [][]string{{"true", "true"}}, ) // Validate we can comment constraints. tdb.Exec(t, "COMMENT ON CONSTRAINT \"t_pkey\" ON t IS 'primary_comment'") }
pkg/migration/migrations/ensure_constraint_id_test.go
1
https://github.com/cockroachdb/cockroach/commit/0d8e15c112246a11db9d4e370b5e9bcfeac82aa8
[ 0.007629964500665665, 0.0007180050015449524, 0.00016343571769539267, 0.00017223868053406477, 0.00191744533367455 ]
{ "id": 2, "code_window": [ " ParentID 52, ParentSchemaID 29: relation \"rides\" (55): processed\n", " ParentID 52, ParentSchemaID 29: relation \"vehicle_location_histories\" (56): constraint id was missing for constraint: FOREIGN KEY with name \"fk_city_ref_rides\"\n", " ParentID 52, ParentSchemaID 29: relation \"vehicle_location_histories\" (56): processed\n", " ParentID 52, ParentSchemaID 29: relation \"promo_codes\" (57): referenced database ID 52: referenced descriptor not found\n", " ParentID 52, ParentSchemaID 29: relation \"promo_codes\" (57): processed\n" ], "labels": [ "keep", "replace", "keep", "keep", "keep" ], "after_edit": [ " ParentID 52, ParentSchemaID 29: relation \"vehicle_location_histories\" (56): referenced database ID 52: referenced descriptor not found\n" ], "file_path": "pkg/cli/testdata/doctor/test_examine_zipdir_verbose", "type": "replace", "edit_start_line_idx": 46 }
// Copyright 2021 The Cockroach Authors. // // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt. // // As of the Change Date specified in that file, in accordance with // the Business Source License, use of this software will be governed // by the Apache License, Version 2.0, included in the file // licenses/APL.txt. package log_test import ( "context" "testing" "github.com/cockroachdb/cockroach/pkg/settings/cluster" "github.com/cockroachdb/cockroach/pkg/util/log" "github.com/cockroachdb/cockroach/pkg/util/tracing" "github.com/cockroachdb/logtags" "github.com/stretchr/testify/require" ) func TestTrace(t *testing.T) { for _, tc := range []struct { name string init func(context.Context) (context.Context, *tracing.Span) check func(*testing.T, context.Context, tracing.Recording, *tracing.Tracer) }{ { name: "verbose", init: func(ctx context.Context) (context.Context, *tracing.Span) { tracer := tracing.NewTracer() sp := tracer.StartSpan("s", tracing.WithRecording(tracing.RecordingVerbose)) ctxWithSpan := tracing.ContextWithSpan(ctx, sp) return ctxWithSpan, sp }, check: func(t *testing.T, _ context.Context, rec tracing.Recording, _ *tracing.Tracer) { if err := tracing.CheckRecordedSpans(rec, ` span: s tags: _verbose=1 event: test1 event: test2 event: testerr event: log `); err != nil { t.Fatal(err) } }, }, { name: "zipkin", init: func(ctx context.Context) (context.Context, *tracing.Span) { tr := tracing.NewTracer() st := cluster.MakeTestingClusterSettings() tracing.ZipkinCollector.Override(ctx, &st.SV, "127.0.0.1:9000000") tr.Configure(ctx, &st.SV) return tr.StartSpanCtx(context.Background(), "foo") }, check: func(t *testing.T, ctx context.Context, _ tracing.Recording, tr *tracing.Tracer) { // This isn't quite a real end-to-end-check, but it is good enough // to give us confidence that we're really passing log events to // the span, and the tracing package in turn has tests that verify // that a span so configured will actually log them to the external // trace. require.True(t, tr.HasExternalSink()) require.True(t, log.HasSpanOrEvent(ctx)) require.True(t, log.ExpensiveLogEnabled(ctx, 0 /* level */)) }, }, } { t.Run(tc.name, func(t *testing.T) { ctx := context.Background() // Events to context without a trace should be no-ops. log.Event(ctx, "should-not-show-up") ctxWithSpan, sp := tc.init(ctx) log.Event(ctxWithSpan, "test1") log.VEvent(ctxWithSpan, log.NoLogV(), "test2") log.VErrEvent(ctxWithSpan, log.NoLogV(), "testerr") log.Info(ctxWithSpan, "log") // Events to parent context should still be no-ops. log.Event(ctx, "should-not-show-up") tr := sp.Tracer() tc.check(t, ctxWithSpan, sp.FinishAndGetRecording(tracing.RecordingVerbose), tr) }) } } func TestTraceWithTags(t *testing.T) { ctx := context.Background() ctx = logtags.AddTag(ctx, "tag", 1) tracer := tracing.NewTracer() sp := tracer.StartSpan("s", tracing.WithRecording(tracing.RecordingVerbose)) ctxWithSpan := tracing.ContextWithSpan(ctx, sp) log.Event(ctxWithSpan, "test1") log.VEvent(ctxWithSpan, log.NoLogV(), "test2") log.VErrEvent(ctxWithSpan, log.NoLogV(), "testerr") log.Info(ctxWithSpan, "log") if err := tracing.CheckRecordedSpans(sp.FinishAndGetRecording(tracing.RecordingVerbose), ` span: s tags: _verbose=1 event: [tag=1] test1 event: [tag=1] test2 event: [tag=1] testerr event: [tag=1] log `); err != nil { t.Fatal(err) } }
pkg/util/log/trace_client_test.go
0
https://github.com/cockroachdb/cockroach/commit/0d8e15c112246a11db9d4e370b5e9bcfeac82aa8
[ 0.00017906841821968555, 0.00017241528257727623, 0.00016840978059917688, 0.00017212201782967895, 0.00000267729046754539 ]
{ "id": 2, "code_window": [ " ParentID 52, ParentSchemaID 29: relation \"rides\" (55): processed\n", " ParentID 52, ParentSchemaID 29: relation \"vehicle_location_histories\" (56): constraint id was missing for constraint: FOREIGN KEY with name \"fk_city_ref_rides\"\n", " ParentID 52, ParentSchemaID 29: relation \"vehicle_location_histories\" (56): processed\n", " ParentID 52, ParentSchemaID 29: relation \"promo_codes\" (57): referenced database ID 52: referenced descriptor not found\n", " ParentID 52, ParentSchemaID 29: relation \"promo_codes\" (57): processed\n" ], "labels": [ "keep", "replace", "keep", "keep", "keep" ], "after_edit": [ " ParentID 52, ParentSchemaID 29: relation \"vehicle_location_histories\" (56): referenced database ID 52: referenced descriptor not found\n" ], "file_path": "pkg/cli/testdata/doctor/test_examine_zipdir_verbose", "type": "replace", "edit_start_line_idx": 46 }
// Copyright 2017 The Cockroach Authors. // // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt. // // As of the Change Date specified in that file, in accordance with // the Business Source License, use of this software will be governed // by the Apache License, Version 2.0, included in the file // licenses/APL.txt. package batcheval import ( "context" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/batcheval/result" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/settings" "github.com/cockroachdb/cockroach/pkg/storage" "github.com/cockroachdb/cockroach/pkg/storage/enginepb" "github.com/cockroachdb/cockroach/pkg/util" "github.com/cockroachdb/cockroach/pkg/util/hlc" "github.com/cockroachdb/cockroach/pkg/util/log" "github.com/cockroachdb/cockroach/pkg/util/protoutil" "github.com/cockroachdb/cockroach/pkg/util/uuid" "github.com/cockroachdb/errors" ) // refreshRangeTBIEnabled controls whether we use a TBI during ranged refreshes. var refreshRangeTBIEnabled = settings.RegisterBoolSetting( settings.SystemOnly, "kv.refresh_range.time_bound_iterators.enabled", "use time-bound iterators when performing ranged transaction refreshes", util.ConstantWithMetamorphicTestBool("kv.refresh_range.time_bound_iterators.enabled", true), ) func init() { RegisterReadOnlyCommand(roachpb.RefreshRange, DefaultDeclareKeys, RefreshRange) } // RefreshRange checks whether the key range specified has any values written in // the interval (args.RefreshFrom, header.Timestamp]. func RefreshRange( ctx context.Context, reader storage.Reader, cArgs CommandArgs, resp roachpb.Response, ) (result.Result, error) { args := cArgs.Args.(*roachpb.RefreshRangeRequest) h := cArgs.Header if h.Txn == nil { return result.Result{}, errors.AssertionFailedf("no transaction specified to %s", args.Method()) } // We're going to refresh up to the transaction's read timestamp. if h.Timestamp != h.Txn.WriteTimestamp { // We're expecting the read and write timestamp to have converged before the // Refresh request was sent. log.Fatalf(ctx, "expected provisional commit ts %s == read ts %s. txn: %s", h.Timestamp, h.Txn.WriteTimestamp, h.Txn) } refreshTo := h.Timestamp refreshFrom := args.RefreshFrom if refreshFrom.IsEmpty() { return result.Result{}, errors.AssertionFailedf("empty RefreshFrom: %s", args) } log.VEventf(ctx, 2, "refresh %s @[%s-%s]", args.Span(), refreshFrom, refreshTo) tbi := refreshRangeTBIEnabled.Get(&cArgs.EvalCtx.ClusterSettings().SV) return result.Result{}, refreshRange(reader, tbi, args.Span(), refreshFrom, refreshTo, h.Txn.ID) } // refreshRange iterates over the specified key span until it discovers a value // written after the refreshFrom timestamp but before or at the refreshTo // timestamp. The iteration observes MVCC tombstones, which must be considered // as conflicts during a refresh. The iteration also observes intents, and any // intent that is not owned by the specified txn ID is considered a conflict. // // If such a conflict is found, the function returns an error. Otherwise, no // error is returned. func refreshRange( reader storage.Reader, timeBoundIterator bool, span roachpb.Span, refreshFrom, refreshTo hlc.Timestamp, txnID uuid.UUID, ) error { // Construct an incremental iterator with the desired time bounds. Incremental // iterators will emit MVCC tombstones by default and will emit intents when // configured to do so (see IntentPolicy). 
iter := storage.NewMVCCIncrementalIterator(reader, storage.MVCCIncrementalIterOptions{ EnableTimeBoundIteratorOptimization: timeBoundIterator, EndKey: span.EndKey, StartTime: refreshFrom, // exclusive EndTime: refreshTo, // inclusive IntentPolicy: storage.MVCCIncrementalIterIntentPolicyEmit, }) defer iter.Close() var meta enginepb.MVCCMetadata iter.SeekGE(storage.MakeMVCCMetadataKey(span.Key)) for { if ok, err := iter.Valid(); err != nil { return err } else if !ok { break } key := iter.Key() if !key.IsValue() { // Found an intent. Check whether it is owned by this transaction. // If so, proceed with iteration. Otherwise, return an error. if err := protoutil.Unmarshal(iter.UnsafeValue(), &meta); err != nil { return errors.Wrapf(err, "unmarshaling mvcc meta: %v", key) } if meta.IsInline() { // Ignore inline MVCC metadata. We don't expect to see this in practice // when performing a refresh of an MVCC keyspace. iter.Next() continue } if meta.Txn.ID == txnID { // Ignore the transaction's own intent and skip past the corresponding // provisional key-value. To do this, iterate to the provisional // key-value, validate its timestamp, then iterate again. iter.Next() if ok, err := iter.Valid(); err != nil { return errors.Wrap(err, "iterating to provisional value for intent") } else if !ok { return errors.Errorf("expected provisional value for intent") } if !meta.Timestamp.ToTimestamp().EqOrdering(iter.UnsafeKey().Timestamp) { return errors.Errorf("expected provisional value for intent with ts %s, found %s", meta.Timestamp, iter.UnsafeKey().Timestamp) } iter.Next() continue } return roachpb.NewRefreshFailedError(roachpb.RefreshFailedError_REASON_INTENT, key.Key, meta.Txn.WriteTimestamp) } // If a committed value is found, return an error. return roachpb.NewRefreshFailedError(roachpb.RefreshFailedError_REASON_COMMITTED_VALUE, key.Key, key.Timestamp) } return nil }
pkg/kv/kvserver/batcheval/cmd_refresh_range.go
0
https://github.com/cockroachdb/cockroach/commit/0d8e15c112246a11db9d4e370b5e9bcfeac82aa8
[ 0.00025433406699448824, 0.00017721173935569823, 0.00016423885244876146, 0.00017195970576722175, 0.000020856499759247527 ]
{ "id": 2, "code_window": [ " ParentID 52, ParentSchemaID 29: relation \"rides\" (55): processed\n", " ParentID 52, ParentSchemaID 29: relation \"vehicle_location_histories\" (56): constraint id was missing for constraint: FOREIGN KEY with name \"fk_city_ref_rides\"\n", " ParentID 52, ParentSchemaID 29: relation \"vehicle_location_histories\" (56): processed\n", " ParentID 52, ParentSchemaID 29: relation \"promo_codes\" (57): referenced database ID 52: referenced descriptor not found\n", " ParentID 52, ParentSchemaID 29: relation \"promo_codes\" (57): processed\n" ], "labels": [ "keep", "replace", "keep", "keep", "keep" ], "after_edit": [ " ParentID 52, ParentSchemaID 29: relation \"vehicle_location_histories\" (56): referenced database ID 52: referenced descriptor not found\n" ], "file_path": "pkg/cli/testdata/doctor/test_examine_zipdir_verbose", "type": "replace", "edit_start_line_idx": 46 }
// Copyright 2016 The Cockroach Authors. // // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt. // // As of the Change Date specified in that file, in accordance with // the Business Source License, use of this software will be governed // by the Apache License, Version 2.0, included in the file // licenses/APL.txt. package cli import ( "fmt" "os" "strings" "github.com/cockroachdb/cockroach/pkg/workload" // Register the relevant examples _ "github.com/cockroachdb/cockroach/pkg/workload/examples" "github.com/cockroachdb/cockroach/pkg/workload/workloadsql" "github.com/spf13/cobra" ) var genExamplesCmd = &cobra.Command{ Use: "example-data", Short: "generate example SQL code suitable for use with CockroachDB", Long: `This command generates example SQL code that shows various CockroachDB features and is suitable to populate an example database for demonstration and education purposes. `, } func init() { for _, meta := range workload.Registered() { gen := meta.New() genExampleCmd := &cobra.Command{ Use: meta.Name, Short: meta.Description, Args: cobra.NoArgs, RunE: func(cmd *cobra.Command, args []string) error { runGenExamplesCmd(gen) return nil }, } if f, ok := gen.(workload.Flagser); ok { genExampleCmd.Flags().AddFlagSet(f.Flags().FlagSet) } genExamplesCmd.AddCommand(genExampleCmd) } } func runGenExamplesCmd(gen workload.Generator) { w := os.Stdout meta := gen.Meta() fmt.Fprintf(w, "CREATE DATABASE IF NOT EXISTS %s;\n", meta.Name) fmt.Fprintf(w, "SET DATABASE=%s;\n", meta.Name) for _, table := range gen.Tables() { fmt.Fprintf(w, "DROP TABLE IF EXISTS \"%s\";\n", table.Name) fmt.Fprintf(w, "CREATE TABLE \"%s\" %s;\n", table.Name, table.Schema) for rowIdx := 0; rowIdx < table.InitialRows.NumBatches; rowIdx++ { for _, row := range table.InitialRows.BatchRows(rowIdx) { rowTuple := strings.Join(workloadsql.StringTuple(row), `,`) fmt.Fprintf(w, "INSERT INTO \"%s\" VALUES (%s);\n", table.Name, rowTuple) } } } fmt.Fprint(w, footerComment) } const footerComment = `-- -- -- If you can see this message, you probably want to redirect the output of -- 'cockroach gen example-data' to a file, or pipe it as input to 'cockroach sql'. `
pkg/cli/examples.go
0
https://github.com/cockroachdb/cockroach/commit/0d8e15c112246a11db9d4e370b5e9bcfeac82aa8
[ 0.00017884709814097732, 0.000173564360011369, 0.00017025135457515717, 0.00017363969527650625, 0.0000024544390271330485 ]
{ "id": 3, "code_window": [ " ParentID 52, ParentSchemaID 29: relation \"vehicle_location_histories\" (56): processed\n", " ParentID 52, ParentSchemaID 29: relation \"promo_codes\" (57): referenced database ID 52: referenced descriptor not found\n", " ParentID 52, ParentSchemaID 29: relation \"promo_codes\" (57): processed\n", " ParentID 52, ParentSchemaID 29: relation \"user_promo_codes\" (58): constraint id was missing for constraint: FOREIGN KEY with name \"fk_city_ref_users\"\n", " ParentID 52, ParentSchemaID 29: relation \"user_promo_codes\" (58): processed\n", " ParentID 0, ParentSchemaID 0: namespace entry \"defaultdb\" (50): processed\n", " ParentID 0, ParentSchemaID 0: namespace entry \"movr\" (52): descriptor not found\n", " ParentID 0, ParentSchemaID 0: namespace entry \"postgres\" (51): processed\n", " ParentID 0, ParentSchemaID 0: namespace entry \"system\" (1): processed\n" ], "labels": [ "keep", "keep", "keep", "replace", "keep", "keep", "keep", "keep", "keep" ], "after_edit": [ " ParentID 52, ParentSchemaID 29: relation \"user_promo_codes\" (58): referenced database ID 52: referenced descriptor not found\n" ], "file_path": "pkg/cli/testdata/doctor/test_examine_zipdir_verbose", "type": "replace", "edit_start_line_idx": 50 }
// Copyright 2021 The Cockroach Authors. // // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt. // // As of the Change Date specified in that file, in accordance with // the Business Source License, use of this software will be governed // by the Apache License, Version 2.0, included in the file // licenses/APL.txt. package migrations_test import ( "context" "fmt" "testing" "github.com/cockroachdb/cockroach/pkg/base" "github.com/cockroachdb/cockroach/pkg/clusterversion" "github.com/cockroachdb/cockroach/pkg/keys" "github.com/cockroachdb/cockroach/pkg/server" "github.com/cockroachdb/cockroach/pkg/sql/catalog/catalogkeys" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" "github.com/cockroachdb/cockroach/pkg/sql/catalog/desctestutils" "github.com/cockroachdb/cockroach/pkg/sql/catalog/tabledesc" "github.com/cockroachdb/cockroach/pkg/testutils/sqlutils" "github.com/cockroachdb/cockroach/pkg/testutils/testcluster" "github.com/cockroachdb/cockroach/pkg/util/leaktest" "github.com/cockroachdb/cockroach/pkg/util/log" "github.com/stretchr/testify/require" ) // TestEnsureConstraintIDs tests that constraint IDs are added as expected. func TestEnsureConstraintIDs(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) // Start off with the version that did not support // constraint IDs. clusterArgs := base.TestClusterArgs{ ServerArgs: base.TestServerArgs{ Knobs: base.TestingKnobs{ Server: &server.TestingKnobs{ DisableAutomaticVersionUpgrade: 1, BinaryVersionOverride: clusterversion.ByKey( tabledesc.ConstraintIDsAddedToTableDescsVersion - 1), }, }, }, } c := keys.SystemSQLCodec ctx := context.Background() tc := testcluster.StartTestCluster(t, 1, clusterArgs) s := tc.Server(0) defer tc.Stopper().Stop(ctx) sqlDB := tc.ServerConn(0) tdb := sqlutils.MakeSQLRunner(sqlDB) // Create table with a primary key constraint. tdb.Exec(t, "CREATE TABLE t(name int primary key)") // Validate the comments on constraints are blocked. tdb.ExpectErr(t, "pq: cannot comment on constraint", "COMMENT ON CONSTRAINT \"t_pkey\" ON t IS 'primary_comment'") // Validate that we have a constraint ID due to post-deserialization logic. desc := desctestutils.TestingGetMutableExistingTableDescriptor(s.DB(), c, "defaultdb", "t") desc.PrimaryIndex.ConstraintID = 0 require.NoError(t, s.DB().Put( context.Background(), catalogkeys.MakeDescMetadataKey(keys.SystemSQLCodec, desc.GetID()), desc.DescriptorProto(), )) // Validate that the post-deserialization logic will recompute the constraint // IDs if they are missing. desc = desctestutils.TestingGetMutableExistingTableDescriptor(s.DB(), c, "defaultdb", "t") require.Equal(t, desc.PrimaryIndex.ConstraintID, descpb.ConstraintID(2)) // If we set both the constraint ID / next value to 0, then we will have // it assigned from scratch. desc.PrimaryIndex.ConstraintID = 0 desc.NextConstraintID = 0 require.NoError(t, s.DB().Put( context.Background(), catalogkeys.MakeDescMetadataKey(keys.SystemSQLCodec, desc.GetID()), desc.DescriptorProto(), )) // Validate that the descriptor is invalid, since the constraint IDs // are missing. tdb.CheckQueryResults(t, `SELECT strpos(desc_json, 'constraintId') > 0, strpos(desc_json, 'nextConstraintId') > 0 FROM ( SELECT jsonb_pretty( crdb_internal.pb_to_json( 'cockroach.sql.sqlbase.Descriptor', descriptor, false ) ) AS desc_json FROM system.descriptor WHERE id = `+ fmt.Sprintf("%d", desc.GetID())+ `);`, [][]string{{"false", "false"}}, ) // Migrate to the new cluster version. 
tdb.Exec(t, `SET CLUSTER SETTING version = $1`, clusterversion.ByKey(tabledesc.ConstraintIDsAddedToTableDescsVersion).String()) tdb.CheckQueryResultsRetry(t, "SHOW CLUSTER SETTING version", [][]string{{clusterversion.ByKey(tabledesc.ConstraintIDsAddedToTableDescsVersion).String()}}) // Validate that the constraint IDs are now populated, i.e. the descriptor // encodes both constraintId and nextConstraintId. tdb.CheckQueryResults(t, `SELECT strpos(desc_json, 'constraintId') > 0, strpos(desc_json, 'nextConstraintId') > 0 FROM ( SELECT jsonb_pretty( crdb_internal.pb_to_json( 'cockroach.sql.sqlbase.Descriptor', descriptor, false ) ) AS desc_json FROM system.descriptor WHERE id = `+ fmt.Sprintf("%d", desc.GetID())+ `);`, [][]string{{"true", "true"}}, ) // Validate we can comment constraints. tdb.Exec(t, "COMMENT ON CONSTRAINT \"t_pkey\" ON t IS 'primary_comment'") }
pkg/migration/migrations/ensure_constraint_id_test.go
1
https://github.com/cockroachdb/cockroach/commit/0d8e15c112246a11db9d4e370b5e9bcfeac82aa8
[ 0.0038828605320304632, 0.00044836054439656436, 0.00016177038196474314, 0.00016700627747923136, 0.000953742244746536 ]
{ "id": 3, "code_window": [ " ParentID 52, ParentSchemaID 29: relation \"vehicle_location_histories\" (56): processed\n", " ParentID 52, ParentSchemaID 29: relation \"promo_codes\" (57): referenced database ID 52: referenced descriptor not found\n", " ParentID 52, ParentSchemaID 29: relation \"promo_codes\" (57): processed\n", " ParentID 52, ParentSchemaID 29: relation \"user_promo_codes\" (58): constraint id was missing for constraint: FOREIGN KEY with name \"fk_city_ref_users\"\n", " ParentID 52, ParentSchemaID 29: relation \"user_promo_codes\" (58): processed\n", " ParentID 0, ParentSchemaID 0: namespace entry \"defaultdb\" (50): processed\n", " ParentID 0, ParentSchemaID 0: namespace entry \"movr\" (52): descriptor not found\n", " ParentID 0, ParentSchemaID 0: namespace entry \"postgres\" (51): processed\n", " ParentID 0, ParentSchemaID 0: namespace entry \"system\" (1): processed\n" ], "labels": [ "keep", "keep", "keep", "replace", "keep", "keep", "keep", "keep", "keep" ], "after_edit": [ " ParentID 52, ParentSchemaID 29: relation \"user_promo_codes\" (58): referenced database ID 52: referenced descriptor not found\n" ], "file_path": "pkg/cli/testdata/doctor/test_examine_zipdir_verbose", "type": "replace", "edit_start_line_idx": 50 }
// Copyright 2019 The Cockroach Authors. // // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt. // // As of the Change Date specified in that file, in accordance with // the Business Source License, use of this software will be governed // by the Apache License, Version 2.0, included in the file // licenses/APL.txt. package storage import ( "bytes" "context" "fmt" "math" "math/rand" "path/filepath" "testing" "github.com/cockroachdb/cockroach/pkg/keys" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/storage/enginepb" "github.com/cockroachdb/cockroach/pkg/testutils" "github.com/cockroachdb/cockroach/pkg/util/encoding" "github.com/cockroachdb/cockroach/pkg/util/hlc" "github.com/cockroachdb/cockroach/pkg/util/leaktest" "github.com/cockroachdb/cockroach/pkg/util/log" "github.com/cockroachdb/cockroach/pkg/util/protoutil" "github.com/cockroachdb/cockroach/pkg/util/randutil" "github.com/cockroachdb/cockroach/pkg/util/uuid" "github.com/cockroachdb/errors" "github.com/cockroachdb/pebble" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "golang.org/x/sync/errgroup" ) const all, latest = true, false func makeKVT(key roachpb.Key, value []byte, ts hlc.Timestamp) MVCCKeyValue { return MVCCKeyValue{Key: MVCCKey{Key: key, Timestamp: ts}, Value: value} } func makeKVTxn( key roachpb.Key, val []byte, ts hlc.Timestamp, ) (roachpb.Transaction, roachpb.Value, roachpb.Intent) { txnID := uuid.MakeV4() txnMeta := enginepb.TxnMeta{ Key: key, ID: txnID, Epoch: 1, WriteTimestamp: ts, } return roachpb.Transaction{ TxnMeta: txnMeta, ReadTimestamp: ts, }, roachpb.Value{ RawBytes: val, }, roachpb.MakeIntent(&txnMeta, key) } func intents(intents ...roachpb.Intent) []roachpb.Intent { return intents } func kvs(kvs ...MVCCKeyValue) []MVCCKeyValue { return kvs } func iterateExpectErr( e Engine, startKey, endKey roachpb.Key, startTime, endTime hlc.Timestamp, revisions bool, intents []roachpb.Intent, ) func(*testing.T) { return func(t *testing.T) { t.Helper() t.Run("aggregate-intents", func(t *testing.T) { assertExpectErrs(t, e, startKey, endKey, startTime, endTime, revisions, intents) }) t.Run("first-intent", func(t *testing.T) { assertExpectErr(t, e, startKey, endKey, startTime, endTime, revisions, intents[0]) }) t.Run("export-intents", func(t *testing.T) { assertExportedErrs(t, e, startKey, endKey, startTime, endTime, revisions, intents, false) }) t.Run("export-intents-tbi", func(t *testing.T) { assertExportedErrs(t, e, startKey, endKey, startTime, endTime, revisions, intents, true) }) } } func assertExpectErr( t *testing.T, e Engine, startKey, endKey roachpb.Key, startTime, endTime hlc.Timestamp, revisions bool, expectedIntent roachpb.Intent, ) { iter := NewMVCCIncrementalIterator(e, MVCCIncrementalIterOptions{ EndKey: endKey, StartTime: startTime, EndTime: endTime, }) defer iter.Close() var iterFn func() if revisions { iterFn = iter.Next } else { iterFn = iter.NextKey } for iter.SeekGE(MakeMVCCMetadataKey(startKey)); ; iterFn() { if ok, _ := iter.Valid(); !ok || iter.UnsafeKey().Key.Compare(endKey) >= 0 { break } // pass } _, err := iter.Valid() if intentErr := (*roachpb.WriteIntentError)(nil); errors.As(err, &intentErr) { if !expectedIntent.Key.Equal(intentErr.Intents[0].Key) { t.Fatalf("Expected intent key %v, but got %v", expectedIntent.Key, intentErr.Intents[0].Key) } } else { t.Fatalf("expected error with intent %v but got %v", expectedIntent, err) } } func assertExpectErrs( t *testing.T, e Engine, 
startKey, endKey roachpb.Key, startTime, endTime hlc.Timestamp, revisions bool, expectedIntents []roachpb.Intent, ) { iter := NewMVCCIncrementalIterator(e, MVCCIncrementalIterOptions{ EndKey: endKey, StartTime: startTime, EndTime: endTime, IntentPolicy: MVCCIncrementalIterIntentPolicyAggregate, }) defer iter.Close() var iterFn func() if revisions { iterFn = iter.Next } else { iterFn = iter.NextKey } for iter.SeekGE(MakeMVCCMetadataKey(startKey)); ; iterFn() { if ok, _ := iter.Valid(); !ok || iter.UnsafeKey().Key.Compare(endKey) >= 0 { break } // pass } if iter.NumCollectedIntents() != len(expectedIntents) { t.Fatalf("Expected %d intents but found %d", len(expectedIntents), iter.NumCollectedIntents()) } err := iter.TryGetIntentError() if intentErr := (*roachpb.WriteIntentError)(nil); errors.As(err, &intentErr) { for i := range expectedIntents { if !expectedIntents[i].Key.Equal(intentErr.Intents[i].Key) { t.Fatalf("%d intent key: got %v, expected %v", i, intentErr.Intents[i].Key, expectedIntents[i].Key) } if !expectedIntents[i].Txn.ID.Equal(intentErr.Intents[i].Txn.ID) { t.Fatalf("%d intent key: got %v, expected %v", i, intentErr.Intents[i].Txn.ID, expectedIntents[i].Txn.ID) } } } else { t.Fatalf("Expected roachpb.WriteIntentError, found %T", err) } } func assertExportedErrs( t *testing.T, e Engine, startKey, endKey roachpb.Key, startTime, endTime hlc.Timestamp, revisions bool, expectedIntents []roachpb.Intent, useTBI bool, ) { const big = 1 << 30 sstFile := &MemFile{} _, _, _, err := e.ExportMVCCToSst(context.Background(), ExportOptions{ StartKey: MVCCKey{Key: startKey}, EndKey: endKey, StartTS: startTime, EndTS: endTime, ExportAllRevisions: revisions, TargetSize: big, MaxSize: big, MaxIntents: uint64(MaxIntentsPerWriteIntentError.Default()), StopMidKey: false, UseTBI: useTBI, }, sstFile) require.Error(t, err) if intentErr := (*roachpb.WriteIntentError)(nil); errors.As(err, &intentErr) { for i := range expectedIntents { if !expectedIntents[i].Key.Equal(intentErr.Intents[i].Key) { t.Fatalf("%d intent key: got %v, expected %v", i, intentErr.Intents[i].Key, expectedIntents[i].Key) } if !expectedIntents[i].Txn.ID.Equal(intentErr.Intents[i].Txn.ID) { t.Fatalf("%d intent key: got %v, expected %v", i, intentErr.Intents[i].Txn.ID, expectedIntents[i].Txn.ID) } } } else { t.Fatalf("Expected roachpb.WriteIntentError, found %T", err) } } func assertExportedKVs( t *testing.T, e Engine, startKey, endKey roachpb.Key, startTime, endTime hlc.Timestamp, revisions bool, expected []MVCCKeyValue, useTBI bool, ) { const big = 1 << 30 sstFile := &MemFile{} _, _, _, err := e.ExportMVCCToSst(context.Background(), ExportOptions{ StartKey: MVCCKey{Key: startKey}, EndKey: endKey, StartTS: startTime, EndTS: endTime, ExportAllRevisions: revisions, TargetSize: big, MaxSize: big, StopMidKey: false, UseTBI: useTBI, }, sstFile) require.NoError(t, err) data := sstFile.Data() if data == nil { require.Nil(t, expected) return } sst, err := NewMemSSTIterator(data, false) require.NoError(t, err) defer sst.Close() sst.SeekGE(MVCCKey{}) for i := range expected { ok, err := sst.Valid() require.NoError(t, err) require.Truef(t, ok, "iteration produced %d keys, expected %d", i, len(expected)) assert.Equalf(t, expected[i].Key, sst.UnsafeKey(), "key %d", i) if expected[i].Value == nil { assert.Equalf(t, []byte{}, sst.UnsafeValue(), "key %d %q", i, sst.UnsafeKey()) } else { assert.Equalf(t, expected[i].Value, sst.UnsafeValue(), "key %d %q", i, sst.UnsafeKey()) } sst.Next() } ok, err := sst.Valid() require.NoError(t, err) require.False(t, 
ok) } func nextIgnoreTimeExpectErr( t *testing.T, e Engine, startKey, endKey roachpb.Key, startTime, endTime hlc.Timestamp, errString string, ) { // The semantics of the methods NextIgnoringTime() should not change whether // or not we enable the TBI optimization. for _, useTBI := range []bool{true, false} { t.Run(fmt.Sprintf("useTBI-%t", useTBI), func(t *testing.T) { iter := NewMVCCIncrementalIterator(e, MVCCIncrementalIterOptions{ EndKey: endKey, EnableTimeBoundIteratorOptimization: useTBI, StartTime: startTime, EndTime: endTime, }) defer iter.Close() for iter.SeekGE(MakeMVCCMetadataKey(startKey)); ; iter.NextIgnoringTime() { if ok, _ := iter.Valid(); !ok || iter.UnsafeKey().Key.Compare(endKey) >= 0 { break } // pass } if _, err := iter.Valid(); !testutils.IsError(err, errString) { t.Fatalf("expected error %q but got %v", errString, err) } }) } } func assertNextIgnoreTimeIteratedKVs( t *testing.T, e Engine, startKey, endKey roachpb.Key, startTime, endTime hlc.Timestamp, expected []MVCCKeyValue, ) { // The semantics of the methods NextIgnoringTime() should not change whether // or not we enable the TBI optimization. for _, useTBI := range []bool{true, false} { t.Run(fmt.Sprintf("useTBI-%t", useTBI), func(t *testing.T) { iter := NewMVCCIncrementalIterator(e, MVCCIncrementalIterOptions{ EndKey: endKey, EnableTimeBoundIteratorOptimization: useTBI, StartTime: startTime, EndTime: endTime, }) defer iter.Close() var kvs []MVCCKeyValue for iter.SeekGE(MakeMVCCMetadataKey(startKey)); ; iter.NextIgnoringTime() { if ok, err := iter.Valid(); err != nil { t.Fatalf("unexpected error: %+v", err) } else if !ok || iter.UnsafeKey().Key.Compare(endKey) >= 0 { break } kvs = append(kvs, MVCCKeyValue{Key: iter.Key(), Value: iter.Value()}) } if len(kvs) != len(expected) { t.Fatalf("got %d kvs but expected %d: %v", len(kvs), len(expected), kvs) } for i := range kvs { if !kvs[i].Key.Equal(expected[i].Key) { t.Fatalf("%d key: got %v but expected %v", i, kvs[i].Key, expected[i].Key) } if !bytes.Equal(kvs[i].Value, expected[i].Value) { t.Fatalf("%d value: got %x but expected %x", i, kvs[i].Value, expected[i].Value) } } }) } } func assertIteratedKVs( t *testing.T, e Engine, startKey, endKey roachpb.Key, startTime, endTime hlc.Timestamp, revisions bool, expected []MVCCKeyValue, useTBI bool, ) { iter := NewMVCCIncrementalIterator(e, MVCCIncrementalIterOptions{ EndKey: endKey, EnableTimeBoundIteratorOptimization: useTBI, StartTime: startTime, EndTime: endTime, IntentPolicy: MVCCIncrementalIterIntentPolicyAggregate, }) defer iter.Close() var iterFn func() if revisions { iterFn = iter.Next } else { iterFn = iter.NextKey } var kvs []MVCCKeyValue for iter.SeekGE(MakeMVCCMetadataKey(startKey)); ; iterFn() { if ok, err := iter.Valid(); err != nil { t.Fatalf("unexpected error: %+v", err) } else if !ok || iter.UnsafeKey().Key.Compare(endKey) >= 0 { break } if iter.NumCollectedIntents() > 0 { t.Fatal("got unexpected intent error") } kvs = append(kvs, MVCCKeyValue{Key: iter.Key(), Value: iter.Value()}) } if len(kvs) != len(expected) { t.Fatalf("got %d kvs but expected %d: %v", len(kvs), len(expected), kvs) } for i := range kvs { if !kvs[i].Key.Equal(expected[i].Key) { t.Fatalf("%d key: got %v but expected %v", i, kvs[i].Key, expected[i].Key) } if !bytes.Equal(kvs[i].Value, expected[i].Value) { t.Fatalf("%d value: got %x but expected %x", i, kvs[i].Value, expected[i].Value) } } } func assertEqualKVs( e Engine, startKey, endKey roachpb.Key, startTime, endTime hlc.Timestamp, revisions bool, expected []MVCCKeyValue, ) 
func(*testing.T) { return func(t *testing.T) { t.Helper() t.Run("iterate", func(t *testing.T) { assertIteratedKVs(t, e, startKey, endKey, startTime, endTime, revisions, expected, false /* useTBI */) }) t.Run("iterate-tbi", func(t *testing.T) { assertIteratedKVs(t, e, startKey, endKey, startTime, endTime, revisions, expected, true /* useTBI */) }) t.Run("export", func(t *testing.T) { assertExportedKVs(t, e, startKey, endKey, startTime, endTime, revisions, expected, false /* useTBI */) }) t.Run("export-tbi", func(t *testing.T) { assertExportedKVs(t, e, startKey, endKey, startTime, endTime, revisions, expected, true /* useTBI */) }) } } // TestMVCCIncrementalIteratorNextIgnoringTime tests the iteration semantics of // the method NextIgnoreTime(). This method is supposed to return all the KVs // (versions and new keys) that would be encountered in a non-incremental // iteration. func TestMVCCIncrementalIteratorNextIgnoringTime(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) ctx := context.Background() var ( keyMax = roachpb.KeyMax testKey1 = roachpb.Key("/db1") testKey2 = roachpb.Key("/db2") testValue1 = []byte("val1") testValue2 = []byte("val2") testValue3 = []byte("val3") testValue4 = []byte("val4") // Use a non-zero min, since we use IsEmpty to decide if a ts should be used // as upper/lower-bound during iterator initialization. tsMin = hlc.Timestamp{WallTime: 0, Logical: 1} ts1 = hlc.Timestamp{WallTime: 1, Logical: 0} ts2 = hlc.Timestamp{WallTime: 2, Logical: 0} ts3 = hlc.Timestamp{WallTime: 3, Logical: 0} ts4 = hlc.Timestamp{WallTime: 4, Logical: 0} tsMax = hlc.Timestamp{WallTime: math.MaxInt64, Logical: 0} ) kv1_1_1 := makeKVT(testKey1, testValue1, ts1) kv1_2_2 := makeKVT(testKey1, testValue2, ts2) kv2_2_2 := makeKVT(testKey2, testValue3, ts2) kv2_4_4 := makeKVT(testKey2, testValue4, ts4) kv1_3Deleted := makeKVT(testKey1, nil, ts3) for _, engineImpl := range mvccEngineImpls { t.Run(engineImpl.name, func(t *testing.T) { e := engineImpl.create() defer e.Close() t.Run("empty", func(t *testing.T) { assertNextIgnoreTimeIteratedKVs(t, e, localMax, keyMax, tsMin, ts3, nil) }) for _, kv := range kvs(kv1_1_1, kv1_2_2, kv2_2_2) { v := roachpb.Value{RawBytes: kv.Value} if err := MVCCPut(ctx, e, nil, kv.Key.Key, kv.Key.Timestamp, v, nil); err != nil { t.Fatal(err) } } // Exercise time ranges. t.Run("ts (0-0]", func(t *testing.T) { assertNextIgnoreTimeIteratedKVs(t, e, localMax, keyMax, tsMin, tsMin, nil) }) // Returns the kv_2_2_2 even though it is outside (startTime, endTime]. t.Run("ts (0-1]", func(t *testing.T) { assertNextIgnoreTimeIteratedKVs(t, e, localMax, keyMax, tsMin, ts1, kvs(kv1_1_1, kv2_2_2)) }) t.Run("ts (0-∞]", func(t *testing.T) { assertNextIgnoreTimeIteratedKVs(t, e, localMax, keyMax, tsMin, tsMax, kvs(kv1_2_2, kv1_1_1, kv2_2_2)) }) t.Run("ts (1-1]", func(t *testing.T) { assertNextIgnoreTimeIteratedKVs(t, e, localMax, keyMax, ts1, ts1, nil) }) // Returns the kv_1_1_1 even though it is outside (startTime, endTime]. t.Run("ts (1-2]", func(t *testing.T) { assertNextIgnoreTimeIteratedKVs(t, e, localMax, keyMax, ts1, ts2, kvs(kv1_2_2, kv1_1_1, kv2_2_2)) }) t.Run("ts (2-2]", func(t *testing.T) { assertNextIgnoreTimeIteratedKVs(t, e, localMax, keyMax, ts2, ts2, nil) }) // Exercise key ranges. 
t.Run("kv [1-1)", func(t *testing.T) { assertNextIgnoreTimeIteratedKVs(t, e, testKey1, testKey1, tsMin, tsMax, nil) }) t.Run("kv [1-2)", func(t *testing.T) { assertNextIgnoreTimeIteratedKVs(t, e, testKey1, testKey2, tsMin, tsMax, kvs(kv1_2_2, kv1_1_1)) }) // Exercise deletion. if err := MVCCDelete(ctx, e, nil, testKey1, ts3, nil); err != nil { t.Fatal(err) } // Returns the kv_1_1_1 even though it is outside (startTime, endTime]. t.Run("del", func(t *testing.T) { assertNextIgnoreTimeIteratedKVs(t, e, localMax, keyMax, ts1, tsMax, kvs(kv1_3Deleted, kv1_2_2, kv1_1_1, kv2_2_2)) }) // Insert an intent of testKey2. txn1ID := uuid.MakeV4() txn1 := roachpb.Transaction{ TxnMeta: enginepb.TxnMeta{ Key: testKey2, ID: txn1ID, Epoch: 1, WriteTimestamp: ts4, }, ReadTimestamp: ts4, } txn1Val := roachpb.Value{RawBytes: testValue4} if err := MVCCPut(ctx, e, nil, txn1.TxnMeta.Key, txn1.ReadTimestamp, txn1Val, &txn1); err != nil { t.Fatal(err) } // We have to be careful that we are testing the intent handling logic of // NextIgnoreTime() rather than the first SeekGE(). We do this by // ensuring that the SeekGE() doesn't encounter an intent. t.Run("intents", func(t *testing.T) { nextIgnoreTimeExpectErr(t, e, testKey1, testKey2.PrefixEnd(), tsMin, tsMax, "conflicting intents") }) t.Run("intents", func(t *testing.T) { nextIgnoreTimeExpectErr(t, e, localMax, keyMax, tsMin, ts4, "conflicting intents") }) // Intents above the upper time bound or beneath the lower time bound must // be ignored. Note that the lower time bound is exclusive while the upper // time bound is inclusive. // // The intent at ts=4 for kv2 lies outside the timespan // (startTime, endTime] so we do not raise an error and just move on to // its versioned KV. t.Run("intents", func(t *testing.T) { assertNextIgnoreTimeIteratedKVs(t, e, localMax, keyMax, tsMin, ts3, kvs(kv1_3Deleted, kv1_2_2, kv1_1_1, kv2_4_4, kv2_2_2)) }) t.Run("intents", func(t *testing.T) { assertNextIgnoreTimeIteratedKVs(t, e, localMax, keyMax, ts4, tsMax, kvs()) }) t.Run("intents", func(t *testing.T) { assertNextIgnoreTimeIteratedKVs(t, e, localMax, keyMax, ts4.Next(), tsMax, kvs()) }) }) } } func TestMVCCIncrementalIteratorInlinePolicy(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) ctx := context.Background() var ( keyMax = roachpb.KeyMax testKey1 = roachpb.Key("/db1") testKey2 = roachpb.Key("/db2") testValue1 = []byte("val1") testValue2 = []byte("val2") // Use a non-zero min, since we use IsEmpty to decide if a ts should be used // as upper/lower-bound during iterator initialization. 
tsMin = hlc.Timestamp{WallTime: 0, Logical: 1} ts1 = hlc.Timestamp{WallTime: 1, Logical: 0} ts2 = hlc.Timestamp{WallTime: 2, Logical: 0} tsMax = hlc.Timestamp{WallTime: math.MaxInt64, Logical: 0} ) inline1_1_1 := makeKVT(testKey1, testValue1, hlc.Timestamp{}) kv2_1_1 := makeKVT(testKey2, testValue1, ts1) kv2_2_2 := makeKVT(testKey2, testValue2, ts2) for _, engineImpl := range mvccEngineImpls { e := engineImpl.create() defer e.Close() for _, kv := range []MVCCKeyValue{inline1_1_1, kv2_1_1, kv2_2_2} { v := roachpb.Value{RawBytes: kv.Value} if err := MVCCPut(ctx, e, nil, kv.Key.Key, kv.Key.Timestamp, v, nil); err != nil { t.Fatal(err) } } t.Run(engineImpl.name, func(t *testing.T) { t.Run("PolicyError returns error if inline value is found", func(t *testing.T) { iter := NewMVCCIncrementalIterator(e, MVCCIncrementalIterOptions{ EndKey: keyMax, StartTime: tsMin, EndTime: tsMax, InlinePolicy: MVCCIncrementalIterInlinePolicyError, }) defer iter.Close() iter.SeekGE(MakeMVCCMetadataKey(testKey1)) _, err := iter.Valid() assert.EqualError(t, err, "unexpected inline value found: \"/db1\"") }) t.Run("PolicyEmit returns inline values to caller", func(t *testing.T) { iter := NewMVCCIncrementalIterator(e, MVCCIncrementalIterOptions{ EndKey: keyMax, StartTime: tsMin, EndTime: tsMax, InlinePolicy: MVCCIncrementalIterInlinePolicyEmit, }) defer iter.Close() iter.SeekGE(MakeMVCCMetadataKey(testKey1)) expectInlineKeyValue(t, iter, inline1_1_1) iter.Next() expectKeyValue(t, iter, kv2_2_2) iter.Next() expectKeyValue(t, iter, kv2_1_1) }) }) } } func TestMVCCIncrementalIteratorIntentPolicy(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) ctx := context.Background() var ( keyMax = roachpb.KeyMax testKey1 = roachpb.Key("/db1") testKey2 = roachpb.Key("/db2") testValue1 = []byte("val1") testValue2 = []byte("val2") testValue3 = []byte("val3") // Use a non-zero min, since we use IsEmpty to decide if a ts should be used // as upper/lower-bound during iterator initialization. 
tsMin = hlc.Timestamp{WallTime: 0, Logical: 1} ts1 = hlc.Timestamp{WallTime: 1, Logical: 0} ts2 = hlc.Timestamp{WallTime: 2, Logical: 0} ts3 = hlc.Timestamp{WallTime: 3, Logical: 0} tsMax = hlc.Timestamp{WallTime: math.MaxInt64, Logical: 0} ) makeTxn := func(key roachpb.Key, val []byte, ts hlc.Timestamp) (roachpb.Transaction, roachpb.Value, roachpb.Intent) { txnID := uuid.MakeV4() txnMeta := enginepb.TxnMeta{ Key: key, ID: txnID, Epoch: 1, WriteTimestamp: ts, } return roachpb.Transaction{ TxnMeta: txnMeta, ReadTimestamp: ts, }, roachpb.Value{ RawBytes: val, }, roachpb.MakeIntent(&txnMeta, key) } kv1_1_1 := makeKVT(testKey1, testValue1, ts1) kv1_2_2 := makeKVT(testKey1, testValue2, ts2) kv1_3_3 := makeKVT(testKey1, testValue3, ts3) kv2_1_1 := makeKVT(testKey2, testValue1, ts1) kv2_2_2 := makeKVT(testKey2, testValue2, ts2) txn, val, intent2_2_2 := makeTxn(testKey2, testValue2, ts2) intentErr := &roachpb.WriteIntentError{Intents: []roachpb.Intent{intent2_2_2}} for _, engineImpl := range mvccEngineImpls { e := engineImpl.create() defer e.Close() for _, kv := range []MVCCKeyValue{kv1_1_1, kv1_2_2, kv1_3_3, kv2_1_1} { v := roachpb.Value{RawBytes: kv.Value} if err := MVCCPut(ctx, e, nil, kv.Key.Key, kv.Key.Timestamp, v, nil); err != nil { t.Fatal(err) } } if err := MVCCPut(ctx, e, nil, txn.TxnMeta.Key, txn.ReadTimestamp, val, &txn); err != nil { t.Fatal(err) } t.Run(engineImpl.name, func(t *testing.T) { t.Run("PolicyError returns error if an intent is in the time range", func(t *testing.T) { iter := NewMVCCIncrementalIterator(e, MVCCIncrementalIterOptions{ EndKey: keyMax, StartTime: tsMin, EndTime: tsMax, IntentPolicy: MVCCIncrementalIterIntentPolicyError, }) defer iter.Close() iter.SeekGE(MakeMVCCMetadataKey(testKey1)) for ; ; iter.Next() { if ok, _ := iter.Valid(); !ok || iter.UnsafeKey().Key.Compare(keyMax) >= 0 { break } } _, err := iter.Valid() assert.EqualError(t, err, intentErr.Error()) }) t.Run("PolicyError ignores intents outside of time range", func(t *testing.T) { iter := NewMVCCIncrementalIterator(e, MVCCIncrementalIterOptions{ EndKey: keyMax, StartTime: ts2, EndTime: tsMax, IntentPolicy: MVCCIncrementalIterIntentPolicyError, }) defer iter.Close() iter.SeekGE(MakeMVCCMetadataKey(testKey1)) expectKeyValue(t, iter, kv1_3_3) iter.Next() valid, err := iter.Valid() assert.NoError(t, err) assert.False(t, valid) }) t.Run("PolicyEmit returns inline values to caller", func(t *testing.T) { iter := NewMVCCIncrementalIterator(e, MVCCIncrementalIterOptions{ EndKey: keyMax, StartTime: tsMin, EndTime: tsMax, IntentPolicy: MVCCIncrementalIterIntentPolicyEmit, }) defer iter.Close() iter.SeekGE(MakeMVCCMetadataKey(testKey1)) for _, kv := range []MVCCKeyValue{kv1_3_3, kv1_2_2, kv1_1_1} { expectKeyValue(t, iter, kv) iter.Next() } expectIntent(t, iter, intent2_2_2) iter.Next() expectKeyValue(t, iter, kv2_2_2) iter.Next() expectKeyValue(t, iter, kv2_1_1) }) t.Run("PolicyEmit ignores intents outside of time range", func(t *testing.T) { iter := NewMVCCIncrementalIterator(e, MVCCIncrementalIterOptions{ EndKey: keyMax, StartTime: ts2, EndTime: tsMax, IntentPolicy: MVCCIncrementalIterIntentPolicyEmit, }) defer iter.Close() iter.SeekGE(MakeMVCCMetadataKey(testKey1)) expectKeyValue(t, iter, kv1_3_3) iter.Next() valid, err := iter.Valid() assert.NoError(t, err) assert.False(t, valid) }) }) } } func expectKeyValue(t *testing.T, iter SimpleMVCCIterator, kv MVCCKeyValue) { valid, err := iter.Valid() assert.True(t, valid, "expected valid iterator") assert.NoError(t, err) unsafeKey := iter.UnsafeKey() unsafeVal := 
iter.UnsafeValue() assert.True(t, unsafeKey.Key.Equal(kv.Key.Key), "keys not equal") assert.Equal(t, kv.Key.Timestamp, unsafeKey.Timestamp) assert.Equal(t, kv.Value, unsafeVal) } func expectInlineKeyValue(t *testing.T, iter SimpleMVCCIterator, kv MVCCKeyValue) { valid, err := iter.Valid() assert.True(t, valid) assert.NoError(t, err) unsafeKey := iter.UnsafeKey() unsafeVal := iter.UnsafeValue() var meta enginepb.MVCCMetadata err = protoutil.Unmarshal(unsafeVal, &meta) require.NoError(t, err) assert.True(t, meta.IsInline()) assert.False(t, unsafeKey.IsValue()) assert.True(t, unsafeKey.Key.Equal(kv.Key.Key)) assert.Equal(t, kv.Value, meta.RawBytes) } func expectIntent(t *testing.T, iter SimpleMVCCIterator, intent roachpb.Intent) { valid, err := iter.Valid() assert.True(t, valid) assert.NoError(t, err) unsafeKey := iter.UnsafeKey() unsafeVal := iter.UnsafeValue() var meta enginepb.MVCCMetadata err = protoutil.Unmarshal(unsafeVal, &meta) require.NoError(t, err) assert.NotNil(t, meta.Txn) assert.False(t, unsafeKey.IsValue()) assert.True(t, unsafeKey.Key.Equal(intent.Key)) assert.Equal(t, meta.Timestamp, intent.Txn.WriteTimestamp.ToLegacyTimestamp()) } func TestMVCCIncrementalIterator(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) ctx := context.Background() var ( keyMax = roachpb.KeyMax testKey1 = roachpb.Key("/db1") testKey2 = roachpb.Key("/db2") testValue1 = []byte("val1") testValue2 = []byte("val2") testValue3 = []byte("val3") testValue4 = []byte("val4") // Use a non-zero min, since we use IsEmpty to decide if a ts should be used // as upper/lower-bound during iterator initialization. tsMin = hlc.Timestamp{WallTime: 0, Logical: 1} ts1 = hlc.Timestamp{WallTime: 1, Logical: 0} ts2 = hlc.Timestamp{WallTime: 2, Logical: 0} ts3 = hlc.Timestamp{WallTime: 3, Logical: 0} ts4 = hlc.Timestamp{WallTime: 4, Logical: 0} tsMax = hlc.Timestamp{WallTime: math.MaxInt64, Logical: 0} ) // Keys are named as kv<key>_<value>_<ts>. kv1_1_1 := makeKVT(testKey1, testValue1, ts1) kv1_4_4 := makeKVT(testKey1, testValue4, ts4) kv1_2_2 := makeKVT(testKey1, testValue2, ts2) kv2_2_2 := makeKVT(testKey2, testValue3, ts2) kv1Deleted3 := makeKVT(testKey1, nil, ts3) for _, engineImpl := range mvccEngineImpls { t.Run(engineImpl.name+"-latest", func(t *testing.T) { e := engineImpl.create() defer e.Close() t.Run("empty", assertEqualKVs(e, localMax, keyMax, tsMin, ts3, latest, nil)) for _, kv := range kvs(kv1_1_1, kv1_2_2, kv2_2_2) { v := roachpb.Value{RawBytes: kv.Value} if err := MVCCPut(ctx, e, nil, kv.Key.Key, kv.Key.Timestamp, v, nil); err != nil { t.Fatal(err) } } // Exercise time ranges. t.Run("ts (0-0]", assertEqualKVs(e, localMax, keyMax, tsMin, tsMin, latest, nil)) t.Run("ts (0-1]", assertEqualKVs(e, localMax, keyMax, tsMin, ts1, latest, kvs(kv1_1_1))) t.Run("ts (0-∞]", assertEqualKVs(e, localMax, keyMax, tsMin, tsMax, latest, kvs(kv1_2_2, kv2_2_2))) t.Run("ts (1-1]", assertEqualKVs(e, localMax, keyMax, ts1, ts1, latest, nil)) t.Run("ts (1-2]", assertEqualKVs(e, localMax, keyMax, ts1, ts2, latest, kvs(kv1_2_2, kv2_2_2))) t.Run("ts (2-2]", assertEqualKVs(e, localMax, keyMax, ts2, ts2, latest, nil)) // Exercise key ranges. t.Run("kv [1-1)", assertEqualKVs(e, testKey1, testKey1, tsMin, tsMax, latest, nil)) t.Run("kv [1-2)", assertEqualKVs(e, testKey1, testKey2, tsMin, tsMax, latest, kvs(kv1_2_2))) // Exercise deletion. 
if err := MVCCDelete(ctx, e, nil, testKey1, ts3, nil); err != nil { t.Fatal(err) } t.Run("del", assertEqualKVs(e, localMax, keyMax, ts1, tsMax, latest, kvs(kv1Deleted3, kv2_2_2))) // Exercise intent handling. txn1, txn1Val, intentErr1 := makeKVTxn(testKey1, testValue4, ts4) if err := MVCCPut(ctx, e, nil, txn1.TxnMeta.Key, txn1.ReadTimestamp, txn1Val, &txn1); err != nil { t.Fatal(err) } txn2, txn2Val, intentErr2 := makeKVTxn(testKey2, testValue4, ts4) if err := MVCCPut(ctx, e, nil, txn2.TxnMeta.Key, txn2.ReadTimestamp, txn2Val, &txn2); err != nil { t.Fatal(err) } t.Run("intents-1", iterateExpectErr(e, testKey1, testKey1.PrefixEnd(), tsMin, tsMax, latest, intents(intentErr1))) t.Run("intents-2", iterateExpectErr(e, testKey2, testKey2.PrefixEnd(), tsMin, tsMax, latest, intents(intentErr2))) t.Run("intents-multi", iterateExpectErr(e, localMax, keyMax, tsMin, ts4, latest, intents(intentErr1, intentErr2))) // Intents above the upper time bound or beneath the lower time bound must // be ignored (#28358). Note that the lower time bound is exclusive while // the upper time bound is inclusive. t.Run("intents-filtered-1", assertEqualKVs(e, localMax, keyMax, tsMin, ts3, latest, kvs(kv1Deleted3, kv2_2_2))) t.Run("intents-filtered-2", assertEqualKVs(e, localMax, keyMax, ts4, tsMax, latest, kvs())) t.Run("intents-filtered-3", assertEqualKVs(e, localMax, keyMax, ts4.Next(), tsMax, latest, kvs())) intent1 := roachpb.MakeLockUpdate(&txn1, roachpb.Span{Key: testKey1}) intent1.Status = roachpb.COMMITTED if _, err := MVCCResolveWriteIntent(ctx, e, nil, intent1); err != nil { t.Fatal(err) } intent2 := roachpb.MakeLockUpdate(&txn2, roachpb.Span{Key: testKey2}) intent2.Status = roachpb.ABORTED if _, err := MVCCResolveWriteIntent(ctx, e, nil, intent2); err != nil { t.Fatal(err) } t.Run("intents-resolved", assertEqualKVs(e, localMax, keyMax, tsMin, tsMax, latest, kvs(kv1_4_4, kv2_2_2))) }) } for _, engineImpl := range mvccEngineImpls { t.Run(engineImpl.name+"-all", func(t *testing.T) { e := engineImpl.create() defer e.Close() t.Run("empty", assertEqualKVs(e, localMax, keyMax, tsMin, ts3, all, nil)) for _, kv := range kvs(kv1_1_1, kv1_2_2, kv2_2_2) { v := roachpb.Value{RawBytes: kv.Value} if err := MVCCPut(ctx, e, nil, kv.Key.Key, kv.Key.Timestamp, v, nil); err != nil { t.Fatal(err) } } // Exercise time ranges. t.Run("ts (0-0]", assertEqualKVs(e, localMax, keyMax, tsMin, tsMin, all, nil)) t.Run("ts (0-1]", assertEqualKVs(e, localMax, keyMax, tsMin, ts1, all, kvs(kv1_1_1))) t.Run("ts (0-∞]", assertEqualKVs(e, localMax, keyMax, tsMin, tsMax, all, kvs(kv1_2_2, kv1_1_1, kv2_2_2))) t.Run("ts (1-1]", assertEqualKVs(e, localMax, keyMax, ts1, ts1, all, nil)) t.Run("ts (1-2]", assertEqualKVs(e, localMax, keyMax, ts1, ts2, all, kvs(kv1_2_2, kv2_2_2))) t.Run("ts (2-2]", assertEqualKVs(e, localMax, keyMax, ts2, ts2, all, nil)) // Exercise key ranges. t.Run("kv [1-1)", assertEqualKVs(e, testKey1, testKey1, tsMin, tsMax, all, nil)) t.Run("kv [1-2)", assertEqualKVs(e, testKey1, testKey2, tsMin, tsMax, all, kvs(kv1_2_2, kv1_1_1))) // Exercise deletion. if err := MVCCDelete(ctx, e, nil, testKey1, ts3, nil); err != nil { t.Fatal(err) } t.Run("del", assertEqualKVs(e, localMax, keyMax, ts1, tsMax, all, kvs(kv1Deleted3, kv1_2_2, kv2_2_2))) // Exercise intent handling. 
txn1, txn1Val, intentErr1 := makeKVTxn(testKey1, testValue4, ts4) if err := MVCCPut(ctx, e, nil, txn1.TxnMeta.Key, txn1.ReadTimestamp, txn1Val, &txn1); err != nil { t.Fatal(err) } txn2, txn2Val, intentErr2 := makeKVTxn(testKey2, testValue4, ts4) if err := MVCCPut(ctx, e, nil, txn2.TxnMeta.Key, txn2.ReadTimestamp, txn2Val, &txn2); err != nil { t.Fatal(err) } // Single intent tests are verifying behavior when intent collection is not enabled. t.Run("intents-1", iterateExpectErr(e, testKey1, testKey1.PrefixEnd(), tsMin, tsMax, all, intents(intentErr1))) t.Run("intents-2", iterateExpectErr(e, testKey2, testKey2.PrefixEnd(), tsMin, tsMax, all, intents(intentErr2))) t.Run("intents-multi", iterateExpectErr(e, localMax, keyMax, tsMin, ts4, all, intents(intentErr1, intentErr2))) // Intents above the upper time bound or beneath the lower time bound must // be ignored (#28358). Note that the lower time bound is exclusive while // the upper time bound is inclusive. t.Run("intents-filtered-1", assertEqualKVs(e, localMax, keyMax, tsMin, ts3, all, kvs(kv1Deleted3, kv1_2_2, kv1_1_1, kv2_2_2))) t.Run("intents-filtered-2", assertEqualKVs(e, localMax, keyMax, ts4, tsMax, all, kvs())) t.Run("intents-filtered-3", assertEqualKVs(e, localMax, keyMax, ts4.Next(), tsMax, all, kvs())) intent1 := roachpb.MakeLockUpdate(&txn1, roachpb.Span{Key: testKey1}) intent1.Status = roachpb.COMMITTED if _, err := MVCCResolveWriteIntent(ctx, e, nil, intent1); err != nil { t.Fatal(err) } intent2 := roachpb.MakeLockUpdate(&txn2, roachpb.Span{Key: testKey2}) intent2.Status = roachpb.ABORTED if _, err := MVCCResolveWriteIntent(ctx, e, nil, intent2); err != nil { t.Fatal(err) } t.Run("intents-resolved", assertEqualKVs(e, localMax, keyMax, tsMin, tsMax, all, kvs(kv1_4_4, kv1Deleted3, kv1_2_2, kv1_1_1, kv2_2_2))) }) } } func slurpKVsInTimeRange( reader Reader, prefix roachpb.Key, startTime, endTime hlc.Timestamp, ) ([]MVCCKeyValue, error) { endKey := prefix.PrefixEnd() iter := NewMVCCIncrementalIterator(reader, MVCCIncrementalIterOptions{ EndKey: endKey, StartTime: startTime, EndTime: endTime, }) defer iter.Close() var kvs []MVCCKeyValue for iter.SeekGE(MakeMVCCMetadataKey(prefix)); ; iter.Next() { if ok, err := iter.Valid(); err != nil { return nil, err } else if !ok || iter.UnsafeKey().Key.Compare(endKey) >= 0 { break } kvs = append(kvs, MVCCKeyValue{Key: iter.Key(), Value: iter.Value()}) } return kvs, nil } // TestMVCCIncrementalIteratorIntentRewrittenConcurrently verifies that the // workaround in MVCCIncrementalIterator to double-check for deleted intents // properly handles cases where an intent originally in a time-bound iterator's // time range is rewritten at a timestamp outside of its time range. func TestMVCCIncrementalIteratorIntentRewrittenConcurrently(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) for _, engineImpl := range mvccEngineImpls { t.Run(engineImpl.name, func(t *testing.T) { e := engineImpl.create() defer e.Close() // Create a DB containing a single intent. 
ctx := context.Background() kA := roachpb.Key("kA") vA1 := roachpb.MakeValueFromString("vA1") vA2 := roachpb.MakeValueFromString("vA2") ts0 := hlc.Timestamp{WallTime: 0} ts1 := hlc.Timestamp{WallTime: 1} ts2 := hlc.Timestamp{WallTime: 2} ts3 := hlc.Timestamp{WallTime: 3} txn := &roachpb.Transaction{ TxnMeta: enginepb.TxnMeta{ Key: roachpb.Key("b"), ID: uuid.MakeV4(), Epoch: 1, WriteTimestamp: ts1, Sequence: 1, }, ReadTimestamp: ts1, } if err := MVCCPut(ctx, e, nil, kA, ts1, vA1, txn); err != nil { t.Fatal(err) } // Concurrently iterate over the intent using a time-bound iterator and move // the intent out of the time-bound iterator's time range by writing to it // again at a higher timestamp. g, _ := errgroup.WithContext(ctx) g.Go(func() error { // Re-write the intent with a higher timestamp. txn.WriteTimestamp = ts3 txn.Sequence = 2 // Use a batch since MVCCPut is not atomic when using an Engine and we // are not using latches to prevent a concurrent read in the other // goroutine. A non-atomic Put can cause the strict invariant checking // in intentInterleavingIter to be violated. b := e.NewBatch() defer b.Close() if err := MVCCPut(ctx, b, nil, kA, ts1, vA2, txn); err != nil { return err } return b.Commit(false) }) g.Go(func() error { // Iterate with a time range that includes the initial intent but does // not include the new intent. kvs, err := slurpKVsInTimeRange(e, kA, ts0, ts2) // There are two permissible outcomes from the scan. If the iteration // wins the race with the put that moves the intent then it should // observe the intent and return a write intent error. If the iteration // loses the race with the put that moves the intent then it should // observe and return nothing because there will be no committed or // provisional keys in its time range. if err != nil { if !testutils.IsError(err, `conflicting intents on "kA"`) { return err } } else { if len(kvs) != 0 { return errors.Errorf(`unexpected kvs: %v`, kvs) } } return nil }) if err := g.Wait(); err != nil { t.Fatal(err) } }) } } // TestMVCCIncrementalIteratorIntentDeletion checks a workaround in // MVCCIncrementalIterator for a bug in time-bound iterators, where an intent // has been deleted, but the time-bound iterator doesn't see the deletion. 
func TestMVCCIncrementalIteratorIntentDeletion(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) txn := func(key roachpb.Key, ts hlc.Timestamp) *roachpb.Transaction { return &roachpb.Transaction{ TxnMeta: enginepb.TxnMeta{ Key: key, ID: uuid.MakeV4(), Epoch: 1, WriteTimestamp: ts, }, ReadTimestamp: ts, } } intent := func(txn *roachpb.Transaction) roachpb.LockUpdate { intent := roachpb.MakeLockUpdate(txn, roachpb.Span{Key: txn.Key}) intent.Status = roachpb.COMMITTED return intent } ctx := context.Background() kA := roachpb.Key("kA") vA1 := roachpb.MakeValueFromString("vA1") vA2 := roachpb.MakeValueFromString("vA2") vA3 := roachpb.MakeValueFromString("vA3") kB := roachpb.Key("kB") vB1 := roachpb.MakeValueFromString("vB1") kC := roachpb.Key("kC") vC1 := roachpb.MakeValueFromString("vC1") ts0 := hlc.Timestamp{WallTime: 0} ts1 := hlc.Timestamp{WallTime: 1} ts2 := hlc.Timestamp{WallTime: 2} ts3 := hlc.Timestamp{WallTime: 3} txnA1 := txn(kA, ts1) txnA3 := txn(kA, ts3) txnB1 := txn(kB, ts1) txnC1 := txn(kC, ts1) db := createTestPebbleEngine() defer db.Close() // Set up two sstables very specifically: // // sst1 (time-bound metadata ts1->ts1) // kA -> (intent) // kA:1 -> vA1 // kB -> (intent) // kB:1 -> vB1 // kC -> (intent) // kC:1 -> vC1 // // sst2 (time-bound metadata ts2->ts3) the intent deletions are for the // intents at ts1, but there's no way to know that when constructing the // metadata (hence the time-bound iterator bug) // kA -> (intent) [NB this overwrites the intent deletion] // kA:3 -> vA3 // kA:2 -> vA2 // kB -> (intent deletion) require.NoError(t, MVCCPut(ctx, db, nil, kA, txnA1.ReadTimestamp, vA1, txnA1)) require.NoError(t, MVCCPut(ctx, db, nil, kB, txnB1.ReadTimestamp, vB1, txnB1)) require.NoError(t, MVCCPut(ctx, db, nil, kC, txnC1.ReadTimestamp, vC1, txnC1)) require.NoError(t, db.Flush()) require.NoError(t, db.Compact()) _, err := MVCCResolveWriteIntent(ctx, db, nil, intent(txnA1)) require.NoError(t, err) _, err = MVCCResolveWriteIntent(ctx, db, nil, intent(txnB1)) require.NoError(t, err) require.NoError(t, MVCCPut(ctx, db, nil, kA, ts2, vA2, nil)) require.NoError(t, MVCCPut(ctx, db, nil, kA, txnA3.WriteTimestamp, vA3, txnA3)) require.NoError(t, db.Flush()) // The kA ts1 intent has been resolved. There's now a new intent on kA, but // the timestamp (ts3) is too new so it should be ignored. kvs, err := slurpKVsInTimeRange(db, kA, ts0, ts1) require.NoError(t, err) require.Equal(t, []MVCCKeyValue{ {Key: MVCCKey{Key: kA, Timestamp: ts1}, Value: vA1.RawBytes}, }, kvs) // kA has a value at ts2. Again the intent is too new (ts3), so ignore. kvs, err = slurpKVsInTimeRange(db, kA, ts0, ts2) require.NoError(t, err) require.Equal(t, []MVCCKeyValue{ {Key: MVCCKey{Key: kA, Timestamp: ts2}, Value: vA2.RawBytes}, {Key: MVCCKey{Key: kA, Timestamp: ts1}, Value: vA1.RawBytes}, }, kvs) // At ts3, we should see the new intent. _, err = slurpKVsInTimeRange(db, kA, ts0, ts3) require.EqualError(t, err, `conflicting intents on "kA"`) // Similar to the kA ts1 check, but there is no newer intent. We expect to // pick up the intent deletion and it should cancel out the intent, leaving // only the value at ts1. kvs, err = slurpKVsInTimeRange(db, kB, ts0, ts1) require.NoError(t, err) require.Equal(t, []MVCCKeyValue{ {Key: MVCCKey{Key: kB, Timestamp: ts1}, Value: vB1.RawBytes}, }, kvs) // Sanity check that we see the still unresolved intent for kC ts1. 
_, err = slurpKVsInTimeRange(db, kC, ts0, ts1) require.EqualError(t, err, `conflicting intents on "kC"`) } func TestMVCCIncrementalIteratorIntentStraddlesSStables(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) // Create a DB containing 2 keys, a and b, where b has an intent. We use the // regular MVCCPut operation to generate these keys, which we'll later be // copying into manually created sstables. ctx := context.Background() db1, err := Open(ctx, InMemory(), ForTesting) require.NoError(t, err) defer db1.Close() put := func(key, value string, ts int64, txn *roachpb.Transaction) { v := roachpb.MakeValueFromString(value) if err := MVCCPut( ctx, db1, nil, roachpb.Key(key), hlc.Timestamp{WallTime: ts}, v, txn, ); err != nil { t.Fatal(err) } } put("a", "a value", 1, nil) put("b", "b value", 2, &roachpb.Transaction{ TxnMeta: enginepb.TxnMeta{ Key: roachpb.Key("b"), ID: uuid.MakeV4(), Epoch: 1, WriteTimestamp: hlc.Timestamp{WallTime: 2}, }, ReadTimestamp: hlc.Timestamp{WallTime: 2}, }) // Create a second DB in which we'll create a specific SSTable structure: the // first SSTable contains 2 KVs where the first is a regular versioned key // and the second is the MVCC metadata entry (i.e. an intent). The next // SSTable contains the provisional value for the intent. The effect is that // the metadata entry is separated from the entry it is metadata for. // // SSTable 1: // a@1 // b@<meta> // // SSTable 2: // b@2 db2, err := Open(ctx, InMemory(), ForTesting) require.NoError(t, err) defer db2.Close() // NB: If the original intent was separated, iterating using an interleaving // iterator, as done below, and writing to an sst, transforms the separated // intent to an interleaved intent. This is ok for now since both kinds of // intents are supported. // TODO(sumeer): change this test before interleaved intents are disallowed. ingest := func(it MVCCIterator, count int) { memFile := &MemFile{} sst := MakeIngestionSSTWriter(memFile) defer sst.Close() for i := 0; i < count; i++ { ok, err := it.Valid() if err != nil { t.Fatal(err) } if !ok { t.Fatal("expected key") } if err := sst.Put(it.Key(), it.Value()); err != nil { t.Fatal(err) } it.Next() } if err := sst.Finish(); err != nil { t.Fatal(err) } if err := db2.WriteFile(`ingest`, memFile.Data()); err != nil { t.Fatal(err) } if err := db2.IngestExternalFiles(ctx, []string{`ingest`}); err != nil { t.Fatal(err) } } { // Iterate over the entries in the first DB, ingesting them into SSTables // in the second DB. it := db1.NewMVCCIterator(MVCCKeyAndIntentsIterKind, IterOptions{ UpperBound: keys.MaxKey, }) defer it.Close() it.SeekGE(MVCCKey{Key: keys.LocalMax}) ingest(it, 2) ingest(it, 1) } { // Use an incremental iterator to simulate an incremental backup from (1, // 2]. Note that incremental iterators are exclusive on the start time and // inclusive on the end time. The expectation is that we'll see a write // intent error. it := NewMVCCIncrementalIterator(db2, MVCCIncrementalIterOptions{ EndKey: keys.MaxKey, StartTime: hlc.Timestamp{WallTime: 1}, EndTime: hlc.Timestamp{WallTime: 2}, }) defer it.Close() for it.SeekGE(MVCCKey{Key: keys.LocalMax}); ; it.Next() { ok, err := it.Valid() if err != nil { if errors.HasType(err, (*roachpb.WriteIntentError)(nil)) { // This is the write intent error we were expecting. 
return } t.Fatalf("%T: %s", err, err) } if !ok { break } } t.Fatalf("expected write intent error, but found success") } } func TestMVCCIterateTimeBound(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) dir, cleanupFn := testutils.TempDir(t) defer cleanupFn() const numKeys = 1000 const numBatches = 10 const batchTimeSpan = 10 const valueSize = 8 eng, err := loadTestData(filepath.Join(dir, "mvcc_data"), numKeys, numBatches, batchTimeSpan, valueSize) if err != nil { t.Fatal(err) } defer eng.Close() for _, testCase := range []struct { start hlc.Timestamp end hlc.Timestamp }{ // entire time range {hlc.Timestamp{WallTime: 0, Logical: 0}, hlc.Timestamp{WallTime: 110, Logical: 0}}, // one SST {hlc.Timestamp{WallTime: 10, Logical: 0}, hlc.Timestamp{WallTime: 19, Logical: 0}}, // one SST, plus the min of the following SST {hlc.Timestamp{WallTime: 10, Logical: 0}, hlc.Timestamp{WallTime: 20, Logical: 0}}, // one SST, plus the max of the preceding SST {hlc.Timestamp{WallTime: 9, Logical: 0}, hlc.Timestamp{WallTime: 19, Logical: 0}}, // one SST, plus the min of the following and the max of the preceding SST {hlc.Timestamp{WallTime: 9, Logical: 0}, hlc.Timestamp{WallTime: 21, Logical: 0}}, // one SST, not min or max {hlc.Timestamp{WallTime: 17, Logical: 0}, hlc.Timestamp{WallTime: 18, Logical: 0}}, // one SST's max {hlc.Timestamp{WallTime: 18, Logical: 0}, hlc.Timestamp{WallTime: 19, Logical: 0}}, // one SST's min {hlc.Timestamp{WallTime: 19, Logical: 0}, hlc.Timestamp{WallTime: 20, Logical: 0}}, // random endpoints {hlc.Timestamp{WallTime: 32, Logical: 0}, hlc.Timestamp{WallTime: 78, Logical: 0}}, } { t.Run(fmt.Sprintf("%s-%s", testCase.start, testCase.end), func(t *testing.T) { defer leaktest.AfterTest(t)() expectedKVs := collectMatchingWithMVCCIterator(t, eng, testCase.start, testCase.end) assertEqualKVs(eng, keys.LocalMax, keys.MaxKey, testCase.start, testCase.end, latest, expectedKVs)(t) }) } } func collectMatchingWithMVCCIterator( t *testing.T, eng Engine, start, end hlc.Timestamp, ) []MVCCKeyValue { var expectedKVs []MVCCKeyValue iter := eng.NewMVCCIterator(MVCCKeyAndIntentsIterKind, IterOptions{UpperBound: roachpb.KeyMax}) defer iter.Close() iter.SeekGE(MVCCKey{Key: localMax}) for { ok, err := iter.Valid() if err != nil { t.Fatal(err) } else if !ok { break } ts := iter.Key().Timestamp if (ts.Less(end) || end == ts) && start.Less(ts) { expectedKVs = append(expectedKVs, MVCCKeyValue{Key: iter.Key(), Value: iter.Value()}) } iter.Next() } if len(expectedKVs) < 1 { t.Fatalf("source of truth had no expected KVs; likely a bug in the test itself") } return expectedKVs } func runIncrementalBenchmark( b *testing.B, emk engineMaker, useTBI bool, ts hlc.Timestamp, opts benchDataOptions, ) { eng, _ := setupMVCCData(context.Background(), b, emk, opts) { // Pull all of the sstables into the cache. This // probably defeats a lot of the benefits of the // time-based optimization. 
iter := eng.NewMVCCIterator(MVCCKeyAndIntentsIterKind, IterOptions{UpperBound: roachpb.KeyMax}) _, _ = iter.ComputeStats(keys.LocalMax, roachpb.KeyMax, 0) iter.Close() } defer eng.Close() startKey := roachpb.Key(encoding.EncodeUvarintAscending([]byte("key-"), uint64(0))) endKey := roachpb.Key(encoding.EncodeUvarintAscending([]byte("key-"), uint64(opts.numKeys))) b.ResetTimer() for i := 0; i < b.N; i++ { it := NewMVCCIncrementalIterator(eng, MVCCIncrementalIterOptions{ EnableTimeBoundIteratorOptimization: useTBI, EndKey: endKey, StartTime: ts, EndTime: hlc.MaxTimestamp, }) defer it.Close() it.SeekGE(MVCCKey{Key: startKey}) for { if ok, err := it.Valid(); err != nil { b.Fatalf("failed incremental iteration: %+v", err) } else if !ok { break } it.Next() } } } func BenchmarkMVCCIncrementalIterator(b *testing.B) { defer log.Scope(b).Close(b) numVersions := 100 numKeys := 1000 // Mean of 50 versions * 1000 bytes results in more than one block per // versioned key, so there is some chance of // EnableTimeBoundIteratorOptimization=true being useful. valueBytes := 1000 setupMVCCPebbleWithBlockProperties := func(b testing.TB, dir string) Engine { peb, err := Open( context.Background(), Filesystem(dir), CacheSize(testCacheSize), func(cfg *engineConfig) error { cfg.Opts.FormatMajorVersion = pebble.FormatBlockPropertyCollector return nil }) if err != nil { b.Fatalf("could not create new pebble instance at %s: %+v", dir, err) } return peb } for _, useTBI := range []bool{true, false} { b.Run(fmt.Sprintf("useTBI=%v", useTBI), func(b *testing.B) { for _, tsExcludePercent := range []float64{0, 0.95} { wallTime := int64((5 * (float64(numVersions)*tsExcludePercent + 1))) ts := hlc.Timestamp{WallTime: wallTime} b.Run(fmt.Sprintf("ts=%d", ts.WallTime), func(b *testing.B) { runIncrementalBenchmark(b, setupMVCCPebbleWithBlockProperties, useTBI, ts, benchDataOptions{ numVersions: numVersions, numKeys: numKeys, valueBytes: valueBytes, }) }) } }) } } // BenchmarkMVCCIncrementalIteratorForOldData is a benchmark for the case of // finding old data when most data is in L6. This uses the MVCC timestamp to // define age, for convenience, though it could be a different field in the // key if one wrote a BlockPropertyCollector that could parse the key to find // the field (for instance the crdb_internal_ttl_expiration used in // https://github.com/cockroachdb/cockroach/pull/70241). func BenchmarkMVCCIncrementalIteratorForOldData(b *testing.B) { defer log.Scope(b).Close(b) numKeys := 10000 // 1 in 400 keys is being looked for. Roughly corresponds to a TTL of // slightly longer than 1 year, where each day, we run a pass to expire 1 // day of keys. The old keys are uniformly distributed in the key space, // which is the worst case for block property filters. keyAgeInterval := 400 setupMVCCPebbleWithBlockProperties := func(b *testing.B) Engine { eng, err := Open( context.Background(), InMemory(), // Use a small cache size. Scanning large tables with mostly cold data // will mostly miss the cache (especially since the block cache is meant // to be scan resistant). CacheSize(1<<10), func(cfg *engineConfig) error { cfg.Opts.FormatMajorVersion = pebble.FormatBlockPropertyCollector return nil }) if err != nil { b.Fatal(err) } return eng } baseTimestamp := int64(1000) setupData := func(b *testing.B, eng Engine, valueSize int) { // Generate the same data every time. 
rng := rand.New(rand.NewSource(1449168817)) batch := eng.NewBatch() for i := 0; i < numKeys; i++ { if (i+1)%100 == 0 { if err := batch.Commit(false /* sync */); err != nil { b.Fatal(err) } batch.Close() batch = eng.NewBatch() } key := encoding.EncodeUvarintAscending([]byte("key-"), uint64(i)) value := roachpb.MakeValueFromBytes(randutil.RandBytes(rng, valueSize)) value.InitChecksum(key) ts := hlc.Timestamp{WallTime: baseTimestamp + 100*int64(i%keyAgeInterval)} if err := MVCCPut( context.Background(), batch, nil /* ms */, key, ts, value, nil); err != nil { b.Fatal(err) } } if err := eng.Flush(); err != nil { b.Fatal(err) } if err := eng.Compact(); err != nil { b.Fatal(err) } } for _, valueSize := range []int{100, 500, 1000, 2000} { eng := setupMVCCPebbleWithBlockProperties(b) setupData(b, eng, valueSize) b.Run(fmt.Sprintf("valueSize=%d", valueSize), func(b *testing.B) { for _, useTBI := range []bool{true, false} { b.Run(fmt.Sprintf("useTBI=%t", useTBI), func(b *testing.B) { startKey := roachpb.Key(encoding.EncodeUvarintAscending([]byte("key-"), uint64(0))) endKey := roachpb.Key(encoding.EncodeUvarintAscending([]byte("key-"), uint64(numKeys))) b.ResetTimer() for i := 0; i < b.N; i++ { it := NewMVCCIncrementalIterator(eng, MVCCIncrementalIterOptions{ EnableTimeBoundIteratorOptimization: useTBI, EndKey: endKey, StartTime: hlc.Timestamp{}, EndTime: hlc.Timestamp{WallTime: baseTimestamp}, }) it.SeekGE(MVCCKey{Key: startKey}) for { if ok, err := it.Valid(); err != nil { b.Fatalf("failed incremental iteration: %+v", err) } else if !ok { break } it.Next() } it.Close() } }) } }) eng.Close() } }
pkg/storage/mvcc_incremental_iterator_test.go
0
https://github.com/cockroachdb/cockroach/commit/0d8e15c112246a11db9d4e370b5e9bcfeac82aa8
[ 0.000453155895229429, 0.00017261356697417796, 0.00016121465887408704, 0.0001703559246379882, 0.000025440107492613606 ]
{ "id": 3, "code_window": [ " ParentID 52, ParentSchemaID 29: relation \"vehicle_location_histories\" (56): processed\n", " ParentID 52, ParentSchemaID 29: relation \"promo_codes\" (57): referenced database ID 52: referenced descriptor not found\n", " ParentID 52, ParentSchemaID 29: relation \"promo_codes\" (57): processed\n", " ParentID 52, ParentSchemaID 29: relation \"user_promo_codes\" (58): constraint id was missing for constraint: FOREIGN KEY with name \"fk_city_ref_users\"\n", " ParentID 52, ParentSchemaID 29: relation \"user_promo_codes\" (58): processed\n", " ParentID 0, ParentSchemaID 0: namespace entry \"defaultdb\" (50): processed\n", " ParentID 0, ParentSchemaID 0: namespace entry \"movr\" (52): descriptor not found\n", " ParentID 0, ParentSchemaID 0: namespace entry \"postgres\" (51): processed\n", " ParentID 0, ParentSchemaID 0: namespace entry \"system\" (1): processed\n" ], "labels": [ "keep", "keep", "keep", "replace", "keep", "keep", "keep", "keep", "keep" ], "after_edit": [ " ParentID 52, ParentSchemaID 29: relation \"user_promo_codes\" (58): referenced database ID 52: referenced descriptor not found\n" ], "file_path": "pkg/cli/testdata/doctor/test_examine_zipdir_verbose", "type": "replace", "edit_start_line_idx": 50 }
// Copyright 2016 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.

package tree

import (
	"bytes"
	"context"
	"fmt"
	"math"
	"strings"

	"github.com/cockroachdb/cockroach/pkg/server/telemetry"
	"github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgcode"
	"github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgerror"
	"github.com/cockroachdb/cockroach/pkg/sql/types"
	"github.com/cockroachdb/cockroach/pkg/util/log"
	"github.com/cockroachdb/errors"
	"github.com/lib/pq/oid"
)

// SpecializedVectorizedBuiltin is used to map overloads
// to the vectorized operator that is specific to
// that implementation of the builtin function.
type SpecializedVectorizedBuiltin int

// TODO (rohany): What is the best place to put this list?
// I want to put it in builtins or exec, but those create an import
// cycle with exec. tree is imported by both of them, so
// this package seems like a good place to do it.

// Keep this list alphabetized so that it is easy to manage.
const (
	_ SpecializedVectorizedBuiltin = iota
	SubstringStringIntInt
)

// Overload is one of the overloads of a built-in function.
// Each FunctionDefinition may contain one or more overloads.
type Overload struct {
	Types      TypeList
	ReturnType ReturnTyper
	Volatility Volatility

	// PreferredOverload determines overload resolution as follows.
	// When multiple overloads are eligible based on types even after all of of
	// the heuristics to pick one have been used, if one of the overloads is a
	// Overload with the `PreferredOverload` flag set to true it can be selected
	// rather than returning a no-such-method error.
	// This should generally be avoided -- avoiding introducing ambiguous
	// overloads in the first place is a much better solution -- and only done
	// after consultation with @knz @nvanbenschoten.
	PreferredOverload bool

	// Info is a description of the function, which is surfaced on the CockroachDB
	// docs site on the "Functions and Operators" page. Descriptions typically use
	// third-person with the function as an implicit subject (e.g. "Calculates
	// infinity"), but should focus more on ease of understanding so other structures
	// might be more appropriate.
	Info string

	AggregateFunc func([]*types.T, *EvalContext, Datums) AggregateFunc
	WindowFunc    func([]*types.T, *EvalContext) WindowFunc

	// Only one of the following three attributes can be set.

	// Fn is the normal builtin implementation function. It's for functions that
	// take in Datums and return a Datum.
	Fn func(*EvalContext, Datums) (Datum, error)

	// FnWithExprs is for builtins that need access to their arguments as Exprs
	// and not pre-evaluated Datums, but is otherwise identical to Fn.
	FnWithExprs func(*EvalContext, Exprs) (Datum, error)

	// Generator is for SRFs. SRFs take Datums and return multiple rows of Datums.
	Generator GeneratorFactory

	// GeneratorWithExprs is for SRFs that need access to their arguments as Exprs
	// and not pre-evaluated Datums, but is otherwise identical to Generator.
	GeneratorWithExprs GeneratorWithExprsFactory

	// SQLFn must be set for overloads of type SQLClass. It should return a SQL
	// statement which will be executed as a common table expression in the query.
	SQLFn func(*EvalContext, Datums) (string, error)

	// counter, if non-nil, should be incremented upon successful
	// type check of expressions using this overload.
	counter telemetry.Counter

	// SpecializedVecBuiltin is used to let the vectorized engine
	// know when an Overload has a specialized vectorized operator.
	SpecializedVecBuiltin SpecializedVectorizedBuiltin

	// IgnoreVolatilityCheck ignores checking the functions overload's
	// volatility against Postgres's volatility at test time.
	// This should be used with caution.
	IgnoreVolatilityCheck bool

	// Oid is the cached oidHasher.BuiltinOid result for this Overload. It's
	// populated at init-time.
	Oid oid.Oid

	// DistsqlBlocklist is set to true when a function cannot be evaluated in
	// DistSQL. One example is when the type information for function arguments
	// cannot be recovered.
	DistsqlBlocklist bool
}

// params implements the overloadImpl interface.
func (b Overload) params() TypeList { return b.Types }

// returnType implements the overloadImpl interface.
func (b Overload) returnType() ReturnTyper { return b.ReturnType }

// preferred implements the overloadImpl interface.
func (b Overload) preferred() bool { return b.PreferredOverload }

// FixedReturnType returns a fixed type that the function returns, returning Any
// if the return type is based on the function's arguments.
func (b Overload) FixedReturnType() *types.T {
	if b.ReturnType == nil {
		return nil
	}
	return returnTypeToFixedType(b.ReturnType, nil)
}

// InferReturnTypeFromInputArgTypes returns the type that the function returns,
// inferring the type based on the function's inputTypes if necessary.
func (b Overload) InferReturnTypeFromInputArgTypes(inputTypes []*types.T) *types.T {
	retTyp := b.FixedReturnType()
	// If the output type of the function depends on its inputs, then
	// the output of FixedReturnType will be ambiguous. In the ambiguous
	// cases, use the information about the input types to construct the
	// appropriate output type. The tree.ReturnTyper interface is
	// []tree.TypedExpr -> *types.T, so construct the []tree.TypedExpr
	// from the types that we know are the inputs. Note that we don't
	// try to create datums of each input type, and instead use this
	// "TypedDummy" construct. This is because some types don't have resident
	// members (like an ENUM with no values), and we shouldn't error out
	// trying to infer the return type in those cases.
	if retTyp.IsAmbiguous() {
		args := make([]TypedExpr, len(inputTypes))
		for i, t := range inputTypes {
			args[i] = &TypedDummy{Typ: t}
		}
		// Evaluate ReturnType with the fake input set of arguments.
		retTyp = returnTypeToFixedType(b.ReturnType, args)
	}
	return retTyp
}

// IsGenerator returns true if the function is a set returning function (SRF).
func (b Overload) IsGenerator() bool {
	return b.Generator != nil || b.GeneratorWithExprs != nil
}

// Signature returns a human-readable signature.
// If simplify is bool, tuple-returning functions with just
// 1 tuple element unwrap the return type in the signature.
func (b Overload) Signature(simplify bool) string {
	retType := b.FixedReturnType()
	if simplify {
		if retType.Family() == types.TupleFamily && len(retType.TupleContents()) == 1 {
			retType = retType.TupleContents()[0]
		}
	}
	return fmt.Sprintf("(%s) -> %s", b.Types.String(), retType)
}

// overloadImpl is an implementation of an overloaded function. It provides
// access to the parameter type list and the return type of the implementation.
//
// This is a more general type than Overload defined above, because it also
// works with the built-in binary and unary operators.
type overloadImpl interface {
	params() TypeList
	returnType() ReturnTyper
	// allows manually resolving preference between multiple compatible overloads.
	preferred() bool
}

var _ overloadImpl = &Overload{}
var _ overloadImpl = &UnaryOp{}
var _ overloadImpl = &BinOp{}
var _ overloadImpl = &CmpOp{}

// GetParamsAndReturnType gets the parameters and return type of an
// overloadImpl.
func GetParamsAndReturnType(impl overloadImpl) (TypeList, ReturnTyper) {
	return impl.params(), impl.returnType()
}

// TypeList is a list of types representing a function parameter list.
type TypeList interface {
	// Match checks if all types in the TypeList match the corresponding elements in types.
	Match(types []*types.T) bool
	// MatchAt checks if the parameter type at index i of the TypeList matches type typ.
	// In all implementations, types.Null will match with each parameter type, allowing
	// NULL values to be used as arguments.
	MatchAt(typ *types.T, i int) bool
	// MatchLen checks that the TypeList can support l parameters.
	MatchLen(l int) bool
	// GetAt returns the type at the given index in the TypeList, or nil if the TypeList
	// cannot have a parameter at index i.
	GetAt(i int) *types.T
	// Length returns the number of types in the list
	Length() int
	// Types returns a realized copy of the list. variadic lists return a list of size one.
	Types() []*types.T
	// String returns a human readable signature
	String() string
}

var _ TypeList = ArgTypes{}
var _ TypeList = HomogeneousType{}
var _ TypeList = VariadicType{}

// ArgTypes is very similar to ArgTypes except it allows keeping a string
// name for each argument as well and using those when printing the
// human-readable signature.
type ArgTypes []struct {
	Name string
	Typ  *types.T
}

// Match is part of the TypeList interface.
func (a ArgTypes) Match(types []*types.T) bool {
	if len(types) != len(a) {
		return false
	}
	for i := range types {
		if !a.MatchAt(types[i], i) {
			return false
		}
	}
	return true
}

// MatchAt is part of the TypeList interface.
func (a ArgTypes) MatchAt(typ *types.T, i int) bool {
	// The parameterized types for Tuples are checked in the type checking
	// routines before getting here, so we only need to check if the argument
	// type is a types.TUPLE below. This allows us to avoid defining overloads
	// for types.Tuple{}, types.Tuple{types.Any}, types.Tuple{types.Any, types.Any},
	// etc. for Tuple operators.
	if typ.Family() == types.TupleFamily {
		typ = types.AnyTuple
	}
	return i < len(a) && (typ.Family() == types.UnknownFamily || a[i].Typ.Equivalent(typ))
}

// MatchLen is part of the TypeList interface.
func (a ArgTypes) MatchLen(l int) bool {
	return len(a) == l
}

// GetAt is part of the TypeList interface.
func (a ArgTypes) GetAt(i int) *types.T {
	return a[i].Typ
}

// Length is part of the TypeList interface.
func (a ArgTypes) Length() int {
	return len(a)
}

// Types is part of the TypeList interface.
func (a ArgTypes) Types() []*types.T {
	n := len(a)
	ret := make([]*types.T, n)
	for i, s := range a {
		ret[i] = s.Typ
	}
	return ret
}

func (a ArgTypes) String() string {
	var s strings.Builder
	for i, arg := range a {
		if i > 0 {
			s.WriteString(", ")
		}
		s.WriteString(arg.Name)
		s.WriteString(": ")
		s.WriteString(arg.Typ.String())
	}
	return s.String()
}

// HomogeneousType is a TypeList implementation that accepts any arguments, as
// long as all are the same type or NULL. The homogeneous constraint is enforced
// in typeCheckOverloadedExprs.
type HomogeneousType struct{}

// Match is part of the TypeList interface.
func (HomogeneousType) Match(types []*types.T) bool {
	return true
}

// MatchAt is part of the TypeList interface.
func (HomogeneousType) MatchAt(typ *types.T, i int) bool {
	return true
}

// MatchLen is part of the TypeList interface.
func (HomogeneousType) MatchLen(l int) bool {
	return true
}

// GetAt is part of the TypeList interface.
func (HomogeneousType) GetAt(i int) *types.T {
	return types.Any
}

// Length is part of the TypeList interface.
func (HomogeneousType) Length() int {
	return 1
}

// Types is part of the TypeList interface.
func (HomogeneousType) Types() []*types.T {
	return []*types.T{types.Any}
}

func (HomogeneousType) String() string {
	return "anyelement..."
}

// VariadicType is a TypeList implementation which accepts a fixed number of
// arguments at the beginning and an arbitrary number of homogenous arguments
// at the end.
type VariadicType struct {
	FixedTypes []*types.T
	VarType    *types.T
}

// Match is part of the TypeList interface.
func (v VariadicType) Match(types []*types.T) bool {
	for i := range types {
		if !v.MatchAt(types[i], i) {
			return false
		}
	}
	return true
}

// MatchAt is part of the TypeList interface.
func (v VariadicType) MatchAt(typ *types.T, i int) bool {
	if i < len(v.FixedTypes) {
		return typ.Family() == types.UnknownFamily || v.FixedTypes[i].Equivalent(typ)
	}
	return typ.Family() == types.UnknownFamily || v.VarType.Equivalent(typ)
}

// MatchLen is part of the TypeList interface.
func (v VariadicType) MatchLen(l int) bool {
	return l >= len(v.FixedTypes)
}

// GetAt is part of the TypeList interface.
func (v VariadicType) GetAt(i int) *types.T {
	if i < len(v.FixedTypes) {
		return v.FixedTypes[i]
	}
	return v.VarType
}

// Length is part of the TypeList interface.
func (v VariadicType) Length() int {
	return len(v.FixedTypes) + 1
}

// Types is part of the TypeList interface.
func (v VariadicType) Types() []*types.T {
	result := make([]*types.T, len(v.FixedTypes)+1)
	for i := range v.FixedTypes {
		result[i] = v.FixedTypes[i]
	}
	result[len(result)-1] = v.VarType
	return result
}

func (v VariadicType) String() string {
	var s bytes.Buffer
	for i, t := range v.FixedTypes {
		if i != 0 {
			s.WriteString(", ")
		}
		s.WriteString(t.String())
	}
	if len(v.FixedTypes) > 0 {
		s.WriteString(", ")
	}
	fmt.Fprintf(&s, "%s...", v.VarType)
	return s.String()
}

// UnknownReturnType is returned from ReturnTypers when the arguments provided are
// not sufficient to determine a return type. This is necessary for cases like overload
// resolution, where the argument types are not resolved yet so the type-level function
// will be called without argument types. If a ReturnTyper returns unknownReturnType,
// then the candidate function set cannot be refined. This means that only ReturnTypers
// that never return unknownReturnType, like those created with FixedReturnType, can
// help reduce overload ambiguity.
var UnknownReturnType *types.T

// ReturnTyper defines the type-level function in which a builtin function's return type
// is determined. ReturnTypers should make sure to return unknownReturnType when necessary.
type ReturnTyper func(args []TypedExpr) *types.T

// FixedReturnType functions simply return a fixed type, independent of argument types.
func FixedReturnType(typ *types.T) ReturnTyper {
	return func(args []TypedExpr) *types.T { return typ }
}

// IdentityReturnType creates a returnType that is a projection of the idx'th
// argument type.
func IdentityReturnType(idx int) ReturnTyper {
	return func(args []TypedExpr) *types.T {
		if len(args) == 0 {
			return UnknownReturnType
		}
		return args[idx].ResolvedType()
	}
}

// ArrayOfFirstNonNullReturnType returns an array type from the first non-null
// type in the argument list.
func ArrayOfFirstNonNullReturnType() ReturnTyper {
	return func(args []TypedExpr) *types.T {
		if len(args) == 0 {
			return UnknownReturnType
		}
		for _, arg := range args {
			if t := arg.ResolvedType(); t.Family() != types.UnknownFamily {
				return types.MakeArray(t)
			}
		}
		return types.Unknown
	}
}

// FirstNonNullReturnType returns the type of the first non-null argument, or
// types.Unknown if all arguments are null. There must be at least one argument,
// or else FirstNonNullReturnType returns UnknownReturnType. This method is used
// with HomogeneousType functions, in which all arguments have been checked to
// have the same type (or be null).
func FirstNonNullReturnType() ReturnTyper {
	return func(args []TypedExpr) *types.T {
		if len(args) == 0 {
			return UnknownReturnType
		}
		for _, arg := range args {
			if t := arg.ResolvedType(); t.Family() != types.UnknownFamily {
				return t
			}
		}
		return types.Unknown
	}
}

func returnTypeToFixedType(s ReturnTyper, inputTyps []TypedExpr) *types.T {
	if t := s(inputTyps); t != UnknownReturnType {
		return t
	}
	return types.Any
}

type typeCheckOverloadState struct {
	overloads       []overloadImpl
	overloadIdxs    []uint8 // index into overloads
	exprs           []Expr
	typedExprs      []TypedExpr
	resolvableIdxs  []int // index into exprs/typedExprs
	constIdxs       []int // index into exprs/typedExprs
	placeholderIdxs []int // index into exprs/typedExprs
}

// typeCheckOverloadedExprs determines the correct overload to use for the given set of
// expression parameters, along with an optional desired return type. It returns the expression
// parameters after being type checked, along with a slice of candidate overloadImpls. The
// slice may have length:
// 0: overload resolution failed because no compatible overloads were found
// 1: overload resolution succeeded
// 2+: overload resolution failed because of ambiguity
// The inBinOp parameter denotes whether this type check is occurring within a binary operator,
// in which case we may need to make a guess that the two parameters are of the same type if one
// of them is NULL.
func typeCheckOverloadedExprs(
	ctx context.Context,
	semaCtx *SemaContext,
	desired *types.T,
	overloads []overloadImpl,
	inBinOp bool,
	exprs ...Expr,
) ([]TypedExpr, []overloadImpl, error) {
	if len(overloads) > math.MaxUint8 {
		return nil, nil, errors.AssertionFailedf("too many overloads (%d > 255)", len(overloads))
	}

	var s typeCheckOverloadState
	s.exprs = exprs
	s.overloads = overloads

	// Special-case the HomogeneousType overload. We determine its return type by checking that
	// all parameters have the same type.
	for i, overload := range overloads {
		// Only one overload can be provided if it has parameters with HomogeneousType.
		if _, ok := overload.params().(HomogeneousType); ok {
			if len(overloads) > 1 {
				return nil, nil, errors.AssertionFailedf(
					"only one overload can have HomogeneousType parameters")
			}
			typedExprs, _, err := TypeCheckSameTypedExprs(ctx, semaCtx, desired, exprs...)
			if err != nil {
				return nil, nil, err
			}
			return typedExprs, overloads[i : i+1], nil
		}
	}

	// Hold the resolved type expressions of the provided exprs, in order.
	s.typedExprs = make([]TypedExpr, len(exprs))
	s.constIdxs, s.placeholderIdxs, s.resolvableIdxs = typeCheckSplitExprs(ctx, semaCtx, exprs)

	// If no overloads are provided, just type check parameters and return.
	if len(overloads) == 0 {
		for _, i := range s.resolvableIdxs {
			typ, err := exprs[i].TypeCheck(ctx, semaCtx, types.Any)
			if err != nil {
				return nil, nil, pgerror.Wrapf(err, pgcode.InvalidParameterValue,
					"error type checking resolved expression:")
			}
			s.typedExprs[i] = typ
		}
		if err := defaultTypeCheck(ctx, semaCtx, &s, false); err != nil {
			return nil, nil, err
		}
		return s.typedExprs, nil, nil
	}

	s.overloadIdxs = make([]uint8, len(overloads))
	for i := 0; i < len(overloads); i++ {
		s.overloadIdxs[i] = uint8(i)
	}

	// Filter out incorrect parameter length overloads.
	s.overloadIdxs = filterOverloads(s.overloads, s.overloadIdxs, func(o overloadImpl) bool {
		return o.params().MatchLen(len(exprs))
	})

	// Filter out overloads which constants cannot become.
	for _, i := range s.constIdxs {
		constExpr := exprs[i].(Constant)
		s.overloadIdxs = filterOverloads(s.overloads, s.overloadIdxs, func(o overloadImpl) bool {
			return canConstantBecome(constExpr, o.params().GetAt(i))
		})
	}

	// TODO(nvanbenschoten): We should add a filtering step here to filter
	// out impossible candidates based on identical parameters. For instance,
	// f(int, float) is not a possible candidate for the expression f($1, $1).

	// Filter out overloads on resolved types.
	for _, i := range s.resolvableIdxs {
		paramDesired := types.Any

		// If all remaining candidates require the same type for this parameter,
		// begin desiring that type for the corresponding argument expression.
		// Note that this is always the case when we have a single overload left.
		var sameType *types.T
		for _, ovIdx := range s.overloadIdxs {
			typ := s.overloads[ovIdx].params().GetAt(i)
			if sameType == nil {
				sameType = typ
			} else if !typ.Identical(sameType) {
				sameType = nil
				break
			}
		}
		if sameType != nil {
			paramDesired = sameType
		}
		typ, err := exprs[i].TypeCheck(ctx, semaCtx, paramDesired)
		if err != nil {
			return nil, nil, err
		}
		s.typedExprs[i] = typ
		s.overloadIdxs = filterOverloads(s.overloads, s.overloadIdxs, func(o overloadImpl) bool {
			return o.params().MatchAt(typ.ResolvedType(), i)
		})
	}

	// At this point, all remaining overload candidates accept the argument list,
	// so we begin checking for a single remaining candidate implementation to choose.
	// In case there is more than one candidate remaining, the following code uses
	// heuristics to find a most preferable candidate.
	if ok, typedExprs, fns, err := checkReturn(ctx, semaCtx, &s); ok {
		return typedExprs, fns, err
	}

	// The first heuristic is to prefer candidates that return the desired type,
	// if a desired type was provided.
	if desired.Family() != types.AnyFamily {
		s.overloadIdxs = filterOverloads(s.overloads, s.overloadIdxs, func(o overloadImpl) bool {
			// For now, we only filter on the return type for overloads with
			// fixed return types. This could be improved, but is not currently
			// critical because we have no cases of functions with multiple
			// overloads that do not all expose FixedReturnTypes.
			if t := o.returnType()(nil); t != UnknownReturnType {
				return t.Equivalent(desired)
			}
			return true
		})
		if ok, typedExprs, fns, err := checkReturn(ctx, semaCtx, &s); ok {
			return typedExprs, fns, err
		}
	}

	var homogeneousTyp *types.T
	if len(s.resolvableIdxs) > 0 {
		homogeneousTyp = s.typedExprs[s.resolvableIdxs[0]].ResolvedType()
		for _, i := range s.resolvableIdxs[1:] {
			if !homogeneousTyp.Equivalent(s.typedExprs[i].ResolvedType()) {
				homogeneousTyp = nil
				break
			}
		}
	}

	if len(s.constIdxs) > 0 {
		allConstantsAreHomogenous := false
		if ok, typedExprs, fns, err := filterAttempt(ctx, semaCtx, &s, func() {
			// The second heuristic is to prefer candidates where all constants can
			// become a homogeneous type, if all resolvable expressions became one.
			// This is only possible if resolvable expressions were resolved
			// homogeneously up to this point.
			if homogeneousTyp != nil {
				allConstantsAreHomogenous = true
				for _, i := range s.constIdxs {
					if !canConstantBecome(exprs[i].(Constant), homogeneousTyp) {
						allConstantsAreHomogenous = false
						break
					}
				}
				if allConstantsAreHomogenous {
					for _, i := range s.constIdxs {
						s.overloadIdxs = filterOverloads(s.overloads, s.overloadIdxs, func(o overloadImpl) bool {
							return o.params().GetAt(i).Equivalent(homogeneousTyp)
						})
					}
				}
			}
		}); ok {
			return typedExprs, fns, err
		}

		if ok, typedExprs, fns, err := filterAttempt(ctx, semaCtx, &s, func() {
			// The third heuristic is to prefer candidates where all constants can
			// become their "natural" types.
			for _, i := range s.constIdxs {
				natural := naturalConstantType(exprs[i].(Constant))
				if natural != nil {
					s.overloadIdxs = filterOverloads(s.overloads, s.overloadIdxs, func(o overloadImpl) bool {
						return o.params().GetAt(i).Equivalent(natural)
					})
				}
			}
		}); ok {
			return typedExprs, fns, err
		}

		// At this point, it's worth seeing if we have constants that can't actually
		// parse as the type that canConstantBecome claims they can. For example,
		// every string literal will report that it can become an interval, but most
		// string literals do not encode valid intervals. This may uncover some
		// overloads with invalid type signatures.
		//
		// This parsing is sufficiently expensive (see the comment on
		// StrVal.AvailableTypes) that we wait until now, when we've eliminated most
		// overloads from consideration, so that we only need to check each constant
		// against a limited set of types. We can't hold off on this parsing any
		// longer, though: the remaining heuristics are overly aggressive and will
		// falsely reject the only valid overload in some cases.
		//
		// This case is broken into two parts. We first attempt to use the
		// information about the homogeneity of our constants collected by previous
		// heuristic passes. If:
		// * all our constants are homogeneous
		// * we only have a single overload left
		// * the constant overload parameters are homogeneous as well
		// then match this overload with the homogeneous constants. Otherwise,
		// continue to filter overloads by whether or not the constants can parse
		// into the desired types of the overloads.
		// This first case is important when resolving overloads for operations
		// between user-defined types, where we need to propagate the concrete
		// resolved type information over to the constants, rather than attempting
		// to resolve constants as the placeholder type for the user defined type
		// family (like `AnyEnum`).
		if len(s.overloadIdxs) == 1 && allConstantsAreHomogenous {
			overloadParamsAreHomogenous := true
			p := s.overloads[s.overloadIdxs[0]].params()
			for _, i := range s.constIdxs {
				if !p.GetAt(i).Equivalent(homogeneousTyp) {
					overloadParamsAreHomogenous = false
					break
				}
			}
			if overloadParamsAreHomogenous {
				// Type check our constants using the homogeneous type rather than
				// the type in overload parameter. This lets us type check user defined
				// types with a concrete type instance, rather than an ambiguous type.
				for _, i := range s.constIdxs {
					typ, err := s.exprs[i].TypeCheck(ctx, semaCtx, homogeneousTyp)
					if err != nil {
						return nil, nil, err
					}
					s.typedExprs[i] = typ
				}
				_, typedExprs, fn, err := checkReturnPlaceholdersAtIdx(ctx, semaCtx, &s, int(s.overloadIdxs[0]))
				return typedExprs, fn, err
			}
		}
		for _, i := range s.constIdxs {
			constExpr := exprs[i].(Constant)
			s.overloadIdxs = filterOverloads(s.overloads, s.overloadIdxs, func(o overloadImpl) bool {
				semaCtx := MakeSemaContext()
				_, err := constExpr.ResolveAsType(ctx, &semaCtx, o.params().GetAt(i))
				return err == nil
			})
		}
		if ok, typedExprs, fn, err := checkReturn(ctx, semaCtx, &s); ok {
			return typedExprs, fn, err
		}

		// The fourth heuristic is to prefer candidates that accepts the "best"
		// mutual type in the resolvable type set of all constants.
		if bestConstType, ok := commonConstantType(s.exprs, s.constIdxs); ok {
			for _, i := range s.constIdxs {
				s.overloadIdxs = filterOverloads(s.overloads, s.overloadIdxs, func(o overloadImpl) bool {
					return o.params().GetAt(i).Equivalent(bestConstType)
				})
			}
			if ok, typedExprs, fns, err := checkReturn(ctx, semaCtx, &s); ok {
				return typedExprs, fns, err
			}
			if homogeneousTyp != nil {
				if !homogeneousTyp.Equivalent(bestConstType) {
					homogeneousTyp = nil
				}
			} else {
				homogeneousTyp = bestConstType
			}
		}
	}

	// The fifth heuristic is to defer to preferred candidates, if one has been
	// specified in the overload list.
	if ok, typedExprs, fns, err := filterAttempt(ctx, semaCtx, &s, func() {
		s.overloadIdxs = filterOverloads(s.overloads, s.overloadIdxs, func(o overloadImpl) bool {
			return o.preferred()
		})
	}); ok {
		return typedExprs, fns, err
	}

	// The sixth heuristic is to prefer candidates where all placeholders can be
	// given the same type as all constants and resolvable expressions. This is
	// only possible if all constants and resolvable expressions were resolved
	// homogeneously up to this point.
	if homogeneousTyp != nil && len(s.placeholderIdxs) > 0 {
		// Before we continue, try to propagate the homogeneous type to the
		// placeholders. This might not have happened yet, if the overloads'
		// parameter types are ambiguous (like in the case of tuple-tuple binary
		// operators).
		for _, i := range s.placeholderIdxs {
			if _, err := exprs[i].TypeCheck(ctx, semaCtx, homogeneousTyp); err != nil {
				return nil, nil, err
			}
			s.overloadIdxs = filterOverloads(s.overloads, s.overloadIdxs, func(o overloadImpl) bool {
				return o.params().GetAt(i).Equivalent(homogeneousTyp)
			})
		}
		if ok, typedExprs, fns, err := checkReturn(ctx, semaCtx, &s); ok {
			return typedExprs, fns, err
		}
	}

	// This is a total hack for AnyEnum whilst we don't have postgres type resolution.
	// This enables AnyEnum array ops to not need a cast, e.g. array['a']::enum[] = '{a}'.
	// If we have one remaining candidate containing AnyEnum, cast all remaining
	// arguments to a known enum and check that the rest match. This is a poor man's
	// implicit cast / postgres "same argument" resolution clone.
	if len(s.overloadIdxs) == 1 {
		params := s.overloads[s.overloadIdxs[0]].params()
		var knownEnum *types.T

		// Check we have all "AnyEnum" (or "AnyEnum" array) arguments and that
		// one argument is typed with an enum.
		attemptAnyEnumCast := func() bool {
			for i := 0; i < params.Length(); i++ {
				typ := params.GetAt(i)
				// Note we are deliberately looking at whether the built-in takes in
				// AnyEnum as an argument, not the exprs given to the overload itself.
				if !(typ.Identical(types.AnyEnum) || typ.Identical(types.MakeArray(types.AnyEnum))) {
					return false
				}
				if s.typedExprs[i] != nil {
					// Assign the known enum if it was previously unassigned.
					// Otherwise, double check it matches a previously defined enum.
					posEnum := s.typedExprs[i].ResolvedType()
					if !posEnum.UserDefined() {
						return false
					}
					if posEnum.Family() == types.ArrayFamily {
						posEnum = posEnum.ArrayContents()
					}
					if knownEnum == nil {
						knownEnum = posEnum
					} else if !posEnum.Identical(knownEnum) {
						return false
					}
				}
			}
			return knownEnum != nil
		}()

		// If we have all arguments as AnyEnum, and we know at least one of the
		// enum's actual type, try type cast the rest.
		if attemptAnyEnumCast {
			// Copy exprs to prevent any overwrites of underlying s.exprs array later.
			sCopy := s
			sCopy.exprs = make([]Expr, len(s.exprs))
			copy(sCopy.exprs, s.exprs)
			if ok, typedExprs, fns, err := filterAttempt(ctx, semaCtx, &sCopy, func() {
				for _, idx := range append(s.constIdxs, s.placeholderIdxs...) {
					p := params.GetAt(idx)
					typCast := knownEnum
					if p.Family() == types.ArrayFamily {
						typCast = types.MakeArray(knownEnum)
					}
					sCopy.exprs[idx] = &CastExpr{Expr: sCopy.exprs[idx], Type: typCast, SyntaxMode: CastShort}
				}
			}); ok {
				return typedExprs, fns, err
			}
		}
	}

	// In a binary expression, in the case of one of the arguments being untyped NULL,
	// we prefer overloads where we infer the type of the NULL to be the same as the
	// other argument. This is used to differentiate the behavior of
	// STRING[] || NULL and STRING || NULL.
	if inBinOp && len(s.exprs) == 2 {
		if ok, typedExprs, fns, err := filterAttempt(ctx, semaCtx, &s, func() {
			var err error
			left := s.typedExprs[0]
			if left == nil {
				left, err = s.exprs[0].TypeCheck(ctx, semaCtx, types.Any)
				if err != nil {
					return
				}
			}
			right := s.typedExprs[1]
			if right == nil {
				right, err = s.exprs[1].TypeCheck(ctx, semaCtx, types.Any)
				if err != nil {
					return
				}
			}
			leftType := left.ResolvedType()
			rightType := right.ResolvedType()
			leftIsNull := leftType.Family() == types.UnknownFamily
			rightIsNull := rightType.Family() == types.UnknownFamily
			oneIsNull := (leftIsNull || rightIsNull) && !(leftIsNull && rightIsNull)
			if oneIsNull {
				if leftIsNull {
					leftType = rightType
				}
				if rightIsNull {
					rightType = leftType
				}
				s.overloadIdxs = filterOverloads(s.overloads, s.overloadIdxs, func(o overloadImpl) bool {
					return o.params().GetAt(0).Equivalent(leftType) &&
						o.params().GetAt(1).Equivalent(rightType)
				})
			}
		}); ok {
			return typedExprs, fns, err
		}
	}

	// After the previous heuristic, in a binary expression, in the case of one of the arguments being untyped
	// NULL, we prefer overloads where we infer the type of the NULL to be a STRING. This is used
	// to choose INT || NULL::STRING over INT || NULL::INT[].
	if inBinOp && len(s.exprs) == 2 {
		if ok, typedExprs, fns, err := filterAttempt(ctx, semaCtx, &s, func() {
			var err error
			left := s.typedExprs[0]
			if left == nil {
				left, err = s.exprs[0].TypeCheck(ctx, semaCtx, types.Any)
				if err != nil {
					return
				}
			}
			right := s.typedExprs[1]
			if right == nil {
				right, err = s.exprs[1].TypeCheck(ctx, semaCtx, types.Any)
				if err != nil {
					return
				}
			}
			leftType := left.ResolvedType()
			rightType := right.ResolvedType()
			leftIsNull := leftType.Family() == types.UnknownFamily
			rightIsNull := rightType.Family() == types.UnknownFamily
			oneIsNull := (leftIsNull || rightIsNull) && !(leftIsNull && rightIsNull)
			if oneIsNull {
				if leftIsNull {
					leftType = types.String
				}
				if rightIsNull {
					rightType = types.String
				}
				s.overloadIdxs = filterOverloads(s.overloads, s.overloadIdxs, func(o overloadImpl) bool {
					return o.params().GetAt(0).Equivalent(leftType) &&
						o.params().GetAt(1).Equivalent(rightType)
				})
			}
		}); ok {
			return typedExprs, fns, err
		}
	}

	if err := defaultTypeCheck(ctx, semaCtx, &s, len(s.overloads) > 0); err != nil {
		return nil, nil, err
	}

	possibleOverloads := make([]overloadImpl, len(s.overloadIdxs))
	for i, o := range s.overloadIdxs {
		possibleOverloads[i] = s.overloads[o]
	}
	return s.typedExprs, possibleOverloads, nil
}

// filterAttempt attempts to filter the overloads down to a single candidate.
// If it succeeds, it will return true, along with the overload (in a slice for
// convenience) and a possible error. If it fails, it will return false and
// undo any filtering performed during the attempt.
func filterAttempt(
	ctx context.Context, semaCtx *SemaContext, s *typeCheckOverloadState, attempt func(),
) (ok bool, _ []TypedExpr, _ []overloadImpl, _ error) {
	before := s.overloadIdxs
	attempt()
	if len(s.overloadIdxs) == 1 {
		ok, typedExprs, fns, err := checkReturn(ctx, semaCtx, s)
		if err != nil {
			return false, nil, nil, err
		}
		if ok {
			return true, typedExprs, fns, err
		}
	}
	s.overloadIdxs = before
	return false, nil, nil, nil
}

// filterOverloads filters overloads which do not satisfy the predicate.
func filterOverloads(
	overloads []overloadImpl, overloadIdxs []uint8, fn func(overloadImpl) bool,
) []uint8 {
	for i := 0; i < len(overloadIdxs); {
		if fn(overloads[overloadIdxs[i]]) {
			i++
		} else {
			overloadIdxs[i], overloadIdxs[len(overloadIdxs)-1] =
				overloadIdxs[len(overloadIdxs)-1], overloadIdxs[i]
			overloadIdxs = overloadIdxs[:len(overloadIdxs)-1]
		}
	}
	return overloadIdxs
}

// defaultTypeCheck type checks the constant and placeholder expressions without a preference
// and adds them to the type checked slice.
func defaultTypeCheck(
	ctx context.Context, semaCtx *SemaContext, s *typeCheckOverloadState, errorOnPlaceholders bool,
) error {
	for _, i := range s.constIdxs {
		typ, err := s.exprs[i].TypeCheck(ctx, semaCtx, types.Any)
		if err != nil {
			return pgerror.Wrapf(err, pgcode.InvalidParameterValue,
				"error type checking constant value")
		}
		s.typedExprs[i] = typ
	}
	for _, i := range s.placeholderIdxs {
		if errorOnPlaceholders {
			_, err := s.exprs[i].TypeCheck(ctx, semaCtx, types.Any)
			return err
		}
		// If we dont want to error on args, avoid type checking them without a desired type.
		s.typedExprs[i] = StripParens(s.exprs[i]).(*Placeholder)
	}
	return nil
}

// checkReturn checks the number of remaining overloaded function
// implementations.
// Returns ok=true if we should stop overload resolution, and returning either
// 1. the chosen overload in a slice, or
// 2. nil,
// along with the typed arguments.
// This modifies values within s as scratch slices, but only in the case where
// it returns true, which signals to the calling function that it should
// immediately return, so any mutations to s are irrelevant.
func checkReturn(
	ctx context.Context, semaCtx *SemaContext, s *typeCheckOverloadState,
) (ok bool, _ []TypedExpr, _ []overloadImpl, _ error) {
	switch len(s.overloadIdxs) {
	case 0:
		if err := defaultTypeCheck(ctx, semaCtx, s, false); err != nil {
			return false, nil, nil, err
		}
		return true, s.typedExprs, nil, nil

	case 1:
		idx := s.overloadIdxs[0]
		o := s.overloads[idx]
		p := o.params()
		for _, i := range s.constIdxs {
			des := p.GetAt(i)
			typ, err := s.exprs[i].TypeCheck(ctx, semaCtx, des)
			if err != nil {
				return false, s.typedExprs, nil, pgerror.Wrapf(
					err, pgcode.InvalidParameterValue,
					"error type checking constant value",
				)
			}
			if des != nil && !typ.ResolvedType().Equivalent(des) {
				return false, nil, nil, errors.AssertionFailedf(
					"desired constant value type %s but set type %s",
					log.Safe(des), log.Safe(typ.ResolvedType()),
				)
			}
			s.typedExprs[i] = typ
		}
		return checkReturnPlaceholdersAtIdx(ctx, semaCtx, s, int(idx))

	default:
		return false, nil, nil, nil
	}
}

// checkReturnPlaceholdersAtIdx checks that the placeholders for the
// overload at the input index are valid. It has the same return values
// as checkReturn.
func checkReturnPlaceholdersAtIdx(
	ctx context.Context, semaCtx *SemaContext, s *typeCheckOverloadState, idx int,
) (bool, []TypedExpr, []overloadImpl, error) {
	o := s.overloads[idx]
	p := o.params()
	for _, i := range s.placeholderIdxs {
		des := p.GetAt(i)
		typ, err := s.exprs[i].TypeCheck(ctx, semaCtx, des)
		if err != nil {
			if des.IsAmbiguous() {
				return false, nil, nil, nil
			}
			return false, nil, nil, err
		}
		s.typedExprs[i] = typ
	}
	return true, s.typedExprs, s.overloads[idx : idx+1], nil
}

func formatCandidates(prefix string, candidates []overloadImpl) string {
	var buf bytes.Buffer
	for _, candidate := range candidates {
		buf.WriteString(prefix)
		buf.WriteByte('(')
		params := candidate.params()
		tLen := params.Length()
		inputTyps := make([]TypedExpr, tLen)
		for i := 0; i < tLen; i++ {
			t := params.GetAt(i)
			inputTyps[i] = &TypedDummy{Typ: t}
			if i > 0 {
				buf.WriteString(", ")
			}
			buf.WriteString(t.String())
		}
		buf.WriteString(") -> ")
		buf.WriteString(returnTypeToFixedType(candidate.returnType(), inputTyps).String())
		if candidate.preferred() {
			buf.WriteString(" [preferred]")
		}
		buf.WriteByte('\n')
	}
	return buf.String()
}
pkg/sql/sem/tree/overload.go
0
https://github.com/cockroachdb/cockroach/commit/0d8e15c112246a11db9d4e370b5e9bcfeac82aa8
[ 0.0003708941803779453, 0.00017061599646694958, 0.00015604721556883305, 0.0001686929608695209, 0.00002005200985877309 ]
{ "id": 3, "code_window": [ " ParentID 52, ParentSchemaID 29: relation \"vehicle_location_histories\" (56): processed\n", " ParentID 52, ParentSchemaID 29: relation \"promo_codes\" (57): referenced database ID 52: referenced descriptor not found\n", " ParentID 52, ParentSchemaID 29: relation \"promo_codes\" (57): processed\n", " ParentID 52, ParentSchemaID 29: relation \"user_promo_codes\" (58): constraint id was missing for constraint: FOREIGN KEY with name \"fk_city_ref_users\"\n", " ParentID 52, ParentSchemaID 29: relation \"user_promo_codes\" (58): processed\n", " ParentID 0, ParentSchemaID 0: namespace entry \"defaultdb\" (50): processed\n", " ParentID 0, ParentSchemaID 0: namespace entry \"movr\" (52): descriptor not found\n", " ParentID 0, ParentSchemaID 0: namespace entry \"postgres\" (51): processed\n", " ParentID 0, ParentSchemaID 0: namespace entry \"system\" (1): processed\n" ], "labels": [ "keep", "keep", "keep", "replace", "keep", "keep", "keep", "keep", "keep" ], "after_edit": [ " ParentID 52, ParentSchemaID 29: relation \"user_promo_codes\" (58): referenced database ID 52: referenced descriptor not found\n" ], "file_path": "pkg/cli/testdata/doctor/test_examine_zipdir_verbose", "type": "replace", "edit_start_line_idx": 50 }
# If you change this file, regenerate the certificate with
#   openssl req -new -x509 -sha256 -key testserver.key -out testserver.crt -days 3650 -config testserver_config.cnf
#
default_bits = 2048
distinguished_name = dn
x509_extensions = san
req_extensions = san
extensions = san
prompt = no

[ dn ]
organizationName = MyCompany

[ san ]
subjectAltName = DNS:localhost
pkg/ccl/sqlproxyccl/testdata/testserver_config.cnf
0
https://github.com/cockroachdb/cockroach/commit/0d8e15c112246a11db9d4e370b5e9bcfeac82aa8
[ 0.0001688458287389949, 0.0001660445996094495, 0.00016324337047990412, 0.0001660445996094495, 0.0000028012291295453906 ]
