ADAPT-Chase commited on
Commit
e285425
·
verified ·
1 Parent(s): c36a28e

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. .gitattributes +188 -0
  2. platform/dbops/archive/databases_old/data/clickhouse/config/config.xml +1866 -0
  3. platform/dbops/archive/databases_old/data/clickhouse/config/users.xml +121 -0
  4. platform/dbops/archive/databases_old/data/clickhouse/data/metadata/INFORMATION_SCHEMA.sql +2 -0
  5. platform/dbops/archive/databases_old/data/clickhouse/data/metadata/default.sql +2 -0
  6. platform/dbops/archive/databases_old/data/clickhouse/data/metadata/information_schema.sql +2 -0
  7. platform/dbops/archive/databases_old/data/clickhouse/data/metadata/system.sql +2 -0
  8. platform/dbops/archive/databases_old/data/clickhouse/data/preprocessed_configs/config.xml +1888 -0
  9. platform/dbops/archive/databases_old/data/clickhouse/data/preprocessed_configs/users.xml +126 -0
  10. platform/dbops/archive/databases_old/data/clickhouse/data/status +3 -0
  11. platform/dbops/archive/databases_old/data/clickhouse/data/store/256/256e67ed-4fbf-46e0-9e3a-4113943de34d/202508_1061_1919_203/checksums.txt +0 -0
  12. platform/dbops/archive/databases_old/data/clickhouse/data/store/256/256e67ed-4fbf-46e0-9e3a-4113943de34d/202508_1061_1919_203/columns.txt +7 -0
  13. platform/dbops/archive/databases_old/data/clickhouse/data/store/256/256e67ed-4fbf-46e0-9e3a-4113943de34d/202508_1061_1919_203/columns_substreams.txt +14 -0
  14. platform/dbops/archive/databases_old/data/clickhouse/data/store/256/256e67ed-4fbf-46e0-9e3a-4113943de34d/202508_1061_1919_203/count.txt +1 -0
  15. platform/dbops/archive/databases_old/data/clickhouse/data/store/256/256e67ed-4fbf-46e0-9e3a-4113943de34d/202508_1061_1919_203/default_compression_codec.txt +1 -0
  16. platform/dbops/archive/databases_old/data/clickhouse/data/store/256/256e67ed-4fbf-46e0-9e3a-4113943de34d/202508_1061_1919_203/event_date.bin +3 -0
  17. platform/dbops/archive/databases_old/data/clickhouse/data/store/256/256e67ed-4fbf-46e0-9e3a-4113943de34d/202508_1061_1919_203/event_date.cmrk2 +0 -0
  18. platform/dbops/archive/databases_old/data/clickhouse/data/store/256/256e67ed-4fbf-46e0-9e3a-4113943de34d/202508_1061_1919_203/event_time.bin +3 -0
  19. platform/dbops/archive/databases_old/data/clickhouse/data/store/256/256e67ed-4fbf-46e0-9e3a-4113943de34d/202508_1061_1919_203/event_time.cmrk2 +0 -0
  20. platform/dbops/archive/databases_old/data/clickhouse/data/store/256/256e67ed-4fbf-46e0-9e3a-4113943de34d/202508_1061_1919_203/hostname.bin +3 -0
  21. platform/dbops/archive/databases_old/data/clickhouse/data/store/256/256e67ed-4fbf-46e0-9e3a-4113943de34d/202508_1061_1919_203/hostname.cmrk2 +0 -0
  22. platform/dbops/archive/databases_old/data/clickhouse/data/store/256/256e67ed-4fbf-46e0-9e3a-4113943de34d/202508_1061_1919_203/hostname.dict.bin +3 -0
  23. platform/dbops/archive/databases_old/data/clickhouse/data/store/256/256e67ed-4fbf-46e0-9e3a-4113943de34d/202508_1061_1919_203/hostname.dict.cmrk2 +0 -0
  24. platform/dbops/archive/databases_old/data/clickhouse/data/store/256/256e67ed-4fbf-46e0-9e3a-4113943de34d/202508_1061_1919_203/metadata_version.txt +1 -0
  25. platform/dbops/archive/databases_old/data/clickhouse/data/store/256/256e67ed-4fbf-46e0-9e3a-4113943de34d/202508_1061_1919_203/metric.bin +3 -0
  26. platform/dbops/archive/databases_old/data/clickhouse/data/store/256/256e67ed-4fbf-46e0-9e3a-4113943de34d/202508_1061_1919_203/metric.cmrk2 +0 -0
  27. platform/dbops/archive/databases_old/data/clickhouse/data/store/256/256e67ed-4fbf-46e0-9e3a-4113943de34d/202508_1061_1919_203/metric.dict.bin +3 -0
  28. platform/dbops/archive/databases_old/data/clickhouse/data/store/256/256e67ed-4fbf-46e0-9e3a-4113943de34d/202508_1061_1919_203/metric.dict.cmrk2 +0 -0
  29. platform/dbops/archive/databases_old/data/clickhouse/data/store/256/256e67ed-4fbf-46e0-9e3a-4113943de34d/202508_1061_1919_203/minmax_event_date.idx +1 -0
  30. platform/dbops/archive/databases_old/data/clickhouse/data/store/256/256e67ed-4fbf-46e0-9e3a-4113943de34d/202508_1061_1919_203/partition.dat +0 -0
  31. platform/dbops/archive/databases_old/data/clickhouse/data/store/256/256e67ed-4fbf-46e0-9e3a-4113943de34d/202508_1061_1919_203/primary.cidx +0 -0
  32. platform/dbops/archive/databases_old/data/clickhouse/data/store/256/256e67ed-4fbf-46e0-9e3a-4113943de34d/202508_1061_1919_203/serialization.json +1 -0
  33. platform/dbops/archive/databases_old/data/clickhouse/data/store/256/256e67ed-4fbf-46e0-9e3a-4113943de34d/202508_1061_1919_203/value.bin +3 -0
  34. platform/dbops/archive/databases_old/data/clickhouse/data/store/256/256e67ed-4fbf-46e0-9e3a-4113943de34d/202508_1061_1919_203/value.cmrk2 +0 -0
  35. platform/dbops/archive/databases_old/data/clickhouse/data/store/256/256e67ed-4fbf-46e0-9e3a-4113943de34d/202508_1920_2448_162/columns.txt +7 -0
  36. platform/dbops/archive/databases_old/data/clickhouse/data/store/256/256e67ed-4fbf-46e0-9e3a-4113943de34d/202508_1920_2448_162/columns_substreams.txt +14 -0
  37. platform/dbops/archive/databases_old/data/clickhouse/data/store/256/256e67ed-4fbf-46e0-9e3a-4113943de34d/202508_1920_2448_162/count.txt +1 -0
  38. platform/dbops/archive/databases_old/data/clickhouse/data/store/256/256e67ed-4fbf-46e0-9e3a-4113943de34d/202508_1920_2448_162/event_date.bin +3 -0
  39. platform/dbops/archive/databases_old/data/clickhouse/data/store/256/256e67ed-4fbf-46e0-9e3a-4113943de34d/202508_1920_2448_162/event_date.cmrk2 +0 -0
  40. platform/dbops/archive/databases_old/data/clickhouse/data/store/256/256e67ed-4fbf-46e0-9e3a-4113943de34d/202508_1920_2448_162/event_time.bin +3 -0
  41. platform/dbops/archive/databases_old/data/clickhouse/data/store/256/256e67ed-4fbf-46e0-9e3a-4113943de34d/202508_1920_2448_162/event_time.cmrk2 +0 -0
  42. platform/dbops/archive/databases_old/data/clickhouse/data/store/256/256e67ed-4fbf-46e0-9e3a-4113943de34d/202508_1920_2448_162/hostname.bin +3 -0
  43. platform/dbops/archive/databases_old/data/clickhouse/data/store/256/256e67ed-4fbf-46e0-9e3a-4113943de34d/202508_1920_2448_162/hostname.cmrk2 +0 -0
  44. platform/dbops/archive/databases_old/data/clickhouse/data/store/256/256e67ed-4fbf-46e0-9e3a-4113943de34d/202508_1920_2448_162/hostname.dict.bin +3 -0
  45. platform/dbops/archive/databases_old/data/clickhouse/data/store/256/256e67ed-4fbf-46e0-9e3a-4113943de34d/202508_1920_2448_162/hostname.dict.cmrk2 +0 -0
  46. platform/dbops/archive/databases_old/data/clickhouse/data/store/256/256e67ed-4fbf-46e0-9e3a-4113943de34d/202508_1920_2448_162/metric.bin +3 -0
  47. platform/dbops/archive/databases_old/data/clickhouse/data/store/256/256e67ed-4fbf-46e0-9e3a-4113943de34d/202508_1920_2448_162/metric.cmrk2 +0 -0
  48. platform/dbops/archive/databases_old/data/clickhouse/data/store/256/256e67ed-4fbf-46e0-9e3a-4113943de34d/202508_1920_2448_162/metric.dict.bin +3 -0
  49. platform/dbops/archive/databases_old/data/clickhouse/data/store/256/256e67ed-4fbf-46e0-9e3a-4113943de34d/202508_1920_2448_162/metric.dict.cmrk2 +0 -0
  50. platform/dbops/archive/databases_old/data/clickhouse/data/store/256/256e67ed-4fbf-46e0-9e3a-4113943de34d/202508_1920_2448_162/minmax_event_date.idx +1 -0
.gitattributes CHANGED
@@ -1976,3 +1976,191 @@ platform/dbops/archive/databases_old/data/pulsar/data/bookkeeper/current/ledgers
1976
  platform/dbops/archive/databases_old/data/qdrant/storage/collections/quantum_secure_data/0/wal/open-1 filter=lfs diff=lfs merge=lfs -text
1977
  platform/dbops/archive/databases_old/data/qdrant/storage/collections/quantum_secure_data/0/wal/open-2 filter=lfs diff=lfs merge=lfs -text
1978
  platform/dbops/archive/databases_old/data/qdrant/storage/collections/quantum_secure_data/0/segments/0c15fd83-fc15-4628-8473-6494dc8bb4ee/vector_storage/deleted/flags_a.dat filter=lfs diff=lfs merge=lfs -text
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1976
  platform/dbops/archive/databases_old/data/qdrant/storage/collections/quantum_secure_data/0/wal/open-1 filter=lfs diff=lfs merge=lfs -text
1977
  platform/dbops/archive/databases_old/data/qdrant/storage/collections/quantum_secure_data/0/wal/open-2 filter=lfs diff=lfs merge=lfs -text
1978
  platform/dbops/archive/databases_old/data/qdrant/storage/collections/quantum_secure_data/0/segments/0c15fd83-fc15-4628-8473-6494dc8bb4ee/vector_storage/deleted/flags_a.dat filter=lfs diff=lfs merge=lfs -text
1979
+ platform/dbops/archive/databases_old/data/qdrant/storage/collections/quantum_secure_data/0/segments/14f70cca-8577-49d1-bb59-f0465871a911/vector_storage/deleted/flags_a.dat filter=lfs diff=lfs merge=lfs -text
1980
+ platform/dbops/archive/databases_old/data/qdrant/storage/collections/quantum_secure_data/0/segments/3a2c80f1-eb3d-4a36-88e1-4816121a3c0e/vector_storage/deleted/flags_a.dat filter=lfs diff=lfs merge=lfs -text
1981
+ platform/dbops/archive/databases_old/data/qdrant/storage/collections/quantum_secure_data/0/segments/6384f3f8-97d8-4f0c-a42f-a829ec967a63/vector_storage/deleted/flags_a.dat filter=lfs diff=lfs merge=lfs -text
1982
+ platform/dbops/archive/databases_old/data/qdrant/storage/collections/quantum_secure_data/0/segments/7cfc921d-0d00-4a8f-a75a-f55e60287826/vector_storage/deleted/flags_a.dat filter=lfs diff=lfs merge=lfs -text
1983
+ platform/dbops/archive/databases_old/data/qdrant/storage/collections/quantum_secure_data/0/segments/7cfc921d-0d00-4a8f-a75a-f55e60287826/vector_storage/vectors/chunk_0.mmap filter=lfs diff=lfs merge=lfs -text
1984
+ platform/dbops/archive/databases_old/data/qdrant/storage/collections/quantum_secure_data/0/segments/8899e2ed-f478-48ef-ba7d-db5dc04201fe/vector_storage/deleted/flags_a.dat filter=lfs diff=lfs merge=lfs -text
1985
+ platform/dbops/archive/databases_old/data/qdrant/storage/collections/quantum_secure_data/0/segments/9c9bb7a8-8451-474a-8299-c2951af9a53a/vector_storage/deleted/flags_a.dat filter=lfs diff=lfs merge=lfs -text
1986
+ platform/dbops/archive/databases_old/data/qdrant/storage/collections/quantum_secure_data/0/segments/df3417df-c2cf-4768-b3cc-29ea61bac9d5/vector_storage/deleted/flags_a.dat filter=lfs diff=lfs merge=lfs -text
1987
+ platform/dbops/archive/databases_old/data/workspace/.hf_home/xet/https___cas_serv-tGqkUaZf_CBPHQ6h/shard-cache/0069ff3fc1d984efbc4947cd044d01ca74db6f95691ab6d1ac0500578172e848.mdb filter=lfs diff=lfs merge=lfs -text
1988
+ platform/dbops/archive/databases_old/data/workspace/.hf_home/xet/https___cas_serv-tGqkUaZf_CBPHQ6h/shard-cache/42ab132f53e98a124a3d617dfec75ed8cdf25830ace6f889c291defe27a94d8a.mdb filter=lfs diff=lfs merge=lfs -text
1989
+ platform/dbops/archive/databases_old/data/workspace/.hf_home/xet/https___cas_serv-tGqkUaZf_CBPHQ6h/shard-cache/5048d476c4a8cde7f218f9b1bb4600a53b8ce421a0e8f3c0f511455792825d73.mdb filter=lfs diff=lfs merge=lfs -text
1990
+ platform/dbops/archive/databases_old/data/workspace/.hf_home/xet/https___cas_serv-tGqkUaZf_CBPHQ6h/shard-cache/0bbe0391f60970537a7312f483174ff5ad78813f63f4548daa18418b53fc395a.mdb filter=lfs diff=lfs merge=lfs -text
1991
+ platform/dbops/archive/databases_old/data/workspace/.hf_home/xet/https___cas_serv-tGqkUaZf_CBPHQ6h/shard-cache/526e064140e4a9ec2ce5e7d4f91b4cc6e059288888dfb57e754ac3a972e09876.mdb filter=lfs diff=lfs merge=lfs -text
1992
+ platform/dbops/archive/databases_old/data/workspace/.hf_home/xet/https___cas_serv-tGqkUaZf_CBPHQ6h/shard-cache/1b446be0a4881fd0f37b1b071d84c7ed79bba5efc84265e9c5f5373395ec2b5f.mdb filter=lfs diff=lfs merge=lfs -text
1993
+ platform/dbops/archive/databases_old/data/workspace/.hf_home/xet/https___cas_serv-tGqkUaZf_CBPHQ6h/shard-cache/2aedbad10fbedb089db36de558dca207d713e7e0dfaee0f6a4b40b3de1e309d5.mdb filter=lfs diff=lfs merge=lfs -text
1994
+ platform/dbops/archive/databases_old/data/workspace/.hf_home/xet/https___cas_serv-tGqkUaZf_CBPHQ6h/shard-cache/31ba047275e8be95e1d9489dd29162622ca92d223bb21741e724bf1e72fca827.mdb filter=lfs diff=lfs merge=lfs -text
1995
+ platform/dbops/archive/databases_old/data/workspace/.hf_home/xet/https___cas_serv-tGqkUaZf_CBPHQ6h/shard-cache/33365fdf4625dad8ff6cb92932bb5cdd7ddbb6fa0a50e499081ae1e92ff9ece1.mdb filter=lfs diff=lfs merge=lfs -text
1996
+ platform/dbops/archive/databases_old/data/workspace/.hf_home/xet/https___cas_serv-tGqkUaZf_CBPHQ6h/shard-cache/80d83efb5cd7c15e1cb02392ce24b36ee71dd3d35ddce6d67ea71ce7c1dd1e91.mdb filter=lfs diff=lfs merge=lfs -text
1997
+ platform/dbops/archive/databases_old/data/workspace/.hf_home/xet/https___cas_serv-tGqkUaZf_CBPHQ6h/shard-cache/6d8930ab2e13afb671c304e540a1627a53a841ee255d562ca9e5c52d85720938.mdb filter=lfs diff=lfs merge=lfs -text
1998
+ platform/dbops/archive/databases_old/data/workspace/.hf_home/xet/https___cas_serv-tGqkUaZf_CBPHQ6h/shard-cache/8ffc1e3510aa092a64a42b8d772daebb8a0f459bf3b1b00aefb6cc2430489684.mdb filter=lfs diff=lfs merge=lfs -text
1999
+ platform/dbops/archive/databases_old/data/workspace/.hf_home/xet/https___cas_serv-tGqkUaZf_CBPHQ6h/shard-cache/905f9a7b50624546a37984f6f3e1034e33cc297418f1fc8847e2c0b4e9c8716d.mdb filter=lfs diff=lfs merge=lfs -text
2000
+ platform/dbops/archive/databases_old/data/workspace/.hf_home/xet/https___cas_serv-tGqkUaZf_CBPHQ6h/shard-cache/a15caaf8168f8d6c85a529b1f1b7bbdcc2d81a6ed16d068ad0316f5d15e16f70.mdb filter=lfs diff=lfs merge=lfs -text
2001
+ platform/dbops/archive/databases_old/data/workspace/.hf_home/xet/https___cas_serv-tGqkUaZf_CBPHQ6h/shard-cache/be454dbf44ba25d4ec68f46fbc0e61437845bd76e81635aec7d4d1f5a3be21fd.mdb filter=lfs diff=lfs merge=lfs -text
2002
+ platform/dbops/archive/databases_old/data/workspace/.hf_home/xet/https___cas_serv-tGqkUaZf_CBPHQ6h/shard-cache/e24efc138cfe84e55eab93a37d288fb71c35d0790572954262de2b23d38704b5.mdb filter=lfs diff=lfs merge=lfs -text
2003
+ platform/dbops/archive/databases_old/data/workspace/.hf_home/xet/https___cas_serv-tGqkUaZf_CBPHQ6h/shard-cache/e43556b80daf76eadb21945aabd3cd011b09529f7489ba032abd66568e1e2cea.mdb filter=lfs diff=lfs merge=lfs -text
2004
+ platform/dbops/archive/databases_old/data/workspace/.hf_home/xet/https___cas_serv-tGqkUaZf_CBPHQ6h/shard-cache/f50e83bb61e16036d1773b74e8129c9867b7e305b1f395116b470f3de9394f19.mdb filter=lfs diff=lfs merge=lfs -text
2005
+ platform/dbops/archive/databases_old/data/workspace/.hf_home/xet/https___cas_serv-tGqkUaZf_CBPHQ6h/chunk-cache/-P/-Pr0RnpXUXb5M2pnXIP_SIT-nTJZO9JBvaIWWcjgU0ZkZWZhdWx0/AAAAADsEAAAvz_4DAAAAAJqJ0RI= filter=lfs diff=lfs merge=lfs -text
2006
+ platform/dbops/archive/databases_old/data/workspace/.hf_home/xet/https___cas_serv-tGqkUaZf_CBPHQ6h/chunk-cache/05/05KNevOSV0O3YyIQMgD2IFubOoAYPjBf_ib_GbwtWMNkZWZhdWx0/AAAAAEoEAAAwpf8DAAAAAHmcDmk= filter=lfs diff=lfs merge=lfs -text
2007
+ platform/dbops/archive/databases_old/data/workspace/.hf_home/xet/https___cas_serv-tGqkUaZf_CBPHQ6h/chunk-cache/0c/0cYN0iwrdk634eSgxWxAqbYXbXF-8iOrbJSmd_HlBlNkZWZhdWx0/AAAAAEcDAABKn-oCAAAAAGJ2EhQ= filter=lfs diff=lfs merge=lfs -text
2008
+ platform/dbops/archive/databases_old/data/workspace/.hf_home/xet/https___cas_serv-tGqkUaZf_CBPHQ6h/chunk-cache/1J/1JJtZ2qQ79x58cob-H5l0N9Q0N_c2qD0XyBaOGGd_lhkZWZhdWx0/AAAAAEoEAACUXP8DAAAAAM4UvMI= filter=lfs diff=lfs merge=lfs -text
2009
+ platform/dbops/archive/databases_old/data/workspace/.hf_home/xet/https___cas_serv-tGqkUaZf_CBPHQ6h/chunk-cache/1P/1PtzNNTL2LsSqLMbU8DQnXWxs69t__ZMkySFz0lsRSZkZWZhdWx0/AAAAAE0EAAAZlf4DAAAAAIjkQmQ= filter=lfs diff=lfs merge=lfs -text
2010
+ platform/dbops/archive/databases_old/data/workspace/.hf_home/xet/https___cas_serv-tGqkUaZf_CBPHQ6h/chunk-cache/1w/1wg3gk5zHPXViCSLmpSwJQGyNReU1MM9Hs8GxwoaXXtkZWZhdWx0/AAAAADYEAABAYv4DAAAAALx5450= filter=lfs diff=lfs merge=lfs -text
2011
+ platform/dbops/archive/databases_old/data/workspace/.hf_home/xet/https___cas_serv-tGqkUaZf_CBPHQ6h/chunk-cache/2S/2StGE0mTvEBTd0ITmfmmM4PT9hGZKcQB7MO9VjYHHA9kZWZhdWx0/AAAAAFUEAAC3mv4DAAAAABgVK-A= filter=lfs diff=lfs merge=lfs -text
2012
+ platform/dbops/archive/databases_old/data/workspace/.hf_home/xet/https___cas_serv-tGqkUaZf_CBPHQ6h/chunk-cache/31/319571y0VMHInA0CY-nWOh3qAnvCBnqJGyzMtUPBw4NkZWZhdWx0/AAAAADYCAAAj8R4CAAAAALVbuX0= filter=lfs diff=lfs merge=lfs -text
2013
+ platform/dbops/archive/databases_old/data/workspace/.hf_home/xet/https___cas_serv-tGqkUaZf_CBPHQ6h/chunk-cache/3h/3hgwwd2mnGOT4Tm-1xxB05FMW823PCp56i5MNBcLBHZkZWZhdWx0/AAAAADkEAABICAAEAAAAALzbLZo= filter=lfs diff=lfs merge=lfs -text
2014
+ platform/dbops/archive/databases_old/data/workspace/.hf_home/xet/https___cas_serv-tGqkUaZf_CBPHQ6h/chunk-cache/41/41zfN2xpRhmyhJMO_hrKyu6mg4e8C_PzgTvtxUwa8ghkZWZhdWx0/AAAAAEgEAAC6Y_8DAAAAACEZ1wY= filter=lfs diff=lfs merge=lfs -text
2015
+ platform/dbops/archive/databases_old/data/workspace/.hf_home/xet/https___cas_serv-tGqkUaZf_CBPHQ6h/chunk-cache/4F/4FiB4mWJSo50BgESXY8KamSftoxxyMjriG9h1hW-OeRkZWZhdWx0/AAAAAGAEAACxkf8DAAAAACJQbIE= filter=lfs diff=lfs merge=lfs -text
2016
+ platform/dbops/archive/databases_old/data/workspace/.hf_home/xet/https___cas_serv-tGqkUaZf_CBPHQ6h/chunk-cache/4S/4S9phsxh39P4BjeHV59PfdpzQ-YzuS7ezx2H2ku2ISlkZWZhdWx0/AAAAAEQEAAAdUf8DAAAAABDLxbQ= filter=lfs diff=lfs merge=lfs -text
2017
+ platform/dbops/archive/databases_old/data/workspace/.hf_home/xet/https___cas_serv-tGqkUaZf_CBPHQ6h/chunk-cache/4t/4t2AqLFkIKYfuR5LZyAbmiy2jLLNonagqHSihqPAuCRkZWZhdWx0/AAAAABYEAAARz_8DAAAAAC15TqQ= filter=lfs diff=lfs merge=lfs -text
2018
+ platform/dbops/archive/databases_old/data/workspace/.hf_home/xet/https___cas_serv-tGqkUaZf_CBPHQ6h/chunk-cache/5M/5MtOupg9z0TBa4-B-RcNCYoyiqs1JIhH4K0Olbvy1nhkZWZhdWx0/AAAAAB0EAAAz6P8DAAAAAJ8Uu5Y= filter=lfs diff=lfs merge=lfs -text
2019
+ platform/dbops/archive/databases_old/data/workspace/.hf_home/xet/https___cas_serv-tGqkUaZf_CBPHQ6h/chunk-cache/5S/5S7mh9Y_CdQDVn_z7W89aOngwBQcAF7Iasq1FuSpIMdkZWZhdWx0/AAAAAEwEAAAMBQAEAAAAACwbN7A= filter=lfs diff=lfs merge=lfs -text
2020
+ platform/dbops/archive/databases_old/data/workspace/.hf_home/xet/https___cas_serv-tGqkUaZf_CBPHQ6h/chunk-cache/5d/5djEH1KCtLMrG3cwa2JprzGNvFnSEdSWtY9LF7mhgKtkZWZhdWx0/AAAAABUEAABcD_8DAAAAAMCJmpw= filter=lfs diff=lfs merge=lfs -text
2021
+ platform/dbops/archive/databases_old/data/workspace/.hf_home/xet/https___cas_serv-tGqkUaZf_CBPHQ6h/chunk-cache/5m/5m9WSnfo-0mbEnk6UoF00RZJYE7ZU0pwXn02DFgrScRkZWZhdWx0/AAAAADcEAABSzf8DAAAAAK860co= filter=lfs diff=lfs merge=lfs -text
2022
+ platform/dbops/archive/databases_old/data/workspace/.hf_home/xet/https___cas_serv-tGqkUaZf_CBPHQ6h/chunk-cache/6B/6BsHEUtCnPzxpLJIukPcQ_rOM2u-j6livAOKutjgdTlkZWZhdWx0/AAAAAD0EAADrDgAEAAAAAC5KuE0= filter=lfs diff=lfs merge=lfs -text
2023
+ platform/dbops/archive/databases_old/data/workspace/.hf_home/xet/https___cas_serv-tGqkUaZf_CBPHQ6h/chunk-cache/6R/6RTjjwNyEf4Aun8C0piGT6fFt-3lkiu5Hyb3mAa2YpxkZWZhdWx0/AAAAAEsEAAD3uf8DAAAAAInpbS4= filter=lfs diff=lfs merge=lfs -text
2024
+ platform/dbops/archive/databases_old/data/workspace/.hf_home/xet/https___cas_serv-tGqkUaZf_CBPHQ6h/chunk-cache/6a/6afuQll0nCQ6RijwCnujGIdaKmarQJ9yONfB__qluORkZWZhdWx0/AAAAABsEAAAWiv8DAAAAAC0XTjc= filter=lfs diff=lfs merge=lfs -text
2025
+ platform/dbops/archive/databases_old/data/workspace/.hf_home/xet/https___cas_serv-tGqkUaZf_CBPHQ6h/chunk-cache/6h/6h8g39E2zQ4_h8UjlyxX4KifiiiYfr4pG4PMYu8xqb9kZWZhdWx0/AAAAAE0EAAAErv8DAAAAAAsmTqY= filter=lfs diff=lfs merge=lfs -text
2026
+ platform/dbops/archive/databases_old/data/workspace/.hf_home/xet/https___cas_serv-tGqkUaZf_CBPHQ6h/chunk-cache/7_/7_BYgLwZqcMy_kugMnyOvGHxHZrWcX_NB3nAPRZIrzpkZWZhdWx0/AAAAAGoEAADYU_8DAAAAAK2GbQ8= filter=lfs diff=lfs merge=lfs -text
2027
+ platform/dbops/archive/databases_old/data/workspace/.hf_home/xet/https___cas_serv-tGqkUaZf_CBPHQ6h/chunk-cache/7c/7c8mUJYLd45g6fgQCtenmpCV1EN6RCx4_D9mreS4fcVkZWZhdWx0/AAAAAFcEAAD6U_8DAAAAAA287eE= filter=lfs diff=lfs merge=lfs -text
2028
+ platform/dbops/archive/databases_old/data/workspace/.hf_home/xet/https___cas_serv-tGqkUaZf_CBPHQ6h/chunk-cache/7i/7i2_VR0nxFkDVLWjdu7wTk9HktvR29IJi9oM2Jux9QtkZWZhdWx0/AAAAAE0EAAApMf4DAAAAAELewsw= filter=lfs diff=lfs merge=lfs -text
2029
+ platform/dbops/archive/databases_old/data/workspace/.hf_home/xet/https___cas_serv-tGqkUaZf_CBPHQ6h/chunk-cache/7q/7q4egJgBNjBLcHYfoiMXnskkjkHb1TvINCgs2GcvNtZkZWZhdWx0/AAAAAIUEAAB5if8DAAAAAAhkVpA= filter=lfs diff=lfs merge=lfs -text
2030
+ platform/dbops/archive/databases_old/data/workspace/.hf_home/xet/https___cas_serv-tGqkUaZf_CBPHQ6h/chunk-cache/8D/8DFS-r7bv3uzf8x8Fo11309s5HBi3ZdD50UUpaVUAd1kZWZhdWx0/AAAAAEIEAACck_8DAAAAAAX1Brk= filter=lfs diff=lfs merge=lfs -text
2031
+ platform/dbops/archive/databases_old/data/workspace/.hf_home/xet/https___cas_serv-tGqkUaZf_CBPHQ6h/chunk-cache/8s/8s2AGyopIKqSCYEOmN62ff_Q7emcX6pWXNLMutpPoZNkZWZhdWx0/AAAAAFEEAAAqqP8DAAAAAKlOWTY= filter=lfs diff=lfs merge=lfs -text
2032
+ platform/dbops/archive/databases_old/data/workspace/.hf_home/xet/https___cas_serv-tGqkUaZf_CBPHQ6h/chunk-cache/9q/9q4jFql0-YlI0z1vxmS73C3I2O3pq6K7veRiOsIPGq1kZWZhdWx0/AAAAAEQEAABX0f8DAAAAAEDBRg8= filter=lfs diff=lfs merge=lfs -text
2033
+ platform/dbops/archive/databases_old/data/workspace/.hf_home/xet/https___cas_serv-tGqkUaZf_CBPHQ6h/chunk-cache/9y/9yWyYnmV3rvHryFkK6NmOt0IU2FCYZVyGmOk6Tb31H9kZWZhdWx0/AAAAAFkEAADnPP8DAAAAADNY0uE= filter=lfs diff=lfs merge=lfs -text
2034
+ platform/dbops/archive/databases_old/data/workspace/.hf_home/xet/https___cas_serv-tGqkUaZf_CBPHQ6h/chunk-cache/AJ/AJWNa0mjm5Kbry1dYtyhjZT1Gc7sr4EsQZeAxo0B_ftkZWZhdWx0/AAAAAFMEAACjAwAEAAAAAOM6z0U= filter=lfs diff=lfs merge=lfs -text
2035
+ platform/dbops/archive/databases_old/data/workspace/.hf_home/xet/https___cas_serv-tGqkUaZf_CBPHQ6h/chunk-cache/AX/AX4ojStmiUbZ42JhmqXeW4kNE0DYE8v5y5RTVutKHwZkZWZhdWx0/AAAAAFQEAACXJ_4DAAAAAKK5uX4= filter=lfs diff=lfs merge=lfs -text
2036
+ platform/dbops/archive/databases_old/data/workspace/.hf_home/xet/https___cas_serv-tGqkUaZf_CBPHQ6h/chunk-cache/BW/BWuZIrOxboiBdJdOMno_-qTW3cLxh3ZQ1XBNiAjehhtkZWZhdWx0/AAAAAGQEAACC3P8DAAAAAC5MUmA= filter=lfs diff=lfs merge=lfs -text
2037
+ platform/dbops/archive/databases_old/data/workspace/.hf_home/xet/https___cas_serv-tGqkUaZf_CBPHQ6h/chunk-cache/Bl/BlSBV5NaoR2m9Se1Ze2n4XQLUN_v4MaduDqrkc2xPF9kZWZhdWx0/AAAAAFcEAABwzf4DAAAAAFu8sLw= filter=lfs diff=lfs merge=lfs -text
2038
+ platform/dbops/archive/databases_old/data/workspace/.hf_home/xet/https___cas_serv-tGqkUaZf_CBPHQ6h/chunk-cache/CK/CKMu8evw-YVkVUb6hjbFCsQuPknkKSjPV_A_q-74aM1kZWZhdWx0/AAAAABwEAABmKv8DAAAAACh1ypQ= filter=lfs diff=lfs merge=lfs -text
2039
+ platform/dbops/archive/databases_old/data/workspace/.hf_home/xet/https___cas_serv-tGqkUaZf_CBPHQ6h/chunk-cache/CO/COvqwDxhFBTPodIGE9t_cxyXLfndMbHBqXDIJZsjwH9kZWZhdWx0/AAAAADoEAAAt3v8DAAAAAGqRAuc= filter=lfs diff=lfs merge=lfs -text
2040
+ platform/dbops/archive/databases_old/data/workspace/.hf_home/xet/https___cas_serv-tGqkUaZf_CBPHQ6h/chunk-cache/CX/CXV5QGAKt87FSFx2WowLH8Hfus_WN_hlyFE_Y7ATBYFkZWZhdWx0/AAAAAFMEAADihf8DAAAAALQtIzE= filter=lfs diff=lfs merge=lfs -text
2041
+ platform/dbops/archive/databases_old/data/workspace/.hf_home/xet/https___cas_serv-tGqkUaZf_CBPHQ6h/chunk-cache/Cc/Cc9sa983DgAGeOcMoJALFcuZSAllOI2mdWhEh-Hm1IBkZWZhdWx0/AAAAADMEAAARAP8DAAAAAP233qM= filter=lfs diff=lfs merge=lfs -text
2042
+ platform/dbops/archive/databases_old/data/workspace/.hf_home/xet/https___cas_serv-tGqkUaZf_CBPHQ6h/chunk-cache/D-/D-Dsb8iDipOysQupU2Tjoclr1Wi5Bf5T-dOWsfSnEH9kZWZhdWx0/AAAAADoEAADbqP8DAAAAAAfSPDA= filter=lfs diff=lfs merge=lfs -text
2043
+ platform/dbops/archive/databases_old/data/workspace/.hf_home/xet/https___cas_serv-tGqkUaZf_CBPHQ6h/chunk-cache/DB/DBN05qi8phh3pxk8POK_mbz0HXFz8kcoZE7YG8WXRUpkZWZhdWx0/AAAAAEYEAADJGf8DAAAAAC8TJC0= filter=lfs diff=lfs merge=lfs -text
2044
+ platform/dbops/archive/databases_old/data/workspace/.hf_home/xet/https___cas_serv-tGqkUaZf_CBPHQ6h/chunk-cache/DM/DMvolWUIncYD34O4bkRQivoOxcmHP9FdnD-PgQmzpXhkZWZhdWx0/AAAAACIEAACHzf8DAAAAALcXH68= filter=lfs diff=lfs merge=lfs -text
2045
+ platform/dbops/archive/databases_old/data/workspace/.hf_home/xet/https___cas_serv-tGqkUaZf_CBPHQ6h/chunk-cache/Dg/Dg8q7a8FlJ1FN_kL8sCPenJkvmYmREIWwtEhu7KtE6JkZWZhdWx0/AAAAAC4EAABj__8DAAAAAPkn7FE= filter=lfs diff=lfs merge=lfs -text
2046
+ platform/dbops/archive/databases_old/data/workspace/.hf_home/xet/https___cas_serv-tGqkUaZf_CBPHQ6h/chunk-cache/Dm/DmooxqFu1HYhRoNNHU_s1BamHdqwexZk2mpzjrmKxxhkZWZhdWx0/AAAAAE4EAADZzv8DAAAAAG_62Hw= filter=lfs diff=lfs merge=lfs -text
2047
+ platform/dbops/archive/databases_old/data/workspace/.hf_home/xet/https___cas_serv-tGqkUaZf_CBPHQ6h/chunk-cache/EG/EG_UfVtiiiE30wbDYJqF6UJU3viJi0b-_dubjgqvZNxkZWZhdWx0/AAAAAFUEAACvCwAEAAAAADWclWE= filter=lfs diff=lfs merge=lfs -text
2048
+ platform/dbops/archive/databases_old/data/workspace/.hf_home/xet/https___cas_serv-tGqkUaZf_CBPHQ6h/chunk-cache/FH/FHmOOF19Acah_bVttotT2ogcs6LJaZm7e_7RFTNY_iJkZWZhdWx0/AAAAAFEEAACr__8DAAAAAP6RPMk= filter=lfs diff=lfs merge=lfs -text
2049
+ platform/dbops/archive/databases_old/data/workspace/.hf_home/xet/https___cas_serv-tGqkUaZf_CBPHQ6h/chunk-cache/Fh/FhawmFfDKEjKUKQ2qPSX-tiV8ZcI9ChPyPCVVwdHyDZkZWZhdWx0/AAAAAEkEAAAQlP4DAAAAAAnbd0M= filter=lfs diff=lfs merge=lfs -text
2050
+ platform/dbops/archive/databases_old/data/workspace/.hf_home/xet/https___cas_serv-tGqkUaZf_CBPHQ6h/chunk-cache/GT/GT2k4mu3WbL1d8ddvrtgcV6NXBam9PtaA_it_WZ0achkZWZhdWx0/AAAAAE0EAADclv8DAAAAAHOHjcg= filter=lfs diff=lfs merge=lfs -text
2051
+ platform/dbops/archive/databases_old/data/workspace/.hf_home/xet/https___cas_serv-tGqkUaZf_CBPHQ6h/chunk-cache/GY/GYg85Combl50Z7DJZyiiRPLKC2hB-qHsJL0wKso543FkZWZhdWx0/AAAAACwEAAA8Gf8DAAAAAFT9kyo= filter=lfs diff=lfs merge=lfs -text
2052
+ platform/dbops/archive/databases_old/data/workspace/.hf_home/xet/https___cas_serv-tGqkUaZf_CBPHQ6h/chunk-cache/Gc/Gc3oIQj8_QH-y0s9oGn1XQ13S41d1M74uew0YOP6TpFkZWZhdWx0/AAAAAEoEAAAFfP8DAAAAAADzDDg= filter=lfs diff=lfs merge=lfs -text
2053
+ platform/dbops/archive/databases_old/data/workspace/.hf_home/xet/https___cas_serv-tGqkUaZf_CBPHQ6h/chunk-cache/Gf/Gf1uTeLVirIKNdteh1l2jM7ysMNeYudt20WnI3FWhVFkZWZhdWx0/AAAAADIEAAD7Gf8DAAAAAJNjyQ8= filter=lfs diff=lfs merge=lfs -text
2054
+ platform/dbops/archive/databases_old/data/workspace/.hf_home/xet/https___cas_serv-tGqkUaZf_CBPHQ6h/chunk-cache/H5/H5UAuLrCmVMAnhvQGFAOAnCP8WOFQH_uqLxDyqE5TZhkZWZhdWx0/AAAAAGYEAAD9xP4DAAAAAPQ2WLA= filter=lfs diff=lfs merge=lfs -text
2055
+ platform/dbops/archive/databases_old/data/workspace/.hf_home/xet/https___cas_serv-tGqkUaZf_CBPHQ6h/chunk-cache/HA/HAIYNPxZgTeDM0K96BJP6xFmApZ12-5IP6OFrLLcwNlkZWZhdWx0/AAAAAE8EAABaQf8DAAAAACpAhGw= filter=lfs diff=lfs merge=lfs -text
2056
+ platform/dbops/archive/databases_old/data/workspace/.hf_home/xet/https___cas_serv-tGqkUaZf_CBPHQ6h/chunk-cache/HU/HUd9asMF02ilRkZboikrn5vfddPFjfYW9uKdUAztZwpkZWZhdWx0/AAAAAGQEAACH5P8DAAAAAKVR1t0= filter=lfs diff=lfs merge=lfs -text
2057
+ platform/dbops/archive/databases_old/data/workspace/.hf_home/xet/https___cas_serv-tGqkUaZf_CBPHQ6h/chunk-cache/He/HebXhsrW2jghfI7BFKR3MA1G41xLDzWpU4Q1q9c1d7tkZWZhdWx0/AAAAAEYEAAAyPP8DAAAAAO-3qTc= filter=lfs diff=lfs merge=lfs -text
2058
+ platform/dbops/archive/databases_old/data/workspace/.hf_home/xet/https___cas_serv-tGqkUaZf_CBPHQ6h/chunk-cache/IH/IHo4autfKcbiXp7GQFmByVG9JwfK6raUdtGoEqSTHkFkZWZhdWx0/AAAAADUEAABib_4DAAAAAHeCkWA= filter=lfs diff=lfs merge=lfs -text
2059
+ platform/dbops/archive/databases_old/data/workspace/.hf_home/xet/https___cas_serv-tGqkUaZf_CBPHQ6h/chunk-cache/K2/K2eyd85U_mUQDg1dS4Y94bLTG_ccU4H16fp-DD08fc5kZWZhdWx0/AAAAADYEAAB_BAAEAAAAADQQ694= filter=lfs diff=lfs merge=lfs -text
2060
+ platform/dbops/archive/databases_old/data/workspace/.hf_home/xet/https___cas_serv-tGqkUaZf_CBPHQ6h/chunk-cache/KL/KLIP-g_o62dVWwwUmqFet_kjDMU76_ppdvOm-DeP55pkZWZhdWx0/AAAAADkEAAC-X_4DAAAAAAe0eX0= filter=lfs diff=lfs merge=lfs -text
2061
+ platform/dbops/archive/databases_old/data/workspace/.hf_home/xet/https___cas_serv-tGqkUaZf_CBPHQ6h/chunk-cache/KQ/KQjMV9u1yQXHxiBaNxwRIr9C-IvhnYXTrp6HIYZoaIJkZWZhdWx0/AAAAAFwEAACgzf8DAAAAAF_o6cs= filter=lfs diff=lfs merge=lfs -text
2062
+ platform/dbops/archive/databases_old/data/workspace/.hf_home/xet/https___cas_serv-tGqkUaZf_CBPHQ6h/chunk-cache/Kf/Kfwd7RBDMvqOauE250uox9qLC1wtTSskUIFxnG_RJVtkZWZhdWx0/AAAAAEcEAADra_4DAAAAADTl7fw= filter=lfs diff=lfs merge=lfs -text
2063
+ platform/dbops/archive/databases_old/data/workspace/.hf_home/xet/https___cas_serv-tGqkUaZf_CBPHQ6h/chunk-cache/Kl/KlFZnckBEoZSHOZVJH3RwHfb6gHZ5wRV5iHl0NROPihkZWZhdWx0/AAAAAFYEAAAbMP8DAAAAANk01j0= filter=lfs diff=lfs merge=lfs -text
2064
+ platform/dbops/archive/databases_old/data/workspace/.hf_home/xet/https___cas_serv-tGqkUaZf_CBPHQ6h/chunk-cache/LV/LVyYCSoxNiUrYNxEIDdV35in7f5J_UsnFC0gvjJuiZtkZWZhdWx0/AAAAACcEAAAsd_8DAAAAAEexGsA= filter=lfs diff=lfs merge=lfs -text
2065
+ platform/dbops/archive/databases_old/data/workspace/.hf_home/xet/https___cas_serv-tGqkUaZf_CBPHQ6h/chunk-cache/La/La8XeUW051QeUGDrUXOypoaOVR_P6b5rnQUG4nMouyJkZWZhdWx0/AAAAAEwEAADDEP8DAAAAAIGlqts= filter=lfs diff=lfs merge=lfs -text
2066
+ platform/dbops/archive/databases_old/data/workspace/.hf_home/xet/https___cas_serv-tGqkUaZf_CBPHQ6h/chunk-cache/Lc/LcZxu9U87-k5EaYBcQXyJA-nu0o4qUbj4E9ODaS79x1kZWZhdWx0/AAAAACMEAADd7_4DAAAAAE0UVf0= filter=lfs diff=lfs merge=lfs -text
2067
+ platform/dbops/archive/databases_old/data/workspace/.hf_home/xet/https___cas_serv-tGqkUaZf_CBPHQ6h/chunk-cache/M6/M6Xmfxgi9CtMBXKPI3IkAD2S7WAtnLkHYJI9dlkg1EhkZWZhdWx0/AAAAABwEAABq3_4DAAAAAMTMg7Q= filter=lfs diff=lfs merge=lfs -text
2068
+ platform/dbops/archive/databases_old/data/workspace/.hf_home/xet/https___cas_serv-tGqkUaZf_CBPHQ6h/chunk-cache/MG/MGj6qClq6KjcjjYOSaj-IWyIVLf-FJwmTdAH_GB_qdpkZWZhdWx0/AAAAAEcEAAD8DgAEAAAAAHIqViA= filter=lfs diff=lfs merge=lfs -text
2069
+ platform/dbops/archive/databases_old/data/workspace/.hf_home/xet/https___cas_serv-tGqkUaZf_CBPHQ6h/chunk-cache/NB/NB-kXY7bNWjNhJ35bCjayTpcX-C9n1MSfcpFC_I_1RRkZWZhdWx0/AAAAADwEAADgxP8DAAAAAEJjs7Y= filter=lfs diff=lfs merge=lfs -text
2070
+ platform/dbops/archive/databases_old/data/workspace/.hf_home/xet/https___cas_serv-tGqkUaZf_CBPHQ6h/chunk-cache/NB/NBrj7Hs_PeINbORU9Re6Y3Y3qKIFycynMHp6hG98DutkZWZhdWx0/DQAAAB4EAACUPPMDAAAAACoqI-U= filter=lfs diff=lfs merge=lfs -text
2071
+ platform/dbops/archive/databases_old/data/workspace/.hf_home/xet/https___cas_serv-tGqkUaZf_CBPHQ6h/chunk-cache/Np/NpEVFVlwy9zX7mFVRtF8a437j2e2eVw18gCFfB0-erNkZWZhdWx0/AAAAAE8EAABr8P4DAAAAANVtCSQ= filter=lfs diff=lfs merge=lfs -text
2072
+ platform/dbops/archive/databases_old/data/workspace/.hf_home/xet/https___cas_serv-tGqkUaZf_CBPHQ6h/chunk-cache/OH/OHc2Y4EYFPK82gOSjbhAVtqLMHp7VFmTjC9j25nc1qdkZWZhdWx0/AAAAAEwEAACcof4DAAAAAOVlT1Q= filter=lfs diff=lfs merge=lfs -text
2073
+ platform/dbops/archive/databases_old/data/workspace/.hf_home/xet/https___cas_serv-tGqkUaZf_CBPHQ6h/chunk-cache/QS/QSEml1nwvu8RwCGeyJiplaYESnZ1xKbubiOUYwFMOGJkZWZhdWx0/AAAAAF0EAAC5Uv8DAAAAAOUfKBQ= filter=lfs diff=lfs merge=lfs -text
2074
+ platform/dbops/archive/databases_old/data/workspace/.hf_home/xet/https___cas_serv-tGqkUaZf_CBPHQ6h/chunk-cache/QV/QV4zftywws0qgz2qxv11HqBOdZOglA2pvBZs42sfxzBkZWZhdWx0/AAAAAKICAABx5IMCAAAAAJDRDtA= filter=lfs diff=lfs merge=lfs -text
2075
+ platform/dbops/archive/databases_old/data/workspace/.hf_home/xet/https___cas_serv-tGqkUaZf_CBPHQ6h/chunk-cache/Qd/Qd5T_bBLjhbO_YYEA5NpKsNEkccoBhDuLPHjrvpXUkZkZWZhdWx0/AAAAACoEAACq6_4DAAAAALsGCZE= filter=lfs diff=lfs merge=lfs -text
2076
+ platform/dbops/archive/databases_old/data/workspace/.hf_home/xet/https___cas_serv-tGqkUaZf_CBPHQ6h/chunk-cache/Qw/QwlJVGAeTTrAKqtloEfdX4sh3AysfUakRLGOa7liXjZkZWZhdWx0/AAAAADEEAABBZv4DAAAAAKhBo80= filter=lfs diff=lfs merge=lfs -text
2077
+ platform/dbops/archive/databases_old/data/workspace/.hf_home/xet/https___cas_serv-tGqkUaZf_CBPHQ6h/chunk-cache/RK/RKwImU0xG5ZIiR_9WpQRsBbwh_9sSBERKHTPp2ryMvNkZWZhdWx0/AAAAAE4EAAD9S_4DAAAAAI4MX_c= filter=lfs diff=lfs merge=lfs -text
2078
+ platform/dbops/archive/databases_old/data/workspace/.hf_home/xet/https___cas_serv-tGqkUaZf_CBPHQ6h/chunk-cache/RL/RL34Sd8YE4vA0y9HSrAUYZQqSz3WDBdaf3BK4VaKxKJkZWZhdWx0/AAAAAC0EAACp-v4DAAAAABn1GrY= filter=lfs diff=lfs merge=lfs -text
2079
+ platform/dbops/archive/databases_old/data/workspace/.hf_home/xet/https___cas_serv-tGqkUaZf_CBPHQ6h/chunk-cache/TL/TL_hKa25u8LM_dnDe4BtlOacA8rDDBQdGStIHCqkwmpkZWZhdWx0/AAAAAFYEAABol_8DAAAAAL-ax70= filter=lfs diff=lfs merge=lfs -text
2080
+ platform/dbops/archive/databases_old/data/workspace/.hf_home/xet/https___cas_serv-tGqkUaZf_CBPHQ6h/chunk-cache/TO/TOMM9q88w1g8qVFTsNLXD3uLiO2hfk1XZrAkaMFKNEdkZWZhdWx0/AAAAADMEAACerf8DAAAAAAPlsEA= filter=lfs diff=lfs merge=lfs -text
2081
+ platform/dbops/archive/databases_old/data/workspace/.hf_home/xet/https___cas_serv-tGqkUaZf_CBPHQ6h/chunk-cache/Tc/TcndmyEGgp8AfU-s3giFzB4KZc6SXBN1K7v-adNpStJkZWZhdWx0/AAAAADwEAAAgvf8DAAAAAKFkMuA= filter=lfs diff=lfs merge=lfs -text
2082
+ platform/dbops/archive/databases_old/data/workspace/.hf_home/xet/https___cas_serv-tGqkUaZf_CBPHQ6h/chunk-cache/Ti/TirVorWi0w8Gcplj1UYi0e7PxcRAaZP4qX8Czc7LD0BkZWZhdWx0/AAAAAB0EAAA2hP8DAAAAAP0bi5A= filter=lfs diff=lfs merge=lfs -text
2083
+ platform/dbops/archive/databases_old/data/workspace/.hf_home/xet/https___cas_serv-tGqkUaZf_CBPHQ6h/chunk-cache/Ui/UiLTe_rucfyABR5XTigrHg9-PQOYhLDERuBrdLRMXH9kZWZhdWx0/AAAAAB4EAACGTv4DAAAAACKbfhc= filter=lfs diff=lfs merge=lfs -text
2084
+ platform/dbops/archive/databases_old/data/workspace/.hf_home/xet/https___cas_serv-tGqkUaZf_CBPHQ6h/chunk-cache/Uz/UzGG4soLifkvQj9BCIbFFaEgFIJ9dVIcw1PzywlX-npkZWZhdWx0/AAAAABkEAADnyf8DAAAAAN0HdU4= filter=lfs diff=lfs merge=lfs -text
2085
+ platform/dbops/archive/databases_old/data/workspace/.hf_home/xet/https___cas_serv-tGqkUaZf_CBPHQ6h/chunk-cache/VN/VNApnxBkAUmYKB_yYrB4RLK4d0FdiDiOXpc5g0PVca1kZWZhdWx0/AAAAAEAEAACv0_4DAAAAADOV8rI= filter=lfs diff=lfs merge=lfs -text
2086
+ platform/dbops/archive/databases_old/data/workspace/.hf_home/xet/https___cas_serv-tGqkUaZf_CBPHQ6h/chunk-cache/WI/WIHnw9KaYVbUybRZPjneqeTLnx8rIESvQ1rN4IrRqRBkZWZhdWx0/AAAAAFgEAACN-P8DAAAAADGvlyU= filter=lfs diff=lfs merge=lfs -text
2087
+ platform/dbops/archive/databases_old/data/workspace/.hf_home/xet/https___cas_serv-tGqkUaZf_CBPHQ6h/chunk-cache/WQ/WQpo3d2PyueEjBUwGJPcQJJxaGsUiZDwGXM6UmFw6ExkZWZhdWx0/AAAAAEwEAAA61f4DAAAAABSmBrI= filter=lfs diff=lfs merge=lfs -text
2088
+ platform/dbops/archive/databases_old/data/workspace/.hf_home/xet/https___cas_serv-tGqkUaZf_CBPHQ6h/chunk-cache/W_/W_RPeqmUuWFbUNWuTSmeM-hJh_McRK4hLWD_ezMUdadkZWZhdWx0/AAAAAE8EAABkt_4DAAAAABcXndU= filter=lfs diff=lfs merge=lfs -text
2089
+ platform/dbops/archive/databases_old/data/workspace/.hf_home/xet/https___cas_serv-tGqkUaZf_CBPHQ6h/chunk-cache/Wy/WySW7wR7J2yPAWNkwuQy15SlbjmvuiReXobrXanBpJRkZWZhdWx0/AAAAACUAAAAZMR8AAAAAAG_o4Q0= filter=lfs diff=lfs merge=lfs -text
2090
+ platform/dbops/archive/databases_old/data/workspace/.hf_home/xet/https___cas_serv-tGqkUaZf_CBPHQ6h/chunk-cache/XM/XMhsSB8kazEiXFcgpyva7WerNLWaPkN7j6cwWMgK4idkZWZhdWx0/AAAAAGQEAACxDAAEAAAAAMlg6_8= filter=lfs diff=lfs merge=lfs -text
2091
+ platform/dbops/archive/databases_old/data/workspace/.hf_home/xet/https___cas_serv-tGqkUaZf_CBPHQ6h/chunk-cache/YB/YBPL1uaOB17J2dZzdDiKsnPW3B3QBRUZ3J_HdVXGY9tkZWZhdWx0/AAAAACcEAABegv8DAAAAAJlR5uw= filter=lfs diff=lfs merge=lfs -text
2092
+ platform/dbops/archive/databases_old/data/workspace/.hf_home/xet/https___cas_serv-tGqkUaZf_CBPHQ6h/chunk-cache/YO/YOh5IYSvpV4V-7Z9B6i5EzXe1EAjLbe-gCmFSkAqb6RkZWZhdWx0/AAAAACAEAADrof4DAAAAAIg3J3Q= filter=lfs diff=lfs merge=lfs -text
2093
+ platform/dbops/archive/databases_old/data/workspace/.hf_home/xet/https___cas_serv-tGqkUaZf_CBPHQ6h/chunk-cache/ZU/ZUHq9SlsqSbIPjzpNRPDgX7-GIy8wFKfzRrFEp6HgGRkZWZhdWx0/AAAAAEUEAACnRf8DAAAAADz81Ik= filter=lfs diff=lfs merge=lfs -text
2094
+ platform/dbops/archive/databases_old/data/workspace/.hf_home/xet/https___cas_serv-tGqkUaZf_CBPHQ6h/chunk-cache/ZU/ZUZIXhXYsLwJeMKoxvNCR8koa1-4znHf8QxQrJOhCclkZWZhdWx0/AAAAAB4EAACo8v8DAAAAAEntdlA= filter=lfs diff=lfs merge=lfs -text
2095
+ platform/dbops/archive/databases_old/data/workspace/.hf_home/xet/https___cas_serv-tGqkUaZf_CBPHQ6h/chunk-cache/ZW/ZWkrFJCcE1_YsW_yQMua13KYD9NdXiNuAwvYzQkOPEpkZWZhdWx0/AAAAABkEAADQ_f8DAAAAALRXkFc= filter=lfs diff=lfs merge=lfs -text
2096
+ platform/dbops/archive/databases_old/data/workspace/.hf_home/xet/https___cas_serv-tGqkUaZf_CBPHQ6h/chunk-cache/Zu/ZuzLXm0kNCGA4v_JwdBiJm3yFGjLUhjlhZdzqW8NUvhkZWZhdWx0/AAAAAEoEAABj4_8DAAAAAKhcqHA= filter=lfs diff=lfs merge=lfs -text
2097
+ platform/dbops/archive/databases_old/data/workspace/.hf_home/xet/https___cas_serv-tGqkUaZf_CBPHQ6h/chunk-cache/Zy/ZyADf9ixVQpkZFJ-qTgLXu3s22OyGmCxx2Js4evQ8WxkZWZhdWx0/AAAAACwEAADe3f8DAAAAAPviRF4= filter=lfs diff=lfs merge=lfs -text
2098
+ platform/dbops/archive/databases_old/data/workspace/.hf_home/xet/https___cas_serv-tGqkUaZf_CBPHQ6h/chunk-cache/_L/_LBhCkXNvY_0jx-UBEEaHR3GQ2Nkg-5lZ3oTtwKbM91kZWZhdWx0/AAAAACoEAAA8fv8DAAAAAFW0NEM= filter=lfs diff=lfs merge=lfs -text
2099
+ platform/dbops/archive/databases_old/data/workspace/.hf_home/xet/https___cas_serv-tGqkUaZf_CBPHQ6h/chunk-cache/aA/aAE-1IlUjIaqQuOyDBi3nvo08dMzb3USYGgkyJ3kcKpkZWZhdWx0/AAAAANwDAAAvVaoDAAAAANwCcpo= filter=lfs diff=lfs merge=lfs -text
2100
+ platform/dbops/archive/databases_old/data/workspace/.hf_home/xet/https___cas_serv-tGqkUaZf_CBPHQ6h/chunk-cache/aD/aDPCKX52cXk0RydM1XdIKM_RWQprtei0gviT4G5547VkZWZhdWx0/AAAAADYEAAC3mP4DAAAAAPlTu8k= filter=lfs diff=lfs merge=lfs -text
2101
+ platform/dbops/archive/databases_old/data/workspace/.hf_home/xet/https___cas_serv-tGqkUaZf_CBPHQ6h/chunk-cache/aP/aPQaVtY3-ehhiii_sonbgCZzJp1wFDb7YUU4a52_jTlkZWZhdWx0/AAAAAFAEAABtOP8DAAAAAHsXLiw= filter=lfs diff=lfs merge=lfs -text
2102
+ platform/dbops/archive/databases_old/data/workspace/.hf_home/xet/https___cas_serv-tGqkUaZf_CBPHQ6h/chunk-cache/b0/b0YmqdiRNte3fagLWdQGpdBl-HbJNAqIIOwxyW_j23FkZWZhdWx0/AAAAAC0EAAAf9P8DAAAAAOrMxGE= filter=lfs diff=lfs merge=lfs -text
2103
+ platform/dbops/archive/databases_old/data/workspace/.hf_home/xet/https___cas_serv-tGqkUaZf_CBPHQ6h/chunk-cache/bl/blmgOMWkFH5tZu1SHlOMRlmp4eB4RVrG5tl4YHjeWO5kZWZhdWx0/AAAAAFAEAABh3f8DAAAAAM5cEbo= filter=lfs diff=lfs merge=lfs -text
2104
+ platform/dbops/archive/databases_old/data/workspace/.hf_home/xet/https___cas_serv-tGqkUaZf_CBPHQ6h/chunk-cache/cs/csFIAsKk0yjBLfXPolUHMKML3hoguxpBvEe3r9iqCndkZWZhdWx0/AAAAADAEAABhqP8DAAAAANWirtM= filter=lfs diff=lfs merge=lfs -text
2105
+ platform/dbops/archive/databases_old/data/workspace/.hf_home/xet/https___cas_serv-tGqkUaZf_CBPHQ6h/chunk-cache/dv/dvJKJKHKkeRMSbvvrmuaMgKSzsK-fYX9_ppnJ4K2C7xkZWZhdWx0/AAAAAD4EAAA7nP8DAAAAAKg3wdM= filter=lfs diff=lfs merge=lfs -text
2106
+ platform/dbops/archive/databases_old/data/workspace/.hf_home/xet/https___cas_serv-tGqkUaZf_CBPHQ6h/chunk-cache/e-/e-6WyDSxprOA1NChvh3PSHm_Qz7YI-DzJAQz2gend-JkZWZhdWx0/AAAAAB4EAADHhP8DAAAAAPm3-OI= filter=lfs diff=lfs merge=lfs -text
2107
+ platform/dbops/archive/databases_old/data/workspace/.hf_home/xet/https___cas_serv-tGqkUaZf_CBPHQ6h/chunk-cache/eJ/eJt8GHvSQRN5KayEiX40_p9Sxs6rLafPwZ4rzJCvaZBkZWZhdWx0/AAAAACIEAABLx_8DAAAAAPTg7pw= filter=lfs diff=lfs merge=lfs -text
2108
+ platform/dbops/archive/databases_old/data/workspace/.hf_home/xet/https___cas_serv-tGqkUaZf_CBPHQ6h/chunk-cache/fZ/fZvBsPKCUCq2IKMgh26Uvg-0PW81LxaoA2dPtKZT0vxkZWZhdWx0/AAAAAE8EAACY7_8DAAAAADeFKR0= filter=lfs diff=lfs merge=lfs -text
2109
+ platform/dbops/archive/databases_old/data/workspace/.hf_home/xet/https___cas_serv-tGqkUaZf_CBPHQ6h/chunk-cache/f_/f_33Z_XW4jaiSrjdM9lCVAyi-dD6gE6PVpz5YYFvDhlkZWZhdWx0/AAAAAA8EAADKDgAEAAAAAMANwQw= filter=lfs diff=lfs merge=lfs -text
2110
+ platform/dbops/archive/databases_old/data/workspace/.hf_home/xet/https___cas_serv-tGqkUaZf_CBPHQ6h/chunk-cache/fz/fz7-skiIJ_rwhCLln_bIS0iVCDMB5WQOxs4eLnnB8IxkZWZhdWx0/CQAAACkEAAAPHPMDAAAAAH2evSU= filter=lfs diff=lfs merge=lfs -text
2111
+ platform/dbops/archive/databases_old/data/workspace/.hf_home/xet/https___cas_serv-tGqkUaZf_CBPHQ6h/chunk-cache/hq/hqLo2YiS-NRub2g45x6y0KSkBawWjOar0Q-klIvDQUhkZWZhdWx0/AAAAAKQCAAB7Q4ICAAAAADThU04= filter=lfs diff=lfs merge=lfs -text
2112
+ platform/dbops/archive/databases_old/data/workspace/.hf_home/xet/https___cas_serv-tGqkUaZf_CBPHQ6h/chunk-cache/i2/i2V_KxAXkvk2IXaQeAJauLpMv7vfoxz5UWtMHrR-XjBkZWZhdWx0/AAAAAEIEAADPQ_8DAAAAACuT8Ns= filter=lfs diff=lfs merge=lfs -text
2113
+ platform/dbops/archive/databases_old/data/workspace/.hf_home/xet/https___cas_serv-tGqkUaZf_CBPHQ6h/chunk-cache/iF/iFTVC_0qlqMyyw_VftzorP7De_j-9korfXZtz9TgH31kZWZhdWx0/AAAAADUEAABX3_4DAAAAAIC_4zk= filter=lfs diff=lfs merge=lfs -text
2114
+ platform/dbops/archive/databases_old/data/workspace/.hf_home/xet/https___cas_serv-tGqkUaZf_CBPHQ6h/chunk-cache/iW/iWS7FhnAGKN8ZgO5xxGOsLChQwVuPszFcxNjVqT1qVBkZWZhdWx0/AAAAAEkEAABfuP8DAAAAAHnM3Rw= filter=lfs diff=lfs merge=lfs -text
2115
+ platform/dbops/archive/databases_old/data/workspace/.hf_home/xet/https___cas_serv-tGqkUaZf_CBPHQ6h/chunk-cache/ia/ia6wiNzezGW2q6C5zgBxnTSGogwHFALEgdSndsIB87VkZWZhdWx0/AAAAACkEAABXM_8DAAAAAIrPYEI= filter=lfs diff=lfs merge=lfs -text
2116
+ platform/dbops/archive/databases_old/data/workspace/.hf_home/xet/https___cas_serv-tGqkUaZf_CBPHQ6h/chunk-cache/j2/j208cQYgbl5FwIy1CJiximw4vxkyiiyM0lMd6cPUomxkZWZhdWx0/AAAAAD8EAAC5iP4DAAAAAFlT8H8= filter=lfs diff=lfs merge=lfs -text
2117
+ platform/dbops/archive/databases_old/data/workspace/.hf_home/xet/https___cas_serv-tGqkUaZf_CBPHQ6h/chunk-cache/je/jeq-0KrONHOMruT4q8kbTuLlzN6xWjwSOXN0Hl90u2xkZWZhdWx0/AAAAADUEAAAuCAAEAAAAAK6_iPE= filter=lfs diff=lfs merge=lfs -text
2118
+ platform/dbops/archive/databases_old/data/workspace/.hf_home/xet/https___cas_serv-tGqkUaZf_CBPHQ6h/chunk-cache/jp/jpZdgKkqIGnrGJa_RpCyZQLdw5D5tfl6WGhyr2eL_NRkZWZhdWx0/AAAAABoEAAAVt_8DAAAAAOG9YcE= filter=lfs diff=lfs merge=lfs -text
2119
+ platform/dbops/archive/databases_old/data/workspace/.hf_home/xet/https___cas_serv-tGqkUaZf_CBPHQ6h/chunk-cache/kQ/kQRIMsq69dGkHbOe0-V9Ivl7S-ZNK73-ZuPD2sGrJwFkZWZhdWx0/AAAAADsEAABm0f4DAAAAAIdjGKc= filter=lfs diff=lfs merge=lfs -text
2120
+ platform/dbops/archive/databases_old/data/workspace/.hf_home/xet/https___cas_serv-tGqkUaZf_CBPHQ6h/chunk-cache/kg/kg9UWTCw50y9r3VaM8Ly-y4KZFi-T9edDOOONLsR_mVkZWZhdWx0/AAAAAEcEAADdlP4DAAAAALaVwwE= filter=lfs diff=lfs merge=lfs -text
2121
+ platform/dbops/archive/databases_old/data/workspace/.hf_home/xet/https___cas_serv-tGqkUaZf_CBPHQ6h/chunk-cache/lR/lR09z9CzX0KJ7COo2lU21t_RVXHoTzRO23FalvhFhkZkZWZhdWx0/AAAAACYEAAB-CgAEAAAAABJX00s= filter=lfs diff=lfs merge=lfs -text
2122
+ platform/dbops/archive/databases_old/data/workspace/.hf_home/xet/https___cas_serv-tGqkUaZf_CBPHQ6h/chunk-cache/lc/lctvWZkx98H_7-zo20I2ifxBq_siWjIS6S6AtNC-pgpkZWZhdWx0/AAAAABsEAAB19_4DAAAAALgEMgI= filter=lfs diff=lfs merge=lfs -text
2123
+ platform/dbops/archive/databases_old/data/workspace/.hf_home/xet/https___cas_serv-tGqkUaZf_CBPHQ6h/chunk-cache/lt/ltl-F_jtJE-mnlOmtgUMU1xBxgyeyis0D2p5fSH63ilkZWZhdWx0/AAAAAD8EAACfnP8DAAAAAIHp9w4= filter=lfs diff=lfs merge=lfs -text
2124
+ platform/dbops/archive/databases_old/data/workspace/.hf_home/xet/https___cas_serv-tGqkUaZf_CBPHQ6h/chunk-cache/lz/lzefCSKP9AEaKDMGdl78NLyotMQE35RTdOupfTXBwM5kZWZhdWx0/AAAAACcEAAA2JP8DAAAAAC71AKA= filter=lfs diff=lfs merge=lfs -text
2125
+ platform/dbops/archive/databases_old/data/workspace/.hf_home/xet/https___cas_serv-tGqkUaZf_CBPHQ6h/chunk-cache/m3/m3p-AgNFvpcIYAGnwlyqiGdli8F1fGP-C-TvNo6-UWRkZWZhdWx0/AAAAAEgEAABZ5f8DAAAAAJbtpVE= filter=lfs diff=lfs merge=lfs -text
2126
+ platform/dbops/archive/databases_old/data/workspace/.hf_home/xet/https___cas_serv-tGqkUaZf_CBPHQ6h/chunk-cache/mU/mUFXiUAwO4tyqLhjNDTHz4i9PFFDGR-puO9DmpqrQPZkZWZhdWx0/AAAAAC4EAACHv_8DAAAAAFMObfQ= filter=lfs diff=lfs merge=lfs -text
2127
+ platform/dbops/archive/databases_old/data/workspace/.hf_home/xet/https___cas_serv-tGqkUaZf_CBPHQ6h/chunk-cache/me/meUG_kIWBJGtaLUbWb_BNVG04ih5V8Vl8v8jcbIs1-tkZWZhdWx0/AAAAAEkEAAALcP4DAAAAAAHZRQE= filter=lfs diff=lfs merge=lfs -text
2128
+ platform/dbops/archive/databases_old/data/workspace/.hf_home/xet/https___cas_serv-tGqkUaZf_CBPHQ6h/chunk-cache/nB/nBTH7R9-ilmDHUuvbUShNx4JhMHGu8QdKSsD42bSzFhkZWZhdWx0/AAAAACMEAABOQ_8DAAAAABbocfk= filter=lfs diff=lfs merge=lfs -text
2129
+ platform/dbops/archive/databases_old/data/workspace/.hf_home/xet/https___cas_serv-tGqkUaZf_CBPHQ6h/chunk-cache/nC/nCV3XmuQ_r0Gt2N1YIqJzlaIw6LZWbrgrQ_fzdcGfhVkZWZhdWx0/AAAAAD8EAAD59_4DAAAAANhogZw= filter=lfs diff=lfs merge=lfs -text
2130
+ platform/dbops/archive/databases_old/data/workspace/.hf_home/xet/https___cas_serv-tGqkUaZf_CBPHQ6h/chunk-cache/nm/nmlJ45JlqPa5zVl9GJRkTl474QJxrC5UqbPhKFkVcndkZWZhdWx0/AAAAADAEAABW2P8DAAAAAHI0TpE= filter=lfs diff=lfs merge=lfs -text
2131
+ platform/dbops/archive/databases_old/data/workspace/.hf_home/xet/https___cas_serv-tGqkUaZf_CBPHQ6h/chunk-cache/nr/nrtrxkOQtQ0Jy4O5sW3pvPsdzdDbFtcIcIT1QBt1SjpkZWZhdWx0/AAAAABQEAAC5Nv8DAAAAAHkGehU= filter=lfs diff=lfs merge=lfs -text
2132
+ platform/dbops/archive/databases_old/data/workspace/.hf_home/xet/https___cas_serv-tGqkUaZf_CBPHQ6h/chunk-cache/o2/o25GIOYOwlDg9T1WoKE6EEWgcDiT7j0r4dihe1AbbLtkZWZhdWx0/AAAAAEQEAADNv_8DAAAAAGtsID4= filter=lfs diff=lfs merge=lfs -text
2133
+ platform/dbops/archive/databases_old/data/workspace/.hf_home/xet/https___cas_serv-tGqkUaZf_CBPHQ6h/chunk-cache/oS/oStASJ7yP5j-z_VcBBATvKiTZwGJncFpV2cU0ci9miZkZWZhdWx0/AAAAACQEAAB7DgAEAAAAAL4QLN4= filter=lfs diff=lfs merge=lfs -text
2134
+ platform/dbops/archive/databases_old/data/workspace/.hf_home/xet/https___cas_serv-tGqkUaZf_CBPHQ6h/chunk-cache/q4/q4F3w3Ulqnz3fMrpGucUnohC9euFBv-t5ve-0prwZ4pkZWZhdWx0/AAAAAGcEAAApav8DAAAAADws4pQ= filter=lfs diff=lfs merge=lfs -text
2135
+ platform/dbops/archive/databases_old/data/workspace/.hf_home/xet/https___cas_serv-tGqkUaZf_CBPHQ6h/chunk-cache/qZ/qZgknTE5PDPpqs7zQyxd2QT7rEwp6dHLrstJ1y5TCS9kZWZhdWx0/AAAAADEEAADnx_8DAAAAAJDM5gw= filter=lfs diff=lfs merge=lfs -text
2136
+ platform/dbops/archive/databases_old/data/workspace/.hf_home/xet/https___cas_serv-tGqkUaZf_CBPHQ6h/chunk-cache/qd/qdjXhEm1q5T5aY_Bqdz3J6379EpCSIhG1h1yFKk4JyVkZWZhdWx0/AAAAACMEAADfzP8DAAAAALCzAHQ= filter=lfs diff=lfs merge=lfs -text
2137
+ platform/dbops/archive/databases_old/data/workspace/.hf_home/xet/https___cas_serv-tGqkUaZf_CBPHQ6h/chunk-cache/r8/r8XVFPP3CSU97D8lSb5-XYVrsc1TLIT2Nm5hHdiVvmlkZWZhdWx0/JQAAAFcEAADzut4DAAAAAFp9uMQ= filter=lfs diff=lfs merge=lfs -text
2138
+ platform/dbops/archive/databases_old/data/workspace/.hf_home/xet/https___cas_serv-tGqkUaZf_CBPHQ6h/chunk-cache/rM/rMijX4YpdzhLsPcx7vsIxxVFOjNt73WPPkLPJB41ndxkZWZhdWx0/AAAAAFUEAAD_S_8DAAAAABjQi3I= filter=lfs diff=lfs merge=lfs -text
2139
+ platform/dbops/archive/databases_old/data/workspace/.hf_home/xet/https___cas_serv-tGqkUaZf_CBPHQ6h/chunk-cache/s9/s9irLxtRQuvkpbK9EkMTIq83jQv2YgPN-zpdLYlklV1kZWZhdWx0/AAAAAGUEAADkCgAEAAAAANyy6F4= filter=lfs diff=lfs merge=lfs -text
2140
+ platform/dbops/archive/databases_old/data/workspace/.hf_home/xet/https___cas_serv-tGqkUaZf_CBPHQ6h/chunk-cache/sB/sBm_iXU5eC2MIbBlMgfjPoSUgWJQsY0Da7TGZi1kMjtkZWZhdWx0/AAAAADMEAADJ8v4DAAAAAKrt-Lk= filter=lfs diff=lfs merge=lfs -text
2141
+ platform/dbops/archive/databases_old/data/workspace/.hf_home/xet/https___cas_serv-tGqkUaZf_CBPHQ6h/chunk-cache/sR/sRo8jHDmeSxqdagNB5lVUir9g_hOGcDlgo5yVK-EHVBkZWZhdWx0/AAAAAA4AAADR6g4AAAAAACwy41k= filter=lfs diff=lfs merge=lfs -text
2142
+ platform/dbops/archive/databases_old/data/workspace/.hf_home/xet/https___cas_serv-tGqkUaZf_CBPHQ6h/chunk-cache/sR/sRo8jHDmeSxqdagNB5lVUir9g_hOGcDlgo5yVK-EHVBkZWZhdWx0/DQAAACgEAAA1-vIDAAAAAOU4XsA= filter=lfs diff=lfs merge=lfs -text
2143
+ platform/dbops/archive/databases_old/data/workspace/.hf_home/xet/https___cas_serv-tGqkUaZf_CBPHQ6h/chunk-cache/tK/tKfXGua5KYFpIG4BSMV9j-ZurWon_wZylzDxX61_qd1kZWZhdWx0/AAAAACEEAABW9v8DAAAAAEHzAlE= filter=lfs diff=lfs merge=lfs -text
2144
+ platform/dbops/archive/databases_old/data/workspace/.hf_home/xet/https___cas_serv-tGqkUaZf_CBPHQ6h/chunk-cache/tU/tUa0CBVvi3NpNrUgu2A8AM-DdC-1N-NbG5Al7ee1RcZkZWZhdWx0/AAAAAEYEAADIzv8DAAAAAADz8C8= filter=lfs diff=lfs merge=lfs -text
2145
+ platform/dbops/archive/databases_old/data/workspace/.hf_home/xet/https___cas_serv-tGqkUaZf_CBPHQ6h/chunk-cache/tm/tmmpTzhK6YKC63V-4i__4yZH182zjL4cHpoTT7FT5etkZWZhdWx0/AAAAAFsEAABrI_8DAAAAAKnqrXo= filter=lfs diff=lfs merge=lfs -text
2146
+ platform/dbops/archive/databases_old/data/workspace/.hf_home/xet/https___cas_serv-tGqkUaZf_CBPHQ6h/chunk-cache/v0/v0FwcfOPSorQZ9FU6UOk_9wjRI28_rfCQnnmmm5X2u1kZWZhdWx0/AAAAACYEAABi4P8DAAAAAMyXAjE= filter=lfs diff=lfs merge=lfs -text
2147
+ platform/dbops/archive/databases_old/data/workspace/.hf_home/xet/https___cas_serv-tGqkUaZf_CBPHQ6h/chunk-cache/vH/vHyp3zfayffiFHP0gFzmIAzAOiL3FhCjZjEpmPsNZtBkZWZhdWx0/AAAAAEQEAACu6P8DAAAAAFZstaw= filter=lfs diff=lfs merge=lfs -text
2148
+ platform/dbops/archive/databases_old/data/workspace/.hf_home/xet/https___cas_serv-tGqkUaZf_CBPHQ6h/chunk-cache/vN/vNEtSxEIjsMJaKY_CUkFNRkgtio4AiXwCFioFwxr3PlkZWZhdWx0/AAAAAFIEAADzyv8DAAAAAGgxQAk= filter=lfs diff=lfs merge=lfs -text
2149
+ platform/dbops/archive/databases_old/data/workspace/.hf_home/xet/https___cas_serv-tGqkUaZf_CBPHQ6h/chunk-cache/vS/vSL3AVSvtBF-v0dpmZMQAE4BCp6UCP1AZjIBMzMdBlZkZWZhdWx0/AAAAAFMEAAAs6P8DAAAAAIJ2b4A= filter=lfs diff=lfs merge=lfs -text
2150
+ platform/dbops/archive/databases_old/data/workspace/.hf_home/xet/https___cas_serv-tGqkUaZf_CBPHQ6h/chunk-cache/wX/wX3wcLYk1U4DG9qc8P2Mwk9Sc_wb9kJXvxMdih0Sx5hkZWZhdWx0/AAAAAFMEAADs_P8DAAAAAB0M2XA= filter=lfs diff=lfs merge=lfs -text
2151
+ platform/dbops/archive/databases_old/data/workspace/.hf_home/xet/https___cas_serv-tGqkUaZf_CBPHQ6h/chunk-cache/wr/wrbulsgwvQuyMwQrSHNur7gulyRhCnbONUhpVliNkfRkZWZhdWx0/AAAAAE4EAAACp_8DAAAAAJVjEVk= filter=lfs diff=lfs merge=lfs -text
2152
+ platform/dbops/archive/databases_old/data/workspace/.hf_home/xet/https___cas_serv-tGqkUaZf_CBPHQ6h/chunk-cache/xH/xHRyUYVrbwck2UzLoggxSQSo2HE3hNEmRR1yNpE78-tkZWZhdWx0/AAAAADEEAAChcf8DAAAAAD1fRhc= filter=lfs diff=lfs merge=lfs -text
2153
+ platform/dbops/archive/databases_old/data/workspace/.hf_home/xet/https___cas_serv-tGqkUaZf_CBPHQ6h/chunk-cache/xo/xo6OiaRoNMtnQVUKzjtobHEqShExKx_Sq5wN75E8hclkZWZhdWx0/AAAAADUEAAAE4P8DAAAAADuAehE= filter=lfs diff=lfs merge=lfs -text
2154
+ platform/dbops/archive/databases_old/data/workspace/.hf_home/xet/https___cas_serv-tGqkUaZf_CBPHQ6h/chunk-cache/zU/zUbxO1B5TORUBYTVTHaBjl7kjW-IwJGaZEkCgkLinkJkZWZhdWx0/.AAAAADEEAABrV_4DAAAAADAknYo=.04GNfz filter=lfs diff=lfs merge=lfs -text
2155
+ platform/dbops/archive/databases_old/data/workspace/.hf_home/xet/https___cas_serv-tGqkUaZf_CBPHQ6h/chunk-cache/zU/zUbxO1B5TORUBYTVTHaBjl7kjW-IwJGaZEkCgkLinkJkZWZhdWx0/AAAAADEEAABrV_4DAAAAADAknYo= filter=lfs diff=lfs merge=lfs -text
2156
+ platform/dbops/archive/databases_old/data/workspace/.hf_home/xet/https___cas_serv-tGqkUaZf_CBPHQ6h/chunk-cache/za/zaRgYJWlqCDPNMbcgMOuTjjwwPiAfTvrDXDjVBu2Xe5kZWZhdWx0/AAAAAEsEAACPuv8DAAAAAOw9Z-o= filter=lfs diff=lfs merge=lfs -text
2157
+ platform/dbops/archive/databases_old/data/workspace/.hf_home/xet/https___cas_serv-tGqkUaZf_CBPHQ6h/chunk-cache/zl/zlRTEXmq2wzKpvLUzwQKgxIf0q7u6uN7vV-MBuLK3c5kZWZhdWx0/AAAAAC8EAAD5Jf4DAAAAAHDPlQM= filter=lfs diff=lfs merge=lfs -text
2158
+ platform/dbops/archive/databases_old/data/workspace/.hf_home/xet/https___cas_serv-tGqkUaZf_CBPHQ6h/staging/shard-session/xorb_metadata/364fb618bc6631634536fc23239f981380d70bb053436bb35938314663eb8546.mdb filter=lfs diff=lfs merge=lfs -text
2159
+ platform/dbops/archive/databases_old/data/workspace/.hf_home/xet/https___cas_serv-tGqkUaZf_CBPHQ6h/staging/shard-session/xorb_metadata/76ad2692c0f61d03864ab0841f0ebb093e7ca1bb2ddf6c1818059f2f304ee000.mdb filter=lfs diff=lfs merge=lfs -text
2160
+ platform/dbops/archive/databases_old/data/workspace/elizabeth_xet_ready/elizabeth_data_20250824_011825/chromadb/chroma.sqlite3 filter=lfs diff=lfs merge=lfs -text
2161
+ platform/dbops/archive/databases_old/data/workspace/elizabeth_xet_ready/elizabeth_data_20250824_011825/databases/elizabeth_memory.db filter=lfs diff=lfs merge=lfs -text
2162
+ platform/dbops/archive/databases_old/data/workspace/elizabeth_xet_ready/elizabeth_data_20250824_011825/databases/nova_memory.db filter=lfs diff=lfs merge=lfs -text
2163
+ platform/dbops/archive/databases_old/data/workspace/elizabeth_xet_ready/elizabeth_data_20250824_011825/logs/training_startup.log filter=lfs diff=lfs merge=lfs -text
2164
+ platform/dbops/archive/databases_old/data/workspace/elizabeth_xet_ready/elizabeth_data_20250824_011825/code/archive/data/elizabeth_memory.db filter=lfs diff=lfs merge=lfs -text
2165
+ platform/dbops/archive/databases_old/data/workspace/elizabeth_xet_ready/elizabeth_data_20250824_011825/code/archive/data/nova_memory.db filter=lfs diff=lfs merge=lfs -text
2166
+ platform/dbops/archive/databases_old/data/clickhouse/logs/clickhouse-server.log filter=lfs diff=lfs merge=lfs -text
platform/dbops/archive/databases_old/data/clickhouse/config/config.xml ADDED
@@ -0,0 +1,1866 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ <!--
2
+ NOTE: User and query level settings are set up in "users.xml" file.
3
+ If you have accidentally specified user-level settings here, server won't start.
4
+ You can either move the settings to the right place inside "users.xml" file
5
+ or add <skip_check_for_incorrect_settings>1</skip_check_for_incorrect_settings> here.
6
+ -->
7
+ <clickhouse>
8
+ <logger>
9
+ <!-- Possible levels [1]:
10
+
11
+ - none (turns off logging)
12
+ - fatal
13
+ - critical
14
+ - error
15
+ - warning
16
+ - notice
17
+ - information
18
+ - debug
19
+ - trace
20
+ - test (not for production usage)
21
+
22
+ [1]: https://github.com/pocoproject/poco/blob/poco-1.9.4-release/Foundation/include/Poco/Logger.h#L105-L114
23
+ -->
24
+ <level>trace</level>
25
+
26
+ <!-- Startup level is used to set the root logger level at server startup.
27
+ It is useful for debugging startup issues.
28
+ The root logger level will be reset to the default level after the server is fully initialized -->
29
+ <!-- <startupLevel>trace</startupLevel> -->
30
+ <!-- Shutdown level is used to set the root logger level at server Shutdown.
31
+ It is useful for debugging shutdown issues -->
32
+ <!-- <shutdownLevel>trace</shutdownLevel> -->
33
+
34
+ <log>/var/log/clickhouse-server/clickhouse-server.log</log>
35
+ <errorlog>/var/log/clickhouse-server/clickhouse-server.err.log</errorlog>
36
+ <!-- Rotation policy
37
+ See https://github.com/pocoproject/poco/blob/poco-1.9.4-release/Foundation/include/Poco/FileChannel.h#L54-L85
38
+ -->
39
+ <size>1000M</size>
40
+ <count>10</count>
41
+
42
+ <!-- <console>1</console> --> <!-- Default behavior is autodetection (log to console if not daemon mode and is tty) -->
43
+ <!-- <console_log_level>trace</console_log_level> -->
44
+
45
+ <!-- <use_syslog>0</use_syslog> -->
46
+ <!-- <syslog_level>trace</syslog_level> -->
47
+
48
+ <!-- <stream_compress>0</stream_compress> -->
49
+
50
+ <!-- By default logging happens in different threads so it does not block the execution
51
+ If the amount of messages waiting to be flushed is too large, new messages will be dropped.
52
+ -->
53
+ <!-- <async>1</async> -->
54
+ <!-- <async_queue_max_size>100000</async_queue_max_size> -->
55
+
56
+ <!-- Per level overrides (legacy):
57
+
58
+ For example to suppress logging of the ConfigReloader you can use:
59
+ NOTE: levels.logger is reserved, see below.
60
+ -->
61
+ <!--
62
+ <levels>
63
+ <ConfigReloader>none</ConfigReloader>
64
+ </levels>
65
+ -->
66
+
67
+ <!-- Per level overrides:
68
+
69
+ For example to suppress logging of the RBAC for default user you can use:
70
+ (But please note that the logger name maybe changed from version to version, even after minor upgrade)
71
+ -->
72
+ <!--
73
+ <levels>
74
+ <logger>
75
+ <name>ContextAccess (default)</name>
76
+ <level>none</level>
77
+ </logger>
78
+ <logger>
79
+ <name>DatabaseOrdinary (test)</name>
80
+ <level>none</level>
81
+ </logger>
82
+ </levels>
83
+ -->
84
+ <!-- Structured log formatting:
85
+ You can specify log format(for now, JSON only). In that case, the console log will be printed
86
+ in specified format like JSON.
87
+ For example, as below:
88
+
89
+ {"date_time":"1650918987.180175","thread_name":"#1","thread_id":"254545","level":"Trace","query_id":"","logger_name":"BaseDaemon","message":"Received signal 2","source_file":"../base/daemon/BaseDaemon.cpp; virtual void SignalListener::run()","source_line":"192"}
90
+ {"date_time_utc":"2024-11-06T09:06:09Z","thread_name":"#1","thread_id":"254545","level":"Trace","query_id":"","logger_name":"BaseDaemon","message":"Received signal 2","source_file":"../base/daemon/BaseDaemon.cpp; virtual void SignalListener::run()","source_line":"192"}
91
+ To enable JSON logging support, please uncomment the entire <formatting> tag below.
92
+
93
+ a) You can modify key names by changing values under tag values inside <names> tag.
94
+ For example, to change DATE_TIME to MY_DATE_TIME, you can do like:
95
+ <date_time>MY_DATE_TIME</date_time>
96
+ <date_time_utc>MY_UTC_DATE_TIME</date_time_utc>
97
+ b) You can stop unwanted log properties from appearing in logs. To do so, you can simply comment out (recommended)
98
+ that property from this file.
99
+ For example, if you do not want your log to print query_id, you can comment out only <query_id> tag.
100
+ However, if you comment out all the tags under <names>, the program will print default values for as
101
+ below.
102
+ -->
103
+ <!-- <formatting>
104
+ <type>json</type>
105
+ <names>
106
+ <date_time>date_time</date_time>
107
+ <date_time_utc>date_time_utc</date_time_utc>
108
+ <thread_name>thread_name</thread_name>
109
+ <thread_id>thread_id</thread_id>
110
+ <level>level</level>
111
+ <query_id>query_id</query_id>
112
+ <logger_name>logger_name</logger_name>
113
+ <message>message</message>
114
+ <source_file>source_file</source_file>
115
+ <source_line>source_line</source_line>
116
+ </names>
117
+ </formatting> -->
118
+ </logger>
119
+
120
+ <url_scheme_mappers>
121
+ <s3>
122
+ <to>https://{bucket}.s3.amazonaws.com</to>
123
+ </s3>
124
+ <gs>
125
+ <to>https://storage.googleapis.com/{bucket}</to>
126
+ </gs>
127
+ <oss>
128
+ <to>https://{bucket}.oss.aliyuncs.com</to>
129
+ </oss>
130
+ </url_scheme_mappers>
131
+
132
+ <!-- Add headers to response in options request. OPTIONS method is used in CORS preflight requests. -->
133
+ <http_options_response>
134
+ <header>
135
+ <name>Access-Control-Allow-Origin</name>
136
+ <value>*</value>
137
+ </header>
138
+ <header>
139
+ <name>Access-Control-Allow-Headers</name>
140
+ <value>origin, x-requested-with, x-clickhouse-format, x-clickhouse-user, x-clickhouse-key, Authorization</value>
141
+ </header>
142
+ <header>
143
+ <name>Access-Control-Allow-Methods</name>
144
+ <value>POST, GET, OPTIONS</value>
145
+ </header>
146
+ <header>
147
+ <name>Access-Control-Max-Age</name>
148
+ <value>86400</value>
149
+ </header>
150
+ </http_options_response>
151
+
152
+ <!-- The name that will be shown in the clickhouse-client.
153
+ By default, anything with "production" will be highlighted in red in query prompt.
154
+ -->
155
+ <!--display_name>production</display_name-->
156
+
157
+ <!-- Port for HTTP API. See also 'https_port' for secure connections.
158
+ This interface is also used by ODBC and JDBC drivers (DataGrip, Dbeaver, ...)
159
+ and by most of the web interfaces (embedded UI, Grafana, Redash, ...).
160
+ -->
161
+ <http_port>8123</http_port>
162
+
163
+ <!-- Port for interaction by native protocol with:
164
+ - clickhouse-client and other native ClickHouse tools (clickhouse-benchmark);
165
+ - clickhouse-server with other clickhouse-servers for distributed query processing;
166
+ - ClickHouse drivers and applications supporting native protocol
167
+ (this protocol is also informally called as "the TCP protocol");
168
+ See also 'tcp_port_secure' for secure connections.
169
+ -->
170
+ <tcp_port>9000</tcp_port>
171
+
172
+ <!-- Chunked capabilities for native protocol by server.
173
+ Can be enabled separately for send and receive channels.
174
+ Supported modes:
175
+ - chunked - server requires from client to have chunked enabled;
176
+ - chunked_optional - server supports both chunked and notchunked protocol;
177
+ - notchunked - server requires from client notchunked protocol (current default);
178
+ - notchunked_optional - server supports both chunked and notchunked protocol.
179
+ -->
180
+ <!--
181
+ <proto_caps>
182
+ <send>notchunked_optional</send>
183
+ <recv>notchunked_optional</recv>
184
+ </proto_caps>
185
+ -->
186
+
187
+ <!-- Compatibility with MySQL protocol.
188
+ ClickHouse will pretend to be MySQL for applications connecting to this port.
189
+ -->
190
+ <mysql_port>9004</mysql_port>
191
+
192
+ <!-- Compatibility with PostgreSQL protocol.
193
+ ClickHouse will pretend to be PostgreSQL for applications connecting to this port.
194
+ -->
195
+ <postgresql_port>9005</postgresql_port>
196
+
197
+ <!-- HTTP API with TLS (HTTPS).
198
+ You have to configure certificate to enable this interface.
199
+ See the OpenSSL section below.
200
+ -->
201
+ <!-- <https_port>8443</https_port> -->
202
+
203
+ <!-- Native interface with TLS.
204
+ You have to configure certificate to enable this interface.
205
+ See the OpenSSL section below.
206
+ -->
207
+ <!-- <tcp_port_secure>9440</tcp_port_secure> -->
208
+
209
+ <!-- Native interface wrapped with PROXYv1 protocol
210
+ PROXYv1 header is sent for every connection.
211
+ ClickHouse will extract information about proxy-forwarded client address from the header.
212
+ -->
213
+ <!-- <tcp_with_proxy_port>9011</tcp_with_proxy_port> -->
214
+
215
+ <!-- Port for communication between replicas. Used for data exchange.
216
+ It provides low-level data access between servers.
217
+ This port should not be accessible from untrusted networks.
218
+ See also 'interserver_http_credentials'.
219
+ Data transferred over connections to this port should not go through untrusted networks.
220
+ See also 'interserver_https_port'.
221
+ -->
222
+ <interserver_http_port>9009</interserver_http_port>
223
+
224
+ <!-- Port for communication between replicas with TLS.
225
+ You have to configure certificate to enable this interface.
226
+ See the OpenSSL section below.
227
+ See also 'interserver_http_credentials'.
228
+ -->
229
+ <!-- <interserver_https_port>9010</interserver_https_port> -->
230
+
231
+ <!-- Hostname that is used by other replicas to request this server.
232
+ If not specified, then it is determined analogous to 'hostname -f' command.
233
+ This setting could be used to switch replication to another network interface
234
+ (the server may be connected to multiple networks via multiple addresses)
235
+ -->
236
+
237
+ <!--
238
+ <interserver_http_host>example.clickhouse.com</interserver_http_host>
239
+ -->
240
+
241
+ <!-- Port for the SSH server which allows to connect and execute
242
+ queries in an interactive fashion using the embedded client over the PTY.
243
+ -->
244
+ <!-- <tcp_ssh_port>9022</tcp_ssh_port> -->
245
+
246
+ <ssh_server>
247
+ <!-- The public part of the host key will be written to the known_hosts file
248
+ on the SSH client side on the first connect.
249
+ -->
250
+ <!-- <host_rsa_key>path_to_the_ssh_key</host_rsa_key> -->
251
+ <!-- <host_ecdsa_key>path_to_the_ssh_key</host_ecdsa_key> -->
252
+ <!-- <host_ed25519_key>path_to_the_ssh_key</host_ed25519_key> -->
253
+
254
+ <!-- Unlocks the possibility to pass the client options as environment
255
+ variables in the form of: ssh -o SetEnv="key1=value1 key2=value2".
256
+ This is considered unsafe and should be used with caution.
257
+ -->
258
+ <!-- <enable_client_options_passing>false</enable_client_options_passing>-->
259
+ </ssh_server>
260
+
261
+ <!-- You can specify credentials for authentication between replicas.
262
+ This is required when interserver_https_port is accessible from untrusted networks,
263
+ and also recommended to avoid SSRF attacks from possibly compromised services in your network.
264
+ -->
265
+ <!--<interserver_http_credentials>
266
+ <user>interserver</user>
267
+ <password></password>
268
+ </interserver_http_credentials>-->
269
+
270
+ <!-- Listen specified address.
271
+ Use :: (wildcard IPv6 address), if you want to accept connections both with IPv4 and IPv6 from everywhere.
272
+ Notes:
273
+ If you open connections from wildcard address, make sure that at least one of the following measures is applied:
274
+ - server is protected by firewall and not accessible from untrusted networks;
275
+ - all users are restricted to subset of network addresses (see users.xml);
276
+ - all users have strong passwords, only secure (TLS) interfaces are accessible, or connections are only made via TLS interfaces.
277
+ - users without password have readonly access.
278
+ See also: https://www.shodan.io/search?query=clickhouse
279
+ -->
280
+ <!-- <listen_host>::</listen_host> -->
281
+
282
+
283
+ <!-- Same for hosts without support for IPv6: -->
284
+ <!-- <listen_host>0.0.0.0</listen_host> -->
285
+
286
+ <!-- Default values - try listen localhost on IPv4 and IPv6. -->
287
+ <!--
288
+ <listen_host>::1</listen_host>
289
+ <listen_host>127.0.0.1</listen_host>
290
+ -->
291
+
292
+ <!-- <interserver_listen_host>::</interserver_listen_host> -->
293
+ <!-- Listen host for communication between replicas. Used for data exchange -->
294
+ <!-- Default values - equal to listen_host -->
295
+
296
+ <!-- Don't exit if IPv6 or IPv4 networks are unavailable while trying to listen. -->
297
+ <!-- <listen_try>0</listen_try> -->
298
+
299
+ <!-- Allow multiple servers to listen on the same address:port. This is not recommended.
300
+ -->
301
+ <!-- <listen_reuse_port>0</listen_reuse_port> -->
302
+
303
+ <!-- <listen_backlog>4096</listen_backlog> -->
304
+
305
+ <!-- <max_connections>4096</max_connections> -->
306
+
307
+ <!-- For 'Connection: keep-alive' in HTTP 1.1 -->
308
+ <keep_alive_timeout>10</keep_alive_timeout>
309
+
310
+ <!-- Enable verbose output in /replicas_status handler. -->
311
+ <!-- <enable_verbose_replicas_status>true</enable_verbose_replicas_status> -->
312
+
313
+ <!-- Enable stacktrace in default http handler. -->
314
+ <!-- <enable_http_stacktrace>true</enable_http_stacktrace> -->
315
+
316
+ <!-- gRPC protocol (see src/Server/grpc_protos/clickhouse_grpc.proto for the API) -->
317
+ <!-- <grpc_port>9100</grpc_port> -->
318
+ <grpc>
319
+ <enable_ssl>false</enable_ssl>
320
+
321
+ <!-- The following two files are used only if enable_ssl=1 -->
322
+ <ssl_cert_file>/path/to/ssl_cert_file</ssl_cert_file>
323
+ <ssl_key_file>/path/to/ssl_key_file</ssl_key_file>
324
+
325
+ <!-- Whether server will request client for a certificate -->
326
+ <ssl_require_client_auth>false</ssl_require_client_auth>
327
+
328
+ <!-- The following file is used only if ssl_require_client_auth=1 -->
329
+ <ssl_ca_cert_file>/path/to/ssl_ca_cert_file</ssl_ca_cert_file>
330
+
331
+ <!-- Default transport compression type (can be overridden by client, see the transport_compression_type field in QueryInfo).
332
+ Supported algorithms: none, deflate, gzip, stream_gzip -->
333
+ <transport_compression_type>none</transport_compression_type>
334
+
335
+ <!-- Default transport compression level. Supported levels: 0..3 -->
336
+ <transport_compression_level>0</transport_compression_level>
337
+
338
+ <!-- Send/receive message size limits in bytes. -1 means unlimited -->
339
+ <max_send_message_size>-1</max_send_message_size>
340
+ <max_receive_message_size>-1</max_receive_message_size>
341
+
342
+ <!-- Enable if you want very detailed logs -->
343
+ <verbose_logs>false</verbose_logs>
344
+ </grpc>
345
+
346
+ <!-- Used with https_port and tcp_port_secure. Full ssl options list: https://github.com/ClickHouse-Extras/poco/blob/master/NetSSL_OpenSSL/include/Poco/Net/SSLManager.h#L71
347
+ Note: ClickHouse Cloud https://clickhouse.com/cloud always has secure connections configured.
348
+ -->
349
+ <openSSL>
350
+ <server> <!-- Used for https server AND secure tcp port -->
351
+ <!-- openssl req -subj "/CN=localhost" -new -newkey rsa:2048 -days 365 -nodes -x509 -keyout /etc/clickhouse-server/server.key -out /etc/clickhouse-server/server.crt -->
352
+ <!-- <certificateFile>/etc/clickhouse-server/server.crt</certificateFile>
353
+ <privateKeyFile>/etc/clickhouse-server/server.key</privateKeyFile> -->
354
+ <!-- dhparams are optional. You can delete the <dhParamsFile> element.
355
+ To generate dhparams, use the following command:
356
+ openssl dhparam -out /etc/clickhouse-server/dhparam.pem 4096
357
+ Only file format with BEGIN DH PARAMETERS is supported.
358
+ -->
359
+ <!-- <dhParamsFile>/etc/clickhouse-server/dhparam.pem</dhParamsFile>-->
360
+ <verificationMode>none</verificationMode>
361
+ <loadDefaultCAFile>true</loadDefaultCAFile>
362
+ <cacheSessions>true</cacheSessions>
363
+ <disableProtocols>sslv2,sslv3</disableProtocols>
364
+ <preferServerCiphers>true</preferServerCiphers>
365
+
366
+ <invalidCertificateHandler>
367
+ <!-- The server, in contrast to the client, cannot ask about the certificate interactively.
368
+ The only reasonable option is to reject.
369
+ -->
370
+ <name>RejectCertificateHandler</name>
371
+ </invalidCertificateHandler>
372
+ </server>
373
+
374
+ <client> <!-- Used for connecting to https dictionary source and secured Zookeeper communication -->
375
+ <loadDefaultCAFile>true</loadDefaultCAFile>
376
+ <cacheSessions>true</cacheSessions>
377
+ <disableProtocols>sslv2,sslv3</disableProtocols>
378
+ <preferServerCiphers>true</preferServerCiphers>
379
+ <!-- Use for self-signed: <verificationMode>none</verificationMode> -->
380
+ <invalidCertificateHandler>
381
+ <!-- Use for self-signed: <name>AcceptCertificateHandler</name> -->
382
+ <name>RejectCertificateHandler</name>
383
+ </invalidCertificateHandler>
384
+ </client>
385
+ </openSSL>
386
+
387
+ <!-- Default root page on http[s] server. -->
388
+ <!--
389
+ <http_server_default_response><![CDATA[Greetings from ClickHouse!]]></http_server_default_response>
390
+ -->
391
+
392
+ <!-- The maximum number of query processing threads, excluding threads for retrieving data from remote servers, allowed to run all queries.
393
+ This is not a hard limit. In case if the limit is reached the query will still get at least one thread to run.
394
+ Query can upscale to desired number of threads during execution if more threads become available.
395
+ -->
396
+ <concurrent_threads_soft_limit_num>0</concurrent_threads_soft_limit_num>
397
+ <concurrent_threads_soft_limit_ratio_to_cores>2</concurrent_threads_soft_limit_ratio_to_cores>
398
+ <concurrent_threads_scheduler>fair_round_robin</concurrent_threads_scheduler>
399
+
400
+ <!-- Maximum number of concurrent queries. -->
401
+ <max_concurrent_queries>1000</max_concurrent_queries>
402
+
403
+ <!-- Maximum memory usage (resident set size) for server process.
404
+ Zero value or unset means default. Default is "max_server_memory_usage_to_ram_ratio" of available physical RAM.
405
+ If the value is larger than "max_server_memory_usage_to_ram_ratio" of available physical RAM, it will be cut down.
406
+
407
+ The constraint is checked on query execution time.
408
+ If a query tries to allocate memory and the current memory usage plus allocation is greater
409
+ than specified threshold, exception will be thrown.
410
+
411
+ It is not practical to set this constraint to small values like just a few gigabytes,
412
+ because memory allocator will keep this amount of memory in caches and the server will deny service of queries.
413
+ -->
414
+ <max_server_memory_usage>0</max_server_memory_usage>
415
+
416
+ <!-- Maximum number of threads in the Global thread pool.
417
+ This will default to a maximum of 10000 threads if not specified.
418
+ This setting will be useful in scenarios where there are a large number
419
+ of distributed queries that are running concurrently but are idling most
420
+ of the time, in which case a higher number of threads might be required.
421
+ -->
422
+
423
+ <max_thread_pool_size>10000</max_thread_pool_size>
424
+
425
+ <!-- Configure other thread pools: -->
426
+ <!--
427
+ <background_buffer_flush_schedule_pool_size>16</background_buffer_flush_schedule_pool_size>
428
+ <background_pool_size>16</background_pool_size>
429
+ <background_merges_mutations_concurrency_ratio>2</background_merges_mutations_concurrency_ratio>
430
+ <background_merges_mutations_scheduling_policy>round_robin</background_merges_mutations_scheduling_policy>
431
+ <background_move_pool_size>8</background_move_pool_size>
432
+ <background_fetches_pool_size>8</background_fetches_pool_size>
433
+ <background_common_pool_size>8</background_common_pool_size>
434
+ <background_schedule_pool_size>128</background_schedule_pool_size>
435
+ <background_message_broker_schedule_pool_size>16</background_message_broker_schedule_pool_size>
436
+ <background_distributed_schedule_pool_size>16</background_distributed_schedule_pool_size>
437
+ <tables_loader_foreground_pool_size>0</tables_loader_foreground_pool_size>
438
+ <tables_loader_background_pool_size>0</tables_loader_background_pool_size>
439
+ -->
440
+
441
+ <!-- Enables asynchronous loading of databases and tables to speedup server startup.
442
+ Queries to not yet loaded entity will be blocked until load is finished.
443
+ -->
444
+ <async_load_databases>true</async_load_databases>
445
+
446
+ <!-- On memory constrained environments you may have to set this to a value larger than 1.
447
+ -->
448
+ <max_server_memory_usage_to_ram_ratio>0.9</max_server_memory_usage_to_ram_ratio>
449
+
450
+ <!-- Simple server-wide memory profiler. Collect a stack trace at every peak allocation step (in bytes).
451
+ Data will be stored in system.trace_log table with query_id = empty string.
452
+ Zero means disabled.
453
+ -->
454
+ <total_memory_profiler_step>4194304</total_memory_profiler_step>
455
+
456
+ <!-- Collect random allocations and deallocations and write them into system.trace_log with 'MemorySample' trace_type.
457
+ The probability is for every alloc/free regardless to the size of the allocation.
458
+ Note that sampling happens only when the amount of untracked memory exceeds the untracked memory limit,
459
+ which is 4 MiB by default but can be lowered if 'total_memory_profiler_step' is lowered.
460
+ You may want to set 'total_memory_profiler_step' to 1 for extra fine grained sampling.
461
+ -->
462
+ <total_memory_tracker_sample_probability>0</total_memory_tracker_sample_probability>
463
+
464
+ <!-- Set limit on number of open files (default: maximum). This setting makes sense on Mac OS X because getrlimit() fails to retrieve
465
+ correct maximum value. -->
466
+ <!-- <max_open_files>262144</max_open_files> -->
467
+
468
+ <!-- Size of cache of uncompressed blocks of data, used in tables of MergeTree family.
469
+ In bytes. Cache is single for server. Memory is allocated only on demand.
470
+ Cache is used when 'use_uncompressed_cache' user setting turned on (off by default).
471
+ Uncompressed cache is advantageous only for very short queries and in rare cases.
472
+
473
+ Note: uncompressed cache can be pointless for lz4, because memory bandwidth
474
+ is slower than multi-core decompression on some server configurations.
475
+ Enabling it can sometimes paradoxically make queries slower.
476
+ -->
477
+ <uncompressed_cache_size>8589934592</uncompressed_cache_size>
478
+
479
+ <!-- Approximate size of mark cache, used in tables of MergeTree family.
480
+ In bytes. Cache is single for server. Memory is allocated only on demand.
481
+ You should not lower this value. -->
482
+ <!-- <mark_cache_size>5368709120</mark_cache_size> -->
483
+
484
+ <!-- For marks of secondary indices. -->
485
+ <!-- <index_mark_cache_size>5368709120</index_mark_cache_size> -->
486
+
487
+ <!-- If you enable the `min_bytes_to_use_mmap_io` setting,
488
+ the data in MergeTree tables can be read with mmap to avoid copying from kernel to userspace.
489
+ It makes sense only for large files and helps only if data reside in page cache.
490
+ To avoid frequent open/mmap/munmap/close calls (which are very expensive due to consequent page faults)
491
+ and to reuse mappings from several threads and queries,
492
+ the cache of mapped files is maintained. Its size is the number of mapped regions (usually equal to the number of mapped files).
493
+ The amount of data in mapped files can be monitored
494
+ in system.metrics, system.metric_log by the MMappedFiles, MMappedFileBytes metrics
495
+ and in system.asynchronous_metrics, system.asynchronous_metrics_log by the MMapCacheCells metric,
496
+ and also in system.events, system.processes, system.query_log, system.query_thread_log, system.query_views_log by the
497
+ CreatedReadBufferMMap, CreatedReadBufferMMapFailed, MMappedFileCacheHits, MMappedFileCacheMisses events.
498
+ Note that the amount of data in mapped files does not consume memory directly and is not accounted
499
+ in query or server memory usage - because this memory can be discarded similar to OS page cache.
500
+ The cache is dropped (the files are closed) automatically on removal of old parts in MergeTree,
501
+ also it can be dropped manually by the SYSTEM DROP MMAP CACHE query.
502
+ -->
503
+ <!-- <mmap_cache_size>1024</mmap_cache_size> -->
504
+
505
+ <!-- Cache size in bytes for compiled expressions.-->
506
+ <!-- <compiled_expression_cache_size>134217728</compiled_expression_cache_size> -->
507
+
508
+ <!-- Cache size in elements for compiled expressions.-->
509
+ <!-- <compiled_expression_cache_elements_size>10000</compiled_expression_cache_elements_size> -->
510
+
511
+ <!-- Size of the query condition cache in bytes. -->
512
+ <query_condition_cache_size>106700800</query_condition_cache_size>
513
+
514
+ <!-- Configuration for the query cache -->
515
+ <!--
516
+ <query_cache>
517
+ <max_size_in_bytes>1073741824</max_size_in_bytes>
518
+ <max_entries>1024</max_entries>
519
+ <max_entry_size_in_bytes>1048576</max_entry_size_in_bytes>
520
+ <max_entry_size_in_rows>30000000</max_entry_size_in_rows>
521
+ </query_cache>
522
+ -->
523
+
524
+ <!-- Cache path for custom (created from SQL) cached disks -->
525
+ <custom_cached_disks_base_directory>/var/lib/clickhouse/caches/</custom_cached_disks_base_directory>
526
+
527
+ <validate_tcp_client_information>false</validate_tcp_client_information>
528
+
529
+ <!-- Path to data directory, with trailing slash. -->
530
+ <path>/var/lib/clickhouse/</path>
531
+
532
+ <!-- Multi-disk configuration example: -->
533
+ <!--
534
+ <storage_configuration>
535
+ <disks>
536
+ <default>
537
+ <keep_free_space_bytes>0</keep_free_space_bytes>
538
+ </default>
539
+ <data>
540
+ <path>/data/</path>
541
+ <keep_free_space_bytes>0</keep_free_space_bytes>
542
+ </data>
543
+ <s3>
544
+ <type>s3</type>
545
+ <endpoint>http://path/to/endpoint</endpoint>
546
+ <access_key_id>your_access_key_id</access_key_id>
547
+ <secret_access_key>your_secret_access_key</secret_access_key>
548
+ </s3>
549
+ <blob_storage_disk>
550
+ <type>azure_blob_storage</type>
551
+ <storage_account_url>http://account.blob.core.windows.net</storage_account_url>
552
+ <container_name>container</container_name>
553
+ <account_name>account</account_name>
554
+ <account_key>pass123</account_key>
555
+ <metadata_path>/var/lib/clickhouse/disks/blob_storage_disk/</metadata_path>
556
+ <skip_access_check>false</skip_access_check>
557
+ </blob_storage_disk>
558
+ </disks>
559
+
560
+ <policies>
561
+ <all>
562
+ <volumes>
563
+ <main>
564
+ <disk>default</disk>
565
+ <disk>data</disk>
566
+ <disk>s3</disk>
567
+ <disk>blob_storage_disk</disk>
568
+
569
+ <max_data_part_size_bytes></max_data_part_size_bytes>
570
+ <max_data_part_size_ratio></max_data_part_size_ratio>
571
+ <perform_ttl_move_on_insert>true</perform_ttl_move_on_insert>
572
+ <load_balancing>round_robin</load_balancing>
573
+ </main>
574
+ </volumes>
575
+ <move_factor>0.2</move_factor>
576
+ </all>
577
+ </policies>
578
+ </storage_configuration>
579
+ -->
580
+
581
+ <!-- Default database disk storing metadata files: -->
582
+ <!--
583
+ <database_disk>
584
+ <disk>default</disk>
585
+ </database_disk>
586
+ -->
587
+
588
+ <!-- Path to temporary data for processing heavy queries. -->
589
+ <!-- NOTE: all files with `tmp` prefix will be removed at server startup -->
590
+ <tmp_path>/var/lib/clickhouse/tmp/</tmp_path>
591
+
592
+ <!-- Disable AuthType plaintext_password and no_password for ACL. -->
593
+ <allow_plaintext_password>1</allow_plaintext_password>
594
+ <allow_no_password>1</allow_no_password>
595
+ <allow_implicit_no_password>1</allow_implicit_no_password>
596
+
597
+ <!-- When a user does not specify a password type in the CREATE USER query, the default password type is used.
598
+ Accepted values are: 'plaintext_password', 'sha256_password', 'double_sha1_password', 'bcrypt_password'.
599
+ -->
600
+ <default_password_type>sha256_password</default_password_type>
601
+
602
+ <!-- Work factor for bcrypt_password authentication type -->
603
+ <bcrypt_workfactor>12</bcrypt_workfactor>
604
+
605
+ <!-- Complexity requirements for user passwords.
606
+ Note: ClickHouse Cloud https://clickhouse.com/cloud is always configured for strong passwords.
607
+ -->
608
+ <!-- <password_complexity>
609
+ <rule>
610
+ <pattern>.{12}</pattern>
611
+ <message>be at least 12 characters long</message>
612
+ </rule>
613
+ <rule>
614
+ <pattern>\p{N}</pattern>
615
+ <message>contain at least 1 numeric character</message>
616
+ </rule>
617
+ <rule>
618
+ <pattern>\p{Ll}</pattern>
619
+ <message>contain at least 1 lowercase character</message>
620
+ </rule>
621
+ <rule>
622
+ <pattern>\p{Lu}</pattern>
623
+ <message>contain at least 1 uppercase character</message>
624
+ </rule>
625
+ <rule>
626
+ <pattern>[^\p{L}\p{N}]</pattern>
627
+ <message>contain at least 1 special character</message>
628
+ </rule>
629
+ </password_complexity> -->
630
+
631
+ <!-- Policy from the <storage_configuration> for the temporary files.
632
+ If not set <tmp_path> is used, otherwise <tmp_path> is ignored.
633
+
634
+ Notes:
635
+ - move_factor is ignored
636
+ - keep_free_space_bytes is ignored
637
+ - max_data_part_size_bytes is ignored
638
+ - you must have exactly one volume in that policy
639
+
640
+ NOTE: all files with `tmp` prefix will be removed at server startup
641
+ -->
642
+ <!-- <tmp_policy>tmp</tmp_policy> -->
643
+
644
+ <!-- Directory with user provided files that are accessible by 'file' table function. -->
645
+ <user_files_path>/var/lib/clickhouse/user_files/</user_files_path>
646
+
647
+ <!-- LDAP server definitions. -->
648
+ <ldap_servers>
649
+ <!-- List LDAP servers with their connection parameters here to later 1) use them as authenticators for dedicated local users,
650
+ who have 'ldap' authentication mechanism specified instead of 'password', or to 2) use them as remote user directories.
651
+ Parameters:
652
+ host - LDAP server hostname or IP, this parameter is mandatory and cannot be empty.
653
+ port - LDAP server port, default is 636 if enable_tls is set to true, 389 otherwise.
654
+ bind_dn - template used to construct the DN to bind to.
655
+ The resulting DN will be constructed by replacing all '{user_name}' substrings of the template with the actual
656
+ user name during each authentication attempt.
657
+ user_dn_detection - section with LDAP search parameters for detecting the actual user DN of the bound user.
658
+ This is mainly used in search filters for further role mapping when the server is Active Directory. The
659
+ resulting user DN will be used when replacing '{user_dn}' substrings wherever they are allowed. By default,
660
+ user DN is set equal to bind DN, but once search is performed, it will be updated to the actual detected
661
+ user DN value.
662
+ base_dn - template used to construct the base DN for the LDAP search.
663
+ The resulting DN will be constructed by replacing all '{user_name}' and '{bind_dn}' substrings
664
+ of the template with the actual user name and bind DN during the LDAP search.
665
+ scope - scope of the LDAP search.
666
+ Accepted values are: 'base', 'one_level', 'children', 'subtree' (the default).
667
+ search_filter - template used to construct the search filter for the LDAP search.
668
+ The resulting filter will be constructed by replacing all '{user_name}', '{bind_dn}', and '{base_dn}'
669
+ substrings of the template with the actual user name, bind DN, and base DN during the LDAP search.
670
+ Note, that the special characters must be escaped properly in XML.
671
+ verification_cooldown - a period of time, in seconds, after a successful bind attempt, during which a user will be assumed
672
+ to be successfully authenticated for all consecutive requests without contacting the LDAP server.
673
+ Specify 0 (the default) to disable caching and force contacting the LDAP server for each authentication request.
674
+ enable_tls - flag to trigger use of secure connection to the LDAP server.
675
+ Specify 'no' for plain text (ldap://) protocol (not recommended).
676
+ Specify 'yes' for LDAP over SSL/TLS (ldaps://) protocol (recommended, the default).
677
+ Specify 'starttls' for legacy StartTLS protocol (plain text (ldap://) protocol, upgraded to TLS).
678
+ tls_minimum_protocol_version - the minimum protocol version of SSL/TLS.
679
+ Accepted values are: 'ssl2', 'ssl3', 'tls1.0', 'tls1.1', 'tls1.2' (the default).
680
+ tls_require_cert - SSL/TLS peer certificate verification behavior.
681
+ Accepted values are: 'never', 'allow', 'try', 'demand' (the default).
682
+ tls_cert_file - path to certificate file.
683
+ tls_key_file - path to certificate key file.
684
+ tls_ca_cert_file - path to CA certificate file.
685
+ tls_ca_cert_dir - path to the directory containing CA certificates.
686
+ tls_cipher_suite - allowed cipher suite (in OpenSSL notation).
687
+ Example:
688
+ <my_ldap_server>
689
+ <host>localhost</host>
690
+ <port>636</port>
691
+ <bind_dn>uid={user_name},ou=users,dc=example,dc=com</bind_dn>
692
+ <verification_cooldown>300</verification_cooldown>
693
+ <enable_tls>yes</enable_tls>
694
+ <tls_minimum_protocol_version>tls1.2</tls_minimum_protocol_version>
695
+ <tls_require_cert>demand</tls_require_cert>
696
+ <tls_cert_file>/path/to/tls_cert_file</tls_cert_file>
697
+ <tls_key_file>/path/to/tls_key_file</tls_key_file>
698
+ <tls_ca_cert_file>/path/to/tls_ca_cert_file</tls_ca_cert_file>
699
+ <tls_ca_cert_dir>/path/to/tls_ca_cert_dir</tls_ca_cert_dir>
700
+ <tls_cipher_suite>ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:AES256-GCM-SHA384</tls_cipher_suite>
701
+ </my_ldap_server>
702
+ Example (typical Active Directory with configured user DN detection for further role mapping):
703
+ <my_ad_server>
704
+ <host>localhost</host>
705
+ <port>389</port>
706
+ <bind_dn>EXAMPLE\{user_name}</bind_dn>
707
+ <user_dn_detection>
708
+ <base_dn>CN=Users,DC=example,DC=com</base_dn>
709
+ <search_filter>(&amp;(objectClass=user)(sAMAccountName={user_name}))</search_filter>
710
+ </user_dn_detection>
711
+ <enable_tls>no</enable_tls>
712
+ </my_ad_server>
713
+ -->
714
+ </ldap_servers>
715
+
716
+ <!-- To enable Kerberos authentication support for HTTP requests (GSS-SPNEGO), for those users who are explicitly configured
717
+ to authenticate via Kerberos, define a single 'kerberos' section here.
718
+ Parameters:
719
+ principal - canonical service principal name, that will be acquired and used when accepting security contexts.
720
+ This parameter is optional, if omitted, the default principal will be used.
721
+ This parameter cannot be specified together with 'realm' parameter.
722
+ realm - a realm, that will be used to restrict authentication to only those requests whose initiator's realm matches it.
723
+ This parameter is optional, if omitted, no additional filtering by realm will be applied.
724
+ This parameter cannot be specified together with 'principal' parameter.
725
+ Example:
726
+ <kerberos />
727
+ Example:
728
+ <kerberos>
729
+ <principal>HTTP/clickhouse.example.com@EXAMPLE.COM</principal>
730
+ </kerberos>
731
+ Example:
732
+ <kerberos>
733
+ <realm>EXAMPLE.COM</realm>
734
+ </kerberos>
735
+ -->
736
+
737
+ <!-- Sources to read users, roles, access rights, profiles of settings, quotas. -->
738
+ <user_directories>
739
+ <users_xml>
740
+ <!-- Path to configuration file with predefined users. -->
741
+ <path>users.xml</path>
742
+ </users_xml>
743
+ <local_directory>
744
+ <!-- Path to folder where users created by SQL commands are stored. -->
745
+ <path>/var/lib/clickhouse/access/</path>
746
+ </local_directory>
747
+
748
+ <!-- To add an LDAP server as a remote user directory of users that are not defined locally, define a single 'ldap' section
749
+ with the following parameters:
750
+ server - one of LDAP server names defined in 'ldap_servers' config section above.
751
+ This parameter is mandatory and cannot be empty.
752
+ roles - section with a list of locally defined roles that will be assigned to each user retrieved from the LDAP server.
753
+ If no roles are specified here or assigned during role mapping (below), user will not be able to perform any
754
+ actions after authentication.
755
+ role_mapping - section with LDAP search parameters and mapping rules.
756
+ When a user authenticates, while still bound to LDAP, an LDAP search is performed using search_filter and the
757
+ name of the logged in user. For each entry found during that search, the value of the specified attribute is
758
+ extracted. For each attribute value that has the specified prefix, the prefix is removed, and the rest of the
759
+ value becomes the name of a local role defined in ClickHouse, which is expected to be created beforehand by
760
+ CREATE ROLE command.
761
+ There can be multiple 'role_mapping' sections defined inside the same 'ldap' section. All of them will be
762
+ applied.
763
+ base_dn - template used to construct the base DN for the LDAP search.
764
+ The resulting DN will be constructed by replacing all '{user_name}', '{bind_dn}', and '{user_dn}'
765
+ substrings of the template with the actual user name, bind DN, and user DN during each LDAP search.
766
+ scope - scope of the LDAP search.
767
+ Accepted values are: 'base', 'one_level', 'children', 'subtree' (the default).
768
+ search_filter - template used to construct the search filter for the LDAP search.
769
+ The resulting filter will be constructed by replacing all '{user_name}', '{bind_dn}', '{user_dn}', and
770
+ '{base_dn}' substrings of the template with the actual user name, bind DN, user DN, and base DN during
771
+ each LDAP search.
772
+ Note, that the special characters must be escaped properly in XML.
773
+ attribute - attribute name whose values will be returned by the LDAP search. 'cn', by default.
774
+ prefix - prefix, that will be expected to be in front of each string in the original list of strings returned by
775
+ the LDAP search. Prefix will be removed from the original strings and resulting strings will be treated
776
+ as local role names. Empty, by default.
777
+ Example:
778
+ <ldap>
779
+ <server>my_ldap_server</server>
780
+ <roles>
781
+ <my_local_role1 />
782
+ <my_local_role2 />
783
+ </roles>
784
+ <role_mapping>
785
+ <base_dn>ou=groups,dc=example,dc=com</base_dn>
786
+ <scope>subtree</scope>
787
+ <search_filter>(&amp;(objectClass=groupOfNames)(member={bind_dn}))</search_filter>
788
+ <attribute>cn</attribute>
789
+ <prefix>clickhouse_</prefix>
790
+ </role_mapping>
791
+ </ldap>
792
+ Example (typical Active Directory with role mapping that relies on the detected user DN):
793
+ <ldap>
794
+ <server>my_ad_server</server>
795
+ <role_mapping>
796
+ <base_dn>CN=Users,DC=example,DC=com</base_dn>
797
+ <attribute>CN</attribute>
798
+ <scope>subtree</scope>
799
+ <search_filter>(&amp;(objectClass=group)(member={user_dn}))</search_filter>
800
+ <prefix>clickhouse_</prefix>
801
+ </role_mapping>
802
+ </ldap>
803
+ -->
804
+ </user_directories>
805
+
806
+ <access_control_improvements>
807
+ <!-- Enables logic that users without permissive row policies can still read rows using a SELECT query.
808
+ For example, if there are two users A and B, and a row policy is defined only for A, then
809
+ if this setting is true the user B will see all rows, and if this setting is false the user B will see no rows.
810
+ By default this setting is true. -->
811
+ <users_without_row_policies_can_read_rows>true</users_without_row_policies_can_read_rows>
812
+
813
+ <!-- By default, for backward compatibility ON CLUSTER queries ignore CLUSTER grant,
814
+ however you can change this behaviour by setting this to true -->
815
+ <on_cluster_queries_require_cluster_grant>true</on_cluster_queries_require_cluster_grant>
816
+
817
+ <!-- By default, for backward compatibility "SELECT * FROM system.<table>" doesn't require any grants and can be executed
818
+ by any user. You can change this behaviour by setting this to true.
819
+ If it's set to true then this query requires "GRANT SELECT ON system.<table>" just like as for non-system tables.
820
+ Exceptions: a few system tables ("tables", "columns", "databases", and some constant tables like "one", "contributors")
821
+ are still accessible for everyone; and if there is a SHOW privilege (e.g. "SHOW USERS") granted the corresponding system
822
+ table (i.e. "system.users") will be accessible. -->
823
+ <select_from_system_db_requires_grant>true</select_from_system_db_requires_grant>
824
+
825
+ <!-- By default, for backward compatibility "SELECT * FROM information_schema.<table>" doesn't require any grants and can be
826
+ executed by any user. You can change this behaviour by setting this to true.
827
+ If it's set to true then this query requires "GRANT SELECT ON information_schema.<table>" just like as for ordinary tables. -->
828
+ <select_from_information_schema_requires_grant>true</select_from_information_schema_requires_grant>
829
+
830
+ <!-- By default, for backward compatibility a settings profile constraint for a specific setting inherit every not set field from
831
+ previous profile. You can change this behaviour by setting this to true.
832
+ If it's set to true then if settings profile has a constraint for a specific setting, then this constraint completely cancels all
833
+ actions of previous constraint (defined in other profiles) for the same specific setting, including fields that are not set by new constraint.
834
+ It also enables 'changeable_in_readonly' constraint type -->
835
+ <settings_constraints_replace_previous>true</settings_constraints_replace_previous>
836
+
837
+ <!-- By default, for backward compatibility creating table with a specific table engine ignores grant,
838
+ however you can change this behaviour by setting this to true -->
839
+ <table_engines_require_grant>false</table_engines_require_grant>
840
+
841
+ <!-- Number of seconds since last access a role is stored in the Role Cache -->
842
+ <role_cache_expiration_time_seconds>600</role_cache_expiration_time_seconds>
843
+ </access_control_improvements>
844
+
845
+ <!-- Default profile of settings. -->
846
+ <default_profile>default</default_profile>
847
+
848
+ <!-- Comma-separated list of prefixes for user-defined settings.
849
+ The server will allow to set these settings, and retrieve them with the getSetting function.
850
+ They are also logged in the query_log, similarly to other settings, but have no special effect.
851
+ The "SQL_" prefix is introduced for compatibility with MySQL - these settings are being set by Tableau.
852
+ -->
853
+ <custom_settings_prefixes>SQL_</custom_settings_prefixes>
854
+
855
+ <!-- System profile of settings. These settings are used by internal processes (Distributed DDL worker and so on). -->
856
+ <!-- <system_profile>default</system_profile> -->
857
+
858
+ <!-- Buffer profile of settings.
859
+ These settings are used by Buffer storage to flush data to the underlying table.
860
+ Default: used from system_profile directive.
861
+ -->
862
+ <!-- <buffer_profile>default</buffer_profile> -->
863
+
864
+ <!-- Default database. -->
865
+ <default_database>default</default_database>
866
+
867
+ <!-- Server time zone could be set here.
868
+
869
+ Time zone is used when converting between String and DateTime types,
870
+ when printing DateTime in text formats and parsing DateTime from text,
871
+ it is used in date and time related functions, if specific time zone was not passed as an argument.
872
+
873
+ Time zone is specified as identifier from IANA time zone database, like UTC or Africa/Abidjan.
874
+ If not specified, system time zone at server startup is used.
875
+
876
+ Please note, that server could display time zone alias instead of specified name.
877
+ Example: Zulu is an alias for UTC.
878
+ -->
879
+ <!-- <timezone>UTC</timezone> -->
880
+
881
+ <!-- You can specify umask here (see "man umask"). Server will apply it on startup.
882
+ Number is always parsed as octal. Default umask is 027 (other users cannot read logs, data files, etc; group can only read).
883
+ -->
884
+ <!-- <umask>022</umask> -->
885
+
886
+ <!-- Perform mlockall after startup to lower first queries latency
887
+ and to prevent clickhouse executable from being paged out under high IO load.
888
+ Enabling this option is recommended but will lead to increased startup time for up to a few seconds.
889
+ -->
890
+ <mlock_executable>true</mlock_executable>
891
+
892
+ <!-- Reallocate memory for machine code ("text") using huge pages. Highly experimental. -->
893
+ <remap_executable>false</remap_executable>
894
+
895
+ <![CDATA[
896
+ Uncomment below in order to use JDBC table engine and function.
897
+
898
+ To install and run JDBC bridge in background:
899
+ * [Debian/Ubuntu]
900
+ export MVN_URL=https://repo1.maven.org/maven2/com/clickhouse/clickhouse-jdbc-bridge/
901
+ export PKG_VER=$(curl -sL $MVN_URL/maven-metadata.xml | grep '<release>' | sed -e 's|.*>\(.*\)<.*|\1|')
902
+ wget https://github.com/ClickHouse/clickhouse-jdbc-bridge/releases/download/v$PKG_VER/clickhouse-jdbc-bridge_$PKG_VER-1_all.deb
903
+ apt install --no-install-recommends -f ./clickhouse-jdbc-bridge_$PKG_VER-1_all.deb
904
+ clickhouse-jdbc-bridge &
905
+
906
+ * [CentOS/RHEL]
907
+ export MVN_URL=https://repo1.maven.org/maven2/com/clickhouse/clickhouse-jdbc-bridge/
908
+ export PKG_VER=$(curl -sL $MVN_URL/maven-metadata.xml | grep '<release>' | sed -e 's|.*>\(.*\)<.*|\1|')
909
+ wget https://github.com/ClickHouse/clickhouse-jdbc-bridge/releases/download/v$PKG_VER/clickhouse-jdbc-bridge-$PKG_VER-1.noarch.rpm
910
+ yum localinstall -y clickhouse-jdbc-bridge-$PKG_VER-1.noarch.rpm
911
+ clickhouse-jdbc-bridge &
912
+
913
+ Please refer to https://github.com/ClickHouse/clickhouse-jdbc-bridge#usage for more information.
914
+ ]]>
915
+ <!--
916
+ <jdbc_bridge>
917
+ <host>127.0.0.1</host>
918
+ <port>9019</port>
919
+ </jdbc_bridge>
920
+ -->
921
+
922
+ <!-- Configuration of clusters that could be used in Distributed tables.
923
+ https://clickhouse.com/docs/en/operations/table_engines/distributed/
924
+ Note: ClickHouse Cloud https://clickhouse.com/cloud has the cluster preconfigured and dynamically scalable.
925
+ -->
926
+ <remote_servers>
927
+ <!-- Test only shard config for testing distributed storage -->
928
+ <default>
929
+ <!-- Inter-server per-cluster secret for Distributed queries
930
+ default: no secret (no authentication will be performed)
931
+
932
+ If set, then Distributed queries will be validated on shards, so at least:
933
+ - such cluster should exist on the shard,
934
+ - such cluster should have the same secret.
935
+
936
+ And also (and which is more important), the initial_user will
937
+ be used as current user for the query.
938
+
939
+ Right now the protocol is pretty simple, and it only takes into account:
940
+ - cluster name
941
+ - query
942
+
943
+ Also, it will be nice if the following will be implemented:
944
+ - source hostname (see interserver_http_host), but then it will depend on DNS,
945
+ it can use IP address instead, but then you need to get correct on the initiator node.
946
+ - target hostname / ip address (same notes as for source hostname)
947
+ - time-based security tokens
948
+ -->
949
+ <!-- <secret></secret> -->
950
+
951
+ <shard>
952
+ <!-- Optional. Whether to write data to just one of the replicas. Default: false (write data to all replicas). -->
953
+ <!-- <internal_replication>false</internal_replication> -->
954
+ <!-- Optional. Shard weight when writing data. Default: 1. -->
955
+ <!-- <weight>1</weight> -->
956
+ <replica>
957
+ <host>localhost</host>
958
+ <port>9000</port>
959
+ <!-- Optional. Priority of the replica for load_balancing. Default: 1 (less value has more priority). -->
960
+ <!-- <priority>1</priority> -->
961
+ <!-- Use SSL? Default: no -->
962
+ <!-- <secure>0</secure> -->
963
+ <!-- Optional. Bind to specific host before connecting to use a specific network. -->
964
+ <!-- <bind_host>10.0.0.1</bind_host> -->
965
+ </replica>
966
+ </shard>
967
+ </default>
968
+ </remote_servers>
969
+
970
+ <!-- The list of hosts allowed to use in URL-related storage engines and table functions.
971
+ If this section is not present in configuration, all hosts are allowed.
972
+ -->
973
+ <!--<remote_url_allow_hosts>-->
974
+ <!-- Host should be specified exactly as in URL. The name is checked before DNS resolution.
975
+ Example: "clickhouse.com", "clickhouse.com." and "www.clickhouse.com" are different hosts.
976
+ If port is explicitly specified in URL, the host:port is checked as a whole.
977
+ If host specified here without port, any port with this host allowed.
978
+ "clickhouse.com" -> "clickhouse.com:443", "clickhouse.com:80" etc. is allowed, but "clickhouse.com:80" -> only "clickhouse.com:80" is allowed.
979
+ If the host is specified as IP address, it is checked as specified in URL. Example: "[2a02:6b8:a::a]".
980
+ If there are redirects and support for redirects is enabled, every redirect (the Location field) is checked.
981
+ Host should be specified using the host xml tag:
982
+ <host>clickhouse.com</host>
983
+ -->
984
+
985
+ <!-- Regular expression can be specified. RE2 engine is used for regexps.
986
+ Regexps are not aligned: don't forget to add ^ and $. Also don't forget to escape dot (.) metacharacter
987
+ (forgetting to do so is a common source of error).
988
+ -->
989
+ <!--</remote_url_allow_hosts>-->
990
+
991
+ <!-- The list of HTTP headers forbidden to use in HTTP-related storage engines and table functions.
992
+ If this section is not present in configuration, all headers are allowed.
993
+ -->
994
+ <!-- <http_forbid_headers>
995
+ <header>exact_header</header>
996
+ <header_regexp>(?i)(case_insensitive_header)</header_regexp>
997
+ </http_forbid_headers> -->
998
+
999
+ <!-- If element has 'incl' attribute, then for it's value will be used corresponding substitution from another file.
1000
+ By default, path to file with substitutions is /etc/metrika.xml. It could be changed in config in 'include_from' element.
1001
+ Values for substitutions are specified in /clickhouse/name_of_substitution elements in that file.
1002
+ -->
1003
+
1004
+ <!-- ZooKeeper is used to store metadata about replicas, when using Replicated tables.
1005
+ Optional. If you don't use replicated tables, you could omit that.
1006
+
1007
+ See https://clickhouse.com/docs/en/engines/table-engines/mergetree-family/replication/
1008
+
1009
+ Note: ClickHouse Cloud https://clickhouse.com/cloud has ClickHouse Keeper automatically configured for every service.
1010
+ -->
1011
+
1012
+ <!--
1013
+ <zookeeper>
1014
+ <node>
1015
+ <host>example1</host>
1016
+ <port>2181</port>
1017
+ </node>
1018
+ <node>
1019
+ <host>example2</host>
1020
+ <port>2181</port>
1021
+ </node>
1022
+ <node>
1023
+ <host>example3</host>
1024
+ <port>2181</port>
1025
+ </node>
1026
+ </zookeeper>
1027
+ -->
1028
+
1029
+ <!-- Substitutions for parameters of replicated tables.
1030
+ Optional. If you don't use replicated tables, you could omit that.
1031
+
1032
+ See https://clickhouse.com/docs/en/engines/table-engines/mergetree-family/replication/#creating-replicated-tables
1033
+ -->
1034
+ <!--
1035
+ <macros>
1036
+ <shard>01</shard>
1037
+ <replica>example01-01-1</replica>
1038
+ </macros>
1039
+ -->
1040
+
1041
+ <!--
1042
+ <default_replica_path>/clickhouse/tables/{database}/{table}</default_replica_path>
1043
+ <default_replica_name>{replica}</default_replica_name>
1044
+ -->
1045
+
1046
+ <!-- Replica group name for database Replicated.
1047
+ The cluster created by Replicated database will consist of replicas in the same group.
1048
+ DDL queries will only wait for the replicas in the same group.
1049
+ Empty by default.
1050
+ -->
1051
+ <!--
1052
+ <replica_group_name></replica_group_name>
1053
+ -->
1054
+
1055
+
1056
+ <!-- Reloading interval for embedded dictionaries, in seconds. Default: 3600. -->
1057
+ <builtin_dictionaries_reload_interval>3600</builtin_dictionaries_reload_interval>
1058
+
1059
+
1060
+ <!-- Maximum session timeout, in seconds. Default: 3600. -->
1061
+ <max_session_timeout>3600</max_session_timeout>
1062
+
1063
+ <!-- Default session timeout, in seconds. Default: 60. -->
1064
+ <default_session_timeout>60</default_session_timeout>
1065
+
1066
+ <!-- Sending data to Graphite for monitoring. Several sections can be defined. -->
1067
+ <!--
1068
+ interval - send every X second
1069
+ root_path - prefix for keys
1070
+ hostname_in_path - append hostname to root_path (default = true)
1071
+ metrics - send data from table system.metrics
1072
+ events - send data from table system.events
1073
+ asynchronous_metrics - send data from table system.asynchronous_metrics
1074
+ -->
1075
+ <!--
1076
+ <graphite>
1077
+ <host>localhost</host>
1078
+ <port>42000</port>
1079
+ <timeout>0.1</timeout>
1080
+ <interval>60</interval>
1081
+ <root_path>one_min</root_path>
1082
+ <hostname_in_path>true</hostname_in_path>
1083
+
1084
+ <metrics>true</metrics>
1085
+ <events>true</events>
1086
+ <events_cumulative>false</events_cumulative>
1087
+ <asynchronous_metrics>true</asynchronous_metrics>
1088
+ </graphite>
1089
+ <graphite>
1090
+ <host>localhost</host>
1091
+ <port>42000</port>
1092
+ <timeout>0.1</timeout>
1093
+ <interval>1</interval>
1094
+ <root_path>one_sec</root_path>
1095
+
1096
+ <metrics>true</metrics>
1097
+ <events>true</events>
1098
+ <events_cumulative>false</events_cumulative>
1099
+ <asynchronous_metrics>false</asynchronous_metrics>
1100
+ </graphite>
1101
+ -->
1102
+
1103
+ <!-- Serve endpoint for Prometheus monitoring. -->
1104
+ <!--
1105
+ endpoint - metrics path (relative to root, starting with "/")
1106
+ port - port to set up the server. If not defined or 0, then http_port is used
1107
+ metrics - send data from table system.metrics
1108
+ events - send data from table system.events
1109
+ asynchronous_metrics - send data from table system.asynchronous_metrics
1110
+ -->
1111
+ <!--
1112
+ <prometheus>
1113
+ <endpoint>/metrics</endpoint>
1114
+ <port>9363</port>
1115
+
1116
+ <metrics>true</metrics>
1117
+ <events>true</events>
1118
+ <asynchronous_metrics>true</asynchronous_metrics>
1119
+ </prometheus>
1120
+ -->
1121
+
1122
+ <!-- Query log. Used only for queries with setting log_queries = 1. -->
1123
+ <query_log>
1124
+ <!-- What table to insert data into. If the table does not exist, it will be created.
1125
+ When query log structure is changed after system update,
1126
+ then old table will be renamed and new table will be created automatically.
1127
+ -->
1128
+ <database>system</database>
1129
+ <table>query_log</table>
1130
+ <!--
1131
+ PARTITION BY expr: https://clickhouse.com/docs/en/table_engines/mergetree-family/custom_partitioning_key/
1132
+ Example:
1133
+ event_date
1134
+ toMonday(event_date)
1135
+ toYYYYMM(event_date)
1136
+ toStartOfHour(event_time)
1137
+ -->
1138
+ <partition_by>toYYYYMM(event_date)</partition_by>
1139
+ <!--
1140
+ Table TTL specification: https://clickhouse.com/docs/en/engines/table-engines/mergetree-family/mergetree/#mergetree-table-ttl
1141
+ Example:
1142
+ event_date + INTERVAL 1 WEEK
1143
+ event_date + INTERVAL 7 DAY DELETE
1144
+ event_date + INTERVAL 2 WEEK TO DISK 'bbb'
1145
+
1146
+ <ttl>event_date + INTERVAL 30 DAY DELETE</ttl>
1147
+ -->
1148
+
1149
+ <!--
1150
+ ORDER BY expr: https://clickhouse.com/docs/en/engines/table-engines/mergetree-family/mergetree#order_by
1151
+ Example:
1152
+ event_date, event_time
1153
+ event_date, type, query_id
1154
+ event_date, event_time, initial_query_id
1155
+
1156
+ <order_by>event_date, event_time, initial_query_id</order_by>
1157
+ -->
1158
+
1159
+ <!-- Instead of partition_by, you can provide full engine expression (starting with ENGINE = ) with parameters,
1160
+ Example: <engine>ENGINE = MergeTree PARTITION BY toYYYYMM(event_date) ORDER BY (event_date, event_time) SETTINGS index_granularity = 1024</engine>
1161
+ -->
1162
+
1163
+ <!-- Interval of flushing data. -->
1164
+ <flush_interval_milliseconds>7500</flush_interval_milliseconds>
1165
+ <!-- Maximum size in lines for the logs. When non-flushed logs amount reaches max_size, logs dumped to the disk. -->
1166
+ <max_size_rows>1048576</max_size_rows>
1167
+ <!-- Pre-allocated size in lines for the logs. -->
1168
+ <reserved_size_rows>8192</reserved_size_rows>
1169
+ <!-- Lines amount threshold, reaching it launches flushing logs to the disk in background. -->
1170
+ <buffer_size_rows_flush_threshold>524288</buffer_size_rows_flush_threshold>
1171
+ <!-- Indication whether logs should be dumped to the disk in case of a crash -->
1172
+ <flush_on_crash>false</flush_on_crash>
1173
+
1174
+ <!-- example of using a different storage policy for a system table -->
1175
+ <!-- storage_policy>local_ssd</storage_policy -->
1176
+ </query_log>
1177
+
1178
+ <!-- Trace log. Stores stack traces collected by query profilers.
1179
+ See query_profiler_real_time_period_ns and query_profiler_cpu_time_period_ns settings. -->
1180
+ <trace_log>
1181
+ <database>system</database>
1182
+ <table>trace_log</table>
1183
+
1184
+ <partition_by>toYYYYMM(event_date)</partition_by>
1185
+ <flush_interval_milliseconds>7500</flush_interval_milliseconds>
1186
+ <max_size_rows>1048576</max_size_rows>
1187
+ <reserved_size_rows>8192</reserved_size_rows>
1188
+ <buffer_size_rows_flush_threshold>524288</buffer_size_rows_flush_threshold>
1189
+ <!-- Indication whether logs should be dumped to the disk in case of a crash -->
1190
+ <flush_on_crash>false</flush_on_crash>
1191
+ <symbolize>true</symbolize>
1192
+ </trace_log>
1193
+
1194
+ <!-- Query thread log. Has information about all threads participated in query execution.
1195
+ Used only for queries with setting log_query_threads = 1. -->
1196
+ <query_thread_log>
1197
+ <database>system</database>
1198
+ <table>query_thread_log</table>
1199
+ <partition_by>toYYYYMM(event_date)</partition_by>
1200
+ <flush_interval_milliseconds>7500</flush_interval_milliseconds>
1201
+ <max_size_rows>1048576</max_size_rows>
1202
+ <reserved_size_rows>8192</reserved_size_rows>
1203
+ <buffer_size_rows_flush_threshold>524288</buffer_size_rows_flush_threshold>
1204
+ <flush_on_crash>false</flush_on_crash>
1205
+ </query_thread_log>
1206
+
1207
+ <!-- Query views log. Has information about all dependent views associated with a query.
1208
+ Used only for queries with setting log_query_views = 1. -->
1209
+ <query_views_log>
1210
+ <database>system</database>
1211
+ <table>query_views_log</table>
1212
+ <partition_by>toYYYYMM(event_date)</partition_by>
1213
+ <flush_interval_milliseconds>7500</flush_interval_milliseconds>
1214
+ </query_views_log>
1215
+
1216
+ <!-- Part log contains information about all actions with parts in MergeTree tables (creation, deletion, merges, downloads). -->
1217
+ <part_log>
1218
+ <database>system</database>
1219
+ <table>part_log</table>
1220
+ <partition_by>toYYYYMM(event_date)</partition_by>
1221
+ <flush_interval_milliseconds>7500</flush_interval_milliseconds>
1222
+ <max_size_rows>1048576</max_size_rows>
1223
+ <reserved_size_rows>8192</reserved_size_rows>
1224
+ <buffer_size_rows_flush_threshold>524288</buffer_size_rows_flush_threshold>
1225
+ <flush_on_crash>false</flush_on_crash>
1226
+ </part_log>
1227
+
1228
+ <!-- Text log contains all information from usual server log but stores it in structured and efficient way.
1229
+ The level of the messages that goes to the table can be limited (<level>), if not specified all messages will go to the table.
1230
+ -->
1231
+ <text_log>
1232
+ <database>system</database>
1233
+ <table>text_log</table>
1234
+ <flush_interval_milliseconds>7500</flush_interval_milliseconds>
1235
+ <max_size_rows>1048576</max_size_rows>
1236
+ <reserved_size_rows>8192</reserved_size_rows>
1237
+ <buffer_size_rows_flush_threshold>524288</buffer_size_rows_flush_threshold>
1238
+ <flush_on_crash>false</flush_on_crash>
1239
+ <level>trace</level>
1240
+ </text_log>
1241
+
1242
+ <!-- Metric log contains rows with current values of ProfileEvents, CurrentMetrics collected with "collect_interval_milliseconds" interval. -->
1243
+ <metric_log>
1244
+ <database>system</database>
1245
+ <table>metric_log</table>
1246
+ <flush_interval_milliseconds>7500</flush_interval_milliseconds>
1247
+ <max_size_rows>1048576</max_size_rows>
1248
+ <reserved_size_rows>8192</reserved_size_rows>
1249
+ <buffer_size_rows_flush_threshold>524288</buffer_size_rows_flush_threshold>
1250
+ <collect_interval_milliseconds>1000</collect_interval_milliseconds>
1251
+ <flush_on_crash>false</flush_on_crash>
1252
+ </metric_log>
1253
+
1254
+ <!-- Error log contains rows with current values of errors collected with "collect_interval_milliseconds" interval. -->
1255
+ <error_log>
1256
+ <database>system</database>
1257
+ <table>error_log</table>
1258
+ <flush_interval_milliseconds>7500</flush_interval_milliseconds>
1259
+ <max_size_rows>1048576</max_size_rows>
1260
+ <reserved_size_rows>8192</reserved_size_rows>
1261
+ <buffer_size_rows_flush_threshold>524288</buffer_size_rows_flush_threshold>
1262
+ <collect_interval_milliseconds>1000</collect_interval_milliseconds>
1263
+ <flush_on_crash>false</flush_on_crash>
1264
+ </error_log>
1265
+
1266
+ <!-- Query metric log contains history of memory and metric values from table system.events for individual queries, periodically flushed to disk
1267
+ every "collect_interval_milliseconds" interval-->
1268
+ <query_metric_log>
1269
+ <database>system</database>
1270
+ <table>query_metric_log</table>
1271
+ <flush_interval_milliseconds>7500</flush_interval_milliseconds>
1272
+ <max_size_rows>1048576</max_size_rows>
1273
+ <reserved_size_rows>8192</reserved_size_rows>
1274
+ <buffer_size_rows_flush_threshold>524288</buffer_size_rows_flush_threshold>
1275
+ <collect_interval_milliseconds>1000</collect_interval_milliseconds>
1276
+ <flush_on_crash>false</flush_on_crash>
1277
+ </query_metric_log>
1278
+
1279
+ <!--
1280
+ Asynchronous metric log contains values of metrics from
1281
+ system.asynchronous_metrics.
1282
+ -->
1283
+ <asynchronous_metric_log>
1284
+ <database>system</database>
1285
+ <table>asynchronous_metric_log</table>
1286
+ <flush_interval_milliseconds>7000</flush_interval_milliseconds>
1287
+ <max_size_rows>1048576</max_size_rows>
1288
+ <reserved_size_rows>8192</reserved_size_rows>
1289
+ <buffer_size_rows_flush_threshold>524288</buffer_size_rows_flush_threshold>
1290
+ <flush_on_crash>false</flush_on_crash>
1291
+ </asynchronous_metric_log>
1292
+
1293
+ <!--
1294
+ OpenTelemetry log contains OpenTelemetry trace spans.
1295
+
1296
+ NOTE: this table does not use standard schema with event_date and event_time!
1297
+ -->
1298
+ <opentelemetry_span_log>
1299
+ <!--
1300
+ The default table creation code is insufficient, this <engine> spec
1301
+ is a workaround. There is no 'event_time' for this log, but two times,
1302
+ start and finish. It is sorted by finish time, to avoid inserting
1303
+ data too far away in the past (probably we can sometimes insert a span
1304
+ that is seconds earlier than the last span in the table, due to a race
1305
+ between several spans inserted in parallel). This gives the spans a
1306
+ global order that we can use to e.g. retry insertion into some external
1307
+ system.
1308
+ -->
1309
+ <engine>
1310
+ engine MergeTree
1311
+ partition by toYYYYMM(finish_date)
1312
+ order by (finish_date, finish_time_us)
1313
+ </engine>
1314
+ <database>system</database>
1315
+ <table>opentelemetry_span_log</table>
1316
+ <flush_interval_milliseconds>7500</flush_interval_milliseconds>
1317
+ <max_size_rows>1048576</max_size_rows>
1318
+ <reserved_size_rows>8192</reserved_size_rows>
1319
+ <buffer_size_rows_flush_threshold>524288</buffer_size_rows_flush_threshold>
1320
+ <flush_on_crash>false</flush_on_crash>
1321
+ </opentelemetry_span_log>
1322
+
1323
+
1324
+ <!-- Crash log. Stores stack traces for fatal errors.
1325
+ This table is normally empty. -->
1326
+ <crash_log>
1327
+ <database>system</database>
1328
+ <table>crash_log</table>
1329
+
1330
+ <partition_by />
1331
+ <flush_interval_milliseconds>1000</flush_interval_milliseconds>
1332
+ <max_size_rows>1024</max_size_rows>
1333
+ <reserved_size_rows>1024</reserved_size_rows>
1334
+ <buffer_size_rows_flush_threshold>512</buffer_size_rows_flush_threshold>
1335
+ <flush_on_crash>true</flush_on_crash>
1336
+ </crash_log>
1337
+
1338
+ <!-- Session log. Stores user log in (successful or not) and log out events. -->
1339
+ <!-- <session_log>
1340
+ <database>system</database>
1341
+ <table>session_log</table>
1342
+
1343
+ <partition_by>toYYYYMM(event_date)</partition_by>
1344
+ <flush_interval_milliseconds>7500</flush_interval_milliseconds>
1345
+ <max_size_rows>1048576</max_size_rows>
1346
+ <reserved_size_rows>8192</reserved_size_rows>
1347
+ <buffer_size_rows_flush_threshold>524288</buffer_size_rows_flush_threshold>
1348
+ <flush_on_crash>false</flush_on_crash>
1349
+ </session_log> -->
1350
+
1351
+ <!-- Profiling on Processors level. -->
1352
+ <processors_profile_log>
1353
+ <database>system</database>
1354
+ <table>processors_profile_log</table>
1355
+
1356
+ <partition_by>toYYYYMM(event_date)</partition_by>
1357
+ <flush_interval_milliseconds>7500</flush_interval_milliseconds>
1358
+ <max_size_rows>1048576</max_size_rows>
1359
+ <reserved_size_rows>8192</reserved_size_rows>
1360
+ <buffer_size_rows_flush_threshold>524288</buffer_size_rows_flush_threshold>
1361
+ <flush_on_crash>false</flush_on_crash>
1362
+ <ttl>event_date + INTERVAL 30 DAY DELETE</ttl>
1363
+ </processors_profile_log>
1364
+
1365
+ <!-- Log of asynchronous inserts. It allows to check status
1366
+ of insert query in fire-and-forget mode.
1367
+ -->
1368
+ <asynchronous_insert_log>
1369
+ <database>system</database>
1370
+ <table>asynchronous_insert_log</table>
1371
+
1372
+ <flush_interval_milliseconds>7500</flush_interval_milliseconds>
1373
+ <max_size_rows>1048576</max_size_rows>
1374
+ <reserved_size_rows>8192</reserved_size_rows>
1375
+ <buffer_size_rows_flush_threshold>524288</buffer_size_rows_flush_threshold>
1376
+ <flush_on_crash>false</flush_on_crash>
1377
+ <partition_by>event_date</partition_by>
1378
+ <ttl>event_date + INTERVAL 3 DAY</ttl>
1379
+ </asynchronous_insert_log>
1380
+
1381
+ <!-- Backup/restore log.
1382
+ -->
1383
+ <backup_log>
1384
+ <database>system</database>
1385
+ <table>backup_log</table>
1386
+ <partition_by>toYYYYMM(event_date)</partition_by>
1387
+ <flush_interval_milliseconds>7500</flush_interval_milliseconds>
1388
+ </backup_log>
1389
+
1390
+ <!-- Storage S3Queue log.
1391
+ -->
1392
+ <s3queue_log>
1393
+ <database>system</database>
1394
+ <table>s3queue_log</table>
1395
+ <partition_by>toYYYYMM(event_date)</partition_by>
1396
+ <flush_interval_milliseconds>7500</flush_interval_milliseconds>
1397
+ </s3queue_log>
1398
+
1399
+ <!-- Blob storage object operations log.
1400
+ -->
1401
+ <blob_storage_log>
1402
+ <database>system</database>
1403
+ <table>blob_storage_log</table>
1404
+ <partition_by>toYYYYMM(event_date)</partition_by>
1405
+ <flush_interval_milliseconds>7500</flush_interval_milliseconds>
1406
+ <ttl>event_date + INTERVAL 30 DAY</ttl>
1407
+ </blob_storage_log>
1408
+
1409
+ <!-- Configure system.dashboards for dashboard.html.
1410
+
1411
+ Could have any query parameters, for which there will be an input on the page.
1412
+ For instance, the example from the comments has the following:
1413
+ - seconds
1414
+ - rounding
1415
+
1416
+ NOTE: All default dashboards will be overwritten if it was set here. -->
1417
+ <!-- Here is an example without merge() function, to make it work with readonly user -->
1418
+ <!--
1419
+ <dashboards>
1420
+ <dashboard>
1421
+ <dashboard>Overview</dashboard>
1422
+ <title>Queries/second</title>
1423
+ <query>
1424
+ SELECT toStartOfInterval(event_time, INTERVAL {rounding:UInt32} SECOND)::INT AS t, avg(ProfileEvent_Query)
1425
+ FROM system.metric_log
1426
+ WHERE event_date >= toDate(now() - {seconds:UInt32}) AND event_time >= now() - {seconds:UInt32}
1427
+ GROUP BY t
1428
+ ORDER BY t WITH FILL STEP {rounding:UInt32}
1429
+ </query>
1430
+ </dashboard>
1431
+ </dashboards>
1432
+ -->
1433
+
1434
+ <!-- <top_level_domains_path>/var/lib/clickhouse/top_level_domains/</top_level_domains_path> -->
1435
+ <!-- Custom TLD lists.
1436
+ Format: <name>/path/to/file</name>
1437
+
1438
+ Changes will not be applied w/o server restart.
1439
+ Path to the list is under top_level_domains_path (see above).
1440
+ -->
1441
+ <top_level_domains_lists>
1442
+ <!--
1443
+ <public_suffix_list>/path/to/public_suffix_list.dat</public_suffix_list>
1444
+ -->
1445
+ </top_level_domains_lists>
1446
+
1447
+ <!-- Configuration of external dictionaries. See:
1448
+ https://clickhouse.com/docs/en/sql-reference/dictionaries/external-dictionaries/external-dicts
1449
+ -->
1450
+ <dictionaries_config>*_dictionary.*ml</dictionaries_config>
1451
+
1452
+ <!-- Load dictionaries lazily, i.e. a dictionary will be loaded when it's used for the first time.
1453
+ "false" means ClickHouse will start loading dictionaries immediately at startup.
1454
+ -->
1455
+ <dictionaries_lazy_load>true</dictionaries_lazy_load>
1456
+
1457
+ <!-- Wait at startup until all the dictionaries finish their loading (successfully or not)
1458
+ before receiving any connections. Affects dictionaries only if "dictionaries_lazy_load" is false.
1459
+ Setting this to false can make ClickHouse start faster, however some queries can be executed slower.
1460
+ -->
1461
+ <wait_dictionaries_load_at_startup>true</wait_dictionaries_load_at_startup>
1462
+
1463
+ <!-- Configuration of user defined executable functions -->
1464
+ <user_defined_executable_functions_config>*_function.*ml</user_defined_executable_functions_config>
1465
+
1466
+ <!-- Path in ZooKeeper to store user-defined SQL functions created by the command CREATE FUNCTION.
1467
+ If not specified they will be stored locally. -->
1468
+ <!-- <user_defined_zookeeper_path>/clickhouse/user_defined</user_defined_zookeeper_path> -->
1469
+
1470
+ <!-- Path in ZooKeeper to store workload and resource created by the command CREATE WORKLOAD and CREATE RESOURCE.
1471
+ If not specified they will be stored locally. -->
1472
+ <!-- <workload_zookeeper_path>/clickhouse/workload/definitions.sql</workload_zookeeper_path> -->
1473
+
1474
+ <!-- Uncomment if you want data to be compressed 30-100% better.
1475
+ Don't do that if you just started using ClickHouse.
1476
+ Note: ClickHouse Cloud https://clickhouse.com/cloud has a stronger compression by default.
1477
+ -->
1478
+ <!--
1479
+ <compression>
1480
+ <!- - Set of variants. Checked in order. Last matching case wins. If nothing matches, lz4 will be used. - ->
1481
+ <case>
1482
+
1483
+ <!- - Conditions. All must be satisfied. Some conditions may be omitted. - ->
1484
+ <min_part_size>10000000000</min_part_size> <!- - Min part size in bytes. - ->
1485
+ <min_part_size_ratio>0.01</min_part_size_ratio> <!- - Min size of part relative to whole table size. - ->
1486
+
1487
+ <!- - What compression method to use. - ->
1488
+ <method>zstd</method>
1489
+ </case>
1490
+ </compression>
1491
+ -->
1492
+
1493
+ <!-- Configuration of encryption. The server executes a command to
1494
+ obtain an encryption key at startup if such a command is
1495
+ defined, or encryption codecs will be disabled otherwise. The
1496
+ command is executed through /bin/sh and is expected to write
1497
+ a Base64-encoded key to the stdout.
1498
+
1499
+ Note: ClickHouse Cloud https://clickhouse.com/cloud supports encryption with customer-managed keys.
1500
+ -->
1501
+ <encryption_codecs>
1502
+ <!-- aes_128_gcm_siv -->
1503
+ <!-- Example of getting hex key from env -->
1504
+ <!-- the code should use this key and throw an exception if its length is not 16 bytes -->
1505
+ <!-- key_hex from_env="..."></key_hex -->
1506
+
1507
+ <!-- Example of multiple hex keys. They can be imported from env or be written down in config -->
1508
+ <!-- the code should use these keys and throw an exception if their length is not 16 bytes -->
1509
+ <!-- key_hex id="0">...</key_hex -->
1510
+ <!-- key_hex id="1" from_env=".."></key_hex -->
1511
+ <!-- key_hex id="2">...</key_hex -->
1512
+ <!-- current_key_id>2</current_key_id -->
1513
+
1514
+ <!-- Example of getting hex key from config -->
1515
+ <!-- the code should use this key and throw an exception if its length is not 16 bytes -->
1516
+ <!-- key>...</key -->
1517
+
1518
+ <!-- example of adding nonce -->
1519
+ <!-- nonce>...</nonce -->
1520
+
1521
+ <!-- /aes_128_gcm_siv -->
1522
+ </encryption_codecs>
1523
+
1524
+ <!-- Allow to execute distributed DDL queries (CREATE, DROP, ALTER, RENAME) on cluster.
1525
+ Works only if ZooKeeper is enabled. Comment it if such functionality isn't required.
1526
+ Note: ClickHouse Cloud https://clickhouse.com/cloud always runs DDL queries on cluster.
1527
+ -->
1528
+ <distributed_ddl>
1529
+ <!-- Path in ZooKeeper to queue with DDL queries -->
1530
+ <path>/clickhouse/task_queue/ddl</path>
1531
+ <!-- Path in ZooKeeper to store running DDL hosts -->
1532
+ <replicas_path>/clickhouse/task_queue/replicas</replicas_path>
1533
+
1534
+ <!-- Settings from this profile will be used to execute DDL queries -->
1535
+ <!-- <profile>default</profile> -->
1536
+
1537
+ <!-- Controls how many ON CLUSTER queries can be run simultaneously. -->
1538
+ <!-- <pool_size>1</pool_size> -->
1539
+
1540
+ <!--
1541
+ Cleanup settings (active tasks will not be removed)
1542
+ -->
1543
+
1544
+ <!-- Controls task TTL (default 1 week) -->
1545
+ <!-- <task_max_lifetime>604800</task_max_lifetime> -->
1546
+
1547
+ <!-- Controls how often cleanup should be performed (in seconds) -->
1548
+ <!-- <cleanup_delay_period>60</cleanup_delay_period> -->
1549
+
1550
+ <!-- Controls how many tasks could be in the queue -->
1551
+ <!-- <max_tasks_in_queue>1000</max_tasks_in_queue> -->
1552
+
1553
+ <!-- Host name of the current node. If specified, will only compare and not resolve hostnames inside the DDL tasks -->
1554
+ <!-- <host_name>replica</host_name> -->
1555
+ </distributed_ddl>
1556
+
1557
+ <!-- Workload scheduling: used to regulate how resources are utilized and shared between merges, mutations and other workloads.
1558
+ Specified value is used as `workload` setting value for background merge or mutation.
1559
+ -->
1560
+ <!--
1561
+ <merge_workload>merges_and_mutations</merge_workload>
1562
+ <mutation_workload>merges_and_mutations</mutation_workload>
1563
+ -->
1564
+
1565
+ <!-- Workload scheduling: throw or provide unlimited access to resource given unknown `workload` query setting -->
1566
+ <!-- <throw_on_unknown_workload>true</throw_on_unknown_workload> -->
1567
+
1568
+ <!-- Workload scheduling: if enabled during CPU overloaded periods, long-running queries will downscale to lower number of threads dynamically.
1569
+ Ensures more fair CPU resource distribution.
1570
+ -->
1571
+ <!--
1572
+ <cpu_slot_preemption>true</cpu_slot_preemption>
1573
+ <cpu_slot_quantum_ns>10000000</cpu_slot_quantum_ns>
1574
+ <cpu_slot_preemption_timeout_ms>1000</cpu_slot_preemption_timeout_ms>
1575
+ -->
1576
+
1577
+ <!-- Settings to fine-tune MergeTree tables. See documentation in source code, in MergeTreeSettings.h -->
1578
+ <!--
1579
+ <merge_tree>
1580
+ <max_suspicious_broken_parts>5</max_suspicious_broken_parts>
1581
+ </merge_tree>
1582
+ -->
1583
+
1584
+ <!-- Settings to fine-tune ReplicatedMergeTree tables. See documentation in source code, in MergeTreeSettings.h
1585
+ Note: ClickHouse Cloud https://clickhouse.com/cloud has a SharedMergeTree engine that does not require fine-tuning.
1586
+ -->
1587
+ <!--
1588
+ <replicated_merge_tree>
1589
+ <max_replicated_fetches_network_bandwidth>1000000000</max_replicated_fetches_network_bandwidth>
1590
+ </replicated_merge_tree>
1591
+ -->
1592
+
1593
+ <!-- Settings to fine-tune DatabaseReplicatedSettings. See documentation in source code, in DatabaseReplicatedSettings.h -->
1594
+ <!--
1595
+ <database_replicated>
1596
+ <max_broken_tables_ratio>1</max_broken_tables_ratio>
1597
+ <max_replication_lag_to_enqueue>50</max_replication_lag_to_enqueue>
1598
+ <wait_entry_commited_timeout_sec>3600</wait_entry_commited_timeout_sec>
1599
+ <collection_name>default_collection</collection_name>
1600
+ <check_consistency>true</check_consistency>
1601
+ <max_retries_before_automatic_recovery>10</max_retries_before_automatic_recovery>
1602
+ </database_replicated>
1603
+ -->
1604
+
1605
+ <!-- Settings to fine-tune Distributed tables. See documentation in source code, in DistributedSettings.h -->
1606
+ <!--
1607
+ <distributed>
1608
+ <flush_on_detach>false</flush_on_detach>
1609
+ </distributed>
1610
+ -->
1611
+
1612
+ <!-- Protection from accidental DROP.
1613
+ If size of a MergeTree table is greater than max_table_size_to_drop (in bytes) than table could not be dropped with any DROP query.
1614
+ If you want do delete one table and don't want to change clickhouse-server config, you could create special file <clickhouse-path>/flags/force_drop_table and make DROP once.
1615
+ By default max_table_size_to_drop is 50GB; max_table_size_to_drop=0 allows to DROP any tables.
1616
+ The same for max_partition_size_to_drop.
1617
+ Uncomment to disable protection.
1618
+ -->
1619
+ <!-- <max_table_size_to_drop>0</max_table_size_to_drop> -->
1620
+ <!-- <max_partition_size_to_drop>0</max_partition_size_to_drop> -->
1621
+
1622
+ <!-- Example of parameters for GraphiteMergeTree table engine -->
1623
+ <graphite_rollup_example>
1624
+ <pattern>
1625
+ <regexp>click_cost</regexp>
1626
+ <function>any</function>
1627
+ <retention>
1628
+ <age>0</age>
1629
+ <precision>3600</precision>
1630
+ </retention>
1631
+ <retention>
1632
+ <age>86400</age>
1633
+ <precision>60</precision>
1634
+ </retention>
1635
+ </pattern>
1636
+ <default>
1637
+ <function>max</function>
1638
+ <retention>
1639
+ <age>0</age>
1640
+ <precision>60</precision>
1641
+ </retention>
1642
+ <retention>
1643
+ <age>3600</age>
1644
+ <precision>300</precision>
1645
+ </retention>
1646
+ <retention>
1647
+ <age>86400</age>
1648
+ <precision>3600</precision>
1649
+ </retention>
1650
+ </default>
1651
+ </graphite_rollup_example>
1652
+
1653
+ <!-- Directory in <clickhouse-path> containing schema files for various input formats.
1654
+ The directory will be created if it doesn't exist.
1655
+ -->
1656
+ <format_schema_path>/var/lib/clickhouse/format_schemas/</format_schema_path>
1657
+
1658
+ <!-- Directory containing the proto files for the well-known Protobuf types.
1659
+ -->
1660
+ <google_protos_path>/usr/share/clickhouse/protos/</google_protos_path>
1661
+
1662
+ <!-- Default query masking rules, matching lines would be replaced with something else in the logs
1663
+ (both text logs and system.query_log).
1664
+ name - name for the rule (optional)
1665
+ regexp - RE2 compatible regular expression (mandatory)
1666
+ replace - substitution string for sensitive data (optional, by default - six asterisks)
1667
+ <query_masking_rules>
1668
+ <rule>
1669
+ <name>hide encrypt/decrypt arguments</name>
1670
+ <regexp>((?:aes_)?(?:encrypt|decrypt)(?:_mysql)?)\s*\(\s*(?:'(?:\\'|.)+'|.*?)\s*\)</regexp>
1671
+ <replace>\1(???)</replace>
1672
+ </rule>
1673
+ </query_masking_rules> -->
1674
+
1675
+ <!-- Uncomment to use custom http handlers.
1676
+
1677
+ rules are checked from top to bottom, first match runs the handler
1678
+ url - to match request URL (path and query string only), you can use 'regex:' prefix to use regex match(optional)
1679
+ full_url - to match request **full** URL (schema, host:port, path and query string), you can use 'regex:' prefix to use regex match(optional)
1680
+ note, ClickHouse does not support "virtual hosts" so "host" will contain IP address not the "Host" header.
1681
+ empty_query_string - check that there is no query string in the URL
1682
+ methods - to match request method, you can use commas to separate multiple method matches(optional)
1683
+ headers - to match request headers, match each child element (child element name is header name), you can use 'regex:' prefix to use regex match(optional)
1684
+
1685
+ handler is request handler
1686
+ type - supported types: static, dynamic_query_handler, predefined_query_handler, redirect
1687
+ query - use with predefined_query_handler type, executes query when the handler is called
1688
+ query_param_name - use with dynamic_query_handler type, extracts and executes the value corresponding to the <query_param_name> value in HTTP request params
1689
+ status - use with static type, response status code
1690
+ content_type - use with static type, response content-type
1691
+ response_content - use with static type, response content sent to client, when using the prefix 'file://' or 'config://', find the content from the file or configuration send to client.
1692
+ url - a location for redirect
1693
+
1694
+ Along with a list of rules, you can specify <defaults/> which means - enable all the default handlers.
1695
+
1696
+ <http_handlers>
1697
+ <rule>
1698
+ <url>/</url>
1699
+ <methods>POST,GET</methods>
1700
+ <headers><pragma>no-cache</pragma></headers>
1701
+ <handler>
1702
+ <type>dynamic_query_handler</type>
1703
+ <query_param_name>query</query_param_name>
1704
+ </handler>
1705
+ </rule>
1706
+
1707
+ <rule>
1708
+ <url>/predefined_query</url>
1709
+ <methods>POST,GET</methods>
1710
+ <handler>
1711
+ <type>predefined_query_handler</type>
1712
+ <query>SELECT * FROM system.settings</query>
1713
+ </handler>
1714
+ </rule>
1715
+
1716
+ <rule>
1717
+ <url>/play</url>
1718
+ <handler>
1719
+ <type>redirect</type>
1720
+ <location>/play?user=play</location>
1721
+ </handler>
1722
+ </rule>
1723
+
1724
+ <rule>
1725
+ <full_url>regex:http?://[^/]/dashboard</full_url>
1726
+ <handler>
1727
+ <type>redirect</type>
1728
+ <location>/dashboard?user=play</location>
1729
+ </handler>
1730
+ </rule>
1731
+
1732
+ <rule>
1733
+ <handler>
1734
+ <type>static</type>
1735
+ <status>200</status>
1736
+ <content_type>text/plain; charset=UTF-8</content_type>
1737
+ <response_content>config://http_server_default_response</response_content>
1738
+ </handler>
1739
+ </rule>
1740
+ </http_handlers>
1741
+ -->
1742
+
1743
+ <send_crash_reports>
1744
+ <!-- Setting <enabled> to true allows sending crash reports to -->
1745
+ <!-- the ClickHouse core developers team. -->
1746
+ <!-- Doing so at least in pre-production environments is highly appreciated -->
1747
+ <!-- The reports are anonymized -->
1748
+ <enabled>true</enabled>
1749
+ <send_logical_errors>true</send_logical_errors>
1750
+ <endpoint>https://crash.clickhouse.com/</endpoint>
1751
+ </send_crash_reports>
1752
+
1753
+ <!-- Uncomment to disable ClickHouse internal DNS caching. -->
1754
+ <!-- <disable_internal_dns_cache>1</disable_internal_dns_cache> -->
1755
+
1756
+ <!-- You can also configure rocksdb like this: -->
1757
+ <!-- Full list of options:
1758
+ - options:
1759
+ - https://github.com/facebook/rocksdb/blob/4b013dcbed2df84fde3901d7655b9b91c557454d/include/rocksdb/options.h#L1452
1760
+ - column_family_options:
1761
+ - https://github.com/facebook/rocksdb/blob/4b013dcbed2df84fde3901d7655b9b91c557454d/include/rocksdb/options.h#L66
1762
+ - block_based_table_options:
1763
+ - https://github.com/facebook/rocksdb/blob/4b013dcbed2df84fde3901d7655b9b91c557454d/table/block_based/block_based_table_factory.cc#L228
1764
+ - https://github.com/facebook/rocksdb/blob/4b013dcbed2df84fde3901d7655b9b91c557454d/include/rocksdb/table.h#L129
1765
+ -->
1766
+ <!--
1767
+ <rocksdb>
1768
+ <options>
1769
+ <max_background_jobs>8</max_background_jobs>
1770
+ <info_log_level>DEBUG_LEVEL</info_log_level>
1771
+ </options>
1772
+ <column_family_options>
1773
+ <num_levels>2</num_levels>
1774
+ </column_family_options>
1775
+ <block_based_table_options>
1776
+ <block_size>1024</block_size>
1777
+ </block_based_table_options>
1778
+ <tables>
1779
+ <table>
1780
+ <name>TABLE</name>
1781
+ <options>
1782
+ <max_background_jobs>8</max_background_jobs>
1783
+ </options>
1784
+ <column_family_options>
1785
+ <num_levels>2</num_levels>
1786
+ </column_family_options>
1787
+ <block_based_table_options>
1788
+ <block_size>1024</block_size>
1789
+ </block_based_table_options>
1790
+ </table>
1791
+ </tables>
1792
+ </rocksdb>
1793
+ -->
1794
+
1795
+ <!-- <kafka> -->
1796
+ <!-- Global configuration properties -->
1797
+ <!--
1798
+ NOTE: statistics should be consumed, otherwise it creates too many
1799
+ entries in the queue that leads to memory leak and slow shutdown.
1800
+ default value: 0
1801
+ <statistics_interval_ms>3000</statistics_interval_ms>
1802
+ -->
1803
+
1804
+ <!-- Topic configuration properties -->
1805
+ <!--
1806
+ <kafka_topic>
1807
+ <name>football</name>
1808
+ <request_timeout_ms>6000</request_timeout_ms>
1809
+ </kafka_topic>
1810
+ -->
1811
+
1812
+ <!-- Producer configuration -->
1813
+ <!--
1814
+ <producer>
1815
+ <compression_codec>gzip</compression_codec>
1816
+ <kafka_topic>
1817
+ <name>football</name>
1818
+ <request_timeout_ms>6000</request_timeout_ms>
1819
+ </kafka_topic>
1820
+ </producer>
1821
+ -->
1822
+
1823
+ <!-- Consumer configuration -->
1824
+ <!--
1825
+ <consumer>
1826
+ <enable_auto_commit>true</enable_auto_commit>
1827
+ </consumer>
1828
+ -->
1829
+ <!-- </kafka> -->
1830
+
1831
+ <!-- Note: ClickHouse Cloud https://clickhouse.com/cloud provides automatic backups to object storage. -->
1832
+ <backups>
1833
+ <allowed_path>backups</allowed_path>
1834
+
1835
+ <!-- If the BACKUP command fails and this setting is true then the files
1836
+ copied before the failure will be removed automatically.
1837
+ -->
1838
+ <remove_backup_files_after_failure>true</remove_backup_files_after_failure>
1839
+ </backups>
1840
+
1841
+ <!-- This allows to disable exposing addresses in stack traces for security reasons.
1842
+ Please be aware that it does not improve security much, but makes debugging much harder.
1843
+ The addresses that are small offsets from zero will be displayed nevertheless to show nullptr dereferences.
1844
+ Regardless of this configuration, the addresses are visible in the system.stack_trace and system.trace_log tables
1845
+ if the user has access to these tables.
1846
+ I don't recommend to change this setting.
1847
+ <show_addresses_in_stack_traces>false</show_addresses_in_stack_traces>
1848
+ -->
1849
+
1850
+ <!-- On Linux systems this can control the behavior of OOM killer.
1851
+ <oom_score>-1000</oom_score>
1852
+ -->
1853
+
1854
+ <!-- Delay (in seconds) to wait for unfinished queries before force exit -->
1855
+ <!-- <shutdown_wait_unfinished>5</shutdown_wait_unfinished> -->
1856
+
1857
+ <!-- If set true ClickHouse will wait for running queries finish before shutdown. -->
1858
+ <!-- <shutdown_wait_unfinished_queries>false</shutdown_wait_unfinished_queries> -->
1859
+
1860
+ <!-- Disables the checksum of the clickhouse-server binary when performing integrity checks.
1861
+ Not recommended in production environments, but there may be legitimate use-cases, such as
1862
+ when instrumenting ClickHouse via eBPF probes.
1863
+
1864
+ <skip_binary_checksum_checks>false</skip_binary_checksum_checks>
1865
+ -->
1866
+ </clickhouse>
platform/dbops/archive/databases_old/data/clickhouse/config/users.xml ADDED
@@ -0,0 +1,121 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ <clickhouse>
2
+ <!-- See also the files in users.d directory where the settings can be overridden. -->
3
+
4
+ <!-- Profiles of settings. -->
5
+ <profiles>
6
+ <!-- Default settings. -->
7
+ <default>
8
+ <!-- <async_insert>1</async_insert> -->
9
+ </default>
10
+
11
+ <!-- Profile that allows only read queries. -->
12
+ <readonly>
13
+ <readonly>1</readonly>
14
+ </readonly>
15
+ </profiles>
16
+
17
+ <!-- Users and ACL. -->
18
+ <users>
19
+ <!-- If user name was not specified, 'default' user is used. -->
20
+ <default>
21
+ <!-- See also the files in users.d directory where the password can be overridden.
22
+
23
+ Password could be specified in plaintext or in SHA256 (in hex format).
24
+
25
+ If you want to specify password in plaintext (not recommended), place it in 'password' element.
26
+ Example: <password>qwerty</password>.
27
+ Password could be empty.
28
+
29
+ If you want to specify SHA256, place it in 'password_sha256_hex' element.
30
+ Example: <password_sha256_hex>65e84be33532fb784c48129675f9eff3a682b27168c0ea744b2cf58ee02337c5</password_sha256_hex>
31
+ Restrictions of SHA256: impossibility to connect to ClickHouse using MySQL JS client (as of July 2019).
32
+
33
+ If you want to specify double SHA1, place it in 'password_double_sha1_hex' element.
34
+ Example: <password_double_sha1_hex>e395796d6546b1b65db9d665cd43f0e858dd4303</password_double_sha1_hex>
35
+
36
+ If you want to specify a previously defined LDAP server (see 'ldap_servers' in the main config) for authentication,
37
+ place its name in 'server' element inside 'ldap' element.
38
+ Example: <ldap><server>my_ldap_server</server></ldap>
39
+
40
+ If you want to authenticate the user via Kerberos (assuming Kerberos is enabled, see 'kerberos' in the main config),
41
+ place 'kerberos' element instead of 'password' (and similar) elements.
42
+ The name part of the canonical principal name of the initiator must match the user name for authentication to succeed.
43
+ You can also place 'realm' element inside 'kerberos' element to further restrict authentication to only those requests
44
+ whose initiator's realm matches it.
45
+ Example: <kerberos />
46
+ Example: <kerberos><realm>EXAMPLE.COM</realm></kerberos>
47
+
48
+ How to generate decent password:
49
+ Execute: PASSWORD=$(base64 < /dev/urandom | head -c8); echo "$PASSWORD"; echo -n "$PASSWORD" | sha256sum | tr -d '-'
50
+ In first line will be password and in second - corresponding SHA256.
51
+
52
+ How to generate double SHA1:
53
+ Execute: PASSWORD=$(base64 < /dev/urandom | head -c8); echo "$PASSWORD"; echo -n "$PASSWORD" | sha1sum | tr -d '-' | xxd -r -p | sha1sum | tr -d '-'
54
+ In first line will be password and in second - corresponding double SHA1.
55
+ -->
56
+ <password></password>
57
+
58
+ <!-- List of networks with open access.
59
+
60
+ To open access from everywhere, specify:
61
+ <ip>::/0</ip>
62
+
63
+ To open access only from localhost, specify:
64
+ <ip>::1</ip>
65
+ <ip>127.0.0.1</ip>
66
+
67
+ Each element of list has one of the following forms:
68
+ <ip> IP-address or network mask. Examples: 213.180.204.3 or 10.0.0.1/8 or 10.0.0.1/255.255.255.0
69
+ 2a02:6b8::3 or 2a02:6b8::3/64 or 2a02:6b8::3/ffff:ffff:ffff:ffff::.
70
+ <host> Hostname. Example: server01.clickhouse.com.
71
+ To check access, DNS query is performed, and all received addresses compared to peer address.
72
+ <host_regexp> Regular expression for host names. Example, ^server\d\d-\d\d-\d\.clickhouse\.com$
73
+ To check access, DNS PTR query is performed for peer address and then regexp is applied.
74
+ Then, for result of PTR query, another DNS query is performed and all received addresses compared to peer address.
75
+ Strongly recommended that regexp is ends with $
76
+ All results of DNS requests are cached till server restart.
77
+ -->
78
+ <networks>
79
+ <ip>::/0</ip>
80
+ </networks>
81
+
82
+ <!-- Settings profile for user. -->
83
+ <profile>default</profile>
84
+
85
+ <!-- Quota for user. -->
86
+ <quota>default</quota>
87
+
88
+ <!-- User can create other users and grant rights to them. -->
89
+ <access_management>1</access_management>
90
+
91
+ <!-- User can manipulate named collections. -->
92
+ <named_collection_control>1</named_collection_control>
93
+
94
+ <!-- User permissions can be granted here -->
95
+ <!--
96
+ <grants>
97
+ <query>GRANT ALL ON *.*</query>
98
+ </grants>
99
+ -->
100
+ </default>
101
+ </users>
102
+
103
+ <!-- Quotas. -->
104
+ <quotas>
105
+ <!-- Name of quota. -->
106
+ <default>
107
+ <!-- Limits for time interval. You could specify many intervals with different limits. -->
108
+ <interval>
109
+ <!-- Length of interval. -->
110
+ <duration>3600</duration>
111
+
112
+ <!-- No limits. Just calculate resource usage for time interval. -->
113
+ <queries>0</queries>
114
+ <errors>0</errors>
115
+ <result_rows>0</result_rows>
116
+ <read_rows>0</read_rows>
117
+ <execution_time>0</execution_time>
118
+ </interval>
119
+ </default>
120
+ </quotas>
121
+ </clickhouse>
platform/dbops/archive/databases_old/data/clickhouse/data/metadata/INFORMATION_SCHEMA.sql ADDED
@@ -0,0 +1,2 @@
 
 
 
1
+ ATTACH DATABASE INFORMATION_SCHEMA
2
+ ENGINE = Memory
platform/dbops/archive/databases_old/data/clickhouse/data/metadata/default.sql ADDED
@@ -0,0 +1,2 @@
 
 
 
1
+ ATTACH DATABASE _ UUID 'bc408257-bbcf-4cfc-8ec5-9f73348141b5'
2
+ ENGINE = Atomic
platform/dbops/archive/databases_old/data/clickhouse/data/metadata/information_schema.sql ADDED
@@ -0,0 +1,2 @@
 
 
 
1
+ ATTACH DATABASE information_schema
2
+ ENGINE = Memory
platform/dbops/archive/databases_old/data/clickhouse/data/metadata/system.sql ADDED
@@ -0,0 +1,2 @@
 
 
 
1
+ ATTACH DATABASE _ UUID 'a988ca13-d250-436e-a034-8df2bfdad652'
2
+ ENGINE = Atomic
platform/dbops/archive/databases_old/data/clickhouse/data/preprocessed_configs/config.xml ADDED
@@ -0,0 +1,1888 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ <!-- This file was generated automatically.
2
+ Do not edit it: it is likely to be discarded and generated again before it's read next time.
3
+ Files used to generate this file:
4
+ /data/data/clickhouse/config/config.xml
5
+ /data/data/clickhouse/config/config.d/data-paths.xml
6
+ /data/data/clickhouse/config/config.d/logger.xml
7
+ /data/data/clickhouse/config/config.d/openssl.xml
8
+ /data/data/clickhouse/config/config.d/user-directories.xml -->
9
+
10
+ <!--
11
+ NOTE: User and query level settings are set up in "users.xml" file.
12
+ If you have accidentally specified user-level settings here, server won't start.
13
+ You can either move the settings to the right place inside "users.xml" file
14
+ or add <skip_check_for_incorrect_settings>1</skip_check_for_incorrect_settings> here.
15
+ -->
16
+ <clickhouse>
17
+ <logger>
18
+ <!-- Possible levels [1]:
19
+
20
+ - none (turns off logging)
21
+ - fatal
22
+ - critical
23
+ - error
24
+ - warning
25
+ - notice
26
+ - information
27
+ - debug
28
+ - trace
29
+ - test (not for production usage)
30
+
31
+ [1]: https://github.com/pocoproject/poco/blob/poco-1.9.4-release/Foundation/include/Poco/Logger.h#L105-L114
32
+ -->
33
+ <level>trace</level>
34
+
35
+ <!-- Startup level is used to set the root logger level at server startup.
36
+ It is useful for debugging startup issues.
37
+ The root logger level will be reset to the default level after the server is fully initialized -->
38
+ <!-- <startupLevel>trace</startupLevel> -->
39
+ <!-- Shutdown level is used to set the root logger level at server Shutdown.
40
+ It is useful for debugging shutdown issues -->
41
+ <!-- <shutdownLevel>trace</shutdownLevel> -->
42
+
43
+ <log>/data/data/clickhouse/logs/clickhouse-server.log</log>
44
+ <errorlog>/data/data/clickhouse/logs/clickhouse-server.err.log</errorlog>
45
+ <!-- Rotation policy
46
+ See https://github.com/pocoproject/poco/blob/poco-1.9.4-release/Foundation/include/Poco/FileChannel.h#L54-L85
47
+ -->
48
+ <size>1000M</size>
49
+ <count>10</count>
50
+
51
+ <!-- <console>1</console> --> <!-- Default behavior is autodetection (log to console if not daemon mode and is tty) -->
52
+ <!-- <console_log_level>trace</console_log_level> -->
53
+
54
+ <!-- <use_syslog>0</use_syslog> -->
55
+ <!-- <syslog_level>trace</syslog_level> -->
56
+
57
+ <!-- <stream_compress>0</stream_compress> -->
58
+
59
+ <!-- By default logging happens in different threads so it does not block the execution
60
+ If the amount of messages waiting to be flushed is too large, new messages will be dropped.
61
+ -->
62
+ <!-- <async>1</async> -->
63
+ <!-- <async_queue_max_size>100000</async_queue_max_size> -->
64
+
65
+ <!-- Per level overrides (legacy):
66
+
67
+ For example to suppress logging of the ConfigReloader you can use:
68
+ NOTE: levels.logger is reserved, see below.
69
+ -->
70
+ <!--
71
+ <levels>
72
+ <ConfigReloader>none</ConfigReloader>
73
+ </levels>
74
+ -->
75
+
76
+ <!-- Per level overrides:
77
+
78
+ For example to suppress logging of the RBAC for default user you can use:
79
+ (But please note that the logger name maybe changed from version to version, even after minor upgrade)
80
+ -->
81
+ <!--
82
+ <levels>
83
+ <logger>
84
+ <name>ContextAccess (default)</name>
85
+ <level>none</level>
86
+ </logger>
87
+ <logger>
88
+ <name>DatabaseOrdinary (test)</name>
89
+ <level>none</level>
90
+ </logger>
91
+ </levels>
92
+ -->
93
+ <!-- Structured log formatting:
94
+ You can specify log format(for now, JSON only). In that case, the console log will be printed
95
+ in specified format like JSON.
96
+ For example, as below:
97
+
98
+ {"date_time":"1650918987.180175","thread_name":"#1","thread_id":"254545","level":"Trace","query_id":"","logger_name":"BaseDaemon","message":"Received signal 2","source_file":"../base/daemon/BaseDaemon.cpp; virtual void SignalListener::run()","source_line":"192"}
99
+ {"date_time_utc":"2024-11-06T09:06:09Z","thread_name":"#1","thread_id":"254545","level":"Trace","query_id":"","logger_name":"BaseDaemon","message":"Received signal 2","source_file":"../base/daemon/BaseDaemon.cpp; virtual void SignalListener::run()","source_line":"192"}
100
+ To enable JSON logging support, please uncomment the entire <formatting> tag below.
101
+
102
+ a) You can modify key names by changing values under tag values inside <names> tag.
103
+ For example, to change DATE_TIME to MY_DATE_TIME, you can do like:
104
+ <date_time>MY_DATE_TIME</date_time>
105
+ <date_time_utc>MY_UTC_DATE_TIME</date_time_utc>
106
+ b) You can stop unwanted log properties from appearing in logs. To do so, you can simply comment out (recommended)
107
+ that property from this file.
108
+ For example, if you do not want your log to print query_id, you can comment out only <query_id> tag.
109
+ However, if you comment out all the tags under <names>, the program will print default values for as
110
+ below.
111
+ -->
112
+ <!-- <formatting>
113
+ <type>json</type>
114
+ <names>
115
+ <date_time>date_time</date_time>
116
+ <date_time_utc>date_time_utc</date_time_utc>
117
+ <thread_name>thread_name</thread_name>
118
+ <thread_id>thread_id</thread_id>
119
+ <level>level</level>
120
+ <query_id>query_id</query_id>
121
+ <logger_name>logger_name</logger_name>
122
+ <message>message</message>
123
+ <source_file>source_file</source_file>
124
+ <source_line>source_line</source_line>
125
+ </names>
126
+ </formatting> -->
127
+ </logger>
128
+
129
+ <url_scheme_mappers>
130
+ <s3>
131
+ <to>https://{bucket}.s3.amazonaws.com</to>
132
+ </s3>
133
+ <gs>
134
+ <to>https://storage.googleapis.com/{bucket}</to>
135
+ </gs>
136
+ <oss>
137
+ <to>https://{bucket}.oss.aliyuncs.com</to>
138
+ </oss>
139
+ </url_scheme_mappers>
140
+
141
+ <!-- Add headers to response in options request. OPTIONS method is used in CORS preflight requests. -->
142
+ <http_options_response>
143
+ <header>
144
+ <name>Access-Control-Allow-Origin</name>
145
+ <value>*</value>
146
+ </header>
147
+ <header>
148
+ <name>Access-Control-Allow-Headers</name>
149
+ <value>origin, x-requested-with, x-clickhouse-format, x-clickhouse-user, x-clickhouse-key, Authorization</value>
150
+ </header>
151
+ <header>
152
+ <name>Access-Control-Allow-Methods</name>
153
+ <value>POST, GET, OPTIONS</value>
154
+ </header>
155
+ <header>
156
+ <name>Access-Control-Max-Age</name>
157
+ <value>86400</value>
158
+ </header>
159
+ </http_options_response>
160
+
161
+ <!-- The name that will be shown in the clickhouse-client.
162
+ By default, anything with "production" will be highlighted in red in query prompt.
163
+ -->
164
+ <!--display_name>production</display_name-->
165
+
166
+ <!-- Port for HTTP API. See also 'https_port' for secure connections.
167
+ This interface is also used by ODBC and JDBC drivers (DataGrip, Dbeaver, ...)
168
+ and by most of the web interfaces (embedded UI, Grafana, Redash, ...).
169
+ -->
170
+ <http_port>8123</http_port>
171
+
172
+ <!-- Port for interaction by native protocol with:
173
+ - clickhouse-client and other native ClickHouse tools (clickhouse-benchmark);
174
+ - clickhouse-server with other clickhouse-servers for distributed query processing;
175
+ - ClickHouse drivers and applications supporting native protocol
176
+ (this protocol is also informally called as "the TCP protocol");
177
+ See also 'tcp_port_secure' for secure connections.
178
+ -->
179
+ <tcp_port>9000</tcp_port>
180
+
181
+ <!-- Chunked capabilities for native protocol by server.
182
+ Can be enabled separately for send and receive channels.
183
+ Supported modes:
184
+ - chunked - server requires from client to have chunked enabled;
185
+ - chunked_optional - server supports both chunked and notchunked protocol;
186
+ - notchunked - server requires from client notchunked protocol (current default);
187
+ - notchunked_optional - server supports both chunked and notchunked protocol.
188
+ -->
189
+ <!--
190
+ <proto_caps>
191
+ <send>notchunked_optional</send>
192
+ <recv>notchunked_optional</recv>
193
+ </proto_caps>
194
+ -->
195
+
196
+ <!-- Compatibility with MySQL protocol.
197
+ ClickHouse will pretend to be MySQL for applications connecting to this port.
198
+ -->
199
+ <mysql_port>9004</mysql_port>
200
+
201
+ <!-- Compatibility with PostgreSQL protocol.
202
+ ClickHouse will pretend to be PostgreSQL for applications connecting to this port.
203
+ -->
204
+ <postgresql_port>9005</postgresql_port>
205
+
206
+ <!-- HTTP API with TLS (HTTPS).
207
+ You have to configure certificate to enable this interface.
208
+ See the OpenSSL section below.
209
+ -->
210
+ <!-- <https_port>8443</https_port> -->
211
+
212
+ <!-- Native interface with TLS.
213
+ You have to configure certificate to enable this interface.
214
+ See the OpenSSL section below.
215
+ -->
216
+ <!-- <tcp_port_secure>9440</tcp_port_secure> -->
217
+
218
+ <!-- Native interface wrapped with PROXYv1 protocol
219
+ PROXYv1 header is sent for every connection.
220
+ ClickHouse will extract information about proxy-forwarded client address from the header.
221
+ -->
222
+ <!-- <tcp_with_proxy_port>9011</tcp_with_proxy_port> -->
223
+
224
+ <!-- Port for communication between replicas. Used for data exchange.
225
+ It provides low-level data access between servers.
226
+ This port should not be accessible from untrusted networks.
227
+ See also 'interserver_http_credentials'.
228
+ Data transferred over connections to this port should not go through untrusted networks.
229
+ See also 'interserver_https_port'.
230
+ -->
231
+ <interserver_http_port>9009</interserver_http_port>
232
+
233
+ <!-- Port for communication between replicas with TLS.
234
+ You have to configure certificate to enable this interface.
235
+ See the OpenSSL section below.
236
+ See also 'interserver_http_credentials'.
237
+ -->
238
+ <!-- <interserver_https_port>9010</interserver_https_port> -->
239
+
240
+ <!-- Hostname that is used by other replicas to request this server.
241
+ If not specified, then it is determined analogous to 'hostname -f' command.
242
+ This setting could be used to switch replication to another network interface
243
+ (the server may be connected to multiple networks via multiple addresses)
244
+ -->
245
+
246
+ <!--
247
+ <interserver_http_host>example.clickhouse.com</interserver_http_host>
248
+ -->
249
+
250
+ <!-- Port for the SSH server which allows to connect and execute
251
+ queries in an interactive fashion using the embedded client over the PTY.
252
+ -->
253
+ <!-- <tcp_ssh_port>9022</tcp_ssh_port> -->
254
+
255
+ <ssh_server>
256
+ <!-- The public part of the host key will be written to the known_hosts file
257
+ on the SSH client side on the first connect.
258
+ -->
259
+ <!-- <host_rsa_key>path_to_the_ssh_key</host_rsa_key> -->
260
+ <!-- <host_ecdsa_key>path_to_the_ssh_key</host_ecdsa_key> -->
261
+ <!-- <host_ed25519_key>path_to_the_ssh_key</host_ed25519_key> -->
262
+
263
+ <!-- Unlocks the possibility to pass the client options as environment
264
+ variables in the form of: ssh -o SetEnv="key1=value1 key2=value2".
265
+ This is considered unsafe and should be used with caution.
266
+ -->
267
+ <!-- <enable_client_options_passing>false</enable_client_options_passing>-->
268
+ </ssh_server>
269
+
270
+ <!-- You can specify credentials for authentication between replicas.
271
+ This is required when interserver_https_port is accessible from untrusted networks,
272
+ and also recommended to avoid SSRF attacks from possibly compromised services in your network.
273
+ -->
274
+ <!--<interserver_http_credentials>
275
+ <user>interserver</user>
276
+ <password></password>
277
+ </interserver_http_credentials>-->
278
+
279
+ <!-- Listen specified address.
280
+ Use :: (wildcard IPv6 address), if you want to accept connections both with IPv4 and IPv6 from everywhere.
281
+ Notes:
282
+ If you open connections from wildcard address, make sure that at least one of the following measures is applied:
283
+ - server is protected by firewall and not accessible from untrusted networks;
284
+ - all users are restricted to subset of network addresses (see users.xml);
285
+ - all users have strong passwords, only secure (TLS) interfaces are accessible, or connections are only made via TLS interfaces.
286
+ - users without password have readonly access.
287
+ See also: https://www.shodan.io/search?query=clickhouse
288
+ -->
289
+ <!-- <listen_host>::</listen_host> -->
290
+
291
+
292
+ <!-- Same for hosts without support for IPv6: -->
293
+ <!-- <listen_host>0.0.0.0</listen_host> -->
294
+
295
+ <!-- Default values - try listen localhost on IPv4 and IPv6. -->
296
+ <!--
297
+ <listen_host>::1</listen_host>
298
+ <listen_host>127.0.0.1</listen_host>
299
+ -->
300
+
301
+ <!-- <interserver_listen_host>::</interserver_listen_host> -->
302
+ <!-- Listen host for communication between replicas. Used for data exchange -->
303
+ <!-- Default values - equal to listen_host -->
304
+
305
+ <!-- Don't exit if IPv6 or IPv4 networks are unavailable while trying to listen. -->
306
+ <!-- <listen_try>0</listen_try> -->
307
+
308
+ <!-- Allow multiple servers to listen on the same address:port. This is not recommended.
309
+ -->
310
+ <!-- <listen_reuse_port>0</listen_reuse_port> -->
311
+
312
+ <!-- <listen_backlog>4096</listen_backlog> -->
313
+
314
+ <!-- <max_connections>4096</max_connections> -->
315
+
316
+ <!-- For 'Connection: keep-alive' in HTTP 1.1 -->
317
+ <keep_alive_timeout>10</keep_alive_timeout>
318
+
319
+ <!-- Enable verbose output in /replicas_status handler. -->
320
+ <!-- <enable_verbose_replicas_status>true</enable_verbose_replicas_status> -->
321
+
322
+ <!-- Enable stacktrace in default http handler. -->
323
+ <!-- <enable_http_stacktrace>true</enable_http_stacktrace> -->
324
+
325
+ <!-- gRPC protocol (see src/Server/grpc_protos/clickhouse_grpc.proto for the API) -->
326
+ <!-- <grpc_port>9100</grpc_port> -->
327
+ <grpc>
328
+ <enable_ssl>false</enable_ssl>
329
+
330
+ <!-- The following two files are used only if enable_ssl=1 -->
331
+ <ssl_cert_file>/path/to/ssl_cert_file</ssl_cert_file>
332
+ <ssl_key_file>/path/to/ssl_key_file</ssl_key_file>
333
+
334
+ <!-- Whether server will request client for a certificate -->
335
+ <ssl_require_client_auth>false</ssl_require_client_auth>
336
+
337
+ <!-- The following file is used only if ssl_require_client_auth=1 -->
338
+ <ssl_ca_cert_file>/path/to/ssl_ca_cert_file</ssl_ca_cert_file>
339
+
340
+ <!-- Default transport compression type (can be overridden by client, see the transport_compression_type field in QueryInfo).
341
+ Supported algorithms: none, deflate, gzip, stream_gzip -->
342
+ <transport_compression_type>none</transport_compression_type>
343
+
344
+ <!-- Default transport compression level. Supported levels: 0..3 -->
345
+ <transport_compression_level>0</transport_compression_level>
346
+
347
+ <!-- Send/receive message size limits in bytes. -1 means unlimited -->
348
+ <max_send_message_size>-1</max_send_message_size>
349
+ <max_receive_message_size>-1</max_receive_message_size>
350
+
351
+ <!-- Enable if you want very detailed logs -->
352
+ <verbose_logs>false</verbose_logs>
353
+ </grpc>
354
+
355
+ <!-- Used with https_port and tcp_port_secure. Full ssl options list: https://github.com/ClickHouse-Extras/poco/blob/master/NetSSL_OpenSSL/include/Poco/Net/SSLManager.h#L71
356
+ Note: ClickHouse Cloud https://clickhouse.com/cloud always has secure connections configured.
357
+ -->
358
+ <openSSL>
359
+ <server> <!-- Used for https server AND secure tcp port -->
360
+ <!-- openssl req -subj "/CN=localhost" -new -newkey rsa:2048 -days 365 -nodes -x509 -keyout /etc/clickhouse-server/server.key -out /etc/clickhouse-server/server.crt -->
361
+ <!-- <certificateFile>/etc/clickhouse-server/server.crt</certificateFile>
362
+ <privateKeyFile>/etc/clickhouse-server/server.key</privateKeyFile> -->
363
+ <!-- dhparams are optional. You can delete the <dhParamsFile> element.
364
+ To generate dhparams, use the following command:
365
+ openssl dhparam -out /etc/clickhouse-server/dhparam.pem 4096
366
+ Only file format with BEGIN DH PARAMETERS is supported.
367
+ -->
368
+ <!-- <dhParamsFile>/etc/clickhouse-server/dhparam.pem</dhParamsFile>-->
369
+ <verificationMode>none</verificationMode>
370
+ <loadDefaultCAFile>true</loadDefaultCAFile>
371
+ <cacheSessions>true</cacheSessions>
372
+ <disableProtocols>sslv2,sslv3</disableProtocols>
373
+ <preferServerCiphers>true</preferServerCiphers>
374
+
375
+ <invalidCertificateHandler>
376
+ <!-- The server, in contrast to the client, cannot ask about the certificate interactively.
377
+ The only reasonable option is to reject.
378
+ -->
379
+ <name>RejectCertificateHandler</name>
380
+ </invalidCertificateHandler>
381
+
382
+ <certificateFile>/etc/clickhouse-server/server.crt</certificateFile>
383
+ <privateKeyFile>/etc/clickhouse-server/server.key</privateKeyFile>
384
+ </server>
385
+
386
+ <client> <!-- Used for connecting to https dictionary source and secured Zookeeper communication -->
387
+ <loadDefaultCAFile>true</loadDefaultCAFile>
388
+ <cacheSessions>true</cacheSessions>
389
+ <disableProtocols>sslv2,sslv3</disableProtocols>
390
+ <preferServerCiphers>true</preferServerCiphers>
391
+ <!-- Use for self-signed: <verificationMode>none</verificationMode> -->
392
+ <invalidCertificateHandler>
393
+ <!-- Use for self-signed: <name>AcceptCertificateHandler</name> -->
394
+ <name>RejectCertificateHandler</name>
395
+ </invalidCertificateHandler>
396
+ </client>
397
+
398
+
399
+ </openSSL>
400
+
401
+ <!-- Default root page on http[s] server. -->
402
+ <!--
403
+ <http_server_default_response><![CDATA[Greetings from ClickHouse!]]></http_server_default_response>
404
+ -->
405
+
406
+ <!-- The maximum number of query processing threads, excluding threads for retrieving data from remote servers, allowed to run all queries.
407
+ This is not a hard limit. In case if the limit is reached the query will still get at least one thread to run.
408
+ Query can upscale to desired number of threads during execution if more threads become available.
409
+ -->
410
+ <concurrent_threads_soft_limit_num>0</concurrent_threads_soft_limit_num>
411
+ <concurrent_threads_soft_limit_ratio_to_cores>2</concurrent_threads_soft_limit_ratio_to_cores>
412
+ <concurrent_threads_scheduler>fair_round_robin</concurrent_threads_scheduler>
413
+
414
+ <!-- Maximum number of concurrent queries. -->
415
+ <max_concurrent_queries>1000</max_concurrent_queries>
416
+
417
+ <!-- Maximum memory usage (resident set size) for server process.
418
+ Zero value or unset means default. Default is "max_server_memory_usage_to_ram_ratio" of available physical RAM.
419
+ If the value is larger than "max_server_memory_usage_to_ram_ratio" of available physical RAM, it will be cut down.
420
+
421
+ The constraint is checked on query execution time.
422
+ If a query tries to allocate memory and the current memory usage plus allocation is greater
423
+ than specified threshold, exception will be thrown.
424
+
425
+ It is not practical to set this constraint to small values like just a few gigabytes,
426
+ because memory allocator will keep this amount of memory in caches and the server will deny service of queries.
427
+ -->
428
+ <max_server_memory_usage>0</max_server_memory_usage>
429
+
430
+ <!-- Maximum number of threads in the Global thread pool.
431
+ This will default to a maximum of 10000 threads if not specified.
432
+ This setting will be useful in scenarios where there are a large number
433
+ of distributed queries that are running concurrently but are idling most
434
+ of the time, in which case a higher number of threads might be required.
435
+ -->
436
+
437
+ <max_thread_pool_size>10000</max_thread_pool_size>
438
+
439
+ <!-- Configure other thread pools: -->
440
+ <!--
441
+ <background_buffer_flush_schedule_pool_size>16</background_buffer_flush_schedule_pool_size>
442
+ <background_pool_size>16</background_pool_size>
443
+ <background_merges_mutations_concurrency_ratio>2</background_merges_mutations_concurrency_ratio>
444
+ <background_merges_mutations_scheduling_policy>round_robin</background_merges_mutations_scheduling_policy>
445
+ <background_move_pool_size>8</background_move_pool_size>
446
+ <background_fetches_pool_size>8</background_fetches_pool_size>
447
+ <background_common_pool_size>8</background_common_pool_size>
448
+ <background_schedule_pool_size>128</background_schedule_pool_size>
449
+ <background_message_broker_schedule_pool_size>16</background_message_broker_schedule_pool_size>
450
+ <background_distributed_schedule_pool_size>16</background_distributed_schedule_pool_size>
451
+ <tables_loader_foreground_pool_size>0</tables_loader_foreground_pool_size>
452
+ <tables_loader_background_pool_size>0</tables_loader_background_pool_size>
453
+ -->
454
+
455
+ <!-- Enables asynchronous loading of databases and tables to speedup server startup.
456
+ Queries to not yet loaded entity will be blocked until load is finished.
457
+ -->
458
+ <async_load_databases>true</async_load_databases>
459
+
460
+ <!-- On memory constrained environments you may have to set this to value larger than 1.
461
+ -->
462
+ <max_server_memory_usage_to_ram_ratio>0.9</max_server_memory_usage_to_ram_ratio>
463
+
464
+ <!-- Simple server-wide memory profiler. Collect a stack trace at every peak allocation step (in bytes).
465
+ Data will be stored in system.trace_log table with query_id = empty string.
466
+ Zero means disabled.
467
+ -->
468
+ <total_memory_profiler_step>4194304</total_memory_profiler_step>
469
+
470
+ <!-- Collect random allocations and deallocations and write them into system.trace_log with 'MemorySample' trace_type.
471
+ The probability is for every alloc/free regardless to the size of the allocation.
472
+ Note that sampling happens only when the amount of untracked memory exceeds the untracked memory limit,
473
+ which is 4 MiB by default but can be lowered if 'total_memory_profiler_step' is lowered.
474
+ You may want to set 'total_memory_profiler_step' to 1 for extra fine grained sampling.
475
+ -->
476
+ <total_memory_tracker_sample_probability>0</total_memory_tracker_sample_probability>
477
+
478
+ <!-- Set limit on number of open files (default: maximum). This setting makes sense on Mac OS X because getrlimit() fails to retrieve
479
+ correct maximum value. -->
480
+ <!-- <max_open_files>262144</max_open_files> -->
481
+
482
+ <!-- Size of cache of uncompressed blocks of data, used in tables of MergeTree family.
483
+ In bytes. Cache is single for server. Memory is allocated only on demand.
484
+ Cache is used when 'use_uncompressed_cache' user setting turned on (off by default).
485
+ Uncompressed cache is advantageous only for very short queries and in rare cases.
486
+
487
+ Note: uncompressed cache can be pointless for lz4, because memory bandwidth
488
+ is slower than multi-core decompression on some server configurations.
489
+ Enabling it can sometimes paradoxically make queries slower.
490
+ -->
491
+ <uncompressed_cache_size>8589934592</uncompressed_cache_size>
492
+
493
+ <!-- Approximate size of mark cache, used in tables of MergeTree family.
494
+ In bytes. Cache is single for server. Memory is allocated only on demand.
495
+ You should not lower this value. -->
496
+ <!-- <mark_cache_size>5368709120</mark_cache_size> -->
497
+
498
+ <!-- For marks of secondary indices. -->
499
+ <!-- <index_mark_cache_size>5368709120</index_mark_cache_size> -->
500
+
501
+ <!-- If you enable the `min_bytes_to_use_mmap_io` setting,
502
+ the data in MergeTree tables can be read with mmap to avoid copying from kernel to userspace.
503
+ It makes sense only for large files and helps only if data reside in page cache.
504
+ To avoid frequent open/mmap/munmap/close calls (which are very expensive due to consequent page faults)
505
+ and to reuse mappings from several threads and queries,
506
+ the cache of mapped files is maintained. Its size is the number of mapped regions (usually equal to the number of mapped files).
507
+ The amount of data in mapped files can be monitored
508
+ in system.metrics, system.metric_log by the MMappedFiles, MMappedFileBytes metrics
509
+ and in system.asynchronous_metrics, system.asynchronous_metrics_log by the MMapCacheCells metric,
510
+ and also in system.events, system.processes, system.query_log, system.query_thread_log, system.query_views_log by the
511
+ CreatedReadBufferMMap, CreatedReadBufferMMapFailed, MMappedFileCacheHits, MMappedFileCacheMisses events.
512
+ Note that the amount of data in mapped files does not consume memory directly and is not accounted
513
+ in query or server memory usage - because this memory can be discarded similar to OS page cache.
514
+ The cache is dropped (the files are closed) automatically on removal of old parts in MergeTree,
515
+ also it can be dropped manually by the SYSTEM DROP MMAP CACHE query.
516
+ -->
517
+ <!-- <mmap_cache_size>1024</mmap_cache_size> -->
518
+
519
+ <!-- Cache size in bytes for compiled expressions.-->
520
+ <!-- <compiled_expression_cache_size>134217728</compiled_expression_cache_size> -->
521
+
522
+ <!-- Cache size in elements for compiled expressions.-->
523
+ <!-- <compiled_expression_cache_elements_size>10000</compiled_expression_cache_elements_size> -->
524
+
525
+ <!-- Size of the query condition cache in bytes. -->
526
+ <query_condition_cache_size>106700800</query_condition_cache_size>
527
+
528
+ <!-- Configuration for the query cache -->
529
+ <!--
530
+ <query_cache>
531
+ <max_size_in_bytes>1073741824</max_size_in_bytes>
532
+ <max_entries>1024</max_entries>
533
+ <max_entry_size_in_bytes>1048576</max_entry_size_in_bytes>
534
+ <max_entry_size_in_rows>30000000</max_entry_size_in_rows>
535
+ </query_cache>
536
+ -->
537
+
538
+ <!-- Cache path for custom (created from SQL) cached disks -->
539
+ <custom_cached_disks_base_directory>/var/lib/clickhouse/caches/</custom_cached_disks_base_directory>
540
+
541
+ <validate_tcp_client_information>false</validate_tcp_client_information>
542
+
543
+ <!-- Path to data directory, with trailing slash. -->
544
+ <path>/data/data/clickhouse/data/</path>
545
+
546
+ <!-- Multi-disk configuration example: -->
547
+ <!--
548
+ <storage_configuration>
549
+ <disks>
550
+ <default>
551
+ <keep_free_space_bytes>0</keep_free_space_bytes>
552
+ </default>
553
+ <data>
554
+ <path>/data/</path>
555
+ <keep_free_space_bytes>0</keep_free_space_bytes>
556
+ </data>
557
+ <s3>
558
+ <type>s3</type>
559
+ <endpoint>http://path/to/endpoint</endpoint>
560
+ <access_key_id>your_access_key_id</access_key_id>
561
+ <secret_access_key>your_secret_access_key</secret_access_key>
562
+ </s3>
563
+ <blob_storage_disk>
564
+ <type>azure_blob_storage</type>
565
+ <storage_account_url>http://account.blob.core.windows.net</storage_account_url>
566
+ <container_name>container</container_name>
567
+ <account_name>account</account_name>
568
+ <account_key>pass123</account_key>
569
+ <metadata_path>/var/lib/clickhouse/disks/blob_storage_disk/</metadata_path>
570
+ <skip_access_check>false</skip_access_check>
571
+ </blob_storage_disk>
572
+ </disks>
573
+
574
+ <policies>
575
+ <all>
576
+ <volumes>
577
+ <main>
578
+ <disk>default</disk>
579
+ <disk>data</disk>
580
+ <disk>s3</disk>
581
+ <disk>blob_storage_disk</disk>
582
+
583
+ <max_data_part_size_bytes></max_data_part_size_bytes>
584
+ <max_data_part_size_ratio></max_data_part_size_ratio>
585
+ <perform_ttl_move_on_insert>true</perform_ttl_move_on_insert>
586
+ <load_balancing>round_robin</load_balancing>
587
+ </main>
588
+ </volumes>
589
+ <move_factor>0.2</move_factor>
590
+ </all>
591
+ </policies>
592
+ </storage_configuration>
593
+ -->
594
+
595
+ <!-- Default database disk storing metadata files: -->
596
+ <!--
597
+ <database_disk>
598
+ <disk>default</disk>
599
+ </database_disk>
600
+ -->
601
+
602
+ <!-- Path to temporary data for processing heavy queries. -->
603
+ <!-- NOTE: all files with `tmp` prefix will be removed at server startup -->
604
+ <tmp_path>/data/data/clickhouse/tmp/</tmp_path>
605
+
606
+ <!-- Disable AuthType plaintext_password and no_password for ACL. -->
607
+ <allow_plaintext_password>1</allow_plaintext_password>
608
+ <allow_no_password>1</allow_no_password>
609
+ <allow_implicit_no_password>1</allow_implicit_no_password>
610
+
611
+ <!-- When a user does not specify a password type in the CREATE USER query, the default password type is used.
612
+ Accepted values are: 'plaintext_password', 'sha256_password', 'double_sha1_password', 'bcrypt_password'.
613
+ -->
614
+ <default_password_type>sha256_password</default_password_type>
615
+
616
+ <!-- Work factor for bcrypt_password authentication type -->
617
+ <bcrypt_workfactor>12</bcrypt_workfactor>
618
+
619
+ <!-- Complexity requirements for user passwords.
620
+ Note: ClickHouse Cloud https://clickhouse.com/cloud is always configured for strong passwords.
621
+ -->
622
+ <!-- <password_complexity>
623
+ <rule>
624
+ <pattern>.{12}</pattern>
625
+ <message>be at least 12 characters long</message>
626
+ </rule>
627
+ <rule>
628
+ <pattern>\p{N}</pattern>
629
+ <message>contain at least 1 numeric character</message>
630
+ </rule>
631
+ <rule>
632
+ <pattern>\p{Ll}</pattern>
633
+ <message>contain at least 1 lowercase character</message>
634
+ </rule>
635
+ <rule>
636
+ <pattern>\p{Lu}</pattern>
637
+ <message>contain at least 1 uppercase character</message>
638
+ </rule>
639
+ <rule>
640
+ <pattern>[^\p{L}\p{N}]</pattern>
641
+ <message>contain at least 1 special character</message>
642
+ </rule>
643
+ </password_complexity> -->
644
+
645
+ <!-- Policy from the <storage_configuration> for the temporary files.
646
+ If not set <tmp_path> is used, otherwise <tmp_path> is ignored.
647
+
648
+ Notes:
649
+ - move_factor is ignored
650
+ - keep_free_space_bytes is ignored
651
+ - max_data_part_size_bytes is ignored
652
+ - you must have exactly one volume in that policy
653
+
654
+ NOTE: all files with `tmp` prefix will be removed at server startup
655
+ -->
656
+ <!-- <tmp_policy>tmp</tmp_policy> -->
657
+
658
+ <!-- Directory with user provided files that are accessible by 'file' table function. -->
659
+ <user_files_path>/data/data/clickhouse/user_files/</user_files_path>
660
+
661
+ <!-- LDAP server definitions. -->
662
+ <ldap_servers>
663
+ <!-- List LDAP servers with their connection parameters here to later 1) use them as authenticators for dedicated local users,
664
+ who have 'ldap' authentication mechanism specified instead of 'password', or to 2) use them as remote user directories.
665
+ Parameters:
666
+ host - LDAP server hostname or IP, this parameter is mandatory and cannot be empty.
667
+ port - LDAP server port, default is 636 if enable_tls is set to true, 389 otherwise.
668
+ bind_dn - template used to construct the DN to bind to.
669
+ The resulting DN will be constructed by replacing all '{user_name}' substrings of the template with the actual
670
+ user name during each authentication attempt.
671
+ user_dn_detection - section with LDAP search parameters for detecting the actual user DN of the bound user.
672
+ This is mainly used in search filters for further role mapping when the server is Active Directory. The
673
+ resulting user DN will be used when replacing '{user_dn}' substrings wherever they are allowed. By default,
674
+ user DN is set equal to bind DN, but once the search is performed, it will be updated to the actual detected
675
+ user DN value.
676
+ base_dn - template used to construct the base DN for the LDAP search.
677
+ The resulting DN will be constructed by replacing all '{user_name}' and '{bind_dn}' substrings
678
+ of the template with the actual user name and bind DN during the LDAP search.
679
+ scope - scope of the LDAP search.
680
+ Accepted values are: 'base', 'one_level', 'children', 'subtree' (the default).
681
+ search_filter - template used to construct the search filter for the LDAP search.
682
+ The resulting filter will be constructed by replacing all '{user_name}', '{bind_dn}', and '{base_dn}'
683
+ substrings of the template with the actual user name, bind DN, and base DN during the LDAP search.
684
+ Note that the special characters must be escaped properly in XML.
685
+ verification_cooldown - a period of time, in seconds, after a successful bind attempt, during which a user will be assumed
686
+ to be successfully authenticated for all consecutive requests without contacting the LDAP server.
687
+ Specify 0 (the default) to disable caching and force contacting the LDAP server for each authentication request.
688
+ enable_tls - flag to trigger use of secure connection to the LDAP server.
689
+ Specify 'no' for plain text (ldap://) protocol (not recommended).
690
+ Specify 'yes' for LDAP over SSL/TLS (ldaps://) protocol (recommended, the default).
691
+ Specify 'starttls' for legacy StartTLS protocol (plain text (ldap://) protocol, upgraded to TLS).
692
+ tls_minimum_protocol_version - the minimum protocol version of SSL/TLS.
693
+ Accepted values are: 'ssl2', 'ssl3', 'tls1.0', 'tls1.1', 'tls1.2' (the default).
694
+ tls_require_cert - SSL/TLS peer certificate verification behavior.
695
+ Accepted values are: 'never', 'allow', 'try', 'demand' (the default).
696
+ tls_cert_file - path to certificate file.
697
+ tls_key_file - path to certificate key file.
698
+ tls_ca_cert_file - path to CA certificate file.
699
+ tls_ca_cert_dir - path to the directory containing CA certificates.
700
+ tls_cipher_suite - allowed cipher suite (in OpenSSL notation).
701
+ Example:
702
+ <my_ldap_server>
703
+ <host>localhost</host>
704
+ <port>636</port>
705
+ <bind_dn>uid={user_name},ou=users,dc=example,dc=com</bind_dn>
706
+ <verification_cooldown>300</verification_cooldown>
707
+ <enable_tls>yes</enable_tls>
708
+ <tls_minimum_protocol_version>tls1.2</tls_minimum_protocol_version>
709
+ <tls_require_cert>demand</tls_require_cert>
710
+ <tls_cert_file>/path/to/tls_cert_file</tls_cert_file>
711
+ <tls_key_file>/path/to/tls_key_file</tls_key_file>
712
+ <tls_ca_cert_file>/path/to/tls_ca_cert_file</tls_ca_cert_file>
713
+ <tls_ca_cert_dir>/path/to/tls_ca_cert_dir</tls_ca_cert_dir>
714
+ <tls_cipher_suite>ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:AES256-GCM-SHA384</tls_cipher_suite>
715
+ </my_ldap_server>
716
+ Example (typical Active Directory with configured user DN detection for further role mapping):
717
+ <my_ad_server>
718
+ <host>localhost</host>
719
+ <port>389</port>
720
+ <bind_dn>EXAMPLE\{user_name}</bind_dn>
721
+ <user_dn_detection>
722
+ <base_dn>CN=Users,DC=example,DC=com</base_dn>
723
+ <search_filter>(&amp;(objectClass=user)(sAMAccountName={user_name}))</search_filter>
724
+ </user_dn_detection>
725
+ <enable_tls>no</enable_tls>
726
+ </my_ad_server>
727
+ -->
728
+ </ldap_servers>
729
+
730
+ <!-- To enable Kerberos authentication support for HTTP requests (GSS-SPNEGO), for those users who are explicitly configured
731
+ to authenticate via Kerberos, define a single 'kerberos' section here.
732
+ Parameters:
733
+ principal - canonical service principal name, that will be acquired and used when accepting security contexts.
734
+ This parameter is optional, if omitted, the default principal will be used.
735
+ This parameter cannot be specified together with 'realm' parameter.
736
+ realm - a realm, that will be used to restrict authentication to only those requests whose initiator's realm matches it.
737
+ This parameter is optional, if omitted, no additional filtering by realm will be applied.
738
+ This parameter cannot be specified together with 'principal' parameter.
739
+ Example:
740
+ <kerberos />
741
+ Example:
742
+ <kerberos>
743
+ <principal>HTTP/clickhouse.example.com@EXAMPLE.COM</principal>
744
+ </kerberos>
745
+ Example:
746
+ <kerberos>
747
+ <realm>EXAMPLE.COM</realm>
748
+ </kerberos>
749
+ -->
750
+
751
+ <!-- Sources to read users, roles, access rights, profiles of settings, quotas. -->
752
+ <user_directories>
753
+ <users_xml>
754
+ <!-- Path to configuration file with predefined users. -->
755
+ <path>users.xml</path>
756
+ </users_xml>
757
+ <local_directory>
758
+ <!-- Path to folder where users created by SQL commands are stored. -->
759
+ <path>/var/lib/clickhouse/access</path>
760
+
761
+
762
+ </local_directory>
763
+
764
+ <!-- To add an LDAP server as a remote user directory of users that are not defined locally, define a single 'ldap' section
765
+ with the following parameters:
766
+ server - one of LDAP server names defined in 'ldap_servers' config section above.
767
+ This parameter is mandatory and cannot be empty.
768
+ roles - section with a list of locally defined roles that will be assigned to each user retrieved from the LDAP server.
769
+ If no roles are specified here or assigned during role mapping (below), user will not be able to perform any
770
+ actions after authentication.
771
+ role_mapping - section with LDAP search parameters and mapping rules.
772
+ When a user authenticates, while still bound to LDAP, an LDAP search is performed using search_filter and the
773
+ name of the logged in user. For each entry found during that search, the value of the specified attribute is
774
+ extracted. For each attribute value that has the specified prefix, the prefix is removed, and the rest of the
775
+ value becomes the name of a local role defined in ClickHouse, which is expected to be created beforehand by
776
+ CREATE ROLE command.
777
+ There can be multiple 'role_mapping' sections defined inside the same 'ldap' section. All of them will be
778
+ applied.
779
+ base_dn - template used to construct the base DN for the LDAP search.
780
+ The resulting DN will be constructed by replacing all '{user_name}', '{bind_dn}', and '{user_dn}'
781
+ substrings of the template with the actual user name, bind DN, and user DN during each LDAP search.
782
+ scope - scope of the LDAP search.
783
+ Accepted values are: 'base', 'one_level', 'children', 'subtree' (the default).
784
+ search_filter - template used to construct the search filter for the LDAP search.
785
+ The resulting filter will be constructed by replacing all '{user_name}', '{bind_dn}', '{user_dn}', and
786
+ '{base_dn}' substrings of the template with the actual user name, bind DN, user DN, and base DN during
787
+ each LDAP search.
788
+ Note, that the special characters must be escaped properly in XML.
789
+ attribute - attribute name whose values will be returned by the LDAP search. 'cn', by default.
790
+ prefix - prefix, that will be expected to be in front of each string in the original list of strings returned by
791
+ the LDAP search. Prefix will be removed from the original strings and resulting strings will be treated
792
+ as local role names. Empty, by default.
793
+ Example:
794
+ <ldap>
795
+ <server>my_ldap_server</server>
796
+ <roles>
797
+ <my_local_role1 />
798
+ <my_local_role2 />
799
+ </roles>
800
+ <role_mapping>
801
+ <base_dn>ou=groups,dc=example,dc=com</base_dn>
802
+ <scope>subtree</scope>
803
+ <search_filter>(&amp;(objectClass=groupOfNames)(member={bind_dn}))</search_filter>
804
+ <attribute>cn</attribute>
805
+ <prefix>clickhouse_</prefix>
806
+ </role_mapping>
807
+ </ldap>
808
+ Example (typical Active Directory with role mapping that relies on the detected user DN):
809
+ <ldap>
810
+ <server>my_ad_server</server>
811
+ <role_mapping>
812
+ <base_dn>CN=Users,DC=example,DC=com</base_dn>
813
+ <attribute>CN</attribute>
814
+ <scope>subtree</scope>
815
+ <search_filter>(&amp;(objectClass=group)(member={user_dn}))</search_filter>
816
+ <prefix>clickhouse_</prefix>
817
+ </role_mapping>
818
+ </ldap>
819
+ -->
820
+
821
+
822
+ </user_directories>
823
+
824
+ <access_control_improvements>
825
+ <!-- Enables logic that users without permissive row policies can still read rows using a SELECT query.
826
+ For example, if there are two users A, B and a row policy is defined only for A, then
827
+ if this setting is true the user B will see all rows, and if this setting is false the user B will see no rows.
828
+ By default this setting is true. -->
829
+ <users_without_row_policies_can_read_rows>true</users_without_row_policies_can_read_rows>
830
+
831
+ <!-- By default, for backward compatibility ON CLUSTER queries ignore CLUSTER grant,
832
+ however you can change this behaviour by setting this to true -->
833
+ <on_cluster_queries_require_cluster_grant>true</on_cluster_queries_require_cluster_grant>
834
+
835
+ <!-- By default, for backward compatibility "SELECT * FROM system.<table>" doesn't require any grants and can be executed
836
+ by any user. You can change this behaviour by setting this to true.
837
+ If it's set to true then this query requires "GRANT SELECT ON system.<table>" just like as for non-system tables.
838
+ Exceptions: a few system tables ("tables", "columns", "databases", and some constant tables like "one", "contributors")
839
+ are still accessible for everyone; and if there is a SHOW privilege (e.g. "SHOW USERS") granted the corresponding system
840
+ table (i.e. "system.users") will be accessible. -->
841
+ <select_from_system_db_requires_grant>true</select_from_system_db_requires_grant>
842
+
843
+ <!-- By default, for backward compatibility "SELECT * FROM information_schema.<table>" doesn't require any grants and can be
844
+ executed by any user. You can change this behaviour by setting this to true.
845
+ If it's set to true then this query requires "GRANT SELECT ON information_schema.<table>" just like as for ordinary tables. -->
846
+ <select_from_information_schema_requires_grant>true</select_from_information_schema_requires_grant>
847
+
848
+ <!-- By default, for backward compatibility a settings profile constraint for a specific setting inherits every unset field from
849
+ previous profile. You can change this behaviour by setting this to true.
850
+ If it's set to true then if settings profile has a constraint for a specific setting, then this constraint completely cancels all
851
+ actions of previous constraint (defined in other profiles) for the same specific setting, including fields that are not set by new constraint.
852
+ It also enables 'changeable_in_readonly' constraint type -->
853
+ <settings_constraints_replace_previous>true</settings_constraints_replace_previous>
854
+
855
+ <!-- By default, for backward compatibility creating table with a specific table engine ignores grant,
856
+ however you can change this behaviour by setting this to true -->
857
+ <table_engines_require_grant>false</table_engines_require_grant>
858
+
859
+ <!-- Number of seconds since last access a role is stored in the Role Cache -->
860
+ <role_cache_expiration_time_seconds>600</role_cache_expiration_time_seconds>
861
+ </access_control_improvements>
862
+
863
+ <!-- Default profile of settings. -->
864
+ <default_profile>default</default_profile>
865
+
866
+ <!-- Comma-separated list of prefixes for user-defined settings.
867
+ The server will allow to set these settings, and retrieve them with the getSetting function.
868
+ They are also logged in the query_log, similarly to other settings, but have no special effect.
869
+ The "SQL_" prefix is introduced for compatibility with MySQL - these settings are being set by Tableau.
870
+ -->
871
+ <custom_settings_prefixes>SQL_</custom_settings_prefixes>
872
+
873
+ <!-- System profile of settings. These settings are used by internal processes (Distributed DDL worker and so on). -->
874
+ <!-- <system_profile>default</system_profile> -->
875
+
876
+ <!-- Buffer profile of settings.
877
+ These settings are used by Buffer storage to flush data to the underlying table.
878
+ Default: used from system_profile directive.
879
+ -->
880
+ <!-- <buffer_profile>default</buffer_profile> -->
881
+
882
+ <!-- Default database. -->
883
+ <default_database>default</default_database>
884
+
885
+ <!-- Server time zone could be set here.
886
+
887
+ Time zone is used when converting between String and DateTime types,
888
+ when printing DateTime in text formats and parsing DateTime from text,
889
+ it is used in date and time related functions, if specific time zone was not passed as an argument.
890
+
891
+ Time zone is specified as identifier from IANA time zone database, like UTC or Africa/Abidjan.
892
+ If not specified, system time zone at server startup is used.
893
+
894
+ Please note, that server could display time zone alias instead of specified name.
895
+ Example: Zulu is an alias for UTC.
896
+ -->
897
+ <!-- <timezone>UTC</timezone> -->
898
+
899
+ <!-- You can specify umask here (see "man umask"). Server will apply it on startup.
900
+ Number is always parsed as octal. Default umask is 027 (other users cannot read logs, data files, etc; group can only read).
901
+ -->
902
+ <!-- <umask>022</umask> -->
903
+
904
+ <!-- Perform mlockall after startup to lower first queries latency
905
+ and to prevent clickhouse executable from being paged out under high IO load.
906
+ Enabling this option is recommended but will lead to increased startup time for up to a few seconds.
907
+ -->
908
+ <mlock_executable>true</mlock_executable>
909
+
910
+ <!-- Reallocate memory for machine code ("text") using huge pages. Highly experimental. -->
911
+ <remap_executable>false</remap_executable>
912
+
913
+ <![CDATA[
914
+ Uncomment below in order to use JDBC table engine and function.
915
+
916
+ To install and run JDBC bridge in background:
917
+ * [Debian/Ubuntu]
918
+ export MVN_URL=https://repo1.maven.org/maven2/com/clickhouse/clickhouse-jdbc-bridge/
919
+ export PKG_VER=$(curl -sL $MVN_URL/maven-metadata.xml | grep '<release>' | sed -e 's|.*>\(.*\)<.*|\1|')
920
+ wget https://github.com/ClickHouse/clickhouse-jdbc-bridge/releases/download/v$PKG_VER/clickhouse-jdbc-bridge_$PKG_VER-1_all.deb
921
+ apt install --no-install-recommends -f ./clickhouse-jdbc-bridge_$PKG_VER-1_all.deb
922
+ clickhouse-jdbc-bridge &
923
+
924
+ * [CentOS/RHEL]
925
+ export MVN_URL=https://repo1.maven.org/maven2/com/clickhouse/clickhouse-jdbc-bridge/
926
+ export PKG_VER=$(curl -sL $MVN_URL/maven-metadata.xml | grep '<release>' | sed -e 's|.*>\(.*\)<.*|\1|')
927
+ wget https://github.com/ClickHouse/clickhouse-jdbc-bridge/releases/download/v$PKG_VER/clickhouse-jdbc-bridge-$PKG_VER-1.noarch.rpm
928
+ yum localinstall -y clickhouse-jdbc-bridge-$PKG_VER-1.noarch.rpm
929
+ clickhouse-jdbc-bridge &
930
+
931
+ Please refer to https://github.com/ClickHouse/clickhouse-jdbc-bridge#usage for more information.
932
+ ]]>
933
+ <!--
934
+ <jdbc_bridge>
935
+ <host>127.0.0.1</host>
936
+ <port>9019</port>
937
+ </jdbc_bridge>
938
+ -->
939
+
940
+ <!-- Configuration of clusters that could be used in Distributed tables.
941
+ https://clickhouse.com/docs/en/operations/table_engines/distributed/
942
+ Note: ClickHouse Cloud https://clickhouse.com/cloud has the cluster preconfigured and dynamically scalable.
943
+ -->
944
+ <remote_servers>
945
+ <!-- Test only shard config for testing distributed storage -->
946
+ <default>
947
+ <!-- Inter-server per-cluster secret for Distributed queries
948
+ default: no secret (no authentication will be performed)
949
+
950
+ If set, then Distributed queries will be validated on shards, so at least:
951
+ - such cluster should exist on the shard,
952
+ - such cluster should have the same secret.
953
+
954
+ And also (and which is more important), the initial_user will
955
+ be used as current user for the query.
956
+
957
+ Right now the protocol is pretty simple, and it only takes into account:
958
+ - cluster name
959
+ - query
960
+
961
+ Also, it will be nice if the following will be implemented:
962
+ - source hostname (see interserver_http_host), but then it will depend on DNS,
963
+ it can use IP address instead, but then you need to get correct on the initiator node.
964
+ - target hostname / ip address (same notes as for source hostname)
965
+ - time-based security tokens
966
+ -->
967
+ <!-- <secret></secret> -->
968
+
969
+ <shard>
970
+ <!-- Optional. Whether to write data to just one of the replicas. Default: false (write data to all replicas). -->
971
+ <!-- <internal_replication>false</internal_replication> -->
972
+ <!-- Optional. Shard weight when writing data. Default: 1. -->
973
+ <!-- <weight>1</weight> -->
974
+ <replica>
975
+ <host>localhost</host>
976
+ <port>9000</port>
977
+ <!-- Optional. Priority of the replica for load_balancing. Default: 1 (less value has more priority). -->
978
+ <!-- <priority>1</priority> -->
979
+ <!-- Use SSL? Default: no -->
980
+ <!-- <secure>0</secure> -->
981
+ <!-- Optional. Bind to specific host before connecting to use a specific network. -->
982
+ <!-- <bind_host>10.0.0.1</bind_host> -->
983
+ </replica>
984
+ </shard>
985
+ </default>
986
+ </remote_servers>
987
+
988
+ <!-- The list of hosts allowed to use in URL-related storage engines and table functions.
989
+ If this section is not present in configuration, all hosts are allowed.
990
+ -->
991
+ <!--<remote_url_allow_hosts>-->
992
+ <!-- Host should be specified exactly as in URL. The name is checked before DNS resolution.
993
+ Example: "clickhouse.com", "clickhouse.com." and "www.clickhouse.com" are different hosts.
994
+ If port is explicitly specified in URL, the host:port is checked as a whole.
995
+ If host specified here without port, any port with this host allowed.
996
+ "clickhouse.com" -> "clickhouse.com:443", "clickhouse.com:80" etc. is allowed, but "clickhouse.com:80" -> only "clickhouse.com:80" is allowed.
997
+ If the host is specified as IP address, it is checked as specified in URL. Example: "[2a02:6b8:a::a]".
998
+ If there are redirects and support for redirects is enabled, every redirect (the Location field) is checked.
999
+ Host should be specified using the host xml tag:
1000
+ <host>clickhouse.com</host>
1001
+ -->
1002
+
1003
+ <!-- Regular expression can be specified. RE2 engine is used for regexps.
1004
+ Regexps are not aligned: don't forget to add ^ and $. Also don't forget to escape dot (.) metacharacter
1005
+ (forgetting to do so is a common source of error).
1006
+ -->
1007
+ <!--</remote_url_allow_hosts>-->
1008
+
1009
+ <!-- The list of HTTP headers forbidden to use in HTTP-related storage engines and table functions.
1010
+ If this section is not present in configuration, all headers are allowed.
1011
+ -->
1012
+ <!-- <http_forbid_headers>
1013
+ <header>exact_header</header>
1014
+ <header_regexp>(?i)(case_insensitive_header)</header_regexp>
1015
+ </http_forbid_headers> -->
1016
+
1017
+ <!-- If element has 'incl' attribute, then for it's value will be used corresponding substitution from another file.
1018
+ By default, path to file with substitutions is /etc/metrika.xml. It could be changed in config in 'include_from' element.
1019
+ Values for substitutions are specified in /clickhouse/name_of_substitution elements in that file.
1020
+ -->
1021
+
1022
+ <!-- ZooKeeper is used to store metadata about replicas, when using Replicated tables.
1023
+ Optional. If you don't use replicated tables, you could omit that.
1024
+
1025
+ See https://clickhouse.com/docs/en/engines/table-engines/mergetree-family/replication/
1026
+
1027
+ Note: ClickHouse Cloud https://clickhouse.com/cloud has ClickHouse Keeper automatically configured for every service.
1028
+ -->
1029
+
1030
+ <!--
1031
+ <zookeeper>
1032
+ <node>
1033
+ <host>example1</host>
1034
+ <port>2181</port>
1035
+ </node>
1036
+ <node>
1037
+ <host>example2</host>
1038
+ <port>2181</port>
1039
+ </node>
1040
+ <node>
1041
+ <host>example3</host>
1042
+ <port>2181</port>
1043
+ </node>
1044
+ </zookeeper>
1045
+ -->
1046
+
1047
+ <!-- Substitutions for parameters of replicated tables.
1048
+ Optional. If you don't use replicated tables, you could omit that.
1049
+
1050
+ See https://clickhouse.com/docs/en/engines/table-engines/mergetree-family/replication/#creating-replicated-tables
1051
+ -->
1052
+ <!--
1053
+ <macros>
1054
+ <shard>01</shard>
1055
+ <replica>example01-01-1</replica>
1056
+ </macros>
1057
+ -->
1058
+
1059
+ <!--
1060
+ <default_replica_path>/clickhouse/tables/{database}/{table}</default_replica_path>
1061
+ <default_replica_name>{replica}</default_replica_name>
1062
+ -->
1063
+
1064
+ <!-- Replica group name for database Replicated.
1065
+ The cluster created by Replicated database will consist of replicas in the same group.
1066
+ DDL queries will only wait for the replicas in the same group.
1067
+ Empty by default.
1068
+ -->
1069
+ <!--
1070
+ <replica_group_name></replica_group_name>
1071
+ -->
1072
+
1073
+
1074
+ <!-- Reloading interval for embedded dictionaries, in seconds. Default: 3600. -->
1075
+ <builtin_dictionaries_reload_interval>3600</builtin_dictionaries_reload_interval>
1076
+
1077
+
1078
+ <!-- Maximum session timeout, in seconds. Default: 3600. -->
1079
+ <max_session_timeout>3600</max_session_timeout>
1080
+
1081
+ <!-- Default session timeout, in seconds. Default: 60. -->
1082
+ <default_session_timeout>60</default_session_timeout>
1083
+
1084
+ <!-- Sending data to Graphite for monitoring. Several sections can be defined. -->
1085
+ <!--
1086
+ interval - send every X second
1087
+ root_path - prefix for keys
1088
+ hostname_in_path - append hostname to root_path (default = true)
1089
+ metrics - send data from table system.metrics
1090
+ events - send data from table system.events
1091
+ asynchronous_metrics - send data from table system.asynchronous_metrics
1092
+ -->
1093
+ <!--
1094
+ <graphite>
1095
+ <host>localhost</host>
1096
+ <port>42000</port>
1097
+ <timeout>0.1</timeout>
1098
+ <interval>60</interval>
1099
+ <root_path>one_min</root_path>
1100
+ <hostname_in_path>true</hostname_in_path>
1101
+
1102
+ <metrics>true</metrics>
1103
+ <events>true</events>
1104
+ <events_cumulative>false</events_cumulative>
1105
+ <asynchronous_metrics>true</asynchronous_metrics>
1106
+ </graphite>
1107
+ <graphite>
1108
+ <host>localhost</host>
1109
+ <port>42000</port>
1110
+ <timeout>0.1</timeout>
1111
+ <interval>1</interval>
1112
+ <root_path>one_sec</root_path>
1113
+
1114
+ <metrics>true</metrics>
1115
+ <events>true</events>
1116
+ <events_cumulative>false</events_cumulative>
1117
+ <asynchronous_metrics>false</asynchronous_metrics>
1118
+ </graphite>
1119
+ -->
1120
+
1121
+ <!-- Serve endpoint for Prometheus monitoring. -->
1122
+ <!--
1123
+ endpoint - metrics path (relative to root, starting with "/")
1124
+ port - port to setup server. If not defined or 0 then http_port is used
1125
+ metrics - send data from table system.metrics
1126
+ events - send data from table system.events
1127
+ asynchronous_metrics - send data from table system.asynchronous_metrics
1128
+ -->
1129
+ <!--
1130
+ <prometheus>
1131
+ <endpoint>/metrics</endpoint>
1132
+ <port>9363</port>
1133
+
1134
+ <metrics>true</metrics>
1135
+ <events>true</events>
1136
+ <asynchronous_metrics>true</asynchronous_metrics>
1137
+ </prometheus>
1138
+ -->
1139
+
1140
+ <!-- Query log. Used only for queries with setting log_queries = 1. -->
1141
+ <query_log>
1142
+ <!-- What table to insert data. If the table does not exist, it will be created.
1143
+ When query log structure is changed after system update,
1144
+ then old table will be renamed and new table will be created automatically.
1145
+ -->
1146
+ <database>system</database>
1147
+ <table>query_log</table>
1148
+ <!--
1149
+ PARTITION BY expr: https://clickhouse.com/docs/en/table_engines/mergetree-family/custom_partitioning_key/
1150
+ Example:
1151
+ event_date
1152
+ toMonday(event_date)
1153
+ toYYYYMM(event_date)
1154
+ toStartOfHour(event_time)
1155
+ -->
1156
+ <partition_by>toYYYYMM(event_date)</partition_by>
1157
+ <!--
1158
+ Table TTL specification: https://clickhouse.com/docs/en/engines/table-engines/mergetree-family/mergetree/#mergetree-table-ttl
1159
+ Example:
1160
+ event_date + INTERVAL 1 WEEK
1161
+ event_date + INTERVAL 7 DAY DELETE
1162
+ event_date + INTERVAL 2 WEEK TO DISK 'bbb'
1163
+
1164
+ <ttl>event_date + INTERVAL 30 DAY DELETE</ttl>
1165
+ -->
1166
+
1167
+ <!--
1168
+ ORDER BY expr: https://clickhouse.com/docs/en/engines/table-engines/mergetree-family/mergetree#order_by
1169
+ Example:
1170
+ event_date, event_time
1171
+ event_date, type, query_id
1172
+ event_date, event_time, initial_query_id
1173
+
1174
+ <order_by>event_date, event_time, initial_query_id</order_by>
1175
+ -->
1176
+
1177
+ <!-- Instead of partition_by, you can provide full engine expression (starting with ENGINE = ) with parameters,
1178
+ Example: <engine>ENGINE = MergeTree PARTITION BY toYYYYMM(event_date) ORDER BY (event_date, event_time) SETTINGS index_granularity = 1024</engine>
1179
+ -->
1180
+
1181
+ <!-- Interval of flushing data. -->
1182
+ <flush_interval_milliseconds>7500</flush_interval_milliseconds>
1183
+ <!-- Maximum size in lines for the logs. When non-flushed logs amount reaches max_size, logs dumped to the disk. -->
1184
+ <max_size_rows>1048576</max_size_rows>
1185
+ <!-- Pre-allocated size in lines for the logs. -->
1186
+ <reserved_size_rows>8192</reserved_size_rows>
1187
+ <!-- Lines amount threshold, reaching it launches flushing logs to the disk in background. -->
1188
+ <buffer_size_rows_flush_threshold>524288</buffer_size_rows_flush_threshold>
1189
+ <!-- Indication whether logs should be dumped to the disk in case of a crash -->
1190
+ <flush_on_crash>false</flush_on_crash>
1191
+
1192
+ <!-- example of using a different storage policy for a system table -->
1193
+ <!-- storage_policy>local_ssd</storage_policy -->
1194
+ </query_log>
1195
+
1196
+ <!-- Trace log. Stores stack traces collected by query profilers.
1197
+ See query_profiler_real_time_period_ns and query_profiler_cpu_time_period_ns settings. -->
1198
+ <trace_log>
1199
+ <database>system</database>
1200
+ <table>trace_log</table>
1201
+
1202
+ <partition_by>toYYYYMM(event_date)</partition_by>
1203
+ <flush_interval_milliseconds>7500</flush_interval_milliseconds>
1204
+ <max_size_rows>1048576</max_size_rows>
1205
+ <reserved_size_rows>8192</reserved_size_rows>
1206
+ <buffer_size_rows_flush_threshold>524288</buffer_size_rows_flush_threshold>
1207
+ <!-- Indication whether logs should be dumped to the disk in case of a crash -->
1208
+ <flush_on_crash>false</flush_on_crash>
1209
+ <symbolize>true</symbolize>
1210
+ </trace_log>
1211
+
1212
+ <!-- Query thread log. Has information about all threads participated in query execution.
1213
+ Used only for queries with setting log_query_threads = 1. -->
1214
+ <query_thread_log>
1215
+ <database>system</database>
1216
+ <table>query_thread_log</table>
1217
+ <partition_by>toYYYYMM(event_date)</partition_by>
1218
+ <flush_interval_milliseconds>7500</flush_interval_milliseconds>
1219
+ <max_size_rows>1048576</max_size_rows>
1220
+ <reserved_size_rows>8192</reserved_size_rows>
1221
+ <buffer_size_rows_flush_threshold>524288</buffer_size_rows_flush_threshold>
1222
+ <flush_on_crash>false</flush_on_crash>
1223
+ </query_thread_log>
1224
+
1225
+ <!-- Query views log. Has information about all dependent views associated with a query.
1226
+ Used only for queries with setting log_query_views = 1. -->
1227
+ <query_views_log>
1228
+ <database>system</database>
1229
+ <table>query_views_log</table>
1230
+ <partition_by>toYYYYMM(event_date)</partition_by>
1231
+ <flush_interval_milliseconds>7500</flush_interval_milliseconds>
1232
+ </query_views_log>
1233
+
1234
+ <!-- Part log contains information about all actions with parts in MergeTree tables (creation, deletion, merges, downloads). -->
1235
+ <part_log>
1236
+ <database>system</database>
1237
+ <table>part_log</table>
1238
+ <partition_by>toYYYYMM(event_date)</partition_by>
1239
+ <flush_interval_milliseconds>7500</flush_interval_milliseconds>
1240
+ <max_size_rows>1048576</max_size_rows>
1241
+ <reserved_size_rows>8192</reserved_size_rows>
1242
+ <buffer_size_rows_flush_threshold>524288</buffer_size_rows_flush_threshold>
1243
+ <flush_on_crash>false</flush_on_crash>
1244
+ </part_log>
1245
+
1246
+ <!-- Text log contains all information from usual server log but stores it in structured and efficient way.
1247
+ The level of the messages that goes to the table can be limited (<level>), if not specified all messages will go to the table.
1248
+ -->
1249
+ <text_log>
1250
+ <database>system</database>
1251
+ <table>text_log</table>
1252
+ <flush_interval_milliseconds>7500</flush_interval_milliseconds>
1253
+ <max_size_rows>1048576</max_size_rows>
1254
+ <reserved_size_rows>8192</reserved_size_rows>
1255
+ <buffer_size_rows_flush_threshold>524288</buffer_size_rows_flush_threshold>
1256
+ <flush_on_crash>false</flush_on_crash>
1257
+ <level>trace</level>
1258
+ </text_log>
1259
+
1260
+ <!-- Metric log contains rows with current values of ProfileEvents, CurrentMetrics collected with "collect_interval_milliseconds" interval. -->
1261
+ <metric_log>
1262
+ <database>system</database>
1263
+ <table>metric_log</table>
1264
+ <flush_interval_milliseconds>7500</flush_interval_milliseconds>
1265
+ <max_size_rows>1048576</max_size_rows>
1266
+ <reserved_size_rows>8192</reserved_size_rows>
1267
+ <buffer_size_rows_flush_threshold>524288</buffer_size_rows_flush_threshold>
1268
+ <collect_interval_milliseconds>1000</collect_interval_milliseconds>
1269
+ <flush_on_crash>false</flush_on_crash>
1270
+ </metric_log>
1271
+
1272
+ <!-- Error log contains rows with current values of errors collected with "collect_interval_milliseconds" interval. -->
1273
+ <error_log>
1274
+ <database>system</database>
1275
+ <table>error_log</table>
1276
+ <flush_interval_milliseconds>7500</flush_interval_milliseconds>
1277
+ <max_size_rows>1048576</max_size_rows>
1278
+ <reserved_size_rows>8192</reserved_size_rows>
1279
+ <buffer_size_rows_flush_threshold>524288</buffer_size_rows_flush_threshold>
1280
+ <collect_interval_milliseconds>1000</collect_interval_milliseconds>
1281
+ <flush_on_crash>false</flush_on_crash>
1282
+ </error_log>
1283
+
1284
+ <!-- Query metric log contains history of memory and metric values from table system.events for individual queries, periodically flushed to disk
1285
+ every "collect_interval_milliseconds" interval -->
1286
+ <query_metric_log>
1287
+ <database>system</database>
1288
+ <table>query_metric_log</table>
1289
+ <flush_interval_milliseconds>7500</flush_interval_milliseconds>
1290
+ <max_size_rows>1048576</max_size_rows>
1291
+ <reserved_size_rows>8192</reserved_size_rows>
1292
+ <buffer_size_rows_flush_threshold>524288</buffer_size_rows_flush_threshold>
1293
+ <collect_interval_milliseconds>1000</collect_interval_milliseconds>
1294
+ <flush_on_crash>false</flush_on_crash>
1295
+ </query_metric_log>
1296
+
1297
+ <!--
1298
+ Asynchronous metric log contains values of metrics from
1299
+ system.asynchronous_metrics.
1300
+ -->
1301
+ <asynchronous_metric_log>
1302
+ <database>system</database>
1303
+ <table>asynchronous_metric_log</table>
1304
+ <flush_interval_milliseconds>7000</flush_interval_milliseconds>
1305
+ <max_size_rows>1048576</max_size_rows>
1306
+ <reserved_size_rows>8192</reserved_size_rows>
1307
+ <buffer_size_rows_flush_threshold>524288</buffer_size_rows_flush_threshold>
1308
+ <flush_on_crash>false</flush_on_crash>
1309
+ </asynchronous_metric_log>
1310
+
1311
+ <!--
1312
+ OpenTelemetry log contains OpenTelemetry trace spans.
1313
+
1314
+ NOTE: this table does not use standard schema with event_date and event_time!
1315
+ -->
1316
+ <opentelemetry_span_log>
1317
+ <!--
1318
+ The default table creation code is insufficient, this <engine> spec
1319
+ is a workaround. There is no 'event_time' for this log, but two times,
1320
+ start and finish. It is sorted by finish time, to avoid inserting
1321
+ data too far away in the past (probably we can sometimes insert a span
1322
+ that is seconds earlier than the last span in the table, due to a race
1323
+ between several spans inserted in parallel). This gives the spans a
1324
+ global order that we can use to e.g. retry insertion into some external
1325
+ system.
1326
+ -->
1327
+ <engine>
1328
+ engine MergeTree
1329
+ partition by toYYYYMM(finish_date)
1330
+ order by (finish_date, finish_time_us)
1331
+ </engine>
1332
+ <database>system</database>
1333
+ <table>opentelemetry_span_log</table>
1334
+ <flush_interval_milliseconds>7500</flush_interval_milliseconds>
1335
+ <max_size_rows>1048576</max_size_rows>
1336
+ <reserved_size_rows>8192</reserved_size_rows>
1337
+ <buffer_size_rows_flush_threshold>524288</buffer_size_rows_flush_threshold>
1338
+ <flush_on_crash>false</flush_on_crash>
1339
+ </opentelemetry_span_log>
1340
+
1341
+
1342
+ <!-- Crash log. Stores stack traces for fatal errors.
1343
+ This table is normally empty. -->
1344
+ <crash_log>
1345
+ <database>system</database>
1346
+ <table>crash_log</table>
1347
+
1348
+ <partition_by/>
1349
+ <flush_interval_milliseconds>1000</flush_interval_milliseconds>
1350
+ <max_size_rows>1024</max_size_rows>
1351
+ <reserved_size_rows>1024</reserved_size_rows>
1352
+ <buffer_size_rows_flush_threshold>512</buffer_size_rows_flush_threshold>
1353
+ <flush_on_crash>true</flush_on_crash>
1354
+ </crash_log>
1355
+
1356
+ <!-- Session log. Stores user log in (successful or not) and log out events. -->
1357
+ <!-- <session_log>
1358
+ <database>system</database>
1359
+ <table>session_log</table>
1360
+
1361
+ <partition_by>toYYYYMM(event_date)</partition_by>
1362
+ <flush_interval_milliseconds>7500</flush_interval_milliseconds>
1363
+ <max_size_rows>1048576</max_size_rows>
1364
+ <reserved_size_rows>8192</reserved_size_rows>
1365
+ <buffer_size_rows_flush_threshold>524288</buffer_size_rows_flush_threshold>
1366
+ <flush_on_crash>false</flush_on_crash>
1367
+ </session_log> -->
1368
+
1369
+ <!-- Profiling on Processors level. -->
1370
+ <processors_profile_log>
1371
+ <database>system</database>
1372
+ <table>processors_profile_log</table>
1373
+
1374
+ <partition_by>toYYYYMM(event_date)</partition_by>
1375
+ <flush_interval_milliseconds>7500</flush_interval_milliseconds>
1376
+ <max_size_rows>1048576</max_size_rows>
1377
+ <reserved_size_rows>8192</reserved_size_rows>
1378
+ <buffer_size_rows_flush_threshold>524288</buffer_size_rows_flush_threshold>
1379
+ <flush_on_crash>false</flush_on_crash>
1380
+ <ttl>event_date + INTERVAL 30 DAY DELETE</ttl>
1381
+ </processors_profile_log>
1382
+
1383
+ <!-- Log of asynchronous inserts. It allows to check status
1384
+ of insert query in fire-and-forget mode.
1385
+ -->
1386
+ <asynchronous_insert_log>
1387
+ <database>system</database>
1388
+ <table>asynchronous_insert_log</table>
1389
+
1390
+ <flush_interval_milliseconds>7500</flush_interval_milliseconds>
1391
+ <max_size_rows>1048576</max_size_rows>
1392
+ <reserved_size_rows>8192</reserved_size_rows>
1393
+ <buffer_size_rows_flush_threshold>524288</buffer_size_rows_flush_threshold>
1394
+ <flush_on_crash>false</flush_on_crash>
1395
+ <partition_by>event_date</partition_by>
1396
+ <ttl>event_date + INTERVAL 3 DAY</ttl>
1397
+ </asynchronous_insert_log>
1398
+
1399
+ <!-- Backup/restore log.
1400
+ -->
1401
+ <backup_log>
1402
+ <database>system</database>
1403
+ <table>backup_log</table>
1404
+ <partition_by>toYYYYMM(event_date)</partition_by>
1405
+ <flush_interval_milliseconds>7500</flush_interval_milliseconds>
1406
+ </backup_log>
1407
+
1408
+ <!-- Storage S3Queue log.
1409
+ -->
1410
+ <s3queue_log>
1411
+ <database>system</database>
1412
+ <table>s3queue_log</table>
1413
+ <partition_by>toYYYYMM(event_date)</partition_by>
1414
+ <flush_interval_milliseconds>7500</flush_interval_milliseconds>
1415
+ </s3queue_log>
1416
+
1417
+ <!-- Blob storage object operations log.
1418
+ -->
1419
+ <blob_storage_log>
1420
+ <database>system</database>
1421
+ <table>blob_storage_log</table>
1422
+ <partition_by>toYYYYMM(event_date)</partition_by>
1423
+ <flush_interval_milliseconds>7500</flush_interval_milliseconds>
1424
+ <ttl>event_date + INTERVAL 30 DAY</ttl>
1425
+ </blob_storage_log>
1426
+
1427
+ <!-- Configure system.dashboards for dashboard.html.
1428
+
1429
+ Could have any query parameters, for which there will be an input on the page.
1430
+ For instance an example from comments have the following:
1431
+ - seconds
1432
+ - rounding
1433
+
1434
+ NOTE: All default dashboards will be overwritten if it was set here. -->
1435
+ <!-- Here is an example without merge() function, to make it work with readonly user -->
1436
+ <!--
1437
+ <dashboards>
1438
+ <dashboard>
1439
+ <dashboard>Overview</dashboard>
1440
+ <title>Queries/second</title>
1441
+ <query>
1442
+ SELECT toStartOfInterval(event_time, INTERVAL {rounding:UInt32} SECOND)::INT AS t, avg(ProfileEvent_Query)
1443
+ FROM system.metric_log
1444
+ WHERE event_date >= toDate(now() - {seconds:UInt32}) AND event_time >= now() - {seconds:UInt32}
1445
+ GROUP BY t
1446
+ ORDER BY t WITH FILL STEP {rounding:UInt32}
1447
+ </query>
1448
+ </dashboard>
1449
+ </dashboards>
1450
+ -->
1451
+
1452
+ <!-- <top_level_domains_path>/var/lib/clickhouse/top_level_domains/</top_level_domains_path> -->
1453
+ <!-- Custom TLD lists.
1454
+ Format: <name>/path/to/file</name>
1455
+
1456
+ Changes will not be applied w/o server restart.
1457
+ Path to the list is under top_level_domains_path (see above).
1458
+ -->
1459
+ <top_level_domains_lists>
1460
+ <!--
1461
+ <public_suffix_list>/path/to/public_suffix_list.dat</public_suffix_list>
1462
+ -->
1463
+ </top_level_domains_lists>
1464
+
1465
+ <!-- Configuration of external dictionaries. See:
1466
+ https://clickhouse.com/docs/en/sql-reference/dictionaries/external-dictionaries/external-dicts
1467
+ -->
1468
+ <dictionaries_config>*_dictionary.*ml</dictionaries_config>
1469
+
1470
+ <!-- Load dictionaries lazily, i.e. a dictionary will be loaded when it's used for the first time.
1471
+ "false" means ClickHouse will start loading dictionaries immediately at startup.
1472
+ -->
1473
+ <dictionaries_lazy_load>true</dictionaries_lazy_load>
1474
+
1475
+ <!-- Wait at startup until all the dictionaries finish their loading (successfully or not)
1476
+ before receiving any connections. Affects dictionaries only if "dictionaries_lazy_load" is false.
1477
+ Setting this to false can make ClickHouse start faster, however some queries can be executed slower.
1478
+ -->
1479
+ <wait_dictionaries_load_at_startup>true</wait_dictionaries_load_at_startup>
1480
+
1481
+ <!-- Configuration of user defined executable functions -->
1482
+ <user_defined_executable_functions_config>*_function.*ml</user_defined_executable_functions_config>
1483
+
1484
+ <!-- Path in ZooKeeper to store user-defined SQL functions created by the command CREATE FUNCTION.
1485
+ If not specified they will be stored locally. -->
1486
+ <!-- <user_defined_zookeeper_path>/clickhouse/user_defined</user_defined_zookeeper_path> -->
1487
+
1488
+ <!-- Path in ZooKeeper to store workload and resource created by the command CREATE WORKLOAD and CREATE RESOURCE.
1489
+ If not specified they will be stored locally. -->
1490
+ <!-- <workload_zookeeper_path>/clickhouse/workload/definitions.sql</workload_zookeeper_path> -->
1491
+
1492
+ <!-- Uncomment if you want data to be compressed 30-100% better.
1493
+ Don't do that if you just started using ClickHouse.
1494
+ Note: ClickHouse Cloud https://clickhouse.com/cloud has a stronger compression by default.
1495
+ -->
1496
+ <!--
1497
+ <compression>
1498
+ <!- - Set of variants. Checked in order. Last matching case wins. If nothing matches, lz4 will be used. - ->
1499
+ <case>
1500
+
1501
+ <!- - Conditions. All must be satisfied. Some conditions may be omitted. - ->
1502
+ <min_part_size>10000000000</min_part_size> <!- - Min part size in bytes. - ->
1503
+ <min_part_size_ratio>0.01</min_part_size_ratio> <!- - Min size of part relative to whole table size. - ->
1504
+
1505
+ <!- - What compression method to use. - ->
1506
+ <method>zstd</method>
1507
+ </case>
1508
+ </compression>
1509
+ -->
1510
+
1511
+ <!-- Configuration of encryption. The server executes a command to
1512
+ obtain an encryption key at startup if such a command is
1513
+ defined, or encryption codecs will be disabled otherwise. The
1514
+ command is executed through /bin/sh and is expected to write
1515
+ a Base64-encoded key to the stdout.
1516
+
1517
+ Note: ClickHouse Cloud https://clickhouse.com/cloud supports encryption with customer-managed keys.
1518
+ -->
1519
+ <encryption_codecs>
1520
+ <!-- aes_128_gcm_siv -->
1521
+ <!-- Example of getting hex key from env -->
1522
+ <!-- the code should use this key and throw an exception if its length is not 16 bytes -->
1523
+ <!-- key_hex from_env="..."></key_hex -->
1524
+
1525
+ <!-- Example of multiple hex keys. They can be imported from env or be written down in config -->
1526
+ <!-- the code should use these keys and throw an exception if their length is not 16 bytes -->
1527
+ <!-- key_hex id="0">...</key_hex -->
1528
+ <!-- key_hex id="1" from_env=".."></key_hex -->
1529
+ <!-- key_hex id="2">...</key_hex -->
1530
+ <!-- current_key_id>2</current_key_id -->
1531
+
1532
+ <!-- Example of getting hex key from config -->
1533
+ <!-- the code should use this key and throw an exception if its length is not 16 bytes -->
1534
+ <!-- key>...</key -->
1535
+
1536
+ <!-- example of adding nonce -->
1537
+ <!-- nonce>...</nonce -->
1538
+
1539
+ <!-- /aes_128_gcm_siv -->
1540
+ </encryption_codecs>
1541
+
1542
+ <!-- Allow to execute distributed DDL queries (CREATE, DROP, ALTER, RENAME) on cluster.
1543
+ Works only if ZooKeeper is enabled. Comment it if such functionality isn't required.
1544
+ Note: ClickHouse Cloud https://clickhouse.com/cloud always runs DDL queries on cluster.
1545
+ -->
1546
+ <distributed_ddl>
1547
+ <!-- Path in ZooKeeper to queue with DDL queries -->
1548
+ <path>/clickhouse/task_queue/ddl</path>
1549
+ <!-- Path in ZooKeeper to store running DDL hosts -->
1550
+ <replicas_path>/clickhouse/task_queue/replicas</replicas_path>
1551
+
1552
+ <!-- Settings from this profile will be used to execute DDL queries -->
1553
+ <!-- <profile>default</profile> -->
1554
+
1555
+ <!-- Controls how many ON CLUSTER queries can be run simultaneously. -->
1556
+ <!-- <pool_size>1</pool_size> -->
1557
+
1558
+ <!--
1559
+ Cleanup settings (active tasks will not be removed)
1560
+ -->
1561
+
1562
+ <!-- Controls task TTL (default 1 week) -->
1563
+ <!-- <task_max_lifetime>604800</task_max_lifetime> -->
1564
+
1565
+ <!-- Controls how often cleanup should be performed (in seconds) -->
1566
+ <!-- <cleanup_delay_period>60</cleanup_delay_period> -->
1567
+
1568
+ <!-- Controls how many tasks could be in the queue -->
1569
+ <!-- <max_tasks_in_queue>1000</max_tasks_in_queue> -->
1570
+
1571
+ <!-- Host name of the current node. If specified, will only compare and not resolve hostnames inside the DDL tasks -->
1572
+ <!-- <host_name>replica</host_name> -->
1573
+ </distributed_ddl>
1574
+
1575
+ <!-- Workload scheduling: used to regulate how resources are utilized and shared between merges, mutations and other workloads.
1576
+ Specified value is used as `workload` setting value for background merge or mutation.
1577
+ -->
1578
+ <!--
1579
+ <merge_workload>merges_and_mutations</merge_workload>
1580
+ <mutation_workload>merges_and_mutations</mutation_workload>
1581
+ -->
1582
+
1583
+ <!-- Workload scheduling: throw or provide unlimited access to resource given unknown `workload` query setting -->
1584
+ <!-- <throw_on_unknown_workload>true</throw_on_unknown_workload> -->
1585
+
1586
+ <!-- Workload scheduling: if enabled during CPU overloaded periods, long-running queries will downscale to lower number of threads dynamically.
1587
+ Ensures more fair CPU resource distribution.
1588
+ -->
1589
+ <!--
1590
+ <cpu_slot_preemption>true</cpu_slot_preemption>
1591
+ <cpu_slot_quantum_ns>10000000</cpu_slot_quantum_ns>
1592
+ <cpu_slot_preemption_timeout_ms>1000</cpu_slot_preemption_timeout_ms>
1593
+ -->
1594
+
1595
+ <!-- Settings to fine-tune MergeTree tables. See documentation in source code, in MergeTreeSettings.h -->
1596
+ <!--
1597
+ <merge_tree>
1598
+ <max_suspicious_broken_parts>5</max_suspicious_broken_parts>
1599
+ </merge_tree>
1600
+ -->
1601
+
1602
+ <!-- Settings to fine-tune ReplicatedMergeTree tables. See documentation in source code, in MergeTreeSettings.h
1603
+ Note: ClickHouse Cloud https://clickhouse.com/cloud has a SharedMergeTree engine that does not require fine-tuning.
1604
+ -->
1605
+ <!--
1606
+ <replicated_merge_tree>
1607
+ <max_replicated_fetches_network_bandwidth>1000000000</max_replicated_fetches_network_bandwidth>
1608
+ </replicated_merge_tree>
1609
+ -->
1610
+
1611
+ <!-- Settings to fine-tune DatabaseReplicatedSettings. See documentation in source code, in DatabaseReplicatedSettings.h -->
1612
+ <!--
1613
+ <database_replicated>
1614
+ <max_broken_tables_ratio>1</max_broken_tables_ratio>
1615
+ <max_replication_lag_to_enqueue>50</max_replication_lag_to_enqueue>
1616
+ <wait_entry_commited_timeout_sec>3600</wait_entry_commited_timeout_sec>
1617
+ <collection_name>default_collection</collection_name>
1618
+ <check_consistency>true</check_consistency>
1619
+ <max_retries_before_automatic_recovery>10</max_retries_before_automatic_recovery>
1620
+ </database_replicated>
1621
+ -->
1622
+
1623
+ <!-- Settings to fine-tune Distributed tables. See documentation in source code, in DistributedSettings.h -->
1624
+ <!--
1625
+ <distributed>
1626
+ <flush_on_detach>false</flush_on_detach>
1627
+ </distributed>
1628
+ -->
1629
+
1630
+ <!-- Protection from accidental DROP.
1631
+ If size of a MergeTree table is greater than max_table_size_to_drop (in bytes) than table could not be dropped with any DROP query.
1632
+ If you want do delete one table and don't want to change clickhouse-server config, you could create special file <clickhouse-path>/flags/force_drop_table and make DROP once.
1633
+ By default max_table_size_to_drop is 50GB; max_table_size_to_drop=0 allows to DROP any tables.
1634
+ The same for max_partition_size_to_drop.
1635
+ Uncomment to disable protection.
1636
+ -->
1637
+ <!-- <max_table_size_to_drop>0</max_table_size_to_drop> -->
1638
+ <!-- <max_partition_size_to_drop>0</max_partition_size_to_drop> -->
1639
+
1640
+ <!-- Example of parameters for GraphiteMergeTree table engine -->
1641
+ <graphite_rollup_example>
1642
+ <pattern>
1643
+ <regexp>click_cost</regexp>
1644
+ <function>any</function>
1645
+ <retention>
1646
+ <age>0</age>
1647
+ <precision>3600</precision>
1648
+ </retention>
1649
+ <retention>
1650
+ <age>86400</age>
1651
+ <precision>60</precision>
1652
+ </retention>
1653
+ </pattern>
1654
+ <default>
1655
+ <function>max</function>
1656
+ <retention>
1657
+ <age>0</age>
1658
+ <precision>60</precision>
1659
+ </retention>
1660
+ <retention>
1661
+ <age>3600</age>
1662
+ <precision>300</precision>
1663
+ </retention>
1664
+ <retention>
1665
+ <age>86400</age>
1666
+ <precision>3600</precision>
1667
+ </retention>
1668
+ </default>
1669
+ </graphite_rollup_example>
1670
+
1671
+ <!-- Directory in <clickhouse-path> containing schema files for various input formats.
1672
+ The directory will be created if it doesn't exist.
1673
+ -->
1674
+ <format_schema_path>/var/lib/clickhouse/format_schemas/</format_schema_path>
1675
+
1676
+ <!-- Directory containing the proto files for the well-known Protobuf types.
1677
+ -->
1678
+ <google_protos_path>/usr/share/clickhouse/protos/</google_protos_path>
1679
+
1680
+ <!-- Default query masking rules, matching lines would be replaced with something else in the logs
1681
+ (both text logs and system.query_log).
1682
+ name - name for the rule (optional)
1683
+ regexp - RE2 compatible regular expression (mandatory)
1684
+ replace - substitution string for sensitive data (optional, by default - six asterisks)
1685
+ <query_masking_rules>
1686
+ <rule>
1687
+ <name>hide encrypt/decrypt arguments</name>
1688
+ <regexp>((?:aes_)?(?:encrypt|decrypt)(?:_mysql)?)\s*\(\s*(?:'(?:\\'|.)+'|.*?)\s*\)</regexp>
1689
+ <replace>\1(???)</replace>
1690
+ </rule>
1691
+ </query_masking_rules> -->
1692
+
1693
+ <!-- Uncomment to use custom http handlers.
1694
+
1695
+ rules are checked from top to bottom, first match runs the handler
1696
+ url - to match request URL (path and query string only), you can use 'regex:' prefix to use regex match(optional)
1697
+ full_url - to match request **full** URL (schema, host:port, path and query string), you can use 'regex:' prefix to use regex match(optional)
1698
+ note, ClickHouse does not support "virtual hosts" so "host" will contain IP address not the "Host" header.
1699
+ empty_query_string - check that there is no query string in the URL
1700
+ methods - to match request method, you can use commas to separate multiple method matches(optional)
1701
+ headers - to match request headers, match each child element (child element name is header name), you can use 'regex:' prefix to use regex match(optional)
1702
+
1703
+ handler is request handler
1704
+ type - supported types: static, dynamic_query_handler, predefined_query_handler, redirect
1705
+ query - use with predefined_query_handler type, executes query when the handler is called
1706
+ query_param_name - use with dynamic_query_handler type, extracts and executes the value corresponding to the <query_param_name> value in HTTP request params
1707
+ status - use with static type, response status code
1708
+ content_type - use with static type, response content-type
1709
+ response_content - use with static type, response content sent to client, when using the prefix 'file://' or 'config://', find the content from the file or configuration send to client.
1710
+ url - a location for redirect
1711
+
1712
+ Along with a list of rules, you can specify <defaults/> which means - enable all the default handlers.
1713
+
1714
+ <http_handlers>
1715
+ <rule>
1716
+ <url>/</url>
1717
+ <methods>POST,GET</methods>
1718
+ <headers><pragma>no-cache</pragma></headers>
1719
+ <handler>
1720
+ <type>dynamic_query_handler</type>
1721
+ <query_param_name>query</query_param_name>
1722
+ </handler>
1723
+ </rule>
1724
+
1725
+ <rule>
1726
+ <url>/predefined_query</url>
1727
+ <methods>POST,GET</methods>
1728
+ <handler>
1729
+ <type>predefined_query_handler</type>
1730
+ <query>SELECT * FROM system.settings</query>
1731
+ </handler>
1732
+ </rule>
1733
+
1734
+ <rule>
1735
+ <url>/play</url>
1736
+ <handler>
1737
+ <type>redirect</type>
1738
+ <location>/play?user=play</location>
1739
+ </handler>
1740
+ </rule>
1741
+
1742
+ <rule>
1743
+ <full_url>regex:http?://[^/]/dashboard</full_url>
1744
+ <handler>
1745
+ <type>redirect</type>
1746
+ <location>/dashboard?user=play</location>
1747
+ </handler>
1748
+ </rule>
1749
+
1750
+ <rule>
1751
+ <handler>
1752
+ <type>static</type>
1753
+ <status>200</status>
1754
+ <content_type>text/plain; charset=UTF-8</content_type>
1755
+ <response_content>config://http_server_default_response</response_content>
1756
+ </handler>
1757
+ </rule>
1758
+ </http_handlers>
1759
+ -->
1760
+
1761
+ <send_crash_reports>
1762
+ <!-- Setting <enabled> to true allows sending crash reports to -->
1763
+ <!-- the ClickHouse core developers team. -->
1764
+ <!-- Doing so at least in pre-production environments is highly appreciated -->
1765
+ <!-- The reports are anonymized -->
1766
+ <enabled>true</enabled>
1767
+ <send_logical_errors>true</send_logical_errors>
1768
+ <endpoint>https://crash.clickhouse.com/</endpoint>
1769
+ </send_crash_reports>
1770
+
1771
+ <!-- Uncomment to disable ClickHouse internal DNS caching. -->
1772
+ <!-- <disable_internal_dns_cache>1</disable_internal_dns_cache> -->
1773
+
1774
+ <!-- You can also configure rocksdb like this: -->
1775
+ <!-- Full list of options:
1776
+ - options:
1777
+ - https://github.com/facebook/rocksdb/blob/4b013dcbed2df84fde3901d7655b9b91c557454d/include/rocksdb/options.h#L1452
1778
+ - column_family_options:
1779
+ - https://github.com/facebook/rocksdb/blob/4b013dcbed2df84fde3901d7655b9b91c557454d/include/rocksdb/options.h#L66
1780
+ - block_based_table_options:
1781
+ - https://github.com/facebook/rocksdb/blob/4b013dcbed2df84fde3901d7655b9b91c557454d/table/block_based/block_based_table_factory.cc#L228
1782
+ - https://github.com/facebook/rocksdb/blob/4b013dcbed2df84fde3901d7655b9b91c557454d/include/rocksdb/table.h#L129
1783
+ -->
1784
+ <!--
1785
+ <rocksdb>
1786
+ <options>
1787
+ <max_background_jobs>8</max_background_jobs>
1788
+ <info_log_level>DEBUG_LEVEL</info_log_level>
1789
+ </options>
1790
+ <column_family_options>
1791
+ <num_levels>2</num_levels>
1792
+ </column_family_options>
1793
+ <block_based_table_options>
1794
+ <block_size>1024</block_size>
1795
+ </block_based_table_options>
1796
+ <tables>
1797
+ <table>
1798
+ <name>TABLE</name>
1799
+ <options>
1800
+ <max_background_jobs>8</max_background_jobs>
1801
+ </options>
1802
+ <column_family_options>
1803
+ <num_levels>2</num_levels>
1804
+ </column_family_options>
1805
+ <block_based_table_options>
1806
+ <block_size>1024</block_size>
1807
+ </block_based_table_options>
1808
+ </table>
1809
+ </tables>
1810
+ </rocksdb>
1811
+ -->
1812
+
1813
+ <!-- <kafka> -->
1814
+ <!-- Global configuration properties -->
1815
+ <!--
1816
+ NOTE: statistics should be consumed, otherwise it creates too many
1817
+ entries in the queue that leads to memory leak and slow shutdown.
1818
+ default value: 0
1819
+ <statistics_interval_ms>3000</statistics_interval_ms>
1820
+ -->
1821
+
1822
+ <!-- Topic configuration properties -->
1823
+ <!--
1824
+ <kafka_topic>
1825
+ <name>football</name>
1826
+ <request_timeout_ms>6000</request_timeout_ms>
1827
+ </kafka_topic>
1828
+ -->
1829
+
1830
+ <!-- Producer configuration -->
1831
+ <!--
1832
+ <producer>
1833
+ <compression_codec>gzip</compression_codec>
1834
+ <kafka_topic>
1835
+ <name>football</name>
1836
+ <request_timeout_ms>6000</request_timeout_ms>
1837
+ </kafka_topic>
1838
+ </producer>
1839
+ -->
1840
+
1841
+ <!-- Consumer configuration -->
1842
+ <!--
1843
+ <consumer>
1844
+ <enable_auto_commit>true</enable_auto_commit>
1845
+ </consumer>
1846
+ -->
1847
+ <!-- </kafka> -->
1848
+
1849
+ <!-- Note: ClickHouse Cloud https://clickhouse.com/cloud provides automatic backups to object storage. -->
1850
+ <backups>
1851
+ <allowed_path>backups</allowed_path>
1852
+
1853
+ <!-- If the BACKUP command fails and this setting is true then the files
1854
+ copied before the failure will be removed automatically.
1855
+ -->
1856
+ <remove_backup_files_after_failure>true</remove_backup_files_after_failure>
1857
+ </backups>
1858
+
1859
+ <!-- This allows to disable exposing addresses in stack traces for security reasons.
1860
+ Please be aware that it does not improve security much, but makes debugging much harder.
1861
+ The addresses that are small offsets from zero will be displayed nevertheless to show nullptr dereferences.
1862
+ Regardless of this configuration, the addresses are visible in the system.stack_trace and system.trace_log tables
1863
+ if the user has access to these tables.
1864
+ I don't recommend to change this setting.
1865
+ <show_addresses_in_stack_traces>false</show_addresses_in_stack_traces>
1866
+ -->
1867
+
1868
+ <!-- On Linux systems this can control the behavior of OOM killer.
1869
+ <oom_score>-1000</oom_score>
1870
+ -->
1871
+
1872
+ <!-- Delay (in seconds) to wait for unfinished queries before force exit -->
1873
+ <!-- <shutdown_wait_unfinished>5</shutdown_wait_unfinished> -->
1874
+
1875
+ <!-- If set true ClickHouse will wait for running queries finish before shutdown. -->
1876
+ <!-- <shutdown_wait_unfinished_queries>false</shutdown_wait_unfinished_queries> -->
1877
+
1878
+ <!-- Disables the checksum of the clickhouse-server binary when performing integrity checks.
1879
+ Not recommended in production environments, but there may be legitimate use-cases, such as
1880
+ when instrumenting ClickHouse via eBPF probes.
1881
+
1882
+ <skip_binary_checksum_checks>false</skip_binary_checksum_checks>
1883
+ -->
1884
+
1885
+
1886
+
1887
+
1888
+ </clickhouse>
platform/dbops/archive/databases_old/data/clickhouse/data/preprocessed_configs/users.xml ADDED
@@ -0,0 +1,126 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ <!-- This file was generated automatically.
2
+ Do not edit it: it is likely to be discarded and generated again before it's read next time.
3
+ Files used to generate this file:
4
+ /data/data/clickhouse/config/users.xml -->
5
+
6
+ <clickhouse>
7
+ <!-- See also the files in users.d directory where the settings can be overridden. -->
8
+
9
+ <!-- Profiles of settings. -->
10
+ <profiles>
11
+ <!-- Default settings. -->
12
+ <default>
13
+ <!-- <async_insert>1</async_insert> -->
14
+ </default>
15
+
16
+ <!-- Profile that allows only read queries. -->
17
+ <readonly>
18
+ <readonly>1</readonly>
19
+ </readonly>
20
+ </profiles>
21
+
22
+ <!-- Users and ACL. -->
23
+ <users>
24
+ <!-- If user name was not specified, 'default' user is used. -->
25
+ <default>
26
+ <!-- See also the files in users.d directory where the password can be overridden.
27
+
28
+ Password could be specified in plaintext or in SHA256 (in hex format).
29
+
30
+ If you want to specify password in plaintext (not recommended), place it in 'password' element.
31
+ Example: <password>qwerty</password>.
32
+ Password could be empty.
33
+
34
+ If you want to specify SHA256, place it in 'password_sha256_hex' element.
35
+ Example: <password_sha256_hex>65e84be33532fb784c48129675f9eff3a682b27168c0ea744b2cf58ee02337c5</password_sha256_hex>
36
+ Restrictions of SHA256: impossibility to connect to ClickHouse using MySQL JS client (as of July 2019).
37
+
38
+ If you want to specify double SHA1, place it in 'password_double_sha1_hex' element.
39
+ Example: <password_double_sha1_hex>e395796d6546b1b65db9d665cd43f0e858dd4303</password_double_sha1_hex>
40
+
41
+ If you want to specify a previously defined LDAP server (see 'ldap_servers' in the main config) for authentication,
42
+ place its name in 'server' element inside 'ldap' element.
43
+ Example: <ldap><server>my_ldap_server</server></ldap>
44
+
45
+ If you want to authenticate the user via Kerberos (assuming Kerberos is enabled, see 'kerberos' in the main config),
46
+ place 'kerberos' element instead of 'password' (and similar) elements.
47
+ The name part of the canonical principal name of the initiator must match the user name for authentication to succeed.
48
+ You can also place 'realm' element inside 'kerberos' element to further restrict authentication to only those requests
49
+ whose initiator's realm matches it.
50
+ Example: <kerberos />
51
+ Example: <kerberos><realm>EXAMPLE.COM</realm></kerberos>
52
+
53
+ How to generate decent password:
54
+ Execute: PASSWORD=$(base64 < /dev/urandom | head -c8); echo "$PASSWORD"; echo -n "$PASSWORD" | sha256sum | tr -d '-'
55
+ In first line will be password and in second - corresponding SHA256.
56
+
57
+ How to generate double SHA1:
58
+ Execute: PASSWORD=$(base64 < /dev/urandom | head -c8); echo "$PASSWORD"; echo -n "$PASSWORD" | sha1sum | tr -d '-' | xxd -r -p | sha1sum | tr -d '-'
59
+ In first line will be password and in second - corresponding double SHA1.
60
+ -->
61
+ <password/>
62
+
63
+ <!-- List of networks with open access.
64
+
65
+ To open access from everywhere, specify:
66
+ <ip>::/0</ip>
67
+
68
+ To open access only from localhost, specify:
69
+ <ip>::1</ip>
70
+ <ip>127.0.0.1</ip>
71
+
72
+ Each element of list has one of the following forms:
73
+ <ip> IP-address or network mask. Examples: 213.180.204.3 or 10.0.0.1/8 or 10.0.0.1/255.255.255.0
74
+ 2a02:6b8::3 or 2a02:6b8::3/64 or 2a02:6b8::3/ffff:ffff:ffff:ffff::.
75
+ <host> Hostname. Example: server01.clickhouse.com.
76
+ To check access, DNS query is performed, and all received addresses compared to peer address.
77
+ <host_regexp> Regular expression for host names. Example, ^server\d\d-\d\d-\d\.clickhouse\.com$
78
+ To check access, DNS PTR query is performed for peer address and then regexp is applied.
79
+ Then, for result of PTR query, another DNS query is performed and all received addresses compared to peer address.
80
+ Strongly recommended that regexp is ends with $
81
+ All results of DNS requests are cached till server restart.
82
+ -->
83
+ <networks>
84
+ <ip>::/0</ip>
85
+ </networks>
86
+
87
+ <!-- Settings profile for user. -->
88
+ <profile>default</profile>
89
+
90
+ <!-- Quota for user. -->
91
+ <quota>default</quota>
92
+
93
+ <!-- User can create other users and grant rights to them. -->
94
+ <access_management>1</access_management>
95
+
96
+ <!-- User can manipulate named collections. -->
97
+ <named_collection_control>1</named_collection_control>
98
+
99
+ <!-- User permissions can be granted here -->
100
+ <!--
101
+ <grants>
102
+ <query>GRANT ALL ON *.*</query>
103
+ </grants>
104
+ -->
105
+ </default>
106
+ </users>
107
+
108
+ <!-- Quotas. -->
109
+ <quotas>
110
+ <!-- Name of quota. -->
111
+ <default>
112
+ <!-- Limits for time interval. You could specify many intervals with different limits. -->
113
+ <interval>
114
+ <!-- Length of interval. -->
115
+ <duration>3600</duration>
116
+
117
+ <!-- No limits. Just calculate resource usage for time interval. -->
118
+ <queries>0</queries>
119
+ <errors>0</errors>
120
+ <result_rows>0</result_rows>
121
+ <read_rows>0</read_rows>
122
+ <execution_time>0</execution_time>
123
+ </interval>
124
+ </default>
125
+ </quotas>
126
+ </clickhouse>
platform/dbops/archive/databases_old/data/clickhouse/data/status ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ PID: 487320
2
+ Started at: 2025-08-29 00:53:01
3
+ Revision: 54502
platform/dbops/archive/databases_old/data/clickhouse/data/store/256/256e67ed-4fbf-46e0-9e3a-4113943de34d/202508_1061_1919_203/checksums.txt ADDED
Binary file (910 Bytes). View file
 
platform/dbops/archive/databases_old/data/clickhouse/data/store/256/256e67ed-4fbf-46e0-9e3a-4113943de34d/202508_1061_1919_203/columns.txt ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ columns format version: 1
2
+ 5 columns:
3
+ `hostname` LowCardinality(String)
4
+ `event_date` Date
5
+ `event_time` DateTime
6
+ `metric` LowCardinality(String)
7
+ `value` Float64
platform/dbops/archive/databases_old/data/clickhouse/data/store/256/256e67ed-4fbf-46e0-9e3a-4113943de34d/202508_1061_1919_203/columns_substreams.txt ADDED
@@ -0,0 +1,14 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ columns substreams version: 1
2
+ 5 columns:
3
+ 2 substreams for column `hostname`:
4
+ hostname.dict
5
+ hostname
6
+ 1 substreams for column `event_date`:
7
+ event_date
8
+ 1 substreams for column `event_time`:
9
+ event_time
10
+ 2 substreams for column `metric`:
11
+ metric.dict
12
+ metric
13
+ 1 substreams for column `value`:
14
+ value
platform/dbops/archive/databases_old/data/clickhouse/data/store/256/256e67ed-4fbf-46e0-9e3a-4113943de34d/202508_1061_1919_203/count.txt ADDED
@@ -0,0 +1 @@
 
 
1
+ 14358335
platform/dbops/archive/databases_old/data/clickhouse/data/store/256/256e67ed-4fbf-46e0-9e3a-4113943de34d/202508_1061_1919_203/default_compression_codec.txt ADDED
@@ -0,0 +1 @@
 
 
1
+ CODEC(LZ4)
platform/dbops/archive/databases_old/data/clickhouse/data/store/256/256e67ed-4fbf-46e0-9e3a-4113943de34d/202508_1061_1919_203/event_date.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:7632c9a21bcb7a0d16f5b75658245371f89dd494eac33ff56be7f34274d267e6
3
+ size 30290
platform/dbops/archive/databases_old/data/clickhouse/data/store/256/256e67ed-4fbf-46e0-9e3a-4113943de34d/202508_1061_1919_203/event_date.cmrk2 ADDED
Binary file (2.25 kB). View file
 
platform/dbops/archive/databases_old/data/clickhouse/data/store/256/256e67ed-4fbf-46e0-9e3a-4113943de34d/202508_1061_1919_203/event_time.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:dc9a90f9ac29d7961f78b8a52194e222f21606fa2ec05e2b80a700ce5a094d59
3
+ size 91656
platform/dbops/archive/databases_old/data/clickhouse/data/store/256/256e67ed-4fbf-46e0-9e3a-4113943de34d/202508_1061_1919_203/event_time.cmrk2 ADDED
Binary file (3.05 kB). View file
 
platform/dbops/archive/databases_old/data/clickhouse/data/store/256/256e67ed-4fbf-46e0-9e3a-4113943de34d/202508_1061_1919_203/hostname.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:710f61cdfd90786f9aa7a44ab4e53b59a6b5b157fadacbe296832090ec811cd4
3
+ size 12548
platform/dbops/archive/databases_old/data/clickhouse/data/store/256/256e67ed-4fbf-46e0-9e3a-4113943de34d/202508_1061_1919_203/hostname.cmrk2 ADDED
Binary file (1.11 kB). View file
 
platform/dbops/archive/databases_old/data/clickhouse/data/store/256/256e67ed-4fbf-46e0-9e3a-4113943de34d/202508_1061_1919_203/hostname.dict.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:3f1c5a6328e6525d12aad404a7a0a0282c778d25085ae138cad777921b618fe1
3
+ size 59
platform/dbops/archive/databases_old/data/clickhouse/data/store/256/256e67ed-4fbf-46e0-9e3a-4113943de34d/202508_1061_1919_203/hostname.dict.cmrk2 ADDED
Binary file (63 Bytes). View file
 
platform/dbops/archive/databases_old/data/clickhouse/data/store/256/256e67ed-4fbf-46e0-9e3a-4113943de34d/202508_1061_1919_203/metadata_version.txt ADDED
@@ -0,0 +1 @@
 
 
1
+ 0
platform/dbops/archive/databases_old/data/clickhouse/data/store/256/256e67ed-4fbf-46e0-9e3a-4113943de34d/202508_1061_1919_203/metric.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5c029b0875a7015fc8a00d3c9268f5b44352cef667d63432a3050463c5cffbb4
3
+ size 46242
platform/dbops/archive/databases_old/data/clickhouse/data/store/256/256e67ed-4fbf-46e0-9e3a-4113943de34d/202508_1061_1919_203/metric.cmrk2 ADDED
Binary file (1.62 kB). View file
 
platform/dbops/archive/databases_old/data/clickhouse/data/store/256/256e67ed-4fbf-46e0-9e3a-4113943de34d/202508_1061_1919_203/metric.dict.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:cedc211a864a51f546187ff3921222bbf2d7ef7826902fd0a694e548921a02c3
3
+ size 5186
platform/dbops/archive/databases_old/data/clickhouse/data/store/256/256e67ed-4fbf-46e0-9e3a-4113943de34d/202508_1061_1919_203/metric.dict.cmrk2 ADDED
Binary file (65 Bytes). View file
 
platform/dbops/archive/databases_old/data/clickhouse/data/store/256/256e67ed-4fbf-46e0-9e3a-4113943de34d/202508_1061_1919_203/minmax_event_date.idx ADDED
@@ -0,0 +1 @@
 
 
1
+ iOiO
platform/dbops/archive/databases_old/data/clickhouse/data/store/256/256e67ed-4fbf-46e0-9e3a-4113943de34d/202508_1061_1919_203/partition.dat ADDED
Binary file (4 Bytes). View file
 
platform/dbops/archive/databases_old/data/clickhouse/data/store/256/256e67ed-4fbf-46e0-9e3a-4113943de34d/202508_1061_1919_203/primary.cidx ADDED
Binary file (10.1 kB). View file
 
platform/dbops/archive/databases_old/data/clickhouse/data/store/256/256e67ed-4fbf-46e0-9e3a-4113943de34d/202508_1061_1919_203/serialization.json ADDED
@@ -0,0 +1 @@
 
 
1
+ {"columns":[{"kind":"Default","name":"event_date","num_defaults":0,"num_rows":14358335},{"kind":"Default","name":"event_time","num_defaults":0,"num_rows":14358335},{"kind":"Default","name":"value","num_defaults":10438262,"num_rows":14358335}],"version":0}
platform/dbops/archive/databases_old/data/clickhouse/data/store/256/256e67ed-4fbf-46e0-9e3a-4113943de34d/202508_1061_1919_203/value.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:bd44b6820478842142f624bb269182a4e9ff29076e54b6c377995b765f24d383
3
+ size 4575473
platform/dbops/archive/databases_old/data/clickhouse/data/store/256/256e67ed-4fbf-46e0-9e3a-4113943de34d/202508_1061_1919_203/value.cmrk2 ADDED
Binary file (3.47 kB). View file
 
platform/dbops/archive/databases_old/data/clickhouse/data/store/256/256e67ed-4fbf-46e0-9e3a-4113943de34d/202508_1920_2448_162/columns.txt ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ columns format version: 1
2
+ 5 columns:
3
+ `hostname` LowCardinality(String)
4
+ `event_date` Date
5
+ `event_time` DateTime
6
+ `metric` LowCardinality(String)
7
+ `value` Float64
platform/dbops/archive/databases_old/data/clickhouse/data/store/256/256e67ed-4fbf-46e0-9e3a-4113943de34d/202508_1920_2448_162/columns_substreams.txt ADDED
@@ -0,0 +1,14 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ columns substreams version: 1
2
+ 5 columns:
3
+ 2 substreams for column `hostname`:
4
+ hostname.dict
5
+ hostname
6
+ 1 substreams for column `event_date`:
7
+ event_date
8
+ 1 substreams for column `event_time`:
9
+ event_time
10
+ 2 substreams for column `metric`:
11
+ metric.dict
12
+ metric
13
+ 1 substreams for column `value`:
14
+ value
platform/dbops/archive/databases_old/data/clickhouse/data/store/256/256e67ed-4fbf-46e0-9e3a-4113943de34d/202508_1920_2448_162/count.txt ADDED
@@ -0,0 +1 @@
 
 
1
+ 8851953
platform/dbops/archive/databases_old/data/clickhouse/data/store/256/256e67ed-4fbf-46e0-9e3a-4113943de34d/202508_1920_2448_162/event_date.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1f831d29053f3abc411522e4065a6b653457250b50c46355d93818bbc8621736
3
+ size 18698
platform/dbops/archive/databases_old/data/clickhouse/data/store/256/256e67ed-4fbf-46e0-9e3a-4113943de34d/202508_1920_2448_162/event_date.cmrk2 ADDED
Binary file (1.43 kB). View file
 
platform/dbops/archive/databases_old/data/clickhouse/data/store/256/256e67ed-4fbf-46e0-9e3a-4113943de34d/202508_1920_2448_162/event_time.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5d9377c9577de7113e7e65721d9cd6bb3d6766ed501eb20e926c3f821c3ea8ef
3
+ size 49063
platform/dbops/archive/databases_old/data/clickhouse/data/store/256/256e67ed-4fbf-46e0-9e3a-4113943de34d/202508_1920_2448_162/event_time.cmrk2 ADDED
Binary file (1.86 kB). View file
 
platform/dbops/archive/databases_old/data/clickhouse/data/store/256/256e67ed-4fbf-46e0-9e3a-4113943de34d/202508_1920_2448_162/hostname.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:bf0903a337f53e7e759e4622ed0cf4585046d08ad366ddc942f2bae323cc7b30
3
+ size 7760
platform/dbops/archive/databases_old/data/clickhouse/data/store/256/256e67ed-4fbf-46e0-9e3a-4113943de34d/202508_1920_2448_162/hostname.cmrk2 ADDED
Binary file (738 Bytes). View file
 
platform/dbops/archive/databases_old/data/clickhouse/data/store/256/256e67ed-4fbf-46e0-9e3a-4113943de34d/202508_1920_2448_162/hostname.dict.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:3f1c5a6328e6525d12aad404a7a0a0282c778d25085ae138cad777921b618fe1
3
+ size 59
platform/dbops/archive/databases_old/data/clickhouse/data/store/256/256e67ed-4fbf-46e0-9e3a-4113943de34d/202508_1920_2448_162/hostname.dict.cmrk2 ADDED
Binary file (63 Bytes). View file
 
platform/dbops/archive/databases_old/data/clickhouse/data/store/256/256e67ed-4fbf-46e0-9e3a-4113943de34d/202508_1920_2448_162/metric.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4f0dba0fa2be51805cd52b1eb8bda4a43d89c019ac588e2da43602c689945dfe
3
+ size 32172
platform/dbops/archive/databases_old/data/clickhouse/data/store/256/256e67ed-4fbf-46e0-9e3a-4113943de34d/202508_1920_2448_162/metric.cmrk2 ADDED
Binary file (1.07 kB). View file
 
platform/dbops/archive/databases_old/data/clickhouse/data/store/256/256e67ed-4fbf-46e0-9e3a-4113943de34d/202508_1920_2448_162/metric.dict.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a3db29abfdef71ace4396ca43cccea5940d48820ead7e57acc26e4394f7b0cd4
3
+ size 5200
platform/dbops/archive/databases_old/data/clickhouse/data/store/256/256e67ed-4fbf-46e0-9e3a-4113943de34d/202508_1920_2448_162/metric.dict.cmrk2 ADDED
Binary file (64 Bytes). View file
 
platform/dbops/archive/databases_old/data/clickhouse/data/store/256/256e67ed-4fbf-46e0-9e3a-4113943de34d/202508_1920_2448_162/minmax_event_date.idx ADDED
@@ -0,0 +1 @@
 
 
1
+ iOiO