
Initialization

master
张献维 2 days ago
commit 3977957e13
  1. 46  .codecov.yml
  2. 0  .dockerignore
  3. 10  .editorconfig
  4. 5  .gitattributes
  5. 23  .github/CODEOWNERS
  6. 19  .github/ISSUE_TEMPLATE/bug-report.md
  7. 27  .github/ISSUE_TEMPLATE/challenge-program.md
  8. 7  .github/ISSUE_TEMPLATE/development-task.md
  9. 19  .github/ISSUE_TEMPLATE/feature-request.md
  10. 20  .github/ISSUE_TEMPLATE/general-question.md
  11. 29  .github/ISSUE_TEMPLATE/performance-questions.md
  12. 33  .github/labeler.yml
  13. 45  .github/pull_request_template.md
  14. 58  .github/workflows/assign_project.yml
  15. 14  .github/workflows/labeler.yml
  16. 21  .gitignore
  17. 1267  CHANGELOG.md
  18. 3  CODE_OF_CONDUCT.md
  19. 4  CONTRIBUTING.md
  20. 1  CONTRIBUTORS.md
  21. 53  Dockerfile
  22. 11  Jenkinsfile
  23. 201  LICENSE
  24. 27  LICENSES/QL-LICENSE
  25. 27  LICENSES/STRUTIL-LICENSE
  26. 323  Makefile
  27. 91  README.md
  28. 1467  bindinfo/bind_test.go
  29. 259  bindinfo/cache.go
  30. 950  bindinfo/handle.go
  31. 121  bindinfo/session_handle.go
  32. 28  checklist.md
  33. 36  checkout-pr-branch.sh
  34. 12  circle.yml
  35. 62  cmd/benchdb/README.md
  36. 301  cmd/benchdb/main.go
  37. 121  cmd/benchfilesort/README.md
  38. 437  cmd/benchfilesort/main.go
  39. 138  cmd/benchkv/main.go
  40. 87  cmd/benchraw/main.go
  41. 224  cmd/ddltest/column_test.go
  42. 1028  cmd/ddltest/ddl_test.go
  43. 218  cmd/ddltest/index_test.go
  44. 55  cmd/ddltest/random.go
  45. 65  cmd/explaintest/README.md
  46. 12  cmd/explaintest/config.toml
  47. 713  cmd/explaintest/main.go
  48. 50  cmd/explaintest/main_test.go
  49. BIN  cmd/explaintest/portgenerator
  50. 44  cmd/explaintest/r/access_path_selection.result
  51. 56  cmd/explaintest/r/black_list.result
  52. 29  cmd/explaintest/r/explain-non-select-stmt.result
  53. 46  cmd/explaintest/r/explain.result
  54. 262  cmd/explaintest/r/explain_complex.result
  55. 227  cmd/explaintest/r/explain_complex_stats.result
  56. 786  cmd/explaintest/r/explain_easy.result
  57. 189  cmd/explaintest/r/explain_easy_stats.result
  58. 486  cmd/explaintest/r/explain_generate_column_substitute.result
  59. 128  cmd/explaintest/r/explain_indexmerge.result
  60. 27  cmd/explaintest/r/explain_join_stats.result
  61. 25  cmd/explaintest/r/explain_stats.result
  62. 31  cmd/explaintest/r/explain_union_scan.result
  63. 233  cmd/explaintest/r/generated_columns.result
  64. 54  cmd/explaintest/r/index_join.result
  65. 4386  cmd/explaintest/r/partition_pruning.result
  66. 482  cmd/explaintest/r/select.result
  67. 43  cmd/explaintest/r/subquery.result
  68. 271  cmd/explaintest/r/topn_push_down.result
  69. 8  cmd/explaintest/r/topn_pushdown.result
  70. 1301  cmd/explaintest/r/tpch.result
  71. 126  cmd/explaintest/r/window_function.result
  72. 200  cmd/explaintest/run-tests.sh
  73. 1  cmd/explaintest/s/explain_complex_stats_dd.json
  74. 1  cmd/explaintest/s/explain_complex_stats_dt.json
  75. 1  cmd/explaintest/s/explain_complex_stats_pp.json
  76. 1  cmd/explaintest/s/explain_complex_stats_rr.json
  77. 1  cmd/explaintest/s/explain_complex_stats_st.json
  78. 1  cmd/explaintest/s/explain_complex_stats_tbl_001.json
  79. 1  cmd/explaintest/s/explain_complex_stats_tbl_002.json
  80. 1  cmd/explaintest/s/explain_complex_stats_tbl_003.json
  81. 1  cmd/explaintest/s/explain_complex_stats_tbl_004.json
  82. 1  cmd/explaintest/s/explain_complex_stats_tbl_005.json
  83. 1  cmd/explaintest/s/explain_complex_stats_tbl_006.json
  84. 1  cmd/explaintest/s/explain_complex_stats_tbl_007.json
  85. 1  cmd/explaintest/s/explain_complex_stats_tbl_008.json
  86. 1  cmd/explaintest/s/explain_complex_stats_tbl_009.json
  87. 1  cmd/explaintest/s/explain_easy_stats_index_prune.json
  88. 1  cmd/explaintest/s/explain_easy_stats_t1.json
  89. 1  cmd/explaintest/s/explain_easy_stats_t2.json
  90. 1  cmd/explaintest/s/explain_easy_stats_t3.json
  91. 1  cmd/explaintest/s/explain_easy_stats_tbl_dnf.json
  92. 1  cmd/explaintest/s/explain_indexmerge_stats_t.json
  93. 46859  cmd/explaintest/s/explain_join_stats_e.json
  94. 1522  cmd/explaintest/s/explain_join_stats_lo.json
  95. 1  cmd/explaintest/s/explain_stats_t.json
  96. 55951  cmd/explaintest/s/explain_union_scan.json
  97. 1  cmd/explaintest/s/tpch_stats/customer.json
  98. 1  cmd/explaintest/s/tpch_stats/lineitem.json
  99. 1  cmd/explaintest/s/tpch_stats/nation.json
  100. 1  cmd/explaintest/s/tpch_stats/orders.json

46
.codecov.yml

@@ -0,0 +1,46 @@
codecov:
  notify:
    require_ci_to_pass: yes

coverage:
  precision: 4
  round: down
  range: "65...90"

  status:
    project:
      default:
        threshold: 0.2 # Allow the coverage to drop by up to threshold% and still post a success status.
    patch:
      default:
        target: 0% # trial operation
    changes: no

parsers:
  gcov:
    branch_detection:
      conditional: yes
      loop: yes
      method: no
      macro: no

comment:
  layout: "header, diff"
  behavior: default
  require_changes: no

ignore:
  - "LICENSES"
  - "*_test.go"
  - ".git"
  - "*.yml"
  - "*.md"
  - "cmd/.*"
  - "docs/.*"
  - "vendor/.*"
  - "ddl/failtest/.*"
  - "ddl/testutil/.*"
  - "executor/seqtest/.*"
  - "metrics/.*"
  - "expression/generator/.*"

0
.dockerignore

10
.editorconfig

@@ -0,0 +1,10 @@
[*]
end_of_line = lf
insert_final_newline = true
charset = utf-8

# tab_size = 4 spaces
[*.go]
indent_style = tab
indent_size = 4
trim_trailing_whitespace = true

5
.gitattributes

@@ -0,0 +1,5 @@
# Set the default behavior, in case people don't have core.autocrlf set.
* text=auto

# Declare files that will always have LF line endings on checkout.
*.y text eol=lf

23
.github/CODEOWNERS

@@ -0,0 +1,23 @@
/distsql @pingcap/co-exec
/executor @pingcap/co-exec
/expression @pingcap/co-exec
/types @pingcap/co-exec
/util/chunk @pingcap/co-exec
/util/disk @pingcap/co-exec
/util/execdetails @pingcap/co-exec
/util/expensivequery @pingcap/co-exec
/util/filesort @pingcap/co-exec
/util/memory @pingcap/co-exec
/util/sqlexec @pingcap/co-exec
/planner @pingcap/co-planner
/statistics @pingcap/co-planner
/util/ranger @pingcap/co-planner
/util/plancodec @pingcap/co-planner
/bindinfo @pingcap/co-planner
/ddl @pingcap/co-ddl
/domain @pingcap/co-ddl
/infoschema @pingcap/co-ddl
/meta @pingcap/co-ddl
/owner @pingcap/co-ddl

19
.github/ISSUE_TEMPLATE/bug-report.md

@@ -0,0 +1,19 @@
---
name: "\U0001F41B Bug Report"
about: As a User, I want to report a Bug.
labels: type/bug
---
## Bug Report
Please answer these questions before submitting your issue. Thanks!
### 1. What did you do?
<!-- If possible, provide a recipe for reproducing the error. -->
### 2. What did you expect to see?
### 3. What did you see instead?
### 4. What version of TiDB are you using? (`tidb-server -V` or run `select tidb_version();` on TiDB)

27
.github/ISSUE_TEMPLATE/challenge-program.md

@@ -0,0 +1,27 @@
---
name: "\U0001F947 Propose a Challenge Program task"
about: As a developer, I want to propose a Challenge Program task.
labels: challenge-program-2
---
## Description
## Score
* score number
## Mentor(s)
* [@xxxx](github url)
Contact the mentors: **#tidb-challenge-program** channel in [TiDB Community](https://join.slack.com/t/tidbcommunity/shared_invite/enQtNzc0MzI4ODExMDc4LWYwYmIzMjZkYzJiNDUxMmZlN2FiMGJkZjAyMzQ5NGU0NGY0NzI3NTYwMjAyNGQ1N2I2ZjAxNzc1OGUwYWM0NzE) Slack Workspace
## Recommended Skills
* skills 1
* skills 2
## Learning Materials
* Chinese: [Selected technical articles on TiDB](https://github.com/pingcap/presentations/blob/master/hackathon-2019/reference-document-of-hackathon-2019.md)
* English: [Awesome-Database-Learning](https://github.com/pingcap/awesome-database-learning)

7
.github/ISSUE_TEMPLATE/development-task.md

@@ -0,0 +1,7 @@
---
name: "\U0001F680 Development Task"
about: As a TiDB developer, I want to record a development task.
labels: type/enhancement
---
## Development Task

19
.github/ISSUE_TEMPLATE/feature-request.md

@@ -0,0 +1,19 @@
---
name: "\U0001F680 Feature Request"
about: As a user, I want to request a New Feature on the product.
labels: type/feature-request
---
## Feature Request
**Is your feature request related to a problem? Please describe:**
<!-- A clear and concise description of what the problem is. Ex. I'm always frustrated when [...] -->
**Describe the feature you'd like:**
<!-- A clear and concise description of what you want to happen. -->
**Describe alternatives you've considered:**
<!-- A clear and concise description of any alternative solutions or features you've considered. -->
**Teachability, Documentation, Adoption, Migration Strategy:**
<!-- If you can, explain some scenarios how users might use this, situations it would be helpful in. Any API designs, mockups, or diagrams are also helpful. -->

20
.github/ISSUE_TEMPLATE/general-question.md

@@ -0,0 +1,20 @@
---
name: "\U0001F914 Ask a Question"
about: I want to ask a question.
labels: type/question
---
## General Question
<!--
Before asking a question, make sure you have:
- Searched existing Stack Overflow questions.
- Googled your question.
- Searched open and closed [GitHub issues](https://github.com/pingcap/tidb/issues?utf8=%E2%9C%93&q=is%3Aissue)
- Read the documentation:
- [TiDB Readme](https://github.com/pingcap/tidb)
- [TiDB Doc](https://github.com/pingcap/docs)
-->

29
.github/ISSUE_TEMPLATE/performance-questions.md

@@ -0,0 +1,29 @@
---
name: "\U0001F947 Ask a Database Performance Question"
about: I want to ask a database performance question.
labels: type/question, type/performance
---
## Performance Questions
- What version of TiDB are you using?
<!-- You can try `tidb-server -V` or run `select tidb_version();` on TiDB to get this information -->
- What are the observed and the expected performance, respectively?
- Have you compared TiDB with other databases? If yes, what differences did you see?
- For a specific slow SQL query, please provide the following information:
    - Whether you analyzed the tables involved in the query, and how long it has been since you last ran `ANALYZE`.
    - Whether this SQL query always or only occasionally runs slowly.
    - The `EXPLAIN ANALYZE` result of this query if your TiDB version is higher than 2.1; otherwise, just provide the `EXPLAIN` result.
    - The plain text of the SQL query and the table schema so we can test it locally. It would be better if you can also provide the dumped statistics information.
      <!-- you can use `show create table ${involved_table}\G` to get the table schema.-->
      <!-- use `curl -G "http://${tidb-server-ip}:${tidb-server-status-port}/stats/dump/${db_name}/${table_name}" > ${table_name}_stats.json` to get the dumped statistics of one involved table.-->
    - The `EXPLAIN` result of the compared database. For MySQL, the result of `EXPLAIN format=json` is more helpful.
    - Any other information that is useful from your perspective.
- For a general performance question, e.g. a benchmark result you ran yourself is not what you expected, please provide the following information:
    - Your cluster's topology architecture.
    - A simple description of your workload.
    - The metrics PDF generated from the Grafana monitor. Remember to set the time range to the duration of the performance issue.

33
.github/labeler.yml

@@ -0,0 +1,33 @@
component/executor:
  - distsql/*
  - executor/*
  - util/chunk/*
  - util/disk/*
  - util/execdetails/*
  - util/expensivequery/*
  - util/filesort/*
  - util/memory/*
  - util/sqlexec/*

component/expression:
  - expression/*
  - types/*

component/planner:
  - planner/*
  - bindinfo/*
  - util/ranger/*
  - util/plancodec/*

component/statistics:
  - statistics/*

component/DDL:
  - ddl/*
  - domain/*
  - infoschema/*
  - meta/*
  - owner/*

component/config:
  - config/*

45
.github/pull_request_template.md

@@ -0,0 +1,45 @@
<!-- Thank you for contributing to TiDB!
PR Title Format:
1. pkg [, pkg2, pkg3]: what's changed
2. *: what's changed
-->
### What problem does this PR solve?
Issue Number: close #xxx <!-- REMOVE this line if no issue to close -->
Problem Summary:
### What is changed and how it works?
Proposal: [xxx](url) <!-- REMOVE this line if not applicable -->
What's Changed:
How it Works:
### Related changes
- PR to update `pingcap/docs`/`pingcap/docs-cn`:
- PR to update `pingcap/tidb-ansible`:
- Need to cherry-pick to the release branch
### Check List <!--REMOVE the items that are not applicable-->
Tests <!-- At least one of them must be included. -->
- Unit test
- Integration test
- Manual test (add detailed scripts or steps below)
- No code
Side effects
- Performance regression
- Consumes more CPU
- Consumes more MEM
- Breaking backward compatibility
### Release note <!-- bugfixes or new feature need a release note -->

58
.github/workflows/assign_project.yml

@@ -0,0 +1,58 @@
name: Auto Assign Project Local

on:
  issues:
    types: [labeled]
env:
  GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}

jobs:
  assign_one_project:
    runs-on: ubuntu-latest
    name: Assign to One Project
    steps:
    - name: Run issues assignment to project SIG Runtime Kanban
      uses: srggrs/assign-one-project-github-action@1.2.0
      if: |
        contains(github.event.issue.labels.*.name, 'component/executor') ||
        contains(github.event.issue.labels.*.name, 'component/expression')
      with:
        project: 'https://github.com/pingcap/tidb/projects/38'
        column_name: 'Issue Backlog: Need Triage'
    - name: Run issues assignment to project SIG Planner Kanban
      uses: srggrs/assign-one-project-github-action@1.2.0
      if: |
        contains(github.event.issue.labels.*.name, 'component/planner') ||
        contains(github.event.issue.labels.*.name, 'component/statistics')
      with:
        project: 'https://github.com/pingcap/tidb/projects/39'
        column_name: 'Issue Backlog: Need Triage'
    - name: Run issues assignment to Question and Bug Reports Kanban
      uses: srggrs/assign-one-project-github-action@1.2.0
      if: |
        contains(github.event.issue.labels.*.name, 'type/question') ||
        contains(github.event.issue.labels.*.name, 'type/bug')
      with:
        project: 'https://github.com/pingcap/tidb/projects/36'
        column_name: 'Need Triage'
    - name: Run issues assignment to Feature Request Kanban
      uses: srggrs/assign-one-project-github-action@1.2.0
      if: |
        contains(github.event.issue.labels.*.name, 'type/feature-request')
      with:
        project: 'https://github.com/pingcap/tidb/projects/41'
        column_name: 'Need Triage'
    - name: Run issues assignment to Robust test
      uses: srggrs/assign-one-project-github-action@1.2.0
      if: |
        contains(github.event.issue.labels.*.name, 'component/test')
      with:
        project: 'https://github.com/pingcap/tidb/projects/32'
        column_name: 'TODO/Help Wanted'
    - name: Run issues assignment to project UT Coverage
      uses: srggrs/assign-one-project-github-action@1.2.0
      if: |
        contains(github.event.issue.labels.*.name, 'type/UT-coverage')
      with:
        project: 'https://github.com/pingcap/tidb/projects/44'
        column_name: 'To do'

14
.github/workflows/labeler.yml

@@ -0,0 +1,14 @@
name: "Pull request labeler"
on:
  schedule:
    - cron: '*/6 * * * *'

jobs:
  labeler:
    runs-on: ubuntu-latest
    steps:
      - uses: paulfantom/periodic-labeler@master
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
          GITHUB_REPOSITORY: ${{ github.repository }}
          LABEL_MAPPINGS_FILE: .github/labeler.yml

21
.gitignore

@@ -0,0 +1,21 @@
bin
/tidb-server/tidb-server
/tidb-server/debug
coverage.out
.idea/
*.iml
*.swp
*.log
tags
profile.coverprofile
explain_test
cmd/explaintest/explain-test.out
cmd/explaintest/explaintest_tidb-server
*.fail.go
tools/bin/
vendor
/_tools/
.DS_Store
.vscode
/go.sum
/.idea/

1267
CHANGELOG.md

File diff suppressed because it is too large

3
CODE_OF_CONDUCT.md

@@ -0,0 +1,3 @@
# Contributor Covenant Code of Conduct
See the [Contributor Covenant Code of Conduct](https://github.com/pingcap/community/blob/master/CODE_OF_CONDUCT.md)

4
CONTRIBUTING.md

@@ -0,0 +1,4 @@
# Contribution Guide
See the [Contribution Guide](https://github.com/pingcap/community/blob/master/CONTRIBUTING.md) in the
[community](https://github.com/pingcap/community) repo.

1
CONTRIBUTORS.md

@@ -0,0 +1 @@
The contributor list has moved to [Contributors](https://github.com/pingcap/community/blob/master/architecture/contributor-list.md#tidb-contributors)

53
Dockerfile

@@ -0,0 +1,53 @@
# Copyright 2019 PingCAP, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# See the License for the specific language governing permissions and
# limitations under the License.

# Builder image
FROM golang:1.13-alpine as builder

RUN apk add --no-cache \
    wget \
    make \
    git \
    gcc \
    musl-dev

RUN wget -O /usr/local/bin/dumb-init https://github.com/Yelp/dumb-init/releases/download/v1.2.2/dumb-init_1.2.2_amd64 \
 && chmod +x /usr/local/bin/dumb-init

RUN mkdir -p /go/src/github.com/pingcap/tidb
WORKDIR /go/src/github.com/pingcap/tidb

# Cache dependencies
COPY go.mod .
COPY go.sum .

RUN GO111MODULE=on go mod download

# Build real binaries
COPY . .
RUN make

# Executable image
FROM alpine

RUN apk add --no-cache \
    curl

COPY --from=builder /go/src/github.com/pingcap/tidb/bin/tidb-server /tidb-server
COPY --from=builder /usr/local/bin/dumb-init /usr/local/bin/dumb-init

WORKDIR /

EXPOSE 4000

ENTRYPOINT ["/usr/local/bin/dumb-init", "/tidb-server"]

11
Jenkinsfile

@@ -0,0 +1,11 @@
#!groovy

node {
    def TIDB_TEST_BRANCH = "master"
    def TIKV_BRANCH = "master"
    def PD_BRANCH = "master"

    fileLoader.withGit('git@github.com:pingcap/SRE.git', 'master', 'github-iamxy-ssh', '') {
        fileLoader.load('jenkins/ci/pingcap_tidb_branch.groovy').call(TIDB_TEST_BRANCH, TIKV_BRANCH, PD_BRANCH)
    }
}

201
LICENSE

@@ -0,0 +1,201 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "{}"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright {}
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

27
LICENSES/QL-LICENSE

@@ -0,0 +1,27 @@
Copyright (c) 2014 The ql Authors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the names of the authors nor the names of the
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

27
LICENSES/STRUTIL-LICENSE

@@ -0,0 +1,27 @@
Copyright (c) 2014 The strutil Authors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the names of the authors nor the names of the
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

323
Makefile

@@ -0,0 +1,323 @@
# Copyright 2019 PingCAP, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# See the License for the specific language governing permissions and
# limitations under the License.

PROJECT=tidb
GOPATH ?= $(shell go env GOPATH)
P=8

# Ensure GOPATH is set before running build process.
ifeq "$(GOPATH)" ""
  $(error Please set the environment variable GOPATH before running `make`)
endif

FAIL_ON_STDOUT := awk '{ print } END { if (NR > 0) { exit 1 } }'

CURDIR := $(shell pwd)
path_to_add := $(addsuffix /bin,$(subst :,/bin:,$(GOPATH))):$(PWD)/tools/bin
export PATH := $(path_to_add):$(PATH)

GO := GO111MODULE=on go
GOBUILD := $(GO) build $(BUILD_FLAG) -tags codes
GOBUILDCOVERAGE := GOPATH=$(GOPATH) cd tidb-server; $(GO) test -coverpkg="../..." -c .
GOTEST := $(GO) test -p $(P)
OVERALLS := GO111MODULE=on overalls
STATICCHECK := GO111MODULE=on staticcheck
TIDB_EDITION ?= Community

# Ensure TIDB_EDITION is set to Community or Enterprise before running build process.
ifneq "$(TIDB_EDITION)" "Community"
ifneq "$(TIDB_EDITION)" "Enterprise"
  $(error Please set the correct environment variable TIDB_EDITION before running `make`)
endif
endif

ARCH := "`uname -s`"
LINUX := "Linux"
MAC := "Darwin"

PACKAGE_LIST := go list ./... | grep -vE "cmd"
PACKAGES := $$($(PACKAGE_LIST))
PACKAGE_DIRECTORIES := $(PACKAGE_LIST) | sed 's|github.com/pingcap/$(PROJECT)/||'
FILES := $$(find $$($(PACKAGE_DIRECTORIES)) -name "*.go")

FAILPOINT_ENABLE := $$(find $$PWD/ -type d | grep -vE "(\.git|tools)" | xargs tools/bin/failpoint-ctl enable)
FAILPOINT_DISABLE := $$(find $$PWD/ -type d | grep -vE "(\.git|tools)" | xargs tools/bin/failpoint-ctl disable)

LDFLAGS += -X "github.com/pingcap/parser/mysql.TiDBReleaseVersion=$(shell git describe --tags --dirty --always)"
LDFLAGS += -X "github.com/pingcap/tidb/util/versioninfo.TiDBBuildTS=$(shell date -u '+%Y-%m-%d %I:%M:%S')"
LDFLAGS += -X "github.com/pingcap/tidb/util/versioninfo.TiDBGitHash=$(shell git rev-parse HEAD)"
LDFLAGS += -X "github.com/pingcap/tidb/util/versioninfo.TiDBGitBranch=$(shell git rev-parse --abbrev-ref HEAD)"
LDFLAGS += -X "github.com/pingcap/tidb/util/versioninfo.TiDBEdition=$(TIDB_EDITION)"

TEST_LDFLAGS = -X "github.com/pingcap/tidb/config.checkBeforeDropLDFlag=1"
COVERAGE_SERVER_LDFLAGS = -X "github.com/pingcap/tidb/tidb-server.isCoverageServer=1"

CHECK_LDFLAGS += $(LDFLAGS) ${TEST_LDFLAGS}

TARGET = ""

# VB = Vector Benchmark
VB_FILE =
VB_FUNC =

.PHONY: all build update clean todo test gotest interpreter server dev benchkv benchraw check checklist parser tidy ddltest

default: server buildsucc

server-admin-check: server_check buildsucc

buildsucc:
	@echo Build TiDB Server successfully!

all: dev server benchkv

parser:
	@echo "remove this command later, when our CI script doesn't call it"

dev: checklist check test

build:
	$(GOBUILD)

# Install the check tools.
check-setup: tools/bin/revive tools/bin/goword tools/bin/gometalinter tools/bin/gosec

check: fmt errcheck lint tidy testSuite check-static vet staticcheck errdoc

# These need to be fixed before they can be run regularly
check-fail: goword check-slow

fmt:
	@echo "gofmt (simplify)"
	@gofmt -s -l -w $(FILES) 2>&1 | $(FAIL_ON_STDOUT)

goword: tools/bin/goword
	tools/bin/goword $(FILES) 2>&1 | $(FAIL_ON_STDOUT)

gosec: tools/bin/gosec
	tools/bin/gosec $$($(PACKAGE_DIRECTORIES))

check-static: tools/bin/golangci-lint
	tools/bin/golangci-lint run -v --disable-all --deadline=3m \
	  --enable=misspell \
	  --enable=ineffassign \
	  $$($(PACKAGE_DIRECTORIES))

check-slow: tools/bin/gometalinter tools/bin/gosec
	tools/bin/gometalinter --disable-all \
	  --enable errcheck \
	  $$($(PACKAGE_DIRECTORIES))

errcheck: tools/bin/errcheck
	@echo "errcheck"
	@GO111MODULE=on tools/bin/errcheck -exclude ./tools/check/errcheck_excludes.txt -ignoretests -blank $(PACKAGES)

gogenerate:
	@echo "go generate ./..."
	./tools/check/check-gogenerate.sh

errdoc: tools/bin/errdoc-gen
	@echo "generate errors.toml"
	./tools/check/check-errdoc.sh

lint: tools/bin/revive
	@echo "linting"
	@tools/bin/revive -formatter friendly -config tools/check/revive.toml $(FILES)

vet:
	@echo "vet"
	$(GO) vet -all $(PACKAGES) 2>&1 | $(FAIL_ON_STDOUT)

staticcheck:
	$(GO) get honnef.co/go/tools/cmd/staticcheck
	$(STATICCHECK) ./...

tidy:
	@echo "go mod tidy"
	./tools/check/check-tidy.sh

testSuite:
	@echo "testSuite"
	./tools/check/check_testSuite.sh

clean:
	$(GO) clean -i ./...
	rm -rf *.out
	rm -rf parser

# Split tests for CI to run `make test` in parallel.
test: test_part_1 test_part_2
	@>&2 echo "Great, all tests passed."

test_part_1: checklist explaintest

test_part_2: checkdep gotest gogenerate

explaintest: server
	@cd cmd/explaintest && ./run-tests.sh -s ../../bin/tidb-server

ddltest:
	@cd cmd/ddltest && $(GO) test -o ../../bin/ddltest -c

upload-coverage: SHELL:=/bin/bash
upload-coverage:
ifeq ("$(TRAVIS_COVERAGE)", "1")
	mv overalls.coverprofile coverage.txt
	bash <(curl -s https://codecov.io/bash)
endif

gotest: failpoint-enable
ifeq ("$(TRAVIS_COVERAGE)", "1")
	@echo "Running in TRAVIS_COVERAGE mode."
	$(GO) get github.com/go-playground/overalls
	@export log_level=error; \
	$(OVERALLS) -project=github.com/pingcap/tidb \
		-covermode=count \
		-ignore='.git,vendor,cmd,docs,LICENSES' \
		-concurrency=4 \
		-- -coverpkg=./... \
		|| { $(FAILPOINT_DISABLE); exit 1; }
else
	@echo "Running in native mode."
	@export log_level=fatal; export TZ='Asia/Shanghai'; \
	$(GOTEST) -ldflags '$(TEST_LDFLAGS)' -cover $(PACKAGES) -check.p true -check.timeout 4s || { $(FAILPOINT_DISABLE); exit 1; }
endif
	@$(FAILPOINT_DISABLE)

race: failpoint-enable
	@export log_level=debug; \
	$(GOTEST) -timeout 20m -race $(PACKAGES) || { $(FAILPOINT_DISABLE); exit 1; }
	@$(FAILPOINT_DISABLE)

leak: failpoint-enable
	@export log_level=debug; \
	$(GOTEST) -tags leak $(PACKAGES) || { $(FAILPOINT_DISABLE); exit 1; }
	@$(FAILPOINT_DISABLE)

tikv_integration_test: failpoint-enable
	$(GOTEST) ./store/tikv/. -with-tikv=true || { $(FAILPOINT_DISABLE); exit 1; }
	@$(FAILPOINT_DISABLE)

RACE_FLAG =
ifeq ("$(WITH_RACE)", "1")
  RACE_FLAG = -race
  GOBUILD = GOPATH=$(GOPATH) $(GO) build
endif

CHECK_FLAG =
ifeq ("$(WITH_CHECK)", "1")
  CHECK_FLAG = $(TEST_LDFLAGS)
endif

server:
ifeq ($(TARGET), "")
	CGO_ENABLED=1 $(GOBUILD) $(RACE_FLAG) -ldflags '$(LDFLAGS) $(CHECK_FLAG)' -o bin/tidb-server tidb-server/main.go
else
	CGO_ENABLED=1 $(GOBUILD) $(RACE_FLAG) -ldflags '$(LDFLAGS) $(CHECK_FLAG)' -o '$(TARGET)' tidb-server/main.go
endif

server_check:
ifeq ($(TARGET), "")
	$(GOBUILD) $(RACE_FLAG) -ldflags '$(CHECK_LDFLAGS)' -o bin/tidb-server tidb-server/main.go
else
	$(GOBUILD) $(RACE_FLAG) -ldflags '$(CHECK_LDFLAGS)' -o '$(TARGET)' tidb-server/main.go
endif

linux:
ifeq ($(TARGET), "")
	GOOS=linux $(GOBUILD) $(RACE_FLAG) -ldflags '$(LDFLAGS) $(CHECK_FLAG)' -o bin/tidb-server-linux tidb-server/main.go
else
	GOOS=linux $(GOBUILD) $(RACE_FLAG) -ldflags '$(LDFLAGS) $(CHECK_FLAG)' -o '$(TARGET)' tidb-server/main.go
endif

server_coverage:
ifeq ($(TARGET), "")
	$(GOBUILDCOVERAGE) $(RACE_FLAG) -ldflags '$(LDFLAGS) $(COVERAGE_SERVER_LDFLAGS) $(CHECK_FLAG)' -o ../bin/tidb-server-coverage
else
	$(GOBUILDCOVERAGE) $(RACE_FLAG) -ldflags '$(LDFLAGS) $(COVERAGE_SERVER_LDFLAGS) $(CHECK_FLAG)' -o '$(TARGET)'
endif

benchkv:
	$(GOBUILD) -ldflags '$(LDFLAGS)' -o bin/benchkv cmd/benchkv/main.go

benchraw:
	$(GOBUILD) -ldflags '$(LDFLAGS)' -o bin/benchraw cmd/benchraw/main.go

benchdb:
	$(GOBUILD) -ldflags '$(LDFLAGS)' -o bin/benchdb cmd/benchdb/main.go

importer:
	$(GOBUILD) -ldflags '$(LDFLAGS)' -o bin/importer ./cmd/importer

checklist:
	cat checklist.md

failpoint-enable: tools/bin/failpoint-ctl
# Converting gofail failpoints...
	@$(FAILPOINT_ENABLE)

failpoint-disable: tools/bin/failpoint-ctl
# Restoring gofail failpoints...
	@$(FAILPOINT_DISABLE)

checkdep:
	$(GO) list -f '{{ join .Imports "\n" }}' github.com/pingcap/tidb/store/tikv | grep ^github.com/pingcap/parser$$ || exit 0; exit 1

tools/bin/megacheck: tools/check/go.mod
	cd tools/check; \
	$(GO) build -o ../bin/megacheck honnef.co/go/tools/cmd/megacheck

tools/bin/revive: tools/check/go.mod
	cd tools/check; \
	$(GO) build -o ../bin/revive github.com/mgechev/revive

tools/bin/goword: tools/check/go.mod
	cd tools/check; \
	$(GO) build -o ../bin/goword github.com/chzchzchz/goword

tools/bin/gometalinter: tools/check/go.mod
	cd tools/check; \
	$(GO) build -o ../bin/gometalinter gopkg.in/alecthomas/gometalinter.v3

tools/bin/gosec: tools/check/go.mod
	cd tools/check; \
	$(GO) build -o ../bin/gosec github.com/securego/gosec/cmd/gosec

tools/bin/errcheck: tools/check/go.mod
	cd tools/check; \
	$(GO) build -o ../bin/errcheck github.com/kisielk/errcheck

tools/bin/failpoint-ctl: go.mod
	$(GO) build -o $@ github.com/pingcap/failpoint/failpoint-ctl

tools/bin/misspell: tools/check/go.mod
	$(GO) get -u github.com/client9/misspell/cmd/misspell

tools/bin/ineffassign: tools/check/go.mod
	cd tools/check; \
	$(GO) build -o ../bin/ineffassign github.com/gordonklaus/ineffassign

tools/bin/errdoc-gen: go.mod
	$(GO) build -o $@ github.com/pingcap/tiup/components/errdoc/errdoc-gen

tools/bin/golangci-lint:
	curl -sfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b ./tools/bin v1.21.0

# Usage:
#
#	$ make vectorized-bench VB_FILE=Time VB_FUNC=builtinCurrentDateSig
vectorized-bench:
	cd ./expression && \
		go test -v -timeout=0 -benchmem \
			-bench=BenchmarkVectorizedBuiltin$(VB_FILE)Func \
			-run=BenchmarkVectorizedBuiltin$(VB_FILE)Func \
			-args "$(VB_FUNC)"

91
README.md

@@ -0,0 +1,91 @@
![](docs/logo_with_text.png)
[![LICENSE](https://img.shields.io/github/license/pingcap/tidb.svg)](https://github.com/pingcap/tidb/blob/master/LICENSE)
[![Language](https://img.shields.io/badge/Language-Go-blue.svg)](https://golang.org/)
[![Build Status](https://travis-ci.org/pingcap/tidb.svg?branch=master)](https://travis-ci.org/pingcap/tidb)
[![Go Report Card](https://goreportcard.com/badge/github.com/pingcap/tidb)](https://goreportcard.com/report/github.com/pingcap/tidb)
[![GitHub release](https://img.shields.io/github/tag/pingcap/tidb.svg?label=release)](https://github.com/pingcap/tidb/releases)
[![GitHub release date](https://img.shields.io/github/release-date/pingcap/tidb.svg)](https://github.com/pingcap/tidb/releases)
[![CircleCI Status](https://circleci.com/gh/pingcap/tidb.svg?style=shield)](https://circleci.com/gh/pingcap/tidb)
[![Coverage Status](https://codecov.io/gh/pingcap/tidb/branch/master/graph/badge.svg)](https://codecov.io/gh/pingcap/tidb)
[![GoDoc](https://img.shields.io/badge/Godoc-reference-blue.svg)](https://godoc.org/github.com/pingcap/tidb)
- [**Slack Channel**](https://pingcap.com/tidbslack/)
- **Twitter**: [@PingCAP](https://twitter.com/PingCAP)
- [**Reddit**](https://www.reddit.com/r/TiDB/)
- **Mailing list**: [Google Group](https://groups.google.com/forum/#!forum/tidb-user)
- [**Blog**](https://www.pingcap.com/blog/)
- [**For support, please contact PingCAP**](http://bit.ly/contact_us_via_github)
## What is TiDB?
TiDB ("Ti" stands for Titanium) is an open-source NewSQL database that supports Hybrid Transactional and Analytical Processing (HTAP) workloads. It is MySQL compatible and features horizontal scalability, strong consistency, and high availability.
- __Horizontal Scalability__
TiDB expands both SQL processing and storage by simply adding new nodes. This makes infrastructure capacity planning both easier and more cost-effective than traditional relational databases which only scale vertically.
- __MySQL Compatible Syntax__
TiDB acts like it is a MySQL 5.7 server to your applications. You can continue to use all of the existing MySQL client libraries, and in many cases, you will not need to change a single line of code in your application. Because TiDB is built from scratch, not a MySQL fork, please check out the list of [known compatibility differences](https://pingcap.com/docs/v3.0/reference/mysql-compatibility/). A minimal client example follows this feature list.
- __Distributed Transactions with Strong Consistency__
TiDB internally shards tables into small range-based chunks that we refer to as "regions". Each region defaults to approximately 100MiB in size, and TiDB uses two-phase commit internally to ensure that regions are maintained in a transactionally consistent way.
- __Cloud Native__
TiDB is designed to work in the cloud -- public, private, or hybrid -- making deployment, provisioning, operations, and maintenance simple.
The storage layer of TiDB, called TiKV, [became](https://www.cncf.io/blog/2018/08/28/cncf-to-host-tikv-in-the-sandbox/) a [Cloud Native Computing Foundation](https://www.cncf.io/) member project in 2018. The architecture of the TiDB platform also allows SQL processing and storage to be scaled independently of each other in a very cloud-friendly manner.
- __Minimize ETL__
TiDB is designed to support both transaction processing (OLTP) and analytical processing (OLAP) workloads. This means that while you may have traditionally transacted on MySQL and then Extracted, Transformed and Loaded (ETL) data into a column store for analytical processing, this step is no longer required.
- __High Availability__
TiDB uses the Raft consensus algorithm to ensure that data is highly available and safely replicated throughout storage in Raft groups. In the event of failure, a Raft group will automatically elect a new leader for the failed member, and self-heal the TiDB cluster without any required manual intervention. Failure and self-healing operations are also transparent to applications.
For more details and latest updates, see [official TiDB blog](https://www.pingcap.com/blog/).
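To make the MySQL compatibility point above concrete, any standard MySQL driver can talk to TiDB directly. Below is a minimal client sketch in Go; it assumes a local TiDB instance on the default client port 4000 and uses the community `go-sql-driver/mysql` driver (both are illustrative assumptions, adjust for your deployment):

```go
package main

import (
	"database/sql"
	"fmt"

	// Any MySQL driver works; TiDB speaks the MySQL wire protocol.
	_ "github.com/go-sql-driver/mysql"
)

func main() {
	// 4000 is TiDB's default client port.
	db, err := sql.Open("mysql", "root@tcp(127.0.0.1:4000)/test")
	if err != nil {
		panic(err)
	}
	defer db.Close()

	var version string
	// tidb_version() is a TiDB built-in function.
	if err := db.QueryRow("SELECT tidb_version()").Scan(&version); err != nil {
		panic(err)
	}
	fmt.Println(version)
}
```

The same connection string works for existing MySQL tools and ORMs.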
## Adopters
View the current list of in-production TiDB adopters [here](https://pingcap.com/docs/adopters/).
## Roadmap
Read the [Roadmap](https://pingcap.com/docs/ROADMAP).
## Quick start
Read the [Quick Start Guide](https://pingcap.com/docs/QUICKSTART), which includes deployment methods using Ansible, Docker, and Kubernetes.
## Getting Help
- [**Stack Overflow**](https://stackoverflow.com/questions/tagged/tidb)
- [**User Group (Chinese)**](https://asktug.com)
## Documentation
+ [English](https://pingcap.com/docs)
+ [简体中文](https://pingcap.com/docs-cn)
## Architecture
![architecture](./docs/architecture.png)
## Contributing
[<img src="docs/contribution-map.png" alt="contribution-map" width="180">](https://github.com/pingcap/tidb-map/blob/master/maps/contribution-map.md#tidb-is-an-open-source-distributed-htap-database-compatible-with-the-mysql-protocol)
Contributions are welcomed and greatly appreciated. See
[CONTRIBUTING.md](https://github.com/pingcap/community/blob/master/CONTRIBUTING.md)
for details on submitting patches and the contribution workflow. For more contributing information, click on the contributor icon above.
## License
TiDB is under the Apache 2.0 license. See the [LICENSE](./LICENSE) file for details.
## Acknowledgments
- Thanks [cznic](https://github.com/cznic) for providing some great open source tools.
- Thanks [GolevelDB](https://github.com/syndtr/goleveldb), [BoltDB](https://github.com/boltdb/bolt), and [RocksDB](https://github.com/facebook/rocksdb) for their powerful storage engines.

1467
bindinfo/bind_test.go

File diff suppressed because it is too large

259
bindinfo/cache.go

@@ -0,0 +1,259 @@
// Copyright 2019 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.

package bindinfo

import (
	"time"
	"unsafe"

	"github.com/pingcap/parser"
	"github.com/pingcap/tidb/metrics"
	"github.com/pingcap/tidb/sessionctx"
	"github.com/pingcap/tidb/types"
	"github.com/pingcap/tidb/util/hint"
)

const (
	// Using is the bind info's in use status.
	Using = "using"
	// deleted is the bind info's deleted status.
	deleted = "deleted"
	// Invalid is the bind info's invalid status.
	Invalid = "invalid"
	// PendingVerify means the bind info needs to be verified.
	PendingVerify = "pending verify"
	// Rejected means that the bind has been rejected after the verify process.
	// We can retry it after a certain amount of time has passed.
	Rejected = "rejected"
	// Manual indicates the binding is created by SQL like "create binding for ...".
	Manual = "manual"
	// Capture indicates the binding is captured by TiDB automatically.
	Capture = "capture"
	// Evolve indicates the binding is evolved by TiDB from old bindings.
	Evolve = "evolve"
)

// Binding stores the basic bind hint info.
type Binding struct {
	BindSQL string
	// Status represents the status of the binding. It can only be one of the following values:
	// 1. deleted: BindRecord is deleted, can not be used anymore.
	// 2. using: Binding is in the normal active mode.
	Status     string
	CreateTime types.Time
	UpdateTime types.Time
	Source     string
	Charset    string
	Collation  string
	// Hint is the parsed hints; it is used to bind hints to a stmt node.
	Hint *hint.HintsSet
	// ID is the string form of Hint. It would be non-empty only when the status is `Using` or `PendingVerify`.
	ID string
}

func (b *Binding) isSame(rb *Binding) bool {
	if b.ID != "" && rb.ID != "" {
		return b.ID == rb.ID
	}
	// Sometimes we cannot construct `ID` because of the changed schema, so we need to compare by bind sql.
	return b.BindSQL == rb.BindSQL
}

// SinceUpdateTime returns the duration since the last update time. Exported for test.
func (b *Binding) SinceUpdateTime() (time.Duration, error) {
	updateTime, err := b.UpdateTime.GoTime(time.Local)
	if err != nil {
		return 0, err
	}
	return time.Since(updateTime), nil
}

// cache is a k-v map, where the key is the original sql and the value is a slice of BindRecord.
type cache map[string][]*BindRecord

// BindRecord represents a sql bind record retrieved from the storage.
type BindRecord struct {
	OriginalSQL string
	Db          string

	Bindings []Binding
}

// HasUsingBinding checks if there are any using bindings in the bind record.
func (br *BindRecord) HasUsingBinding() bool {
	for _, binding := range br.Bindings {
		if binding.Status == Using {
			return true
		}
	}
	return false
}

// FindBinding finds the binding with the given hint ID in the BindRecord.
func (br *BindRecord) FindBinding(hint string) *Binding {
	for _, binding := range br.Bindings {
		if binding.ID == hint {
			return &binding
		}
	}
	return nil
}

// prepareHints builds ID and Hint for BindRecord. If sctx is not nil, we check if
// the BindSQL is still valid.
func (br *BindRecord) prepareHints(sctx sessionctx.Context) error {
	p := parser.New()
	for i, bind := range br.Bindings {
		if (bind.Hint != nil && bind.ID != "") || bind.Status == deleted {
			continue
		}
		if sctx != nil {
			_, err := getHintsForSQL(sctx, bind.BindSQL)
			if err != nil {
				return err
			}
		}
		hintsSet, warns, err := hint.ParseHintsSet(p, bind.BindSQL, bind.Charset, bind.Collation, br.Db)
		if err != nil {
			return err
		}
		hintsStr, err := hintsSet.Restore()
		if err != nil {
			return err
		}
		// For `create global binding for select * from t using select * from t`, we allow it though hintsStr is empty.
		// For `create global binding for select * from t using select /*+ non_exist_hint() */ * from t`,
		// the hint is totally invalid, so we escalate the warning to an error.
		if hintsStr == "" && len(warns) > 0 {
			return warns[0]
		}
		br.Bindings[i].Hint = hintsSet
		br.Bindings[i].ID = hintsStr
	}
	return nil
}

// merge merges two BindRecords. It replaces old bindings with new bindings if there are new updates.
func merge(lBindRecord, rBindRecord *BindRecord) *BindRecord {
	if lBindRecord == nil {
		return rBindRecord
	}
	if rBindRecord == nil {
		return lBindRecord
	}
	result := lBindRecord.shallowCopy()
	for _, rbind := range rBindRecord.Bindings {
		found := false
		for j, lbind := range lBindRecord.Bindings {
			if lbind.isSame(&rbind) {
				found = true
				if rbind.UpdateTime.Compare(lbind.UpdateTime) >= 0 {
					result.Bindings[j] = rbind
				}
				break
			}
		}
		if !found {
			result.Bindings = append(result.Bindings, rbind)
		}
	}
	return result
}

func (br *BindRecord) remove(deleted *BindRecord) *BindRecord {
	// Delete all bindings.
	if len(deleted.Bindings) == 0 {
		return &BindRecord{OriginalSQL: br.OriginalSQL, Db: br.Db}
	}
	result := br.shallowCopy()
	for _, deletedBind := range deleted.Bindings {
		for i, bind := range result.Bindings {
			if bind.isSame(&deletedBind) {
				result.Bindings = append(result.Bindings[:i], result.Bindings[i+1:]...)
				break
			}
		}
	}
	return result
}

func (br *BindRecord) removeDeletedBindings() *BindRecord {
	result := BindRecord{OriginalSQL: br.OriginalSQL, Db: br.Db, Bindings: make([]Binding, 0, len(br.Bindings))}
	for _, binding := range br.Bindings {
		if binding.Status != deleted {
			result.Bindings = append(result.Bindings, binding)
		}
	}
	return &result
}

// shallowCopy shallow copies the BindRecord.
func (br *BindRecord) shallowCopy() *BindRecord {
	result := BindRecord{
		OriginalSQL: br.OriginalSQL,
		Db:          br.Db,
		Bindings:    make([]Binding, len(br.Bindings)),
	}
	copy(result.Bindings, br.Bindings)
	return &result
}

func (br *BindRecord) isSame(other *BindRecord) bool {
	return br.OriginalSQL == other.OriginalSQL && br.Db == other.Db
}

var statusIndex = map[string]int{
	Using:   0,
	deleted: 1,
	Invalid: 2,
}

func (br *BindRecord) metrics() ([]float64, []int) {
	sizes := make([]float64, len(statusIndex))
	count := make([]int, len(statusIndex))
	if br == nil {
		return sizes, count
	}
	commonLength := float64(len(br.OriginalSQL) + len(br.Db))
	// We treat it as deleted if there are no bindings. It could only occur in session handles.
	if len(br.Bindings) == 0 {
		sizes[statusIndex[deleted]] = commonLength
		count[statusIndex[deleted]] = 1
		return sizes, count
	}
	// Make the common length counted in the first binding.
	sizes[statusIndex[br.Bindings[0].Status]] = commonLength
	for _, binding := range br.Bindings {
		sizes[statusIndex[binding.Status]] += binding.size()
		count[statusIndex[binding.Status]]++
	}
	return sizes, count
}

// size calculates the memory size of a bind info.
func (b *Binding) size() float64 {
	res := len(b.BindSQL) + len(b.Status) + 2*int(unsafe.Sizeof(b.CreateTime)) + len(b.Charset) + len(b.Collation)
	return float64(res)
}

func updateMetrics(scope string, before *BindRecord, after *BindRecord, sizeOnly bool) {
	beforeSize, beforeCount := before.metrics()
	afterSize, afterCount := after.metrics()
	for status, index := range statusIndex {
		metrics.BindMemoryUsage.WithLabelValues(scope, status).Add(afterSize[index] - beforeSize[index])
		if !sizeOnly {
			metrics.BindTotalGauge.WithLabelValues(scope, status).Add(float64(afterCount[index] - beforeCount[index]))
		}
	}
}
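The exported pieces of cache.go compose in a typical caller: look for an active binding with HasUsingBinding, and use SinceUpdateTime to decide when a rejected binding is due for another verification pass. The following self-contained Go sketch illustrates that flow against the API shown above (illustrative only; the 10*Lease retry window is an arbitrary placeholder, not the value TiDB uses):

package main

import (
	"fmt"

	"github.com/pingcap/tidb/bindinfo"
)

func main() {
	rec := &bindinfo.BindRecord{
		OriginalSQL: "select * from t where a > ?",
		Db:          "test",
		Bindings: []bindinfo.Binding{
			{BindSQL: "select /*+ use_index(t, idx_a) */ * from t where a > ?", Status: bindinfo.Using},
			{BindSQL: "select /*+ use_index(t, idx_b) */ * from t where a > ?", Status: bindinfo.Rejected},
		},
	}

	// True: the first binding is in the "using" status.
	fmt.Println(rec.HasUsingBinding())

	// A rejected binding may be retried once enough time has passed since its
	// last update (see the Rejected constant above).
	for i := range rec.Bindings {
		b := &rec.Bindings[i]
		if b.Status != bindinfo.Rejected {
			continue
		}
		if d, err := b.SinceUpdateTime(); err == nil && d > 10*bindinfo.Lease {
			fmt.Println("rejected binding is old enough to re-verify:", b.BindSQL)
		}
	}
}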

950
bindinfo/handle.go

@@ -0,0 +1,950 @@
// Copyright 2019 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.

package bindinfo

import (
	"context"
	"fmt"
	"runtime"
	"strconv"
	"strings"
	"sync"
	"sync/atomic"
	"time"

	"github.com/pingcap/parser"
	"github.com/pingcap/parser/ast"
	"github.com/pingcap/parser/format"
	"github.com/pingcap/parser/mysql"
	"github.com/pingcap/parser/terror"
	"github.com/pingcap/tidb/expression"
	"github.com/pingcap/tidb/kv"
	"github.com/pingcap/tidb/metrics"
	"github.com/pingcap/tidb/sessionctx"
	"github.com/pingcap/tidb/sessionctx/variable"
	"github.com/pingcap/tidb/store/tikv/oracle"
	"github.com/pingcap/tidb/types"
	driver "github.com/pingcap/tidb/types/parser_driver"
	"github.com/pingcap/tidb/util/chunk"
	"github.com/pingcap/tidb/util/hint"
	"github.com/pingcap/tidb/util/logutil"
	utilparser "github.com/pingcap/tidb/util/parser"
	"github.com/pingcap/tidb/util/sqlexec"
	"github.com/pingcap/tidb/util/stmtsummary"
	"github.com/pingcap/tidb/util/timeutil"
	"go.uber.org/zap"
)

// BindHandle is used to handle all global sql bind operations.
type BindHandle struct {
	sctx struct {
		sync.Mutex
		sessionctx.Context
	}

	// bindInfo caches the sql bind info from storage.
	//
	// The Mutex ensures that only one goroutine changes the content
	// of atomic.Value.
	//
	// NOTE: Concurrent Value Write:
	//
	//	bindInfo.Lock()
	//	newCache := bindInfo.Value.Load()
	//	do the write operation on the newCache
	//	bindInfo.Value.Store(newCache)
	//	bindInfo.Unlock()
	//
	// NOTE: Concurrent Value Read:
	//
	//	cache := bindInfo.Load().
	//	read the content
	//
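	// In this file the Value always holds a `cache` map: writers (e.g. Update
	// below) clone the current map with cache.copy(), mutate the clone, and
	// Store it back, so concurrent readers never observe a partial update.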
bindInfo struct {
sync.Mutex
atomic.Value
parser *parser.Parser
lastUpdateTime types.Time
}
// invalidBindRecordMap indicates the invalid bind records found during querying.
// A record will be deleted from this map, after 2 bind-lease, after it is dropped from the kv.
invalidBindRecordMap tmpBindRecordMap
// pendingVerifyBindRecordMap indicates the pending verify bind records that found during query.
pendingVerifyBindRecordMap tmpBindRecordMap
}
// Lease influences the duration of loading bind info and handling invalid bind.
var Lease = 3 * time.Second
const (
// OwnerKey is the bindinfo owner path that is saved to etcd.
OwnerKey = "/tidb/bindinfo/owner"
// Prompt is the prompt for bindinfo owner manager.
Prompt = "bindinfo"
)
type bindRecordUpdate struct {
bindRecord *BindRecord
updateTime time.Time
}
// NewBindHandle creates a new BindHandle.
func NewBindHandle(ctx sessionctx.Context) *BindHandle {
handle := &BindHandle{}
handle.sctx.Context = ctx
handle.bindInfo.Value.Store(make(cache, 32))
handle.bindInfo.parser = parser.New()
handle.invalidBindRecordMap.Value.Store(make(map[string]*bindRecordUpdate))
handle.invalidBindRecordMap.flushFunc = func(record *BindRecord) error {
return handle.DropBindRecord(record.OriginalSQL, record.Db, &record.Bindings[0])
}
handle.pendingVerifyBindRecordMap.Value.Store(make(map[string]*bindRecordUpdate))
handle.pendingVerifyBindRecordMap.flushFunc = func(record *BindRecord) error {
// BindSQL has already been validated by the time we get here, so we pass a nil sctx parameter.
return handle.AddBindRecord(nil, record)
}
return handle
}
// Update updates the global sql bind cache.
func (h *BindHandle) Update(fullLoad bool) (err error) {
h.bindInfo.Lock()
lastUpdateTime := h.bindInfo.lastUpdateTime
h.bindInfo.Unlock()
sql := "select original_sql, bind_sql, default_db, status, create_time, update_time, charset, collation, source from mysql.bind_info"
if !fullLoad {
sql += " where update_time > \"" + lastUpdateTime.String() + "\""
}
// We need to apply the updates in order; applying updates of the same original SQL in the wrong order may cause an inconsistent state.
sql += " order by update_time"
// No need to acquire the session context lock for ExecRestrictedSQL, it
// uses another background session.
rows, _, err := h.sctx.Context.(sqlexec.RestrictedSQLExecutor).ExecRestrictedSQL(sql)
if err != nil {
return err
}
// Make sure only one goroutine writes the cache.
h.bindInfo.Lock()
newCache := h.bindInfo.Value.Load().(cache).copy()
defer func() {
h.bindInfo.lastUpdateTime = lastUpdateTime
h.bindInfo.Value.Store(newCache)
h.bindInfo.Unlock()
}()
for _, row := range rows {
hash, meta, err := h.newBindRecord(row)
// Update lastUpdateTime to the newest one.
if meta.Bindings[0].UpdateTime.Compare(lastUpdateTime) > 0 {
lastUpdateTime = meta.Bindings[0].UpdateTime
}
if err != nil {
logutil.BgLogger().Info("update bindinfo failed", zap.Error(err))
continue
}
oldRecord := newCache.getBindRecord(hash, meta.OriginalSQL, meta.Db)
newRecord := merge(oldRecord, meta).removeDeletedBindings()
if len(newRecord.Bindings) > 0 {
newCache.setBindRecord(hash, newRecord)
} else {
newCache.removeDeletedBindRecord(hash, newRecord)
}
updateMetrics(metrics.ScopeGlobal, oldRecord, newCache.getBindRecord(hash, meta.OriginalSQL, meta.Db), true)
}
return nil
}
// CreateBindRecord creates a BindRecord in the storage and the cache.
// It replaces all existing bindings for the same normalized SQL.
func (h *BindHandle) CreateBindRecord(sctx sessionctx.Context, record *BindRecord) (err error) {
err = record.prepareHints(sctx)
if err != nil {
return err
}
exec, _ := h.sctx.Context.(sqlexec.SQLExecutor)
h.sctx.Lock()
_, err = exec.ExecuteInternal(context.TODO(), "BEGIN")
if err != nil {
h.sctx.Unlock()
return
}
normalizedSQL := parser.DigestNormalized(record.OriginalSQL)
oldRecord := h.GetBindRecord(normalizedSQL, record.OriginalSQL, record.Db)
defer func() {
if err != nil {
_, err1 := exec.ExecuteInternal(context.TODO(), "ROLLBACK")
h.sctx.Unlock()
terror.Log(err1)
return
}
_, err = exec.ExecuteInternal(context.TODO(), "COMMIT")
h.sctx.Unlock()
if err != nil {
return
}
// Make sure only one goroutine writes the cache and uses the parser.
h.bindInfo.Lock()
if oldRecord != nil {
h.removeBindRecord(normalizedSQL, oldRecord)
}
h.appendBindRecord(normalizedSQL, record)
h.bindInfo.Unlock()
}()
txn, err1 := h.sctx.Context.Txn(true)
if err1 != nil {
return err1
}
now := types.NewTime(types.FromGoTime(oracle.GetTimeFromTS(txn.StartTS())), mysql.TypeTimestamp, 3)
if oldRecord != nil {
for _, binding := range oldRecord.Bindings {
_, err1 = exec.ExecuteInternal(context.TODO(), h.logicalDeleteBindInfoSQL(record.OriginalSQL, record.Db, now, binding.BindSQL))
if err1 != nil {
return err1
}
}
}
for i := range record.Bindings {
record.Bindings[i].CreateTime = now
record.Bindings[i].UpdateTime = now
// insert the BindRecord to the storage.
_, err = exec.ExecuteInternal(context.TODO(), h.insertBindInfoSQL(record.OriginalSQL, record.Db, record.Bindings[i]))
if err != nil {
return err
}
}
return nil
}
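CreateBindRecord above and AddBindRecord/DropBindRecord below share the same transaction shape: BEGIN up front, then one deferred closure that rolls back on error, or commits and only then touches the in-memory cache. A minimal sketch of that shape, with hypothetical helper names:

```
package main

import (
	"errors"
	"fmt"
)

// doInTxn distills the BEGIN / deferred COMMIT-or-ROLLBACK pattern; it is an
// illustration only, not a helper that exists in this package.
func doInTxn(execSQL func(sql string) error, applyToCache func()) (err error) {
	if err = execSQL("BEGIN"); err != nil {
		return err
	}
	defer func() {
		if err != nil {
			_ = execSQL("ROLLBACK") // best effort; the original logs this error
			return
		}
		if err = execSQL("COMMIT"); err != nil {
			return
		}
		applyToCache() // the cache changes only after the commit succeeds
	}()
	return execSQL("INSERT ...") // the per-operation statements run here
}

func main() {
	err := doInTxn(func(sql string) error {
		fmt.Println("exec:", sql)
		if sql == "INSERT ..." {
			return errors.New("boom") // force the rollback path
		}
		return nil
	}, func() { fmt.Println("apply to cache") })
	fmt.Println("err:", err) // err: boom
}
```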
// AddBindRecord adds a BindRecord to the storage and to the cache.
func (h *BindHandle) AddBindRecord(sctx sessionctx.Context, record *BindRecord) (err error) {
err = record.prepareHints(sctx)
if err != nil {
return err
}
oldRecord := h.GetBindRecord(parser.DigestNormalized(record.OriginalSQL), record.OriginalSQL, record.Db)
var duplicateBinding *Binding
if oldRecord != nil {
binding := oldRecord.FindBinding(record.Bindings[0].ID)
if binding != nil {
// There is already a binding with status `Using`, `PendingVerify` or `Rejected`, so we can cancel the job directly.
if record.Bindings[0].Status == PendingVerify {
return nil
}
// Otherwise, we need to remove it before insert.
duplicateBinding = binding
}
}
exec, _ := h.sctx.Context.(sqlexec.SQLExecutor)
h.sctx.Lock()
_, err = exec.ExecuteInternal(context.TODO(), "BEGIN")
if err != nil {
h.sctx.Unlock()
return
}
defer func() {
if err != nil {
_, err1 := exec.ExecuteInternal(context.TODO(), "ROLLBACK")
h.sctx.Unlock()
terror.Log(err1)
return
}
_, err = exec.ExecuteInternal(context.TODO(), "COMMIT")
h.sctx.Unlock()
if err != nil {
return
}
// Make sure only one goroutine writes the cache and uses the parser.
h.bindInfo.Lock()
h.appendBindRecord(parser.DigestNormalized(record.OriginalSQL), record)
h.bindInfo.Unlock()
}()
txn, err1 := h.sctx.Context.Txn(true)
if err1 != nil {
return err1
}
if duplicateBinding != nil {
_, err = exec.ExecuteInternal(context.TODO(), h.deleteBindInfoSQL(record.OriginalSQL, record.Db, duplicateBinding.BindSQL))
if err != nil {
return err
}
}
now := types.NewTime(types.FromGoTime(oracle.GetTimeFromTS(txn.StartTS())), mysql.TypeTimestamp, 3)
for i := range record.Bindings {
if duplicateBinding != nil {
record.Bindings[i].CreateTime = duplicateBinding.CreateTime
} else {
record.Bindings[i].CreateTime = now
}
record.Bindings[i].UpdateTime = now
// insert the BindRecord to the storage.
_, err = exec.ExecuteInternal(context.TODO(), h.insertBindInfoSQL(record.OriginalSQL, record.Db, record.Bindings[i]))
if err != nil {
return err
}
}
return nil
}
// DropBindRecord drops a BindRecord in the storage and removes it from the cache.
func (h *BindHandle) DropBindRecord(originalSQL, db string, binding *Binding) (err error) {
exec, _ := h.sctx.Context.(sqlexec.SQLExecutor)
h.sctx.Lock()
_, err = exec.ExecuteInternal(context.TODO(), "BEGIN")
if err != nil {
h.sctx.Unlock()
return
}
defer func() {
if err != nil {
_, err1 := exec.ExecuteInternal(context.TODO(), "ROLLBACK")
h.sctx.Unlock()
terror.Log(err1)
return
}
_, err = exec.ExecuteInternal(context.TODO(), "COMMIT")
h.sctx.Unlock()
if err != nil {
return
}
record := &BindRecord{OriginalSQL: originalSQL, Db: db}
if binding != nil {
record.Bindings = append(record.Bindings, *binding)
}
// Make sure only one goroutine writes the cache and uses the parser.
h.bindInfo.Lock()
h.removeBindRecord(parser.DigestNormalized(originalSQL), record)
h.bindInfo.Unlock()
}()
txn, err1 := h.sctx.Context.Txn(true)
if err1 != nil {
return err1
}
updateTs := types.NewTime(types.FromGoTime(oracle.GetTimeFromTS(txn.StartTS())), mysql.TypeTimestamp, 3)
bindSQL := ""
if binding != nil {
bindSQL = binding.BindSQL
}
_, err = exec.ExecuteInternal(context.TODO(), h.logicalDeleteBindInfoSQL(originalSQL, db, updateTs, bindSQL))
return err
}
// tmpBindRecordMap is used to temporarily save bind record changes.
// Those changes will be flushed into store periodically.
type tmpBindRecordMap struct {
sync.Mutex
atomic.Value
flushFunc func(record *BindRecord) error
}
// flushToStore calls flushFunc for items in tmpBindRecordMap and removes them with a delay.
func (tmpMap *tmpBindRecordMap) flushToStore() {
tmpMap.Lock()
defer tmpMap.Unlock()
newMap := copyBindRecordUpdateMap(tmpMap.Load().(map[string]*bindRecordUpdate))
for key, bindRecord := range newMap {
if bindRecord.updateTime.IsZero() {
err := tmpMap.flushFunc(bindRecord.bindRecord)
if err != nil {
logutil.BgLogger().Error("flush bind record failed", zap.Error(err))
}
bindRecord.updateTime = time.Now()
continue
}
if time.Since(bindRecord.updateTime) > 6*time.Second {
delete(newMap, key)
updateMetrics(metrics.ScopeGlobal, bindRecord.bindRecord, nil, false)
}
}
tmpMap.Store(newMap)
}
// Add puts a BindRecord into tmpBindRecordMap.
func (tmpMap *tmpBindRecordMap) Add(bindRecord *BindRecord) {
key := bindRecord.OriginalSQL + ":" + bindRecord.Db + ":" + bindRecord.Bindings[0].ID
if _, ok := tmpMap.Load().(map[string]*bindRecordUpdate)[key]; ok {
return
}
tmpMap.Lock()
defer tmpMap.Unlock()
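// Double-check after acquiring the lock: another goroutine may have
// inserted the key between the lock-free check above and Lock().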
if _, ok := tmpMap.Load().(map[string]*bindRecordUpdate)[key]; ok {
return
}
newMap := copyBindRecordUpdateMap(tmpMap.Load().(map[string]*bindRecordUpdate))
newMap[key] = &bindRecordUpdate{
bindRecord: bindRecord,
}
tmpMap.Store(newMap)
updateMetrics(metrics.ScopeGlobal, nil, bindRecord, false)
}
// DropInvalidBindRecord executes the drop BindRecord tasks.
func (h *BindHandle) DropInvalidBindRecord() {
h.invalidBindRecordMap.flushToStore()
}
// AddDropInvalidBindTask adds a BindRecord that needs to be deleted into invalidBindRecordMap.
func (h *BindHandle) AddDropInvalidBindTask(invalidBindRecord *BindRecord) {
h.invalidBindRecordMap.Add(invalidBindRecord)
}
// Size returns the size of bind info cache.
func (h *BindHandle) Size() int {
size := 0
for _, bindRecords := range h.bindInfo.Load().(cache) {
size += len(bindRecords)
}
return size
}
// GetBindRecord returns the BindRecord of (normdOrigSQL, db) if it exists.
func (h *BindHandle) GetBindRecord(hash, normdOrigSQL, db string) *BindRecord {
return h.bindInfo.Load().(cache).getBindRecord(hash, normdOrigSQL, db)
}
// GetAllBindRecord returns all bind records in cache.
func (h *BindHandle) GetAllBindRecord() (bindRecords []*BindRecord) {
bindRecordMap := h.bindInfo.Load().(cache)
for _, bindRecord := range bindRecordMap {
bindRecords = append(bindRecords, bindRecord...)
}
return bindRecords
}
// newBindRecord builds BindRecord from a tuple in storage.
func (h *BindHandle) newBindRecord(row chunk.Row) (string, *BindRecord, error) {
hint := Binding{
BindSQL: row.GetString(1),
Status: row.GetString(3),
CreateTime: row.GetTime(4),
UpdateTime: row.GetTime(5),
Charset: row.GetString(6),
Collation: row.GetString(7),
Source: row.GetString(8),
}
bindRecord := &BindRecord{
OriginalSQL: row.GetString(0),
Db: row.GetString(2),
Bindings: []Binding{hint},
}
hash := parser.DigestNormalized(bindRecord.OriginalSQL)
h.sctx.Lock()
defer h.sctx.Unlock()
h.sctx.GetSessionVars().CurrentDB = bindRecord.Db
err := bindRecord.prepareHints(h.sctx.Context)
return hash, bindRecord, err
}
// appendBindRecord adds the BindRecord to the cache; all stale BindRecords are
// removed from the cache after this operation.
func (h *BindHandle) appendBindRecord(hash string, meta *BindRecord) {
newCache := h.bindInfo.Value.Load().(cache).copy()
oldRecord := newCache.getBindRecord(hash, meta.OriginalSQL, meta.Db)
newRecord := merge(oldRecord, meta)
newCache.setBindRecord(hash, newRecord)
h.bindInfo.Value.Store(newCache)
updateMetrics(metrics.ScopeGlobal, oldRecord, newRecord, false)
}
// removeBindRecord removes the BindRecord from the cache.
func (h *BindHandle) removeBindRecord(hash string, meta *BindRecord) {
newCache := h.bindInfo.Value.Load().(cache).copy()
oldRecord := newCache.getBindRecord(hash, meta.OriginalSQL, meta.Db)
newCache.removeDeletedBindRecord(hash, meta)
h.bindInfo.Value.Store(newCache)
updateMetrics(metrics.ScopeGlobal, oldRecord, newCache.getBindRecord(hash, meta.OriginalSQL, meta.Db), false)
}
// removeDeletedBindRecord removes the BindRecord that has the same OriginalSQL and Db as the specified BindRecord.
func (c cache) removeDeletedBindRecord(hash string, meta *BindRecord) {
metas, ok := c[hash]
if !ok {
return
}
for i := len(metas) - 1; i >= 0; i-- {
if metas[i].isSame(meta) {
metas[i] = metas[i].remove(meta)
if len(metas[i].Bindings) == 0 {
metas = append(metas[:i], metas[i+1:]...)
}
if len(metas) == 0 {
delete(c, hash)
return
}
}
}
c[hash] = metas
}
func (c cache) setBindRecord(hash string, meta *BindRecord) {
metas := c[hash]
for i := range metas {
if metas[i].Db == meta.Db && metas[i].OriginalSQL == meta.OriginalSQL {
metas[i] = meta
return
}
}
c[hash] = append(c[hash], meta)
}
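// copy returns a shallow copy of the cache: the per-hash slices are duplicated,
// but the *BindRecord pointers are shared with the original.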
func (c cache) copy() cache {
newCache := make(cache, len(c))
for k, v := range c {
bindRecords := make([]*BindRecord, len(v))
copy(bindRecords, v)
newCache[k] = bindRecords
}
return newCache
}
func copyBindRecordUpdateMap(oldMap map[string]*bindRecordUpdate) map[string]*bindRecordUpdate {
newMap := make(map[string]*bindRecordUpdate, len(oldMap))
for k, v := range oldMap {
newMap[k] = v
}
return newMap
}
func (c cache) getBindRecord(hash, normdOrigSQL, db string) *BindRecord {
bindRecords := c[hash]
for _, bindRecord := range bindRecords {
if bindRecord.OriginalSQL == normdOrigSQL && bindRecord.Db == db {
return bindRecord
}
}
return nil
}
func (h *BindHandle) deleteBindInfoSQL(normdOrigSQL, db, bindSQL string) string {
return fmt.Sprintf(
`DELETE FROM mysql.bind_info WHERE original_sql=%s AND default_db=%s AND bind_sql=%s`,
expression.Quote(normdOrigSQL),
expression.Quote(db),
expression.Quote(bindSQL),
)
}
func (h *BindHandle) insertBindInfoSQL(originalSQL string, db string, info Binding) string {
return fmt.Sprintf(`INSERT INTO mysql.bind_info VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s)`,
expression.Quote(originalSQL),
expression.Quote(info.BindSQL),
expression.Quote(db),
expression.Quote(info.Status),
expression.Quote(info.CreateTime.String()),
expression.Quote(info.UpdateTime.String()),
expression.Quote(info.Charset),
expression.Quote(info.Collation),
expression.Quote(info.Source),
)
}
func (h *BindHandle) logicalDeleteBindInfoSQL(originalSQL, db string, updateTs types.Time, bindingSQL string) string {
sql := fmt.Sprintf(`UPDATE mysql.bind_info SET status=%s,update_time=%s WHERE original_sql=%s and default_db=%s`,
expression.Quote(deleted),
expression.Quote(updateTs.String()),
expression.Quote(originalSQL),
expression.Quote(db))
if bindingSQL == "" {
return sql
}
return sql + fmt.Sprintf(` and bind_sql = %s`, expression.Quote(bindingSQL))
}
// CaptureBaselines is used to automatically capture plan baselines.
func (h *BindHandle) CaptureBaselines() {
parser4Capture := parser.New()
schemas, sqls := stmtsummary.StmtSummaryByDigestMap.GetMoreThanOnceBindableStmt()
for i := range sqls {
stmt, err := parser4Capture.ParseOneStmt(sqls[i], "", "")
if err != nil {
logutil.BgLogger().Debug("parse SQL failed", zap.String("SQL", sqls[i]), zap.Error(err))
continue
}
if insertStmt, ok := stmt.(*ast.InsertStmt); ok && insertStmt.Select == nil {
continue
}
normalizedSQL, digest := parser.NormalizeDigest(sqls[i])
dbName := utilparser.GetDefaultDB(stmt, schemas[i])
if r := h.GetBindRecord(digest, normalizedSQL, dbName); r != nil && r.HasUsingBinding() {
continue
}
h.sctx.Lock()
h.sctx.GetSessionVars().CurrentDB = schemas[i]
oriIsolationRead := h.sctx.GetSessionVars().IsolationReadEngines
// TODO: support all engines plan hint in capture baselines.
h.sctx.GetSessionVars().IsolationReadEngines = map[kv.StoreType]struct{}{kv.TiKV: {}}
hints, err := getHintsForSQL(h.sctx.Context, sqls[i])
h.sctx.GetSessionVars().IsolationReadEngines = oriIsolationRead
h.sctx.Unlock()
if err != nil {
logutil.BgLogger().Debug("generate hints failed", zap.String("SQL", sqls[i]), zap.Error(err))
continue
}
bindSQL := GenerateBindSQL(context.TODO(), stmt, hints)
if bindSQL == "" {
continue
}
charset, collation := h.sctx.GetSessionVars().GetCharsetInfo()
binding := Binding{
BindSQL: bindSQL,
Status: Using,
Charset: charset,
Collation: collation,
Source: Capture,
}
// We don't need to pass the `sctx` because the BindSQL has been validated already.
err = h.AddBindRecord(nil, &BindRecord{OriginalSQL: normalizedSQL, Db: dbName, Bindings: []Binding{binding}})
if err != nil {
logutil.BgLogger().Info("capture baseline failed", zap.String("SQL", sqls[i]), zap.Error(err))
}
}
}
func getHintsForSQL(sctx sessionctx.Context, sql string) (string, error) {
oriVals := sctx.GetSessionVars().UsePlanBaselines
sctx.GetSessionVars().UsePlanBaselines = false
recordSets, err := sctx.(sqlexec.SQLExecutor).ExecuteInternal(context.TODO(), fmt.Sprintf("explain format='hint' %s", sql))
sctx.GetSessionVars().UsePlanBaselines = oriVals
if len(recordSets) > 0 {
defer terror.Log(recordSets[0].Close())
}
if err != nil {
return "", err
}
chk := recordSets[0].NewChunk()
err = recordSets[0].Next(context.TODO(), chk)
if err != nil {
return "", err
}
return chk.GetRow(0).GetString(0), nil
}
// GenerateBindSQL generates binding sqls from stmt node and plan hints.
func GenerateBindSQL(ctx context.Context, stmtNode ast.StmtNode, planHint string) string {
// It would be empty for very simple cases such as point get; we do not need to evolve for them.
if planHint == "" {
return ""
}
paramChecker := &paramMarkerChecker{}
stmtNode.Accept(paramChecker)
// We need to evolve on current sql, but we cannot restore values for paramMarkers yet,
// so just ignore them now.
if paramChecker.hasParamMarker {
return ""
}
// We need to evolve plan based on the current sql, not the original sql which may have different parameters.
// So here we would remove the hint and inject the current best plan hint.
hint.BindHint(stmtNode, &hint.HintsSet{})
var sb strings.Builder
restoreCtx := format.NewRestoreCtx(format.DefaultRestoreFlags, &sb)
err := stmtNode.Restore(restoreCtx)
if err != nil {
logutil.Logger(ctx).Warn("Restore SQL failed", zap.Error(err))
}
bindSQL := sb.String()
switch n := stmtNode.(type) {
case *ast.DeleteStmt:
deleteIdx := strings.Index(bindSQL, "DELETE")
// Remove possible `explain` prefix.
bindSQL = bindSQL[deleteIdx:]
return strings.Replace(bindSQL, "DELETE", fmt.Sprintf("DELETE /*+ %s*/", planHint), 1)
case *ast.UpdateStmt:
updateIdx := strings.Index(bindSQL, "UPDATE")
// Remove possible `explain` prefix.
bindSQL = bindSQL[updateIdx:]
return strings.Replace(bindSQL, "UPDATE", fmt.Sprintf("UPDATE /*+ %s*/", planHint), 1)
case *ast.SelectStmt:
selectIdx := strings.Index(bindSQL, "SELECT")
// Remove possible `explain` prefix.
bindSQL = bindSQL[selectIdx:]
return strings.Replace(bindSQL, "SELECT", fmt.Sprintf("SELECT /*+ %s*/", planHint), 1)
case *ast.InsertStmt:
insertIdx := 0
if n.IsReplace {
insertIdx = strings.Index(bindSQL, "REPLACE")
} else {
insertIdx = strings.Index(bindSQL, "INSERT")
}
// Remove possible `explain` prefix.
bindSQL = bindSQL[insertIdx:]
return strings.Replace(bindSQL, "SELECT", fmt.Sprintf("SELECT /*+ %s*/", planHint), 1)
}
logutil.Logger(ctx).Warn("Unexpected statement type")
return ""
}
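To make the keyword rewrite concrete, a tiny runnable illustration of the final replacement step; the SQL text and hint below are hypothetical, not taken from this package:

```
package main

import (
	"fmt"
	"strings"
)

func main() {
	// Mimics GenerateBindSQL's last step for a SELECT: inject the plan hint
	// right after the leading keyword of the restored statement.
	planHint := "use_index(@`sel_1` `test`.`t` `idx_a`)" // hypothetical hint text
	bindSQL := "SELECT * FROM `test`.`t` WHERE `a`=1"
	fmt.Println(strings.Replace(bindSQL, "SELECT", fmt.Sprintf("SELECT /*+ %s*/", planHint), 1))
	// SELECT /*+ use_index(@`sel_1` `test`.`t` `idx_a`)*/ * FROM `test`.`t` WHERE `a`=1
}
```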
type paramMarkerChecker struct {
hasParamMarker bool
}
func (e *paramMarkerChecker) Enter(in ast.Node) (ast.Node, bool) {
if _, ok := in.(*driver.ParamMarkerExpr); ok {
e.hasParamMarker = true
return in, true
}
return in, false
}
func (e *paramMarkerChecker) Leave(in ast.Node) (ast.Node, bool) {
return in, true
}
// AddEvolvePlanTask adds the evolve plan task into memory cache. It would be flushed to store periodically.
func (h *BindHandle) AddEvolvePlanTask(originalSQL, DB string, binding Binding) {
br := &BindRecord{
OriginalSQL: originalSQL,
Db: DB,
Bindings: []Binding{binding},
}
h.pendingVerifyBindRecordMap.Add(br)
}
// SaveEvolveTasksToStore saves the evolve task into store.
func (h *BindHandle) SaveEvolveTasksToStore() {
h.pendingVerifyBindRecordMap.flushToStore()
}
func getEvolveParameters(ctx sessionctx.Context) (time.Duration, time.Time, time.Time, error) {
sql := fmt.Sprintf("select variable_name, variable_value from mysql.global_variables where variable_name in ('%s', '%s', '%s')",
variable.TiDBEvolvePlanTaskMaxTime, variable.TiDBEvolvePlanTaskStartTime, variable.TiDBEvolvePlanTaskEndTime)
rows, _, err := ctx.(sqlexec.RestrictedSQLExecutor).ExecRestrictedSQL(sql)
if err != nil {
return 0, time.Time{}, time.Time{}, err
}
maxTime, startTimeStr, endTimeStr := int64(variable.DefTiDBEvolvePlanTaskMaxTime), variable.DefTiDBEvolvePlanTaskStartTime, variable.DefAutoAnalyzeEndTime
for _, row := range rows {
switch row.GetString(0) {
case variable.TiDBEvolvePlanTaskMaxTime:
maxTime, err = strconv.ParseInt(row.GetString(1), 10, 64)
if err != nil {
return 0, time.Time{}, time.Time{}, err
}
case variable.TiDBEvolvePlanTaskStartTime:
startTimeStr = row.GetString(1)
case variable.TiDBEvolvePlanTaskEndTime:
endTimeStr = row.GetString(1)
}
}
startTime, err := time.ParseInLocation(variable.FullDayTimeFormat, startTimeStr, time.UTC)
if err != nil {
return 0, time.Time{}, time.Time{}, err
}
endTime, err := time.ParseInLocation(variable.FullDayTimeFormat, endTimeStr, time.UTC)
if err != nil {
return 0, time.Time{}, time.Time{}, err
}
return time.Duration(maxTime) * time.Second, startTime, endTime, nil
}
const (
// acceptFactor is the factor that decides whether we should accept the pending verified plan.
// A pending verified plan will be accepted if it performs at least `acceptFactor` times better than the accepted plans.
acceptFactor = 1.5
// verifyTimeoutFactor is how long to wait to verify the pending plan.
// For debugging purposes it is useful to wait a few times longer than the current execution time so that
// an informative error can be written to the log.
verifyTimeoutFactor = 2.0
// nextVerifyDuration is the duration that we will retry the rejected plans.
nextVerifyDuration = 7 * 24 * time.Hour
)
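A quick numeric sketch of how these two factors interact (the durations are hypothetical, not measurements):

```
package main

import (
	"fmt"
	"time"
)

const (
	acceptFactor        = 1.5
	verifyTimeoutFactor = 2.0
)

func main() {
	currentPlanTime := 100 * time.Millisecond // the accepted plan's measured time
	// The candidate plan gets up to 2x the accepted plan's time before it is killed.
	maxTime := time.Duration(float64(currentPlanTime) * verifyTimeoutFactor)
	verifyPlanTime := 60 * time.Millisecond // the candidate's measured time
	// The candidate is rejected unless it is at least 1.5x faster than the accepted plan.
	rejected := float64(verifyPlanTime)*acceptFactor > float64(currentPlanTime)
	fmt.Println(maxTime, rejected) // 200ms false: 60ms*1.5 = 90ms <= 100ms, so it is accepted
}
```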
func (h *BindHandle) getOnePendingVerifyJob() (string, string, Binding) {
cache := h.bindInfo.Value.Load().(cache)
for _, bindRecords := range cache {
for _, bindRecord := range bindRecords {
for _, bind := range bindRecord.Bindings {
if bind.Status == PendingVerify {
return bindRecord.OriginalSQL, bindRecord.Db, bind
}
if bind.Status != Rejected {
continue
}
dur, err := bind.SinceUpdateTime()
// Should not happen.
if err != nil {
continue
}
// Rejected and retry it now.
if dur > nextVerifyDuration {
return bindRecord.OriginalSQL, bindRecord.Db, bind
}
}
}
}
return "", "", Binding{}
}
func (h *BindHandle) getRunningDuration(sctx sessionctx.Context, db, sql string, maxTime time.Duration) (time.Duration, error) {
ctx := context.TODO()
if db != "" {
_, err := sctx.(sqlexec.SQLExecutor).ExecuteInternal(ctx, fmt.Sprintf("use `%s`", db))
if err != nil {
return 0, err
}
}
ctx, cancelFunc := context.WithCancel(ctx)
timer := time.NewTimer(maxTime)
resultChan := make(chan error)
startTime := time.Now()
go runSQL(ctx, sctx, sql, resultChan)
select {
case err := <-resultChan:
cancelFunc()
if err != nil {
return 0, err
}
return time.Since(startTime), nil
case <-timer.C:
cancelFunc()
logutil.BgLogger().Warn("plan verification timed out", zap.Duration("timeElapsed", time.Since(startTime)))
}
<-resultChan
return -1, nil
}
func runSQL(ctx context.Context, sctx sessionctx.Context, sql string, resultChan chan<- error) {
defer func() {
if r := recover(); r != nil {
buf := make([]byte, 4096)
stackSize := runtime.Stack(buf, false)
buf = buf[:stackSize]
resultChan <- fmt.Errorf("run sql panicked: %v", string(buf))
}
}()
recordSets, err := sctx.(sqlexec.SQLExecutor).ExecuteInternal(ctx, sql)
if err != nil {
if len(recordSets) > 0 {
terror.Call(recordSets[0].Close)
}
resultChan <- err
return
}
recordSet := recordSets[0]
chk := recordSets[0].NewChunk()
for {
err = recordSet.Next(ctx, chk)
if err != nil || chk.NumRows() == 0 {
break
}
}
terror.Call(recordSets[0].Close)
resultChan <- err
}
// HandleEvolvePlanTask tries to evolve one plan task.
// It only handles one task at a time because we want each task to use the latest parameters.
func (h *BindHandle) HandleEvolvePlanTask(sctx sessionctx.Context, adminEvolve bool) error {
originalSQL, db, binding := h.getOnePendingVerifyJob()
if originalSQL == "" {
return nil
}
maxTime, startTime, endTime, err := getEvolveParameters(sctx)
if err != nil {
return err
}
if maxTime == 0 || (!timeutil.WithinDayTimePeriod(startTime, endTime, time.Now()) && !adminEvolve) {
return nil
}
sctx.GetSessionVars().UsePlanBaselines = true
currentPlanTime, err := h.getRunningDuration(sctx, db, binding.BindSQL, maxTime)
// If we just returned the error to the caller, this job would be retried again and again, causing endless logs,
// since it is still in the bind record. For now we just drop it; if it is actually retryable,
// we hope to capture this evolve task again later.
if err != nil {
return h.DropBindRecord(originalSQL, db, &binding)
}
// If the accepted plan times out, it is hard to decide the timeout for the verify plan.
// Currently we simply mark the verify plan as `using` if it can run successfully within maxTime.
if currentPlanTime > 0 {
maxTime = time.Duration(float64(currentPlanTime) * verifyTimeoutFactor)
}
sctx.GetSessionVars().UsePlanBaselines = false
verifyPlanTime, err := h.getRunningDuration(sctx, db, binding.BindSQL, maxTime)
if err != nil {
return h.DropBindRecord(originalSQL, db, &binding)
}
if verifyPlanTime == -1 || (float64(verifyPlanTime)*acceptFactor > float64(currentPlanTime)) {
binding.Status = Rejected
digestText, _ := parser.NormalizeDigest(binding.BindSQL) // for log desensitization
logutil.BgLogger().Warn("new plan rejected",
zap.Duration("currentPlanTime", currentPlanTime),
zap.Duration("verifyPlanTime", verifyPlanTime),
zap.String("digestText", digestText),
)
} else {
binding.Status = Using
}
// We don't need to pass the `sctx` because the BindSQL has been validated already.
return h.AddBindRecord(nil, &BindRecord{OriginalSQL: originalSQL, Db: db, Bindings: []Binding{binding}})
}
// Clear resets the bind handle. It is only used for test.
func (h *BindHandle) Clear() {
h.bindInfo.Lock()
h.bindInfo.Store(make(cache))
h.bindInfo.lastUpdateTime = types.ZeroTimestamp
h.bindInfo.Unlock()
h.invalidBindRecordMap.Store(make(map[string]*bindRecordUpdate))
h.pendingVerifyBindRecordMap.Store(make(map[string]*bindRecordUpdate))
}
// FlushBindings flushes the BindRecord in temp maps to storage and loads them into cache.
func (h *BindHandle) FlushBindings() error {
h.DropInvalidBindRecord()
h.SaveEvolveTasksToStore()
return h.Update(false)
}
// ReloadBindings clears the existing binding cache and does a full load from mysql.bind_info.
// It is used to maintain consistency between cache and mysql.bind_info if the table is deleted or truncated.
func (h *BindHandle) ReloadBindings() error {
h.bindInfo.Lock()
h.bindInfo.Store(make(cache))
h.bindInfo.lastUpdateTime = types.ZeroTimestamp
h.bindInfo.Unlock()
return h.Update(true)
}

121
bindinfo/session_handle.go

@@ -0,0 +1,121 @@
// Copyright 2019 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package bindinfo
import (
"time"
"github.com/pingcap/parser"
"github.com/pingcap/parser/mysql"
"github.com/pingcap/tidb/metrics"
"github.com/pingcap/tidb/sessionctx"
"github.com/pingcap/tidb/types"
)
// SessionHandle is used to handle all session sql bind operations.
type SessionHandle struct {
ch cache
parser *parser.Parser
}
// NewSessionBindHandle creates a new SessionBindHandle.
func NewSessionBindHandle(parser *parser.Parser) *SessionHandle {
sessionHandle := &SessionHandle{parser: parser}
sessionHandle.ch = make(cache)
return sessionHandle
}
// appendBindRecord adds the BindRecord to the cache; all stale BindRecords are
// removed from the cache after this operation.
func (h *SessionHandle) appendBindRecord(hash string, meta *BindRecord) {
oldRecord := h.ch.getBindRecord(hash, meta.OriginalSQL, meta.Db)
h.ch.setBindRecord(hash, meta)
updateMetrics(metrics.ScopeSession, oldRecord, meta, false)
}
// CreateBindRecord creates a BindRecord in the cache.
// It replaces all existing bindings for the same normalized SQL.
func (h *SessionHandle) CreateBindRecord(sctx sessionctx.Context, record *BindRecord) (err error) {
err = record.prepareHints(sctx)
if err != nil {
return err
}
now := types.NewTime(types.FromGoTime(time.Now().In(sctx.GetSessionVars().StmtCtx.TimeZone)), mysql.TypeTimestamp, 3)
for i := range record.Bindings {
record.Bindings[i].CreateTime = now
record.Bindings[i].UpdateTime = now
}
// update the BindRecord in the cache.
h.appendBindRecord(parser.DigestNormalized(record.OriginalSQL), record)
return nil
}
// DropBindRecord drops a BindRecord in the cache.
func (h *SessionHandle) DropBindRecord(originalSQL, db string, binding *Binding) error {
oldRecord := h.GetBindRecord(originalSQL, db)
var newRecord *BindRecord
record := &BindRecord{OriginalSQL: originalSQL, Db: db}
if binding != nil {
record.Bindings = append(record.Bindings, *binding)
}
if oldRecord != nil {
newRecord = oldRecord.remove(record)
} else {
newRecord = record
}
h.ch.setBindRecord(parser.DigestNormalized(record.OriginalSQL), newRecord)
updateMetrics(metrics.ScopeSession, oldRecord, newRecord, false)
return nil
}
// GetBindRecord returns the BindRecord of (normdOrigSQL, db) if it exists.
func (h *SessionHandle) GetBindRecord(normdOrigSQL, db string) *BindRecord {
hash := parser.DigestNormalized(normdOrigSQL)
bindRecords := h.ch[hash]
for _, bindRecord := range bindRecords {
if bindRecord.OriginalSQL == normdOrigSQL && bindRecord.Db == db {
return bindRecord
}
}
return nil
}
// GetAllBindRecord returns all session bind info.
func (h *SessionHandle) GetAllBindRecord() (bindRecords []*BindRecord) {
for _, bindRecord := range h.ch {
bindRecords = append(bindRecords, bindRecord...)
}
return bindRecords
}
// Close closes the session handle.
func (h *SessionHandle) Close() {
for _, bindRecords := range h.ch {
for _, bindRecord := range bindRecords {
updateMetrics(metrics.ScopeSession, bindRecord, nil, false)
}
}
}
// sessionBindInfoKeyType is a dummy type to avoid naming collision in context.
type sessionBindInfoKeyType int
// String defines a Stringer function for debugging and pretty printing.
func (k sessionBindInfoKeyType) String() string {
return "session_bindinfo"
}
// SessionBindInfoKeyType is a variable key for storing session bind info.
const SessionBindInfoKeyType sessionBindInfoKeyType = 0

28
checklist.md

@@ -0,0 +1,28 @@
# Following the checklist saves the reviewers' time and gets your PR reviewed faster.
# Self Review
Have you reviewed every line of your changes by yourself?
# Test
Have you added enough test cases to cover the new feature or bug fix?
Also, add comments to describe your test cases.
# Naming
Are function names consistent with the functions' behavior?
Is it easy to infer a function's behavior from its name?
# Comment
Is there any code that confuses the reviewer?
Add comments on them! You'll be asked to do so anyway.
Make sure there is no syntax or spelling error in your comments.
Some online syntax checking tools like Grammarly may be helpful.
# Refactor
Is there any way to refactor the code to make it more readable?
If the refactoring touches a lot of existing code, send another PR to do it.
# Single Purpose
Make sure the PR does only one thing and nothing else.
# Diff Size
Make sure the diff size is no more than 500 lines; split it into smaller PRs if it is too large.

36
checkout-pr-branch.sh

@@ -0,0 +1,36 @@
#!/usr/bin/env bash
# Copyright 2019 PingCAP, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# See the License for the specific language governing permissions and
# limitations under the License.
# This script is used to checkout a TiDB PR branch in a forked repo.
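# Example (hypothetical): ./checkout-pr-branch.sh someuser:fix-typo
# checks the branch out locally as someuser/fix-typo.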
if test -z "$1"; then
echo -e "Usage:\n"
echo -e "\tcheckout-pr-branch.sh [github-username]:[pr-branch]\n"
echo -e "The argument can be copied directly from github PR page."
echo -e "The local branch name would be [github-username]/[pr-branch]."
exit 0;
fi
username=$(echo "$1" | cut -d':' -f1)
branch=$(echo "$1" | cut -d':' -f2)
local_branch=$username/$branch
fork="https://github.com/$username/tidb"
exists=$(git show-ref "refs/heads/$local_branch")
if [ -n "$exists" ]; then
git checkout "$local_branch"
git pull "$fork" "$branch:$local_branch"
else
git fetch "$fork" "$branch:$local_branch"
git checkout "$local_branch"
fi
fi

12
circle.yml

@@ -0,0 +1,12 @@
version: 2
jobs:
build:
docker:
- image: golang:1.13
working_directory: /go/src/github.com/pingcap/tidb
steps:
- checkout
- run:
name: "Build & Test"
command: make dev

62
cmd/benchdb/README.md

@@ -0,0 +1,62 @@
## BenchDB
BenchDB is a command line tool to test the performance of TiDB.
### Quick Start
Make sure you have started PD and TiKV, then run:
```
./benchdb -run="create|truncate|insert:0_10000|update-random:0_10000:100000|select:0_10000:10"
```
### Arguments
#### `run`
The `run` argument defines the workflow of the test. You can define
multiple jobs, separated by `|`. The jobs are executed sequentially.
The `run` argument has the following options:
* `create` creates a table. Currently it's just a typical simple table, with a few columns.
* `truncate` truncates the table.
* `insert:xxx_yyy` inserts rows with ID in `[xxx, yyy)`.
e.g. `insert:0_10000` inserts 10000 rows with ID in range `[0, 10000)`.
* `update-random:xxx_yyy:zzz` updates a row with a random ID in range `[xxx, yyy)`, `zzz` times.
e.g. `update-random:100_200:50` updates 50 random rows with ID in range `[100, 200)`.
* `update-range:xxx_yyy:zzz` updates the rows with ID in range `[xxx, yyy)`, `zzz` times.
* `select:xxx_yyy:zzz` selects rows with ID in range `[xxx, yyy)`, `zzz` times.
* `gc` does a manually triggered GC, so we can compare the performance before and after GC.
* `query:xxx:zzz` runs the SQL query `xxx`, `zzz` times.
The output shows the execution time.
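For instance, `query:select count(*) from benchdb:10` (an illustrative job) runs that count query 10 times.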
#### `table`
The name of the table, so we can create many tables for different tests without the need to clean up.
Default is `benchdb`.
#### `blob`
The blob column size in bytes, so we can test performance for different row sizes.
Default is `1000`.
#### `batch`
The number of statements batched in one transaction, used for insert and update-random only, to speed up the test workflow.
Default is `100`.
#### `addr`
The PD address. Default is `127.0.0.1:2379`.
#### `L`
The log level. Default is `warn`.

301
cmd/benchdb/main.go

@@ -0,0 +1,301 @@
// Copyright 2016 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package main
import (
"context"
"flag"
"fmt"
"math/rand"
"strconv"
"strings"
"time"
"github.com/pingcap/log"
"github.com/pingcap/parser/terror"
"github.com/pingcap/tidb/session"
"github.com/pingcap/tidb/store"
"github.com/pingcap/tidb/store/tikv"
"github.com/pingcap/tidb/util/logutil"
"go.uber.org/zap"
)
var (
addr = flag.String("addr", "127.0.0.1:2379", "pd address")
tableName = flag.String("table", "benchdb", "name of the table")
batchSize = flag.Int("batch", 100, "number of statements in a transaction, used for insert and update-random only")
blobSize = flag.Int("blob", 1000, "size of the blob column in the row")
logLevel = flag.String("L", "warn", "log level")
runJobs = flag.String("run", strings.Join([]string{
"create",
"truncate",
"insert:0_10000",
"update-random:0_10000:100000",
"select:0_10000:10",
"update-range:5000_5100:1000",
"select:0_10000:10",
"gc",
"select:0_10000:10",
}, "|"), "jobs to run")
)
func main() {
flag.Parse()
flag.PrintDefaults()
err := logutil.InitZapLogger(logutil.NewLogConfig(*logLevel, logutil.DefaultLogFormat, "", logutil.EmptyFileLogConfig, false))
terror.MustNil(err)
err = store.Register("tikv", tikv.Driver{})
terror.MustNil(err)
ut := newBenchDB()
works := strings.Split(*runJobs, "|")
for _, v := range works {
work := strings.ToLower(strings.TrimSpace(v))
name, spec := ut.mustParseWork(work)
switch name {
case "create":
ut.createTable()
case "truncate":
ut.truncateTable()
case "insert":
ut.insertRows(spec)
case "update-random", "update_random":
ut.updateRandomRows(spec)
case "update-range", "update_range":
ut.updateRangeRows(spec)
case "select":
ut.selectRows(spec)
case "query":
ut.query(spec)
default:
cLog("Unknown job ", v)
return
}
}
}
type benchDB struct {
store tikv.Storage
session session.Session
}
func newBenchDB() *benchDB {
// Create TiKV store and disable GC as we will trigger GC manually.
store, err := store.New("tikv://" + *addr + "?disableGC=true")
terror.MustNil(err)
_, err = session.BootstrapSession(store)
terror.MustNil(err)
se, err := session.CreateSession(store)
terror.MustNil(err)
_, err = se.Execute(context.Background(), "use test")
terror.MustNil(err)
return &benchDB{
store: store.(tikv.Storage),
session: se,
}
}
func (ut *benchDB) mustExec(sql string) {
rss, err := ut.session.Execute(context.Background(), sql)
defer func() {
for _, rs := range rss {
err = rs.Close()
if err != nil {
log.Fatal(err.Error())
}
}
}()
if err != nil {
log.Fatal(err.Error())
return
}
if len(rss) > 0 {
ctx := context.Background()
rs := rss[0]
req := rs.NewChunk()
for {
err := rs.Next(ctx, req)
if err != nil {
log.Fatal(err.Error())
}
if req.NumRows() == 0 {
break
}
}
}
}
func (ut *benchDB) mustParseWork(work string) (name string, spec string) {
strs := strings.Split(work, ":")
if len(strs) == 1 {
return strs[0], ""
}
return strs[0], strings.Join(strs[1:], ":")
}
func (ut *benchDB) mustParseInt(s string) int {
i, err := strconv.Atoi(s)
if err != nil {
log.Fatal(err.Error())
}
return i
}
func (ut *benchDB) mustParseRange(s string) (start, end int) {
strs := strings.Split(s, "_")
if len(strs) != 2 {
log.Fatal("parse range failed", zap.String("invalid range", s))
}
startStr, endStr := strs[0], strs[1]
start = ut.mustParseInt(startStr)
end = ut.mustParseInt(endStr)
if start < 0 || end < start {
log.Fatal("parse range failed", zap.String("invalid range", s))
}
return
}
func (ut *benchDB) mustParseSpec(s string) (start, end, count int) {
strs := strings.Split(s, ":")
start, end = ut.mustParseRange(strs[0])
if len(strs) == 1 {
count = 1
return
}
count = ut.mustParseInt(strs[1])
return
}
func (ut *benchDB) createTable() {
cLog("create table")
createSQL := "CREATE TABLE IF NOT EXISTS " + *tableName + ` (
id bigint(20) NOT NULL,
name varchar(32) NOT NULL,
exp bigint(20) NOT NULL DEFAULT '0',
data blob,
PRIMARY KEY (id),
UNIQUE KEY name (name)
)`
ut.mustExec(createSQL)
}
func (ut *benchDB) truncateTable() {
cLog("truncate table")
ut.mustExec("truncate table " + *tableName)
}
func (ut *benchDB) runCountTimes(name string, count int, f func()) {
var (
sum, first, last time.Duration
min = time.Minute
max = time.Nanosecond
)
cLogf("%s started", name)
for i := 0; i < count; i++ {
before := time.Now()
f()
dur := time.Since(before)
if first == 0 {
first = dur
}
last = dur
if dur < min {
min = dur
}
if dur > max {
max = dur
}
sum += dur
}
cLogf("%s done, avg %s, count %d, sum %s, first %s, last %s, max %s, min %s\n\n",
name, sum/time.Duration(count), count, sum, first, last, max, min)
}
func (ut *benchDB) insertRows(spec string) {
start, end, _ := ut.mustParseSpec(spec)
loopCount := (end - start + *batchSize - 1) / *batchSize
id := start
ut.runCountTimes("insert", loopCount, func() {
ut.mustExec("begin")
buf := make([]byte, *blobSize/2)
for i := 0; i < *batchSize; i++ {
if id == end {
break
}
rand.Read(buf)
insertQuery := fmt.Sprintf("insert %s (id, name, data) values (%d, '%d', '%x')",
*tableName, id, id, buf)
ut.mustExec(insertQuery)
id++
}
ut.mustExec("commit")
})
}
func (ut *benchDB) updateRandomRows(spec string) {
start, end, totalCount := ut.mustParseSpec(spec)
loopCount := (totalCount + *batchSize - 1) / *batchSize
var runCount = 0
ut.runCountTimes("update-random", loopCount, func() {
ut.mustExec("begin")
for i := 0; i < *batchSize; i++ {
if runCount == totalCount {
break
}
id := rand.Intn(end-start) + start
updateQuery := fmt.Sprintf("update %s set exp = exp + 1 where id = %d", *tableName, id)
ut.mustExec(updateQuery)
runCount++
}
ut.mustExec("commit")
})
}
func (ut *benchDB) updateRangeRows(spec string) {
start, end, count := ut.mustParseSpec(spec)
ut.runCountTimes("update-range", count, func() {
ut.mustExec("begin")
updateQuery := fmt.Sprintf("update %s set exp = exp + 1 where id >= %d and id < %d", *tableName, start, end)
ut.mustExec(updateQuery)
ut.mustExec("commit")
})
}
func (ut *benchDB) selectRows(spec string) {
start, end, count := ut.mustParseSpec(spec)
ut.runCountTimes("select", count, func() {
selectQuery := fmt.Sprintf("select * from %s where id >= %d and id < %d", *tableName, start, end)
ut.mustExec(selectQuery)
})
}
func (ut *benchDB) query(spec string) {
strs := strings.Split(spec, ":")
sql := strs[0]
count, err := strconv.Atoi(strs[1])
terror.MustNil(err)
ut.runCountTimes("query", count, func() {
ut.mustExec(sql)
})
}
func cLogf(format string, args ...interface{}) {
str := fmt.Sprintf(format, args...)
fmt.Println("\033[0;32m" + str + "\033[0m\n")
}
func cLog(args ...interface{}) {
str := fmt.Sprint(args...)
fmt.Println("\033[0;32m" + str + "\033[0m\n")
}

121
cmd/benchfilesort/README.md

@@ -0,0 +1,121 @@
## BenchFileSort
BenchFileSort is a command line tool to test the performance of util/filesort.
### Quick Start (Examples)
Step 1 - Generate the synthetic data
```
./benchfilesort gen -keySize 8 -valSize 16 -scale 1000
```
Expected output:
```
Generating...
Done!
Data placed in: /path/to/data.out
Time used: xxxx ms
=================================
```
Step 2 - Load the data and run the benchmark
```
./benchfilesort run -bufSize 50 -nWorkers 1 -inputRatio 100 -outputRatio 50
```
Expected output:
```
Loading...
number of rows = 1000, key size = 8, value size = 16
load 1000 rows
Done!
Loaded 1000 rows
Time used: xxxx ms
=================================
Inputting...
Done!
Input 1000 rows
Time used: xxxx s
=================================
Outputting...
Done!
Output 500 rows
Time used: xxxx ms
=================================
Closing...
Done!
Time used: xxxx ms
=================================
```
For performance tuning purposes, `Input` time and `Output` time are the two KPIs you should focus on.
`Close` time reflects the GC performance, which might be noteworthy sometimes.
### Commands and Arguments
#### `gen` command
The `gen` command generates the synthetic data for the benchmark.
You can specify how many rows you want to generate, the key size
and value size for each row.
The generated data is located in `$dir/data.out` (`$dir` is specified
by the `dir` argument).
The `gen` command supports the following arguments:
* `dir` (default: current working directory)
Specify the home directory of generated data
* `keySize` (default: 8)
Specify the key size for generated rows
* `valSize` (default: 8)
Specify the value size for generated rows
* `scale` (default: 100)
Specify how many rows to generate
* `cpuprofile` (default: "")
Turn on the CPU profile
#### `run` command
The `run` command loads the synthetic data and runs the benchmark.
You can specify the home directory of the synthetic data.
The benchmark will use a predefined amount of memory, controlled
by the `bufSize` argument, to run the test.
You can control how many rows are input and output, as defined
by the `inputRatio` and `outputRatio` arguments.
The `run` command supports the following arguments:
* `dir` (default: current working directory)
Specify the home directory of synthetic data
* `bufSize` (default: 500000)
Specify the amount of memory used by the benchmark
* `nWorkers` (default: 1)
Specify the number of workers used in async sorting
* `inputRatio` (default: 100)
Specify the percentage of rows to input:
`# of rows to input = # of total rows * inputRatio / 100`
* `outputRatio` (default: 100)
Specify the percentage of rows to output:
`# of rows to output = # of rows to input * outputRatio / 100`
* `cpuprofile` (default: "")
Turn on the CPU profile
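For example, the Quick Start above generates `scale=1000` rows; with `inputRatio=100` and `outputRatio=50`, the benchmark inputs 1000 rows and outputs 1000 * 50 / 100 = 500 rows, matching the expected output shown earlier.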

437
cmd/benchfilesort/main.go

@@ -0,0 +1,437 @@
// Copyright 2017 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package main
import (
"encoding/binary"
"flag"
"fmt"
"io/ioutil"
"math/rand"
"os"
"path/filepath"
"runtime/pprof"
"time"
"github.com/pingcap/errors"
"github.com/pingcap/log"
"github.com/pingcap/parser/terror"
"github.com/pingcap/tidb/sessionctx/stmtctx"
"github.com/pingcap/tidb/types"
"github.com/pingcap/tidb/util/codec"
"github.com/pingcap/tidb/util/filesort"
"github.com/pingcap/tidb/util/logutil"
)
type comparableRow struct {
key []types.Datum
val []types.Datum
handle int64
}
var (
genCmd = flag.NewFlagSet("gen", flag.ExitOnError)
runCmd = flag.NewFlagSet("run", flag.ExitOnError)
logLevel = "warn"
cpuprofile string
tmpDir string
keySize int
valSize int
bufSize int
scale int
nWorkers int
inputRatio int
outputRatio int
)
func nextRow(r *rand.Rand, keySize int, valSize int) *comparableRow {
key := make([]types.Datum, keySize)
for i := range key {
key[i] = types.NewDatum(r.Int())
}
val := make([]types.Datum, valSize)
for j := range val {
val[j] = types.NewDatum(r.Int())
}
handle := r.Int63()
return &comparableRow{key: key, val: val, handle: handle}
}
func encodeRow(b []byte, row *comparableRow) ([]byte, error) {
var (
err error
head = make([]byte, 8)
body []byte
)
sc := &stmtctx.StatementContext{TimeZone: time.Local}
body, err = codec.EncodeKey(sc, body, row.key...)
if err != nil {
return b, errors.Trace(err)
}
body, err = codec.EncodeKey(sc, body, row.val...)
if err != nil {
return b, errors.Trace(err)
}
body, err = codec.EncodeKey(sc, body, types.NewIntDatum(row.handle))
if err != nil {
return b, errors.Trace(err)
}
binary.BigEndian.PutUint64(head, uint64(len(body)))
b = append(b, head...)
b = append(b, body...)
return b, nil
}
func decodeRow(fd *os.File) (*comparableRow, error) {
var (
err error
n int
head = make([]byte, 8)
dcod = make([]types.Datum, 0, keySize+valSize+1)
)
n, err = fd.Read(head)
if n != 8 {
return nil, errors.New("incorrect header")
}
if err != nil {
return nil, errors.Trace(err)
}
rowSize := int(binary.BigEndian.Uint64(head))
rowBytes := make([]byte, rowSize)
n, err = fd.Read(rowBytes)
if n != rowSize {
return nil, errors.New("incorrect row")
}
if err != nil {
return nil, errors.Trace(err)
}
dcod, err = codec.Decode(rowBytes, keySize+valSize+1)
if err != nil {
return nil, errors.Trace(err)
}
return &comparableRow{
key: dcod[:keySize],
val: dcod[keySize : keySize+valSize],
handle: dcod[keySize+valSize:][0].GetInt64(),
}, nil
}
func encodeMeta(b []byte, scale int, keySize int, valSize int) []byte {
meta := make([]byte, 8)
binary.BigEndian.PutUint64(meta, uint64(scale))
b = append(b, meta...)
binary.BigEndian.PutUint64(meta, uint64(keySize))
b = append(b, meta...)
binary.BigEndian.PutUint64(meta, uint64(valSize))
b = append(b, meta...)
return b
}
func decodeMeta(fd *os.File) error {
meta := make([]byte, 24)
if n, err := fd.Read(meta); err != nil || n != 24 {
if n != 24 {
return errors.New("incorrect meta data")
}
return errors.Trace(err)
}
scale = int(binary.BigEndian.Uint64(meta[:8]))
if scale <= 0 {
return errors.New("number of rows must be positive")
}
keySize = int(binary.BigEndian.Uint64(meta[8:16]))
if keySize <= 0 {
return errors.New("key size must be positive")
}
valSize = int(binary.BigEndian.Uint64(meta[16:]))
if valSize <= 0 {
return errors.New("value size must be positive")
}
return nil
}
/*
* The synthetic data is exported as a binary format.
* The encoding format is:
* 1) Meta Data
* Three 64-bit integers represent scale size, key size and value size.
* 2) Row Data
* Each row is encoded as:
* One 64-bit integer represents the row size in bytes, followed by
* the actual row bytes.
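*
* For example (hypothetical values), scale=2, keySize=1, valSize=1 is laid out as:
*   [8B scale][8B keySize][8B valSize][8B len(row1)][row1 bytes][8B len(row2)][row2 bytes]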
*/
func export() error {
var outputBytes []byte
fileName := filepath.Join(tmpDir, "data.out")
outputFile, err := os.OpenFile(fileName, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600)
if err != nil {
return errors.Trace(err)
}
defer terror.Call(outputFile.Close)
outputBytes = encodeMeta(outputBytes, scale, keySize, valSize)
seed := rand.NewSource(time.Now().UnixNano())
r := rand.New(seed)
for i := 1; i <= scale; i++ {
outputBytes, err = encodeRow(outputBytes, nextRow(r, keySize, valSize))
if err != nil {
return errors.Trace(err)
}
_, err = outputFile.Write(outputBytes)
if err != nil {
return errors.Trace(err)
}
outputBytes = outputBytes[:0]
}
return nil
}
func load(ratio int) ([]*comparableRow, error) {
var (
err error
fd *os.File
)
fileName := filepath.Join(tmpDir, "data.out")
fd, err = os.Open(fileName)
if os.IsNotExist(err) {
return nil, errors.New("data file (data.out) does not exist")
}
if err != nil {
return nil, errors.Trace(err)
}
defer terror.Call(fd.Close)
err = decodeMeta(fd)
if err != nil {
return nil, errors.Trace(err)
}
cLogf("\tnumber of rows = %d, key size = %d, value size = %d", scale, keySize, valSize)
var (
row *comparableRow
rows = make([]*comparableRow, 0, scale)
)
totalRows := int(float64(scale) * (float64(ratio) / 100.0))
cLogf("\tload %d rows", totalRows)
for i := 1; i <= totalRows; i++ {
row, err = decodeRow(fd)
if err != nil {
return nil, errors.Trace(err)
}
rows = append(rows, row)
}
return rows, nil
}
func driveGenCmd() {
err := genCmd.Parse(os.Args[2:])
terror.MustNil(err)
// Sanity checks
if keySize <= 0 {
log.Fatal("key size must be positive")
}
if valSize <= 0 {
log.Fatal("value size must be positive")
}
if scale <= 0 {
log.Fatal("scale must be positive")
}
if _, err = os.Stat(tmpDir); err != nil {
if os.IsNotExist(err) {
log.Fatal("tmpDir does not exist")
}
log.Fatal(err.Error())
}
cLog("Generating...")
start := time.Now()
err = export()
terror.MustNil(err)
cLog("Done!")
cLogf("Data placed in: %s", filepath.Join(tmpDir, "data.out"))
cLog("Time used: ", time.Since(start))
cLog("=================================")
}
func driveRunCmd() {
err := runCmd.Parse(os.Args[2:])
terror.MustNil(err)
// Sanity checks
if bufSize <= 0 {
log.Fatal("buffer size must be positive")
}
if nWorkers <= 0 {
log.Fatal("the number of workers must be positive")
}
if inputRatio < 0 || inputRatio > 100 {
log.Fatal("input ratio must between 0 and 100 (inclusive)")
}
if outputRatio < 0 || outputRatio > 100 {
log.Fatal("output ratio must between 0 and 100 (inclusive)")
}
if _, err = os.Stat(tmpDir); err != nil {
if os.IsNotExist(err) {
log.Fatal("tmpDir does not exist")
}
terror.MustNil(err)
}
var (
dir string
profile *os.File
fs *filesort.FileSorter
)
cLog("Loading...")
start := time.Now()
data, err := load(inputRatio)
terror.MustNil(err)
cLog("Done!")
cLogf("Loaded %d rows", len(data))
cLog("Time used: ", time.Since(start))
cLog("=================================")
sc := new(stmtctx.StatementContext)
fsBuilder := new(filesort.Builder)
byDesc := make([]bool, keySize)
for i := 0; i < keySize; i++ {
byDesc[i] = false
}
dir, err = ioutil.TempDir(tmpDir, "benchfilesort_test")
terror.MustNil(err)
fs, err = fsBuilder.SetSC(sc).SetSchema(keySize, valSize).SetBuf(bufSize).SetWorkers(nWorkers).SetDesc(byDesc).SetDir(dir).Build()
terror.MustNil(err)
if cpuprofile != "" {
profile, err = os.Create(cpuprofile)
terror.MustNil(err)
}
cLog("Inputing...")
start = time.Now()
for _, r := range data {
err = fs.Input(r.key, r.val, r.handle)
terror.MustNil(err)
}
cLog("Done!")
cLogf("Input %d rows", len(data))
cLog("Time used: ", time.Since(start))
cLog("=================================")
cLog("Outputing...")
totalRows := int(float64(len(data)) * (float64(outputRatio) / 100.0))
start = time.Now()
if cpuprofile != "" {
err = pprof.StartCPUProfile(profile)
terror.MustNil(err)
}
for i := 0; i < totalRows; i++ {
_, _, _, err = fs.Output()
terror.MustNil(err)
}
if cpuprofile != "" {
pprof.StopCPUProfile()
}
cLog("Done!")
cLogf("Output %d rows", totalRows)
cLog("Time used: ", time.Since(start))
cLog("=================================")
cLog("Closing...")
start = time.Now()
err = fs.Close()
terror.MustNil(err)
cLog("Done!")
cLog("Time used: ", time.Since(start))
cLog("=================================")
}
func init() {
err := logutil.InitZapLogger(logutil.NewLogConfig(logLevel, logutil.DefaultLogFormat, "", logutil.EmptyFileLogConfig, false))
terror.MustNil(err)
cwd, err1 := os.Getwd()
terror.MustNil(err1)
genCmd.StringVar(&tmpDir, "dir", cwd, "where to store the generated rows")
genCmd.IntVar(&keySize, "keySize", 8, "the size of key")
genCmd.IntVar(&valSize, "valSize", 8, "the size of value")
genCmd.IntVar(&scale, "scale", 100, "how many rows to generate")
genCmd.StringVar(&cpuprofile, "cpuprofile", "", "write cpu profile to file")
runCmd.StringVar(&tmpDir, "dir", cwd, "where to load the generated rows")
runCmd.IntVar(&bufSize, "bufSize", 500000, "how many rows held in memory at a time")
runCmd.IntVar(&nWorkers, "nWorkers", 1, "how many workers used in async sorting")
runCmd.IntVar(&inputRatio, "inputRatio", 100, "input percentage")
runCmd.IntVar(&outputRatio, "outputRatio", 100, "output percentage")
runCmd.StringVar(&cpuprofile, "cpuprofile", "", "write cpu profile to file")
}
func main() {
flag.Parse()
if len(os.Args) == 1 {
fmt.Printf("Usage:\n\n")
fmt.Printf("\tbenchfilesort command [arguments]\n\n")
fmt.Printf("The commands are:\n\n")
fmt.Println("\tgen\t", "generate rows")
fmt.Println("\trun\t", "run tests")
fmt.Println("")
fmt.Println("Checkout benchfilesort/README for more information.")
return
}
switch os.Args[1] {
case "gen":
driveGenCmd()
case "run":
driveRunCmd()
default:
fmt.Printf("%q is not valid command.\n", os.Args[1])
os.Exit(2)
}
}
func cLogf(format string, args ...interface{}) {
str := fmt.Sprintf(format, args...)
fmt.Println("\033[0;32m" + str + "\033[0m")
}
func cLog(args ...interface{}) {
str := fmt.Sprint(args...)
fmt.Println("\033[0;32m" + str + "\033[0m")
}

138
cmd/benchkv/main.go

@@ -0,0 +1,138 @@
// Copyright 2016 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package main
import (
"context"
"flag"
"fmt"
"io/ioutil"
"net/http"
_ "net/http/pprof"
"sync"
"time"
_ "github.com/go-sql-driver/mysql"
"github.com/pingcap/errors"
"github.com/pingcap/log"
"github.com/pingcap/parser/terror"
"github.com/pingcap/tidb/kv"
"github.com/pingcap/tidb/store/tikv"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promhttp"
"go.uber.org/zap"
)
var (
store kv.Storage
dataCnt = flag.Int("N", 1000000, "data num")
workerCnt = flag.Int("C", 400, "concurrent num")
pdAddr = flag.String("pd", "localhost:2379", "pd address:localhost:2379")
valueSize = flag.Int("V", 5, "value size in byte")
txnCounter = prometheus.NewCounterVec(
prometheus.CounterOpts{
Namespace: "tikv",
Subsystem: "txn",
Name: "total",
Help: "Counter of txns.",
}, []string{"type"})
txnRolledbackCounter = prometheus.NewCounterVec(
prometheus.CounterOpts{
Namespace: "tikv",
Subsystem: "txn",
Name: "failed_total",
Help: "Counter of rolled back txns.",
}, []string{"type"})
txnDurations = prometheus.NewHistogramVec(
prometheus.HistogramOpts{
Namespace: "tikv",
Subsystem: "txn",
Name: "durations_histogram_seconds",
Help: "Txn latency distributions.",
Buckets: prometheus.ExponentialBuckets(0.0005, 2, 13),
}, []string{"type"})
)
// Init initializes information.
func Init() {
driver := tikv.Driver{}
var err error
store, err = driver.Open(fmt.Sprintf("tikv://%s?cluster=1", *pdAddr))
terror.MustNil(err)
prometheus.MustRegister(txnCounter)
prometheus.MustRegister(txnRolledbackCounter)
prometheus.MustRegister(txnDurations)
http.Handle("/metrics", promhttp.Handler())
go func() {
err1 := http.ListenAndServe(":9191", nil)
terror.Log(errors.Trace(err1))
}()
}
// batchRW issues transactional writes and is conflict-free: each worker owns a disjoint key range.
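// With the defaults (N=1000000, C=400), base is 2500 and worker i writes
// keys key_(2500*i) .. key_(2500*(i+1)-1), so the ranges never overlap.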
func batchRW(value []byte) {
wg := sync.WaitGroup{}
base := *dataCnt / *workerCnt
wg.Add(*workerCnt)
for i := 0; i < *workerCnt; i++ {
go func(i int) {
defer wg.Done()
for j := 0; j < base; j++ {
txnCounter.WithLabelValues("txn").Inc()
start := time.Now()
k := base*i + j
txn, err := store.Begin()
if err != nil {
log.Fatal(err.Error())
}
key := fmt.Sprintf("key_%d", k)
err = txn.Set([]byte(key), value)
terror.Log(errors.Trace(err))
err = txn.Commit(context.Background())
if err != nil {
txnRolledbackCounter.WithLabelValues("txn").Inc()
terror.Call(txn.Rollback)
}
txnDurations.WithLabelValues("txn").Observe(time.Since(start).Seconds())
}
}(i)
}
wg.Wait()
}
func main() {
flag.Parse()
log.SetLevel(zap.ErrorLevel)
Init()
value := make([]byte, *valueSize)
t := time.Now()
batchRW(value)
resp, err := http.Get("http://localhost:9191/metrics")
terror.MustNil(err)
defer terror.Call(resp.Body.Close)
text, err1 := ioutil.ReadAll(resp.Body)
terror.Log(errors.Trace(err1))
fmt.Println(string(text))
fmt.Printf("\nelapse:%v, total %v\n", time.Since(t), *dataCnt)
}
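A typical run against a local cluster might look like this (a sketch; all flags are defined above, and the PD address shown is the flag's default):

```sh
# write 1,000,000 keys with 400 concurrent workers and 5-byte values;
# Prometheus metrics are served on :9191/metrics while the benchmark runs
./benchkv -pd localhost:2379 -N 1000000 -C 400 -V 5
```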

87
cmd/benchraw/main.go

@ -0,0 +1,87 @@
// Copyright 2016 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package main
import (
"flag"
"fmt"
"net/http"
_ "net/http/pprof"
"strings"
"sync"
"time"
"github.com/pingcap/errors"
"github.com/pingcap/log"
"github.com/pingcap/parser/terror"
"github.com/pingcap/tidb/config"
"github.com/pingcap/tidb/store/tikv"
"go.uber.org/zap"
)
var (
dataCnt = flag.Int("N", 1000000, "data num")
workerCnt = flag.Int("C", 100, "concurrent num")
pdAddr = flag.String("pd", "localhost:2379", "pd address, e.g. localhost:2379")
valueSize = flag.Int("V", 5, "value size in byte")
sslCA = flag.String("cacert", "", "path of file that contains list of trusted SSL CAs.")
sslCert = flag.String("cert", "", "path of file that contains X509 certificate in PEM format.")
sslKey = flag.String("key", "", "path of file that contains X509 key in PEM format.")
)
// batchRawPut benchmarks blind (unconditional) raw puts.
func batchRawPut(value []byte) {
cli, err := tikv.NewRawKVClient(strings.Split(*pdAddr, ","), config.Security{
ClusterSSLCA: *sslCA,
ClusterSSLCert: *sslCert,
ClusterSSLKey: *sslKey,
})
if err != nil {
log.Fatal(err.Error())
}
wg := sync.WaitGroup{}
base := *dataCnt / *workerCnt
wg.Add(*workerCnt)
for i := 0; i < *workerCnt; i++ {
go func(i int) {
defer wg.Done()
for j := 0; j < base; j++ {
k := base*i + j
key := fmt.Sprintf("key_%d", k)
// use a per-iteration err to avoid a data race on the shared err across goroutines
if err := cli.Put([]byte(key), value); err != nil {
log.Fatal("put failed", zap.Error(err))
}
}
}(i)
}
wg.Wait()
}
func main() {
flag.Parse()
log.SetLevel(zap.WarnLevel)
go func() {
err := http.ListenAndServe(":9191", nil)
terror.Log(errors.Trace(err))
}()
value := make([]byte, *valueSize)
t := time.Now()
batchRawPut(value)
fmt.Printf("\nelapse:%v, total %v\n", time.Since(t), *dataCnt)
}
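benchraw takes the same basic flags, plus optional TLS material for secured clusters (a sketch; the certificate paths are illustrative):

```sh
./benchraw -pd localhost:2379 -N 1000000 -C 100 -V 5 \
    -cacert /path/to/ca.pem -cert /path/to/client.pem -key /path/to/client-key.pem
```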

224
cmd/ddltest/column_test.go

@ -0,0 +1,224 @@
// Copyright 2015 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package ddltest
import (
"fmt"
"reflect"
"sync"
"sync/atomic"
"time"
. "github.com/pingcap/check"
"github.com/pingcap/parser/terror"
plannercore "github.com/pingcap/tidb/planner/core"
"github.com/pingcap/tidb/session"
"github.com/pingcap/tidb/table"
"github.com/pingcap/tidb/types"
log "github.com/sirupsen/logrus"
goctx "golang.org/x/net/context"
)
// After the add-column DDL has finished, check the records in the table.
func (s *TestDDLSuite) checkAddColumn(c *C, rowID int64, defaultVal interface{}, updatedVal interface{}) {
ctx := s.ctx
err := ctx.NewTxn(goctx.Background())
c.Assert(err, IsNil)
tbl := s.getTable(c, "test_column")
oldInsertCount := int64(0)
newInsertCount := int64(0)
oldUpdateCount := int64(0)
newUpdateCount := int64(0)
err = tbl.IterRecords(ctx, tbl.FirstKey(), tbl.Cols(), func(h int64, data []types.Datum, cols []*table.Column) (bool, error) {
col1Val := data[0].GetValue()
col2Val := data[1].GetValue()
col3Val := data[2].GetValue()
// Check inserted row.
if reflect.DeepEqual(col1Val, col2Val) {
if reflect.DeepEqual(col3Val, defaultVal) {
// When a row was inserted with 2 columns, the third column holds the default value.
oldInsertCount++
} else if reflect.DeepEqual(col3Val, col1Val) {
// When a row was inserted with 3 columns, the third column holds the first column's value.
newInsertCount++
} else {
log.Fatalf("[checkAddColumn fail]invalid row: %v", data)
}
}
// Check updated row.
if reflect.DeepEqual(col2Val, updatedVal) {
if reflect.DeepEqual(col3Val, defaultVal) || reflect.DeepEqual(col3Val, col1Val) {
oldUpdateCount++
} else if reflect.DeepEqual(col3Val, updatedVal) {
newUpdateCount++
} else {
log.Fatalf("[checkAddColumn fail]invalid row: %v", data)
}
}
return true, nil
})
c.Assert(err, IsNil)
deleteCount := rowID - oldInsertCount - newInsertCount - oldUpdateCount - newUpdateCount
c.Assert(oldInsertCount, GreaterEqual, int64(0))
c.Assert(newInsertCount, GreaterEqual, int64(0))
c.Assert(oldUpdateCount, Greater, int64(0))
c.Assert(newUpdateCount, Greater, int64(0))
c.Assert(deleteCount, Greater, int64(0))
}
func (s *TestDDLSuite) checkDropColumn(c *C, rowID int64, alterColumn *table.Column, updateDefault interface{}) {
ctx := s.ctx
err := ctx.NewTxn(goctx.Background())
c.Assert(err, IsNil)
tbl := s.getTable(c, "test_column")
for _, col := range tbl.Cols() {
c.Assert(col.ID, Not(Equals), alterColumn.ID)
}
insertCount := int64(0)
updateCount := int64(0)
err = tbl.IterRecords(ctx, tbl.FirstKey(), tbl.Cols(), func(h int64, data []types.Datum, cols []*table.Column) (bool, error) {
if reflect.DeepEqual(data[1].GetValue(), data[0].GetValue()) {
// Check inserted row.
insertCount++
} else if reflect.DeepEqual(data[1].GetValue(), updateDefault) {
// Check updated row.
updateCount++
} else {
log.Fatalf("[checkDropColumn fail]invalid row: %v", data)
}
return true, nil
})
c.Assert(err, IsNil)
deleteCount := rowID - insertCount - updateCount
c.Assert(insertCount, Greater, int64(0))
c.Assert(updateCount, Greater, int64(0))
c.Assert(deleteCount, Greater, int64(0))
}
func (s *TestDDLSuite) TestColumn(c *C) {
// First, insert a batch of initial rows.
workerNum := 10
base := *dataNum / workerNum
var wg sync.WaitGroup
wg.Add(workerNum)
for i := 0; i < workerNum; i++ {
go func(i int) {
defer wg.Done()
for j := 0; j < base; j++ {
k := base*i + j
s.execInsert(c, fmt.Sprintf("insert into test_column values (%d, %d)", k, k))
}
}(i)
}
wg.Wait()
tbl := []struct {
Query string
ColumnName string
Add bool
Default interface{}
}{
{"alter table test_column add column c3 int default -1", "c3", true, int64(-1)},
{"alter table test_column drop column c3", "c3", false, nil},
}
rowID := int64(*dataNum)
updateDefault := int64(-2)
var alterColumn *table.Column
for _, t := range tbl {
c.Logf("run DDL %s", t.Query)
done := s.runDDL(t.Query)
ticker := time.NewTicker(time.Duration(*lease) * time.Second / 2)
defer ticker.Stop()
LOOP:
for {
select {
case err := <-done:
c.Assert(err, IsNil)
break LOOP
case <-ticker.C:
count := 10
s.execColumnOperations(c, workerNum, count, &rowID, updateDefault)
}
}
if t.Add {
s.checkAddColumn(c, rowID, t.Default, updateDefault)
} else {
s.checkDropColumn(c, rowID, alterColumn, updateDefault)
}
tbl := s.getTable(c, "test_column")
alterColumn = table.FindCol(tbl.Cols(), t.ColumnName)
if t.Add {
c.Assert(alterColumn, NotNil)
} else {
c.Assert(alterColumn, IsNil)
}
}
}
func (s *TestDDLSuite) execColumnOperations(c *C, workerNum, count int, rowID *int64, updateDefault int64) {
var wg sync.WaitGroup
// workerNum = 10
wg.Add(workerNum)
for i := 0; i < workerNum; i++ {
go func() {
defer wg.Done()
for j := 0; j < count; j++ {
key := int(atomic.AddInt64(rowID, 2))
s.execInsert(c, fmt.Sprintf("insert into test_column (c1, c2) values (%d, %d)",
key-1, key-1))
s.exec(fmt.Sprintf("insert into test_column values (%d, %d, %d)", key, key, key))
s.mustExec(c, fmt.Sprintf("update test_column set c2 = %d where c1 = %d",
updateDefault, randomNum(key)))
s.exec(fmt.Sprintf("update test_column set c2 = %d, c3 = %d where c1 = %d",
updateDefault, updateDefault, randomNum(key)))
s.mustExec(c, fmt.Sprintf("delete from test_column where c1 = %d", randomNum(key)))
}
}()
}
wg.Wait()
}
func (s *TestDDLSuite) TestCommitWhenSchemaChanged(c *C) {
s.mustExec(c, "drop table if exists test_commit")
s.mustExec(c, "create table test_commit (a int, b int)")
s.mustExec(c, "insert into test_commit values (1, 1)")
s.mustExec(c, "insert into test_commit values (2, 2)")
s1, err := session.CreateSession(s.store)
c.Assert(err, IsNil)
ctx := goctx.Background()
_, err = s1.Execute(ctx, "use test_ddl")
c.Assert(err, IsNil)
s1.Execute(ctx, "begin")
s1.Execute(ctx, "insert into test_commit values (3, 3)")
s.mustExec(c, "alter table test_commit drop column b")
// When this transaction commits, it will find the schema has already changed.
s1.Execute(ctx, "insert into test_commit values (4, 4)")
_, err = s1.Execute(ctx, "commit")
c.Assert(terror.ErrorEqual(err, plannercore.ErrWrongValueCountOnRow), IsTrue, Commentf("err %v", err))
}

1028
cmd/ddltest/ddl_test.go

File diff suppressed because it is too large

218
cmd/ddltest/index_test.go

@ -0,0 +1,218 @@
// Copyright 2015 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package ddltest
import (
"fmt"
"io"
"math"
"sync"
"sync/atomic"
"time"
. "github.com/pingcap/check"
"github.com/pingcap/parser/model"
"github.com/pingcap/parser/terror"
"github.com/pingcap/tidb/store/tikv"
"github.com/pingcap/tidb/store/tikv/gcworker"
"github.com/pingcap/tidb/table"
"github.com/pingcap/tidb/table/tables"
"github.com/pingcap/tidb/types"
goctx "golang.org/x/net/context"
)
func getIndex(t table.Table, name string) table.Index {
for _, idx := range t.Indices() {
if idx.Meta().Name.O == name {
return idx
}
}
return nil
}
func (s *TestDDLSuite) checkAddIndex(c *C, indexInfo *model.IndexInfo) {
ctx := s.ctx
err := ctx.NewTxn(goctx.Background())
c.Assert(err, IsNil)
tbl := s.getTable(c, "test_index")
// read handles from the table
handles := make(map[int64]struct{})
err = tbl.IterRecords(ctx, tbl.FirstKey(), tbl.Cols(),
func(h int64, data []types.Datum, cols []*table.Column) (bool, error) {
handles[h] = struct{}{}
return true, nil
})
c.Assert(err, IsNil)
// read handles from index
idx := tables.NewIndex(tbl.Meta().ID, tbl.Meta(), indexInfo)
err = ctx.NewTxn(goctx.Background())
c.Assert(err, IsNil)
txn, err := ctx.Txn(false)
c.Assert(err, IsNil)
defer func() {
txn.Rollback()
}()
it, err := idx.SeekFirst(txn)
c.Assert(err, IsNil)
defer it.Close()
for {
_, h, err := it.Next()
if terror.ErrorEqual(err, io.EOF) {
break
}
c.Assert(err, IsNil)
c.Assert(handles, HasKey, h)
delete(handles, h)
}
c.Assert(handles, HasLen, 0)
}
func (s *TestDDLSuite) checkDropIndex(c *C, indexInfo *model.IndexInfo) {
gcWorker, err := gcworker.NewMockGCWorker(s.store.(tikv.Storage))
c.Assert(err, IsNil)
err = gcWorker.DeleteRanges(goctx.Background(), uint64(math.MaxInt32))
c.Assert(err, IsNil)
ctx := s.ctx
err = ctx.NewTxn(goctx.Background())
c.Assert(err, IsNil)
tbl := s.getTable(c, "test_index")
// read handles from index
idx := tables.NewIndex(tbl.Meta().ID, tbl.Meta(), indexInfo)
err = ctx.NewTxn(goctx.Background())
c.Assert(err, IsNil)
txn, err := ctx.Txn(false)
c.Assert(err, IsNil)
defer txn.Rollback()
it, err := idx.SeekFirst(txn)
c.Assert(err, IsNil)
defer it.Close()
handles := make(map[int64]struct{})
for {
_, h, err := it.Next()
if terror.ErrorEqual(err, io.EOF) {
break
}
c.Assert(err, IsNil)
handles[h] = struct{}{}
}
// TODO: Uncomment this after apply pool is finished
// c.Assert(handles, HasLen, 0)
}
// TestIndex tests index operations on the table test_index (c int, c1 bigint, c2 double, c3 varchar(256), primary key(c)).
func (s *TestDDLSuite) TestIndex(c *C) {
// First, insert a batch of initial rows.
workerNum := 10
base := *dataNum / workerNum
var wg sync.WaitGroup
wg.Add(workerNum)
for i := 0; i < workerNum; i++ {
go func(i int) {
defer wg.Done()
for j := 0; j < base; j++ {
k := base*i + j
s.execInsert(c,
fmt.Sprintf("insert into test_index values (%d, %d, %f, '%s')",
k, randomInt(), randomFloat(), randomString(10)))
}
}(i)
}
wg.Wait()
tbl := []struct {
Query string
IndexName string
Add bool
}{
{"create index c1_index on test_index (c1)", "c1_index", true},
{"drop index c1_index on test_index", "c1_index", false},
{"create index c2_index on test_index (c2)", "c2_index", true},
{"drop index c2_index on test_index", "c2_index", false},
{"create index c3_index on test_index (c3)", "c3_index", true},
{"drop index c3_index on test_index", "c3_index", false},
}
insertID := int64(*dataNum)
var oldIndex table.Index
for _, t := range tbl {
c.Logf("run DDL sql %s", t.Query)
done := s.runDDL(t.Query)
ticker := time.NewTicker(time.Duration(*lease) * time.Second / 2)
defer ticker.Stop()
LOOP:
for {
select {
case err := <-done:
c.Assert(err, IsNil)
break LOOP
case <-ticker.C:
// insert `count` new rows, and randomly delete and update `count` old rows
count := 10
s.execIndexOperations(c, workerNum, count, &insertID)
}
}
tbl := s.getTable(c, "test_index")
index := getIndex(tbl, t.IndexName)
if t.Add {
c.Assert(index, NotNil)
oldIndex = index
s.checkAddIndex(c, index.Meta())
} else {
c.Assert(index, IsNil)
s.checkDropIndex(c, oldIndex.Meta())
}
}
}
func (s *TestDDLSuite) execIndexOperations(c *C, workerNum, count int, insertID *int64) {
var wg sync.WaitGroup
// workerNum = 10
wg.Add(workerNum)
for i := 0; i < workerNum; i++ {
go func() {
defer wg.Done()
for j := 0; j < count; j++ {
id := atomic.AddInt64(insertID, 1)
sql := fmt.Sprintf("insert into test_index values (%d, %d, %f, '%s')", id, randomInt(), randomFloat(), randomString(10))
s.execInsert(c, sql)
c.Logf("sql %s", sql)
sql = fmt.Sprintf("delete from test_index where c = %d", randomIntn(int(id)))
s.mustExec(c, sql)
c.Logf("sql %s", sql)
sql = fmt.Sprintf("update test_index set c1 = %d, c2 = %f, c3 = '%s' where c = %d", randomInt(), randomFloat(), randomString(10), randomIntn(int(id)))
s.mustExec(c, sql)
c.Logf("sql %s", sql)
}
}()
}
wg.Wait()
}

55
cmd/ddltest/random.go

@ -0,0 +1,55 @@
// Copyright 2015 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package ddltest
import (
"math/rand"
)
func randomInt() int {
return rand.Int()
}
func randomIntn(n int) int {
return rand.Intn(n)
}
func randomFloat() float64 {
return rand.Float64()
}
func randomString(n int) string {
const alphanum = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"
var bytes = make([]byte, n)
for i := range bytes {
bytes[i] = alphanum[randomIntn(len(alphanum))]
}
return string(bytes)
}
// randomNum returns a random int:
//   randomNum(min, max) -> [min, max)
//   randomNum(n)        -> [0, n)
//   randomNum()         -> any non-negative int
func randomNum(args ...int) int {
if len(args) > 1 {
return args[0] + randomIntn(args[1]-args[0])
} else if len(args) == 1 {
return randomIntn(args[0])
} else {
return randomInt()
}
}

65
cmd/explaintest/README.md

@ -0,0 +1,65 @@
# ExplainTest
ExplainTest is a command-line tool for testing TiDB's execution-plan logic, shipped with a set of useful test cases; cases can be run via `run-tests.sh`.
```
Usage: ./run-tests.sh [options]
-h: Print this help message.
-s <tidb-server-path>: Use tidb-server in <tidb-server-path> for testing.
eg. "./run-tests.sh -s ./explaintest_tidb-server"
-b <y|Y|n|N>: "y" or "Y" for building test binaries [default "y" if this option is not specified].
"n" or "N" for not to build.
The building of tidb-server will be skiped if "-s <tidb-server-path>" is provided.
-r <test-name>|all: Run tests in file "t/<test-name>.test" and record result to file "r/<test-name>.result".
"all" for running all tests and record their results.
-t <test-name>: Run tests in file "t/<test-name>.test".
This option will be ignored if "-r <test-name>" is provided.
Run all tests if this option is not provided.
-v <vendor-path>: Add <vendor-path> to $GOPATH.
-c <test-name>|all: Create data according to creating statements in file "t/<test-name>.test" and save stats in "s/<test-name>_tableName.json".
<test-name> must have a suffix of '_stats'.
"all" for creating stats of all tests.
-i <importer-path>: Use importer in <importer-path> for creating data.
```
## How it works
ExplainTest reads the test cases in `t/*.test`, executes them against a TiDB server loaded with the statistics in `s/*.json`, and compares the explain output with `r/*.result`.
For convenience, new `*.result` and `*.json` files can be generated from an actual run by passing the `-r` and `-c` parameters to `run-tests.sh`.
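For example, a minimal hypothetical test file `t/example.test` could contain plain SQL plus `#` comments and `--` directives (such as `--echo` or `--error`):

```
# create the schema, then record the plan
drop table if exists t;
create table t (a int);
explain select * from t where a < 1;
```

Recording it with `./run-tests.sh -r example` produces `r/example.result`, which repeats each statement followed by its output; compare the beginning of `r/black_list.result` below, which runs the same statements.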
## Usage
### Catch Execution Plan Regressions
After modifying code and before committing, please run this command from the TiDB root folder.
```sh
make dev
```
or
```sh
make explaintest
```
Either command will flag any execution plan changes.
### Generate New Stats and Result Files from Execution
First, add the new test queries under the `t/` folder.
```sh
cd cmd/explaintest
./run-tests.sh -r [casename]
./run-tests.sh -c [casename]
```
It will generate result and stats files based on the last execution; they can then be reused as-is or edited by hand.

12
cmd/explaintest/config.toml

@ -0,0 +1,12 @@
# port of the tidb-server under test; run-tests.sh and the importer connect to it
port = 4001
lease = "0"
mem-quota-query = 34359738368
[status]
# status port used by the tester to dump table stats (http://127.0.0.1:10081/stats/dump/... in main.go)
status-port = 10081
[performance]
stats-lease = "0"
[experimental]
allow-expression-index = true

713
cmd/explaintest/main.go

@ -0,0 +1,713 @@
// Copyright 2018 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package main
import (
"bytes"
"database/sql"
"flag"
"fmt"
"io"
"io/ioutil"
"net/http"
"os"
"os/exec"
"strings"
"time"
_ "github.com/go-sql-driver/mysql"
"github.com/pingcap/errors"
"github.com/pingcap/log"
"github.com/pingcap/parser/ast"
"github.com/pingcap/tidb/session"
"github.com/pingcap/tidb/sessionctx"
"github.com/pingcap/tidb/util/logutil"
"github.com/pingcap/tidb/util/mock"
"go.uber.org/zap"
)
const dbName = "test"
var (
logLevel string
record bool
create bool
)
func init() {
flag.StringVar(&logLevel, "log-level", "error", "set log level: info, warn, error, debug [default: error]")
flag.BoolVar(&record, "record", false, "record the test output in the result file")
flag.BoolVar(&create, "create", false, "create and import data into table, and save json file of stats")
}
var mdb *sql.DB
type query struct {
Query string
Line int
}
type tester struct {
name string
tx *sql.Tx
buf bytes.Buffer
// when the query log is enabled, the original statement is written into the result file too;
// use --disable_query_log or --enable_query_log to control it
enableQueryLog bool
singleQuery bool
// check expected error, use --error before the statement
// see http://dev.mysql.com/doc/mysqltest/2.0/en/writing-tests-expecting-errors.html
expectedErrs []string
// used only when checking (not recording): after executing each statement we read the
// expected result data back to check correctness.
resultFD *os.File
// ctx is used for Compile sql statement
ctx sessionctx.Context
}
func newTester(name string) *tester {
t := new(tester)
t.name = name
t.enableQueryLog = true
t.ctx = mock.NewContext()
t.ctx.GetSessionVars().EnableWindowFunction = true
return t
}
func (t *tester) Run() error {
queries, err := t.loadQueries()
if err != nil {
return errors.Trace(err)
}
if err = t.openResult(); err != nil {
return errors.Trace(err)
}
var s string
defer func() {
if t.tx != nil {
log.Error("transaction is not committed correctly, rollback")
err = t.rollback()
if err != nil {
log.Error("transaction is failed rollback", zap.Error(err))
}
}
if t.resultFD != nil {
err = t.resultFD.Close()
if err != nil {
log.Error("result fd close failed", zap.Error(err))
}
}
}()
LOOP:
for _, q := range queries {
s = q.Query
if strings.HasPrefix(s, "--") {
// clear expected errors
t.expectedErrs = nil
switch s {
case "--enable_query_log":
t.enableQueryLog = true
case "--disable_query_log":
t.enableQueryLog = false
case "--single_query":
t.singleQuery = true
case "--halt":
// if we meet --halt, ignore the remaining tests
break LOOP
default:
if strings.HasPrefix(s, "--error") {
t.expectedErrs = strings.Split(strings.TrimSpace(strings.TrimPrefix(s, "--error")), ",")
} else if strings.HasPrefix(s, "-- error") {
t.expectedErrs = strings.Split(strings.TrimSpace(strings.TrimPrefix(s, "-- error")), ",")
} else if strings.HasPrefix(s, "--echo") {
echo := strings.TrimSpace(strings.TrimPrefix(s, "--echo"))
t.buf.WriteString(echo)
t.buf.WriteString("\n")
}
}
} else {
if err = t.execute(q); err != nil {
return errors.Annotate(err, fmt.Sprintf("sql:%v", q.Query))
}
}
}
return t.flushResult()
}
func (t *tester) loadQueries() ([]query, error) {
data, err := ioutil.ReadFile(t.testFileName())
if err != nil {
return nil, err
}
seps := bytes.Split(data, []byte("\n"))
queries := make([]query, 0, len(seps))
newStmt := true
for i, v := range seps {
s := string(bytes.TrimSpace(v))
// skip lines starting with '#' (comments)
if strings.HasPrefix(s, "#") {
newStmt = true
continue
} else if strings.HasPrefix(s, "--") {
queries = append(queries, query{Query: s, Line: i + 1})
newStmt = true
continue
} else if len(s) == 0 {
continue
}
if newStmt {
queries = append(queries, query{Query: s, Line: i + 1})
} else {
lastQuery := queries[len(queries)-1]
lastQuery.Query = fmt.Sprintf("%s\n%s", lastQuery.Query, s)
queries[len(queries)-1] = lastQuery
}
// if the line ends with a ';', treat the next line as the start of a new statement.
newStmt = strings.HasSuffix(s, ";")
}
return queries, nil
}
// parserErrorHandle handles the mysql_test syntax `--error ER_PARSE_ERROR`, which allows the
// following query to return a parser error.
func (t *tester) parserErrorHandle(query query, err error) error {
offset := t.buf.Len()
for _, expectedErr := range t.expectedErrs {
if expectedErr == "ER_PARSE_ERROR" {
if t.enableQueryLog {
t.buf.WriteString(query.Query)
t.buf.WriteString("\n")
}
t.buf.WriteString(fmt.Sprintf("%s\n", err))
err = nil
break
}
}
if err != nil {
return errors.Trace(err)
}
// clear expected errors after we execute the first query
t.expectedErrs = nil
t.singleQuery = false
if !record && !create {
// check test result now
gotBuf := t.buf.Bytes()[offset:]
buf := make([]byte, t.buf.Len()-offset)
if _, err = t.resultFD.ReadAt(buf, int64(offset)); err != nil {
return errors.Trace(errors.Errorf("run \"%v\" at line %d err, we got \n%s\nbut read result err %s", query.Query, query.Line, gotBuf, err))
}
if !bytes.Equal(gotBuf, buf) {
return errors.Trace(errors.Errorf("run \"%v\" at line %d err, we need(%v):\n%s\nbut got(%v):\n%s\n", query.Query, query.Line, len(buf), buf, len(gotBuf), gotBuf))
}
}
return errors.Trace(err)
}
func (t *tester) executeDefault(qText string) (err error) {
if t.tx != nil {
return filterWarning(t.executeStmt(qText))
}
// if begin or the following commit fails, we don't treat
// this error as the expected one.
if t.tx, err = mdb.Begin(); err != nil {
err2 := t.rollback()
if err2 != nil {
log.Error("transaction is failed to rollback", zap.Error(err))
}
return err
}
if err = filterWarning(t.executeStmt(qText)); err != nil {
err2 := t.rollback()
if err2 != nil {
log.Error("transaction is failed rollback", zap.Error(err))
}
return err
}
if err = t.commit(); err != nil {
err2 := t.rollback()
if err2 != nil {
log.Error("transaction is failed rollback", zap.Error(err))
}
return err
}
return nil
}
func (t *tester) execute(query query) error {
if len(query.Query) == 0 {
return nil
}
list, err := session.Parse(t.ctx, query.Query)
if err != nil {
return t.parserErrorHandle(query, err)
}
for _, st := range list {
var qText string
if t.singleQuery {
qText = query.Query
} else {
qText = st.Text()
}
offset := t.buf.Len()
if t.enableQueryLog {
t.buf.WriteString(qText)
t.buf.WriteString("\n")
}
switch st.(type) {
case *ast.BeginStmt:
t.tx, err = mdb.Begin()
if err != nil {
err2 := t.rollback()
if err2 != nil {
log.Error("transaction is failed rollback", zap.Error(err))
}
break
}
case *ast.CommitStmt:
err = t.commit()
if err != nil {
err2 := t.rollback()
if err2 != nil {
log.Error("transaction is failed rollback", zap.Error(err))
}
break
}
case *ast.RollbackStmt:
err = t.rollback()
if err != nil {
break
}
default:
if create {
createStmt, isCreate := st.(*ast.CreateTableStmt)
if isCreate {
if err = t.create(createStmt.Table.Name.String(), qText); err != nil {
break
}
} else {
_, isDrop := st.(*ast.DropTableStmt)
_, isAnalyze := st.(*ast.AnalyzeTableStmt)
if isDrop || isAnalyze {
if err = t.executeDefault(qText); err != nil {
break
}
}
}
} else if err = t.executeDefault(qText); err != nil {
break
}
}
if err != nil && len(t.expectedErrs) > 0 {
// TODO: check whether this err is expected.
// but now we think it is.
// output expected err
t.buf.WriteString(fmt.Sprintf("%s\n", err))
err = nil
}
// clear expected errors after we execute the first query
t.expectedErrs = nil
t.singleQuery = false
if err != nil {
return errors.Trace(errors.Errorf("run \"%v\" at line %d err %v", st.Text(), query.Line, err))
}
if !record && !create {
// check test result now
gotBuf := t.buf.Bytes()[offset:]
buf := make([]byte, t.buf.Len()-offset)
if _, err = t.resultFD.ReadAt(buf, int64(offset)); !(err == nil || err == io.EOF) {
return errors.Trace(errors.Errorf("run \"%v\" at line %d err, we got \n%s\nbut read result err %s", st.Text(), query.Line, gotBuf, err))
}
if !bytes.Equal(gotBuf, buf) {
return errors.Trace(errors.Errorf("run \"%v\" at line %d err, we need:\n%s\nbut got:\n%s\n", query.Query, query.Line, buf, gotBuf))
}
}
}
return errors.Trace(err)
}
func filterWarning(err error) error {
return err
}
func (t *tester) create(tableName string, qText string) error {
fmt.Printf("import data for table %s of test %s:\n", tableName, t.name)
path := "./importer -t \"" + qText + "\" -P 4001 -n 2000 -c 100"
cmd := exec.Command("sh", "-c", path)
stdoutIn, err := cmd.StdoutPipe()
if err != nil {
log.Error("open stdout pipe failed", zap.Error(err))
}
stderrIn, err := cmd.StderrPipe()
if err != nil {
log.Error("open stderr pipe failed", zap.Error(err))
}
var stdoutBuf, stderrBuf bytes.Buffer
var errStdout, errStderr error
stdout := io.MultiWriter(os.Stdout, &stdoutBuf)
stderr := io.MultiWriter(os.Stderr, &stderrBuf)
if err = cmd.Start(); err != nil {
return errors.Trace(err)
}
go func() {
_, errStdout = io.Copy(stdout, stdoutIn)
}()
go func() {
_, errStderr = io.Copy(stderr, stderrIn)
}()
if err = cmd.Wait(); err != nil {
log.Fatal("importer failed", zap.Error(err))
return err
}
if errStdout != nil {
return errors.Trace(errStdout)
}
if errStderr != nil {
return errors.Trace(errStderr)
}
if err = t.analyze(tableName); err != nil {
return err
}
resp, err := http.Get("http://127.0.0.1:10081/stats/dump/" + dbName + "/" + tableName)
if err != nil {
return err
}
js, err := ioutil.ReadAll(resp.Body)
if err != nil {
return err
}
return ioutil.WriteFile(t.statsFileName(tableName), js, 0644)
}
func (t *tester) commit() error {
err := t.tx.Commit()
if err != nil {
return err
}
t.tx = nil
return nil
}
func (t *tester) rollback() error {
if t.tx == nil {
return nil
}
err := t.tx.Rollback()
t.tx = nil
return err
}
func (t *tester) analyze(tableName string) error {
return t.execute(query{Query: "analyze table " + tableName + ";", Line: 0})
}
func (t *tester) executeStmt(query string) error {
if isQuery(query) {
rows, err := t.tx.Query(query)
if err != nil {
return errors.Trace(err)
}
cols, err := rows.Columns()
if err != nil {
return errors.Trace(err)
}
for i, c := range cols {
t.buf.WriteString(c)
if i != len(cols)-1 {
t.buf.WriteString("\t")
}
}
t.buf.WriteString("\n")
values := make([][]byte, len(cols))
scanArgs := make([]interface{}, len(values))
for i := range values {
scanArgs[i] = &values[i]
}
for rows.Next() {
err = rows.Scan(scanArgs...)
if err != nil {
return errors.Trace(err)
}
var value string
for i, col := range values {
// Here we can check if the value is nil (NULL value)
if col == nil {
value = "NULL"
} else {
value = string(col)
}
t.buf.WriteString(value)
if i < len(values)-1 {
t.buf.WriteString("\t")
}
}
t.buf.WriteString("\n")
}
err = rows.Err()
if err != nil {
return errors.Trace(err)
}
} else {
// TODO: rows affected and last insert id
_, err := t.tx.Exec(query)
if err != nil {
return errors.Trace(err)
}
}
return nil
}
func (t *tester) openResult() error {
if record || create {
return nil
}
var err error
t.resultFD, err = os.Open(t.resultFileName())
return err
}
func (t *tester) flushResult() error {
if !record {
return nil
}
return ioutil.WriteFile(t.resultFileName(), t.buf.Bytes(), 0644)
}
func (t *tester) statsFileName(tableName string) string {
return fmt.Sprintf("./s/%s_%s.json", t.name, tableName)
}
func (t *tester) testFileName() string {
// test files must be in the current ./t directory, the same as MySQL
return fmt.Sprintf("./t/%s.test", t.name)
}
func (t *tester) resultFileName() string {
// test and result must be in current ./r, the same as MySQL
return fmt.Sprintf("./r/%s.result", t.name)
}
func loadAllTests() ([]string, error) {
// tests must be in t folder
files, err := ioutil.ReadDir("./t")
if err != nil {
return nil, err
}
tests := make([]string, 0, len(files))
for _, f := range files {
if f.IsDir() {
continue
}
// the test file must have a suffix .test
name := f.Name()
if strings.HasSuffix(name, ".test") {
name = strings.TrimSuffix(name, ".test")
if create && !strings.HasSuffix(name, "_stats") {
continue
}
tests = append(tests, name)
}
}
return tests, nil
}
// openDBWithRetry opens a database specified by its database driver name and a
// driver-specific data source name, retrying if the connection fails.
func openDBWithRetry(driverName, dataSourceName string) (mdb *sql.DB, err error) {
startTime := time.Now()
sleepTime := time.Millisecond * 500
retryCnt := 60
// Retry for up to 30s in total (60 retries at 500ms intervals).
for i := 0; i < retryCnt; i++ {
mdb, err = sql.Open(driverName, dataSourceName)
if err != nil {
log.Warn("open DB failed", zap.Int("retry count", i), zap.Error(err))
time.Sleep(sleepTime)
continue
}
err = mdb.Ping()
if err == nil {
break
}
log.Warn("ping DB failed", zap.Int("retry count", i), zap.Error(err))
if err1 := mdb.Close(); err1 != nil {
log.Error("close DB failed", zap.Error(err1))
}
time.Sleep(sleepTime)
}
if err != nil {
log.Error("open Db failed", zap.Duration("take time", time.Since(startTime)), zap.Error(err))
return nil, errors.Trace(err)
}
return
}
func main() {
flag.Parse()
err := logutil.InitZapLogger(logutil.NewLogConfig(logLevel, logutil.DefaultLogFormat, "", logutil.EmptyFileLogConfig, false))
if err != nil {
panic("init logger fail, " + err.Error())
}
mdb, err = openDBWithRetry(
"mysql",
"root@tcp(localhost:4001)/"+dbName+"?allowAllFiles=true",
)
if err != nil {
log.Fatal("open DB failed", zap.Error(err))
}
defer func() {
log.Warn("close DB")
err = mdb.Close()
if err != nil {
log.Error("close DB failed", zap.Error(err))
}
}()
log.Warn("create new DB", zap.Reflect("DB", mdb))
if _, err = mdb.Exec("DROP DATABASE IF EXISTS test"); err != nil {
log.Fatal("executing drop DB test failed", zap.Error(err))
}
if _, err = mdb.Exec("CREATE DATABASE test"); err != nil {
log.Fatal("executing create DB test failed", zap.Error(err))
}
if _, err = mdb.Exec("USE test"); err != nil {
log.Fatal("executing use test failed", zap.Error(err))
}
if _, err = mdb.Exec("set @@tidb_hash_join_concurrency=1"); err != nil {
log.Fatal("set @@tidb_hash_join_concurrency=1 failed", zap.Error(err))
}
if _, err = mdb.Exec("set sql_mode='STRICT_TRANS_TABLES'"); err != nil {
log.Fatal("set sql_mode='STRICT_TRANS_TABLES' failed", zap.Error(err))
}
tests := flag.Args()
// run all tests if none are specified
if len(tests) == 0 {
if tests, err = loadAllTests(); err != nil {
log.Fatal("load all tests failed", zap.Error(err))
}
}
if record {
log.Info("recording tests", zap.Strings("tests", tests))
} else if create {
log.Info("creating data", zap.Strings("tests", tests))
} else {
log.Info("running tests", zap.Strings("tests", tests))
}
for _, t := range tests {
if strings.Contains(t, "--log-level") {
continue
}
tr := newTester(t)
if err = tr.Run(); err != nil {
log.Fatal("run test", zap.String("test", t), zap.Error(err))
}
log.Info("run test ok", zap.String("test", t))
}
log.Info("Explain test passed")
}
var queryStmtTable = []string{"explain", "select", "show", "execute", "describe", "desc", "admin"}
func trimSQL(sql string) string {
// Trim space.
sql = strings.TrimSpace(sql)
// Trim leading /*comment*/
// There may be multiple comments
for strings.HasPrefix(sql, "/*") {
i := strings.Index(sql, "*/")
if i != -1 {
sql = sql[i+2:]
sql = strings.TrimSpace(sql)
continue
}
break
}
// Trim leading '('. For `(select 1);` is also a query.
return strings.TrimLeft(sql, "( ")
}
// isQuery checks if a sql statement is a query statement.
func isQuery(sql string) bool {
sqlText := strings.ToLower(trimSQL(sql))
for _, key := range queryStmtTable {
if strings.HasPrefix(sqlText, key) {
return true
}
}
return false
}

50
cmd/explaintest/main_test.go

@ -0,0 +1,50 @@
// Copyright 2019 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package main
import "testing"
func TestIsQuery(t *testing.T) {
tbl := []struct {
sql string
ok bool
}{
{"/*comment*/ select 1;", true},
{"/*comment*/ /*comment*/ select 1;", true},
{"select /*comment*/ 1 /*comment*/;", true},
{"(select /*comment*/ 1 /*comment*/);", true},
}
for _, tb := range tbl {
if isQuery(tb.sql) != tb.ok {
t.Fatalf("%s", tb.sql)
}
}
}
func TestTrimSQL(t *testing.T) {
tbl := []struct {
sql string
target string
}{
{"/*comment*/ select 1; ", "select 1;"},
{"/*comment*/ /*comment*/ select 1;", "select 1;"},
{"select /*comment*/ 1 /*comment*/;", "select /*comment*/ 1 /*comment*/;"},
{"/*comment select 1; ", "/*comment select 1;"},
}
for _, tb := range tbl {
if trimSQL(tb.sql) != tb.target {
t.Fatalf("%s", tb.sql)
}
}
}

BIN
cmd/explaintest/portgenerator

Binary file not shown.

44
cmd/explaintest/r/access_path_selection.result

@ -0,0 +1,44 @@
CREATE TABLE `access_path_selection` (
`a` int,
`b` int,
KEY `IDX_a` (`a`),
KEY `IDX_b` (`b`),
KEY `IDX_ab` (`a`, `b`)
);
explain select a from access_path_selection where a < 3;
id estRows task access object operator info
IndexReader_6 3323.33 root index:IndexRangeScan_5
└─IndexRangeScan_5 3323.33 cop[tikv] table:access_path_selection, index:IDX_a(a) range:[-inf,3), keep order:false, stats:pseudo
explain select a, b from access_path_selection where a < 3;
id estRows task access object operator info
IndexReader_6 3323.33 root index:IndexRangeScan_5
└─IndexRangeScan_5 3323.33 cop[tikv] table:access_path_selection, index:IDX_ab(a, b) range:[-inf,3), keep order:false, stats:pseudo
explain select a, b from access_path_selection where b < 3;
id estRows task access object operator info
TableReader_7 3323.33 root data:Selection_6
└─Selection_6 3323.33 cop[tikv] lt(test.access_path_selection.b, 3)
└─TableFullScan_5 10000.00 cop[tikv] table:access_path_selection keep order:false, stats:pseudo
explain select a, b from access_path_selection where a < 3 and b < 3;
id estRows task access object operator info
IndexReader_11 1104.45 root index:Selection_10
└─Selection_10 1104.45 cop[tikv] lt(test.access_path_selection.b, 3)
└─IndexRangeScan_9 3323.33 cop[tikv] table:access_path_selection, index:IDX_ab(a, b) range:[-inf,3), keep order:false, stats:pseudo
explain select a, b from access_path_selection where a > 10 order by _tidb_rowid;
id estRows task access object operator info
Projection_6 3333.33 root test.access_path_selection.a, test.access_path_selection.b
└─TableReader_13 3333.33 root data:Selection_12
└─Selection_12 3333.33 cop[tikv] gt(test.access_path_selection.a, 10)
└─TableFullScan_11 10000.00 cop[tikv] table:access_path_selection keep order:true, stats:pseudo
explain select max(_tidb_rowid) from access_path_selection;
id estRows task access object operator info
StreamAgg_13 1.00 root funcs:max(test.access_path_selection._tidb_rowid)->Column#4
└─Limit_17 1.00 root offset:0, count:1
└─TableReader_27 1.00 root data:Limit_26
└─Limit_26 1.00 cop[tikv] offset:0, count:1
└─TableFullScan_25 1.25 cop[tikv] table:access_path_selection keep order:true, desc, stats:pseudo
explain select count(1) from access_path_selection;
id estRows task access object operator info
StreamAgg_28 1.00 root funcs:count(Column#18)->Column#4
└─TableReader_29 1.00 root data:StreamAgg_8
└─StreamAgg_8 1.00 cop[tikv] funcs:count(1)->Column#18
└─TableFullScan_24 10000.00 cop[tikv] table:access_path_selection keep order:false, stats:pseudo

56
cmd/explaintest/r/black_list.result

@ -0,0 +1,56 @@
use test;
drop table if exists t;
create table t (a int);
explain select * from t where a < 1;
id estRows task access object operator info
TableReader_7 3323.33 root data:Selection_6
└─Selection_6 3323.33 cop[tikv] lt(test.t.a, 1)
└─TableFullScan_5 10000.00 cop[tikv] table:t keep order:false, stats:pseudo
insert into mysql.opt_rule_blacklist values('predicate_push_down');
admin reload opt_rule_blacklist;
explain select * from t where a < 1;
id estRows task access object operator info
Selection_5 8000.00 root lt(test.t.a, 1)
└─TableReader_7 10000.00 root data:TableFullScan_6
└─TableFullScan_6 10000.00 cop[tikv] table:t keep order:false, stats:pseudo
delete from mysql.opt_rule_blacklist where name='predicate_push_down';
admin reload opt_rule_blacklist;
explain select * from t where a < 1;
id estRows task access object operator info
TableReader_7 3323.33 root data:Selection_6
└─Selection_6 3323.33 cop[tikv] lt(test.t.a, 1)
└─TableFullScan_5 10000.00 cop[tikv] table:t keep order:false, stats:pseudo
insert into mysql.expr_pushdown_blacklist values('<', 'tikv,tiflash,tidb', 'for test');
admin reload expr_pushdown_blacklist;
explain select * from t where a < 1;
id estRows task access object operator info
Selection_5 8000.00 root lt(test.t.a, 1)
└─TableReader_7 10000.00 root data:TableFullScan_6
└─TableFullScan_6 10000.00 cop[tikv] table:t keep order:false, stats:pseudo
delete from mysql.expr_pushdown_blacklist where name='<' and store_type = 'tikv,tiflash,tidb' and reason = 'for test';
admin reload expr_pushdown_blacklist;
explain select * from t where a < 1;
id estRows task access object operator info
TableReader_7 3323.33 root data:Selection_6
└─Selection_6 3323.33 cop[tikv] lt(test.t.a, 1)
└─TableFullScan_5 10000.00 cop[tikv] table:t keep order:false, stats:pseudo
insert into mysql.expr_pushdown_blacklist values('lt', 'tikv,tiflash,tidb', 'for test');
admin reload expr_pushdown_blacklist;
explain select * from t where a < 1;
id estRows task access object operator info
Selection_5 8000.00 root lt(test.t.a, 1)
└─TableReader_7 10000.00 root data:TableFullScan_6
└─TableFullScan_6 10000.00 cop[tikv] table:t keep order:false, stats:pseudo
delete from mysql.expr_pushdown_blacklist where name='lt' and store_type = 'tikv,tiflash,tidb' and reason = 'for test';
admin reload expr_pushdown_blacklist;
explain select * from t where a < 1;
id estRows task access object operator info
TableReader_7 3323.33 root data:Selection_6
└─Selection_6 3323.33 cop[tikv] lt(test.t.a, 1)
└─TableFullScan_5 10000.00 cop[tikv] table:t keep order:false, stats:pseudo

29
cmd/explaintest/r/explain-non-select-stmt.result

@ -0,0 +1,29 @@
use test;
drop table if exists t;
create table t(a bigint, b bigint);
explain insert into t values(1, 1);
id estRows task access object operator info
Insert_1 N/A root N/A
explain insert into t select * from t;
id estRows task access object operator info
Insert_1 N/A root N/A
└─TableReader_7 10000.00 root data:TableFullScan_6
└─TableFullScan_6 10000.00 cop[tikv] table:t keep order:false, stats:pseudo
explain delete from t where a > 100;
id estRows task access object operator info
Delete_4 N/A root N/A
└─TableReader_8 3333.33 root data:Selection_7
└─Selection_7 3333.33 cop[tikv] gt(test.t.a, 100)
└─TableFullScan_6 10000.00 cop[tikv] table:t keep order:false, stats:pseudo
explain update t set b = 100 where a = 200;
id estRows task access object operator info
Update_4 N/A root N/A
└─TableReader_8 10.00 root data:Selection_7
└─Selection_7 10.00 cop[tikv] eq(test.t.a, 200)
└─TableFullScan_6 10000.00 cop[tikv] table:t keep order:false, stats:pseudo
explain replace into t select a, 100 from t;
id estRows task access object operator info
Insert_1 N/A root N/A
└─Projection_5 10000.00 root test.t.a, 100->Column#6
└─TableReader_7 10000.00 root data:TableFullScan_6
└─TableFullScan_6 10000.00 cop[tikv] table:t keep order:false, stats:pseudo

46
cmd/explaintest/r/explain.result

@ -0,0 +1,46 @@
drop table if exists t;
create table t (id int, c1 timestamp);
show columns from t;
Field Type Null Key Default Extra
id int(11) YES NULL
c1 timestamp YES NULL
explain t;
Field Type Null Key Default Extra
id int(11) YES NULL
c1 timestamp YES NULL
describe t;
Field Type Null Key Default Extra
id int(11) YES NULL
c1 timestamp YES NULL
desc t;
Field Type Null Key Default Extra
id int(11) YES NULL
c1 timestamp YES NULL
desc t c1;
Field Type Null Key Default Extra
c1 timestamp YES NULL
desc t id;
Field Type Null Key Default Extra
id int(11) YES NULL
drop table if exists t;
create table t(id int primary key, a int, b int);
set session tidb_hashagg_partial_concurrency = 1;
set session tidb_hashagg_final_concurrency = 1;
explain select group_concat(a) from t group by id;
id estRows task access object operator info
StreamAgg_8 8000.00 root group by:Column#6, funcs:group_concat(Column#5 separator ",")->Column#4
└─Projection_18 10000.00 root cast(test.t.a, var_string(20))->Column#5, test.t.id
└─TableReader_15 10000.00 root data:TableFullScan_14
└─TableFullScan_14 10000.00 cop[tikv] table:t keep order:true, stats:pseudo
explain select group_concat(a, b) from t group by id;
id estRows task access object operator info
StreamAgg_8 8000.00 root group by:Column#7, funcs:group_concat(Column#5, Column#6 separator ",")->Column#4
└─Projection_18 10000.00 root cast(test.t.a, var_string(20))->Column#5, cast(test.t.b, var_string(20))->Column#6, test.t.id
└─TableReader_15 10000.00 root data:TableFullScan_14
└─TableFullScan_14 10000.00 cop[tikv] table:t keep order:true, stats:pseudo
drop table t;
drop view if exists v;
create view v as select cast(replace(substring_index(substring_index("",',',1),':',-1),'"','') as CHAR(32)) as event_id;
desc v;
Field Type Null Key Default Extra
event_id varchar(32) YES NULL

262
cmd/explaintest/r/explain_complex.result

@ -0,0 +1,262 @@
CREATE TABLE `dt` (
`id` int(11) unsigned NOT NULL AUTO_INCREMENT ,
`aid` varchar(32) NOT NULL,
`cm` int(10) unsigned NOT NULL,
`pt` varchar(10) NOT NULL,
`dic` varchar(64) DEFAULT NULL,
`ip` varchar(15) DEFAULT NULL,
`ds` date DEFAULT NULL,
`ds2` varchar(13) DEFAULT NULL ,
`t` int(13) DEFAULT NULL ,
`ext` varchar(550) DEFAULT NULL,
`p1` varchar(64) DEFAULT NULL ,
`p2` varchar(64) DEFAULT NULL,
`p3` varchar(64) DEFAULT NULL,
`p4` varchar(64) DEFAULT NULL,
`p5` varchar(64) DEFAULT NULL,
`p6_md5` varchar(32) DEFAULT NULL,
`p7_md5` varchar(32) DEFAULT NULL,
`bm` tinyint(1) DEFAULT '0',
`bgm` tinyint(1) DEFAULT '0',
`insert_date` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP,
PRIMARY KEY (`id`),
UNIQUE KEY `aid` (`aid`,`dic`),
KEY `ip` (`ip`),
KEY `cmi` (`cm`)
);
CREATE TABLE `st` (
`id` int(11) UNSIGNED NOT NULL AUTO_INCREMENT ,
`pt` varchar(10) NOT NULL ,
`aid` varchar(35) NOT NULL ,
`cm` int(10) NOT NULL ,
`ip` varchar(15) DEFAULT NULL ,
`dic` varchar(64) DEFAULT NULL ,
`dit` varchar(5) DEFAULT NULL,
`p1` varchar(64) DEFAULT NULL ,
`p2` varchar(64) DEFAULT NULL,
`p3` varchar(64) DEFAULT NULL,
`p4` varchar(64) DEFAULT NULL,
`p5` varchar(64) DEFAULT NULL,
`p6_md5` varchar(32) DEFAULT NULL,
`p7_md5` varchar(32) DEFAULT NULL,
`ext` varchar(550) DEFAULT NULL,
`bm` tinyint(1) DEFAULT '0',
`ds` date NOT NULL ,
`ds2` varchar(13) DEFAULT NULL ,
`t` int(13) NOT NULL ,
PRIMARY KEY (`id`),
KEY `t` (`t`),
KEY `icd` (`cm`,`ds`)
);
CREATE TABLE `dd` (
`id` int(11) UNSIGNED NOT NULL AUTO_INCREMENT ,
`aid` varchar(35) NOT NULL ,
`pt` varchar(10) NOT NULL ,
`dic` varchar(64) NOT NULL,
`dim` varchar(32) NOT NULL ,
`mac` varchar(32) DEFAULT NULL ,
`ip` varchar(15) DEFAULT NULL ,
`t` int(13) DEFAULT NULL ,
`bm` tinyint(1) DEFAULT '0',
PRIMARY KEY (`id`),
UNIQUE KEY `aid` (`aid`,`dic`),
KEY `ip` (`ip`),
KEY `pi` (`aid`,`dim`),
KEY `t` (`t`)
);
CREATE TABLE `pp` (
`oid` varchar(20) NOT NULL,
`uid` bigint(20) unsigned NOT NULL,
`cid` int(11) unsigned NOT NULL,
`ppt` int(11) NOT NULL DEFAULT '0',
`pdt` int(11) DEFAULT '0',
`am` decimal(10,2) unsigned NOT NULL DEFAULT '0.00',
`cc` decimal(10,2) NOT NULL DEFAULT '0.00',
`ps` tinyint(1) NOT NULL,
`tid` varchar(200) DEFAULT NULL,
`ppf` varchar(50) NOT NULL,
`bs` tinyint(1) NOT NULL DEFAULT '0',
`bex` tinyint(1) NOT NULL DEFAULT '0',
`bu` int(11) NOT NULL DEFAULT '0',
`pc` char(10) NOT NULL DEFAULT 'CNY',
`ui` int(16) NOT NULL DEFAULT '1',
`cr` decimal(10,4) unsigned NOT NULL DEFAULT '1.0000',
`pi` int(11) unsigned NOT NULL,
`si` int(11) unsigned NOT NULL,
`bcc` int(11) NOT NULL DEFAULT '0',
`acc` int(11) NOT NULL DEFAULT '0',
KEY `oid` (`oid`),
KEY `uid` (`uid`),
KEY `ppt` (`ppt`),
KEY `pdt` (`pdt`),
KEY `cid` (`cid`),
KEY `ps` (`ps`),
KEY `sp` (`uid`,`pi`)
);
CREATE TABLE `rr` (
`aid` varchar(35) NOT NULL ,
`pt` varchar(10) NOT NULL ,
`dic` varchar(64) NOT NULL ,
`gid` varchar(42) NOT NULL ,
`acd` varchar(32) NOT NULL ,
`t` int(13) DEFAULT NULL ,
`bm` tinyint(1) DEFAULT '0',
PRIMARY KEY (`aid`,`dic`)
);
explain SELECT `ds`, `p1`, `p2`, `p3`, `p4`, `p5`, `p6_md5`, `p7_md5`, count(dic) as install_device FROM `dt` use index (cmi) WHERE (`ds` >= '2016-09-01') AND (`ds` <= '2016-11-03') AND (`cm` IN ('1062', '1086', '1423', '1424', '1425', '1426', '1427', '1428', '1429', '1430', '1431', '1432', '1433', '1434', '1435', '1436', '1437', '1438', '1439', '1440', '1441', '1442', '1443', '1444', '1445', '1446', '1447', '1448', '1449', '1450', '1451', '1452', '1488', '1489', '1490', '1491', '1492', '1493', '1494', '1495', '1496', '1497', '1550', '1551', '1552', '1553', '1554', '1555', '1556', '1557', '1558', '1559', '1597', '1598', '1599', '1600', '1601', '1602', '1603', '1604', '1605', '1606', '1607', '1608', '1609', '1610', '1611', '1612', '1613', '1614', '1615', '1616', '1623', '1624', '1625', '1626', '1627', '1628', '1629', '1630', '1631', '1632', '1709', '1719', '1720', '1843', '2813', '2814', '2815', '2816', '2817', '2818', '2819', '2820', '2821', '2822', '2823', '2824', '2825', '2826', '2827', '2828', '2829', '2830', '2831', '2832', '2833', '2834', '2835', '2836', '2837', '2838', '2839', '2840', '2841', '2842', '2843', '2844', '2845', '2846', '2847', '2848', '2849', '2850', '2851', '2852', '2853', '2854', '2855', '2856', '2857', '2858', '2859', '2860', '2861', '2862', '2863', '2864', '2865', '2866', '2867', '2868', '2869', '2870', '2871', '2872', '3139', '3140', '3141', '3142', '3143', '3144', '3145', '3146', '3147', '3148', '3149', '3150', '3151', '3152', '3153', '3154', '3155', '3156', '3157', '3158', '3386', '3387', '3388', '3389', '3390', '3391', '3392', '3393', '3394', '3395', '3664', '3665', '3666', '3667', '3668', '3670', '3671', '3672', '3673', '3674', '3676', '3677', '3678', '3679', '3680', '3681', '3682', '3683', '3684', '3685', '3686', '3687', '3688', '3689', '3690', '3691', '3692', '3693', '3694', '3695', '3696', '3697', '3698', '3699', '3700', '3701', '3702', '3703', '3704', '3705', '3706', '3707', '3708', '3709', '3710', '3711', '3712', '3713', '3714', '3715', '3960', '3961', '3962', '3963', '3964', '3965', '3966', '3967', '3968', '3978', '3979', '3980', '3981', '3982', '3983', '3984', '3985', '3986', '3987', '4208', '4209', '4210', '4211', '4212', '4304', '4305', '4306', '4307', '4308', '4866', '4867', '4868', '4869', '4870', '4871', '4872', '4873', '4874', '4875')) GROUP BY `ds`, `p1`, `p2`, `p3`, `p4`, `p5`, `p6_md5`, `p7_md5` ORDER BY `ds2` DESC;
id estRows task access object operator info
Projection_7 53.00 root test.dt.ds, test.dt.p1, test.dt.p2, test.dt.p3, test.dt.p4, test.dt.p5, test.dt.p6_md5, test.dt.p7_md5, Column#21
└─Sort_8 53.00 root test.dt.ds2:desc
└─HashAgg_16 53.00 root group by:test.dt.ds, test.dt.p1, test.dt.p2, test.dt.p3, test.dt.p4, test.dt.p5, test.dt.p6_md5, test.dt.p7_md5, funcs:count(Column#32)->Column#21, funcs:firstrow(test.dt.ds)->test.dt.ds, funcs:firstrow(Column#34)->test.dt.ds2, funcs:firstrow(test.dt.p1)->test.dt.p1, funcs:firstrow(test.dt.p2)->test.dt.p2, funcs:firstrow(test.dt.p3)->test.dt.p3, funcs:firstrow(test.dt.p4)->test.dt.p4, funcs:firstrow(test.dt.p5)->test.dt.p5, funcs:firstrow(test.dt.p6_md5)->test.dt.p6_md5, funcs:firstrow(test.dt.p7_md5)->test.dt.p7_md5
└─IndexLookUp_17 53.00 root
├─IndexRangeScan_13(Build) 2650.00 cop[tikv] table:dt, index:cmi(cm) range:[1062,1062], [1086,1086], [1423,1423], [1424,1424], [1425,1425], [1426,1426], [1427,1427], [1428,1428], [1429,1429], [1430,1430], [1431,1431], [1432,1432], [1433,1433], [1434,1434], [1435,1435], [1436,1436], [1437,1437], [1438,1438], [1439,1439], [1440,1440], [1441,1441], [1442,1442], [1443,1443], [1444,1444], [1445,1445], [1446,1446], [1447,1447], [1448,1448], [1449,1449], [1450,1450], [1451,1451], [1452,1452], [1488,1488], [1489,1489], [1490,1490], [1491,1491], [1492,1492], [1493,1493], [1494,1494], [1495,1495], [1496,1496], [1497,1497], [1550,1550], [1551,1551], [1552,1552], [1553,1553], [1554,1554], [1555,1555], [1556,1556], [1557,1557], [1558,1558], [1559,1559], [1597,1597], [1598,1598], [1599,1599], [1600,1600], [1601,1601], [1602,1602], [1603,1603], [1604,1604], [1605,1605], [1606,1606], [1607,1607], [1608,1608], [1609,1609], [1610,1610], [1611,1611], [1612,1612], [1613,1613], [1614,1614], [1615,1615], [1616,1616], [1623,1623], [1624,1624], [1625,1625], [1626,1626], [1627,1627], [1628,1628], [1629,1629], [1630,1630], [1631,1631], [1632,1632], [1709,1709], [1719,1719], [1720,1720], [1843,1843], [2813,2813], [2814,2814], [2815,2815], [2816,2816], [2817,2817], [2818,2818], [2819,2819], [2820,2820], [2821,2821], [2822,2822], [2823,2823], [2824,2824], [2825,2825], [2826,2826], [2827,2827], [2828,2828], [2829,2829], [2830,2830], [2831,2831], [2832,2832], [2833,2833], [2834,2834], [2835,2835], [2836,2836], [2837,2837], [2838,2838], [2839,2839], [2840,2840], [2841,2841], [2842,2842], [2843,2843], [2844,2844], [2845,2845], [2846,2846], [2847,2847], [2848,2848], [2849,2849], [2850,2850], [2851,2851], [2852,2852], [2853,2853], [2854,2854], [2855,2855], [2856,2856], [2857,2857], [2858,2858], [2859,2859], [2860,2860], [2861,2861], [2862,2862], [2863,2863], [2864,2864], [2865,2865], [2866,2866], [2867,2867], [2868,2868], [2869,2869], [2870,2870], [2871,2871], [2872,2872], [3139,3139], [3140,3140], [3141,3141], [3142,3142], [3143,3143], [3144,3144], [3145,3145], [3146,3146], [3147,3147], [3148,3148], [3149,3149], [3150,3150], [3151,3151], [3152,3152], [3153,3153], [3154,3154], [3155,3155], [3156,3156], [3157,3157], [3158,3158], [3386,3386], [3387,3387], [3388,3388], [3389,3389], [3390,3390], [3391,3391], [3392,3392], [3393,3393], [3394,3394], [3395,3395], [3664,3664], [3665,3665], [3666,3666], [3667,3667], [3668,3668], [3670,3670], [3671,3671], [3672,3672], [3673,3673], [3674,3674], [3676,3676], [3677,3677], [3678,3678], [3679,3679], [3680,3680], [3681,3681], [3682,3682], [3683,3683], [3684,3684], [3685,3685], [3686,3686], [3687,3687], [3688,3688], [3689,3689], [3690,3690], [3691,3691], [3692,3692], [3693,3693], [3694,3694], [3695,3695], [3696,3696], [3697,3697], [3698,3698], [3699,3699], [3700,3700], [3701,3701], [3702,3702], [3703,3703], [3704,3704], [3705,3705], [3706,3706], [3707,3707], [3708,3708], [3709,3709], [3710,3710], [3711,3711], [3712,3712], [3713,3713], [3714,3714], [3715,3715], [3960,3960], [3961,3961], [3962,3962], [3963,3963], [3964,3964], [3965,3965], [3966,3966], [3967,3967], [3968,3968], [3978,3978], [3979,3979], [3980,3980], [3981,3981], [3982,3982], [3983,3983], [3984,3984], [3985,3985], [3986,3986], [3987,3987], [4208,4208], [4209,4209], [4210,4210], [4211,4211], [4212,4212], [4304,4304], [4305,4305], [4306,4306], [4307,4307], [4308,4308], [4866,4866], [4867,4867], [4868,4868], [4869,4869], [4870,4870], [4871,4871], [4872,4872], [4873,4873], [4874,4874], [4875,4875], keep order:false, stats:pseudo
└─HashAgg_11(Probe) 53.00 cop[tikv] group by:test.dt.ds, test.dt.p1, test.dt.p2, test.dt.p3, test.dt.p4, test.dt.p5, test.dt.p6_md5, test.dt.p7_md5, funcs:count(test.dt.dic)->Column#32, funcs:firstrow(test.dt.ds2)->Column#34
└─Selection_15 66.25 cop[tikv] ge(test.dt.ds, 2016-09-01 00:00:00.000000), le(test.dt.ds, 2016-11-03 00:00:00.000000)
└─TableRowIDScan_14 2650.00 cop[tikv] table:dt keep order:false, stats:pseudo
explain select gad.id as gid,sdk.id as sid,gad.aid as aid,gad.cm as cm,sdk.dic as dic,sdk.ip as ip, sdk.t as t, gad.p1 as p1, gad.p2 as p2, gad.p3 as p3, gad.p4 as p4, gad.p5 as p5, gad.p6_md5 as p6, gad.p7_md5 as p7, gad.ext as ext, gad.t as gtime from st gad join (select id, aid, pt, dic, ip, t from dd where pt = 'android' and bm = 0 and t > 1478143908) sdk on gad.aid = sdk.aid and gad.ip = sdk.ip and sdk.t > gad.t where gad.t > 1478143908 and gad.pt = 'android' group by gad.aid, sdk.dic limit 2500;
id estRows task access object operator info
Projection_13 1.00 root test.st.id, test.dd.id, test.st.aid, test.st.cm, test.dd.dic, test.dd.ip, test.dd.t, test.st.p1, test.st.p2, test.st.p3, test.st.p4, test.st.p5, test.st.p6_md5, test.st.p7_md5, test.st.ext, test.st.t
└─Limit_16 1.00 root offset:0, count:2500
└─HashAgg_19 1.00 root group by:test.dd.dic, test.st.aid, funcs:firstrow(test.st.id)->test.st.id, funcs:firstrow(test.st.aid)->test.st.aid, funcs:firstrow(test.st.cm)->test.st.cm, funcs:firstrow(test.st.p1)->test.st.p1, funcs:firstrow(test.st.p2)->test.st.p2, funcs:firstrow(test.st.p3)->test.st.p3, funcs:firstrow(test.st.p4)->test.st.p4, funcs:firstrow(test.st.p5)->test.st.p5, funcs:firstrow(test.st.p6_md5)->test.st.p6_md5, funcs:firstrow(test.st.p7_md5)->test.st.p7_md5, funcs:firstrow(test.st.ext)->test.st.ext, funcs:firstrow(test.st.t)->test.st.t, funcs:firstrow(test.dd.id)->test.dd.id, funcs:firstrow(test.dd.dic)->test.dd.dic, funcs:firstrow(test.dd.ip)->test.dd.ip, funcs:firstrow(test.dd.t)->test.dd.t
└─HashJoin_28 0.00 root inner join, equal:[eq(test.dd.aid, test.st.aid) eq(test.dd.ip, test.st.ip)], other cond:gt(test.dd.t, test.st.t)
├─IndexLookUp_46(Build) 0.00 root
│ ├─IndexRangeScan_43(Build) 3333.33 cop[tikv] table:dd, index:t(t) range:(1478143908,+inf], keep order:false, stats:pseudo
│ └─Selection_45(Probe) 0.00 cop[tikv] eq(test.dd.bm, 0), eq(test.dd.pt, "android"), not(isnull(test.dd.ip))
│ └─TableRowIDScan_44 3333.33 cop[tikv] table:dd keep order:false, stats:pseudo
└─IndexLookUp_35(Probe) 3.33 root
├─IndexRangeScan_32(Build) 3333.33 cop[tikv] table:gad, index:t(t) range:(1478143908,+inf], keep order:false, stats:pseudo
└─Selection_34(Probe) 3.33 cop[tikv] eq(test.st.pt, "android"), not(isnull(test.st.ip))
└─TableRowIDScan_33 3333.33 cop[tikv] table:gad keep order:false, stats:pseudo
explain select gad.id as gid,sdk.id as sid,gad.aid as aid,gad.cm as cm,sdk.dic as dic,sdk.ip as ip, sdk.t as t, gad.p1 as p1, gad.p2 as p2, gad.p3 as p3, gad.p4 as p4, gad.p5 as p5, gad.p6_md5 as p6, gad.p7_md5 as p7, gad.ext as ext from st gad join dd sdk on gad.aid = sdk.aid and gad.dic = sdk.mac and gad.t < sdk.t where gad.t > 1477971479 and gad.bm = 0 and gad.pt = 'ios' and gad.dit = 'mac' and sdk.t > 1477971479 and sdk.bm = 0 and sdk.pt = 'ios' limit 3000;
id estRows task access object operator info
Projection_10 0.00 root test.st.id, test.dd.id, test.st.aid, test.st.cm, test.dd.dic, test.dd.ip, test.dd.t, test.st.p1, test.st.p2, test.st.p3, test.st.p4, test.st.p5, test.st.p6_md5, test.st.p7_md5, test.st.ext
└─Limit_13 0.00 root offset:0, count:3000
└─IndexJoin_18 0.00 root inner join, inner:IndexLookUp_17, outer key:test.st.aid, inner key:test.dd.aid, equal cond:eq(test.st.aid, test.dd.aid), eq(test.st.dic, test.dd.mac), other cond:lt(test.st.t, test.dd.t)
├─IndexLookUp_29(Build) 0.00 root
│ ├─IndexRangeScan_26(Build) 3333.33 cop[tikv] table:gad, index:t(t) range:(1477971479,+inf], keep order:false, stats:pseudo
│ └─Selection_28(Probe) 0.00 cop[tikv] eq(test.st.bm, 0), eq(test.st.dit, "mac"), eq(test.st.pt, "ios"), not(isnull(test.st.dic))
│ └─TableRowIDScan_27 3333.33 cop[tikv] table:gad keep order:false, stats:pseudo
└─IndexLookUp_17(Probe) 0.00 root
├─IndexRangeScan_14(Build) 10000.00 cop[tikv] table:sdk, index:aid(aid, dic) range: decided by [eq(test.dd.aid, test.st.aid)], keep order:false, stats:pseudo
└─Selection_16(Probe) 0.00 cop[tikv] eq(test.dd.bm, 0), eq(test.dd.pt, "ios"), gt(test.dd.t, 1477971479), not(isnull(test.dd.mac)), not(isnull(test.dd.t))
└─TableRowIDScan_15 10000.00 cop[tikv] table:sdk keep order:false, stats:pseudo
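The inner side of IndexJoin_18 is rebuilt for every outer row: range: decided by [eq(test.dd.aid, test.st.aid)] means the IndexRangeScan gets its key range from the driving row at runtime rather than from constants. A minimal sketch, assuming this build accepts the INL_JOIN hint (table names are hypothetical):
CREATE TABLE outer_t (a int);
CREATE TABLE inner_t (a int, KEY a (a));
-- The inner index range is decided per outer row by outer_t.a.
EXPLAIN SELECT /*+ INL_JOIN(inner_t) */ * FROM outer_t JOIN inner_t ON outer_t.a = inner_t.a;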
explain SELECT cm, p1, p2, p3, p4, p5, p6_md5, p7_md5, count(1) as click_pv, count(DISTINCT ip) as click_ip FROM st WHERE (t between 1478188800 and 1478275200) and aid='cn.sbkcq' and pt='android' GROUP BY cm, p1, p2, p3, p4, p5, p6_md5, p7_md5;
id estRows task access object operator info
Projection_5 1.00 root test.st.cm, test.st.p1, test.st.p2, test.st.p3, test.st.p4, test.st.p5, test.st.p6_md5, test.st.p7_md5, Column#20, Column#21
└─HashAgg_6 1.00 root group by:test.st.cm, test.st.p1, test.st.p2, test.st.p3, test.st.p4, test.st.p5, test.st.p6_md5, test.st.p7_md5, funcs:count(1)->Column#20, funcs:count(distinct test.st.ip)->Column#21, funcs:firstrow(test.st.cm)->test.st.cm, funcs:firstrow(test.st.p1)->test.st.p1, funcs:firstrow(test.st.p2)->test.st.p2, funcs:firstrow(test.st.p3)->test.st.p3, funcs:firstrow(test.st.p4)->test.st.p4, funcs:firstrow(test.st.p5)->test.st.p5, funcs:firstrow(test.st.p6_md5)->test.st.p6_md5, funcs:firstrow(test.st.p7_md5)->test.st.p7_md5
└─IndexLookUp_13 0.00 root
├─IndexRangeScan_10(Build) 250.00 cop[tikv] table:st, index:t(t) range:[1478188800,1478275200], keep order:false, stats:pseudo
└─Selection_12(Probe) 0.00 cop[tikv] eq(test.st.aid, "cn.sbkcq"), eq(test.st.pt, "android")
└─TableRowIDScan_11 250.00 cop[tikv] table:st keep order:false, stats:pseudo
explain select dt.id as id, dt.aid as aid, dt.pt as pt, dt.dic as dic, dt.cm as cm, rr.gid as gid, rr.acd as acd, rr.t as t,dt.p1 as p1, dt.p2 as p2, dt.p3 as p3, dt.p4 as p4, dt.p5 as p5, dt.p6_md5 as p6, dt.p7_md5 as p7 from dt dt join rr rr on (rr.pt = 'ios' and rr.t > 1478185592 and dt.aid = rr.aid and dt.dic = rr.dic) where dt.pt = 'ios' and dt.t > 1478185592 and dt.bm = 0 limit 2000;
id estRows task access object operator info
Projection_10 0.00 root test.dt.id, test.dt.aid, test.dt.pt, test.dt.dic, test.dt.cm, test.rr.gid, test.rr.acd, test.rr.t, test.dt.p1, test.dt.p2, test.dt.p3, test.dt.p4, test.dt.p5, test.dt.p6_md5, test.dt.p7_md5
└─Limit_13 0.00 root offset:0, count:2000
└─IndexJoin_27 0.00 root inner join, inner:IndexLookUp_26, outer key:test.rr.aid, test.rr.dic, inner key:test.dt.aid, test.dt.dic, equal cond:eq(test.rr.aid, test.dt.aid), eq(test.rr.dic, test.dt.dic)
├─TableReader_47(Build) 3.33 root data:Selection_46
│ └─Selection_46 3.33 cop[tikv] eq(test.rr.pt, "ios"), gt(test.rr.t, 1478185592)
│ └─TableFullScan_45 10000.00 cop[tikv] table:rr keep order:false, stats:pseudo
└─IndexLookUp_26(Probe) 0.00 root
├─Selection_24(Build) 1.00 cop[tikv] not(isnull(test.dt.dic))
│ └─IndexRangeScan_22 1.00 cop[tikv] table:dt, index:aid(aid, dic) range: decided by [eq(test.dt.aid, test.rr.aid) eq(test.dt.dic, test.rr.dic)], keep order:false, stats:pseudo
└─Selection_25(Probe) 0.00 cop[tikv] eq(test.dt.bm, 0), eq(test.dt.pt, "ios"), gt(test.dt.t, 1478185592)
└─TableRowIDScan_23 1.00 cop[tikv] table:dt keep order:false, stats:pseudo
explain select pc,cr,count(DISTINCT uid) as pay_users,count(oid) as pay_times,sum(am) as am from pp where ps=2 and ppt>=1478188800 and ppt<1478275200 and pi in ('510017','520017') and uid in ('18089709','18090780') group by pc,cr;
id estRows task access object operator info
Projection_5 1.00 root test.pp.pc, test.pp.cr, Column#22, Column#23, Column#24
└─HashAgg_6 1.00 root group by:test.pp.cr, test.pp.pc, funcs:count(distinct test.pp.uid)->Column#22, funcs:count(test.pp.oid)->Column#23, funcs:sum(test.pp.am)->Column#24, funcs:firstrow(test.pp.pc)->test.pp.pc, funcs:firstrow(test.pp.cr)->test.pp.cr
└─IndexLookUp_21 0.00 root
├─IndexRangeScan_18(Build) 0.40 cop[tikv] table:pp, index:sp(uid, pi) range:[18089709 510017,18089709 510017], [18089709 520017,18089709 520017], [18090780 510017,18090780 510017], [18090780 520017,18090780 520017], keep order:false, stats:pseudo
└─Selection_20(Probe) 0.00 cop[tikv] eq(test.pp.ps, 2), ge(test.pp.ppt, 1478188800), lt(test.pp.ppt, 1478275200)
└─TableRowIDScan_19 0.40 cop[tikv] table:pp keep order:false, stats:pseudo
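Because the sp(uid, pi) index covers both IN lists, the optimizer expands them into the 2 x 2 point ranges shown above rather than scanning one column and filtering on the other. A minimal sketch of the same range construction (hypothetical table):
CREATE TABLE t_sp (u bigint, p int, KEY sp (u, p));
-- Two IN lists over a composite index combine into point ranges such as
-- [1 10,1 10], [1 20,1 20], [2 10,2 10], [2 20,2 20].
EXPLAIN SELECT * FROM t_sp WHERE u IN (1, 2) AND p IN (10, 20);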
CREATE TABLE `tbl_001` (`a` int, `b` int);
CREATE TABLE `tbl_002` (`a` int, `b` int);
CREATE TABLE `tbl_003` (`a` int, `b` int);
CREATE TABLE `tbl_004` (`a` int, `b` int);
CREATE TABLE `tbl_005` (`a` int, `b` int);
CREATE TABLE `tbl_006` (`a` int, `b` int);
CREATE TABLE `tbl_007` (`a` int, `b` int);
CREATE TABLE `tbl_008` (`a` int, `b` int);
CREATE TABLE `tbl_009` (`a` int, `b` int);
explain select sum(a) from (select * from tbl_001 union all select * from tbl_002 union all select * from tbl_003 union all select * from tbl_004 union all select * from tbl_005 union all select * from tbl_006 union all select * from tbl_007 union all select * from tbl_008 union all select * from tbl_009) x group by b;
id estRows task access object operator info
HashAgg_34 72000.00 root group by:Column#32, funcs:sum(Column#31)->Column#30
└─Projection_63 90000.00 root cast(Column#28, decimal(65,0) BINARY)->Column#31, Column#29
└─Union_35 90000.00 root
├─TableReader_38 10000.00 root data:TableFullScan_37
│ └─TableFullScan_37 10000.00 cop[tikv] table:tbl_001 keep order:false, stats:pseudo
├─TableReader_41 10000.00 root data:TableFullScan_40
│ └─TableFullScan_40 10000.00 cop[tikv] table:tbl_002 keep order:false, stats:pseudo
├─TableReader_44 10000.00 root data:TableFullScan_43
│ └─TableFullScan_43 10000.00 cop[tikv] table:tbl_003 keep order:false, stats:pseudo
├─TableReader_47 10000.00 root data:TableFullScan_46
│ └─TableFullScan_46 10000.00 cop[tikv] table:tbl_004 keep order:false, stats:pseudo
├─TableReader_50 10000.00 root data:TableFullScan_49
│ └─TableFullScan_49 10000.00 cop[tikv] table:tbl_005 keep order:false, stats:pseudo
├─TableReader_53 10000.00 root data:TableFullScan_52
│ └─TableFullScan_52 10000.00 cop[tikv] table:tbl_006 keep order:false, stats:pseudo
├─TableReader_56 10000.00 root data:TableFullScan_55
│ └─TableFullScan_55 10000.00 cop[tikv] table:tbl_007 keep order:false, stats:pseudo
├─TableReader_59 10000.00 root data:TableFullScan_58
│ └─TableFullScan_58 10000.00 cop[tikv] table:tbl_008 keep order:false, stats:pseudo
└─TableReader_62 10000.00 root data:TableFullScan_61
└─TableFullScan_61 10000.00 cop[tikv] table:tbl_009 keep order:false, stats:pseudo
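Projection_63 casts the union output to decimal(65,0) before HashAgg_34 sums it: as in MySQL, SUM over an integer column is computed as a decimal so that adding many int values cannot overflow, which is exactly what the cast in the plan prepares for.
-- SUM over int yields a decimal result, matching the decimal(65,0) cast above.
SELECT sum(a) FROM tbl_001;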
CREATE TABLE org_department (
id int(11) NOT NULL AUTO_INCREMENT,
ctx int(11) DEFAULT '0' COMMENT 'organization id',
name varchar(128) DEFAULT NULL,
left_value int(11) DEFAULT NULL,
right_value int(11) DEFAULT NULL,
depth int(11) DEFAULT NULL,
leader_id bigint(20) DEFAULT NULL,
status int(11) DEFAULT '1000',
created_on datetime DEFAULT NULL,
updated_on datetime DEFAULT NULL,
PRIMARY KEY (id),
UNIQUE KEY org_department_id_uindex (id),
KEY org_department_leader_id_index (leader_id),
KEY org_department_ctx_index (ctx)
);
CREATE TABLE org_position (
id int(11) NOT NULL AUTO_INCREMENT,
ctx int(11) DEFAULT NULL,
name varchar(128) DEFAULT NULL,
left_value int(11) DEFAULT NULL,
right_value int(11) DEFAULT NULL,
depth int(11) DEFAULT NULL,
department_id int(11) DEFAULT NULL,
status int(2) DEFAULT NULL,
created_on datetime DEFAULT NULL,
updated_on datetime DEFAULT NULL,
PRIMARY KEY (id),
UNIQUE KEY org_position_id_uindex (id),
KEY org_position_department_id_index (department_id)
) ENGINE=InnoDB AUTO_INCREMENT=22 DEFAULT CHARSET=utf8;
CREATE TABLE org_employee_position (
hotel_id int(11) DEFAULT NULL,
user_id bigint(20) DEFAULT NULL,
position_id int(11) DEFAULT NULL,
status int(11) DEFAULT NULL,
created_on datetime DEFAULT NULL,
updated_on datetime DEFAULT NULL,
UNIQUE KEY org_employee_position_pk (hotel_id,user_id,position_id)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
explain SELECT d.id, d.ctx, d.name, d.left_value, d.right_value, d.depth, d.leader_id, d.status, d.created_on, d.updated_on FROM org_department AS d LEFT JOIN org_position AS p ON p.department_id = d.id AND p.status = 1000 LEFT JOIN org_employee_position AS ep ON ep.position_id = p.id AND ep.status = 1000 WHERE (d.ctx = 1 AND (ep.user_id = 62 OR d.id = 20 OR d.id = 20) AND d.status = 1000) GROUP BY d.id ORDER BY d.left_value;
id estRows task access object operator info
Sort_10 1.00 root test.org_department.left_value:asc
└─HashAgg_15 1.00 root group by:test.org_department.id, funcs:firstrow(test.org_department.id)->test.org_department.id, funcs:firstrow(test.org_department.ctx)->test.org_department.ctx, funcs:firstrow(test.org_department.name)->test.org_department.name, funcs:firstrow(test.org_department.left_value)->test.org_department.left_value, funcs:firstrow(test.org_department.right_value)->test.org_department.right_value, funcs:firstrow(test.org_department.depth)->test.org_department.depth, funcs:firstrow(test.org_department.leader_id)->test.org_department.leader_id, funcs:firstrow(test.org_department.status)->test.org_department.status, funcs:firstrow(test.org_department.created_on)->test.org_department.created_on, funcs:firstrow(test.org_department.updated_on)->test.org_department.updated_on
└─Selection_22 0.01 root or(eq(test.org_employee_position.user_id, 62), or(eq(test.org_department.id, 20), eq(test.org_department.id, 20)))
└─HashJoin_24 0.02 root left outer join, equal:[eq(test.org_position.id, test.org_employee_position.position_id)]
├─IndexJoin_33(Build) 0.01 root left outer join, inner:IndexLookUp_32, outer key:test.org_department.id, inner key:test.org_position.department_id, equal cond:eq(test.org_department.id, test.org_position.department_id)
│ ├─IndexLookUp_55(Build) 0.01 root
│ │ ├─IndexRangeScan_52(Build) 10.00 cop[tikv] table:d, index:org_department_ctx_index(ctx) range:[1,1], keep order:false, stats:pseudo
│ │ └─Selection_54(Probe) 0.01 cop[tikv] eq(test.org_department.status, 1000)
│ │ └─TableRowIDScan_53 10.00 cop[tikv] table:d keep order:false, stats:pseudo
│ └─IndexLookUp_32(Probe) 1.25 root
│ ├─Selection_30(Build) 1250.00 cop[tikv] not(isnull(test.org_position.department_id))
│ │ └─IndexRangeScan_28 1251.25 cop[tikv] table:p, index:org_position_department_id_index(department_id) range: decided by [eq(test.org_position.department_id, test.org_department.id)], keep order:false, stats:pseudo
│ └─Selection_31(Probe) 1.25 cop[tikv] eq(test.org_position.status, 1000)
│ └─TableRowIDScan_29 1250.00 cop[tikv] table:p keep order:false, stats:pseudo
└─TableReader_65(Probe) 9.99 root data:Selection_64
└─Selection_64 9.99 cop[tikv] eq(test.org_employee_position.status, 1000), not(isnull(test.org_employee_position.position_id))
└─TableFullScan_63 10000.00 cop[tikv] table:ep keep order:false, stats:pseudo
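Note where Selection_22 sits: the OR predicate references ep.user_id, a column from the inner side of a LEFT JOIN, and the OR makes it non-null-rejecting, so it can neither convert the outer join to an inner join nor be pushed below it; it stays at root above HashJoin_24. A minimal sketch of the effect (hypothetical two-table shape):
CREATE TABLE l (id int);
CREATE TABLE r (id int, v int);
-- The OR keeps the predicate non-null-rejecting, so it must run after the
-- outer join instead of being pushed down into r's scan.
EXPLAIN SELECT * FROM l LEFT JOIN r ON l.id = r.id WHERE r.v = 1 OR l.id = 20;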

227
cmd/explaintest/r/explain_complex_stats.result

@@ -0,0 +1,227 @@
drop table if exists dt;
CREATE TABLE dt (
id int(11) unsigned NOT NULL,
aid varchar(32) NOT NULL comment '[[set=cn.sbkcq,us.sbkcq]]',
cm int(10) unsigned NOT NULL comment '[[range=1000,5000]]',
pt varchar(10) NOT NULL comment '[[set=android,ios]]',
dic varchar(64) DEFAULT NULL,
ip varchar(15) DEFAULT NULL,
ds date DEFAULT NULL comment '[[range=2016-01-01,2016-12-31]]',
ds2 varchar(13) DEFAULT NULL ,
t int(13) DEFAULT NULL comment '[[range=1477971470,1480000000]]',
ext varchar(550) DEFAULT NULL,
p1 varchar(64) DEFAULT NULL ,
p2 varchar(64) DEFAULT NULL,
p3 varchar(64) DEFAULT NULL,
p4 varchar(64) DEFAULT NULL,
p5 varchar(64) DEFAULT NULL,
p6_md5 varchar(32) DEFAULT NULL,
p7_md5 varchar(32) DEFAULT NULL,
bm tinyint(1) DEFAULT '0' comment '[[set=0,1]]',
bgm tinyint(1) DEFAULT '0' comment '[[set=0,1]]',
insert_date timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP,
PRIMARY KEY (id),
UNIQUE KEY aid (aid,dic),
KEY ip (ip),
KEY cm (cm)
);
load stats 's/explain_complex_stats_dt.json';
drop table if exists st;
CREATE TABLE st (
id int(11) UNSIGNED NOT NULL,
pt varchar(10) NOT NULL comment '[[set=android,ios]]',
aid varchar(35) NOT NULL comment '[[set=cn.sbkcq,us.sbkcq]]',
cm int(10) NOT NULL comment '[[range=1000,5000]]',
ip varchar(15) DEFAULT NULL ,
dic varchar(64) DEFAULT NULL ,
dit varchar(5) DEFAULT NULL comment '[[set=win,mac,linux]]',
p1 varchar(64) DEFAULT NULL ,
p2 varchar(64) DEFAULT NULL,
p3 varchar(64) DEFAULT NULL,
p4 varchar(64) DEFAULT NULL,
p5 varchar(64) DEFAULT NULL,
p6_md5 varchar(32) DEFAULT NULL,
p7_md5 varchar(32) DEFAULT NULL,
ext varchar(550) DEFAULT NULL,
bm tinyint(1) DEFAULT '0' comment '[[set=0,1]]',
ds date NOT NULL ,
ds2 varchar(13) DEFAULT NULL ,
t int(13) NOT NULL comment '[[range=1477971470,1479144000]]',
PRIMARY KEY (id),
KEY t (t),
KEY icd (cm,ds)
);
load stats 's/explain_complex_stats_st.json';
drop table if exists dd;
CREATE TABLE dd (
id int(11) UNSIGNED NOT NULL,
aid varchar(35) NOT NULL comment '[[set=cn.sbkcq,us.sbkcq]]',
pt varchar(10) NOT NULL comment '[[set=android,ios]]',
dic varchar(64) NOT NULL,
dim varchar(32) NOT NULL ,
mac varchar(32) DEFAULT NULL ,
ip varchar(15) DEFAULT NULL ,
t int(13) DEFAULT NULL comment '[[range=1478143900,1478144000]]',
bm tinyint(1) DEFAULT '0' comment '[[set=0,1]]',
PRIMARY KEY (id),
UNIQUE KEY aid (aid,dic),
KEY ip (ip),
KEY pi (aid,dim),
KEY t (t)
);
load stats 's/explain_complex_stats_dd.json';
drop table if exists pp;
CREATE TABLE pp (
oid varchar(20) NOT NULL,
uid bigint(20) unsigned NOT NULL comment '[[set=18089709,18089710,18090780,18090781]]',
cid int(11) unsigned NOT NULL,
ppt int(11) NOT NULL DEFAULT '0' comment '[[range=1478188700,1478275300]]',
pdt int(11) DEFAULT '0',
am decimal(10,2) unsigned NOT NULL DEFAULT '0.00',
cc decimal(10,2) NOT NULL DEFAULT '0.00',
ps tinyint(1) NOT NULL comment '[[set=0,1,2]]',
tid varchar(200) DEFAULT NULL,
ppf varchar(50) NOT NULL comment '[[set=android,ios]]',
bs tinyint(1) NOT NULL DEFAULT '0' comment '[[set=0,1]]',
bex tinyint(1) NOT NULL DEFAULT '0' comment '[[set=0,1]]',
bu int(11) NOT NULL DEFAULT '0' comment '[[set=0,1]]',
pc char(10) NOT NULL DEFAULT 'CNY',
ui int(16) NOT NULL DEFAULT '1',
cr decimal(10,4) unsigned NOT NULL DEFAULT '1.0000',
pi int(11) unsigned NOT NULL comment '[[set=510017,520017,530017]]',
si int(11) unsigned NOT NULL,
bcc int(11) NOT NULL DEFAULT '0',
acc int(11) NOT NULL DEFAULT '0',
KEY oid (oid),
KEY uid (uid),
KEY ppt (ppt),
KEY pdt (pdt),
KEY cid (cid),
KEY ps (ps),
KEY sp (uid,pi)
);
load stats 's/explain_complex_stats_pp.json';
drop table if exists rr;
CREATE TABLE rr (
aid varchar(35) NOT NULL comment '[[set=cn.sbkcq,us.sbkcq]]',
pt varchar(10) NOT NULL comment '[[set=android,ios]]',
dic varchar(64) NOT NULL ,
gid varchar(42) NOT NULL ,
acd varchar(32) NOT NULL ,
t int(13) DEFAULT NULL comment '[[range=1478180000,1480275300]]',
bm tinyint(1) DEFAULT '0' comment '[[set=0,1]]',
PRIMARY KEY (aid,dic)
);
load stats 's/explain_complex_stats_rr.json';
explain SELECT ds, p1, p2, p3, p4, p5, p6_md5, p7_md5, count(dic) as install_device FROM dt use index (cm) WHERE (ds >= '2016-09-01') AND (ds <= '2016-11-03') AND (cm IN ('1062', '1086', '1423', '1424', '1425', '1426', '1427', '1428', '1429', '1430', '1431', '1432', '1433', '1434', '1435', '1436', '1437', '1438', '1439', '1440', '1441', '1442', '1443', '1444', '1445', '1446', '1447', '1448', '1449', '1450', '1451', '1452', '1488', '1489', '1490', '1491', '1492', '1493', '1494', '1495', '1496', '1497', '1550', '1551', '1552', '1553', '1554', '1555', '1556', '1557', '1558', '1559', '1597', '1598', '1599', '1600', '1601', '1602', '1603', '1604', '1605', '1606', '1607', '1608', '1609', '1610', '1611', '1612', '1613', '1614', '1615', '1616', '1623', '1624', '1625', '1626', '1627', '1628', '1629', '1630', '1631', '1632', '1709', '1719', '1720', '1843', '2813', '2814', '2815', '2816', '2817', '2818', '2819', '2820', '2821', '2822', '2823', '2824', '2825', '2826', '2827', '2828', '2829', '2830', '2831', '2832', '2833', '2834', '2835', '2836', '2837', '2838', '2839', '2840', '2841', '2842', '2843', '2844', '2845', '2846', '2847', '2848', '2849', '2850', '2851', '2852', '2853', '2854', '2855', '2856', '2857', '2858', '2859', '2860', '2861', '2862', '2863', '2864', '2865', '2866', '2867', '2868', '2869', '2870', '2871', '2872', '3139', '3140', '3141', '3142', '3143', '3144', '3145', '3146', '3147', '3148', '3149', '3150', '3151', '3152', '3153', '3154', '3155', '3156', '3157', '3158', '3386', '3387', '3388', '3389', '3390', '3391', '3392', '3393', '3394', '3395', '3664', '3665', '3666', '3667', '3668', '3670', '3671', '3672', '3673', '3674', '3676', '3677', '3678', '3679', '3680', '3681', '3682', '3683', '3684', '3685', '3686', '3687', '3688', '3689', '3690', '3691', '3692', '3693', '3694', '3695', '3696', '3697', '3698', '3699', '3700', '3701', '3702', '3703', '3704', '3705', '3706', '3707', '3708', '3709', '3710', '3711', '3712', '3713', '3714', '3715', '3960', '3961', '3962', '3963', '3964', '3965', '3966', '3967', '3968', '3978', '3979', '3980', '3981', '3982', '3983', '3984', '3985', '3986', '3987', '4208', '4209', '4210', '4211', '4212', '4304', '4305', '4306', '4307', '4308', '4866', '4867', '4868', '4869', '4870', '4871', '4872', '4873', '4874', '4875')) GROUP BY ds, p1, p2, p3, p4, p5, p6_md5, p7_md5 ORDER BY ds2 DESC;
id estRows task access object operator info
Projection_7 21.53 root test.dt.ds, test.dt.p1, test.dt.p2, test.dt.p3, test.dt.p4, test.dt.p5, test.dt.p6_md5, test.dt.p7_md5, Column#21
└─Sort_8 21.53 root test.dt.ds2:desc
└─HashAgg_16 21.53 root group by:test.dt.ds, test.dt.p1, test.dt.p2, test.dt.p3, test.dt.p4, test.dt.p5, test.dt.p6_md5, test.dt.p7_md5, funcs:count(Column#32)->Column#21, funcs:firstrow(test.dt.ds)->test.dt.ds, funcs:firstrow(Column#34)->test.dt.ds2, funcs:firstrow(test.dt.p1)->test.dt.p1, funcs:firstrow(test.dt.p2)->test.dt.p2, funcs:firstrow(test.dt.p3)->test.dt.p3, funcs:firstrow(test.dt.p4)->test.dt.p4, funcs:firstrow(test.dt.p5)->test.dt.p5, funcs:firstrow(test.dt.p6_md5)->test.dt.p6_md5, funcs:firstrow(test.dt.p7_md5)->test.dt.p7_md5
└─IndexLookUp_17 21.53 root
├─IndexRangeScan_13(Build) 128.32 cop[tikv] table:dt, index:cm(cm) range:[1062,1062], [1086,1086], [1423,1423], [1424,1424], [1425,1425], [1426,1426], [1427,1427], [1428,1428], [1429,1429], [1430,1430], [1431,1431], [1432,1432], [1433,1433], [1434,1434], [1435,1435], [1436,1436], [1437,1437], [1438,1438], [1439,1439], [1440,1440], [1441,1441], [1442,1442], [1443,1443], [1444,1444], [1445,1445], [1446,1446], [1447,1447], [1448,1448], [1449,1449], [1450,1450], [1451,1451], [1452,1452], [1488,1488], [1489,1489], [1490,1490], [1491,1491], [1492,1492], [1493,1493], [1494,1494], [1495,1495], [1496,1496], [1497,1497], [1550,1550], [1551,1551], [1552,1552], [1553,1553], [1554,1554], [1555,1555], [1556,1556], [1557,1557], [1558,1558], [1559,1559], [1597,1597], [1598,1598], [1599,1599], [1600,1600], [1601,1601], [1602,1602], [1603,1603], [1604,1604], [1605,1605], [1606,1606], [1607,1607], [1608,1608], [1609,1609], [1610,1610], [1611,1611], [1612,1612], [1613,1613], [1614,1614], [1615,1615], [1616,1616], [1623,1623], [1624,1624], [1625,1625], [1626,1626], [1627,1627], [1628,1628], [1629,1629], [1630,1630], [1631,1631], [1632,1632], [1709,1709], [1719,1719], [1720,1720], [1843,1843], [2813,2813], [2814,2814], [2815,2815], [2816,2816], [2817,2817], [2818,2818], [2819,2819], [2820,2820], [2821,2821], [2822,2822], [2823,2823], [2824,2824], [2825,2825], [2826,2826], [2827,2827], [2828,2828], [2829,2829], [2830,2830], [2831,2831], [2832,2832], [2833,2833], [2834,2834], [2835,2835], [2836,2836], [2837,2837], [2838,2838], [2839,2839], [2840,2840], [2841,2841], [2842,2842], [2843,2843], [2844,2844], [2845,2845], [2846,2846], [2847,2847], [2848,2848], [2849,2849], [2850,2850], [2851,2851], [2852,2852], [2853,2853], [2854,2854], [2855,2855], [2856,2856], [2857,2857], [2858,2858], [2859,2859], [2860,2860], [2861,2861], [2862,2862], [2863,2863], [2864,2864], [2865,2865], [2866,2866], [2867,2867], [2868,2868], [2869,2869], [2870,2870], [2871,2871], [2872,2872], [3139,3139], [3140,3140], [3141,3141], [3142,3142], [3143,3143], [3144,3144], [3145,3145], [3146,3146], [3147,3147], [3148,3148], [3149,3149], [3150,3150], [3151,3151], [3152,3152], [3153,3153], [3154,3154], [3155,3155], [3156,3156], [3157,3157], [3158,3158], [3386,3386], [3387,3387], [3388,3388], [3389,3389], [3390,3390], [3391,3391], [3392,3392], [3393,3393], [3394,3394], [3395,3395], [3664,3664], [3665,3665], [3666,3666], [3667,3667], [3668,3668], [3670,3670], [3671,3671], [3672,3672], [3673,3673], [3674,3674], [3676,3676], [3677,3677], [3678,3678], [3679,3679], [3680,3680], [3681,3681], [3682,3682], [3683,3683], [3684,3684], [3685,3685], [3686,3686], [3687,3687], [3688,3688], [3689,3689], [3690,3690], [3691,3691], [3692,3692], [3693,3693], [3694,3694], [3695,3695], [3696,3696], [3697,3697], [3698,3698], [3699,3699], [3700,3700], [3701,3701], [3702,3702], [3703,3703], [3704,3704], [3705,3705], [3706,3706], [3707,3707], [3708,3708], [3709,3709], [3710,3710], [3711,3711], [3712,3712], [3713,3713], [3714,3714], [3715,3715], [3960,3960], [3961,3961], [3962,3962], [3963,3963], [3964,3964], [3965,3965], [3966,3966], [3967,3967], [3968,3968], [3978,3978], [3979,3979], [3980,3980], [3981,3981], [3982,3982], [3983,3983], [3984,3984], [3985,3985], [3986,3986], [3987,3987], [4208,4208], [4209,4209], [4210,4210], [4211,4211], [4212,4212], [4304,4304], [4305,4305], [4306,4306], [4307,4307], [4308,4308], [4866,4866], [4867,4867], [4868,4868], [4869,4869], [4870,4870], [4871,4871], [4872,4872], [4873,4873], [4874,4874], [4875,4875], keep order:false
└─HashAgg_11(Probe) 21.53 cop[tikv] group by:test.dt.ds, test.dt.p1, test.dt.p2, test.dt.p3, test.dt.p4, test.dt.p5, test.dt.p6_md5, test.dt.p7_md5, funcs:count(test.dt.dic)->Column#32, funcs:firstrow(test.dt.ds2)->Column#34
└─Selection_15 21.56 cop[tikv] ge(test.dt.ds, 2016-09-01 00:00:00.000000), le(test.dt.ds, 2016-11-03 00:00:00.000000)
└─TableRowIDScan_14 128.32 cop[tikv] table:dt keep order:false
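This is the same query as in explain_complex.result, but after load stats the row counts are no longer guessed: stats:pseudo disappears and the estimates drop from 2650.00/66.25 pseudo rows to 128.32/21.56, driven by the loaded histograms. On a live cluster the equivalent statistics would normally come from ANALYZE rather than a serialized JSON dump:
-- Collect real statistics instead of loading a dump (standard TiDB statement).
ANALYZE TABLE dt;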
explain select gad.id as gid,sdk.id as sid,gad.aid as aid,gad.cm as cm,sdk.dic as dic,sdk.ip as ip, sdk.t as t, gad.p1 as p1, gad.p2 as p2, gad.p3 as p3, gad.p4 as p4, gad.p5 as p5, gad.p6_md5 as p6, gad.p7_md5 as p7, gad.ext as ext, gad.t as gtime from st gad join (select id, aid, pt, dic, ip, t from dd where pt = 'android' and bm = 0 and t > 1478143908) sdk on gad.aid = sdk.aid and gad.ip = sdk.ip and sdk.t > gad.t where gad.t > 1478143908 and gad.bm = 0 and gad.pt = 'android' group by gad.aid, sdk.dic limit 2500;
id estRows task access object operator info
Projection_13 424.00 root test.st.id, test.dd.id, test.st.aid, test.st.cm, test.dd.dic, test.dd.ip, test.dd.t, test.st.p1, test.st.p2, test.st.p3, test.st.p4, test.st.p5, test.st.p6_md5, test.st.p7_md5, test.st.ext, test.st.t
└─Limit_16 424.00 root offset:0, count:2500
└─HashAgg_19 424.00 root group by:test.dd.dic, test.st.aid, funcs:firstrow(test.st.id)->test.st.id, funcs:firstrow(test.st.aid)->test.st.aid, funcs:firstrow(test.st.cm)->test.st.cm, funcs:firstrow(test.st.p1)->test.st.p1, funcs:firstrow(test.st.p2)->test.st.p2, funcs:firstrow(test.st.p3)->test.st.p3, funcs:firstrow(test.st.p4)->test.st.p4, funcs:firstrow(test.st.p5)->test.st.p5, funcs:firstrow(test.st.p6_md5)->test.st.p6_md5, funcs:firstrow(test.st.p7_md5)->test.st.p7_md5, funcs:firstrow(test.st.ext)->test.st.ext, funcs:firstrow(test.st.t)->test.st.t, funcs:firstrow(test.dd.id)->test.dd.id, funcs:firstrow(test.dd.dic)->test.dd.dic, funcs:firstrow(test.dd.ip)->test.dd.ip, funcs:firstrow(test.dd.t)->test.dd.t
└─HashJoin_28 424.00 root inner join, equal:[eq(test.st.aid, test.dd.aid) eq(test.st.ip, test.dd.ip)], other cond:gt(test.dd.t, test.st.t)
├─TableReader_31(Build) 424.00 root data:Selection_30
│ └─Selection_30 424.00 cop[tikv] eq(test.st.bm, 0), eq(test.st.pt, "android"), gt(test.st.t, 1478143908), not(isnull(test.st.ip))
│ └─TableRangeScan_29 1999.00 cop[tikv] table:gad range:[0,+inf], keep order:false
└─TableReader_38(Probe) 455.80 root data:Selection_37
└─Selection_37 455.80 cop[tikv] eq(test.dd.bm, 0), eq(test.dd.pt, "android"), gt(test.dd.t, 1478143908), not(isnull(test.dd.ip)), not(isnull(test.dd.t))
└─TableRangeScan_36 2000.00 cop[tikv] table:dd range:[0,+inf], keep order:false
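Real statistics also change the join strategy: the pseudo-stats plan probed both tables through t(t) index lookups, while here the optimizer expects only about 2000 rows per table and reads them with TableRangeScan plus a hash join, which is cheaper when the time filter is barely selective. Forcing the index back makes the contrast easy to reproduce:
-- Compare against the pseudo-stats shape by forcing the t(t) index.
EXPLAIN SELECT count(*) FROM st USE INDEX (t) WHERE t > 1478143908;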
explain select gad.id as gid,sdk.id as sid,gad.aid as aid,gad.cm as cm,sdk.dic as dic,sdk.ip as ip, sdk.t as t, gad.p1 as p1, gad.p2 as p2, gad.p3 as p3, gad.p4 as p4, gad.p5 as p5, gad.p6_md5 as p6, gad.p7_md5 as p7, gad.ext as ext from st gad join dd sdk on gad.aid = sdk.aid and gad.dic = sdk.mac and gad.t < sdk.t where gad.t > 1477971479 and gad.bm = 0 and gad.pt = 'ios' and gad.dit = 'mac' and sdk.t > 1477971479 and sdk.bm = 0 and sdk.pt = 'ios' limit 3000;
id estRows task access object operator info
Projection_10 170.34 root test.st.id, test.dd.id, test.st.aid, test.st.cm, test.dd.dic, test.dd.ip, test.dd.t, test.st.p1, test.st.p2, test.st.p3, test.st.p4, test.st.p5, test.st.p6_md5, test.st.p7_md5, test.st.ext
└─Limit_13 170.34 root offset:0, count:3000
└─IndexJoin_18 170.34 root inner join, inner:IndexLookUp_17, outer key:test.st.aid, inner key:test.dd.aid, equal cond:eq(test.st.aid, test.dd.aid), eq(test.st.dic, test.dd.mac), other cond:lt(test.st.t, test.dd.t)
├─TableReader_25(Build) 170.34 root data:Selection_24
│ └─Selection_24 170.34 cop[tikv] eq(test.st.bm, 0), eq(test.st.dit, "mac"), eq(test.st.pt, "ios"), gt(test.st.t, 1477971479), not(isnull(test.st.dic))
│ └─TableRangeScan_23 1999.00 cop[tikv] table:gad range:[0,+inf], keep order:false
└─IndexLookUp_17(Probe) 1.00 root
├─IndexRangeScan_14(Build) 3.93 cop[tikv] table:sdk, index:aid(aid, dic) range: decided by [eq(test.dd.aid, test.st.aid)], keep order:false
└─Selection_16(Probe) 1.00 cop[tikv] eq(test.dd.bm, 0), eq(test.dd.pt, "ios"), gt(test.dd.t, 1477971479), not(isnull(test.dd.mac)), not(isnull(test.dd.t))
└─TableRowIDScan_15 3.93 cop[tikv] table:sdk keep order:false
explain SELECT cm, p1, p2, p3, p4, p5, p6_md5, p7_md5, count(1) as click_pv, count(DISTINCT ip) as click_ip FROM st WHERE (t between 1478188800 and 1478275200) and aid='cn.sbkcq' and pt='android' GROUP BY cm, p1, p2, p3, p4, p5, p6_md5, p7_md5;
id estRows task access object operator info
Projection_5 39.28 root test.st.cm, test.st.p1, test.st.p2, test.st.p3, test.st.p4, test.st.p5, test.st.p6_md5, test.st.p7_md5, Column#20, Column#21
└─HashAgg_6 39.28 root group by:test.st.cm, test.st.p1, test.st.p2, test.st.p3, test.st.p4, test.st.p5, test.st.p6_md5, test.st.p7_md5, funcs:count(1)->Column#20, funcs:count(distinct test.st.ip)->Column#21, funcs:firstrow(test.st.cm)->test.st.cm, funcs:firstrow(test.st.p1)->test.st.p1, funcs:firstrow(test.st.p2)->test.st.p2, funcs:firstrow(test.st.p3)->test.st.p3, funcs:firstrow(test.st.p4)->test.st.p4, funcs:firstrow(test.st.p5)->test.st.p5, funcs:firstrow(test.st.p6_md5)->test.st.p6_md5, funcs:firstrow(test.st.p7_md5)->test.st.p7_md5
└─IndexLookUp_13 39.38 root
├─IndexRangeScan_10(Build) 160.23 cop[tikv] table:st, index:t(t) range:[1478188800,1478275200], keep order:false
└─Selection_12(Probe) 39.38 cop[tikv] eq(test.st.aid, "cn.sbkcq"), eq(test.st.pt, "android")
└─TableRowIDScan_11 160.23 cop[tikv] table:st keep order:false
explain select dt.id as id, dt.aid as aid, dt.pt as pt, dt.dic as dic, dt.cm as cm, rr.gid as gid, rr.acd as acd, rr.t as t,dt.p1 as p1, dt.p2 as p2, dt.p3 as p3, dt.p4 as p4, dt.p5 as p5, dt.p6_md5 as p6, dt.p7_md5 as p7 from dt dt join rr rr on (rr.pt = 'ios' and rr.t > 1478185592 and dt.aid = rr.aid and dt.dic = rr.dic) where dt.pt = 'ios' and dt.t > 1478185592 and dt.bm = 0 limit 2000;
id estRows task access object operator info
Projection_10 428.32 root test.dt.id, test.dt.aid, test.dt.pt, test.dt.dic, test.dt.cm, test.rr.gid, test.rr.acd, test.rr.t, test.dt.p1, test.dt.p2, test.dt.p3, test.dt.p4, test.dt.p5, test.dt.p6_md5, test.dt.p7_md5
└─Limit_13 428.32 root offset:0, count:2000
└─IndexJoin_19 428.32 root inner join, inner:IndexLookUp_18, outer key:test.dt.aid, test.dt.dic, inner key:test.rr.aid, test.rr.dic, equal cond:eq(test.dt.aid, test.rr.aid), eq(test.dt.dic, test.rr.dic)
├─TableReader_44(Build) 428.32 root data:Selection_43
│ └─Selection_43 428.32 cop[tikv] eq(test.dt.bm, 0), eq(test.dt.pt, "ios"), gt(test.dt.t, 1478185592), not(isnull(test.dt.dic))
│ └─TableRangeScan_42 2000.00 cop[tikv] table:dt range:[0,+inf], keep order:false
└─IndexLookUp_18(Probe) 1.00 root
├─IndexRangeScan_15(Build) 1.00 cop[tikv] table:rr, index:PRIMARY(aid, dic) range: decided by [eq(test.rr.aid, test.dt.aid) eq(test.rr.dic, test.dt.dic)], keep order:false
└─Selection_17(Probe) 1.00 cop[tikv] eq(test.rr.pt, "ios"), gt(test.rr.t, 1478185592)
└─TableRowIDScan_16 1.00 cop[tikv] table:rr keep order:false
explain select pc,cr,count(DISTINCT uid) as pay_users,count(oid) as pay_times,sum(am) as am from pp where ps=2 and ppt>=1478188800 and ppt<1478275200 and pi in ('510017','520017') and uid in ('18089709','18090780') group by pc,cr;
id estRows task access object operator info
Projection_5 207.86 root test.pp.pc, test.pp.cr, Column#22, Column#23, Column#24
└─HashAgg_6 207.86 root group by:test.pp.cr, test.pp.pc, funcs:count(distinct test.pp.uid)->Column#22, funcs:count(test.pp.oid)->Column#23, funcs:sum(test.pp.am)->Column#24, funcs:firstrow(test.pp.pc)->test.pp.pc, funcs:firstrow(test.pp.cr)->test.pp.cr
└─IndexLookUp_17 207.86 root
├─IndexRangeScan_14(Build) 627.00 cop[tikv] table:pp, index:ps(ps) range:[2,2], keep order:false
└─Selection_16(Probe) 207.86 cop[tikv] ge(test.pp.ppt, 1478188800), in(test.pp.pi, 510017, 520017), in(test.pp.uid, 18089709, 18090780), lt(test.pp.ppt, 1478275200)
└─TableRowIDScan_15 627.00 cop[tikv] table:pp keep order:false
drop table if exists tbl_001;
CREATE TABLE tbl_001 (a int, b int);
load stats 's/explain_complex_stats_tbl_001.json';
drop table if exists tbl_002;
CREATE TABLE tbl_002 (a int, b int);
load stats 's/explain_complex_stats_tbl_002.json';
drop table if exists tbl_003;
CREATE TABLE tbl_003 (a int, b int);
load stats 's/explain_complex_stats_tbl_003.json';
drop table if exists tbl_004;
CREATE TABLE tbl_004 (a int, b int);
load stats 's/explain_complex_stats_tbl_004.json';
drop table if exists tbl_005;
CREATE TABLE tbl_005 (a int, b int);
load stats 's/explain_complex_stats_tbl_005.json';
drop table if exists tbl_006;
CREATE TABLE tbl_006 (a int, b int);
load stats 's/explain_complex_stats_tbl_006.json';
drop table if exists tbl_007;
CREATE TABLE tbl_007 (a int, b int);
load stats 's/explain_complex_stats_tbl_007.json';
drop table if exists tbl_008;
CREATE TABLE tbl_008 (a int, b int);
load stats 's/explain_complex_stats_tbl_008.json';
drop table if exists tbl_009;
CREATE TABLE tbl_009 (a int, b int);
load stats 's/explain_complex_stats_tbl_009.json';
explain select sum(a) from (select * from tbl_001 union all select * from tbl_002 union all select * from tbl_003 union all select * from tbl_004 union all select * from tbl_005 union all select * from tbl_006 union all select * from tbl_007 union all select * from tbl_008 union all select * from tbl_009) x group by b;
id estRows task access object operator info
HashAgg_34 18000.00 root group by:Column#32, funcs:sum(Column#31)->Column#30
└─Projection_63 18000.00 root cast(Column#28, decimal(65,0) BINARY)->Column#31, Column#29
└─Union_35 18000.00 root
├─TableReader_38 2000.00 root data:TableFullScan_37
│ └─TableFullScan_37 2000.00 cop[tikv] table:tbl_001 keep order:false
├─TableReader_41 2000.00 root data:TableFullScan_40
│ └─TableFullScan_40 2000.00 cop[tikv] table:tbl_002 keep order:false
├─TableReader_44 2000.00 root data:TableFullScan_43
│ └─TableFullScan_43 2000.00 cop[tikv] table:tbl_003 keep order:false
├─TableReader_47 2000.00 root data:TableFullScan_46
│ └─TableFullScan_46 2000.00 cop[tikv] table:tbl_004 keep order:false
├─TableReader_50 2000.00 root data:TableFullScan_49
│ └─TableFullScan_49 2000.00 cop[tikv] table:tbl_005 keep order:false
├─TableReader_53 2000.00 root data:TableFullScan_52
│ └─TableFullScan_52 2000.00 cop[tikv] table:tbl_006 keep order:false
├─TableReader_56 2000.00 root data:TableFullScan_55
│ └─TableFullScan_55 2000.00 cop[tikv] table:tbl_007 keep order:false
├─TableReader_59 2000.00 root data:TableFullScan_58
│ └─TableFullScan_58 2000.00 cop[tikv] table:tbl_008 keep order:false
└─TableReader_62 2000.00 root data:TableFullScan_61
└─TableFullScan_61 2000.00 cop[tikv] table:tbl_009 keep order:false

786
cmd/explaintest/r/explain_easy.result

@@ -0,0 +1,786 @@
use test;
drop table if exists t1, t2, t3, t4;
create table t1 (c1 int primary key, c2 int, c3 int, index c2 (c2));
create table t2 (c1 int unique, c2 int);
insert into t2 values(1, 0), (2, 1);
create table t3 (a bigint, b bigint, c bigint, d bigint);
create table t4 (a int, b int, c int, index idx(a, b), primary key(a));
create index expr_idx on t4((a+b+1));
set @@session.tidb_opt_agg_push_down = 1;
set @@session.tidb_opt_insubq_to_join_and_agg=1;
set @@session.tidb_hashagg_partial_concurrency = 1;
set @@session.tidb_hashagg_final_concurrency = 1;
set @@session.tidb_window_concurrency = 1;
explain select * from t3 where exists (select s.a from t3 s having sum(s.a) = t3.a );
id estRows task access object operator info
HashJoin_12 8000.00 root semi join, equal:[eq(Column#13, Column#11)]
├─StreamAgg_27(Build) 1.00 root funcs:sum(Column#16)->Column#11
│ └─TableReader_28 1.00 root data:StreamAgg_19
│ └─StreamAgg_19 1.00 cop[tikv] funcs:sum(test.t3.a)->Column#16
│ └─TableFullScan_26 10000.00 cop[tikv] table:s keep order:false, stats:pseudo
└─Projection_13(Probe) 10000.00 root test.t3.a, test.t3.b, test.t3.c, test.t3.d, cast(test.t3.a, decimal(20,0) BINARY)->Column#13
└─TableReader_15 10000.00 root data:TableFullScan_14
└─TableFullScan_14 10000.00 cop[tikv] table:t3 keep order:false, stats:pseudo
explain select * from t1;
id estRows task access object operator info
TableReader_5 10000.00 root data:TableFullScan_4
└─TableFullScan_4 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo
explain select * from t1 order by c2;
id estRows task access object operator info
IndexLookUp_12 10000.00 root
├─IndexFullScan_10(Build) 10000.00 cop[tikv] table:t1, index:c2(c2) keep order:true, stats:pseudo
└─TableRowIDScan_11(Probe) 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo
explain select * from t2 order by c2;
id estRows task access object operator info
Sort_4 10000.00 root test.t2.c2:asc
└─TableReader_8 10000.00 root data:TableFullScan_7
└─TableFullScan_7 10000.00 cop[tikv] table:t2 keep order:false, stats:pseudo
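These two ORDER BY plans differ only in index availability: t1 has an index on c2, so its IndexLookUp returns rows with keep order:true and needs no Sort, while t2 has no index on c2 and requires an explicit root Sort over a full scan. A sketch only; the fixtures below assume t2 keeps its original schema:
-- Adding an index on t2.c2 would let the Sort disappear here as well.
ALTER TABLE t2 ADD INDEX c2 (c2);
EXPLAIN SELECT * FROM t2 ORDER BY c2;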
explain select * from t1 where t1.c1 > 0;
id estRows task access object operator info
TableReader_6 3333.33 root data:TableRangeScan_5
└─TableRangeScan_5 3333.33 cop[tikv] table:t1 range:(0,+inf], keep order:false, stats:pseudo
explain select t1.c1, t1.c2 from t1 where t1.c2 = 1;
id estRows task access object operator info
IndexReader_6 10.00 root index:IndexRangeScan_5
└─IndexRangeScan_5 10.00 cop[tikv] table:t1, index:c2(c2) range:[1,1], keep order:false, stats:pseudo
explain select * from t1 left join t2 on t1.c2 = t2.c1 where t1.c1 > 1;
id estRows task access object operator info
HashJoin_16 4166.67 root left outer join, equal:[eq(test.t1.c2, test.t2.c1)]
├─TableReader_26(Build) 3333.33 root data:TableRangeScan_25
│ └─TableRangeScan_25 3333.33 cop[tikv] table:t1 range:(1,+inf], keep order:false, stats:pseudo
└─TableReader_29(Probe) 9990.00 root data:Selection_28
└─Selection_28 9990.00 cop[tikv] not(isnull(test.t2.c1))
└─TableFullScan_27 10000.00 cop[tikv] table:t2 keep order:false, stats:pseudo
explain update t1 set t1.c2 = 2 where t1.c1 = 1;
id estRows task access object operator info
Update_2 N/A root N/A
└─Point_Get_1 1.00 root table:t1 handle:1
explain delete from t1 where t1.c2 = 1;
id estRows task access object operator info
Delete_4 N/A root N/A
└─IndexLookUp_11 10.00 root
├─IndexRangeScan_9(Build) 10.00 cop[tikv] table:t1, index:c2(c2) range:[1,1], keep order:false, stats:pseudo
└─TableRowIDScan_10(Probe) 10.00 cop[tikv] table:t1 keep order:false, stats:pseudo
explain select count(b.c2) from t1 a, t2 b where a.c1 = b.c2 group by a.c1;
id estRows task access object operator info
Projection_11 9990.00 root cast(Column#8, bigint(21) BINARY)->Column#7
└─HashJoin_17 9990.00 root inner join, equal:[eq(test.t1.c1, test.t2.c2)]
├─HashAgg_24(Build) 7992.00 root group by:test.t2.c2, funcs:count(Column#9)->Column#8, funcs:firstrow(test.t2.c2)->test.t2.c2
│ └─TableReader_25 7992.00 root data:HashAgg_19
│ └─HashAgg_19 7992.00 cop[tikv] group by:test.t2.c2, funcs:count(test.t2.c2)->Column#9
│ └─Selection_23 9990.00 cop[tikv] not(isnull(test.t2.c2))
│ └─TableFullScan_22 10000.00 cop[tikv] table:b keep order:false, stats:pseudo
└─TableReader_30(Probe) 10000.00 root data:TableFullScan_29
└─TableFullScan_29 10000.00 cop[tikv] table:a keep order:false, stats:pseudo
explain select * from t2 order by t2.c2 limit 0, 1;
id estRows task access object operator info
TopN_7 1.00 root test.t2.c2:asc, offset:0, count:1
└─TableReader_15 1.00 root data:TopN_14
└─TopN_14 1.00 cop[tikv] test.t2.c2:asc, offset:0, count:1
└─TableFullScan_13 10000.00 cop[tikv] table:t2 keep order:false, stats:pseudo
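ORDER BY plus LIMIT folds into a TopN operator, and the TopN is pushed into TiKV: each coprocessor task returns at most its top offset+count rows and the root TopN merges them. With a bare LIMIT and no ORDER BY, a plain Limit is pushed instead:
-- No ordering requirement, so a Limit (not TopN) goes to the coprocessor.
EXPLAIN SELECT * FROM t2 LIMIT 1;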
explain select * from t1 where c1 > 1 and c2 = 1 and c3 < 1;
id estRows task access object operator info
IndexLookUp_11 11.08 root
├─IndexRangeScan_8(Build) 33.33 cop[tikv] table:t1, index:c2(c2) range:(1 1,1 +inf], keep order:false, stats:pseudo
└─Selection_10(Probe) 11.08 cop[tikv] lt(test.t1.c3, 1)
└─TableRowIDScan_9 33.33 cop[tikv] table:t1 keep order:false, stats:pseudo
explain select * from t1 where c1 = 1 and c2 > 1;
id estRows task access object operator info
Selection_6 0.33 root gt(test.t1.c2, 1)
└─Point_Get_5 1.00 root table:t1 handle:1
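Equality on the integer primary key collapses the access path to a Point_Get, and the leftover predicate c2 > 1 is applied as a root Selection over that single row; the 0.33 estimate comes from the default selectivity assumed for a range predicate, not from the one-row fact. The same shape appears for any extra filter on top of a handle lookup:
-- A hypothetical variant: the c3 filter also ends up above the Point_Get.
EXPLAIN SELECT * FROM t1 WHERE c1 = 1 AND c3 < 1;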
explain select sum(t1.c1 in (select c1 from t2)) from t1;
id estRows task access object operator info
StreamAgg_12 1.00 root funcs:sum(Column#10)->Column#8
└─Projection_23 10000.00 root cast(Column#7, decimal(65,0) BINARY)->Column#10
└─HashJoin_22 10000.00 root CARTESIAN left outer semi join, other cond:eq(test.t1.c1, test.t2.c1)
├─IndexReader_21(Build) 10000.00 root index:IndexFullScan_20
│ └─IndexFullScan_20 10000.00 cop[tikv] table:t2, index:c1(c1) keep order:false, stats:pseudo
└─TableReader_15(Probe) 10000.00 root data:TableFullScan_14
└─TableFullScan_14 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo
explain select c1 from t1 where c1 in (select c2 from t2);
id estRows task access object operator info
HashJoin_15 9990.00 root inner join, equal:[eq(test.t1.c1, test.t2.c2)]
├─HashAgg_19(Build) 7992.00 root group by:test.t2.c2, funcs:firstrow(test.t2.c2)->test.t2.c2
│ └─TableReader_26 9990.00 root data:Selection_25
│ └─Selection_25 9990.00 cop[tikv] not(isnull(test.t2.c2))
│ └─TableFullScan_24 10000.00 cop[tikv] table:t2 keep order:false, stats:pseudo
└─TableReader_28(Probe) 10000.00 root data:TableFullScan_27
└─TableFullScan_27 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo
explain select (select count(1) k from t1 s where s.c1 = t1.c1 having k != 0) from t1;
id estRows task access object operator info
Projection_12 10000.00 root ifnull(Column#7, 0)->Column#7
└─MergeJoin_13 10000.00 root left outer join, left key:test.t1.c1, right key:test.t1.c1
├─Projection_18(Build) 8000.00 root 1->Column#7, test.t1.c1
│ └─TableReader_20 10000.00 root data:TableFullScan_19
│ └─TableFullScan_19 10000.00 cop[tikv] table:s keep order:true, stats:pseudo
└─TableReader_17(Probe) 10000.00 root data:TableFullScan_16
└─TableFullScan_16 10000.00 cop[tikv] table:t1 keep order:true, stats:pseudo
explain select * from information_schema.columns;
id estRows task access object operator info
MemTableScan_4 10000.00 root table:COLUMNS
explain select c2 = (select c2 from t2 where t1.c1 = t2.c1 order by c1 limit 1) from t1;
id estRows task access object operator info
Projection_12 10000.00 root eq(test.t1.c2, test.t2.c2)->Column#8
└─Apply_14 10000.00 root CARTESIAN left outer join
├─TableReader_16(Build) 10000.00 root data:TableFullScan_15
│ └─TableFullScan_15 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo
└─Projection_43(Probe) 1.00 root test.t2.c1, test.t2.c2
└─IndexLookUp_42 1.00 root limit embedded(offset:0, count:1)
├─Limit_41(Build) 1.00 cop[tikv] offset:0, count:1
│ └─IndexRangeScan_39 1.00 cop[tikv] table:t2, index:c1(c1) range: decided by [eq(test.t1.c1, test.t2.c1)], keep order:true, stats:pseudo
└─TableRowIDScan_40(Probe) 1.00 cop[tikv] table:t2 keep order:false, stats:pseudo
explain select * from t1 order by c1 desc limit 1;
id estRows task access object operator info
Limit_10 1.00 root offset:0, count:1
└─TableReader_20 1.00 root data:Limit_19
└─Limit_19 1.00 cop[tikv] offset:0, count:1
└─TableFullScan_18 1.00 cop[tikv] table:t1 keep order:true, desc, stats:pseudo
explain select * from t4 use index(idx) where a > 1 and b > 1 and c > 1 limit 1;
id estRows task access object operator info
Limit_9 1.00 root offset:0, count:1
└─IndexLookUp_16 1.00 root
├─Selection_13(Build) 3.00 cop[tikv] gt(test.t4.b, 1)
│ └─IndexRangeScan_11 9.00 cop[tikv] table:t4, index:idx(a, b) range:(1,+inf], keep order:false, stats:pseudo
└─Limit_15(Probe) 1.00 cop[tikv] offset:0, count:1
└─Selection_14 1.00 cop[tikv] gt(test.t4.c, 1)
└─TableRowIDScan_12 3.00 cop[tikv] table:t4 keep order:false, stats:pseudo
explain select * from t4 where a > 1 and c > 1 limit 1;
id estRows task access object operator info
Limit_8 1.00 root offset:0, count:1
└─TableReader_14 1.00 root data:Limit_13
└─Limit_13 1.00 cop[tikv] offset:0, count:1
└─Selection_12 1.00 cop[tikv] gt(test.t4.c, 1)
└─TableRangeScan_11 3.00 cop[tikv] table:t4 range:(1,+inf], keep order:false, stats:pseudo
explain select ifnull(null, t1.c1) from t1;
id estRows task access object operator info
TableReader_5 10000.00 root data:TableFullScan_4
└─TableFullScan_4 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo
explain select if(10, t1.c1, t1.c2) from t1;
id estRows task access object operator info
TableReader_5 10000.00 root data:TableFullScan_4
└─TableFullScan_4 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo
explain select c1 from t2 union select c1 from t2 union all select c1 from t2;
id estRows task access object operator info
Union_17 26000.00 root
├─HashAgg_21 16000.00 root group by:Column#10, funcs:firstrow(Column#12)->Column#10
│ └─Union_22 16000.00 root
│ ├─StreamAgg_27 8000.00 root group by:test.t2.c1, funcs:firstrow(test.t2.c1)->Column#12, funcs:firstrow(test.t2.c1)->Column#10
│ │ └─IndexReader_40 10000.00 root index:IndexFullScan_39
│ │ └─IndexFullScan_39 10000.00 cop[tikv] table:t2, index:c1(c1) keep order:true, stats:pseudo
│ └─StreamAgg_45 8000.00 root group by:test.t2.c1, funcs:firstrow(test.t2.c1)->Column#12, funcs:firstrow(test.t2.c1)->Column#10
│ └─IndexReader_58 10000.00 root index:IndexFullScan_57
│ └─IndexFullScan_57 10000.00 cop[tikv] table:t2, index:c1(c1) keep order:true, stats:pseudo
└─IndexReader_63 10000.00 root index:IndexFullScan_62
└─IndexFullScan_62 10000.00 cop[tikv] table:t2, index:c1(c1) keep order:false, stats:pseudo
explain select c1 from t2 union all select c1 from t2 union select c1 from t2;
id estRows task access object operator info
HashAgg_18 24000.00 root group by:Column#10, funcs:firstrow(Column#11)->Column#10
└─Union_19 24000.00 root
├─StreamAgg_24 8000.00 root group by:test.t2.c1, funcs:firstrow(test.t2.c1)->Column#11, funcs:firstrow(test.t2.c1)->Column#10
│ └─IndexReader_37 10000.00 root index:IndexFullScan_36
│ └─IndexFullScan_36 10000.00 cop[tikv] table:t2, index:c1(c1) keep order:true, stats:pseudo
├─StreamAgg_42 8000.00 root group by:test.t2.c1, funcs:firstrow(test.t2.c1)->Column#11, funcs:firstrow(test.t2.c1)->Column#10
│ └─IndexReader_55 10000.00 root index:IndexFullScan_54
│ └─IndexFullScan_54 10000.00 cop[tikv] table:t2, index:c1(c1) keep order:true, stats:pseudo
└─StreamAgg_60 8000.00 root group by:test.t2.c1, funcs:firstrow(test.t2.c1)->Column#11, funcs:firstrow(test.t2.c1)->Column#10
└─IndexReader_73 10000.00 root index:IndexFullScan_72
└─IndexFullScan_72 10000.00 cop[tikv] table:t2, index:c1(c1) keep order:true, stats:pseudo
select * from information_schema.tidb_indexes where table_name='t4';
TABLE_SCHEMA TABLE_NAME NON_UNIQUE KEY_NAME SEQ_IN_INDEX COLUMN_NAME SUB_PART INDEX_COMMENT Expression INDEX_ID
test t4 0 PRIMARY 1 a NULL NULL 0
test t4 1 idx 1 a NULL NULL 1
test t4 1 idx 2 b NULL NULL 1
test t4 1 expr_idx 1 NULL NULL (`a` + `b` + 1) 2
explain select count(1) from (select count(1) from (select * from t1 where c3 = 100) k) k2;
id estRows task access object operator info
StreamAgg_13 1.00 root funcs:count(1)->Column#5
└─StreamAgg_28 1.00 root funcs:firstrow(Column#9)->Column#7
└─TableReader_29 1.00 root data:StreamAgg_17
└─StreamAgg_17 1.00 cop[tikv] funcs:firstrow(1)->Column#9
└─Selection_27 10.00 cop[tikv] eq(test.t1.c3, 100)
└─TableFullScan_26 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo
explain select 1 from (select count(c2), count(c3) from t1) k;
id estRows task access object operator info
Projection_5 1.00 root 1->Column#6
└─StreamAgg_21 1.00 root funcs:firstrow(Column#14)->Column#9
└─TableReader_22 1.00 root data:StreamAgg_9
└─StreamAgg_9 1.00 cop[tikv] funcs:firstrow(1)->Column#14
└─TableFullScan_19 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo
explain select count(1) from (select max(c2), count(c3) as m from t1) k;
id estRows task access object operator info
StreamAgg_11 1.00 root funcs:count(1)->Column#6
└─StreamAgg_27 1.00 root funcs:firstrow(Column#13)->Column#8
└─TableReader_28 1.00 root data:StreamAgg_15
└─StreamAgg_15 1.00 cop[tikv] funcs:firstrow(1)->Column#13
└─TableFullScan_25 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo
explain select count(1) from (select count(c2) from t1 group by c3) k;
id estRows task access object operator info
StreamAgg_11 1.00 root funcs:count(1)->Column#5
└─HashAgg_22 8000.00 root group by:test.t1.c3, funcs:firstrow(1)->Column#7
└─TableReader_19 10000.00 root data:TableFullScan_18
└─TableFullScan_18 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo
set @@session.tidb_opt_insubq_to_join_and_agg=0;
explain select sum(t1.c1 in (select c1 from t2)) from t1;
id estRows task access object operator info
StreamAgg_12 1.00 root funcs:sum(Column#10)->Column#8
└─Projection_23 10000.00 root cast(Column#7, decimal(65,0) BINARY)->Column#10
└─HashJoin_22 10000.00 root CARTESIAN left outer semi join, other cond:eq(test.t1.c1, test.t2.c1)
├─IndexReader_21(Build) 10000.00 root index:IndexFullScan_20
│ └─IndexFullScan_20 10000.00 cop[tikv] table:t2, index:c1(c1) keep order:false, stats:pseudo
└─TableReader_15(Probe) 10000.00 root data:TableFullScan_14
└─TableFullScan_14 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo
explain select 1 in (select c2 from t2) from t1;
id estRows task access object operator info
HashJoin_7 10000.00 root CARTESIAN left outer semi join, other cond:eq(1, test.t2.c2)
├─TableReader_13(Build) 10000.00 root data:TableFullScan_12
│ └─TableFullScan_12 10000.00 cop[tikv] table:t2 keep order:false, stats:pseudo
└─TableReader_9(Probe) 10000.00 root data:TableFullScan_8
└─TableFullScan_8 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo
explain select sum(6 in (select c2 from t2)) from t1;
id estRows task access object operator info
StreamAgg_12 1.00 root funcs:sum(Column#10)->Column#8
└─Projection_21 10000.00 root cast(Column#7, decimal(65,0) BINARY)->Column#10
└─HashJoin_20 10000.00 root CARTESIAN left outer semi join, other cond:eq(6, test.t2.c2)
├─TableReader_19(Build) 10000.00 root data:TableFullScan_18
│ └─TableFullScan_18 10000.00 cop[tikv] table:t2 keep order:false, stats:pseudo
└─TableReader_15(Probe) 10000.00 root data:TableFullScan_14
└─TableFullScan_14 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo
explain format="dot" select sum(t1.c1 in (select c1 from t2)) from t1;
dot contents
digraph StreamAgg_12 {
subgraph cluster12{
node [style=filled, color=lightgrey]
color=black
label = "root"
"StreamAgg_12" -> "Projection_23"
"Projection_23" -> "HashJoin_22"
"HashJoin_22" -> "TableReader_15"
"HashJoin_22" -> "IndexReader_21"
}
subgraph cluster14{
node [style=filled, color=lightgrey]
color=black
label = "cop"
"TableFullScan_14"
}
subgraph cluster20{
node [style=filled, color=lightgrey]
color=black
label = "cop"
"IndexFullScan_20"
}
"TableReader_15" -> "TableFullScan_14"
"IndexReader_21" -> "IndexFullScan_20"
}
explain format="dot" select 1 in (select c2 from t2) from t1;
dot contents
digraph HashJoin_7 {
subgraph cluster7{
node [style=filled, color=lightgrey]
color=black
label = "root"
"HashJoin_7" -> "TableReader_9"
"HashJoin_7" -> "TableReader_13"
}
subgraph cluster8{
node [style=filled, color=lightgrey]
color=black
label = "cop"
"TableFullScan_8"
}
subgraph cluster12{
node [style=filled, color=lightgrey]
color=black
label = "cop"
"TableFullScan_12"
}
"TableReader_9" -> "TableFullScan_8"
"TableReader_13" -> "TableFullScan_12"
}
drop table if exists t1, t2, t3, t4;
drop table if exists t;
create table t(a int primary key, b int, c int, index idx(b));
explain select t.c in (select count(*) from t s ignore index(idx), t t1 where s.a = t.a and s.a = t1.a) from t;
id estRows task access object operator info
Projection_11 10000.00 root Column#11
└─Apply_13 10000.00 root CARTESIAN left outer semi join, other cond:eq(test.t.c, Column#10)
├─TableReader_15(Build) 10000.00 root data:TableFullScan_14
│ └─TableFullScan_14 10000.00 cop[tikv] table:t keep order:false, stats:pseudo
└─StreamAgg_20(Probe) 1.00 root funcs:count(1)->Column#10
└─MergeJoin_44 12.50 root inner join, left key:test.t.a, right key:test.t.a
├─TableReader_39(Build) 1.00 root data:TableRangeScan_38
│ └─TableRangeScan_38 1.00 cop[tikv] table:t1 range: decided by [eq(test.t.a, test.t.a)], keep order:true, stats:pseudo
└─TableReader_37(Probe) 1.00 root data:TableRangeScan_36
└─TableRangeScan_36 1.00 cop[tikv] table:s range: decided by [eq(test.t.a, test.t.a)], keep order:true, stats:pseudo
explain select t.c in (select count(*) from t s use index(idx), t t1 where s.b = t.a and s.a = t1.a) from t;
id estRows task access object operator info
Projection_11 10000.00 root Column#11
└─Apply_13 10000.00 root CARTESIAN left outer semi join, other cond:eq(test.t.c, Column#10)
├─TableReader_15(Build) 10000.00 root data:TableFullScan_14
│ └─TableFullScan_14 10000.00 cop[tikv] table:t keep order:false, stats:pseudo
└─StreamAgg_20(Probe) 1.00 root funcs:count(1)->Column#10
└─IndexJoin_36 12.50 root inner join, inner:TableReader_35, outer key:test.t.a, inner key:test.t.a, equal cond:eq(test.t.a, test.t.a)
├─IndexReader_29(Build) 10.00 root index:IndexRangeScan_28
│ └─IndexRangeScan_28 10.00 cop[tikv] table:s, index:idx(b) range: decided by [eq(test.t.b, test.t.a)], keep order:false, stats:pseudo
└─TableReader_35(Probe) 1.00 root data:TableRangeScan_34
└─TableRangeScan_34 1.00 cop[tikv] table:t1 range: decided by [test.t.a], keep order:false, stats:pseudo
explain select t.c in (select count(*) from t s use index(idx), t t1 where s.b = t.a and s.c = t1.a) from t;
id estRows task access object operator info
Projection_11 10000.00 root Column#11
└─Apply_13 10000.00 root CARTESIAN left outer semi join, other cond:eq(test.t.c, Column#10)
├─TableReader_15(Build) 10000.00 root data:TableFullScan_14
│ └─TableFullScan_14 10000.00 cop[tikv] table:t keep order:false, stats:pseudo
└─StreamAgg_20(Probe) 1.00 root funcs:count(1)->Column#10
└─IndexJoin_38 12.49 root inner join, inner:TableReader_37, outer key:test.t.c, inner key:test.t.a, equal cond:eq(test.t.c, test.t.a)
├─IndexLookUp_31(Build) 9.99 root
│ ├─IndexRangeScan_28(Build) 10.00 cop[tikv] table:s, index:idx(b) range: decided by [eq(test.t.b, test.t.a)], keep order:false, stats:pseudo
│ └─Selection_30(Probe) 9.99 cop[tikv] not(isnull(test.t.c))
│ └─TableRowIDScan_29 10.00 cop[tikv] table:s keep order:false, stats:pseudo
└─TableReader_37(Probe) 1.00 root data:TableRangeScan_36
└─TableRangeScan_36 1.00 cop[tikv] table:t1 range: decided by [test.t.c], keep order:false, stats:pseudo
insert into t values(1, 1, 1), (2, 2 ,2), (3, 3, 3), (4, 3, 4),(5,3,5);
analyze table t;
explain select t.c in (select count(*) from t s, t t1 where s.b = t.a and s.b = 3 and s.a = t1.a) from t;
id estRows task access object operator info
Projection_11 5.00 root Column#11
└─Apply_13 5.00 root CARTESIAN left outer semi join, other cond:eq(test.t.c, Column#10)
├─TableReader_15(Build) 5.00 root data:TableFullScan_14
│ └─TableFullScan_14 5.00 cop[tikv] table:t keep order:false
└─StreamAgg_20(Probe) 1.00 root funcs:count(1)->Column#10
└─MergeJoin_52 2.40 root inner join, left key:test.t.a, right key:test.t.a
├─TableReader_42(Build) 4.00 root data:Selection_41
│ └─Selection_41 4.00 cop[tikv] eq(3, test.t.a)
│ └─TableFullScan_40 5.00 cop[tikv] table:t1 keep order:true
└─IndexReader_39(Probe) 2.40 root index:Selection_38
└─Selection_38 2.40 cop[tikv] eq(3, test.t.a)
└─IndexRangeScan_37 3.00 cop[tikv] table:s, index:idx(b) range:[3,3], keep order:true
explain select t.c in (select count(*) from t s left join t t1 on s.a = t1.a where 3 = t.a and s.b = 3) from t;
id estRows task access object operator info
Projection_10 5.00 root Column#11
└─Apply_12 5.00 root CARTESIAN left outer semi join, other cond:eq(test.t.c, Column#10)
├─TableReader_14(Build) 5.00 root data:TableFullScan_13
│ └─TableFullScan_13 5.00 cop[tikv] table:t keep order:false
└─StreamAgg_19(Probe) 1.00 root funcs:count(1)->Column#10
└─MergeJoin_45 2.40 root left outer join, left key:test.t.a, right key:test.t.a
├─TableReader_35(Build) 4.00 root data:Selection_34
│ └─Selection_34 4.00 cop[tikv] eq(3, test.t.a)
│ └─TableFullScan_33 5.00 cop[tikv] table:t1 keep order:true
└─IndexReader_32(Probe) 2.40 root index:Selection_31
└─Selection_31 2.40 cop[tikv] eq(3, test.t.a)
└─IndexRangeScan_30 3.00 cop[tikv] table:s, index:idx(b) range:[3,3], keep order:true
explain select t.c in (select count(*) from t s right join t t1 on s.a = t1.a where 3 = t.a and t1.b = 3) from t;
id estRows task access object operator info
Projection_10 5.00 root Column#11
└─Apply_12 5.00 root CARTESIAN left outer semi join, other cond:eq(test.t.c, Column#10)
├─TableReader_14(Build) 5.00 root data:TableFullScan_13
│ └─TableFullScan_13 5.00 cop[tikv] table:t keep order:false
└─StreamAgg_19(Probe) 1.00 root funcs:count(1)->Column#10
└─MergeJoin_44 2.40 root right outer join, left key:test.t.a, right key:test.t.a
├─TableReader_31(Build) 4.00 root data:Selection_30
│ └─Selection_30 4.00 cop[tikv] eq(3, test.t.a)
│ └─TableFullScan_29 5.00 cop[tikv] table:s keep order:true
└─IndexReader_34(Probe) 2.40 root index:Selection_33
└─Selection_33 2.40 cop[tikv] eq(3, test.t.a)
└─IndexRangeScan_32 3.00 cop[tikv] table:t1, index:idx(b) range:[3,3], keep order:true
drop table if exists t;
create table t(a int unsigned);
explain select t.a = '123455' from t;
id estRows task access object operator info
Projection_3 10000.00 root eq(test.t.a, 123455)->Column#3
└─TableReader_5 10000.00 root data:TableFullScan_4
└─TableFullScan_4 10000.00 cop[tikv] table:t keep order:false, stats:pseudo
explain select t.a > '123455' from t;
id estRows task access object operator info
Projection_3 10000.00 root gt(test.t.a, 123455)->Column#3
└─TableReader_5 10000.00 root data:TableFullScan_4
└─TableFullScan_4 10000.00 cop[tikv] table:t keep order:false, stats:pseudo
explain select t.a != '123455' from t;
id estRows task access object operator info
Projection_3 10000.00 root ne(test.t.a, 123455)->Column#3
└─TableReader_5 10000.00 root data:TableFullScan_4
└─TableFullScan_4 10000.00 cop[tikv] table:t keep order:false, stats:pseudo
explain select t.a = 12345678912345678998789678687678.111 from t;
id estRows task access object operator info
Projection_3 10000.00 root 0->Column#3
└─TableReader_5 10000.00 root data:TableFullScan_4
└─TableFullScan_4 10000.00 cop[tikv] table:t keep order:false, stats:pseudo
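The last projection folds to the constant 0: the literal 12345678912345678998789678687678.111 can never equal an int unsigned value, so the comparison is replaced at plan time (0->Column#3) and never evaluated per row. One would expect similar folding for any comparison that is provably out of the column's range, for example:
-- An unsigned column can never equal a negative non-integer literal.
EXPLAIN SELECT t.a = -1.5 FROM t;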
drop table if exists t;
create table t(a bigint, b bigint, index idx(a, b));
explain select * from t where a in (1, 2) and a in (1, 3);
id estRows task access object operator info
IndexReader_6 10.00 root index:IndexRangeScan_5
└─IndexRangeScan_5 10.00 cop[tikv] table:t, index:idx(a, b) range:[1,1], keep order:false, stats:pseudo
explain select * from t where b in (1, 2) and b in (1, 3);
id estRows task access object operator info
TableReader_7 10.00 root data:Selection_6
└─Selection_6 10.00 cop[tikv] in(test.t.b, 1, 2), in(test.t.b, 1, 3)
└─TableFullScan_5 10000.00 cop[tikv] table:t keep order:false, stats:pseudo
explain select * from t where a = 1 and a = 1;
id estRows task access object operator info
IndexReader_6 10.00 root index:IndexRangeScan_5
└─IndexRangeScan_5 10.00 cop[tikv] table:t, index:idx(a, b) range:[1,1], keep order:false, stats:pseudo
explain select * from t where a = 1 and a = 2;
id estRows task access object operator info
TableDual_5 0.00 root rows:0
explain select * from t where b = 1 and b = 2;
id estRows task access object operator info
TableDual_5 0.00 root rows:0
explain select * from t t1 join t t2 where t1.b = t2.b and t2.b is null;
id estRows task access object operator info
Projection_7 0.00 root test.t.a, test.t.b, test.t.a, test.t.b
└─HashJoin_9 0.00 root inner join, equal:[eq(test.t.b, test.t.b)]
├─TableReader_12(Build) 0.00 root data:Selection_11
│ └─Selection_11 0.00 cop[tikv] isnull(test.t.b), not(isnull(test.t.b))
│ └─TableFullScan_10 10000.00 cop[tikv] table:t2 keep order:false, stats:pseudo
└─TableReader_18(Probe) 9990.00 root data:Selection_17
└─Selection_17 9990.00 cop[tikv] not(isnull(test.t.b))
└─TableFullScan_16 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo
explain select * from t t1 where not exists (select * from t t2 where t1.b = t2.b);
id estRows task access object operator info
HashJoin_9 8000.00 root anti semi join, equal:[eq(test.t.b, test.t.b)]
├─TableReader_15(Build) 10000.00 root data:TableFullScan_14
│ └─TableFullScan_14 10000.00 cop[tikv] table:t2 keep order:false, stats:pseudo
└─TableReader_11(Probe) 10000.00 root data:TableFullScan_10
└─TableFullScan_10 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo
drop table if exists t;
create table t(a bigint primary key);
explain select * from t where a = 1 and a = 2;
id estRows task access object operator info
TableDual_5 0.00 root rows:0
explain select null or a > 1 from t;
id estRows task access object operator info
Projection_3 10000.00 root or(<nil>, gt(test.t.a, 1))->Column#2
└─TableReader_5 10000.00 root data:TableFullScan_4
└─TableFullScan_4 10000.00 cop[tikv] table:t keep order:false, stats:pseudo
explain select * from t where a = 1 for update;
id estRows task access object operator info
Point_Get_1 1.00 root table:t handle:1, lock
drop table if exists ta, tb;
create table ta (a varchar(20));
create table tb (a varchar(20));
begin;
insert tb values ('1');
explain select * from ta where a = 1;
id estRows task access object operator info
TableReader_7 8000.00 root data:Selection_6
└─Selection_6 8000.00 cop[tikv] eq(cast(test.ta.a), 1)
└─TableFullScan_5 10000.00 cop[tikv] table:ta keep order:false, stats:pseudo
rollback;
drop table if exists t1, t2;
create table t1(a int, b int, c int, primary key(a, b));
create table t2(a int, b int, c int, primary key(a));
explain select t1.a, t1.b from t1 left outer join t2 on t1.a = t2.a;
id estRows task access object operator info
IndexReader_9 10000.00 root index:IndexFullScan_8
└─IndexFullScan_8 10000.00 cop[tikv] table:t1, index:PRIMARY(a, b) keep order:false, stats:pseudo
explain select distinct t1.a, t1.b from t1 left outer join t2 on t1.a = t2.a;
id estRows task access object operator info
IndexReader_11 10000.00 root index:IndexFullScan_10
└─IndexFullScan_10 10000.00 cop[tikv] table:t1, index:PRIMARY(a, b) keep order:false, stats:pseudo
CREATE TABLE `test01` (
`id` bigint(20) NOT NULL AUTO_INCREMENT,
`stat_date` int(11) NOT NULL DEFAULT '0',
`show_date` varchar(20) NOT NULL DEFAULT '',
`region_id` bigint(20) unsigned NOT NULL DEFAULT '0',
`period` tinyint(3) unsigned NOT NULL DEFAULT '0',
`registration_num` bigint(20) unsigned NOT NULL DEFAULT '0',
PRIMARY KEY (`id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin;
CREATE TABLE `test02` (
`id` bigint(20) NOT NULL AUTO_INCREMENT,
`region_name` varchar(128) DEFAULT NULL,
PRIMARY KEY (`id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin;
EXPLAIN SELECT COUNT(1) FROM (SELECT COALESCE(b.region_name, '不详') region_name, SUM(a.registration_num) registration_num FROM (SELECT stat_date, show_date, region_id, 0 registration_num FROM test01 WHERE period = 1 AND stat_date >= 20191202 AND stat_date <= 20191202 UNION ALL SELECT stat_date, show_date, region_id, registration_num registration_num FROM test01 WHERE period = 1 AND stat_date >= 20191202 AND stat_date <= 20191202) a LEFT JOIN test02 b ON a.region_id = b.id WHERE registration_num > 0 AND a.stat_date >= '20191202' AND a.stat_date <= '20191202' GROUP BY a.stat_date , a.show_date , COALESCE(b.region_name, '不详') ) JLS;
id estRows task access object operator info
StreamAgg_22 1.00 root funcs:count(1)->Column#22
└─HashAgg_25 1.00 root group by:Column#32, Column#33, Column#34, funcs:firstrow(1)->Column#31
└─Projection_42 0.01 root Column#14, Column#15, coalesce(test.test02.region_name, 不详)->Column#34
└─IndexJoin_28 0.01 root left outer join, inner:TableReader_27, outer key:Column#16, inner key:test.test02.id, equal cond:eq(Column#16, test.test02.id)
├─Union_33(Build) 0.01 root
│ ├─Projection_34 0.00 root test.test01.stat_date, test.test01.show_date, test.test01.region_id
│ │ └─TableDual_35 0.00 root rows:0
│ └─Projection_36 0.01 root test.test01.stat_date, test.test01.show_date, test.test01.region_id
│ └─TableReader_39 0.01 root data:Selection_38
│ └─Selection_38 0.01 cop[tikv] eq(test.test01.period, 1), ge(test.test01.stat_date, 20191202), ge(test.test01.stat_date, 20191202), gt(cast(test.test01.registration_num), 0), le(test.test01.stat_date, 20191202), le(test.test01.stat_date, 20191202)
│ └─TableFullScan_37 10000.00 cop[tikv] table:test01 keep order:false, stats:pseudo
└─TableReader_27(Probe) 1.00 root data:TableRangeScan_26
└─TableRangeScan_26 1.00 cop[tikv] table:b range: decided by [Column#16], keep order:false, stats:pseudo
drop table if exists t;
create table t(a int, nb int not null, nc int not null);
explain select ifnull(a, 0) from t;
id estRows task access object operator info
Projection_3 10000.00 root ifnull(test.t.a, 0)->Column#5
└─TableReader_5 10000.00 root data:TableFullScan_4
└─TableFullScan_4 10000.00 cop[tikv] table:t keep order:false, stats:pseudo
explain select ifnull(nb, 0) from t;
id estRows task access object operator info
TableReader_5 10000.00 root data:TableFullScan_4
└─TableFullScan_4 10000.00 cop[tikv] table:t keep order:false, stats:pseudo
explain select ifnull(nb, 0), ifnull(nc, 0) from t;
id estRows task access object operator info
TableReader_5 10000.00 root data:TableFullScan_4
└─TableFullScan_4 10000.00 cop[tikv] table:t keep order:false, stats:pseudo
explain select ifnull(a, 0), ifnull(nb, 0) from t;
id estRows task access object operator info
Projection_3 10000.00 root ifnull(test.t.a, 0)->Column#5, test.t.nb
└─TableReader_5 10000.00 root data:TableFullScan_4
└─TableFullScan_4 10000.00 cop[tikv] table:t keep order:false, stats:pseudo
explain select ifnull(nb, 0), ifnull(nb, 0) from t;
id estRows task access object operator info
Projection_3 10000.00 root test.t.nb, test.t.nb
└─TableReader_5 10000.00 root data:TableFullScan_4
└─TableFullScan_4 10000.00 cop[tikv] table:t keep order:false, stats:pseudo
explain select 1+ifnull(nb, 0) from t;
id estRows task access object operator info
Projection_3 10000.00 root plus(1, test.t.nb)->Column#5
└─TableReader_5 10000.00 root data:TableFullScan_4
└─TableFullScan_4 10000.00 cop[tikv] table:t keep order:false, stats:pseudo
explain select 1+ifnull(a, 0) from t;
id estRows task access object operator info
Projection_3 10000.00 root plus(1, ifnull(test.t.a, 0))->Column#5
└─TableReader_5 10000.00 root data:TableFullScan_4
└─TableFullScan_4 10000.00 cop[tikv] table:t keep order:false, stats:pseudo
explain select 1+ifnull(nb, 0) from t where nb=1;
id estRows task access object operator info
Projection_4 10.00 root plus(1, test.t.nb)->Column#5
└─TableReader_7 10.00 root data:Selection_6
└─Selection_6 10.00 cop[tikv] eq(test.t.nb, 1)
└─TableFullScan_5 10000.00 cop[tikv] table:t keep order:false, stats:pseudo
explain select * from t ta left outer join t tb on ta.nb = tb.nb and ta.a > 1 where ifnull(ta.nb, 1) or ta.nb is null;
id estRows task access object operator info
HashJoin_7 8320.83 root left outer join, equal:[eq(test.t.nb, test.t.nb)], left cond:[gt(test.t.a, 1)]
├─TableReader_14(Build) 6656.67 root data:Selection_13
│ └─Selection_13 6656.67 cop[tikv] or(test.t.nb, 0)
│ └─TableFullScan_12 10000.00 cop[tikv] table:tb keep order:false, stats:pseudo
└─TableReader_11(Probe) 6656.67 root data:Selection_10
└─Selection_10 6656.67 cop[tikv] or(test.t.nb, 0)
└─TableFullScan_9 10000.00 cop[tikv] table:ta keep order:false, stats:pseudo
explain select * from t ta right outer join t tb on ta.nb = tb.nb and ta.a > 1 where ifnull(tb.nb, 1) or tb.nb is null;
id estRows task access object operator info
HashJoin_7 6656.67 root right outer join, equal:[eq(test.t.nb, test.t.nb)]
├─TableReader_11(Build) 2218.89 root data:Selection_10
│ └─Selection_10 2218.89 cop[tikv] gt(test.t.a, 1), or(test.t.nb, 0)
│ └─TableFullScan_9 10000.00 cop[tikv] table:ta keep order:false, stats:pseudo
└─TableReader_14(Probe) 6656.67 root data:Selection_13
└─Selection_13 6656.67 cop[tikv] or(test.t.nb, 0)
└─TableFullScan_12 10000.00 cop[tikv] table:tb keep order:false, stats:pseudo
explain select * from t ta inner join t tb on ta.nb = tb.nb and ta.a > 1 where ifnull(tb.nb, 1) or tb.nb is null;
id estRows task access object operator info
HashJoin_9 2773.61 root inner join, equal:[eq(test.t.nb, test.t.nb)]
├─TableReader_12(Build) 2218.89 root data:Selection_11
│ └─Selection_11 2218.89 cop[tikv] gt(test.t.a, 1), or(test.t.nb, 0)
│ └─TableFullScan_10 10000.00 cop[tikv] table:ta keep order:false, stats:pseudo
└─TableReader_15(Probe) 6656.67 root data:Selection_14
└─Selection_14 6656.67 cop[tikv] or(test.t.nb, 0)
└─TableFullScan_13 10000.00 cop[tikv] table:tb keep order:false, stats:pseudo
explain select ifnull(t.nc, 1) in (select count(*) from t s , t t1 where s.a = t.a and s.a = t1.a) from t;
id estRows task access object operator info
Projection_12 10000.00 root Column#14
└─Apply_14 10000.00 root CARTESIAN left outer semi join, other cond:eq(test.t.nc, Column#13)
├─TableReader_16(Build) 10000.00 root data:TableFullScan_15
│ └─TableFullScan_15 10000.00 cop[tikv] table:t keep order:false, stats:pseudo
└─HashAgg_19(Probe) 1.00 root funcs:count(Column#15)->Column#13
└─HashJoin_20 9.99 root inner join, equal:[eq(test.t.a, test.t.a)]
├─HashAgg_30(Build) 7.99 root group by:test.t.a, funcs:count(Column#16)->Column#15, funcs:firstrow(test.t.a)->test.t.a
│ └─TableReader_31 7.99 root data:HashAgg_25
│ └─HashAgg_25 7.99 cop[tikv] group by:test.t.a, funcs:count(1)->Column#16
│ └─Selection_29 9.99 cop[tikv] eq(test.t.a, test.t.a), not(isnull(test.t.a))
│ └─TableFullScan_28 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo
└─TableReader_24(Probe) 9.99 root data:Selection_23
└─Selection_23 9.99 cop[tikv] eq(test.t.a, test.t.a), not(isnull(test.t.a))
└─TableFullScan_22 10000.00 cop[tikv] table:s keep order:false, stats:pseudo
explain select * from t ta left outer join t tb on ta.nb = tb.nb and ta.a > 1 where ifnull(tb.a, 1) or tb.a is null;
id estRows task access object operator info
Selection_7 10000.00 root or(ifnull(test.t.a, 1), isnull(test.t.a))
└─HashJoin_8 12500.00 root left outer join, equal:[eq(test.t.nb, test.t.nb)], left cond:[gt(test.t.a, 1)]
├─TableReader_13(Build) 10000.00 root data:TableFullScan_12
│ └─TableFullScan_12 10000.00 cop[tikv] table:tb keep order:false, stats:pseudo
└─TableReader_11(Probe) 10000.00 root data:TableFullScan_10
└─TableFullScan_10 10000.00 cop[tikv] table:ta keep order:false, stats:pseudo
explain select * from t ta right outer join t tb on ta.nb = tb.nb and ta.a > 1 where ifnull(tb.a, 1) or tb.a is null;
id estRows task access object operator info
HashJoin_7 8000.00 root right outer join, equal:[eq(test.t.nb, test.t.nb)]
├─TableReader_11(Build) 3333.33 root data:Selection_10
│ └─Selection_10 3333.33 cop[tikv] gt(test.t.a, 1)
│ └─TableFullScan_9 10000.00 cop[tikv] table:ta keep order:false, stats:pseudo
└─TableReader_14(Probe) 8000.00 root data:Selection_13
└─Selection_13 8000.00 cop[tikv] or(ifnull(test.t.a, 1), isnull(test.t.a))
└─TableFullScan_12 10000.00 cop[tikv] table:tb keep order:false, stats:pseudo
explain select ifnull(t.a, 1) in (select count(*) from t s , t t1 where s.a = t.a and s.a = t1.a) from t;
id estRows task access object operator info
Projection_12 10000.00 root Column#14
└─Apply_14 10000.00 root CARTESIAN left outer semi join, other cond:eq(ifnull(test.t.a, 1), Column#13)
├─TableReader_16(Build) 10000.00 root data:TableFullScan_15
│ └─TableFullScan_15 10000.00 cop[tikv] table:t keep order:false, stats:pseudo
└─HashAgg_19(Probe) 1.00 root funcs:count(Column#15)->Column#13
└─HashJoin_20 9.99 root inner join, equal:[eq(test.t.a, test.t.a)]
├─HashAgg_30(Build) 7.99 root group by:test.t.a, funcs:count(Column#16)->Column#15, funcs:firstrow(test.t.a)->test.t.a
│ └─TableReader_31 7.99 root data:HashAgg_25
│ └─HashAgg_25 7.99 cop[tikv] group by:test.t.a, funcs:count(1)->Column#16
│ └─Selection_29 9.99 cop[tikv] eq(test.t.a, test.t.a), not(isnull(test.t.a))
│ └─TableFullScan_28 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo
└─TableReader_24(Probe) 9.99 root data:Selection_23
└─Selection_23 9.99 cop[tikv] eq(test.t.a, test.t.a), not(isnull(test.t.a))
└─TableFullScan_22 10000.00 cop[tikv] table:s keep order:false, stats:pseudo
drop table if exists t;
create table t(a int);
explain select * from t where _tidb_rowid = 0;
id estRows task access object operator info
Point_Get_1 1.00 root table:t handle:0
explain select * from t where _tidb_rowid > 0;
id estRows task access object operator info
Projection_4 8000.00 root test.t.a
└─TableReader_6 10000.00 root data:TableRangeScan_5
└─TableRangeScan_5 10000.00 cop[tikv] table:t range:(0,+inf], keep order:false, stats:pseudo
explain select a, _tidb_rowid from t where a > 0;
id estRows task access object operator info
TableReader_7 3333.33 root data:Selection_6
└─Selection_6 3333.33 cop[tikv] gt(test.t.a, 0)
└─TableFullScan_5 10000.00 cop[tikv] table:t keep order:false, stats:pseudo
explain select * from t where _tidb_rowid > 0 and a > 0;
id estRows task access object operator info
Projection_4 2666.67 root test.t.a
└─TableReader_7 2666.67 root data:Selection_6
└─Selection_6 2666.67 cop[tikv] gt(test.t.a, 0)
└─TableRangeScan_5 3333.33 cop[tikv] table:t range:(0,+inf], keep order:false, stats:pseudo
drop table if exists t;
create table t(a int, b int, c int);
explain select * from (select * from t order by (select 2)) t order by a, b;
id estRows task access object operator info
Sort_12 10000.00 root test.t.a:asc, test.t.b:asc
└─TableReader_18 10000.00 root data:TableFullScan_17
└─TableFullScan_17 10000.00 cop[tikv] table:t keep order:false, stats:pseudo
explain select * from (select * from t order by c) t order by a, b;
id estRows task access object operator info
Sort_6 10000.00 root test.t.a:asc, test.t.b:asc
└─Sort_9 10000.00 root test.t.c:asc
└─TableReader_12 10000.00 root data:TableFullScan_11
└─TableFullScan_11 10000.00 cop[tikv] table:t keep order:false, stats:pseudo
drop table if exists t;
set @@session.tidb_opt_insubq_to_join_and_agg=1;
explain SELECT 0 AS a FROM dual UNION SELECT 1 AS a FROM dual ORDER BY a;
id estRows task access object operator info
Sort_13 2.00 root Column#3:asc
└─HashAgg_17 2.00 root group by:Column#3, funcs:firstrow(Column#6)->Column#3
└─Union_18 2.00 root
├─HashAgg_19 1.00 root group by:1, funcs:firstrow(0)->Column#6, funcs:firstrow(0)->Column#3
│ └─TableDual_22 1.00 root rows:1
└─HashAgg_25 1.00 root group by:1, funcs:firstrow(1)->Column#6, funcs:firstrow(1)->Column#3
└─TableDual_28 1.00 root rows:1
explain SELECT 0 AS a FROM dual UNION (SELECT 1 AS a FROM dual ORDER BY a);
id estRows task access object operator info
HashAgg_15 2.00 root group by:Column#3, funcs:firstrow(Column#6)->Column#3
└─Union_16 2.00 root
├─HashAgg_17 1.00 root group by:1, funcs:firstrow(0)->Column#6, funcs:firstrow(0)->Column#3
│ └─TableDual_20 1.00 root rows:1
└─StreamAgg_27 1.00 root group by:Column#1, funcs:firstrow(Column#1)->Column#6, funcs:firstrow(Column#1)->Column#3
└─Projection_32 1.00 root 1->Column#1
└─TableDual_33 1.00 root rows:1
create table t (i int key, j int, unique key (i, j));
begin;
insert into t values (1, 1);
explain update t set j = -j where i = 1 and j = 1;
id estRows task access object operator info
Update_2 N/A root N/A
└─Point_Get_1 1.00 root table:t, index:i(i, j)
rollback;
drop table if exists t;
create table t(a int);
begin;
insert into t values (1);
explain select * from t left outer join t t1 on t.a = t1.a where t.a not between 1 and 2;
id estRows task access object operator info
HashJoin_9 8320.83 root left outer join, equal:[eq(test.t.a, test.t.a)]
├─UnionScan_15(Build) 6656.67 root not(and(ge(test.t.a, 1), le(test.t.a, 2))), not(isnull(test.t.a))
│ └─TableReader_18 6656.67 root data:Selection_17
│ └─Selection_17 6656.67 cop[tikv] not(isnull(test.t.a)), or(lt(test.t.a, 1), gt(test.t.a, 2))
│ └─TableFullScan_16 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo
└─UnionScan_11(Probe) 6656.67 root not(and(ge(test.t.a, 1), le(test.t.a, 2)))
└─TableReader_14 6656.67 root data:Selection_13
└─Selection_13 6656.67 cop[tikv] or(lt(test.t.a, 1), gt(test.t.a, 2))
└─TableFullScan_12 10000.00 cop[tikv] table:t keep order:false, stats:pseudo
rollback;
drop table if exists t;
create table t(a time, b date);
insert into t values (1, "1000-01-01"), (2, "1000-01-02"), (3, "1000-01-03");
analyze table t;
explain select * from t where a = 1;
id estRows task access object operator info
TableReader_7 1.00 root data:Selection_6
└─Selection_6 1.00 cop[tikv] eq(test.t.a, 00:00:01.000000)
└─TableFullScan_5 3.00 cop[tikv] table:t keep order:false
explain select * from t where b = "1000-01-01";
id estRows task access object operator info
TableReader_7 1.00 root data:Selection_6
└─Selection_6 1.00 cop[tikv] eq(test.t.b, 1000-01-01 00:00:00.000000)
└─TableFullScan_5 3.00 cop[tikv] table:t keep order:false
drop table t;
create table t(a int);
insert into t values (1),(2),(2),(2),(9),(9),(9),(10);
analyze table t with 1 buckets;
explain select * from t where a >= 3 and a <= 8;
id estRows task access object operator info
TableReader_7 0.00 root data:Selection_6
└─Selection_6 0.00 cop[tikv] ge(test.t.a, 3), le(test.t.a, 8)
└─TableFullScan_5 8.00 cop[tikv] table:t keep order:false
drop table t;
create table t(a int, b int, index idx_ab(a, b));
explain select a, b from t where a in (1) order by b;
id estRows task access object operator info
IndexReader_12 10.00 root index:IndexRangeScan_11
└─IndexRangeScan_11 10.00 cop[tikv] table:t, index:idx_ab(a, b) range:[1,1], keep order:true, stats:pseudo
explain select a, b from t where a = 1 order by b;
id estRows task access object operator info
IndexReader_12 10.00 root index:IndexRangeScan_11
└─IndexRangeScan_11 10.00 cop[tikv] table:t, index:idx_ab(a, b) range:[1,1], keep order:true, stats:pseudo
drop table if exists t;
create table t(a int, b int);
explain select a, b from (select a, b, avg(b) over (partition by a)as avg_b from t) as tt where a > 10 and b < 10 and a > avg_b;
id estRows task access object operator info
Projection_8 2666.67 root test.t.a, test.t.b
└─Selection_9 2666.67 root gt(cast(test.t.a), Column#5), lt(test.t.b, 10)
└─Window_10 3333.33 root avg(cast(test.t.b, decimal(65,4) BINARY))->Column#5 over(partition by test.t.a)
└─Sort_14 3333.33 root test.t.a:asc
└─TableReader_13 3333.33 root data:Selection_12
└─Selection_12 3333.33 cop[tikv] gt(test.t.a, 10)
└─TableFullScan_11 10000.00 cop[tikv] table:t keep order:false, stats:pseudo
drop table if exists t;
create table t(a int, b int);
explain format="dot" select * from t where a < 2;
dot contents
digraph TableReader_7 {
subgraph cluster7{
node [style=filled, color=lightgrey]
color=black
label = "root"
"TableReader_7"
}
subgraph cluster6{
node [style=filled, color=lightgrey]
color=black
label = "cop"
"Selection_6" -> "TableFullScan_5"
}
"TableReader_7" -> "Selection_6"
}
drop table if exists t;
create table t(a binary(16) not null, b varchar(2) default null, c varchar(100) default 'aaaa', key (a,b));
explain select * from t where a=x'FA34E1093CB428485734E3917F000000' and b='xb';
id estRows task access object operator info
IndexLookUp_10 0.10 root
├─IndexRangeScan_8(Build) 0.10 cop[tikv] table:t, index:a(a, b) range:["[250 52 225 9 60 180 40 72 87 52 227 145 127 0 0 0]" "xb","[250 52 225 9 60 180 40 72 87 52 227 145 127 0 0 0]" "xb"], keep order:false, stats:pseudo
└─TableRowIDScan_9(Probe) 0.10 cop[tikv] table:t keep order:false, stats:pseudo
explain update t set c = 'ssss' where a=x'FA34E1093CB428485734E3917F000000' and b='xb';
id estRows task access object operator info
Update_4 N/A root N/A
└─IndexLookUp_11 0.10 root
├─IndexRangeScan_9(Build) 0.10 cop[tikv] table:t, index:a(a, b) range:["[250 52 225 9 60 180 40 72 87 52 227 145 127 0 0 0]" "xb","[250 52 225 9 60 180 40 72 87 52 227 145 127 0 0 0]" "xb"], keep order:false, stats:pseudo
└─TableRowIDScan_10(Probe) 0.10 cop[tikv] table:t keep order:false, stats:pseudo
drop table if exists t;

189 cmd/explaintest/r/explain_easy_stats.result

@@ -0,0 +1,189 @@
use test;
drop table if exists t1, t2, t3;
create table t1 (c1 int primary key, c2 int, c3 int, index c2 (c2));
load stats 's/explain_easy_stats_t1.json';
create table t2 (c1 int unique, c2 int);
load stats 's/explain_easy_stats_t2.json';
create table t3 (a bigint, b bigint, c bigint, d bigint);
load stats 's/explain_easy_stats_t3.json';
create table index_prune(a bigint(20) NOT NULL, b bigint(20) NOT NULL, c tinyint(4) NOT NULL, primary key(a, b), index idx_b_c_a(b, c, a));
load stats 's/explain_easy_stats_index_prune.json';
set @@session.tidb_opt_agg_push_down = 1;
set @@session.tidb_opt_insubq_to_join_and_agg=1;
set @@session.tidb_hashagg_partial_concurrency = 1;
set @@session.tidb_hashagg_final_concurrency = 1;
explain select * from t3 where exists (select s.a from t3 s having sum(s.a) = t3.a );
id estRows task access object operator info
HashJoin_12 1600.00 root semi join, equal:[eq(Column#13, Column#11)]
├─StreamAgg_27(Build) 1.00 root funcs:sum(Column#16)->Column#11
│ └─TableReader_28 1.00 root data:StreamAgg_19
│ └─StreamAgg_19 1.00 cop[tikv] funcs:sum(test.t3.a)->Column#16
│ └─TableFullScan_26 2000.00 cop[tikv] table:s keep order:false
└─Projection_13(Probe) 2000.00 root test.t3.a, test.t3.b, test.t3.c, test.t3.d, cast(test.t3.a, decimal(20,0) BINARY)->Column#13
└─TableReader_15 2000.00 root data:TableFullScan_14
└─TableFullScan_14 2000.00 cop[tikv] table:t3 keep order:false
explain select * from t1;
id estRows task access object operator info
TableReader_5 1999.00 root data:TableFullScan_4
└─TableFullScan_4 1999.00 cop[tikv] table:t1 keep order:false
explain select * from t1 order by c2;
id estRows task access object operator info
IndexLookUp_12 1999.00 root
├─IndexFullScan_10(Build) 1999.00 cop[tikv] table:t1, index:c2(c2) keep order:true
└─TableRowIDScan_11(Probe) 1999.00 cop[tikv] table:t1 keep order:false
explain select * from t2 order by c2;
id estRows task access object operator info
Sort_4 1985.00 root test.t2.c2:asc
└─TableReader_8 1985.00 root data:TableFullScan_7
└─TableFullScan_7 1985.00 cop[tikv] table:t2 keep order:false
explain select * from t1 where t1.c1 > 0;
id estRows task access object operator info
TableReader_6 1999.00 root data:TableRangeScan_5
└─TableRangeScan_5 1999.00 cop[tikv] table:t1 range:(0,+inf], keep order:false
explain select t1.c1, t1.c2 from t1 where t1.c2 = 1;
id estRows task access object operator info
IndexReader_6 0.00 root index:IndexRangeScan_5
└─IndexRangeScan_5 0.00 cop[tikv] table:t1, index:c2(c2) range:[1,1], keep order:false
explain select * from t1 left join t2 on t1.c2 = t2.c1 where t1.c1 > 1;
id estRows task access object operator info
HashJoin_15 2481.25 root left outer join, equal:[eq(test.t1.c2, test.t2.c1)]
├─TableReader_29(Build) 1985.00 root data:Selection_28
│ └─Selection_28 1985.00 cop[tikv] not(isnull(test.t2.c1))
│ └─TableFullScan_27 1985.00 cop[tikv] table:t2 keep order:false
└─TableReader_26(Probe) 1998.00 root data:TableRangeScan_25
└─TableRangeScan_25 1998.00 cop[tikv] table:t1 range:(1,+inf], keep order:false
explain update t1 set t1.c2 = 2 where t1.c1 = 1;
id estRows task access object operator info
Update_2 N/A root N/A
└─Point_Get_1 1.00 root table:t1 handle:1
explain delete from t1 where t1.c2 = 1;
id estRows task access object operator info
Delete_4 N/A root N/A
└─IndexLookUp_11 0.00 root
├─IndexRangeScan_9(Build) 0.00 cop[tikv] table:t1, index:c2(c2) range:[1,1], keep order:false
└─TableRowIDScan_10(Probe) 0.00 cop[tikv] table:t1 keep order:false
explain select count(b.c2) from t1 a, t2 b where a.c1 = b.c2 group by a.c1;
id estRows task access object operator info
Projection_11 1985.00 root cast(Column#8, bigint(21) BINARY)->Column#7
└─HashJoin_17 1985.00 root inner join, equal:[eq(test.t1.c1, test.t2.c2)]
├─HashAgg_21(Build) 1985.00 root group by:test.t2.c2, funcs:count(test.t2.c2)->Column#8, funcs:firstrow(test.t2.c2)->test.t2.c2
│ └─TableReader_28 1985.00 root data:Selection_27
│ └─Selection_27 1985.00 cop[tikv] not(isnull(test.t2.c2))
│ └─TableFullScan_26 1985.00 cop[tikv] table:b keep order:false
└─TableReader_30(Probe) 1999.00 root data:TableFullScan_29
└─TableFullScan_29 1999.00 cop[tikv] table:a keep order:false
explain select * from t2 order by t2.c2 limit 0, 1;
id estRows task access object operator info
TopN_7 1.00 root test.t2.c2:asc, offset:0, count:1
└─TableReader_15 1.00 root data:TopN_14
└─TopN_14 1.00 cop[tikv] test.t2.c2:asc, offset:0, count:1
└─TableFullScan_13 1985.00 cop[tikv] table:t2 keep order:false
explain select * from t1 where c1 > 1 and c2 = 1 and c3 < 1;
id estRows task access object operator info
IndexLookUp_11 0.00 root
├─IndexRangeScan_8(Build) 0.00 cop[tikv] table:t1, index:c2(c2) range:(1 1,1 +inf], keep order:false
└─Selection_10(Probe) 0.00 cop[tikv] lt(test.t1.c3, 1)
└─TableRowIDScan_9 0.00 cop[tikv] table:t1 keep order:false
explain select * from t1 where c1 = 1 and c2 > 1;
id estRows task access object operator info
Selection_6 0.50 root gt(test.t1.c2, 1)
└─Point_Get_5 1.00 root table:t1 handle:1
explain select c1 from t1 where c1 in (select c2 from t2);
id estRows task access object operator info
HashJoin_15 1985.00 root inner join, equal:[eq(test.t1.c1, test.t2.c2)]
├─HashAgg_19(Build) 1985.00 root group by:test.t2.c2, funcs:firstrow(test.t2.c2)->test.t2.c2
│ └─TableReader_26 1985.00 root data:Selection_25
│ └─Selection_25 1985.00 cop[tikv] not(isnull(test.t2.c2))
│ └─TableFullScan_24 1985.00 cop[tikv] table:t2 keep order:false
└─TableReader_28(Probe) 1999.00 root data:TableFullScan_27
└─TableFullScan_27 1999.00 cop[tikv] table:t1 keep order:false
explain select * from information_schema.columns;
id estRows task access object operator info
MemTableScan_4 10000.00 root table:COLUMNS
explain select c2 = (select c2 from t2 where t1.c1 = t2.c1 order by c1 limit 1) from t1;
id estRows task access object operator info
Projection_12 1999.00 root eq(test.t1.c2, test.t2.c2)->Column#8
└─Apply_14 1999.00 root CARTESIAN left outer join
├─TableReader_16(Build) 1999.00 root data:TableFullScan_15
│ └─TableFullScan_15 1999.00 cop[tikv] table:t1 keep order:false
└─Projection_43(Probe) 1.00 root test.t2.c1, test.t2.c2
└─IndexLookUp_42 1.00 root limit embedded(offset:0, count:1)
├─Limit_41(Build) 1.00 cop[tikv] offset:0, count:1
│ └─IndexRangeScan_39 1.25 cop[tikv] table:t2, index:c1(c1) range: decided by [eq(test.t1.c1, test.t2.c1)], keep order:true
└─TableRowIDScan_40(Probe) 1.00 cop[tikv] table:t2 keep order:false, stats:pseudo
explain select * from t1 order by c1 desc limit 1;
id estRows task access object operator info
Limit_10 1.00 root offset:0, count:1
└─TableReader_20 1.00 root data:Limit_19
└─Limit_19 1.00 cop[tikv] offset:0, count:1
└─TableFullScan_18 1.00 cop[tikv] table:t1 keep order:true, desc
set @@session.tidb_opt_insubq_to_join_and_agg=0;
explain select 1 in (select c2 from t2) from t1;
id estRows task access object operator info
HashJoin_7 1999.00 root CARTESIAN left outer semi join, other cond:eq(1, test.t2.c2)
├─TableReader_13(Build) 1985.00 root data:TableFullScan_12
│ └─TableFullScan_12 1985.00 cop[tikv] table:t2 keep order:false
└─TableReader_9(Probe) 1999.00 root data:TableFullScan_8
└─TableFullScan_8 1999.00 cop[tikv] table:t1 keep order:false
explain format="dot" select 1 in (select c2 from t2) from t1;
dot contents
digraph HashJoin_7 {
subgraph cluster7{
node [style=filled, color=lightgrey]
color=black
label = "root"
"HashJoin_7" -> "TableReader_9"
"HashJoin_7" -> "TableReader_13"
}
subgraph cluster8{
node [style=filled, color=lightgrey]
color=black
label = "cop"
"TableFullScan_8"
}
subgraph cluster12{
node [style=filled, color=lightgrey]
color=black
label = "cop"
"TableFullScan_12"
}
"TableReader_9" -> "TableFullScan_8"
"TableReader_13" -> "TableFullScan_12"
}
explain select * from index_prune WHERE a = 1010010404050976781 AND b = 26467085526790 LIMIT 1;
id estRows task access object operator info
Point_Get_1 1.00 root table:index_prune, index:PRIMARY(a, b)
explain select * from index_prune WHERE a = 1010010404050976781 AND b = 26467085526790 LIMIT 0;
id estRows task access object operator info
TableDual_5 0.00 root rows:0
explain select * from index_prune WHERE a = 1010010404050976781 AND b = 26467085526790 LIMIT 1, 1;
id estRows task access object operator info
Limit_9 1.00 root offset:1, count:1
└─Point_Get_11 1.00 root table:index_prune, index:PRIMARY(a, b)
explain select * from index_prune WHERE a = 1010010404050976781 AND b = 26467085526790 LIMIT 1, 0;
id estRows task access object operator info
Limit_9 0.00 root offset:1, count:0
└─Point_Get_11 1.00 root table:index_prune, index:PRIMARY(a, b)
explain select * from index_prune WHERE a = 1010010404050976781 AND b = 26467085526790 LIMIT 0, 1;
id estRows task access object operator info
Point_Get_1 1.00 root table:index_prune, index:PRIMARY(a, b)
explain select * from index_prune WHERE a = 1010010404050976781 AND b = 26467085526790 ORDER BY a;
id estRows task access object operator info
Point_Get_1 1.00 root table:index_prune, index:PRIMARY(a, b)
explain select * from index_prune WHERE a = 1010010404050976781 AND b = 26467085526790 GROUP BY b;
id estRows task access object operator info
Point_Get_1 1.00 root table:index_prune, index:PRIMARY(a, b)
explain select * from index_prune WHERE a = 1010010404050976781 AND b = 26467085526790 GROUP BY b ORDER BY a limit 1;
id estRows task access object operator info
Point_Get_1 1.00 root table:index_prune, index:PRIMARY(a, b)
drop table if exists t1, t2, t3, index_prune;
set @@session.tidb_opt_insubq_to_join_and_agg=1;
drop table if exists tbl;
create table tbl(column1 int, column2 int, index idx(column1, column2));
load stats 's/explain_easy_stats_tbl_dnf.json';
explain select * from tbl where (column1=0 and column2=1) or (column1=1 and column2=3) or (column1=2 and column2=5);
id estRows task access object operator info
IndexReader_6 3.00 root index:IndexRangeScan_5
└─IndexRangeScan_5 3.00 cop[tikv] table:tbl, index:idx(column1, column2) range:[0 1,0 1], [1 3,1 3], [2 5,2 5], keep order:false

486 cmd/explaintest/r/explain_generate_column_substitute.result

@@ -0,0 +1,486 @@
use test;
drop table if exists t;
create table t(a int, b real, c int as ((a+1)) virtual, e real as ((b+a)));
insert into t values (1, 2.0, default, default), (2, 2.1, default, default), (5, 3.0, default, default),
(5, -1.0, default, default), (0, 0.0, default, default), (-1, -2.0, default, default), (0, 0, default, default);
alter table t add index idx_c(c);
alter table t add index idx_e(e);
set @@sql_mode="";
desc select * from t where a+1=3;
id estRows task access object operator info
IndexLookUp_10 10.00 root
├─IndexRangeScan_8(Build) 10.00 cop[tikv] table:t, index:idx_c(c) range:[3,3], keep order:false, stats:pseudo
└─TableRowIDScan_9(Probe) 10.00 cop[tikv] table:t keep order:false, stats:pseudo
select * from t where a+1=3;
a b c e
2 2.1 3 4.1
desc select a+1 from t where a+1=3;
id estRows task access object operator info
IndexReader_6 10.00 root index:IndexRangeScan_5
└─IndexRangeScan_5 10.00 cop[tikv] table:t, index:idx_c(c) range:[3,3], keep order:false, stats:pseudo
select a+1 from t where a+1=3;
a+1
3
desc select c from t where a+1=3;
id estRows task access object operator info
IndexReader_6 10.00 root index:IndexRangeScan_5
└─IndexRangeScan_5 10.00 cop[tikv] table:t, index:idx_c(c) range:[3,3], keep order:false, stats:pseudo
select c from t where a+1=3;
c
3
desc select * from t where b+a=3;
id estRows task access object operator info
IndexLookUp_10 10.00 root
├─IndexRangeScan_8(Build) 10.00 cop[tikv] table:t, index:idx_e(e) range:[3,3], keep order:false, stats:pseudo
└─TableRowIDScan_9(Probe) 10.00 cop[tikv] table:t keep order:false, stats:pseudo
select * from t where b+a=3;
a b c e
1 2 2 3
desc select b+a from t where b+a=3;
id estRows task access object operator info
IndexReader_6 10.00 root index:IndexRangeScan_5
└─IndexRangeScan_5 10.00 cop[tikv] table:t, index:idx_e(e) range:[3,3], keep order:false, stats:pseudo
select b+a from t where b+a=3;
b+a
3
desc select e from t where b+a=3;
id estRows task access object operator info
IndexReader_6 10.00 root index:IndexRangeScan_5
└─IndexRangeScan_5 10.00 cop[tikv] table:t, index:idx_e(e) range:[3,3], keep order:false, stats:pseudo
select e from t where b+a=3;
e
3
desc select a+1 from t where a+1 in (1, 2, 3);
id estRows task access object operator info
IndexReader_6 30.00 root index:IndexRangeScan_5
└─IndexRangeScan_5 30.00 cop[tikv] table:t, index:idx_c(c) range:[1,1], [2,2], [3,3], keep order:false, stats:pseudo
select a+1 from t where a+1 in (1, 2, 3);
a+1
1
1
2
3
desc select * from t where a+1 in (1, 2, 3);
id estRows task access object operator info
IndexLookUp_10 30.00 root
├─IndexRangeScan_8(Build) 30.00 cop[tikv] table:t, index:idx_c(c) range:[1,1], [2,2], [3,3], keep order:false, stats:pseudo
└─TableRowIDScan_9(Probe) 30.00 cop[tikv] table:t keep order:false, stats:pseudo
select * from t where a+1 in (1, 2, 3);
a b c e
1 2 2 3
2 2.1 3 4.1
0 0 1 0
0 0 1 0
desc select a+1 from t where a+1 between 1 and 4;
id estRows task access object operator info
IndexReader_6 250.00 root index:IndexRangeScan_5
└─IndexRangeScan_5 250.00 cop[tikv] table:t, index:idx_c(c) range:[1,4], keep order:false, stats:pseudo
select a+1 from t where a+1 between 1 and 4;
a+1
1
1
2
3
desc select * from t where a+1 between 1 and 4;
id estRows task access object operator info
IndexLookUp_10 250.00 root
├─IndexRangeScan_8(Build) 250.00 cop[tikv] table:t, index:idx_c(c) range:[1,4], keep order:false, stats:pseudo
└─TableRowIDScan_9(Probe) 250.00 cop[tikv] table:t keep order:false, stats:pseudo
select * from t where a+1 between 1 and 4;
a b c e
1 2 2 3
2 2.1 3 4.1
0 0 1 0
0 0 1 0
desc select * from t order by a+1;
id estRows task access object operator info
Projection_13 10000.00 root test.t.a, test.t.b, test.t.c, test.t.e
└─IndexLookUp_12 10000.00 root
├─IndexFullScan_10(Build) 10000.00 cop[tikv] table:t, index:idx_c(c) keep order:true, stats:pseudo
└─TableRowIDScan_11(Probe) 10000.00 cop[tikv] table:t keep order:false, stats:pseudo
select * from t order by a+1;
a b c e
-1 -2 0 -3
0 0 1 0
0 0 1 0
1 2 2 3
2 2.1 3 4.1
5 3 6 8
5 -1 6 4
desc select a+1 from t order by a+1;
id estRows task access object operator info
IndexReader_13 10000.00 root index:IndexFullScan_12
└─IndexFullScan_12 10000.00 cop[tikv] table:t, index:idx_c(c) keep order:true, stats:pseudo
select a+1 from t order by a+1;
a+1
0
1
1
2
3
6
6
desc select b+a from t order by b+a;
id estRows task access object operator info
IndexReader_13 10000.00 root index:IndexFullScan_12
└─IndexFullScan_12 10000.00 cop[tikv] table:t, index:idx_e(e) keep order:true, stats:pseudo
select b+a from t order by b+a;
b+a
-3
0
0
3
4
4.1
8
desc update t set a=1 where a+1 = 3;
id estRows task access object operator info
Update_4 N/A root N/A
└─IndexLookUp_11 10.00 root
├─IndexRangeScan_9(Build) 10.00 cop[tikv] table:t, index:idx_c(c) range:[3,3], keep order:false, stats:pseudo
└─TableRowIDScan_10(Probe) 10.00 cop[tikv] table:t keep order:false, stats:pseudo
desc update t set a=2, b = 3 where b+a = 3;
id estRows task access object operator info
Update_4 N/A root N/A
└─IndexLookUp_11 10.00 root
├─IndexRangeScan_9(Build) 10.00 cop[tikv] table:t, index:idx_e(e) range:[3,3], keep order:false, stats:pseudo
└─TableRowIDScan_10(Probe) 10.00 cop[tikv] table:t keep order:false, stats:pseudo
desc delete from t where a+1 = 3;
id estRows task access object operator info
Delete_4 N/A root N/A
└─IndexLookUp_11 10.00 root
├─IndexRangeScan_9(Build) 10.00 cop[tikv] table:t, index:idx_c(c) range:[3,3], keep order:false, stats:pseudo
└─TableRowIDScan_10(Probe) 10.00 cop[tikv] table:t keep order:false, stats:pseudo
desc delete from t where b+a = 0;
id estRows task access object operator info
Delete_4 N/A root N/A
└─IndexLookUp_11 10.00 root
├─IndexRangeScan_9(Build) 10.00 cop[tikv] table:t, index:idx_e(e) range:[0,0], keep order:false, stats:pseudo
└─TableRowIDScan_10(Probe) 10.00 cop[tikv] table:t keep order:false, stats:pseudo
alter table t drop index idx_c;
alter table t drop index idx_e;
alter table t add index expr_idx_c((a+1));
alter table t add index expr_idx_e((b+a));
truncate table t;
insert into t values (1, 2.0, default, default), (2, 2.1, default, default), (5, 3.0, default, default),
(5, -1.0, default, default), (0, 0.0, default, default), (-1, -2.0, default, default), (0, 0, default, default);
desc select * from t where a+1=3;
id estRows task access object operator info
Projection_4 10.00 root test.t.a, test.t.b, test.t.c, test.t.e
└─IndexLookUp_10 10.00 root
├─IndexRangeScan_8(Build) 10.00 cop[tikv] table:t, index:expr_idx_c(`a` + 1) range:[3,3], keep order:false, stats:pseudo
└─TableRowIDScan_9(Probe) 10.00 cop[tikv] table:t keep order:false, stats:pseudo
select * from t where a+1=3;
a b c e
2 2.1 3 4.1
desc select a+1 from t where a+1=3;
id estRows task access object operator info
IndexReader_6 10.00 root index:IndexRangeScan_5
└─IndexRangeScan_5 10.00 cop[tikv] table:t, index:expr_idx_c(`a` + 1) range:[3,3], keep order:false, stats:pseudo
select a+1 from t where a+1=3;
a+1
3
desc select c from t where a+1=3;
id estRows task access object operator info
Projection_4 10.00 root test.t.c
└─IndexLookUp_10 10.00 root
├─IndexRangeScan_8(Build) 10.00 cop[tikv] table:t, index:expr_idx_c(`a` + 1) range:[3,3], keep order:false, stats:pseudo
└─TableRowIDScan_9(Probe) 10.00 cop[tikv] table:t keep order:false, stats:pseudo
select c from t where a+1=3;
c
3
desc select * from t where b+a=3;
id estRows task access object operator info
Projection_4 10.00 root test.t.a, test.t.b, test.t.c, test.t.e
└─IndexLookUp_10 10.00 root
├─IndexRangeScan_8(Build) 10.00 cop[tikv] table:t, index:expr_idx_e(`b` + `a`) range:[3,3], keep order:false, stats:pseudo
└─TableRowIDScan_9(Probe) 10.00 cop[tikv] table:t keep order:false, stats:pseudo
select * from t where b+a=3;
a b c e
1 2 2 3
desc select b+a from t where b+a=3;
id estRows task access object operator info
IndexReader_6 10.00 root index:IndexRangeScan_5
└─IndexRangeScan_5 10.00 cop[tikv] table:t, index:expr_idx_e(`b` + `a`) range:[3,3], keep order:false, stats:pseudo
select b+a from t where b+a=3;
b+a
3
desc select e from t where b+a=3;
id estRows task access object operator info
Projection_4 10.00 root test.t.e
└─IndexLookUp_10 10.00 root
├─IndexRangeScan_8(Build) 10.00 cop[tikv] table:t, index:expr_idx_e(`b` + `a`) range:[3,3], keep order:false, stats:pseudo
└─TableRowIDScan_9(Probe) 10.00 cop[tikv] table:t keep order:false, stats:pseudo
select e from t where b+a=3;
e
3
desc select a+1 from t where a+1 in (1, 2, 3);
id estRows task access object operator info
IndexReader_6 30.00 root index:IndexRangeScan_5
└─IndexRangeScan_5 30.00 cop[tikv] table:t, index:expr_idx_c(`a` + 1) range:[1,1], [2,2], [3,3], keep order:false, stats:pseudo
select a+1 from t where a+1 in (1, 2, 3);
a+1
1
1
2
3
desc select * from t where a+1 in (1, 2, 3);
id estRows task access object operator info
Projection_4 30.00 root test.t.a, test.t.b, test.t.c, test.t.e
└─IndexLookUp_10 30.00 root
├─IndexRangeScan_8(Build) 30.00 cop[tikv] table:t, index:expr_idx_c(`a` + 1) range:[1,1], [2,2], [3,3], keep order:false, stats:pseudo
└─TableRowIDScan_9(Probe) 30.00 cop[tikv] table:t keep order:false, stats:pseudo
select * from t where a+1 in (1, 2, 3);
a b c e
1 2 2 3
2 2.1 3 4.1
0 0 1 0
0 0 1 0
desc select a+1 from t where a+1 between 1 and 4;
id estRows task access object operator info
IndexReader_6 250.00 root index:IndexRangeScan_5
└─IndexRangeScan_5 250.00 cop[tikv] table:t, index:expr_idx_c(`a` + 1) range:[1,4], keep order:false, stats:pseudo
select a+1 from t where a+1 between 1 and 4;
a+1
1
1
2
3
desc select * from t where a+1 between 1 and 4;
id estRows task access object operator info
Projection_4 250.00 root test.t.a, test.t.b, test.t.c, test.t.e
└─IndexLookUp_10 250.00 root
├─IndexRangeScan_8(Build) 250.00 cop[tikv] table:t, index:expr_idx_c(`a` + 1) range:[1,4], keep order:false, stats:pseudo
└─TableRowIDScan_9(Probe) 250.00 cop[tikv] table:t keep order:false, stats:pseudo
select * from t where a+1 between 1 and 4;
a b c e
1 2 2 3
2 2.1 3 4.1
0 0 1 0
0 0 1 0
desc select * from t order by a+1;
id estRows task access object operator info
Projection_5 10000.00 root test.t.a, test.t.b, test.t.c, test.t.e
└─Projection_13 10000.00 root test.t.a, test.t.b, test.t.c, test.t.e, EMPTY_NAME
└─IndexLookUp_12 10000.00 root
├─IndexFullScan_10(Build) 10000.00 cop[tikv] table:t, index:expr_idx_c(`a` + 1) keep order:true, stats:pseudo
└─TableRowIDScan_11(Probe) 10000.00 cop[tikv] table:t keep order:false, stats:pseudo
select * from t order by a+1;
a b c e
-1 -2 0 -3
0 0 1 0
0 0 1 0
1 2 2 3
2 2.1 3 4.1
5 3 6 8
5 -1 6 4
desc select a+1 from t order by a+1;
id estRows task access object operator info
IndexReader_13 10000.00 root index:IndexFullScan_12
└─IndexFullScan_12 10000.00 cop[tikv] table:t, index:expr_idx_c(`a` + 1) keep order:true, stats:pseudo
select a+1 from t order by a+1;
a+1
0
1
1
2
3
6
6
desc select b+a from t order by b+a;
id estRows task access object operator info
IndexReader_13 10000.00 root index:IndexFullScan_12
└─IndexFullScan_12 10000.00 cop[tikv] table:t, index:expr_idx_e(`b` + `a`) keep order:true, stats:pseudo
select b+a from t order by b+a;
b+a
-3
0
0
3
4
4.1
8
desc update t set a=1 where a+1 = 3;
id estRows task access object operator info
Update_4 N/A root N/A
└─IndexLookUp_11 10.00 root
├─IndexRangeScan_9(Build) 10.00 cop[tikv] table:t, index:expr_idx_c(`a` + 1) range:[3,3], keep order:false, stats:pseudo
└─TableRowIDScan_10(Probe) 10.00 cop[tikv] table:t keep order:false, stats:pseudo
desc update t set a=2, b = 3 where b+a = 3;
id estRows task access object operator info
Update_4 N/A root N/A
└─IndexLookUp_11 10.00 root
├─IndexRangeScan_9(Build) 10.00 cop[tikv] table:t, index:expr_idx_e(`b` + `a`) range:[3,3], keep order:false, stats:pseudo
└─TableRowIDScan_10(Probe) 10.00 cop[tikv] table:t keep order:false, stats:pseudo
desc delete from t where a+1 = 3;
id estRows task access object operator info
Delete_4 N/A root N/A
└─IndexLookUp_11 10.00 root
├─IndexRangeScan_9(Build) 10.00 cop[tikv] table:t, index:expr_idx_c(`a` + 1) range:[3,3], keep order:false, stats:pseudo
└─TableRowIDScan_10(Probe) 10.00 cop[tikv] table:t keep order:false, stats:pseudo
desc delete from t where b+a = 0;
id estRows task access object operator info
Delete_4 N/A root N/A
└─IndexLookUp_11 10.00 root
├─IndexRangeScan_9(Build) 10.00 cop[tikv] table:t, index:expr_idx_e(`b` + `a`) range:[0,0], keep order:false, stats:pseudo
└─TableRowIDScan_10(Probe) 10.00 cop[tikv] table:t keep order:false, stats:pseudo
drop table if exists t;
create table t(c0 char as (c1), c1 int);
insert into t(c1) values (0), (1);
desc select * from t;
id estRows task access object operator info
TableReader_5 10000.00 root data:TableFullScan_4
└─TableFullScan_4 10000.00 cop[tikv] table:t keep order:false, stats:pseudo
select * from t;
c0 c1
0 0
1 1
drop table if exists t;
create table t(c0 int as (c1) unique, c1 int);
insert into t(c1) values (0), (1);
desc select * from t;
id estRows task access object operator info
Projection_3 10000.00 root test.t.c0, test.t.c0
└─IndexReader_7 10000.00 root index:IndexFullScan_6
└─IndexFullScan_6 10000.00 cop[tikv] table:t, index:c0(c0) keep order:false, stats:pseudo
select * from t;
c0 c1
0 0
1 1
drop table if exists t;
create table t(c0 char as (c1) unique, c1 int);
insert into t(c1) values (0), (1);
desc select * from t;
id estRows task access object operator info
TableReader_5 10000.00 root data:TableFullScan_4
└─TableFullScan_4 10000.00 cop[tikv] table:t keep order:false, stats:pseudo
select * from t;
c0 c1
0 0
1 1
drop table if exists t;
create table t(c0 int, c1 int as (c0+1) unique);
insert into t(c0) values (1),(2);
explain select c0+1 from t;
id estRows task access object operator info
IndexReader_7 10000.00 root index:IndexFullScan_6
└─IndexFullScan_6 10000.00 cop[tikv] table:t, index:c1(c1) keep order:false, stats:pseudo
select c0+1 from t;
c0+1
2
3
drop table if exists t;
create table t(c0 int, c1 int as (c0) unique);
insert into t(c0) values (1),(2);
explain select c1 from t where c1 = 2;
id estRows task access object operator info
Projection_4 1.00 root test.t.c1
└─Point_Get_5 1.00 root table:t, index:c1(c1)
select c1 from t where c1 = 2;
c1
2
drop table if exists t;
create table t(c0 double, c1 float as (c0+1) unique);
insert into t(c0) values (1.1),(2.2);
explain select c0+1 from t;
id estRows task access object operator info
Projection_3 10000.00 root plus(test.t.c0, 1)->Column#4
└─TableReader_5 10000.00 root data:TableFullScan_4
└─TableFullScan_4 10000.00 cop[tikv] table:t keep order:false, stats:pseudo
select c0+1 from t;
c0+1
2.1
3.2
drop table if exists t;
create table t(c0 float, c1 float as (c0+1) unique);
insert into t(c0) values (1.1),(2.2);
explain select c0+1 from t;
id estRows task access object operator info
Projection_3 10000.00 root plus(test.t.c0, 1)->Column#4
└─TableReader_5 10000.00 root data:TableFullScan_4
└─TableFullScan_4 10000.00 cop[tikv] table:t keep order:false, stats:pseudo
select c0+1 from t;
c0+1
2.100000023841858
3.200000047683716
drop table if exists t;
create table t(c0 int, c1 tinyint as (c0) unique);
insert into t(c0) values (1),(127);
desc select * from t;
id estRows task access object operator info
Projection_3 10000.00 root test.t.c1, test.t.c1
└─IndexReader_7 10000.00 root index:IndexFullScan_6
└─IndexFullScan_6 10000.00 cop[tikv] table:t, index:c1(c1) keep order:false, stats:pseudo
select * from t;
c0 c1
1 1
127 127
drop table if exists t;
create table t(c0 int, c1 year as (c0) unique);
insert into t(c0) values (48);
desc select * from t;
id estRows task access object operator info
TableReader_5 10000.00 root data:TableFullScan_4
└─TableFullScan_4 10000.00 cop[tikv] table:t keep order:false, stats:pseudo
select * from t;
c0 c1
48 2048
drop table t;
create table t(c0 varchar(10), c1 char(10) as (c0) unique);
insert into t(c0) values ("a ");
desc select * from t;
id estRows task access object operator info
TableReader_5 10000.00 root data:TableFullScan_4
└─TableFullScan_4 10000.00 cop[tikv] table:t keep order:false, stats:pseudo
select * from t;
c0 c1
a a
drop table if exists t;
create table t(c0 timestamp, c1 date as (c0) unique);
insert into t(c0) values('2038-01-19 03:14:07.999999');
desc select * from t;
id estRows task access object operator info
TableReader_5 10000.00 root data:TableFullScan_4
└─TableFullScan_4 10000.00 cop[tikv] table:t keep order:false, stats:pseudo
select * from t;
c0 c1
2038-01-19 03:14:08 2038-01-19
drop table t;
create table t(c0 decimal(5,3), c1 decimal(5,2) as (c0) unique);
insert into t(c0) values (3.1415926);
desc select * from t;
id estRows task access object operator info
TableReader_5 10000.00 root data:TableFullScan_4
└─TableFullScan_4 10000.00 cop[tikv] table:t keep order:false, stats:pseudo
select * from t;
c0 c1
3.142 3.14
drop table t;
create table t(c0 char(10), c1 binary(10) as (c0) unique);
select hex(c0) from (select c0 from t use index()) tt;
hex(c0)
drop table t;
create table t(c0 char(10), c1 binary(10) as (c0) unique);
select c0 from t use index();
c0
desc select c0 from t use index();
id estRows task access object operator info
TableReader_5 10000.00 root data:TableFullScan_4
└─TableFullScan_4 10000.00 cop[tikv] table:t keep order:false, stats:pseudo
select hex(c0) from (select c0 from t use index()) tt;
hex(c0)
desc select hex(c0) from (select c0 from t use index()) tt;
id estRows task access object operator info
Projection_4 10000.00 root hex(test.t.c0)->Column#4
└─TableReader_6 10000.00 root data:TableFullScan_5
└─TableFullScan_5 10000.00 cop[tikv] table:t keep order:false, stats:pseudo
drop table t;
create table t(a enum('1', '2', '3'), b enum('a', 'b', 'c') as (a) unique);
insert into t(a) values ('1');
desc select * from t;
id estRows task access object operator info
TableReader_5 10000.00 root data:TableFullScan_4
└─TableFullScan_4 10000.00 cop[tikv] table:t keep order:false, stats:pseudo
select * from t;
a b
1 a

128 cmd/explaintest/r/explain_indexmerge.result

@@ -0,0 +1,128 @@
drop table if exists t;
create table t (a int primary key, b int, c int, d int, e int, f int);
create index tb on t (b);
create index tc on t (c);
create index td on t (d);
load stats 's/explain_indexmerge_stats_t.json';
explain select * from t where a < 50 or b < 50;
id estRows task access object operator info
TableReader_7 4000000.00 root data:Selection_6
└─Selection_6 4000000.00 cop[tikv] or(lt(test.t.a, 50), lt(test.t.b, 50))
└─TableFullScan_5 5000000.00 cop[tikv] table:t keep order:false
explain select * from t where (a < 50 or b < 50) and f > 100;
id estRows task access object operator info
TableReader_7 4000000.00 root data:Selection_6
└─Selection_6 4000000.00 cop[tikv] gt(test.t.f, 100), or(lt(test.t.a, 50), lt(test.t.b, 50))
└─TableFullScan_5 5000000.00 cop[tikv] table:t keep order:false
explain select * from t where b < 50 or c < 50;
id estRows task access object operator info
TableReader_7 4000000.00 root data:Selection_6
└─Selection_6 4000000.00 cop[tikv] or(lt(test.t.b, 50), lt(test.t.c, 50))
└─TableFullScan_5 5000000.00 cop[tikv] table:t keep order:false
set session tidb_enable_index_merge = on;
explain select * from t where a < 50 or b < 50;
id estRows task access object operator info
IndexMerge_11 98.00 root
├─TableRangeScan_8(Build) 49.00 cop[tikv] table:t range:[-inf,50), keep order:false
├─IndexRangeScan_9(Build) 49.00 cop[tikv] table:t, index:tb(b) range:[-inf,50), keep order:false
└─TableRowIDScan_10(Probe) 98.00 cop[tikv] table:t keep order:false
explain select * from t where (a < 50 or b < 50) and f > 100;
id estRows task access object operator info
IndexMerge_12 98.00 root
├─TableRangeScan_8(Build) 49.00 cop[tikv] table:t range:[-inf,50), keep order:false
├─IndexRangeScan_9(Build) 49.00 cop[tikv] table:t, index:tb(b) range:[-inf,50), keep order:false
└─Selection_11(Probe) 98.00 cop[tikv] gt(test.t.f, 100)
└─TableRowIDScan_10 98.00 cop[tikv] table:t keep order:false
explain select * from t where a < 50 or b < 5000000;
id estRows task access object operator info
TableReader_7 4000000.00 root data:Selection_6
└─Selection_6 4000000.00 cop[tikv] or(lt(test.t.a, 50), lt(test.t.b, 5000000))
└─TableFullScan_5 5000000.00 cop[tikv] table:t keep order:false
explain select * from t where b < 50 or c < 50;
id estRows task access object operator info
IndexMerge_11 98.00 root
├─IndexRangeScan_8(Build) 49.00 cop[tikv] table:t, index:tb(b) range:[-inf,50), keep order:false
├─IndexRangeScan_9(Build) 49.00 cop[tikv] table:t, index:tc(c) range:[-inf,50), keep order:false
└─TableRowIDScan_10(Probe) 98.00 cop[tikv] table:t keep order:false
explain select * from t where b < 50 or c < 5000000;
id estRows task access object operator info
TableReader_7 4000000.00 root data:Selection_6
└─Selection_6 4000000.00 cop[tikv] or(lt(test.t.b, 50), lt(test.t.c, 5000000))
└─TableFullScan_5 5000000.00 cop[tikv] table:t keep order:false
explain select * from t where a < 50 or b < 50 or c < 50;
id estRows task access object operator info
IndexMerge_12 147.00 root
├─TableRangeScan_8(Build) 49.00 cop[tikv] table:t range:[-inf,50), keep order:false
├─IndexRangeScan_9(Build) 49.00 cop[tikv] table:t, index:tb(b) range:[-inf,50), keep order:false
├─IndexRangeScan_10(Build) 49.00 cop[tikv] table:t, index:tc(c) range:[-inf,50), keep order:false
└─TableRowIDScan_11(Probe) 147.00 cop[tikv] table:t keep order:false
explain select * from t where (b < 10000 or c < 10000) and (a < 10 or d < 10) and f < 10;
id estRows task access object operator info
IndexMerge_17 0.00 root
├─TableRangeScan_13(Build) 9.00 cop[tikv] table:t range:[-inf,10), keep order:false
├─IndexRangeScan_14(Build) 9.00 cop[tikv] table:t, index:td(d) range:[-inf,10), keep order:false
└─Selection_16(Probe) 0.00 cop[tikv] lt(test.t.f, 10), or(lt(test.t.b, 10000), lt(test.t.c, 10000))
└─TableRowIDScan_15 18.00 cop[tikv] table:t keep order:false
explain format="dot" select * from t where (a < 50 or b < 50) and f > 100;
dot contents
digraph IndexMerge_12 {
subgraph cluster12{
node [style=filled, color=lightgrey]
color=black
label = "root"
"IndexMerge_12"
}
subgraph cluster8{
node [style=filled, color=lightgrey]
color=black
label = "cop"
"TableRangeScan_8"
}
subgraph cluster9{
node [style=filled, color=lightgrey]
color=black
label = "cop"
"IndexRangeScan_9"
}
subgraph cluster11{
node [style=filled, color=lightgrey]
color=black
label = "cop"
"Selection_11" -> "TableRowIDScan_10"
}
"IndexMerge_12" -> "TableRangeScan_8"
"IndexMerge_12" -> "IndexRangeScan_9"
"IndexMerge_12" -> "Selection_11"
}
set session tidb_enable_index_merge = off;
explain select /*+ use_index_merge(t, tb, tc) */ * from t where b < 50 or c < 5000000;
id estRows task access object operator info
IndexMerge_8 5000000.00 root
├─IndexRangeScan_5(Build) 49.00 cop[tikv] table:t, index:tb(b) range:[-inf,50), keep order:false
├─IndexRangeScan_6(Build) 4999999.00 cop[tikv] table:t, index:tc(c) range:[-inf,5000000), keep order:false
└─TableRowIDScan_7(Probe) 5000000.00 cop[tikv] table:t keep order:false
explain select /*+ use_index_merge(t, tb, tc) */ * from t where (b < 10000 or c < 10000) and (a < 10 or d < 10) and f < 10;
id estRows task access object operator info
IndexMerge_9 0.00 root
├─IndexRangeScan_5(Build) 9999.00 cop[tikv] table:t, index:tb(b) range:[-inf,10000), keep order:false
├─IndexRangeScan_6(Build) 9999.00 cop[tikv] table:t, index:tc(c) range:[-inf,10000), keep order:false
└─Selection_8(Probe) 0.00 cop[tikv] lt(test.t.f, 10), or(lt(test.t.a, 10), lt(test.t.d, 10))
└─TableRowIDScan_7 19998.00 cop[tikv] table:t keep order:false
explain select /*+ use_index_merge(t, tb) */ * from t where b < 50 or c < 5000000;
id estRows task access object operator info
TableReader_7 4000000.00 root data:Selection_6
└─Selection_6 4000000.00 cop[tikv] or(lt(test.t.b, 50), lt(test.t.c, 5000000))
└─TableFullScan_5 5000000.00 cop[tikv] table:t keep order:false
explain select /*+ no_index_merge(), use_index_merge(t, tb, tc) */ * from t where b < 50 or c < 5000000;
id estRows task access object operator info
TableReader_7 4000000.00 root data:Selection_6
└─Selection_6 4000000.00 cop[tikv] or(lt(test.t.b, 50), lt(test.t.c, 5000000))
└─TableFullScan_5 5000000.00 cop[tikv] table:t keep order:false
explain select /*+ use_index_merge(t, primary, tb) */ * from t where a < 50 or b < 5000000;
id estRows task access object operator info
IndexMerge_8 5000000.00 root
├─TableRangeScan_5(Build) 49.00 cop[tikv] table:t range:[-inf,50), keep order:false
├─IndexRangeScan_6(Build) 4999999.00 cop[tikv] table:t, index:tb(b) range:[-inf,5000000), keep order:false
└─TableRowIDScan_7(Probe) 5000000.00 cop[tikv] table:t keep order:false

27 cmd/explaintest/r/explain_join_stats.result

@@ -0,0 +1,27 @@
use test;
drop table if exists e, lo;
create table e(a int, b int, key idx_a(a), key idx_b(b)) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin;
load stats 's/explain_join_stats_e.json';
create table lo(a int(11) NOT NULL AUTO_INCREMENT, PRIMARY KEY (a)) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin AUTO_INCREMENT=30002;
load stats 's/explain_join_stats_lo.json';
explain select count(*) from e, lo where lo.a=e.a and e.b=22336;
id estRows task access object operator info
StreamAgg_13 1.00 root funcs:count(1)->Column#5
└─HashJoin_65 19977.00 root inner join, equal:[eq(test.lo.a, test.e.a)]
├─TableReader_38(Build) 250.00 root data:TableFullScan_37
│ └─TableFullScan_37 250.00 cop[tikv] table:lo keep order:false
└─IndexLookUp_49(Probe) 19977.00 root
├─IndexRangeScan_46(Build) 19977.00 cop[tikv] table:e, index:idx_b(b) range:[22336,22336], keep order:false
└─Selection_48(Probe) 19977.00 cop[tikv] not(isnull(test.e.a))
└─TableRowIDScan_47 19977.00 cop[tikv] table:e keep order:false
explain select /*+ TIDB_INLJ(e) */ count(*) from e, lo where lo.a=e.a and e.b=22336;
id estRows task access object operator info
StreamAgg_12 1.00 root funcs:count(1)->Column#5
└─IndexJoin_40 19977.00 root inner join, inner:IndexLookUp_39, outer key:test.lo.a, inner key:test.e.a, equal cond:eq(test.lo.a, test.e.a)
├─TableReader_28(Build) 250.00 root data:TableFullScan_27
│ └─TableFullScan_27 250.00 cop[tikv] table:lo keep order:false
└─IndexLookUp_39(Probe) 79.91 root
├─Selection_37(Build) 4080.00 cop[tikv] not(isnull(test.e.a))
│ └─IndexRangeScan_35 4080.00 cop[tikv] table:e, index:idx_a(a) range: decided by [eq(test.e.a, test.lo.a)], keep order:false
└─Selection_38(Probe) 79.91 cop[tikv] eq(test.e.b, 22336)
└─TableRowIDScan_36 4080.00 cop[tikv] table:e keep order:false
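Note: with the loaded statistics the optimizer chooses a hash join on its own, while the TIDB_INLJ(e) hint makes e the inner side of an index nested-loop join, probing idx_a once per outer row from lo and filtering b = 22336 afterwards. A minimal sketch of the hint:
-- e is the hinted inner (index-probed) side; lo drives the join.
EXPLAIN SELECT /*+ TIDB_INLJ(e) */ count(*) FROM e, lo WHERE lo.a = e.a AND e.b = 22336;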

25
cmd/explaintest/r/explain_stats.result

@@ -0,0 +1,25 @@
drop table if exists t;
create table t (id int, c1 timestamp);
load stats 's/explain_stats_t.json';
show columns from t;
Field Type Null Key Default Extra
id int(11) YES NULL
c1 timestamp YES NULL
explain t;
Field Type Null Key Default Extra
id int(11) YES NULL
c1 timestamp YES NULL
describe t;
Field Type Null Key Default Extra
id int(11) YES NULL
c1 timestamp YES NULL
desc t;
Field Type Null Key Default Extra
id int(11) YES NULL
c1 timestamp YES NULL
desc t c1;
Field Type Null Key Default Extra
c1 timestamp YES NULL
desc t id;
Field Type Null Key Default Extra
id int(11) YES NULL
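Note: as the identical outputs above show, EXPLAIN <table>, DESCRIBE <table>, and DESC <table> are synonyms for SHOW COLUMNS FROM <table>, and DESC <table> <column> restricts the output to that one column:
SHOW COLUMNS FROM t;
EXPLAIN t;   -- same output as SHOW COLUMNS FROM t
DESC t c1;   -- only the c1 row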

31
cmd/explaintest/r/explain_union_scan.result

@@ -0,0 +1,31 @@
drop table if exists city;
CREATE TABLE `city` (
`id` varchar(70) NOT NULL,
`province_id` int(15) DEFAULT NULL,
`city_name` varchar(90) DEFAULT NULL,
`description` varchar(90) DEFAULT NULL,
PRIMARY KEY (`id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin;
load stats "s/explain_union_scan.json";
insert into city values("06766b3ef41d484d8878606393f1ed0b", 88, "chongqing", "chongqing city");
begin;
update city set province_id = 77 where id="06766b3ef41d484d8878606393f1ed0b";
explain select t1.*, t2.province_id as provinceID, t2.city_name as cityName, t3.description as description from city t1 inner join city t2 on t1.id = t2.id left join city t3 on t1.province_id = t3.province_id where t1.province_id > 1 and t1.province_id < 100 limit 10;
id estRows task access object operator info
Limit_20 10.00 root offset:0, count:10
└─HashJoin_22 10.00 root left outer join, equal:[eq(test.city.province_id, test.city.province_id)]
├─Limit_25(Build) 10.00 root offset:0, count:10
│ └─IndexJoin_38 10.00 root inner join, inner:UnionScan_37, outer key:test.city.id, inner key:test.city.id, equal cond:eq(test.city.id, test.city.id)
│ ├─UnionScan_47(Build) 10.00 root
│ │ └─TableReader_49 10.00 root data:TableFullScan_48
│ │ └─TableFullScan_48 10.00 cop[tikv] table:t2 keep order:false
│ └─UnionScan_37(Probe) 1.00 root gt(test.city.province_id, 1), lt(test.city.province_id, 100)
│ └─IndexLookUp_36 1.00 root
│ ├─IndexRangeScan_33(Build) 1.00 cop[tikv] table:t1, index:PRIMARY(id) range: decided by [eq(test.city.id, test.city.id)], keep order:false
│ └─Selection_35(Probe) 1.00 cop[tikv] gt(test.city.province_id, 1), lt(test.city.province_id, 100)
│ └─TableRowIDScan_34 1.00 cop[tikv] table:t1 keep order:false
└─UnionScan_57(Probe) 536284.00 root gt(test.city.province_id, 1), lt(test.city.province_id, 100), not(isnull(test.city.province_id))
└─TableReader_60 536284.00 root data:Selection_59
└─Selection_59 536284.00 cop[tikv] gt(test.city.province_id, 1), lt(test.city.province_id, 100), not(isnull(test.city.province_id))
└─TableFullScan_58 536284.00 cop[tikv] table:t3 keep order:false
commit;
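Note: inside an explicit transaction, reads must merge the transaction's uncommitted writes with the data already in TiKV, which is what the UnionScan operators above represent; the filter conditions are repeated on each UnionScan so the in-memory rows pass through the same predicates. A minimal sketch of the pattern:
BEGIN;
UPDATE city SET province_id = 77 WHERE id = '06766b3ef41d484d8878606393f1ed0b';
EXPLAIN SELECT * FROM city WHERE province_id > 1 AND province_id < 100;  -- UnionScan appears
COMMIT;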

233
cmd/explaintest/r/generated_columns.result

@@ -0,0 +1,233 @@
DROP TABLE IF EXISTS person;
CREATE TABLE person (
id INT NOT NULL AUTO_INCREMENT PRIMARY KEY,
name VARCHAR(255) NOT NULL,
address_info JSON,
city VARCHAR(64) AS (JSON_UNQUOTE(JSON_EXTRACT(address_info, '$.city'))) STORED,
KEY (city)
);
EXPLAIN SELECT name, id FROM person WHERE city = 'Beijing';
id estRows task access object operator info
Projection_4 10.00 root test.person.name, test.person.id
└─IndexLookUp_10 10.00 root
├─IndexRangeScan_8(Build) 10.00 cop[tikv] table:person, index:city(city) range:["Beijing","Beijing"], keep order:false, stats:pseudo
└─TableRowIDScan_9(Probe) 10.00 cop[tikv] table:person keep order:false, stats:pseudo
DROP TABLE IF EXISTS `sgc`;
CREATE TABLE `sgc` (
`j1` JSON DEFAULT NULL,
`j2` JSON DEFAULT NULL,
`a` int(11) GENERATED ALWAYS AS (JSON_EXTRACT(`j1`, "$.a")) STORED,
`b` int(2) GENERATED ALWAYS AS (JSON_CONTAINS(j2, '1')) STORED,
KEY `idx_a` (`a`),
KEY `idx_b` (`b`),
KEY `idx_a_b` (`a`,`b`)
);
EXPLAIN SELECT a FROM sgc where a < 3;
id estRows task access object operator info
IndexReader_6 3323.33 root index:IndexRangeScan_5
└─IndexRangeScan_5 3323.33 cop[tikv] table:sgc, index:idx_a(a) range:[-inf,3), keep order:false, stats:pseudo
EXPLAIN SELECT a, b FROM sgc where a < 3;
id estRows task access object operator info
IndexReader_6 3323.33 root index:IndexRangeScan_5
└─IndexRangeScan_5 3323.33 cop[tikv] table:sgc, index:idx_a_b(a, b) range:[-inf,3), keep order:false, stats:pseudo
EXPLAIN SELECT a, b from sgc where b < 3;
id estRows task access object operator info
IndexReader_13 3323.33 root index:Selection_12
└─Selection_12 3323.33 cop[tikv] lt(test.sgc.b, 3)
└─IndexFullScan_11 10000.00 cop[tikv] table:sgc, index:idx_a_b(a, b) keep order:false, stats:pseudo
EXPLAIN SELECT a, b from sgc where a < 3 and b < 3;
id estRows task access object operator info
IndexReader_11 1104.45 root index:Selection_10
└─Selection_10 1104.45 cop[tikv] lt(test.sgc.b, 3)
└─IndexRangeScan_9 3323.33 cop[tikv] table:sgc, index:idx_a_b(a, b) range:[-inf,3), keep order:false, stats:pseudo
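Note: predicates on STORED generated columns are matched to indexes like ordinary columns: a alone takes idx_a, (a, b) prefers the composite idx_a_b, and a condition on b alone is evaluated as a Selection over a covering IndexFullScan of idx_a_b because b is not a prefix of that index. A minimal sketch, using a hypothetical table sgc_x:
CREATE TABLE sgc_x (
j JSON,
a INT AS (JSON_EXTRACT(j, "$.a")) STORED,
b INT AS (JSON_CONTAINS(j, '1')) STORED,
KEY idx_a_b (a, b)
);
EXPLAIN SELECT a, b FROM sgc_x WHERE a < 3;  -- IndexRangeScan on idx_a_b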
DROP TABLE IF EXISTS sgc1,
sgc2;
CREATE TABLE `sgc1` (
`j1` JSON,
`j2` JSON,
`a` INT AS (JSON_EXTRACT(j1, "$.a")) STORED,
`b` VARCHAR(20) AS (JSON_KEYS(j2)) STORED,
KEY `idx_a` (`a`),
KEY `idx_b` (`b`),
KEY `idx_a_b` (`a`, `b`)
);
CREATE TABLE `sgc2` (
`j1` JSON,
`j2` JSON,
`a` INT AS (JSON_EXTRACT(j1, "$.a")) STORED,
`b` VARCHAR(20) AS (JSON_KEYS(j2)) STORED,
KEY `idx_a` (`a`),
KEY `idx_b` (`b`),
KEY `idx_a_b` (`a`, `b`)
);
INSERT INTO sgc1(j1, j2)
VALUES ('{"a": 1}', '{"1": "1"}'),
('{"a": 1}', '{"1": "1"}'),
('{"a": 1}', '{"1": "1"}'),
('{"a": 1}', '{"1": "1"}'),
('{"a": 1}', '{"1": "1"}');
INSERT INTO sgc2(j1, j2)
VALUES ('{"a": 1}', '{"1": "1"}');
ANALYZE TABLE sgc1, sgc2;
EXPLAIN SELECT /*+ TIDB_INLJ(sgc1, sgc2) */ * from sgc1 join sgc2 on sgc1.a=sgc2.a;
id estRows task access object operator info
IndexJoin_19 5.00 root inner join, inner:IndexLookUp_18, outer key:test.sgc2.a, inner key:test.sgc1.a, equal cond:eq(test.sgc2.a, test.sgc1.a)
├─TableReader_33(Build) 1.00 root data:Selection_32
│ └─Selection_32 1.00 cop[tikv] not(isnull(test.sgc2.a))
│ └─TableFullScan_31 1.00 cop[tikv] table:sgc2 keep order:false
└─IndexLookUp_18(Probe) 5.00 root
├─Selection_17(Build) 5.00 cop[tikv] not(isnull(test.sgc1.a))
│ └─IndexRangeScan_15 5.00 cop[tikv] table:sgc1, index:idx_a(a) range: decided by [eq(test.sgc1.a, test.sgc2.a)], keep order:false
└─TableRowIDScan_16(Probe) 5.00 cop[tikv] table:sgc1 keep order:false
EXPLAIN SELECT * from sgc1 join sgc2 on sgc1.a=sgc2.a;
id estRows task access object operator info
Projection_6 5.00 root test.sgc1.j1, test.sgc1.j2, test.sgc1.a, test.sgc1.b, test.sgc2.j1, test.sgc2.j2, test.sgc2.a, test.sgc2.b
└─HashJoin_24 5.00 root inner join, equal:[eq(test.sgc2.a, test.sgc1.a)]
├─TableReader_43(Build) 1.00 root data:Selection_42
│ └─Selection_42 1.00 cop[tikv] not(isnull(test.sgc2.a))
│ └─TableFullScan_41 1.00 cop[tikv] table:sgc2 keep order:false
└─TableReader_52(Probe) 5.00 root data:Selection_51
└─Selection_51 5.00 cop[tikv] not(isnull(test.sgc1.a))
└─TableFullScan_50 5.00 cop[tikv] table:sgc1 keep order:false
DROP TABLE IF EXISTS sgc3;
CREATE TABLE sgc3 (
j JSON,
a INT AS (JSON_EXTRACT(j, "$.a")) STORED
)
PARTITION BY RANGE (a) (
PARTITION p0 VALUES LESS THAN (1),
PARTITION p1 VALUES LESS THAN (2),
PARTITION p2 VALUES LESS THAN (3),
PARTITION p3 VALUES LESS THAN (4),
PARTITION p4 VALUES LESS THAN (5),
PARTITION p5 VALUES LESS THAN (6),
PARTITION max VALUES LESS THAN MAXVALUE);
EXPLAIN SELECT * FROM sgc3 WHERE a <= 1;
id estRows task access object operator info
PartitionUnion_8 6646.67 root
├─TableReader_11 3323.33 root data:Selection_10
│ └─Selection_10 3323.33 cop[tikv] le(test.sgc3.a, 1)
│ └─TableFullScan_9 10000.00 cop[tikv] table:sgc3, partition:p0 keep order:false, stats:pseudo
└─TableReader_14 3323.33 root data:Selection_13
└─Selection_13 3323.33 cop[tikv] le(test.sgc3.a, 1)
└─TableFullScan_12 10000.00 cop[tikv] table:sgc3, partition:p1 keep order:false, stats:pseudo
EXPLAIN SELECT * FROM sgc3 WHERE a < 7;
id estRows task access object operator info
PartitionUnion_13 23263.33 root
├─TableReader_16 3323.33 root data:Selection_15
│ └─Selection_15 3323.33 cop[tikv] lt(test.sgc3.a, 7)
│ └─TableFullScan_14 10000.00 cop[tikv] table:sgc3, partition:p0 keep order:false, stats:pseudo
├─TableReader_19 3323.33 root data:Selection_18
│ └─Selection_18 3323.33 cop[tikv] lt(test.sgc3.a, 7)
│ └─TableFullScan_17 10000.00 cop[tikv] table:sgc3, partition:p1 keep order:false, stats:pseudo
├─TableReader_22 3323.33 root data:Selection_21
│ └─Selection_21 3323.33 cop[tikv] lt(test.sgc3.a, 7)
│ └─TableFullScan_20 10000.00 cop[tikv] table:sgc3, partition:p2 keep order:false, stats:pseudo
├─TableReader_25 3323.33 root data:Selection_24
│ └─Selection_24 3323.33 cop[tikv] lt(test.sgc3.a, 7)
│ └─TableFullScan_23 10000.00 cop[tikv] table:sgc3, partition:p3 keep order:false, stats:pseudo
├─TableReader_28 3323.33 root data:Selection_27
│ └─Selection_27 3323.33 cop[tikv] lt(test.sgc3.a, 7)
│ └─TableFullScan_26 10000.00 cop[tikv] table:sgc3, partition:p4 keep order:false, stats:pseudo
├─TableReader_31 3323.33 root data:Selection_30
│ └─Selection_30 3323.33 cop[tikv] lt(test.sgc3.a, 7)
│ └─TableFullScan_29 10000.00 cop[tikv] table:sgc3, partition:p5 keep order:false, stats:pseudo
└─TableReader_34 3323.33 root data:Selection_33
└─Selection_33 3323.33 cop[tikv] lt(test.sgc3.a, 7)
└─TableFullScan_32 10000.00 cop[tikv] table:sgc3, partition:max keep order:false, stats:pseudo
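Note: the two plans above demonstrate partition pruning on a table range-partitioned by a STORED generated column: a <= 1 reads only p0 and p1, while a < 7 spans every partition, so the PartitionUnion fans out over all of them:
EXPLAIN SELECT * FROM sgc3 WHERE a <= 1;  -- PartitionUnion over p0 and p1 only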
DROP TABLE IF EXISTS t1;
CREATE TABLE t1(a INT, b INT AS (a+1) VIRTUAL, c INT AS (b+1) VIRTUAL, d INT AS (c+1) VIRTUAL, KEY(b), INDEX IDX(c, d));
INSERT INTO t1 (a) VALUES (0);
EXPLAIN SELECT b FROM t1 WHERE b=1;
id estRows task access object operator info
IndexReader_6 10.00 root index:IndexRangeScan_5
└─IndexRangeScan_5 10.00 cop[tikv] table:t1, index:b(b) range:[1,1], keep order:false, stats:pseudo
EXPLAIN SELECT b, c, d FROM t1 WHERE b=1;
id estRows task access object operator info
Projection_4 10.00 root test.t1.b, test.t1.c, test.t1.d
└─IndexLookUp_10 10.00 root
├─IndexRangeScan_8(Build) 10.00 cop[tikv] table:t1, index:b(b) range:[1,1], keep order:false, stats:pseudo
└─TableRowIDScan_9(Probe) 10.00 cop[tikv] table:t1 keep order:false, stats:pseudo
EXPLAIN SELECT * FROM t1 WHERE b=1;
id estRows task access object operator info
IndexLookUp_10 10.00 root
├─IndexRangeScan_8(Build) 10.00 cop[tikv] table:t1, index:b(b) range:[1,1], keep order:false, stats:pseudo
└─TableRowIDScan_9(Probe) 10.00 cop[tikv] table:t1 keep order:false, stats:pseudo
EXPLAIN SELECT c FROM t1 WHERE c=2 AND d=3;
id estRows task access object operator info
Projection_4 0.10 root test.t1.c
└─IndexReader_6 0.10 root index:IndexRangeScan_5
└─IndexRangeScan_5 0.10 cop[tikv] table:t1, index:IDX(c, d) range:[2 3,2 3], keep order:false, stats:pseudo
DROP TABLE IF EXISTS person;
CREATE TABLE person (
id INT NOT NULL AUTO_INCREMENT PRIMARY KEY,
name VARCHAR(255) NOT NULL,
address_info JSON,
city_no INT AS (JSON_EXTRACT(address_info, '$.city_no')) VIRTUAL,
KEY(city_no));
INSERT INTO person (name, address_info) VALUES ("John", CAST('{"city_no": 1}' AS JSON));
EXPLAIN SELECT name FROM person where city_no=1;
id estRows task access object operator info
Projection_4 10.00 root test.person.name
└─IndexLookUp_10 10.00 root
├─IndexRangeScan_8(Build) 10.00 cop[tikv] table:person, index:city_no(city_no) range:[1,1], keep order:false, stats:pseudo
└─TableRowIDScan_9(Probe) 10.00 cop[tikv] table:person keep order:false, stats:pseudo
DROP TABLE IF EXISTS t1;
CREATE TABLE t1 (a INT,
b INT GENERATED ALWAYS AS (-a) VIRTUAL,
c INT GENERATED ALWAYS AS (-a) STORED,
index (c));
INSERT INTO t1 (a) VALUES (2), (1), (1), (3), (NULL);
EXPLAIN SELECT sum(a) FROM t1 GROUP BY b;
id estRows task access object operator info
HashAgg_5 8000.00 root group by:Column#7, funcs:sum(Column#6)->Column#5
└─Projection_12 10000.00 root cast(test.t1.a, decimal(65,0) BINARY)->Column#6, test.t1.b
└─TableReader_9 10000.00 root data:TableFullScan_8
└─TableFullScan_8 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo
EXPLAIN SELECT sum(a) FROM t1 GROUP BY c;
id estRows task access object operator info
HashAgg_11 8000.00 root group by:test.t1.c, funcs:sum(Column#6)->Column#5
└─TableReader_12 8000.00 root data:HashAgg_5
└─HashAgg_5 8000.00 cop[tikv] group by:test.t1.c, funcs:sum(test.t1.a)->Column#6
└─TableFullScan_10 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo
EXPLAIN SELECT sum(b) FROM t1 GROUP BY a;
id estRows task access object operator info
HashAgg_5 8000.00 root group by:Column#7, funcs:sum(Column#6)->Column#5
└─Projection_12 10000.00 root cast(test.t1.b, decimal(65,0) BINARY)->Column#6, test.t1.a
└─TableReader_9 10000.00 root data:TableFullScan_8
└─TableFullScan_8 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo
EXPLAIN SELECT sum(b) FROM t1 GROUP BY c;
id estRows task access object operator info
HashAgg_5 8000.00 root group by:Column#9, funcs:sum(Column#8)->Column#5
└─Projection_18 10000.00 root cast(test.t1.b, decimal(65,0) BINARY)->Column#8, test.t1.c
└─TableReader_11 10000.00 root data:TableFullScan_10
└─TableFullScan_10 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo
EXPLAIN SELECT sum(c) FROM t1 GROUP BY a;
id estRows task access object operator info
HashAgg_9 8000.00 root group by:test.t1.a, funcs:sum(Column#6)->Column#5
└─TableReader_10 8000.00 root data:HashAgg_5
└─HashAgg_5 8000.00 cop[tikv] group by:test.t1.a, funcs:sum(test.t1.c)->Column#6
└─TableFullScan_8 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo
EXPLAIN SELECT sum(c) FROM t1 GROUP BY b;
id estRows task access object operator info
HashAgg_5 8000.00 root group by:Column#7, funcs:sum(Column#6)->Column#5
└─Projection_12 10000.00 root cast(test.t1.c, decimal(65,0) BINARY)->Column#6, test.t1.b
└─TableReader_9 10000.00 root data:TableFullScan_8
└─TableFullScan_8 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo
DROP TABLE IF EXISTS tu;
CREATE TABLE tu (a INT, b INT, c INT GENERATED ALWAYS AS (a + b) VIRTUAL, primary key (a), unique key uk(c));
INSERT INTO tu(a, b) VALUES(1, 2);
EXPLAIN SELECT * FROM tu WHERE c = 1;
id estRows task access object operator info
Point_Get_5 1.00 root table:tu, index:uk(c)
EXPLAIN SELECT a, c FROM tu WHERE c = 1;
id estRows task access object operator info
Projection_4 1.00 root test.tu.a, test.tu.c
└─Point_Get_5 1.00 root table:tu, index:uk(c)
EXPLAIN SELECT * FROM tu WHERE c in(1, 2, 3);
id estRows task access object operator info
Batch_Point_Get_5 3.00 root table:tu, index:uk(c) keep order:false, desc:false
EXPLAIN SELECT c, a FROM tu WHERE c in(1, 2, 3);
id estRows task access object operator info
Projection_4 3.00 root test.tu.c, test.tu.a
└─Batch_Point_Get_5 3.00 root table:tu, index:uk(c) keep order:false, desc:false
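Note: a unique index on a VIRTUAL generated column supports Point_Get and Batch_Point_Get, so equality and IN lookups on the expression's value take the fast plan path; a Projection appears only when the SELECT list is not the full row in table order:
EXPLAIN SELECT * FROM tu WHERE c = 1;           -- Point_Get on uk(c)
EXPLAIN SELECT * FROM tu WHERE c IN (1, 2, 3);  -- Batch_Point_Get on uk(c)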

54
cmd/explaintest/r/index_join.result

@@ -0,0 +1,54 @@
drop table if exists t1, t2;
create table t1(a bigint, b bigint, index idx(a));
create table t2(a bigint, b bigint, index idx(a));
insert into t1 values(1, 1), (1, 1), (1, 1), (1, 1), (1, 1);
insert into t2 values(1, 1);
analyze table t1, t2;
set session tidb_hashagg_partial_concurrency = 1;
set session tidb_hashagg_final_concurrency = 1;
explain select /*+ TIDB_INLJ(t1, t2) */ * from t1 join t2 on t1.a=t2.a;
id estRows task access object operator info
IndexJoin_18 5.00 root inner join, inner:IndexLookUp_17, outer key:test.t2.a, inner key:test.t1.a, equal cond:eq(test.t2.a, test.t1.a)
├─TableReader_29(Build) 1.00 root data:Selection_28
│ └─Selection_28 1.00 cop[tikv] not(isnull(test.t2.a))
│ └─TableFullScan_27 1.00 cop[tikv] table:t2 keep order:false
└─IndexLookUp_17(Probe) 5.00 root
├─Selection_16(Build) 5.00 cop[tikv] not(isnull(test.t1.a))
│ └─IndexRangeScan_14 5.00 cop[tikv] table:t1, index:idx(a) range: decided by [eq(test.t1.a, test.t2.a)], keep order:false
└─TableRowIDScan_15(Probe) 5.00 cop[tikv] table:t1 keep order:false
explain select * from t1 join t2 on t1.a=t2.a;
id estRows task access object operator info
Projection_6 5.00 root test.t1.a, test.t1.b, test.t2.a, test.t2.b
└─HashJoin_23 5.00 root inner join, equal:[eq(test.t2.a, test.t1.a)]
├─TableReader_34(Build) 1.00 root data:Selection_33
│ └─Selection_33 1.00 cop[tikv] not(isnull(test.t2.a))
│ └─TableFullScan_32 1.00 cop[tikv] table:t2 keep order:false
└─TableReader_40(Probe) 5.00 root data:Selection_39
└─Selection_39 5.00 cop[tikv] not(isnull(test.t1.a))
└─TableFullScan_38 5.00 cop[tikv] table:t1 keep order:false
drop table if exists t1, t2;
create table t1(a int not null, b int not null);
create table t2(a int not null, b int not null, key a(a));
set @@tidb_opt_insubq_to_join_and_agg=0;
explain select /*+ TIDB_INLJ(t2@sel_2) */ * from t1 where t1.a in (select t2.a from t2);
id estRows task access object operator info
IndexJoin_10 8000.00 root semi join, inner:IndexReader_9, outer key:test.t1.a, inner key:test.t2.a, equal cond:eq(test.t1.a, test.t2.a)
├─TableReader_14(Build) 10000.00 root data:TableFullScan_13
│ └─TableFullScan_13 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo
└─IndexReader_9(Probe) 1.25 root index:IndexRangeScan_8
└─IndexRangeScan_8 1.25 cop[tikv] table:t2, index:a(a) range: decided by [eq(test.t2.a, test.t1.a)], keep order:false, stats:pseudo
show warnings;
Level Code Message
set @@tidb_opt_insubq_to_join_and_agg=1;
drop table if exists t1, t2;
create table t1(a int not null, b int not null, key a(a));
create table t2(a int not null, b int not null, key a(a));
explain select /*+ TIDB_INLJ(t1) */ * from t1 where t1.a in (select t2.a from t2);
id estRows task access object operator info
IndexJoin_13 10000.00 root inner join, inner:IndexLookUp_12, outer key:test.t2.a, inner key:test.t1.a, equal cond:eq(test.t2.a, test.t1.a)
├─StreamAgg_20(Build) 8000.00 root group by:test.t2.a, funcs:firstrow(test.t2.a)->test.t2.a
│ └─IndexReader_33 10000.00 root index:IndexFullScan_32
│ └─IndexFullScan_32 10000.00 cop[tikv] table:t2, index:a(a) keep order:true, stats:pseudo
└─IndexLookUp_12(Probe) 1.25 root
├─IndexRangeScan_10(Build) 1.25 cop[tikv] table:t1, index:a(a) range: decided by [eq(test.t1.a, test.t2.a)], keep order:false, stats:pseudo
└─TableRowIDScan_11(Probe) 1.25 cop[tikv] table:t1 keep order:false, stats:pseudo
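Note: tidb_opt_insubq_to_join_and_agg controls how an IN subquery is planned: with it set to 0 the plan keeps a semi join (above, an index join probing t2's index per outer row), and with it set to 1 the subquery side is deduplicated first (the StreamAgg with firstrow grouped on t2.a) and then joined as an ordinary inner join. A minimal sketch of flipping the switch:
SET @@tidb_opt_insubq_to_join_and_agg = 1;
EXPLAIN SELECT /*+ TIDB_INLJ(t1) */ * FROM t1 WHERE t1.a IN (SELECT t2.a FROM t2);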

4386
cmd/explaintest/r/partition_pruning.result

File diff suppressed because it is too large

482
cmd/explaintest/r/select.result

@@ -0,0 +1,482 @@
DROP TABLE IF EXISTS t;
CREATE TABLE t (
c1 int,
c2 int,
c3 int,
PRIMARY KEY (c1)
);
INSERT INTO t VALUES (1,2,3);
set session tidb_hashagg_partial_concurrency = 1;
set session tidb_hashagg_final_concurrency = 1;
SELECT * from t;
c1 c2 c3
1 2 3
SELECT c1, c2, c3 from t;
c1 c2 c3
1 2 3
SELECT c1, c1 from t;
c1 c1
1 1
SELECT c1 as a, c2 as a from t;
a a
1 2
SELECT 1;
1
1
SELECT 1, 1;
1 1
1 1
SET @@autocommit = 1;
SELECT @@autocommit;
@@autocommit
1
SELECT @@autocommit, @@autocommit;
@@autocommit @@autocommit
1 1
SET @a = 10;
SET @b = 11;
SELECT @a, @@autocommit;
@a @@autocommit
10 1
SELECT @a, @b;
@a @b
10 11
SELECT 1, @a;
1 @a
1 10
SELECT 1, @a as a;
1 a
1 10
SELECT 1, @a, @@autocommit as a, c1 from t;
1 @a a c1
1 10 1 1
SET @b = "123";
SELECT @b + "123";
@b + "123"
246
SELECT 1 + 1;
1 + 1
2
SELECT 1 a, 1 as a, 1 + 1 a;
a a a
1 1 2
SELECT c1 a, c1 as a from t;
a a
1 1
SELECT * from t LIMIT 0,1;
c1 c2 c3
1 2 3
SELECT * from t LIMIT 1;
c1 c2 c3
1 2 3
SELECT * from t LIMIT 1,1;
c1 c2 c3
SELECT * from t LIMIT 1 OFFSET 0;
c1 c2 c3
1 2 3
DROP TABLE IF EXISTS t2;
CREATE TABLE t2 (
c1 int,
c2 int,
PRIMARY KEY (c1)
);
INSERT INTO t2 VALUES (1,2);
SELECT * from t a;
c1 c2 c3
1 2 3
SELECT * from t a, t2 b;
c1 c2 c3 c1 c2
1 2 3 1 2
SELECT * from t as a, t2 as b;
c1 c2 c3 c1 c2
1 2 3 1 2
SELECT * from t a left join t2 b on a.c1 = b.c1;
c1 c2 c3 c1 c2
1 2 3 1 2
SELECT * from (SELECT 1, 1) as a;
Error 1060: Duplicate column name '1'
SELECT * from (SELECT * FROM t, t2) as a;
Error 1060: Duplicate column name 'c1'
DROP TABLE IF EXISTS t;
CREATE TABLE t (c1 INT, c2 INT);
INSERT INTO t VALUES (1, 2), (1, 1), (1, 3);
SELECT c1=c2 FROM t;
c1=c2
0
1
0
SELECT 1=1;
1=1
1
SELECT t.c1 + t.c2 from t limit 1;
t.c1 + t.c2
3
SELECT t.c1 from t limit 1;
c1
1
SELECT t.c1 + c2 from t limit 1;
t.c1 + c2
3
SELECT c1 + 10 from t limit 1;
c1 + 10
11
SELECT t.c1 + 10 from t limit 1;
t.c1 + 10
11
SELECT all c1, c2 from t limit 1;
c1 c2
1 2
SELECT distinct c1, c2 from t order by c1, c2 limit 1;
c1 c2
1 1
SELECT c2 from t where not (c2 > 2);
c2
2
1
select c2 from t where not null is null;
c2
select !(1 + 2);
!(1 + 2)
0
select + - 1, --1, +-+-+1, + "123";
+ - 1 --1 +-+-+1 123
-1 1 1 123
select --------------------1, ++++++++++++++++++++1;
--------------------1 1
1 1
select +(+(1)), (-+1), ((+1)), +1.23, +1e23, +1E23, +null, +true, +false, + ( ( 1 ) );
1 (-+1) 1 1.23 1e23 1E23 NULL TRUE FALSE 1
1 -1 1 1.23 1e23 1e23 NULL 1 0 1
select +
(
+
(
1
)
)
;
1
1
select + ( + 1 );
1
1
select --+(1 + 1), +-+-(1 * 1);
--+(1 + 1) +-+-(1 * 1)
2 1
select * from t where null;
c1 c2
select * from t where 1;
c1 c2
1 2
1 1
1 3
select * from t where 0;
c1 c2
select * from t where 0 * 10;
c1 c2
select * from t where null is not null;
c1 c2
select * from t where !1;
c1 c2
select * from t where 1 && 0 || 3 && null;
c1 c2
select * from t as a, t2 as b;
c1 c2 c1 c2
1 2 1 2
1 1 1 2
1 3 1 2
select * from t as a cross join t2 as b;
c1 c2 c1 c2
1 2 1 2
1 1 1 2
1 3 1 2
select * from t as a join t2 as b;
c1 c2 c1 c2
1 2 1 2
1 1 1 2
1 3 1 2
select * from t as a join t2 as b on a.c2 = b.c2;
c1 c2 c1 c2
1 2 1 2
select * from (t);
c1 c2
1 2
1 1
1 3
select * from (t as a, t2 as b);
c1 c2 c1 c2
1 2 1 2
1 1 1 2
1 3 1 2
select * from (t as a cross join t2 as b);
c1 c2 c1 c2
1 2 1 2
1 1 1 2
1 3 1 2
select 1 as a from t;
a
1
1
1
select count(*), 1 from t;
count(*) 1
3 1
select *, 1 from t;
c1 c2 1
1 2 1
1 1 1
1 3 1
select 1, count(1), sum(1);
1 count(1) sum(1)
1 1 1
drop table if exists t1;
create table t1(a int primary key, b int, c int, index idx(b, c));
insert into t1 values(1, 2, 3);
insert into t1 values(2, 3, 4);
insert into t1 values(3, 4, 5);
insert into t1 values(4, 5, 6);
insert into t1 values(5, 6, 7);
insert into t1 values(6, 7, 8);
insert into t1 values(7, 8, 9);
insert into t1 values(9, 10, 11);
explain select a, c from t1 use index(idx) order by a limit 5;
id estRows task access object operator info
TopN_7 5.00 root test.t1.a:asc, offset:0, count:5
└─IndexReader_15 5.00 root index:TopN_14
└─TopN_14 5.00 cop[tikv] test.t1.a:asc, offset:0, count:5
└─IndexFullScan_13 10000.00 cop[tikv] table:t1, index:idx(b, c) keep order:false, stats:pseudo
select c, a from t1 use index(idx) order by a limit 5;
c a
3 1
4 2
5 3
6 4
7 5
drop table if exists t;
create table t (a int, b int, c int, key idx(a, b, c));
explain select count(a) from t;
id estRows task access object operator info
StreamAgg_20 1.00 root funcs:count(Column#13)->Column#5
└─TableReader_21 1.00 root data:StreamAgg_8
└─StreamAgg_8 1.00 cop[tikv] funcs:count(test.t.a)->Column#13
└─TableFullScan_18 10000.00 cop[tikv] table:t keep order:false, stats:pseudo
select count(a) from t;
count(a)
0
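Note: the count(a) plan above shows TiDB's two-stage aggregation: a partial StreamAgg runs inside the coprocessor (cop[tikv]) right above the scan, and a final StreamAgg in root merges the partial results. The GROUP BY plans that follow split the same way:
EXPLAIN SELECT count(b) FROM t GROUP BY a;  -- partial agg in cop[tikv], final agg in root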
insert t values(0,0,0);
explain select distinct b from t group by a;
id estRows task access object operator info
HashAgg_7 8000.00 root group by:test.t.b, funcs:firstrow(test.t.b)->test.t.b
└─StreamAgg_22 8000.00 root group by:test.t.a, funcs:firstrow(Column#9)->test.t.b
└─IndexReader_23 8000.00 root index:StreamAgg_11
└─StreamAgg_11 8000.00 cop[tikv] group by:test.t.a, funcs:firstrow(test.t.b)->Column#9
└─IndexFullScan_21 10000.00 cop[tikv] table:t, index:idx(a, b, c) keep order:true, stats:pseudo
select distinct b from t group by a;
b
0
explain select count(b) from t group by a;
id estRows task access object operator info
StreamAgg_19 8000.00 root group by:test.t.a, funcs:count(Column#10)->Column#5
└─IndexReader_20 8000.00 root index:StreamAgg_8
└─StreamAgg_8 8000.00 cop[tikv] group by:test.t.a, funcs:count(test.t.b)->Column#10
└─IndexFullScan_18 10000.00 cop[tikv] table:t, index:idx(a, b, c) keep order:true, stats:pseudo
select count(b) from t group by a;
count(b)
1
insert t values(1,1,1),(3,3,6),(3,2,5),(2,1,4),(1,1,3),(1,1,2);
explain select count(a) from t where b>0 group by a, b;
id estRows task access object operator info
StreamAgg_25 2666.67 root group by:test.t.a, test.t.b, funcs:count(Column#10)->Column#5
└─IndexReader_26 2666.67 root index:StreamAgg_9
└─StreamAgg_9 2666.67 cop[tikv] group by:test.t.a, test.t.b, funcs:count(test.t.a)->Column#10
└─Selection_24 3333.33 cop[tikv] gt(test.t.b, 0)
└─IndexFullScan_23 10000.00 cop[tikv] table:t, index:idx(a, b, c) keep order:true, stats:pseudo
select count(a) from t where b>0 group by a, b;
count(a)
3
1
1
1
explain select count(a) from t where b>0 group by a, b order by a;
id estRows task access object operator info
Projection_7 2666.67 root Column#5
└─StreamAgg_36 2666.67 root group by:test.t.a, test.t.b, funcs:count(Column#15)->Column#5, funcs:firstrow(test.t.a)->test.t.a
└─IndexReader_37 2666.67 root index:StreamAgg_34
└─StreamAgg_34 2666.67 cop[tikv] group by:test.t.a, test.t.b, funcs:count(test.t.a)->Column#15
└─Selection_28 3333.33 cop[tikv] gt(test.t.b, 0)
└─IndexFullScan_27 10000.00 cop[tikv] table:t, index:idx(a, b, c) keep order:true, stats:pseudo
select count(a) from t where b>0 group by a, b order by a;
count(a)
3
1
1
1
explain select count(a) from t where b>0 group by a, b order by a limit 1;
id estRows task access object operator info
Projection_9 1.00 root Column#5
└─Limit_15 1.00 root offset:0, count:1
└─StreamAgg_44 1.00 root group by:test.t.a, test.t.b, funcs:count(Column#16)->Column#5, funcs:firstrow(test.t.a)->test.t.a
└─IndexReader_45 1.00 root index:StreamAgg_40
└─StreamAgg_40 1.00 cop[tikv] group by:test.t.a, test.t.b, funcs:count(test.t.a)->Column#16
└─Selection_43 1.25 cop[tikv] gt(test.t.b, 0)
└─IndexFullScan_42 3.75 cop[tikv] table:t, index:idx(a, b, c) keep order:true, stats:pseudo
select count(a) from t where b>0 group by a, b order by a limit 1;
count(a)
3
drop table if exists t;
create table t (id int primary key, a int, b int);
explain select * from (t t1 left join t t2 on t1.a = t2.a) left join (t t3 left join t t4 on t3.a = t4.a) on t2.b = 1;
id estRows task access object operator info
HashJoin_10 155937656.25 root CARTESIAN left outer join, left cond:[eq(test.t.b, 1)]
├─HashJoin_19(Build) 12487.50 root left outer join, equal:[eq(test.t.a, test.t.a)]
│ ├─TableReader_25(Build) 9990.00 root data:Selection_24
│ │ └─Selection_24 9990.00 cop[tikv] not(isnull(test.t.a))
│ │ └─TableFullScan_23 10000.00 cop[tikv] table:t4 keep order:false, stats:pseudo
│ └─TableReader_22(Probe) 10000.00 root data:TableFullScan_21
│ └─TableFullScan_21 10000.00 cop[tikv] table:t3 keep order:false, stats:pseudo
└─HashJoin_12(Probe) 12487.50 root left outer join, equal:[eq(test.t.a, test.t.a)]
├─TableReader_18(Build) 9990.00 root data:Selection_17
│ └─Selection_17 9990.00 cop[tikv] not(isnull(test.t.a))
│ └─TableFullScan_16 10000.00 cop[tikv] table:t2 keep order:false, stats:pseudo
└─TableReader_15(Probe) 10000.00 root data:TableFullScan_14
└─TableFullScan_14 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo
drop table if exists t;
create table t(a bigint primary key, b bigint);
desc select * from t where a = 1;
id estRows task access object operator info
Point_Get_1 1.00 root table:t handle:1
desc select * from t where a = '1';
id estRows task access object operator info
Point_Get_1 1.00 root table:t handle:1
desc select sysdate(), sleep(1), sysdate();
id estRows task access object operator info
Projection_3 1.00 root sysdate()->Column#1, sleep(1)->Column#2, sysdate()->Column#3
└─TableDual_4 1.00 root rows:1
drop table if exists th;
set @@session.tidb_enable_table_partition = '1';
create table th (a int, b int) partition by hash(a) partitions 3;
insert into th values (0,0),(1,1),(2,2),(3,3),(4,4),(5,5),(6,6),(7,7),(8,8);
insert into th values (-1,-1),(-2,-2),(-3,-3),(-4,-4),(-5,-5),(-6,-6),(-7,-7),(-8,-8);
desc select * from th where a=-2;
id estRows task access object operator info
TableReader_8 10.00 root data:Selection_7
└─Selection_7 10.00 cop[tikv] eq(test.th.a, -2)
└─TableFullScan_6 10000.00 cop[tikv] table:th, partition:p2 keep order:false, stats:pseudo
desc select * from th;
id estRows task access object operator info
PartitionUnion_8 30000.00 root
├─TableReader_10 10000.00 root data:TableFullScan_9
│ └─TableFullScan_9 10000.00 cop[tikv] table:th, partition:p0 keep order:false, stats:pseudo
├─TableReader_12 10000.00 root data:TableFullScan_11
│ └─TableFullScan_11 10000.00 cop[tikv] table:th, partition:p1 keep order:false, stats:pseudo
└─TableReader_14 10000.00 root data:TableFullScan_13
└─TableFullScan_13 10000.00 cop[tikv] table:th, partition:p2 keep order:false, stats:pseudo
desc select * from th partition (p2,p1);
id estRows task access object operator info
PartitionUnion_7 20000.00 root
├─TableReader_9 10000.00 root data:TableFullScan_8
│ └─TableFullScan_8 10000.00 cop[tikv] table:th, partition:p1 keep order:false, stats:pseudo
└─TableReader_11 10000.00 root data:TableFullScan_10
└─TableFullScan_10 10000.00 cop[tikv] table:th, partition:p2 keep order:false, stats:pseudo
drop table if exists t;
create table t(a int, b int);
explain select a != any (select a from t t2) from t t1;
id estRows task access object operator info
Projection_8 10000.00 root and(or(or(gt(Column#8, 1), ne(test.t.a, Column#7)), if(ne(Column#9, 0), <nil>, 0)), and(ne(Column#10, 0), if(isnull(test.t.a), <nil>, 1)))->Column#11
└─HashJoin_9 10000.00 root CARTESIAN inner join
├─StreamAgg_14(Build) 1.00 root funcs:max(Column#13)->Column#7, funcs:count(distinct Column#14)->Column#8, funcs:sum(Column#15)->Column#9, funcs:count(1)->Column#10
│ └─Projection_19 10000.00 root test.t.a, test.t.a, cast(isnull(test.t.a), decimal(65,0) BINARY)->Column#15
│ └─TableReader_18 10000.00 root data:TableFullScan_17
│ └─TableFullScan_17 10000.00 cop[tikv] table:t2 keep order:false, stats:pseudo
└─TableReader_12(Probe) 10000.00 root data:TableFullScan_11
└─TableFullScan_11 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo
explain select a = all (select a from t t2) from t t1;
id estRows task access object operator info
Projection_8 10000.00 root or(and(and(le(Column#8, 1), eq(test.t.a, Column#7)), if(ne(Column#9, 0), <nil>, 1)), or(eq(Column#10, 0), if(isnull(test.t.a), <nil>, 0)))->Column#11
└─HashJoin_9 10000.00 root CARTESIAN inner join
├─StreamAgg_14(Build) 1.00 root funcs:firstrow(Column#13)->Column#7, funcs:count(distinct Column#14)->Column#8, funcs:sum(Column#15)->Column#9, funcs:count(1)->Column#10
│ └─Projection_19 10000.00 root test.t.a, test.t.a, cast(isnull(test.t.a), decimal(65,0) BINARY)->Column#15
│ └─TableReader_18 10000.00 root data:TableFullScan_17
│ └─TableFullScan_17 10000.00 cop[tikv] table:t2 keep order:false, stats:pseudo
└─TableReader_12(Probe) 10000.00 root data:TableFullScan_11
└─TableFullScan_11 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo
drop table if exists t;
create table t(a int, b int);
drop table if exists s;
create table s(a varchar(20), b varchar(20));
explain select a in (select a from s where s.b = t.b) from t;
id estRows task access object operator info
HashJoin_10 10000.00 root left outer semi join, equal:[eq(Column#8, Column#9)], other cond:eq(cast(test.t.a), cast(test.s.a))
├─Projection_14(Build) 10000.00 root test.s.a, cast(test.s.b, double BINARY)->Column#9
│ └─TableReader_16 10000.00 root data:TableFullScan_15
│ └─TableFullScan_15 10000.00 cop[tikv] table:s keep order:false, stats:pseudo
└─Projection_11(Probe) 10000.00 root test.t.a, cast(test.t.b, double BINARY)->Column#8
└─TableReader_13 10000.00 root data:TableFullScan_12
└─TableFullScan_12 10000.00 cop[tikv] table:t keep order:false, stats:pseudo
explain select a in (select a+b from t t2 where t2.b = t1.b) from t t1;
id estRows task access object operator info
HashJoin_8 10000.00 root left outer semi join, equal:[eq(test.t.b, test.t.b)], other cond:eq(test.t.a, plus(test.t.a, test.t.b))
├─TableReader_12(Build) 10000.00 root data:TableFullScan_11
│ └─TableFullScan_11 10000.00 cop[tikv] table:t2 keep order:false, stats:pseudo
└─TableReader_10(Probe) 10000.00 root data:TableFullScan_9
└─TableFullScan_9 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo
drop table t;
create table t(a int not null, b int);
explain select a in (select a from t t2 where t2.b = t1.b) from t t1;
id estRows task access object operator info
HashJoin_8 10000.00 root left outer semi join, equal:[eq(test.t.b, test.t.b) eq(test.t.a, test.t.a)]
├─TableReader_12(Build) 10000.00 root data:TableFullScan_11
│ └─TableFullScan_11 10000.00 cop[tikv] table:t2 keep order:false, stats:pseudo
└─TableReader_10(Probe) 10000.00 root data:TableFullScan_9
└─TableFullScan_9 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo
explain select 1 from (select sleep(1)) t;
id estRows task access object operator info
Projection_4 1.00 root 1->Column#2
└─Projection_5 1.00 root sleep(1)->Column#1
└─TableDual_6 1.00 root rows:1
drop table if exists t;
create table t(a int, b int);
explain select a from t order by rand();
id estRows task access object operator info
Projection_8 10000.00 root test.t.a
└─Sort_4 10000.00 root Column#4:asc
└─Projection_9 10000.00 root test.t.a, rand()->Column#4
└─TableReader_7 10000.00 root data:TableFullScan_6
└─TableFullScan_6 10000.00 cop[tikv] table:t keep order:false, stats:pseudo
explain select a, b from t order by abs(2);
id estRows task access object operator info
TableReader_8 10000.00 root data:TableFullScan_7
└─TableFullScan_7 10000.00 cop[tikv] table:t keep order:false, stats:pseudo
explain select a from t order by abs(rand())+1;
id estRows task access object operator info
Projection_8 10000.00 root test.t.a
└─Sort_4 10000.00 root Column#4:asc
└─Projection_9 10000.00 root test.t.a, plus(abs(rand()), 1)->Column#4
└─TableReader_7 10000.00 root data:TableFullScan_6
└─TableFullScan_6 10000.00 cop[tikv] table:t keep order:false, stats:pseudo
drop table if exists t1;
create table t1(a int, b int);
drop table if exists t2;
create table t2(a int, b int);
explain select * from t1 where t1.a in (select t2.a as a from t2 where t2.b > t1.b order by t1.b);
id estRows task access object operator info
HashJoin_10 7984.01 root semi join, equal:[eq(test.t1.a, test.t2.a)], other cond:gt(test.t2.b, test.t1.b)
├─TableReader_16(Build) 9980.01 root data:Selection_15
│ └─Selection_15 9980.01 cop[tikv] not(isnull(test.t2.a)), not(isnull(test.t2.b))
│ └─TableFullScan_14 10000.00 cop[tikv] table:t2 keep order:false, stats:pseudo
└─TableReader_13(Probe) 9980.01 root data:Selection_12
└─Selection_12 9980.01 cop[tikv] not(isnull(test.t1.a)), not(isnull(test.t1.b))
└─TableFullScan_11 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo
drop table t;
CREATE TABLE t (id int(10) unsigned NOT NULL AUTO_INCREMENT,
i int(10) unsigned DEFAULT NULL,
x int(10) unsigned DEFAULT '0',
PRIMARY KEY (`id`)
);
explain select row_number() over( partition by i ) - x as rnk from t;
id estRows task access object operator info
Projection_7 10000.00 root minus(Column#5, test.t.x)->Column#7
└─Window_8 10000.00 root row_number()->Column#5 over(partition by test.t.i)
└─Sort_11 10000.00 root test.t.i:asc
└─TableReader_10 10000.00 root data:TableRangeScan_9
└─TableRangeScan_9 10000.00 cop[tikv] table:t range:[0,+inf], keep order:false, stats:pseudo
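Note: in the last plan the unsigned AUTO_INCREMENT primary key lets the optimizer use TableRangeScan over range:[0,+inf] instead of a full scan, and row_number() is computed by a Window operator fed by a Sort on the partition key. Earlier in this file, desc select * from t where a = '1' shows that equality on an integer primary key becomes Point_Get even when the literal is a string. A minimal sketch of that, using a hypothetical table t_pk:
CREATE TABLE t_pk (a BIGINT PRIMARY KEY, b BIGINT);
DESC SELECT * FROM t_pk WHERE a = 1;    -- Point_Get, handle:1
DESC SELECT * FROM t_pk WHERE a = '1';  -- same Point_Get after the literal is converted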

43
cmd/explaintest/r/subquery.result

@@ -0,0 +1,43 @@
drop table if exists t1;
drop table if exists t2;
create table t1(a bigint, b bigint);
create table t2(a bigint, b bigint);
set session tidb_hashagg_partial_concurrency = 1;
set session tidb_hashagg_final_concurrency = 1;
explain select * from t1 where t1.a in (select t1.b + t2.b from t2);
id estRows task access object operator info
HashJoin_8 8000.00 root CARTESIAN semi join, other cond:eq(test.t1.a, plus(test.t1.b, test.t2.b))
├─TableReader_12(Build) 10000.00 root data:TableFullScan_11
│ └─TableFullScan_11 10000.00 cop[tikv] table:t2 keep order:false, stats:pseudo
└─TableReader_10(Probe) 10000.00 root data:TableFullScan_9
└─TableFullScan_9 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo
drop table if exists t;
create table t(a int primary key, b int, c int, d int, index idx(b,c,d));
insert into t values(1,1,1,1),(2,2,2,2),(3,2,2,2),(4,2,2,2),(5,2,2,2);
analyze table t;
explain select t.c in (select count(*) from t s use index(idx), t t1 where s.b = 1 and s.c = 1 and s.d = t.a and s.a = t1.a) from t;
id estRows task access object operator info
Projection_11 5.00 root Column#14
└─Apply_13 5.00 root CARTESIAN left outer semi join, other cond:eq(test.t.c, Column#13)
├─TableReader_15(Build) 5.00 root data:TableFullScan_14
│ └─TableFullScan_14 5.00 cop[tikv] table:t keep order:false
└─StreamAgg_22(Probe) 1.00 root funcs:count(1)->Column#13
└─IndexJoin_25 0.50 root inner join, inner:TableReader_24, outer key:test.t.a, inner key:test.t.a, equal cond:eq(test.t.a, test.t.a)
├─IndexReader_31(Build) 1.00 root index:IndexRangeScan_30
│ └─IndexRangeScan_30 1.00 cop[tikv] table:s, index:idx(b, c, d) range: decided by [eq(test.t.b, 1) eq(test.t.c, 1) eq(test.t.d, test.t.a)], keep order:false
└─TableReader_24(Probe) 1.00 root data:TableRangeScan_23
└─TableRangeScan_23 1.00 cop[tikv] table:t1 range: decided by [test.t.a], keep order:false
drop table if exists t;
create table t(a int, b int, c int);
explain select a from t t1 where t1.a = (select max(t2.a) from t t2 where t1.b=t2.b and t1.c=t2.b);
id estRows task access object operator info
HashJoin_12 7992.00 root inner join, equal:[eq(test.t.b, test.t.b) eq(test.t.c, test.t.b) eq(test.t.a, Column#9)]
├─Selection_17(Build) 6393.60 root not(isnull(Column#9))
│ └─HashAgg_23 7992.00 root group by:test.t.b, funcs:max(Column#10)->Column#9, funcs:firstrow(test.t.b)->test.t.b
│ └─TableReader_24 7992.00 root data:HashAgg_18
│ └─HashAgg_18 7992.00 cop[tikv] group by:test.t.b, funcs:max(test.t.a)->Column#10
│ └─Selection_22 9990.00 cop[tikv] not(isnull(test.t.b))
│ └─TableFullScan_21 10000.00 cop[tikv] table:t2 keep order:false, stats:pseudo
└─TableReader_16(Probe) 9970.03 root data:Selection_15
└─Selection_15 9970.03 cop[tikv] not(isnull(test.t.a)), not(isnull(test.t.b)), not(isnull(test.t.c))
└─TableFullScan_14 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo
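Note: the two plans above contrast correlated-subquery strategies: the count(*) subquery keeps its correlation inside the index ranges, so it runs as an Apply that re-executes the inner side per outer row, while the max(t2.a) subquery is decorrelated into a hash join against a grouped aggregate. A sketch of the decorrelation-friendly shape (a simplified variant of the last query, expected to plan the same way):
EXPLAIN SELECT a FROM t t1
WHERE t1.a = (SELECT max(t2.a) FROM t t2 WHERE t1.b = t2.b);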

271
cmd/explaintest/r/topn_push_down.result

@@ -0,0 +1,271 @@
CREATE TABLE `tr` (
`id` bigint(20) NOT NULL,
`biz_date` date NOT NULL,
`domain_type` tinyint(4) NOT NULL,
`business_type` tinyint(4) NOT NULL,
`trade_type` tinyint(4) NOT NULL DEFAULT '1',
`trade_time` timestamp(3) NOT NULL DEFAULT CURRENT_TIMESTAMP(3),
`trade_status` tinyint(4) NOT NULL DEFAULT '0',
`trade_pay_status` tinyint(4) NOT NULL DEFAULT '0',
`delivery_type` tinyint(4) NOT NULL DEFAULT '0',
`source` tinyint(4) NOT NULL,
`source_child` mediumint(9) DEFAULT NULL,
`trade_no` varchar(26) NOT NULL,
`sku_kind_count` int(11) NOT NULL,
`sale_amount` decimal(10,2) NOT NULL DEFAULT '0.00',
`privilege_amount` decimal(10,2) NOT NULL DEFAULT '0.00',
`trade_amount` decimal(10,2) NOT NULL DEFAULT '0.00',
`trade_amount_before` decimal(10,2) NOT NULL DEFAULT '0.00',
`trade_memo` varchar(100) DEFAULT NULL,
`relate_trade_id` bigint(20) DEFAULT NULL,
`relate_trade_uuid` varchar(32) DEFAULT NULL,
`brand_identy` bigint(20) NOT NULL,
`shop_identy` bigint(20) NOT NULL,
`device_identy` varchar(36) NOT NULL,
`uuid` varchar(32) NOT NULL,
`status_flag` tinyint(4) NOT NULL,
`client_create_time` timestamp(3) NOT NULL DEFAULT CURRENT_TIMESTAMP(3),
`client_update_time` timestamp(3) NULL DEFAULT NULL,
`server_create_time` timestamp(3) NOT NULL DEFAULT CURRENT_TIMESTAMP(3),
`server_update_time` timestamp(3) DEFAULT CURRENT_TIMESTAMP(3) ON UPDATE CURRENT_TIMESTAMP(3),
`creator_id` bigint(20) DEFAULT NULL,
`creator_name` varchar(32) DEFAULT NULL,
`updator_id` bigint(20) DEFAULT NULL,
`updator_name` varchar(32) DEFAULT NULL,
`trade_people_count` int(4) DEFAULT NULL,
`trade_pay_form` tinyint(4) NOT NULL DEFAULT '1',
`print_time` timestamp(3) NULL DEFAULT NULL,
`action_type` tinyint(4) NOT NULL DEFAULT '1',
`recycle_status` tinyint(1) NOT NULL DEFAULT '1',
`rds_source_calm` varchar(100) DEFAULT NULL,
PRIMARY KEY (`id`),
UNIQUE KEY `uuid` (`uuid`),
KEY `idx_server_update_time` (`shop_identy`,`server_update_time`),
KEY `idx_server_create_time` (`server_create_time`),
KEY `idx_trade_no` (`trade_no`),
KEY `idx_relate_trade_id` (`relate_trade_id`),
KEY `idx_brand_identy_biz_date` (`brand_identy`,`biz_date`),
KEY `idx_trade_status_server_create_time` (`trade_status`,`server_create_time`),
KEY `idx_shop_identy_biz_date` (`shop_identy`,`biz_date`),
KEY `idx_shop_identy_server_create_time` (`shop_identy`,`server_create_time`),
KEY `idx_shop_identy_trade_status_business_type` (`shop_identy`,`trade_status`,`business_type`,`trade_pay_status`,`trade_type`,`delivery_type`,`source`,`biz_date`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE=utf8_bin COMMENT='';
CREATE TABLE `p` (
`id` bigint(20) NOT NULL,
`biz_date` date NOT NULL,
`payment_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP,
`payment_type` int(11) NOT NULL,
`relate_id` bigint(20) DEFAULT NULL,
`relate_uuid` varchar(32) DEFAULT NULL,
`receivable_amount` decimal(10,2) NOT NULL DEFAULT '0.00',
`exempt_amount` decimal(10,2) NOT NULL DEFAULT '0.00',
`actual_amount` decimal(10,2) NOT NULL DEFAULT '0.00',
`handover_uuid` varchar(32) DEFAULT NULL,
`brand_identy` bigint(20) NOT NULL,
`shop_identy` bigint(20) NOT NULL,
`device_identy` varchar(36) NOT NULL,
`uuid` varchar(32) NOT NULL,
`status_flag` tinyint(4) NOT NULL DEFAULT '1',
`client_create_time` timestamp(3) NOT NULL DEFAULT CURRENT_TIMESTAMP(3),
`client_update_time` timestamp(3) NULL DEFAULT NULL,
`server_create_time` timestamp(3) NOT NULL DEFAULT CURRENT_TIMESTAMP(3),
`server_update_time` timestamp(3) DEFAULT CURRENT_TIMESTAMP(3) ON UPDATE CURRENT_TIMESTAMP(3),
`creator_id` bigint(20) DEFAULT NULL,
`creator_name` varchar(32) DEFAULT NULL,
`updator_id` bigint(20) DEFAULT NULL,
`updator_name` varchar(32) DEFAULT NULL,
`is_paid` tinyint(4) DEFAULT '1',
`memo` varchar(100) DEFAULT NULL,
`recycle_status` tinyint(1) NOT NULL DEFAULT '1',
`shop_actual_amount` decimal(10,2) NOT NULL DEFAULT '0.00',
`rds_source_calm` varchar(100) DEFAULT NULL,
PRIMARY KEY (`id`),
UNIQUE KEY `uuid` (`uuid`),
KEY `payment_relate_id` (`relate_id`),
KEY `idx_shop_identy_biz_date` (`shop_identy`,`biz_date`),
KEY `idx_relate_uuid` (`relate_uuid`(8)),
KEY `idx_shop_identy_server_update_time` (`shop_identy`,`server_update_time`),
KEY `idx_shop_identy_server_create_time` (`shop_identy`,`server_create_time`),
KEY `idx_server_create_time` (`server_create_time`),
KEY `idx_brand_identy_shop_identy_payment_time` (`brand_identy`,`shop_identy`,`payment_time`),
KEY `idx_handover_uuid` (`handover_uuid`(8)),
KEY `idx_shop_identy_handover_uuid_payment_time` (`shop_identy`,`handover_uuid`(1),`payment_time`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE=utf8_bin COMMENT='';
CREATE TABLE `te` (
`id` bigint(20) NOT NULL,
`trade_id` bigint(20) NOT NULL,
`trade_uuid` varchar(32) NOT NULL,
`number_plate` varchar(32) DEFAULT NULL,
`fix_type` tinyint(4) DEFAULT NULL,
`called` tinyint(4) DEFAULT NULL,
`invoice_title` varchar(64) DEFAULT NULL,
`expect_time` timestamp NULL DEFAULT NULL,
`receiver_phone` varchar(16) DEFAULT NULL,
`receiver_name` varchar(32) DEFAULT NULL,
`receiver_sex` tinyint(4) DEFAULT NULL,
`delivery_address_id` bigint(20) DEFAULT NULL,
`delivery_address` varchar(500) DEFAULT NULL,
`received_time` timestamp NULL DEFAULT NULL,
`delivery_fee` decimal(10,2) DEFAULT NULL,
`device_platform` varchar(20) DEFAULT NULL,
`device_token` varchar(128) DEFAULT NULL,
`open_identy` varchar(100) DEFAULT NULL,
`user_identy` bigint(20) DEFAULT NULL,
`third_tran_no` varchar(100) DEFAULT NULL,
`brand_identy` bigint(20) NOT NULL,
`shop_identy` bigint(20) NOT NULL,
`device_identy` varchar(36) NOT NULL,
`uuid` varchar(32) NOT NULL,
`status_flag` tinyint(4) NOT NULL,
`client_create_time` timestamp(3) NOT NULL DEFAULT CURRENT_TIMESTAMP(3),
`client_update_time` timestamp(3) NULL DEFAULT NULL,
`server_create_time` timestamp(3) NOT NULL DEFAULT CURRENT_TIMESTAMP(3),
`server_update_time` timestamp(3) DEFAULT CURRENT_TIMESTAMP(3) ON UPDATE CURRENT_TIMESTAMP(3),
`creator_id` bigint(20) DEFAULT NULL,
`creator_name` varchar(32) DEFAULT NULL,
`updator_id` bigint(20) DEFAULT NULL,
`updator_name` varchar(32) DEFAULT NULL,
`call_dish_status` tinyint(4) NOT NULL DEFAULT '0',
`delivery_man` varchar(50) DEFAULT NULL,
`delivery_status` tinyint(4) NOT NULL DEFAULT '0',
`delivery_user_id` varchar(50) DEFAULT NULL,
`delivery_real_time` timestamp NULL DEFAULT NULL,
`send_area_id` bigint(20) DEFAULT NULL,
`order_tip` tinyint(4) NOT NULL DEFAULT '0',
`binding_delivery_user_time` timestamp(3) NULL DEFAULT NULL,
`square_up_time` timestamp(3) NULL DEFAULT NULL,
`is_sub_mch` tinyint(1) DEFAULT '0',
`serial_number` varchar(50) NOT NULL DEFAULT '',
`recycle_status` tinyint(1) NOT NULL DEFAULT '1',
`delivery_platform` bigint(20) NOT NULL DEFAULT '1',
`is_printed` tinyint(4) NOT NULL DEFAULT '1',
`third_serial_no` varchar(50) DEFAULT NULL,
`has_serving` tinyint(4) NOT NULL DEFAULT '1',
`device_no` varchar(6) DEFAULT NULL,
`third_service_charge` decimal(10,2) DEFAULT '0.00',
`third_subsidies` decimal(10,2) DEFAULT '0.00',
`rds_source_calm` varchar(100) DEFAULT NULL,
PRIMARY KEY (`id`),
UNIQUE KEY `uuid` (`uuid`),
KEY `idx_trade_id` (`trade_id`),
KEY `idx_server_update_time` (`shop_identy`,`server_update_time`),
KEY `idx_receiver_phone` (`receiver_phone`(11)),
KEY `idx_delivery_status_delivery_user_id` (`delivery_status`,`delivery_user_id`(10)),
KEY `idx_trade_uuid` (`trade_uuid`(10)),
KEY `idx_third_tran_no` (`third_tran_no`(10))
) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE=utf8_bin COMMENT='';
EXPLAIN SELECT te.expect_time AS expected_time FROM
tr tr inner JOIN te te ON te.trade_id = tr.id
left JOIN p p ON p.relate_id = tr.id
WHERE
tr.brand_identy = 32314 AND
tr.shop_identy = 810094178 AND
tr.domain_type = 2 AND
tr.business_type = 18 AND
tr.trade_type IN (1) AND
te.expect_time BETWEEN '2018-04-23 00:00:00.0' AND '2018-04-23 23:59:59.0'
ORDER BY te.expect_time asc
LIMIT 0, 5;
id estRows task access object operator info
Limit_19 0.00 root offset:0, count:5
└─IndexJoin_97 0.00 root left outer join, inner:IndexReader_96, outer key:test.tr.id, inner key:test.p.relate_id, equal cond:eq(test.tr.id, test.p.relate_id)
├─TopN_102(Build) 0.00 root test.te.expect_time:asc, offset:0, count:5
│ └─IndexJoin_39 0.00 root inner join, inner:IndexLookUp_38, outer key:test.tr.id, inner key:test.te.trade_id, equal cond:eq(test.tr.id, test.te.trade_id)
│ ├─IndexLookUp_81(Build) 0.00 root
│ │ ├─Selection_79(Build) 0.00 cop[tikv] eq(test.tr.business_type, 18), eq(test.tr.trade_type, 1)
│ │ │ └─IndexRangeScan_77 10.00 cop[tikv] table:tr, index:idx_shop_identy_trade_status_business_type(shop_identy, trade_status, business_type, trade_pay_status, trade_type, delivery_type, source, biz_date) range:[810094178,810094178], keep order:false, stats:pseudo
│ │ └─Selection_80(Probe) 0.00 cop[tikv] eq(test.tr.brand_identy, 32314), eq(test.tr.domain_type, 2)
│ │ └─TableRowIDScan_78 0.00 cop[tikv] table:tr keep order:false, stats:pseudo
│ └─IndexLookUp_38(Probe) 1.25 root
│ ├─IndexRangeScan_35(Build) 50.00 cop[tikv] table:te, index:idx_trade_id(trade_id) range: decided by [eq(test.te.trade_id, test.tr.id)], keep order:false, stats:pseudo
│ └─Selection_37(Probe) 1.25 cop[tikv] ge(test.te.expect_time, 2018-04-23 00:00:00.000000), le(test.te.expect_time, 2018-04-23 23:59:59.000000)
│ └─TableRowIDScan_36 50.00 cop[tikv] table:te keep order:false, stats:pseudo
└─IndexReader_96(Probe) 1.25 root index:Selection_95
└─Selection_95 1.25 cop[tikv] not(isnull(test.p.relate_id))
└─IndexRangeScan_94 1.25 cop[tikv] table:p, index:payment_relate_id(relate_id) range: decided by [eq(test.p.relate_id, test.tr.id)], keep order:false, stats:pseudo
desc select 1 as a from dual order by a limit 1;
id estRows task access object operator info
Projection_6 1.00 root 1->Column#1
└─TableDual_7 1.00 root rows:1
drop table if exists t1;
drop table if exists t2;
create table t1(a bigint, b bigint);
create table t2(a bigint, b bigint);
desc select * from t1 where t1.a in (select t2.a as a from t2 where t2.b > t1.b order by t1.b limit 1);
id estRows task access object operator info
Apply_15 9990.00 root semi join, equal:[eq(test.t1.a, test.t2.a)]
├─TableReader_18(Build) 9990.00 root data:Selection_17
│ └─Selection_17 9990.00 cop[tikv] not(isnull(test.t1.a))
│ └─TableFullScan_16 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo
└─Selection_19(Probe) 0.80 root not(isnull(test.t2.a))
└─Limit_20 1.00 root offset:0, count:1
└─TableReader_26 1.00 root data:Limit_25
└─Limit_25 1.00 cop[tikv] offset:0, count:1
└─Selection_24 1.00 cop[tikv] gt(test.t2.b, test.t1.b)
└─TableFullScan_23 1.25 cop[tikv] table:t2 keep order:false, stats:pseudo
desc select * from t1 where t1.a in (select a from (select t2.a as a, t1.b as b from t2 where t2.b > t1.b) x order by b limit 1);
id estRows task access object operator info
Apply_17 9990.00 root semi join, equal:[eq(test.t1.a, test.t2.a)]
├─TableReader_20(Build) 9990.00 root data:Selection_19
│ └─Selection_19 9990.00 cop[tikv] not(isnull(test.t1.a))
│ └─TableFullScan_18 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo
└─Selection_21(Probe) 0.80 root not(isnull(test.t2.a))
└─Projection_22 1.00 root test.t2.a
└─Limit_23 1.00 root offset:0, count:1
└─TableReader_29 1.00 root data:Limit_28
└─Limit_28 1.00 cop[tikv] offset:0, count:1
└─Selection_27 1.00 cop[tikv] gt(test.t2.b, test.t1.b)
└─TableFullScan_26 1.25 cop[tikv] table:t2 keep order:false, stats:pseudo
drop table if exists t;
create table t(a int not null, index idx(a));
explain select /*+ TIDB_INLJ(t2) */ * from t t1 join t t2 on t1.a = t2.a limit 5;
id estRows task access object operator info
Limit_11 5.00 root offset:0, count:5
└─IndexJoin_15 5.00 root inner join, inner:IndexReader_14, outer key:test.t.a, inner key:test.t.a, equal cond:eq(test.t.a, test.t.a)
├─TableReader_19(Build) 4.00 root data:TableFullScan_18
│ └─TableFullScan_18 4.00 cop[tikv] table:t1 keep order:false, stats:pseudo
└─IndexReader_14(Probe) 1.25 root index:IndexRangeScan_13
└─IndexRangeScan_13 1.25 cop[tikv] table:t2, index:idx(a) range: decided by [eq(test.t.a, test.t.a)], keep order:false, stats:pseudo
explain select /*+ TIDB_INLJ(t2) */ * from t t1 left join t t2 on t1.a = t2.a where t2.a is null limit 5;
id estRows task access object operator info
Limit_12 5.00 root offset:0, count:5
└─Selection_13 5.00 root isnull(test.t.a)
└─IndexJoin_17 5.00 root left outer join, inner:IndexReader_16, outer key:test.t.a, inner key:test.t.a, equal cond:eq(test.t.a, test.t.a)
├─TableReader_21(Build) 4.00 root data:TableFullScan_20
│ └─TableFullScan_20 4.00 cop[tikv] table:t1 keep order:false, stats:pseudo
└─IndexReader_16(Probe) 1.25 root index:IndexRangeScan_15
└─IndexRangeScan_15 1.25 cop[tikv] table:t2, index:idx(a) range: decided by [eq(test.t.a, test.t.a)], keep order:false, stats:pseudo
explain select /*+ TIDB_SMJ(t1, t2) */ * from t t1 join t t2 on t1.a = t2.a limit 5;
id estRows task access object operator info
Limit_11 5.00 root offset:0, count:5
└─MergeJoin_12 5.00 root inner join, left key:test.t.a, right key:test.t.a
├─IndexReader_17(Build) 4.00 root index:IndexFullScan_16
│ └─IndexFullScan_16 4.00 cop[tikv] table:t2, index:idx(a) keep order:true, stats:pseudo
└─IndexReader_15(Probe) 4.00 root index:IndexFullScan_14
└─IndexFullScan_14 4.00 cop[tikv] table:t1, index:idx(a) keep order:true, stats:pseudo
explain select /*+ TIDB_SMJ(t1, t2) */ * from t t1 left join t t2 on t1.a = t2.a where t2.a is null limit 5;
id estRows task access object operator info
Limit_12 5.00 root offset:0, count:5
└─Selection_13 5.00 root isnull(test.t.a)
└─MergeJoin_14 5.00 root left outer join, left key:test.t.a, right key:test.t.a
├─IndexReader_19(Build) 4.00 root index:IndexFullScan_18
│ └─IndexFullScan_18 4.00 cop[tikv] table:t2, index:idx(a) keep order:true, stats:pseudo
└─IndexReader_17(Probe) 4.00 root index:IndexFullScan_16
└─IndexFullScan_16 4.00 cop[tikv] table:t1, index:idx(a) keep order:true, stats:pseudo
explain select /*+ TIDB_HJ(t1, t2) */ * from t t1 join t t2 on t1.a = t2.a limit 5;
id estRows task access object operator info
Limit_11 5.00 root offset:0, count:5
└─HashJoin_23 5.00 root inner join, equal:[eq(test.t.a, test.t.a)]
├─TableReader_30(Build) 10000.00 root data:TableFullScan_29
│ └─TableFullScan_29 10000.00 cop[tikv] table:t2 keep order:false, stats:pseudo
└─TableReader_26(Probe) 4.00 root data:TableFullScan_25
└─TableFullScan_25 4.00 cop[tikv] table:t1 keep order:false, stats:pseudo
explain select /*+ TIDB_HJ(t1, t2) */ * from t t1 left join t t2 on t1.a = t2.a where t2.a is null limit 5;
id estRows task access object operator info
Limit_12 5.00 root offset:0, count:5
└─Selection_13 5.00 root isnull(test.t.a)
└─HashJoin_21 5.00 root left outer join, equal:[eq(test.t.a, test.t.a)]
├─TableReader_23(Build) 4.00 root data:TableFullScan_22
│ └─TableFullScan_22 4.00 cop[tikv] table:t1 keep order:false, stats:pseudo
└─TableReader_27(Probe) 10000.00 root data:TableFullScan_26
└─TableFullScan_26 10000.00 cop[tikv] table:t2 keep order:false, stats:pseudo

8
cmd/explaintest/r/topn_pushdown.result

@@ -0,0 +1,8 @@
explain select * from ((select 4 as a) union all (select 33 as a)) tmp order by a desc limit 1;
id estRows task access object operator info
TopN_17 1.00 root Column#3:desc, offset:0, count:1
└─Union_21 2.00 root
├─Projection_22 1.00 root 4->Column#3
│ └─TableDual_23 1.00 root rows:1
└─Projection_24 1.00 root 33->Column#3
└─TableDual_25 1.00 root rows:1

1301
cmd/explaintest/r/tpch.result

File diff suppressed because it is too large

126
cmd/explaintest/r/window_function.result

@@ -0,0 +1,126 @@
use test;
drop table if exists t;
create table t (a int, b int, c timestamp, index idx(a));
set @@tidb_enable_window_function = 1;
set @@session.tidb_window_concurrency = 1;
explain select sum(a) over() from t;
id estRows task access object operator info
Projection_7 10000.00 root Column#6
└─Window_8 10000.00 root sum(cast(test.t.a, decimal(65,0) BINARY))->Column#6 over()
└─IndexReader_12 10000.00 root index:IndexFullScan_11
└─IndexFullScan_11 10000.00 cop[tikv] table:t, index:idx(a) keep order:false, stats:pseudo
explain select sum(a) over(partition by a) from t;
id estRows task access object operator info
Projection_7 10000.00 root Column#6
└─Window_8 10000.00 root sum(cast(test.t.a, decimal(65,0) BINARY))->Column#6 over(partition by test.t.a)
└─IndexReader_10 10000.00 root index:IndexFullScan_9
└─IndexFullScan_9 10000.00 cop[tikv] table:t, index:idx(a) keep order:true, stats:pseudo
explain select sum(a) over(partition by a order by b) from t;
id estRows task access object operator info
Projection_7 10000.00 root Column#6
└─Window_8 10000.00 root sum(cast(test.t.a, decimal(65,0) BINARY))->Column#6 over(partition by test.t.a order by test.t.b asc range between unbounded preceding and current row)
└─Sort_11 10000.00 root test.t.a:asc, test.t.b:asc
└─TableReader_10 10000.00 root data:TableFullScan_9
└─TableFullScan_9 10000.00 cop[tikv] table:t keep order:false, stats:pseudo
explain select sum(a) over(partition by a order by b rows unbounded preceding) from t;
id estRows task access object operator info
Projection_7 10000.00 root Column#6
└─Window_8 10000.00 root sum(cast(test.t.a, decimal(65,0) BINARY))->Column#6 over(partition by test.t.a order by test.t.b asc rows between unbounded preceding and current row)
└─Sort_11 10000.00 root test.t.a:asc, test.t.b:asc
└─TableReader_10 10000.00 root data:TableFullScan_9
└─TableFullScan_9 10000.00 cop[tikv] table:t keep order:false, stats:pseudo
explain select sum(a) over(partition by a order by b rows between 1 preceding and 1 following) from t;
id estRows task access object operator info
Projection_7 10000.00 root Column#6
└─Window_8 10000.00 root sum(cast(test.t.a, decimal(65,0) BINARY))->Column#6 over(partition by test.t.a order by test.t.b asc rows between 1 preceding and 1 following)
└─Sort_11 10000.00 root test.t.a:asc, test.t.b:asc
└─TableReader_10 10000.00 root data:TableFullScan_9
└─TableFullScan_9 10000.00 cop[tikv] table:t keep order:false, stats:pseudo
explain select sum(a) over(partition by a order by b range between 1 preceding and 1 following) from t;
id estRows task access object operator info
Projection_7 10000.00 root Column#6
└─Window_8 10000.00 root sum(cast(test.t.a, decimal(65,0) BINARY))->Column#6 over(partition by test.t.a order by test.t.b asc range between 1 preceding and 1 following)
└─Sort_11 10000.00 root test.t.a:asc, test.t.b:asc
└─TableReader_10 10000.00 root data:TableFullScan_9
└─TableFullScan_9 10000.00 cop[tikv] table:t keep order:false, stats:pseudo
explain select sum(a) over(partition by a order by c range between interval '2:30' minute_second preceding and interval '2:30' minute_second following) from t;
id estRows task access object operator info
Projection_7 10000.00 root Column#6
└─Window_8 10000.00 root sum(cast(test.t.a, decimal(65,0) BINARY))->Column#6 over(partition by test.t.a order by test.t.c asc range between interval "2:30" "MINUTE_SECOND" preceding and interval "2:30" "MINUTE_SECOND" following)
└─Sort_11 10000.00 root test.t.a:asc, test.t.c:asc
└─TableReader_10 10000.00 root data:TableFullScan_9
└─TableFullScan_9 10000.00 cop[tikv] table:t keep order:false, stats:pseudo
set @@session.tidb_window_concurrency = 4;
explain select sum(a) over() from t;
id estRows task access object operator info
Projection_7 10000.00 root Column#6
└─Window_8 10000.00 root sum(cast(test.t.a, decimal(65,0) BINARY))->Column#6 over()
└─IndexReader_12 10000.00 root index:IndexFullScan_11
└─IndexFullScan_11 10000.00 cop[tikv] table:t, index:idx(a) keep order:false, stats:pseudo
explain select sum(a) over(partition by a) from t;
id estRows task access object operator info
Projection_7 10000.00 root Column#6
└─Window_8 10000.00 root sum(cast(test.t.a, decimal(65,0) BINARY))->Column#6 over(partition by test.t.a)
└─IndexReader_10 10000.00 root index:IndexFullScan_9
└─IndexFullScan_9 10000.00 cop[tikv] table:t, index:idx(a) keep order:true, stats:pseudo
explain select sum(a) over(partition by a order by b) from t;
id estRows task access object operator info
Projection_7 10000.00 root Column#6
└─Shuffle_12 10000.00 root execution info: concurrency:4, data source:TableReader_10
└─Window_8 10000.00 root sum(cast(test.t.a, decimal(65,0) BINARY))->Column#6 over(partition by test.t.a order by test.t.b asc range between unbounded preceding and current row)
└─Sort_11 10000.00 root test.t.a:asc, test.t.b:asc
└─TableReader_10 10000.00 root data:TableFullScan_9
└─TableFullScan_9 10000.00 cop[tikv] table:t keep order:false, stats:pseudo
explain select sum(a) over(partition by a order by b rows unbounded preceding) from t;
id estRows task access object operator info
Projection_7 10000.00 root Column#6
└─Shuffle_12 10000.00 root execution info: concurrency:4, data source:TableReader_10
└─Window_8 10000.00 root sum(cast(test.t.a, decimal(65,0) BINARY))->Column#6 over(partition by test.t.a order by test.t.b asc rows between unbounded preceding and current row)
└─Sort_11 10000.00 root test.t.a:asc, test.t.b:asc
└─TableReader_10 10000.00 root data:TableFullScan_9
└─TableFullScan_9 10000.00 cop[tikv] table:t keep order:false, stats:pseudo
explain select sum(a) over(partition by a order by b rows between 1 preceding and 1 following) from t;
id estRows task access object operator info
Projection_7 10000.00 root Column#6
└─Shuffle_12 10000.00 root execution info: concurrency:4, data source:TableReader_10
└─Window_8 10000.00 root sum(cast(test.t.a, decimal(65,0) BINARY))->Column#6 over(partition by test.t.a order by test.t.b asc rows between 1 preceding and 1 following)
└─Sort_11 10000.00 root test.t.a:asc, test.t.b:asc
└─TableReader_10 10000.00 root data:TableFullScan_9
└─TableFullScan_9 10000.00 cop[tikv] table:t keep order:false, stats:pseudo
explain select sum(a) over(partition by a order by b range between 1 preceding and 1 following) from t;
id estRows task access object operator info
Projection_7 10000.00 root Column#6
└─Shuffle_12 10000.00 root execution info: concurrency:4, data source:TableReader_10
└─Window_8 10000.00 root sum(cast(test.t.a, decimal(65,0) BINARY))->Column#6 over(partition by test.t.a order by test.t.b asc range between 1 preceding and 1 following)
└─Sort_11 10000.00 root test.t.a:asc, test.t.b:asc
└─TableReader_10 10000.00 root data:TableFullScan_9
└─TableFullScan_9 10000.00 cop[tikv] table:t keep order:false, stats:pseudo
explain select sum(a) over(partition by a order by c range between interval '2:30' minute_second preceding and interval '2:30' minute_second following) from t;
id estRows task access object operator info
Projection_7 10000.00 root Column#6
└─Shuffle_12 10000.00 root execution info: concurrency:4, data source:TableReader_10
└─Window_8 10000.00 root sum(cast(test.t.a, decimal(65,0) BINARY))->Column#6 over(partition by test.t.a order by test.t.c asc range between interval "2:30" "MINUTE_SECOND" preceding and interval "2:30" "MINUTE_SECOND" following)
└─Sort_11 10000.00 root test.t.a:asc, test.t.c:asc
└─TableReader_10 10000.00 root data:TableFullScan_9
└─TableFullScan_9 10000.00 cop[tikv] table:t keep order:false, stats:pseudo
drop table if exists t1;
create table t1(a int primary key, b int);
insert into t1 values(1, 1), (2, 1);
analyze table t1;
explain select sum(a) over(partition by b) from t1;
id estRows task access object operator info
Projection_7 2.00 root Column#4
└─Window_8 2.00 root sum(cast(test.t1.a, decimal(65,0) BINARY))->Column#4 over(partition by test.t1.b)
└─Sort_11 2.00 root test.t1.b:asc
└─TableReader_10 2.00 root data:TableFullScan_9
└─TableFullScan_9 2.00 cop[tikv] table:t1 keep order:false
insert into t1 values(3, 3);
analyze table t1;
explain select sum(a) over(partition by b) from t1;
id estRows task access object operator info
Projection_7 3.00 root Column#4
└─Shuffle_12 3.00 root execution info: concurrency:2, data source:TableReader_10
└─Window_8 3.00 root sum(cast(test.t1.a, decimal(65,0) BINARY))->Column#4 over(partition by test.t1.b)
└─Sort_11 3.00 root test.t1.b:asc
└─TableReader_10 3.00 root data:TableFullScan_9
└─TableFullScan_9 3.00 cop[tikv] table:t1 keep order:false
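
Note: comparing the two t1 plans above, the planner only inserts a Shuffle operator (Shuffle_12, execution info: concurrency:2) once the table holds more than one distinct value of the partition column b; with a single partition, raising tidb_window_concurrency leaves the plan unchanged.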

200
cmd/explaintest/run-tests.sh

@@ -0,0 +1,200 @@
#!/usr/bin/env bash
# Copyright 2019 PingCAP, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
TIDB_TEST_STORE_NAME=$TIDB_TEST_STORE_NAME
TIKV_PATH=$TIKV_PATH
build=1
explain_test="./explain_test"
importer=""
tidb_server=""
explain_test_log="./explain-test.out"
tests=""
record=0
record_case=""
create=0
create_case=""
set -eu
trap 'set +e; PIDS=$(jobs -p); [ -n "$PIDS" ] && kill -9 $PIDS' EXIT
function help_message()
{
echo "Usage: $0 [options]
-h: Print this help message.
-s <tidb-server-path>: Use tidb-server in <tidb-server-path> for testing.
eg. \"./run-tests.sh -s ./explaintest_tidb-server\"
-b <y|Y|n|N>: \"y\" or \"Y\" for building test binaries [default \"y\" if this option is not specified].
\"n\" or \"N\" for not to build.
The building of tidb-server will be skiped if \"-s <tidb-server-path>\" is provided.
-r <test-name>|all: Run tests in file \"t/<test-name>.test\" and record result to file \"r/<test-name>.result\".
\"all\" for running all tests and record their results.
-t <test-name>: Run tests in file \"t/<test-name>.test\".
This option will be ignored if \"-r <test-name>\" is provided.
Run all tests if this option is not provided.
-c <test-name>|all: Create data according to the CREATE statements in file \"t/<test-name>.test\" and save stats in \"s/<test-name>_tableName.json\".
<test-name> must have a suffix of '_stats'.
\"all\" for creating stats of all tests.
-i <importer-path>: Use importer in <importer-path> for creating data.
"
}
function build_importer()
{
importer="./importer"
echo "building importer binary: $importer"
rm -rf $importer
GO111MODULE=on go build -o $importer github.com/pingcap/tidb/cmd/importer
}
function build_tidb_server()
{
tidb_server="./explaintest_tidb-server"
echo "building tidb-server binary: $tidb_server"
rm -rf $tidb_server
GO111MODULE=on go build -race -o $tidb_server github.com/pingcap/tidb/tidb-server
}
function build_explain_test()
{
echo "building explain-test binary: $explain_test"
rm -rf $explain_test
GO111MODULE=on go build -o $explain_test
}
while getopts "t:s:r:b:c:i:h" opt; do
case $opt in
t)
tests="$OPTARG"
;;
s)
tidb_server="$OPTARG"
;;
r)
record=1
record_case="$OPTARG"
;;
b)
case $OPTARG in
y|Y)
build=1
;;
n|N)
build=0
;;
*)
help_message 1>&2
exit 1
;;
esac
;;
h)
help_message
exit 0
;;
c)
create=1
create_case="$OPTARG"
;;
i)
importer="$OPTARG"
;;
*)
help_message 1>&2
exit 1
;;
esac
done
if [ $build -eq 1 ]; then
if [ -z "$tidb_server" ]; then
build_tidb_server
else
echo "skip building tidb-server, using existing binary: $tidb_server"
fi
if [[ -z "$importer" && $create -eq 1 ]]; then
build_importer
else
echo "skip building importer, using existing binary: $importer"
fi
build_explain_test
else
if [ -z "$tidb_server" ]; then
tidb_server="./explaintest_tidb-server"
fi
if [ -z "$explain_test" ]; then
explain_test="./explain_test"
fi
if [ -z "$importer" ]; then
importer="./importer"
fi
echo "skip building tidb-server, using existing binary: $tidb_server"
echo "skip building explaintest, using existing binary: $explain_test"
echo "skip building importer, using existing binary: $importer"
fi
rm -rf $explain_test_log
echo "start tidb-server, log file: $explain_test_log"
if [ "${TIDB_TEST_STORE_NAME}" = "tikv" ]; then
$tidb_server -config config.toml -store tikv -path "${TIKV_PATH}" > $explain_test_log 2>&1 &
SERVER_PID=$!
else
$tidb_server -config config.toml -store mocktikv -path "" > $explain_test_log 2>&1 &
SERVER_PID=$!
fi
echo "tidb-server(PID: $SERVER_PID) started"
sleep 5
if [ $record -eq 1 ]; then
if [ "$record_case" = 'all' ]; then
echo "record all cases"
$explain_test --record --log-level=error
else
echo "record result for case: \"$record_case\""
$explain_test --record $record_case --log-level=error
fi
elif [ $create -eq 1 ]; then
if [ "$create_case" = 'all' ]; then
echo "create all cases"
$explain_test --create --log-level=error
else
echo "create result for case: \"$create_case\""
$explain_test --create $create_case --log-level=error
fi
else
if [ -z "$tests" ]; then
echo "run all explain test cases"
else
echo "run explain test cases: $tests"
fi
$explain_test --log-level=error $tests
fi
race=$(grep 'DATA RACE' $explain_test_log || true)
if [ -n "$race" ]; then
echo "tidb-server DATA RACE!"
cat $explain_test_log
exit 1
fi
echo "explaintest end"

1
cmd/explaintest/s/explain_complex_stats_dd.json

File diff suppressed because one or more lines are too long

1
cmd/explaintest/s/explain_complex_stats_dt.json

File diff suppressed because one or more lines are too long

1
cmd/explaintest/s/explain_complex_stats_pp.json

File diff suppressed because one or more lines are too long

1
cmd/explaintest/s/explain_complex_stats_rr.json

File diff suppressed because one or more lines are too long

1
cmd/explaintest/s/explain_complex_stats_st.json

File diff suppressed because one or more lines are too long

1
cmd/explaintest/s/explain_complex_stats_tbl_001.json

File diff suppressed because one or more lines are too long

1
cmd/explaintest/s/explain_complex_stats_tbl_002.json

File diff suppressed because one or more lines are too long

1
cmd/explaintest/s/explain_complex_stats_tbl_003.json

File diff suppressed because one or more lines are too long

1
cmd/explaintest/s/explain_complex_stats_tbl_004.json

File diff suppressed because one or more lines are too long

1
cmd/explaintest/s/explain_complex_stats_tbl_005.json

File diff suppressed because one or more lines are too long

1
cmd/explaintest/s/explain_complex_stats_tbl_006.json

File diff suppressed because one or more lines are too long

1
cmd/explaintest/s/explain_complex_stats_tbl_007.json

File diff suppressed because one or more lines are too long

1
cmd/explaintest/s/explain_complex_stats_tbl_008.json

File diff suppressed because one or more lines are too long

1
cmd/explaintest/s/explain_complex_stats_tbl_009.json

File diff suppressed because one or more lines are too long

1
cmd/explaintest/s/explain_easy_stats_index_prune.json

File diff suppressed because one or more lines are too long

1
cmd/explaintest/s/explain_easy_stats_t1.json

File diff suppressed because one or more lines are too long

1
cmd/explaintest/s/explain_easy_stats_t2.json

File diff suppressed because one or more lines are too long

1
cmd/explaintest/s/explain_easy_stats_t3.json

File diff suppressed because one or more lines are too long

1
cmd/explaintest/s/explain_easy_stats_tbl_dnf.json

File diff suppressed because one or more lines are too long

1
cmd/explaintest/s/explain_indexmerge_stats_t.json

File diff suppressed because one or more lines are too long

46859
cmd/explaintest/s/explain_join_stats_e.json

File diff suppressed because it is too large

1522
cmd/explaintest/s/explain_join_stats_lo.json

File diff suppressed because it is too large

1
cmd/explaintest/s/explain_stats_t.json

File diff suppressed because one or more lines are too long

55951
cmd/explaintest/s/explain_union_scan.json

File diff suppressed because it is too large

1
cmd/explaintest/s/tpch_stats/customer.json

File diff suppressed because one or more lines are too long

1
cmd/explaintest/s/tpch_stats/lineitem.json

File diff suppressed because one or more lines are too long

1
cmd/explaintest/s/tpch_stats/nation.json

File diff suppressed because one or more lines are too long

1
cmd/explaintest/s/tpch_stats/orders.json

File diff suppressed because one or more lines are too long

Some files were not shown because too many files changed in this diff
