赞
踩
-- List every table and view in the target schema; table_type distinguishes
-- BASE TABLE from VIEW. '$dbname' is substituted by the caller before execution.
select
    table_name,
    table_type
from
    information_schema.tables
where
    table_schema = '$dbname'
table_type标识是表还是视图,
-- Dump the full DDL for one table; for MySQL this already includes index
-- definitions, so no separate index query is needed.
SHOW CREATE TABLE tpch.customer
-- Server output below: column list, primary key, storage engine, charset/collation.
CREATE TABLE `customer` (
`C_CUSTKEY` int NOT NULL,
`C_NAME` varchar(25) NOT NULL,
`C_ADDRESS` varchar(40) NOT NULL,
`C_NATIONKEY` int NOT NULL,
`C_PHONE` char(15) NOT NULL,
`C_ACCTBAL` decimal(15,2) NOT NULL,
`C_MKTSEGMENT` char(10) NOT NULL,
`C_COMMENT` varchar(117) NOT NULL,
PRIMARY KEY `PK_IDX1614428511` (`C_CUSTKEY`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin;
对于MySQL数据库,索引信息可以从建表语句中获取,无需单独获取。
-- SHOW CREATE TABLE also works on views and returns the view definition.
SHOW CREATE TABLE tpch.customer_v
-- Server output below: the expanded SELECT the view was created from.
create view `customer_v` as
select
`customer`.`C_CUSTKEY` as `C_CUSTKEY`,
`customer`.`C_NAME` as `C_NAME`,
`customer`.`C_ADDRESS` as `C_ADDRESS`,
`customer`.`C_NATIONKEY` as `C_NATIONKEY`,
`customer`.`C_PHONE` as `C_PHONE`,
`customer`.`C_ACCTBAL` as `C_ACCTBAL`,
`customer`.`C_MKTSEGMENT` as `C_MKTSEGMENT`,
`customer`.`C_COMMENT` as `C_COMMENT`
from
`customer`
where
(`customer`.`C_CUSTKEY` < 100)
MySQL不支持物化视图
-- Overview of all tables/views in a schema: type, storage engine, and row count.
-- NOTE: for InnoDB, table_rows is an optimizer estimate, not an exact count
-- (see the MySQL INFORMATION_SCHEMA.TABLES documentation).
select
    table_schema,
    table_name,
    table_type,
    engine,
    table_rows
from
    information_schema.tables
where
    -- FIX: '$dbname' must be quoted — table_schema is a string column, and the
    -- unquoted placeholder would parse as a bare identifier after substitution
    -- (the earlier query in this article quotes it the same way).
    table_schema = '$dbname';
查询结果
TABLE_SCHEMA | TABLE_NAME | TABLE_TYPE | ENGINE | TABLE_ROWS |
---|---|---|---|---|
tpch | customer | BASE TABLE | InnoDB | 9,935 |
tpch | customer_v | VIEW | NULL | NULL |
tpch | lineitem | BASE TABLE | InnoDB | 148,390 |
tpch | nation | BASE TABLE | InnoDB | 543 |
tpch | orders | BASE TABLE | InnoDB | 200,128 |
tpch | part | BASE TABLE | InnoDB | 721,764 |
tpch | partsupp | BASE TABLE | InnoDB | 248,270 |
tpch | region | BASE TABLE | InnoDB | 98,545 |
-- Refresh key-distribution statistics for the optimizer (details in the notes below).
analyze table customer;
- analyze table 会统计索引分布信息。
- 支持 InnoDB、NDB、MyISAM 等存储引擎
- 对于 MyISAM 表,相当于执行了一次 myisamchk --analyze
- 执行 analyze table 时,会对表加上读锁
- 该操作会记录binlog
- 不支持视图
-- Per-index persistent statistics collected by InnoDB (populated by ANALYZE TABLE):
-- n_diff_pfx* = distinct values per key prefix, n_leaf_pages/size = index size in pages.
select
    table_name,
    index_name,
    stat_name,
    stat_value,
    stat_description
from
    mysql.innodb_index_stats
where
    database_name = 'tpch'
-- Deterministic output order, matching how the result is presented.
order by
    table_name,
    index_name,
    stat_name;
查询结果
table_name | index_name | stat_name | stat_value | stat_description |
---|---|---|---|---|
customer | key_idx | n_diff_pfx01 | 9,935 | C_CUSTKEY |
customer | key_idx | n_leaf_pages | 133 | Number of leaf pages in the index |
customer | key_idx | size | 161 | Number of pages in the index |
lineitem | GEN_CLUST_INDEX | n_diff_pfx01 | 148,390 | DB_ROW_ID |
lineitem | GEN_CLUST_INDEX | n_leaf_pages | 1,562 | Number of leaf pages in the index |
lineitem | GEN_CLUST_INDEX | size | 1,571 | Number of pages in the index |
lineitem | l_partkey_idx | n_diff_pfx01 | 18,356 | L_PARTKEY |
lineitem | l_partkey_idx | n_diff_pfx02 | 149,721 | L_PARTKEY,DB_ROW_ID |
lineitem | l_partkey_idx | n_leaf_pages | 143 | Number of leaf pages in the index |
lineitem | l_partkey_idx | size | 225 | Number of pages in the index |
lineitem | l_shipdate_idx | n_diff_pfx01 | 15,745 | L_SHIPDATE |
lineitem | l_shipdate_idx | n_diff_pfx02 | 149,946 | L_SHIPDATE,DB_ROW_ID |
lineitem | l_shipdate_idx | n_leaf_pages | 134 | Number of leaf pages in the index |
lineitem | l_shipdate_idx | size | 161 | Number of pages in the index |
-- Build equi-height histograms (at most 100 buckets) on two columns of orders;
-- histograms feed the optimizer's selectivity estimates for non-indexed predicates.
analyze table orders update histogram on o_custkey, o_orderdate with 100 buckets;
-- Inspect stored histograms. histogram is a JSON column; ->> unquotes the
-- extracted "histogram-type" value (equivalent to JSON_UNQUOTE(JSON_EXTRACT(...))).
select
    schema_name,
    table_name,
    column_name,
    histogram->>'$."histogram-type"' as htype,  -- explicit AS for the alias
    histogram
from
    information_schema.column_statistics
where
    schema_name = 'tpch'
-- Deterministic output order.
order by
    table_name,
    column_name;
SCHEMA_NAME | TABLE_NAME | COLUMN_NAME | htype | HISTOGRAM |
---|---|---|---|---|
tpch | orders | O_CUSTKEY | equi-height | {"buckets": [[0, 803, 0.09997181005099819, 804], [804, 1682, 0.20001195937230382, 879], [1683, 3685, 0.30000939664966725, 2004], [3686, 6331, 0.3999897491094539, 2647], [6332, 8964, 0.4999957287956058, 2634], [8965, 284782258, 0.6000102508905462, 4304], [284876800, 743350400, 0.7000076881679096, 5371], [743377234, 1205176678, 0.8000136678540615, 5442], [1205354704, 1662703498, 0.8999940203138481, 5380], [1662881524, 2147483647, 1.0, 5502]], "data-type": "int", "null-values": 0.0, "collation-id": 8, "last-updated": "2023-05-11 08:12:50.964396", "sampling-rate": 0.5678184143966043, "histogram-type": "equi-height", "number-of-buckets-specified": 10} |
tpch | orders | O_ORDERDATE | equi-height | {"buckets": [["1900-01-01", "1924-11-27", 0.09999743727736347, 4533], ["1924-11-30", "1950-01-21", 0.20000341696351537, 4483], ["1950-01-22", "1975-04-21", 0.2999666846057251, 4562], ["1975-04-22", "2000-06-27", 0.3999982915182423, 4533], ["2000-07-01", "2020-03-05", 0.5000469832483364, 3249], ["2020-03-06", "2020-08-07", 0.599907741985085, 155], ["2020-08-08", "2021-01-09", 0.7000418578030633, 155], ["2021-01-10", "2021-06-12", 0.8002528553001376, 154], ["2021-06-13", "2021-11-14", 0.9002759198038663, 155], ["2021-11-15", "2022-09-01", 1.0, 179]], "data-type": "date", "null-values": 0.0, "collation-id": 8, "last-updated": "2023-05-11 08:12:50.965784", "sampling-rate": 0.5678184143966043, "histogram-type": "equi-height", "number-of-buckets-specified": 10} |
-- Tabular EXPLAIN: the optimizer picks a range scan on key_idx.
explain select C_NAME, C_ADDRESS from customer c where c.C_CUSTKEY < 100
-- Output columns (abridged): id, select_type, table, type, possible_keys/key, key_len, rows, filtered, Extra
1 SIMPLE c range key_idx key_idx 4 100 100.0 Using where
-- JSON EXPLAIN adds cost details: read/eval/prefix cost and data read per join.
explain format = json select C_NAME, C_ADDRESS
from customer c
where c.C_CUSTKEY < 100
{ "query_block": { "select_id": 1, "cost_info": { "query_cost": "20.30" }, "table": { "table_name": "c", "access_type": "range", "possible_keys": [ "key_idx" ], "key": "key_idx", "used_key_parts": [ "C_CUSTKEY" ], "key_length": "4", "rows_examined_per_scan": 100, "rows_produced_per_join": 100, "filtered": "100.00", "cost_info": { "read_cost": "10.30", "eval_cost": "10.00", "prefix_cost": "20.30", "data_read_per_join": "89K" }, "used_columns": [ "C_CUSTKEY", "C_NAME", "C_ADDRESS" ], "attached_condition": "(`tpch`.`c`.`C_CUSTKEY` < 100)" } } }
-- TREE format renders the plan as a nested operator tree with per-operator
-- cost and row estimates.
explain format = tree select C_NAME, C_ADDRESS
from customer c
where c.C_CUSTKEY < 100
-> Filter: (c.C_CUSTKEY < 100) (cost=20.30 rows=100)
-> Index range scan on c using key_idx over (C_CUSTKEY < 100) (cost=20.30 rows=100)
-- EXPLAIN ANALYZE actually executes the statement and reports measured
-- time/rows/loops next to the optimizer's estimates.
explain analyze select C_NAME, C_ADDRESS
from customer c
where c.C_CUSTKEY < 100
-> Filter: (c.C_CUSTKEY < 100) (cost=20.30 rows=100) (actual time=0.254..0.312 rows=100 loops=1)
-> Index range scan on c using key_idx over (C_CUSTKEY < 100) (cost=20.30 rows=100) (actual time=0.017..0.069 rows=100 loops=1)
PawSQL专注数据库性能优化的自动化和智能化,支持MySQL,PostgreSQL,Opengauss,Oracle等,提供的SQL优化产品包括
网址: https://app.pawsql.com
Twitter: https://twitter.com/pawsql
扫描关注PawSQL公众号
Copyright © 2003-2013 www.wpsshop.cn 版权所有,并保留所有权利。